Changeset 3d5701e for libcfa/src/heap.cfa
- Timestamp: Feb 25, 2020, 1:17:33 PM (6 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 7dc2e015
- Parents: 9fb8f01 (diff), dd9e1ca (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Files: 1 edited: libcfa/src/heap.cfa (modified) (40 diffs)
Legend:
- unmodified lines are shown with a leading space
- added lines are prefixed with +
- removed lines are prefixed with -
libcfa/src/heap.cfa
--- libcfa/src/heap.cfa (r9fb8f01)
+++ libcfa/src/heap.cfa (r3d5701e)

 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Wed Jul 24 13:12:45 2019
-// Update Count     : 550
+// Last Modified On : Tue Feb  4 10:04:51 2020
+// Update Count     : 648
 //
…
 #include <stdio.h>				// snprintf, fileno
 #include <errno.h>				// errno
+#include <string.h>				// memset, memcpy
 extern "C" {
 #include <sys/mman.h>				// mmap, munmap
…
 #include "bits/locks.hfa"			// __spinlock_t
 #include "startup.hfa"				// STARTUP_PRIORITY_MEMORY
-#include "stdlib.hfa"				// bsearchl
+//#include "stdlib.hfa"				// bsearchl
 #include "malloc.h"

+#define MIN(x, y) (y > x ? x : y)

 static bool traceHeap = false;

-inline bool traceHeap() {
-	return traceHeap;
-} // traceHeap
+inline bool traceHeap() { return traceHeap; }

 bool traceHeapOn() {
…
 } // traceHeapOff

+bool traceHeapTerm() { return false; }

-static bool checkFree = false;
-
-inline bool checkFree() {
-	return checkFree;
-} // checkFree
-
-bool checkFreeOn() {
-	bool temp = checkFree;
-	checkFree = true;
+static bool prtFree = false;
+
+inline bool prtFree() {
+	return prtFree;
+} // prtFree
+
+bool prtFreeOn() {
+	bool temp = prtFree;
+	prtFree = true;
 	return temp;
-} // checkFreeOn
-
-bool checkFreeOff() {
-	bool temp = checkFree;
-	checkFree = false;
+} // prtFreeOn
+
+bool prtFreeOff() {
+	bool temp = prtFree;
+	prtFree = false;
 	return temp;
-} // checkFreeOff
-
-
-// static bool traceHeapTerm = false;
-
-// inline bool traceHeapTerm() {
-//	return traceHeapTerm;
-// } // traceHeapTerm
-
-// bool traceHeapTermOn() {
-//	bool temp = traceHeapTerm;
-//	traceHeapTerm = true;
-//	return temp;
-// } // traceHeapTermOn
-
-// bool traceHeapTermOff() {
-//	bool temp = traceHeapTerm;
-//	traceHeapTerm = false;
-//	return temp;
-// } // traceHeapTermOff
+} // prtFreeOff


 enum {
+	// Define the default extension heap amount in units of bytes. When the uC++ supplied heap reaches the brk address,
+	// the brk address is extended by the extension amount.
+	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
+
+	// Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets;
+	// values greater than or equal to this value are mmap from the operating system.
 	__CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1),
-	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
 };
…
 static unsigned int allocFree;			// running total of allocations minus frees

-static void checkUnfreed() {
+static void prtUnfreed() {
 	if ( allocFree != 0 ) {
 		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
-		//char helpText[512];
-		//int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"
-		//		"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
-		//		(long int)getpid(), allocFree, allocFree ); // always print the UNIX pid
-		// __cfaabi_dbg_bits_write( helpText, len );
-	} // if
-} // checkUnfreed
+		char helpText[512];
+		int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"
+				"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
+				(long int)getpid(), allocFree, allocFree ); // always print the UNIX pid
+		__cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
+	} // if
+} // prtUnfreed

 extern "C" {
…
 	void heapAppStop() {				// called by __cfaabi_appready_startdown
 		fclose( stdin ); fclose( stdout );
-		checkUnfreed();
+		prtUnfreed();
 	} // heapAppStop
 } // extern "C"
 #endif // __CFA_DEBUG__
+

 // statically allocated variables => zero filled.
…
 static unsigned int maxBucketsUsed;		// maximum number of buckets in use

-
-// #comment TD : This defined is significantly different from the __ALIGN__ define from locks.hfa
-#define ALIGN 16

 #define SPINLOCK 0
…
 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
 // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
-enum { NoBucketSizes = 93 };			// number of buckets sizes
+enum { NoBucketSizes = 91 };			// number of buckets sizes

 struct HeapManager {
…
 			} kind; // Kind
 		} header; // Header
-		char pad[ALIGN - sizeof( Header )];
+		char pad[libAlign() - sizeof( Header )];
 		char data[0];				// storage
 	}; // Storage

-	static_assert( ALIGN >= sizeof( Storage ), "ALIGN < sizeof( Storage )" );
+	static_assert( libAlign() >= sizeof( Storage ), "libAlign() < sizeof( Storage )" );

 	struct FreeHeader {
…
 #define __STATISTICS__

+// Bucket size must be multiple of 16.
 // Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
 static const unsigned int bucketSizes[] @= {	// different bucket sizes
-	16, 32, 48, 64,
-	64 + sizeof(HeapManager.Storage), 96, 112, 128, 128 + sizeof(HeapManager.Storage), 160, 192, 224,
-	256 + sizeof(HeapManager.Storage), 320, 384, 448, 512 + sizeof(HeapManager.Storage), 640, 768, 896,
-	1_024 + sizeof(HeapManager.Storage), 1_536, 2_048 + sizeof(HeapManager.Storage), 2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), 6_144,
-	8_192 + sizeof(HeapManager.Storage), 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360,
-	16_384 + sizeof(HeapManager.Storage), 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720,
-	32_768 + sizeof(HeapManager.Storage), 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440,
-	65_536 + sizeof(HeapManager.Storage), 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880,
-	131_072 + sizeof(HeapManager.Storage), 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760,
-	262_144 + sizeof(HeapManager.Storage), 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520,
-	524_288 + sizeof(HeapManager.Storage), 655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), 1_179_648, 1_310_720, 1_441_792,
-	1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), 2_621_440, 3_145_728, 3_670_016,
-	4_194_304 + sizeof(HeapManager.Storage)
+	16, 32, 48, 64 + sizeof(HeapManager.Storage), // 4
+	96, 112, 128 + sizeof(HeapManager.Storage), // 3
+	160, 192, 224, 256 + sizeof(HeapManager.Storage), // 4
+	320, 384, 448, 512 + sizeof(HeapManager.Storage), // 4
+	640, 768, 896, 1_024 + sizeof(HeapManager.Storage), // 4
+	1_536, 2_048 + sizeof(HeapManager.Storage), // 2
+	2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), // 4
+	6_144, 8_192 + sizeof(HeapManager.Storage), // 2
+	9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(HeapManager.Storage), // 8
+	18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(HeapManager.Storage), // 8
+	36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(HeapManager.Storage), // 8
+	73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(HeapManager.Storage), // 8
+	147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(HeapManager.Storage), // 8
+	294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(HeapManager.Storage), // 8
+	655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), // 4
+	1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), // 8
+	2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(HeapManager.Storage), // 4
 };
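The regrouped table now reads one size class per line with a bucket-count comment, making the new "multiple of 16" invariant easy to audit. For orientation, here is a minimal standalone sketch (not the library code; bucketIndex and the shortened buckets table are hypothetical) of the binary search that Bsearchl performs below to map a request to the first bucket at least as large:

    #include <assert.h>
    #include <stddef.h>

    static const unsigned int buckets[] = { 16, 32, 48, 64, 96, 112, 128 };
    enum { NBuckets = sizeof(buckets) / sizeof(buckets[0]) };

    // Return the index of the first bucket whose size is >= request.
    static size_t bucketIndex( unsigned int request ) {
        size_t lo = 0, hi = NBuckets;           // search the half-open range [lo, hi)
        while ( lo < hi ) {
            size_t mid = (lo + hi) / 2;
            if ( buckets[mid] < request ) lo = mid + 1;
            else hi = mid;
        } // while
        return lo;
    } // bucketIndex

    int main() {
        assert( buckets[bucketIndex( 40 )] == 48 ); // 40-byte request rounds up to the 48-byte bucket
        assert( buckets[bucketIndex( 64 )] == 64 ); // exact fit stays in place
    } // main

setMmapStart() below uses the same search to cap maxBucketsUsed at the sbrk/mmap crossover.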
…
 static unsigned char lookup[LookupSizes];	// O(1) lookup for small sizes
 #endif // FASTLOOKUP
+
 static int mmapFd = -1;				// fake or actual fd for anonymous file
-
-
 #ifdef __CFA_DEBUG__
 static bool heapBoot = 0;				// detect recursion during boot
…
 static HeapManager heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing

-// #comment TD : The return type of this function should be commented
-static inline bool setMmapStart( size_t value ) {
-	if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
-	mmapStart = value;				// set global
-
-	// find the closest bucket size less than or equal to the mmapStart size
-	maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
-	assert( maxBucketsUsed < NoBucketSizes );	// subscript failure ?
-	assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
-	return false;
-} // setMmapStart
-
-
-static void ?{}( HeapManager & manager ) with ( manager ) {
-	pageSize = sysconf( _SC_PAGESIZE );
-
-	for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
-		freeLists[i].blockSize = bucketSizes[i];
-	} // for
-
-	#ifdef FASTLOOKUP
-	unsigned int idx = 0;
-	for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
-		if ( i > bucketSizes[idx] ) idx += 1;
-		lookup[i] = idx;
-	} // for
-	#endif // FASTLOOKUP
-
-	if ( setMmapStart( default_mmap_start() ) ) {
-		abort( "HeapManager : internal error, mmap start initialization failure." );
-	} // if
-	heapExpand = default_heap_expansion();
-
-	char * End = (char *)sbrk( 0 );
-	sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End ); // move start of heap to multiple of alignment
-	heapBegin = heapEnd = sbrk( 0 );		// get new start point
-} // HeapManager
-
-
-static void ^?{}( HeapManager & ) {
-	#ifdef __STATISTICS__
-	// if ( traceHeapTerm() ) {
-	//	printStats();
-	//	if ( checkfree() ) checkFree( heapManager, true );
-	// } // if
-	#endif // __STATISTICS__
-} // ~HeapManager
-
-
-static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
-void memory_startup( void ) {
-	#ifdef __CFA_DEBUG__
-	if ( unlikely( heapBoot ) ) {			// check for recursion during system boot
-		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
-		abort( "boot() : internal error, recursively invoked during system boot." );
-	} // if
-	heapBoot = true;
-	#endif // __CFA_DEBUG__
-
-	//assert( heapManager.heapBegin != 0 );
-	//heapManager{};
-	if ( heapManager.heapBegin == 0 ) heapManager{};
-} // memory_startup
-
-static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
-void memory_shutdown( void ) {
-	^heapManager{};
-} // memory_shutdown

 #ifdef __STATISTICS__
-static unsigned long long int mmap_storage;	// heap statistics counters
+// Heap statistics counters.
+static unsigned long long int mmap_storage;
 static unsigned int mmap_calls;
 static unsigned long long int munmap_storage;
…
 static unsigned long long int realloc_storage;
 static unsigned int realloc_calls;
-
-static int statfd;					// statistics file descriptor (changed by malloc_stats_fd)
-
+// Statistics file descriptor (changed by malloc_stats_fd).
+static int statfd = STDERR_FILENO;		// default stderr

 // Use "write" because streams may be shutdown when calls are made.
 static void printStats() {
 	char helpText[512];
-	__cfaabi_dbg_bits_print_buffer( helpText, sizeof(helpText),
+	__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
			"\nHeap statistics:\n"
			"  malloc: calls %u / storage %llu\n"
…
			sbrk_calls, sbrk_storage
		);
-	return write( fileno( stream ), helpText, len ); // -1 => error
+	__cfaabi_bits_write( fileno( stream ), helpText, len ); // ensures all bytes written or exit
+	return len;
 } // printStatsXML
 #endif // __STATISTICS__

-// #comment TD : Is this the samething as Out-of-Memory?
-static inline void noMemory() {
-	abort( "Heap memory exhausted at %zu bytes.\n"
-		"Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
-		((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
-} // noMemory
+
+// static inline void noMemory() {
+//	abort( "Heap memory exhausted at %zu bytes.\n"
+//		"Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
+//		((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
+// } // noMemory


 static inline void checkAlign( size_t alignment ) {
-	if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) {
-		abort( "Alignment %zu for memory allocation is less than sizeof(void *) and/or not a power of 2.", alignment );
+	if ( alignment < libAlign() || ! libPow2( alignment ) ) {
+		abort( "Alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
 	} // if
 } // checkAlign
…
-static inline void checkHeader( bool check, const char * name, void * addr ) {
-	if ( unlikely( check ) ) {			// bad address ?
-		abort( "Attempt to %s storage %p with address outside the heap.\n"
-			"Possible cause is duplicate free on same block or overwriting of memory.",
-			name, addr );
-	} // if
-} // checkHeader
-
-// #comment TD : function should be commented and/or have a more evocative name
-// this isn't either a check or a constructor which is what I would expect this function to be
-static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & size, size_t & alignment ) {
-	if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
-		size_t offset = header->kind.fake.offset;
-		alignment = header->kind.fake.alignment & -2; // remove flag from value
-		#ifdef __CFA_DEBUG__
-		checkAlign( alignment );			// check alignment
-		#endif // __CFA_DEBUG__
-		header = (HeapManager.Storage.Header *)((char *)header - offset);
-	} // if
-} // fakeHeader
-
-// #comment TD : Why is this a define
-#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))
-
-static inline bool headers( const char * name, void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
-	header = headerAddr( addr );
-
-	if ( unlikely( heapEnd < addr ) ) {		// mmapped ?
-		fakeHeader( header, size, alignment );
-		size = header->kind.real.blockSize & -3; // mmap size
-		return true;
-	} // if
-
-	#ifdef __CFA_DEBUG__
-	checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
-	#endif // __CFA_DEBUG__
-
-	// #comment TD : This code looks weird...
-	// It's called as the first statement of both branches of the last if, with the same parameters in all cases
-
-	// header may be safe to dereference
-	fakeHeader( header, size, alignment );
-	#ifdef __CFA_DEBUG__
-	checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ?
(offset could be + or -)
-	#endif // __CFA_DEBUG__
-
-	freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
-	#ifdef __CFA_DEBUG__
-	if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
-		abort( "Attempt to %s storage %p with corrupted header.\n"
-			"Possible cause is duplicate free on same block or overwriting of header information.",
-			name, addr );
-	} // if
-	#endif // __CFA_DEBUG__
-	size = freeElem->blockSize;
-	return false;
-} // headers
-
-
-static inline void * extend( size_t size ) with ( heapManager ) {
-	lock( extlock __cfaabi_dbg_ctx2 );
-	ptrdiff_t rem = heapRemaining - size;
-	if ( rem < 0 ) {
-		// If the size requested is bigger than the current remaining storage, increase the size of the heap.
-
-		size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
-		if ( sbrk( increase ) == (void *)-1 ) {
-			unlock( extlock );
-			errno = ENOMEM;
-			return 0;
-		} // if
-		#ifdef __STATISTICS__
-		sbrk_calls += 1;
-		sbrk_storage += increase;
-		#endif // __STATISTICS__
-		#ifdef __CFA_DEBUG__
-		// Set new memory to garbage so subsequent uninitialized usages might fail.
-		memset( (char *)heapEnd + heapRemaining, '\377', increase );
-		#endif // __CFA_DEBUG__
-		rem = heapRemaining + increase - size;
-	} // if
-
-	HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
-	heapRemaining = rem;
-	heapEnd = (char *)heapEnd + size;
-	unlock( extlock );
-	return block;
-} // extend
…
+// thunk problem
 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) {
 	size_t l = 0, m, h = dim;
…

+static inline bool setMmapStart( size_t value ) { // true => mmapped, false => sbrk
+	if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
+	mmapStart = value;				// set global
+
+	// find the closest bucket size less than or equal to the mmapStart size
+	maxBucketsUsed = Bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
+	assert( maxBucketsUsed < NoBucketSizes );	// subscript failure ?
+	assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
+	return false;
+} // setMmapStart
+
+
+static inline void checkHeader( bool check, const char name[], void * addr ) {
+	if ( unlikely( check ) ) {			// bad address ?
+		abort( "Attempt to %s storage %p with address outside the heap.\n"
+			"Possible cause is duplicate free on same block or overwriting of memory.",
+			name, addr );
+	} // if
+} // checkHeader
+
+
+static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) {
+	if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
+		size_t offset = header->kind.fake.offset;
+		alignment = header->kind.fake.alignment & -2; // remove flag from value
+		#ifdef __CFA_DEBUG__
+		checkAlign( alignment );			// check alignment
+		#endif // __CFA_DEBUG__
+		header = (HeapManager.Storage.Header *)((char *)header - offset);
+	} // if
+} // fakeHeader
+
+
+// <-------+----------------------------------------------------> bsize (bucket size)
+// |header |addr
+//==================================================================================
+//                                | alignment
+// <-----------------<------------+-----------------------------> bsize (bucket size)
+//                   |fake-header | addr
+#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))
+
+// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
+// |header |addr
+//==================================================================================
+//                                | alignment
+// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
+//                                |fake-header |addr
+#define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
+
+
+static inline bool headers( const char name[] __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
+	header = headerAddr( addr );
+
+	if ( unlikely( heapEnd < addr ) ) {		// mmapped ?
+		fakeHeader( header, alignment );
+		size = header->kind.real.blockSize & -3; // mmap size
+		return true;
+	} // if
+
+	#ifdef __CFA_DEBUG__
+	checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
+	#endif // __CFA_DEBUG__
+
+	// header may be safe to dereference
+	fakeHeader( header, alignment );
+	#ifdef __CFA_DEBUG__
+	checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
+	#endif // __CFA_DEBUG__
+
+	freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
+	#ifdef __CFA_DEBUG__
+	if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
+		abort( "Attempt to %s storage %p with corrupted header.\n"
+			"Possible cause is duplicate free on same block or overwriting of header information.",
+			name, addr );
+	} // if
+	#endif // __CFA_DEBUG__
+	size = freeElem->blockSize;
+	return false;
+} // headers
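The two diagrams above are the heart of the alignment scheme: an aligned allocation over-allocates, then plants a "fake" header immediately below the returned address whose low alignment bit is set and whose offset points back to the real header. A self-contained sketch of that trick, under simplified assumptions (ordinary malloc underneath, a hypothetical two-word Header, alignment a power of 2):

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct { size_t offset; size_t alignment; } Header; // stand-in for Storage.Header

    // Over-allocate, round the user address up, and plant a fake header just below it.
    void * alignedAlloc( size_t alignment, size_t size ) {     // alignment: power of 2
        char * raw = malloc( size + alignment + sizeof(Header) ); // worst-case padding
        if ( raw == NULL ) return NULL;
        uintptr_t user = ((uintptr_t)raw + sizeof(Header) + alignment - 1) & ~(alignment - 1);
        Header * fake = (Header *)user - 1;                    // fake header sits below the user data
        fake->offset = user - (uintptr_t)raw;                  // distance back to the real block
        fake->alignment = alignment | 1;                       // low bit flags "this header is fake"
        return (void *)user;
    } // alignedAlloc

    // Only valid for storage returned by alignedAlloc.
    void alignedFree( void * addr ) {
        Header * fake = (Header *)addr - 1;
        free( (char *)addr - fake->offset );                   // recover the start of the real block
    } // alignedFree

In the real code, headerAddr() finds whichever header sits below addr, and fakeHeader() follows the stored offset back to the real header when the flag bit is set.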
+
+
+static inline void * extend( size_t size ) with ( heapManager ) {
+	lock( extlock __cfaabi_dbg_ctx2 );
+	ptrdiff_t rem = heapRemaining - size;
+	if ( rem < 0 ) {
+		// If the size requested is bigger than the current remaining storage, increase the size of the heap.
+
+		size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
+		if ( sbrk( increase ) == (void *)-1 ) {
+			unlock( extlock );
+			errno = ENOMEM;
+			return 0p;
+		} // if
+		#ifdef __STATISTICS__
+		sbrk_calls += 1;
+		sbrk_storage += increase;
+		#endif // __STATISTICS__
+		#ifdef __CFA_DEBUG__
+		// Set new memory to garbage so subsequent uninitialized usages might fail.
+		memset( (char *)heapEnd + heapRemaining, '\377', increase );
+		#endif // __CFA_DEBUG__
+		rem = heapRemaining + increase - size;
+	} // if
+
+	HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
+	heapRemaining = rem;
+	heapEnd = (char *)heapEnd + size;
+	unlock( extlock );
+	return block;
+} // extend


 static inline void * doMalloc( size_t size ) with ( heapManager ) {
 	HeapManager.Storage * block;			// pointer to new block of storage
…
 	// along with the block and is a multiple of the alignment size.

+	if ( unlikely( size > ~0ul - sizeof(HeapManager.Storage) ) ) return 0p;
 	size_t tsize = size + sizeof(HeapManager.Storage);
 	if ( likely( tsize < mmapStart ) ) {		// small size => sbrk
…
 		block = freeElem->freeList.pop();
 		#endif // SPINLOCK
-		if ( unlikely( block == 0 ) ) {		// no free block ?
+		if ( unlikely( block == 0p ) ) {	// no free block ?
 			#if defined( SPINLOCK )
 			unlock( freeElem->lock );
…
 			block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call
-			if ( unlikely( block == 0 ) ) return 0;
+			if ( unlikely( block == 0p ) ) return 0p;
 		#if defined( SPINLOCK )
 		} else {
 			freeElem->freeList = block->header.kind.real.next;
 			unlock( freeElem->lock );
 		#endif // SPINLOCK
 		} // if

 		block->header.kind.real.home = freeElem;	// pointer back to free list of appropriate size
 	} else {					// large size => mmap
+		if ( unlikely( size > ~0ul - pageSize ) ) return 0p;
 		tsize = libCeiling( tsize, pageSize );	// must be multiple of page size
 		#ifdef __STATISTICS__
…
 	} // if

-	void * area = &(block->data);			// adjust off header to user bytes
+	void * addr = &(block->data);			// adjust off header to user bytes

 	#ifdef __CFA_DEBUG__
-	assert( ((uintptr_t)area & (libAlign() - 1)) == 0 ); // minimum alignment ?
+	assert( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ?
 	__atomic_add_fetch( &allocFree, tsize, __ATOMIC_SEQ_CST );
 	if ( traceHeap() ) {
 		enum { BufferSize = 64 };
 		char helpText[BufferSize];
-		int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", area, size, tsize );
-		// int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", area, size );
-		__cfaabi_dbg_bits_write( helpText, len );
+		int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize );
+		// int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", addr, size );
+		__cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
 	} // if
 	#endif // __CFA_DEBUG__

-	return area;
+	return addr;
 } // doMalloc
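The two new guards in doMalloc() (size > ~0ul - sizeof(HeapManager.Storage) and size > ~0ul - pageSize) exist because the header and page adjustments are added to the request before bucket selection; without them, a near-maximum request silently wraps. A tiny illustration of the wraparound being prevented:

    #include <stdio.h>

    int main() {
        unsigned long size = ~0ul - 8;          // pathological near-maximum request
        unsigned long tsize = size + 16;        // header adjustment wraps around
        printf( "tsize = %lu\n", tsize );       // prints 7: a tiny, bogus total size
    } // main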

 static inline void doFree( void * addr ) with ( heapManager ) {
 	#ifdef __CFA_DEBUG__
-	if ( unlikely( heapManager.heapBegin == 0 ) ) {
+	if ( unlikely( heapManager.heapBegin == 0p ) ) {
 		abort( "doFree( %p ) : internal error, called before heap is initialized.", addr );
 	} // if
…
 		char helpText[BufferSize];
 		int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );
-		__cfaabi_dbg_bits_write( helpText, len );
+		__cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
 	} // if
 	#endif // __CFA_DEBUG__
…

-size_t checkFree( HeapManager & manager ) with ( manager ) {
+size_t prtFree( HeapManager & manager ) with ( manager ) {
 	size_t total = 0;
 	#ifdef __STATISTICS__
-	__cfaabi_dbg_bits_acquire();
-	__cfaabi_dbg_bits_print_nolock( "\nBin lists (bin size : free blocks on list)\n" );
+	__cfaabi_bits_acquire();
+	__cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" );
 	#endif // __STATISTICS__
 	for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {
…
 		#if defined( SPINLOCK )
-		for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0; p = p->header.kind.real.next ) {
+		for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
 		#else
-		for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0; p = p->header.kind.real.next.top ) {
+		for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0p; p = p->header.kind.real.next.top ) {
 		#endif // SPINLOCK
 			total += size;
…
 		#ifdef __STATISTICS__
-		__cfaabi_dbg_bits_print_nolock( "%7zu, %-7u  ", size, N );
-		if ( (i + 1) % 8 == 0 ) __cfaabi_dbg_bits_print_nolock( "\n" );
+		__cfaabi_bits_print_nolock( STDERR_FILENO, "%7zu, %-7u  ", size, N );
+		if ( (i + 1) % 8 == 0 ) __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" );
 		#endif // __STATISTICS__
 	} // for
 	#ifdef __STATISTICS__
-	__cfaabi_dbg_bits_print_nolock( "\ntotal free blocks:%zu\n", total );
-	__cfaabi_dbg_bits_release();
+	__cfaabi_bits_print_nolock( STDERR_FILENO, "\ntotal free blocks:%zu\n", total );
+	__cfaabi_bits_release();
 	#endif // __STATISTICS__
 	return (char *)heapEnd - (char *)heapBegin - total;
-} // checkFree
+} // prtFree
+
+
+static void ?{}( HeapManager & manager ) with ( manager ) {
+	pageSize = sysconf( _SC_PAGESIZE );
+
+	for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
+		freeLists[i].blockSize = bucketSizes[i];
+	} // for
+
+	#ifdef FASTLOOKUP
+	unsigned int idx = 0;
+	for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
+		if ( i > bucketSizes[idx] ) idx += 1;
+		lookup[i] = idx;
+	} // for
+	#endif // FASTLOOKUP
+
+	if ( setMmapStart( default_mmap_start() ) ) {
+		abort( "HeapManager : internal error, mmap start initialization failure." );
+	} // if
+	heapExpand = default_heap_expansion();
+
+	char * end = (char *)sbrk( 0 );
+	sbrk( (char *)libCeiling( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment
+	heapBegin = heapEnd = sbrk( 0 );		// get new start point
+} // HeapManager
+
+
+static void ^?{}( HeapManager & ) {
+	#ifdef __STATISTICS__
+	if ( traceHeapTerm() ) {
+		printStats();
+		// if ( prtfree() ) prtFree( heapManager, true );
+	} // if
+	#endif // __STATISTICS__
+} // ~HeapManager
+
+
+static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
+void memory_startup( void ) {
+	#ifdef __CFA_DEBUG__
+	if ( unlikely( heapBoot ) ) {			// check for recursion during system boot
+		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
+		abort( "boot() : internal error, recursively invoked during system boot." );
+	} // if
+	heapBoot = true;
+	#endif // __CFA_DEBUG__
+
+	//assert( heapManager.heapBegin != 0 );
+	//heapManager{};
+	if ( heapManager.heapBegin == 0p ) heapManager{};
+} // memory_startup
+
+static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
+void memory_shutdown( void ) {
+	^heapManager{};
+} // memory_shutdown


 static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics
 	//assert( heapManager.heapBegin != 0 );
-	if ( unlikely( heapManager.heapBegin == 0 ) ) heapManager{}; // called before memory_startup ?
-	void * area = doMalloc( size );
-	if ( unlikely( area == 0 ) ) errno = ENOMEM; // POSIX
-	return area;
+	if ( unlikely( heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ?
+	void * addr = doMalloc( size );
+	if ( unlikely( addr == 0p ) ) errno = ENOMEM;	// POSIX
+	return addr;
 } // mallocNoStats
+
+
+static inline void * callocNoStats( size_t noOfElems, size_t elemSize ) {
+	size_t size = noOfElems * elemSize;
+	char * addr = (char *)mallocNoStats( size );
+	if ( unlikely( addr == 0p ) ) return 0p;
+
+	HeapManager.Storage.Header * header;
+	HeapManager.FreeHeader * freeElem;
+	size_t bsize, alignment;
+	bool mapped __attribute__(( unused )) = headers( "calloc", addr, header, freeElem, bsize, alignment );
+	#ifndef __CFA_DEBUG__
+	// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
+	if ( ! mapped )
+	#endif // __CFA_DEBUG__
+		// Zero entire data space even when > than size => realloc without a new allocation and zero fill works.
+		// <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size)
+		// `-header`-addr                      `-size
+		memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros
+
+	header->kind.real.blockSize |= 2;		// mark as zero filled
+	return addr;
+} // callocNoStats
…
 	// subtract libAlign() because it is already the minimum alignment
 	// add sizeof(Storage) for fake header
-	// #comment TD : this is the only place that calls doMalloc without calling mallocNoStats, why ?
-	char * area = (char *)doMalloc( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
-	if ( unlikely( area == 0 ) ) return area;
+	char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
+	if ( unlikely( addr == 0p ) ) return addr;

 	// address in the block of the "next" alignment address
-	char * user = (char *)libCeiling( (uintptr_t)(area + sizeof(HeapManager.Storage)), alignment );
+	char * user = (char *)libCeiling( (uintptr_t)(addr + sizeof(HeapManager.Storage)), alignment );

 	// address of header from malloc
-	HeapManager.Storage.Header * realHeader = headerAddr( area );
+	HeapManager.Storage.Header * realHeader = headerAddr( addr );
 	// address of fake header * before* the alignment location
 	HeapManager.Storage.Header * fakeHeader = headerAddr( user );
…
 	return user;
 } // memalignNoStats
+
+
+static inline void * cmemalignNoStats( size_t alignment, size_t noOfElems, size_t elemSize ) {
+	size_t size = noOfElems * elemSize;
+	char * addr = (char *)memalignNoStats( alignment, size );
+	if ( unlikely( addr == 0p ) ) return 0p;
+	HeapManager.Storage.Header * header;
+	HeapManager.FreeHeader * freeElem;
+	size_t bsize;
+	bool mapped __attribute__(( unused )) = headers( "cmemalign", addr, header, freeElem, bsize, alignment );
+	#ifndef __CFA_DEBUG__
+	// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
+	if ( ! mapped )
+	#endif // __CFA_DEBUG__
+		memset( addr, '\0', dataStorage( bsize, addr, header ) ); // set to zeros
+	header->kind.real.blockSize |= 2;		// mark as zero filled
+
+	return addr;
+} // cmemalignNoStats
…
 extern "C" {
 	// The malloc() function allocates size bytes and returns a pointer to the allocated memory. The memory is not
-	// initialized. If size is 0, then malloc() returns either NULL, or a unique pointer value that can later be
+	// initialized. If size is 0, then malloc() returns either 0p, or a unique pointer value that can later be
 	// successfully passed to free().
 	void * malloc( size_t size ) {
…
 	// The calloc() function allocates memory for an array of nmemb elements of size bytes each and returns a pointer to
-	// the allocated memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either NULL, or a
+	// the allocated memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either 0p, or a
 	// unique pointer value that can later be successfully passed to free().
 	void * calloc( size_t noOfElems, size_t elemSize ) {
-		size_t size = noOfElems * elemSize;
 		#ifdef __STATISTICS__
 		__atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST );
-		__atomic_add_fetch( &calloc_storage, size, __ATOMIC_SEQ_CST );
-		#endif // __STATISTICS__
-
-		char * area = (char *)mallocNoStats( size );
-		if ( unlikely( area == 0 ) ) return 0;
-
-		HeapManager.Storage.Header * header;
-		HeapManager.FreeHeader * freeElem;
-		size_t asize, alignment;
-		bool mapped __attribute__(( unused )) = headers( "calloc", area, header, freeElem, asize, alignment );
-		#ifndef __CFA_DEBUG__
-		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
-		if ( ! mapped )
-		#endif // __CFA_DEBUG__
-			memset( area, '\0', asize - sizeof(HeapManager.Storage) ); // set to zeros
-
-		header->kind.real.blockSize |= 2;	// mark as zero filled
-		return area;
+		__atomic_add_fetch( &calloc_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST );
+		#endif // __STATISTICS__
+
+		return callocNoStats( noOfElems, elemSize );
 	} // calloc

-	// #comment TD : Document this function
-	void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {
-		size_t size = noOfElems * elemSize;
-		#ifdef __STATISTICS__
-		__atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
-		__atomic_add_fetch( &cmemalign_storage, size, __ATOMIC_SEQ_CST );
-		#endif // __STATISTICS__
-
-		char * area = (char *)memalignNoStats( alignment, size );
-		if ( unlikely( area == 0 ) ) return 0;
-		HeapManager.Storage.Header * header;
-		HeapManager.FreeHeader * freeElem;
-		size_t asize;
-		bool mapped __attribute__(( unused )) = headers( "cmemalign", area, header, freeElem, asize, alignment );
-		#ifndef __CFA_DEBUG__
-		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
-		if ( ! mapped )
-		#endif // __CFA_DEBUG__
-			memset( area, '\0', asize - ( (char *)area - (char *)header ) ); // set to zeros
-		header->kind.real.blockSize |= 2;	// mark as zero filled
-
-		return area;
-	} // cmemalign
-
 	// The realloc() function changes the size of the memory block pointed to by ptr to size bytes. The contents will be
 	// unchanged in the range from the start of the region up to the minimum of the old and new sizes.
-	// If the new size is larger than the old size, the added memory will not be initialized. If ptr is NULL, then the call is
-	// equivalent to malloc(size), for all values of size; if size is equal to zero, and ptr is not NULL, then the call
-	// is equivalent to free(ptr). Unless ptr is NULL, it must have been returned by an earlier call to malloc(),
+	// If the new size is larger than the old size, the added memory will not be initialized. If ptr is 0p, then the call is
+	// equivalent to malloc(size), for all values of size; if size is equal to zero, and ptr is not 0p, then the call
+	// is equivalent to free(ptr). Unless ptr is 0p, it must have been returned by an earlier call to malloc(),
 	// calloc() or realloc(). If the area pointed to was moved, a free(ptr) is done.
-	void * realloc( void * addr, size_t size ) {
+	void * realloc( void * oaddr, size_t size ) {
 		#ifdef __STATISTICS__
 		__atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
 		#endif // __STATISTICS__

-		if ( unlikely( addr == 0 ) ) return mallocNoStats( size ); // special cases
-		if ( unlikely( size == 0 ) ) { free( addr ); return 0; }
+		// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
+		if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
+		if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );

 		HeapManager.Storage.Header * header;
 		HeapManager.FreeHeader * freeElem;
-		size_t asize, alignment = 0;
-		headers( "realloc", addr, header, freeElem, asize, alignment );
-
-		size_t usize = asize - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block
-		if ( usize >= size ) {			// already sufficient storage
+		size_t bsize, oalign = 0;
+		headers( "realloc", oaddr, header, freeElem, bsize, oalign );
+
+		size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
+		if ( size <= odsize && odsize <= size * 2 ) { // allow up to 50% wasted storage in smaller size
+			// Do not know size of original allocation => cannot do 0 fill for any additional space because do not know
+			// where to start filling, i.e., do not overwrite existing values in space.
+			//
 			// This case does not result in a new profiler entry because the previous one still exists and it must match with
 			// the free for this memory. Hence, this realloc does not appear in the profiler output.
-			return addr;
+			return oaddr;
 		} // if
…
 		#endif // __STATISTICS__

-		void * area;
-		if ( unlikely( alignment != 0 ) ) {	// previous request memalign?
-			area = memalign( alignment, size ); // create new aligned area
+		// change size and copy old content to new storage
+
+		void * naddr;
+		if ( unlikely( oalign != 0 ) ) {	// previous request memalign?
+			if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
+				naddr = cmemalignNoStats( oalign, 1, size ); // create new aligned area
+			} else {
+				naddr = memalignNoStats( oalign, size ); // create new aligned area
+			} // if
 		} else {
-			area = mallocNoStats( size );	// create new area
+			if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
+				naddr = callocNoStats( 1, size ); // create new area
+			} else {
+				naddr = mallocNoStats( size ); // create new area
+			} // if
 		} // if
-		if ( unlikely( area == 0 ) ) return 0;
-		if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill (calloc/cmemalign) ?
-			assert( (header->kind.real.blockSize & 1) == 0 );
-			bool mapped __attribute__(( unused )) = headers( "realloc", area, header, freeElem, asize, alignment );
-			#ifndef __CFA_DEBUG__
-			// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
-			if ( ! mapped )
-			#endif // __CFA_DEBUG__
-				memset( (char *)area + usize, '\0', asize - ( (char *)area - (char *)header ) - usize ); // zero-fill back part
-			header->kind.real.blockSize |= 2; // mark new request as zero fill
-		} // if
-		memcpy( area, addr, usize );		// copy bytes
-		free( addr );
-		return area;
+		if ( unlikely( naddr == 0p ) ) return 0p;
+
+		headers( "realloc", naddr, header, freeElem, bsize, oalign );
+		size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
+		// To preserve prior fill, the entire bucket must be copied versus the size.
+		memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes
+		free( oaddr );
+		return naddr;
 	} // realloc
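Two behavioral changes are worth noting: realloc() now reuses the existing bucket only when the new size fits and wastes at most 50% of the bucket, and it re-allocates through callocNoStats()/cmemalignNoStats() when the original block was zero filled, so the zero-fill mark survives resizing. A usage sketch (CFA; assumes malloc.h declares the malloc_zero_fill() query added later in this changeset):

    #include <malloc.h>                         // assumed to declare malloc_zero_fill
    #include <assert.h>
    #include <stdlib.h>

    int main() {
        char * p = calloc( 100, 1 );            // zero filled => blockSize marked with 2
        assert( malloc_zero_fill( p ) );
        p = realloc( p, 100_000 );              // far too big for the old bucket => new storage
        assert( malloc_zero_fill( p ) );        // zero-fill property carried over
        free( p );
    } // main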

 	// The obsolete function memalign() allocates size bytes and returns a pointer to the allocated memory. The memory
…
 		#endif // __STATISTICS__

-		void * area = memalignNoStats( alignment, size );
-
-		return area;
+		return memalignNoStats( alignment, size );
 	} // memalign
+
+
+	// The cmemalign() function is the same as calloc() with memory alignment.
+	void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {
+		#ifdef __STATISTICS__
+		__atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
+		__atomic_add_fetch( &cmemalign_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST );
+		#endif // __STATISTICS__
+
+		return cmemalignNoStats( alignment, noOfElems, elemSize );
+	} // cmemalign

 	// The function aligned_alloc() is the same as memalign(), except for the added restriction that size should be a
…
 	// The function posix_memalign() allocates size bytes and places the address of the allocated memory in *memptr. The
 	// address of the allocated memory will be a multiple of alignment, which must be a power of two and a multiple of
-	// sizeof(void *). If size is 0, then posix_memalign() returns either NULL, or a unique pointer value that can later
+	// sizeof(void *). If size is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later
 	// be successfully passed to free(3).
 	int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
 		if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
 		* memptr = memalign( alignment, size );
-		if ( unlikely( * memptr == 0 ) ) return ENOMEM;
+		if ( unlikely( * memptr == 0p ) ) return ENOMEM;
 		return 0;
 	} // posix_memalign
…
 	// The free() function frees the memory space pointed to by ptr, which must have been returned by a previous call to
 	// malloc(), calloc() or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behavior
-	// occurs. If ptr is NULL, no operation is performed.
+	// occurs. If ptr is 0p, no operation is performed.
 	void free( void * addr ) {
 		#ifdef __STATISTICS__
…
 		#endif // __STATISTICS__

-		// #comment TD : To decrease nesting I would but the special case in the
-		//               else instead, plus it reads more naturally to have the
-		//               short / normal case instead
-		if ( unlikely( addr == 0 ) ) {		// special case
-			#ifdef __CFA_DEBUG__
-			if ( traceHeap() ) {
-				#define nullmsg "Free( 0x0 ) size:0\n"
-				// Do not debug print free( 0 ), as it can cause recursive entry from sprintf.
-				__cfaabi_dbg_bits_write( nullmsg, sizeof(nullmsg) - 1 );
-			} // if
-			#endif // __CFA_DEBUG__
+		if ( unlikely( addr == 0p ) ) {		// special case
+			// #ifdef __CFA_DEBUG__
+			// if ( traceHeap() ) {
+			//	#define nullmsg "Free( 0x0 ) size:0\n"
+			//	// Do not debug print free( 0p ), as it can cause recursive entry from sprintf.
+			//	__cfaabi_dbg_write( nullmsg, sizeof(nullmsg) - 1 );
+			// } // if
+			// #endif // __CFA_DEBUG__
 			return;
 		} // exit
…
 		doFree( addr );
 	} // free
+
+
+	// The malloc_alignment() function returns the alignment of the allocation.
+	size_t malloc_alignment( void * addr ) {
+		if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment
+		HeapManager.Storage.Header * header = headerAddr( addr );
+		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
+			return header->kind.fake.alignment & -2; // remove flag from value
+		} else {
+			return libAlign();			// minimum alignment
+		} // if
+	} // malloc_alignment
+
+
+	// The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc().
+	bool malloc_zero_fill( void * addr ) {
+		if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill
+		HeapManager.Storage.Header * header = headerAddr( addr );
+		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
+			header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset);
+		} // if
+		return (header->kind.real.blockSize & 2) != 0; // zero filled (calloc/cmemalign) ?
+	} // malloc_zero_fill
+
+
+	// The malloc_usable_size() function returns the number of usable bytes in the block pointed to by ptr, a pointer to
+	// a block of memory allocated by malloc(3) or a related function.
+	size_t malloc_usable_size( void * addr ) {
+		if ( unlikely( addr == 0p ) ) return 0;	// null allocation has 0 size
+		HeapManager.Storage.Header * header;
+		HeapManager.FreeHeader * freeElem;
+		size_t bsize, alignment;
+
+		headers( "malloc_usable_size", addr, header, freeElem, bsize, alignment );
+		return dataStorage( bsize, addr, header ); // data storage in bucket
+	} // malloc_usable_size
+
+
+	// The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and
+	// related functions.
+	void malloc_stats( void ) {
+		#ifdef __STATISTICS__
+		printStats();
+		if ( prtFree() ) prtFree( heapManager );
+		#endif // __STATISTICS__
+	} // malloc_stats
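With dataStorage() in place, malloc_usable_size() reports the bucket storage actually reachable from the returned address rather than a hand-computed difference. A minimal usage sketch of the query functions just added (illustrative sizes; actual values depend on the bucket table above):

    #include <malloc.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main() {
        void * p = malloc( 100 );               // placed in the smallest bucket holding 100 bytes + header
        printf( "requested 100, usable %zu\n", malloc_usable_size( p ) ); // >= 100: rest of the bucket
        free( p );
    } // main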
+
+	// The malloc_stats_fd() function changes the file descriptor where malloc_stats() writes the statistics.
+	int malloc_stats_fd( int fd __attribute__(( unused )) ) {
+		#ifdef __STATISTICS__
+		int temp = statfd;
+		statfd = fd;
+		return temp;
+		#else
+		return -1;
+		#endif // __STATISTICS__
+	} // malloc_stats_fd

 	// The mallopt() function adjusts parameters that control the behavior of the memory-allocation functions (see
…
 		choose( option ) {
 		  case M_TOP_PAD:
-			if ( setHeapExpand( value ) ) fallthru default;
+			if ( setHeapExpand( value ) ) return 1;
 		  case M_MMAP_THRESHOLD:
-			if ( setMmapStart( value ) ) fallthru default;
-		  default:
-			// #comment TD : 1 for unsopported feels wrong
-			return 1;				// success, or unsupported
+			if ( setMmapStart( value ) ) return 1;
 		} // switch
-		return 0;					// error
+		return 0;					// error, unsupported
 	} // mallopt
…
 	} // malloc_trim

-	// The malloc_usable_size() function returns the number of usable bytes in the block pointed to by ptr, a pointer to
-	// a block of memory allocated by malloc(3) or a related function.
-	size_t malloc_usable_size( void * addr ) {
-		if ( unlikely( addr == 0 ) ) return 0;	// null allocation has 0 size
-
-		HeapManager.Storage.Header * header;
-		HeapManager.FreeHeader * freeElem;
-		size_t size, alignment;
-
-		headers( "malloc_usable_size", addr, header, freeElem, size, alignment );
-		size_t usize = size - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block
-		return usize;
-	} // malloc_usable_size
-
-
-	// The malloc_alignment() function returns the alignment of the allocation.
-	size_t malloc_alignment( void * addr ) {
-		if ( unlikely( addr == 0 ) ) return libAlign(); // minimum alignment
-		HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );
-		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
-			return header->kind.fake.alignment & -2; // remove flag from value
-		} else {
-			return libAlign();			// minimum alignment
-		} // if
-	} // malloc_alignment
-
-
-	// The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc().
-	bool malloc_zero_fill( void * addr ) {
-		if ( unlikely( addr == 0 ) ) return false; // null allocation is not zero fill
-
-		HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );
-		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
-			header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset);
-		} // if
-		return (header->kind.real.blockSize & 2) != 0; // zero filled (calloc/cmemalign) ?
-	} // malloc_zero_fill
-
-
-	// The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and
-	// related functions.
-	void malloc_stats( void ) {
-		#ifdef __STATISTICS__
-		printStats();
-		if ( checkFree() ) checkFree( heapManager );
-		#endif // __STATISTICS__
-	} // malloc_stats
-
-	// The malloc_stats_fd() function changes the file descriptor where malloc_stats() writes the statistics.
-	int malloc_stats_fd( int fd ) {
-		#ifdef __STATISTICS__
-		int temp = statfd;
-		statfd = fd;
-		return temp;
-		#else
-		return -1;
-		#endif // __STATISTICS__
-	} // malloc_stats_fd

 	// The malloc_info() function exports an XML string that describes the current state of the memory-allocation
 	// information about all arenas (see malloc(3)).
 	int malloc_info( int options, FILE * stream ) {
+		if ( options != 0 ) { errno = EINVAL; return -1; }
 		return printStatsXML( stream );
 	} // malloc_info
…
 	// structure is returned as the function result. (It is the caller's responsibility to free(3) this memory.)
 	void * malloc_get_state( void ) {
-		return 0;					// unsupported
+		return 0p;					// unsupported
 	} // malloc_get_state
…

+// Must have CFA linkage to overload with C linkage realloc.
+void * realloc( void * oaddr, size_t nalign, size_t size ) {
+	#ifdef __STATISTICS__
+	__atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
+	#endif // __STATISTICS__
+
+	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
+	if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
+	if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
+
+	if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum
+	#ifdef __CFA_DEBUG__
+	else
+		checkAlign( nalign );			// check alignment
+	#endif // __CFA_DEBUG__
+
+	HeapManager.Storage.Header * header;
+	HeapManager.FreeHeader * freeElem;
+	size_t bsize, oalign = 0;
+	headers( "realloc", oaddr, header, freeElem, bsize, oalign );
+	size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
+
+	if ( oalign != 0 && (uintptr_t)oaddr % nalign == 0 ) { // has alignment and just happens to work out
+		headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
+		return realloc( oaddr, size );
+	} // if
+
+	#ifdef __STATISTICS__
+	__atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
+	#endif // __STATISTICS__
+
+	// change size and copy old content to new storage
+
+	void * naddr;
+	if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
+		naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area
+	} else {
+		naddr = memalignNoStats( nalign, size ); // create new aligned area
+	} // if
+
+	headers( "realloc", naddr, header, freeElem, bsize, oalign );
+	size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
+	// To preserve prior fill, the entire bucket must be copied versus the size.
+	memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes
+	free( oaddr );
+	return naddr;
+} // realloc

 // Local Variables: //
 // tab-width: 4 //
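The file now ends with the CFA-linkage realloc( oaddr, nalign, size ) overload, which resizes and re-aligns in one call, reusing the storage when the existing alignment already satisfies the new one. A usage sketch (CFA; the 4096 and 200 values are illustrative):

    #include <malloc.h>
    #include <stdlib.h>

    int main() {
        void * p = memalign( 64, 100 );         // 64-byte aligned block
        p = realloc( p, 4096, 200 );            // CFA overload: re-align to 4096 while resizing to 200
        free( p );
    } // main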