- Timestamp:
- Nov 24, 2019, 5:58:39 PM (5 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 58e280f4
- Parents:
- 4b464b5
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/heap.cfa
r4b464b5 r1e034d9 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Nov 22 14:16:30201913 // Update Count : 6 2612 // Last Modified On : Sun Nov 24 17:56:15 2019 13 // Update Count : 638 14 14 // 15 15 … … 18 18 #include <stdio.h> // snprintf, fileno 19 19 #include <errno.h> // errno 20 #include <string.h> // memset, memcpy 20 21 extern "C" { 21 22 #include <sys/mman.h> // mmap, munmap … … 27 28 #include "bits/locks.hfa" // __spinlock_t 28 29 #include "startup.hfa" // STARTUP_PRIORITY_MEMORY 29 #include "stdlib.hfa" // bsearchl30 //#include "stdlib.hfa" // bsearchl 30 31 #include "malloc.h" 31 32 … … 90 91 91 92 enum { 93 // Define the default extension heap amount in units of bytes. When the uC++ supplied heap reaches the brk address, 94 // the brk address is extended by the extension amount. 95 __CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024), 96 97 // Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; 98 // values greater than or equal to this value are mmap from the operating system. 92 99 __CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1), 93 __CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),94 100 }; 95 101 … … 128 134 } // extern "C" 129 135 #endif // __CFA_DEBUG__ 136 130 137 131 138 // statically allocated variables => zero filled. … … 226 233 #define __STATISTICS__ 227 234 235 // Bucket size must be multiple of 16. 228 236 // Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size. 
229 237 static const unsigned int bucketSizes[] @= { // different bucket sizes … … 365 373 366 374 367 static inline bool setMmapStart( size_t value ) { // true => mmapped, false => sbrk 368 if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true; 369 mmapStart = value; // set global 370 371 // find the closest bucket size less than or equal to the mmapStart size 372 maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search 373 assert( maxBucketsUsed < NoBucketSizes ); // subscript failure ? 374 assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ? 375 return false; 376 } // setMmapStart 377 378 379 static inline void checkHeader( bool check, const char * name, void * addr ) { 380 if ( unlikely( check ) ) { // bad address ? 381 abort( "Attempt to %s storage %p with address outside the heap.\n" 382 "Possible cause is duplicate free on same block or overwriting of memory.", 383 name, addr ); 384 } // if 385 } // checkHeader 386 387 388 static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) { 389 if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ? 
390 size_t offset = header->kind.fake.offset; 391 alignment = header->kind.fake.alignment & -2; // remove flag from value 392 #ifdef __CFA_DEBUG__ 393 checkAlign( alignment ); // check alignment 394 #endif // __CFA_DEBUG__ 395 header = (HeapManager.Storage.Header *)((char *)header - offset); 396 } // if 397 } // fakeHeader 398 399 400 // <-------+----------------------------------------------------> bsize (bucket size) 401 // |header |addr 402 //================================================================================== 403 // | alignment 404 // <-----------------<------------+-----------------------------> bsize (bucket size) 405 // |fake-header | addr 406 #define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) )) 407 408 // <-------<<--------------------- dsize ---------------------->> bsize (bucket size) 409 // |header |addr 410 //================================================================================== 411 // | alignment 412 // <------------------------------<<---------- dsize --------->>> bsize (bucket size) 413 // |fake-header |addr 414 #define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header )) 415 416 417 static inline bool headers( const char * name __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) { 418 header = headerAddr( addr ); 419 420 if ( unlikely( heapEnd < addr ) ) { // mmapped ? 421 fakeHeader( header, alignment ); 422 size = header->kind.real.blockSize & -3; // mmap size 423 return true; 424 } // if 425 426 #ifdef __CFA_DEBUG__ 427 checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ? 
428 #endif // __CFA_DEBUG__ 429 430 // header may be safe to dereference 431 fakeHeader( header, alignment ); 432 #ifdef __CFA_DEBUG__ 433 checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -) 434 #endif // __CFA_DEBUG__ 435 436 freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3); 437 #ifdef __CFA_DEBUG__ 438 if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) { 439 abort( "Attempt to %s storage %p with corrupted header.\n" 440 "Possible cause is duplicate free on same block or overwriting of header information.", 441 name, addr ); 442 } // if 443 #endif // __CFA_DEBUG__ 444 size = freeElem->blockSize; 445 return false; 446 } // headers 447 448 449 static inline void * extend( size_t size ) with ( heapManager ) { 450 lock( extlock __cfaabi_dbg_ctx2 ); 451 ptrdiff_t rem = heapRemaining - size; 452 if ( rem < 0 ) { 453 // If the size requested is bigger than the current remaining storage, increase the size of the heap. 454 455 size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() ); 456 if ( sbrk( increase ) == (void *)-1 ) { 457 unlock( extlock ); 458 errno = ENOMEM; 459 return 0p; 460 } // if 461 #ifdef __STATISTICS__ 462 sbrk_calls += 1; 463 sbrk_storage += increase; 464 #endif // __STATISTICS__ 465 #ifdef __CFA_DEBUG__ 466 // Set new memory to garbage so subsequent uninitialized usages might fail. 
467 memset( (char *)heapEnd + heapRemaining, '\377', increase ); 468 #endif // __CFA_DEBUG__ 469 rem = heapRemaining + increase - size; 470 } // if 471 472 HeapManager.Storage * block = (HeapManager.Storage *)heapEnd; 473 heapRemaining = rem; 474 heapEnd = (char *)heapEnd + size; 475 unlock( extlock ); 476 return block; 477 } // extend 478 479 375 // thunk problem 480 376 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) { 481 377 size_t l = 0, m, h = dim; … … 490 386 return l; 491 387 } // Bsearchl 388 389 390 static inline bool setMmapStart( size_t value ) { // true => mmapped, false => sbrk 391 if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true; 392 mmapStart = value; // set global 393 394 // find the closest bucket size less than or equal to the mmapStart size 395 maxBucketsUsed = Bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search 396 assert( maxBucketsUsed < NoBucketSizes ); // subscript failure ? 397 assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ? 398 return false; 399 } // setMmapStart 400 401 402 static inline void checkHeader( bool check, const char * name, void * addr ) { 403 if ( unlikely( check ) ) { // bad address ? 404 abort( "Attempt to %s storage %p with address outside the heap.\n" 405 "Possible cause is duplicate free on same block or overwriting of memory.", 406 name, addr ); 407 } // if 408 } // checkHeader 409 410 411 static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) { 412 if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ? 
413 size_t offset = header->kind.fake.offset; 414 alignment = header->kind.fake.alignment & -2; // remove flag from value 415 #ifdef __CFA_DEBUG__ 416 checkAlign( alignment ); // check alignment 417 #endif // __CFA_DEBUG__ 418 header = (HeapManager.Storage.Header *)((char *)header - offset); 419 } // if 420 } // fakeHeader 421 422 423 // <-------+----------------------------------------------------> bsize (bucket size) 424 // |header |addr 425 //================================================================================== 426 // | alignment 427 // <-----------------<------------+-----------------------------> bsize (bucket size) 428 // |fake-header | addr 429 #define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) )) 430 431 // <-------<<--------------------- dsize ---------------------->> bsize (bucket size) 432 // |header |addr 433 //================================================================================== 434 // | alignment 435 // <------------------------------<<---------- dsize --------->>> bsize (bucket size) 436 // |fake-header |addr 437 #define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header )) 438 439 440 static inline bool headers( const char * name __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) { 441 header = headerAddr( addr ); 442 443 if ( unlikely( heapEnd < addr ) ) { // mmapped ? 444 fakeHeader( header, alignment ); 445 size = header->kind.real.blockSize & -3; // mmap size 446 return true; 447 } // if 448 449 #ifdef __CFA_DEBUG__ 450 checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ? 
451 #endif // __CFA_DEBUG__ 452 453 // header may be safe to dereference 454 fakeHeader( header, alignment ); 455 #ifdef __CFA_DEBUG__ 456 checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -) 457 #endif // __CFA_DEBUG__ 458 459 freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3); 460 #ifdef __CFA_DEBUG__ 461 if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) { 462 abort( "Attempt to %s storage %p with corrupted header.\n" 463 "Possible cause is duplicate free on same block or overwriting of header information.", 464 name, addr ); 465 } // if 466 #endif // __CFA_DEBUG__ 467 size = freeElem->blockSize; 468 return false; 469 } // headers 470 471 472 static inline void * extend( size_t size ) with ( heapManager ) { 473 lock( extlock __cfaabi_dbg_ctx2 ); 474 ptrdiff_t rem = heapRemaining - size; 475 if ( rem < 0 ) { 476 // If the size requested is bigger than the current remaining storage, increase the size of the heap. 477 478 size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() ); 479 if ( sbrk( increase ) == (void *)-1 ) { 480 unlock( extlock ); 481 errno = ENOMEM; 482 return 0p; 483 } // if 484 #ifdef __STATISTICS__ 485 sbrk_calls += 1; 486 sbrk_storage += increase; 487 #endif // __STATISTICS__ 488 #ifdef __CFA_DEBUG__ 489 // Set new memory to garbage so subsequent uninitialized usages might fail. 
490 memset( (char *)heapEnd + heapRemaining, '\377', increase ); 491 #endif // __CFA_DEBUG__ 492 rem = heapRemaining + increase - size; 493 } // if 494 495 HeapManager.Storage * block = (HeapManager.Storage *)heapEnd; 496 heapRemaining = rem; 497 heapEnd = (char *)heapEnd + size; 498 unlock( extlock ); 499 return block; 500 } // extend 492 501 493 502 … … 541 550 block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call 542 551 if ( unlikely( block == 0p ) ) return 0p; 543 552 #if defined( SPINLOCK ) 544 553 } else { 545 554 freeElem->freeList = block->header.kind.real.next; 546 555 unlock( freeElem->lock ); 547 556 #endif // SPINLOCK 548 557 } // if 549 558 … … 696 705 heapExpand = default_heap_expansion(); 697 706 698 char * End = (char *)sbrk( 0 );699 sbrk( (char *)libCeiling( (long unsigned int) End, libAlign() ) - End ); // move start of heap to multiple of alignment707 char * end = (char *)sbrk( 0 ); 708 sbrk( (char *)libCeiling( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment 700 709 heapBegin = heapEnd = sbrk( 0 ); // get new start point 701 710 } // HeapManager … … 755 764 if ( ! mapped ) 756 765 #endif // __CFA_DEBUG__ 757 758 759 766 // Zero entire data space even when > than size => realloc without a new allocation and zero fill works. 
767 // <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size) 768 // `-header`-addr `-size 760 769 memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros 761 770 … … 904 913 } // if 905 914 if ( unlikely( naddr == 0p ) ) return 0p; 915 906 916 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 907 917 size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket … … 971 981 // if ( traceHeap() ) { 972 982 // #define nullmsg "Free( 0x0 ) size:0\n" 973 // // Do not debug print free( 0 ), as it can cause recursive entry from sprintf.983 // // Do not debug print free( 0p ), as it can cause recursive entry from sprintf. 974 984 // __cfaabi_dbg_write( nullmsg, sizeof(nullmsg) - 1 ); 975 985 // } // if … … 982 992 983 993 984 994 // The malloc_alignment() function returns the alignment of the allocation. 985 995 size_t malloc_alignment( void * addr ) { 986 996 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment … … 994 1004 995 1005 996 1006 // The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc(). 997 1007 bool malloc_zero_fill( void * addr ) { 998 1008 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill … … 1018 1028 1019 1029 1020 1021 1030 // The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and 1031 // related functions. 1022 1032 void malloc_stats( void ) { 1023 1033 #ifdef __STATISTICS__ … … 1087 1097 // Must have CFA linkage to overload with C linkage realloc. 
1088 1098 void * realloc( void * oaddr, size_t nalign, size_t size ) { 1089 1099 #ifdef __STATISTICS__ 1090 1100 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST ); 1091 1101 #endif // __STATISTICS__ 1092 1102 1093 1103 if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases 1094 1104 if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size ); 1095 1105 1096 1106 if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum 1097 1107 #ifdef __CFA_DEBUG__ 1098 1108 else 1099 1109 checkAlign( nalign ); // check alignment 1100 1110 #endif // __CFA_DEBUG__ … … 1104 1114 size_t bsize, oalign = 0; 1105 1115 headers( "realloc", oaddr, header, freeElem, bsize, oalign ); 1106 1107 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket 1116 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket 1108 1117 1109 1118 if ( oalign != 0 && (uintptr_t)oaddr % nalign == 0 ) { // has alignment and just happens to work out 1110 1119 headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same) 1111 1120 return realloc( oaddr, size ); 1112 1113 1114 1121 } // if 1122 1123 #ifdef __STATISTICS__ 1115 1124 __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST ); 1116 #endif // __STATISTICS__ 1117 1118 // change size and copy old content to new storage 1119 1120 void * naddr; 1121 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 1122 naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area 1123 } else { 1124 naddr = memalignNoStats( nalign, size ); // create new aligned area 1125 } // if 1126 size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket 1125 #endif // __STATISTICS__ 1126 1127 // change size and copy old content to new storage 1128 1129 void * naddr; 1130 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 1131 naddr 
= cmemalignNoStats( nalign, 1, size ); // create new aligned area 1132 } else { 1133 naddr = memalignNoStats( nalign, size ); // create new aligned area 1134 } // if 1135 1136 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 1137 size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket 1127 1138 // To preserve prior fill, the entire bucket must be copied versus the size. 1128 1129 1130 1139 memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes 1140 free( oaddr ); 1141 return naddr; 1131 1142 } // realloc 1132 1143
Note: See TracChangeset for help on using the changeset viewer.