//
// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// heap.cfa --
//
// Author           : Peter A. Buhr
// Created On       : Tue Dec 19 21:58:35 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Fri Apr 29 19:05:03 2022
// Update Count     : 1167
//

#include <string.h>										// memset, memcpy
#include <limits.h>										// ULONG_MAX
#include <stdlib.h>										// EXIT_FAILURE
#include <errno.h>										// errno, ENOMEM, EINVAL
#include <unistd.h>										// STDERR_FILENO, sbrk, sysconf
#include <malloc.h>										// memalign, malloc_usable_size
#include <sys/mman.h>									// mmap, munmap
#include <sys/sysinfo.h>								// get_nprocs

#include "bits/align.hfa"								// libAlign
#include "bits/defs.hfa"								// likely, unlikely
#include "bits/locks.hfa"								// __spinlock_t
#include "startup.hfa"									// STARTUP_PRIORITY_MEMORY
#include "math.hfa"										// min
#include "bitmanip.hfa"									// is_pow2, ceiling2

#define FASTLOOKUP
#define __STATISTICS__

static bool traceHeap = false;

inline bool traceHeap() { return traceHeap; }

bool traceHeapOn() {
	bool temp = traceHeap;
	traceHeap = true;
	return temp;
} // traceHeapOn

bool traceHeapOff() {
	bool temp = traceHeap;
	traceHeap = false;
	return temp;
} // traceHeapOff

bool traceHeapTerm() { return false; }

static bool prtFree = false;

bool prtFree() {
	return prtFree;
} // prtFree

bool prtFreeOn() {
	bool temp = prtFree;
	prtFree = true;
	return temp;
} // prtFreeOn

bool prtFreeOff() {
	bool temp = prtFree;
	prtFree = false;
	return temp;
} // prtFreeOff

enum {
	// The default extension heap amount in units of bytes. When the current heap reaches the brk address, the brk
	// address is extended by the extension amount.
	__CFA_DEFAULT_HEAP_EXPANSION__ = 10 * 1024 * 1024,

	// The mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; values
	// greater than or equal to this value are mmapped from the operating system.
	__CFA_DEFAULT_MMAP_START__ = 512 * 1024 + 1,

	// The default unfreed storage amount in units of bytes. When the CFA program ends, it subtracts this amount from
	// the malloc/free counter to adjust for storage the program does not free.
	__CFA_DEFAULT_HEAP_UNFREED__ = 0
}; // enum
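// These defaults are returned by the weak functions malloc_expansion(), malloc_mmap_start(), and malloc_unfreed()
// defined near the end of this file, so an application can tune them by supplying strong replacements. A minimal
// sketch (illustrative only, values are hypothetical):
//   extern "C" {
//       size_t malloc_expansion() { return 100 * 1024 * 1024; } // grow the sbrk area 100MB at a time
//       size_t malloc_mmap_start() { return 1024 * 1024 + 1; }  // only mmap allocations of 1MB or more
//   }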
//####################### Heap Statistics ####################


#ifdef __STATISTICS__
enum { CntTriples = 12 };								// number of counter triples
enum { MALLOC, AALLOC, CALLOC, MEMALIGN, AMEMALIGN, CMEMALIGN, RESIZE, REALLOC, FREE };

struct StatsOverlay {									// overlay for iteration
	unsigned int calls, calls_0;
	unsigned long long int request, alloc;
};

// Heap statistics counters.
union HeapStatistics {
	struct {											// minimum qualification
		unsigned int malloc_calls, malloc_0_calls;
		unsigned long long int malloc_storage_request, malloc_storage_alloc;
		unsigned int aalloc_calls, aalloc_0_calls;
		unsigned long long int aalloc_storage_request, aalloc_storage_alloc;
		unsigned int calloc_calls, calloc_0_calls;
		unsigned long long int calloc_storage_request, calloc_storage_alloc;
		unsigned int memalign_calls, memalign_0_calls;
		unsigned long long int memalign_storage_request, memalign_storage_alloc;
		unsigned int amemalign_calls, amemalign_0_calls;
		unsigned long long int amemalign_storage_request, amemalign_storage_alloc;
		unsigned int cmemalign_calls, cmemalign_0_calls;
		unsigned long long int cmemalign_storage_request, cmemalign_storage_alloc;
		unsigned int resize_calls, resize_0_calls;
		unsigned long long int resize_storage_request, resize_storage_alloc;
		unsigned int realloc_calls, realloc_0_calls;
		unsigned long long int realloc_storage_request, realloc_storage_alloc;
		unsigned int free_calls, free_null_calls;
		unsigned long long int free_storage_request, free_storage_alloc;
		unsigned int away_pulls, away_pushes;
		unsigned long long int away_storage_request, away_storage_alloc;
		unsigned int mmap_calls, mmap_0_calls;			// no zero calls
		unsigned long long int mmap_storage_request, mmap_storage_alloc;
		unsigned int munmap_calls, munmap_0_calls;		// no zero calls
		unsigned long long int munmap_storage_request, munmap_storage_alloc;
	};
	struct StatsOverlay counters[CntTriples];			// overlay for iteration
}; // HeapStatistics

static_assert( sizeof(HeapStatistics) == CntTriples * sizeof(StatsOverlay),
			   "Heap statistics counter triples do not match the array size" );

static void HeapStatisticsCtor( HeapStatistics & stats ) {
	memset( &stats, '\0', sizeof(stats) );				// very fast
	// for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
	// 	stats.counters[i].calls = stats.counters[i].calls_0 = stats.counters[i].request = stats.counters[i].alloc = 0;
	// } // for
} // HeapStatisticsCtor

static HeapStatistics & ?+=?( HeapStatistics & lhs, const HeapStatistics & rhs ) {
	for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
		lhs.counters[i].calls += rhs.counters[i].calls;
		lhs.counters[i].calls_0 += rhs.counters[i].calls_0;
		lhs.counters[i].request += rhs.counters[i].request;
		lhs.counters[i].alloc += rhs.counters[i].alloc;
	} // for
	return lhs;
} // ?+=?
#endif // __STATISTICS__
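// Example (illustrative only) of why the overlay matters: merging two statistics records is a CntTriples-iteration
// loop rather than 48 individual field updates, and the static_assert above guarantees the two union views have
// identical layout.
//   HeapStatistics local, global;
//   HeapStatisticsCtor( local ); HeapStatisticsCtor( global );
//   local.malloc_calls += 1; local.malloc_storage_request += 100;
//   global += local;									// ?+=? walks counters[0..CntTriples-1]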
#define SPINLOCK 0
#define LOCKFREE 1
#define BUCKETLOCK SPINLOCK
#if   BUCKETLOCK == SPINLOCK
#elif BUCKETLOCK == LOCKFREE
#include <stackLockFree.hfa>
#else
	#error undefined lock type for bucket lock
#endif // LOCKFREE

// Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
// Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
enum { NoBucketSizes = 91 };							// number of bucket sizes

struct Heap {
	struct Storage {
		struct Header {									// header
			union Kind {
				struct RealHeader {
					union {
						struct {						// 4-byte word => 8-byte header, 8-byte word => 16-byte header
							union {						// 2nd low-order bit => zero filled, 3rd low-order bit => mmapped
								// FreeHeader * home;	// allocated block points back to home locations (must overlay alignment)
								void * home;			// allocated block points back to home locations (must overlay alignment)
								size_t blockSize;		// size for munmap (must overlay alignment)
								#if BUCKETLOCK == SPINLOCK
								Storage * next;			// freed block points to next freed block of same size
								#endif // SPINLOCK
							};
							size_t size;				// allocation size in bytes
						};
						#if BUCKETLOCK == LOCKFREE
						Link(Storage) next;				// freed block points to next freed block of same size (double-wide)
						#endif // LOCKFREE
					};
				} real; // RealHeader

				struct FakeHeader {
					uintptr_t alignment;				// 1st low-order bit => fake header & alignment
					uintptr_t offset;
				} fake; // FakeHeader
			} kind; // Kind
		} header; // Header

		char pad[libAlign() - sizeof( Header )];
		char data[0];									// storage
	}; // Storage

	static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" );

	struct FreeHeader {
		size_t blockSize __attribute__(( aligned (8) )); // size of allocations on this list
		#if BUCKETLOCK == SPINLOCK
		__spinlock_t lock;
		Storage * freeList;
		#else
		StackLF(Storage) freeList;
		#endif // BUCKETLOCK
	} __attribute__(( aligned (8) )); // FreeHeader

	FreeHeader freeLists[NoBucketSizes];				// buckets for different allocation sizes
	__spinlock_t extlock;								// protects allocation-buffer extension
	void * heapBegin;									// start of heap
	void * heapEnd;										// logical end of heap
	size_t heapRemaining;								// amount of storage not allocated in the current chunk
}; // Heap

#if BUCKETLOCK == LOCKFREE
static inline {
	Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; }
	void ?{}( Heap.FreeHeader & ) {}
	void ^?{}( Heap.FreeHeader & ) {}
} // distribution
#endif // LOCKFREE

static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; }

#ifdef FASTLOOKUP
enum { LookupSizes = 65_536 + sizeof(Heap.Storage) };	// number of fast lookup sizes
static unsigned char lookup[LookupSizes];				// O(1) lookup for small sizes
#endif // FASTLOOKUP
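// Illustrative sizing (assuming a 64-bit build where sizeof(Heap.Storage) == 16): a request needing a total of
// 116 bytes is below LookupSizes, so its bucket index is read directly from lookup[116] in O(1); totals at or
// above LookupSizes fall back to the binary search (Bsearchl) over bucketSizes in doMalloc.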
static const off_t mmapFd = -1;							// fake or actual fd for anonymous file
#ifdef __CFA_DEBUG__
static bool heapBoot = 0;								// detect recursion during boot
#endif // __CFA_DEBUG__

// Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16.
// Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size.
// malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed.
static const unsigned int bucketSizes[] @= {			// different bucket sizes
	16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4
	96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3
	160, 192, 224, 256 + sizeof(Heap.Storage), // 4
	320, 384, 448, 512 + sizeof(Heap.Storage), // 4
	640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4
	1_536, 2_048 + sizeof(Heap.Storage), // 2
	2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4
	6_144, 8_192 + sizeof(Heap.Storage), // 2
	9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8
	18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8
	36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8
	73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8
	147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8
	294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8
	655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4
	1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8
	2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4
};

static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0]), "size of bucket array wrong" );
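// Worked example (illustrative, assuming a 64-bit build where sizeof(Heap.Storage) == 16): malloc(100) needs
// 100 + 16 = 116 total bytes; the smallest bucket holding 116 is 112 + 16 = 128, so the request is served from
// the 128-byte free list and malloc_usable_size() later reports 128 - 16 = 112 usable bytes.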
// The constructor for heapManager is called explicitly in memory_startup.
static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing


//####################### Memory Allocation Routines Helpers ####################


#ifdef __CFA_DEBUG__
static size_t allocUnfreed;								// running total of allocations minus frees

static void prtUnfreed() {
	if ( allocUnfreed != 0 ) {
		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
		char helpText[512];
		__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
			"CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n"
			"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
			(long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid
	} // if
} // prtUnfreed

extern int cfa_main_returned;							// from interpose.cfa
extern "C" {
	void heapAppStart() {								// called by __cfaabi_appready_startup
		allocUnfreed = 0;
	} // heapAppStart

	void heapAppStop() {								// called by __cfaabi_appready_startdown
		fclose( stdin ); fclose( stdout );
		if ( cfa_main_returned ) prtUnfreed();			// do not check unfreed storage if exit called
	} // heapAppStop
} // extern "C"
#endif // __CFA_DEBUG__


#ifdef __STATISTICS__
static HeapStatistics stats;							// zero filled
static unsigned int sbrk_calls;
static unsigned long long int sbrk_storage;
// Statistics file descriptor (changed by malloc_stats_fd).
static int stats_fd = STDERR_FILENO;					// default stderr

#define prtFmt \
	"\nHeap statistics: (storage request / allocation)\n" \
	"  malloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  aalloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  calloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  memalign  >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  amemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  cmemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  resize    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  realloc   >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  free      !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n" \
	"  sbrk      calls %'u; storage %'llu bytes\n" \
	"  mmap      calls %'u; storage %'llu / %'llu bytes\n" \
	"  munmap    calls %'u; storage %'llu / %'llu bytes\n"

// Use "write" because streams may be shut down when calls are made.
static int printStats() {								// see malloc_stats
	char helpText[sizeof(prtFmt) + 1024];				// space for message and values
	return __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), prtFmt,
			stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc,
			stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc,
			stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc,
			stats.memalign_calls, stats.memalign_0_calls, stats.memalign_storage_request, stats.memalign_storage_alloc,
			stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc,
			stats.cmemalign_calls, stats.cmemalign_0_calls, stats.cmemalign_storage_request, stats.cmemalign_storage_alloc,
			stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc,
			stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc,
			stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc,
			sbrk_calls, sbrk_storage,
			stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc,
			stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc
		);
} // printStats

#define prtFmtXML \
	"<malloc version=\"1\">\n" \
	"<heap nr=\"0\">\n" \
	"<sizes>\n" \
	"</sizes>\n" \
	"<total type=\"malloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"aalloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"calloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"memalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"amemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"cmemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"resize\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"free\" !null=\"%'u;\" null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n" \
	"<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"</malloc>"

static int printStatsXML( FILE * stream ) {				// see malloc_info
	char helpText[sizeof(prtFmtXML) + 1024];			// space for message and values
	return __cfaabi_bits_print_buffer( fileno( stream ), helpText, sizeof(helpText), prtFmtXML,
			stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc,
			stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc,
			stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc,
			stats.memalign_calls, stats.memalign_0_calls,
			stats.memalign_storage_request, stats.memalign_storage_alloc,
			stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc,
			stats.cmemalign_calls, stats.cmemalign_0_calls, stats.cmemalign_storage_request, stats.cmemalign_storage_alloc,
			stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc,
			stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc,
			stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc,
			sbrk_calls, sbrk_storage,
			stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc,
			stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc
		);
} // printStatsXML
#endif // __STATISTICS__


// statically allocated variables => zero filled.
static size_t heapExpand;								// sbrk advance
static size_t mmapStart;								// cross over point for mmap
static unsigned int maxBucketsUsed;						// maximum number of buckets in use

// extern visibility, used by runtime kernel
size_t __page_size;										// architecture pagesize
int __map_prot;											// common mmap/mprotect protection


// thunk problem
size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) {
	size_t l = 0, m, h = dim;
	while ( l < h ) {
		m = (l + h) / 2;
		if ( (unsigned int &)(vals[m]) < key ) {		// cast away const
			l = m + 1;
		} else {
			h = m;
		} // if
	} // while
	return l;
} // Bsearchl

static inline bool setMmapStart( size_t value ) {		// true => mmapped, false => sbrk
	if ( value < __page_size || bucketSizes[NoBucketSizes - 1] < value ) return false;
	mmapStart = value;									// set global

	// find the closest bucket size less than or equal to the mmapStart size
	maxBucketsUsed = Bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search

	assert( maxBucketsUsed < NoBucketSizes );			// subscript failure ?
	assert( mmapStart <= bucketSizes[maxBucketsUsed] );	// search failure ?
	return true;
} // setMmapStart


// <-------+----------------------------------------------------> bsize (bucket size)
// |header |addr
//==================================================================================
//                  align/offset |
// <-----------------<------------+-----------------------------> bsize (bucket size)
//                   |fake-header | addr
#define HeaderAddr( addr ) ((Heap.Storage.Header *)( (char *)addr - sizeof(Heap.Storage) ))
#define RealHeader( header ) ((Heap.Storage.Header *)((char *)header - header->kind.fake.offset))

// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
// |header |addr
//==================================================================================
//                  align/offset |
// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
//                                |fake-header |addr
#define DataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
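// Worked example (illustrative, 64-bit, sizeof(Heap.Storage) == 16): for memalign(64, 100), suppose doMalloc
// returns addr 0x1010, so the real header sits at 0x1000. The user address is the first 64-byte boundary at or
// after addr + 16 = 0x1020, i.e., 0x1040; the fake header occupies 0x1030-0x103f with offset 0x1030 - 0x1000 =
// 0x30, and DataStorage( bsize, 0x1040, realHeader ) leaves bsize - 0x40 bytes for data.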
static inline void checkAlign( size_t alignment ) {
	if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) {
		abort( "**** Error **** alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
	} // if
} // checkAlign

static inline void checkHeader( bool check, const char name[], void * addr ) {
	if ( unlikely( check ) ) {							// bad address ?
		abort( "**** Error **** attempt to %s storage %p with address outside the heap.\n"
			   "Possible cause is duplicate free on same block or overwriting of memory.",
			   name, addr );
	} // if
} // checkHeader

// Manipulate sticky bits stored in unused 3 low-order bits of an address.
//   bit0 => alignment => fake header
//   bit1 => zero filled (calloc)
//   bit2 => mapped allocation versus sbrk
#define StickyBits( header ) (((header)->kind.real.blockSize & 0x7))
#define ClearStickyBits( addr ) (typeof(addr))((uintptr_t)(addr) & ~7)
#define MarkAlignmentBit( align ) ((align) | 1)
#define AlignmentBit( header ) ((((header)->kind.fake.alignment) & 1))
#define ClearAlignmentBit( header ) (((header)->kind.fake.alignment) & ~1)
#define ZeroFillBit( header ) ((((header)->kind.real.blockSize) & 2))
#define ClearZeroFillBit( header ) ((((header)->kind.real.blockSize) &= ~2))
#define MarkZeroFilledBit( header ) ((header)->kind.real.blockSize |= 2)
#define MmappedBit( header ) ((((header)->kind.real.blockSize) & 4))
#define MarkMmappedBit( size ) ((size) | 4)
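// Worked example (illustrative): every block size is a multiple of 16, so the 3 low-order bits of blockSize are
// free for flags. A 528384-byte (0x81000) mmapped allocation from calloc stores blockSize = 0x81000 | 4 | 2 =
// 0x81006; ClearStickyBits() recovers 0x81000 for munmap, while MmappedBit() and ZeroFillBit() test the flags.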
static inline void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) {
	if ( unlikely( AlignmentBit( header ) ) ) {			// fake header ?
		alignment = ClearAlignmentBit( header );		// clear flag from value
		#ifdef __CFA_DEBUG__
		checkAlign( alignment );						// check alignment
		#endif // __CFA_DEBUG__
		header = RealHeader( header );					// backup from fake to real header
	} else {
		alignment = libAlign();							// => no fake header
	} // if
} // fakeHeader

static inline bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header,
							Heap.FreeHeader *& freeHead, size_t & size, size_t & alignment ) with( heapManager ) {
	header = HeaderAddr( addr );

	#ifdef __CFA_DEBUG__
	checkHeader( header < (Heap.Storage.Header *)heapBegin, name, addr ); // bad low address ?
	#endif // __CFA_DEBUG__

	if ( likely( ! StickyBits( header ) ) ) {			// no sticky bits ?
		freeHead = (Heap.FreeHeader *)(header->kind.real.home);
		alignment = libAlign();
	} else {
		fakeHeader( header, alignment );
		if ( unlikely( MmappedBit( header ) ) ) {		// mmapped ?
			verify( addr < heapBegin || heapEnd < addr );
			size = ClearStickyBits( header->kind.real.blockSize ); // mmap size
			return true;
		} // if
		freeHead = (Heap.FreeHeader *)(ClearStickyBits( header->kind.real.home ));
	} // if
	size = freeHead->blockSize;

	#ifdef __CFA_DEBUG__
	checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)

	Heap * homeManager;
	if ( unlikely( freeHead == 0p ||					// freed and only free-list node => null link
				   // freed and link points at another free block not to a bucket in the bucket array.
				   freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) ) {
		abort( "**** Error **** attempt to %s storage %p with corrupted header.\n"
			   "Possible cause is duplicate free on same block or overwriting of header information.",
			   name, addr );
	} // if
	#endif // __CFA_DEBUG__
	return false;
} // headers

// #ifdef __CFA_DEBUG__
// #if __SIZEOF_POINTER__ == 4
// #define MASK 0xdeadbeef
// #else
// #define MASK 0xdeadbeefdeadbeef
// #endif
// #define STRIDE size_t

// static void * Memset( void * addr, STRIDE size ) {	// debug only
// 	if ( size % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, size %zd not multiple of %zd.", size, sizeof(STRIDE) );
// 	if ( (STRIDE)addr % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, addr %p not multiple of %zd.", addr, sizeof(STRIDE) );

// 	STRIDE * end = (STRIDE *)addr + size / sizeof(STRIDE);
// 	for ( STRIDE * p = (STRIDE *)addr; p < end; p += 1 ) *p = MASK;
// 	return addr;
// } // Memset
// #endif // __CFA_DEBUG__

#define NO_MEMORY_MSG "insufficient heap memory available for allocating %zd new bytes."

static inline void * extend( size_t size ) with( heapManager ) {
	lock( extlock __cfaabi_dbg_ctx2 );
	ptrdiff_t rem = heapRemaining - size;
	if ( unlikely( rem < 0 ) ) {
		// If the size requested is bigger than the current remaining storage, increase the size of the heap.
		size_t increase = ceiling2( size > heapExpand ? size : heapExpand, __page_size );
		// Do not call abort or strerror( errno ) as they may call malloc.
		if ( sbrk( increase ) == (void *)-1 ) {			// failed, no memory ?
			unlock( extlock );
			__cfaabi_bits_print_nolock( STDERR_FILENO, NO_MEMORY_MSG, size );
			_exit( EXIT_FAILURE );						// give up
		} // if
		// Make storage executable for thunks.
		if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
			unlock( extlock );
			__cfaabi_bits_print_nolock( STDERR_FILENO, "extend() : internal error, mprotect failure, heapEnd:%p size:%zd, errno:%d.\n", heapEnd, increase, errno );
			_exit( EXIT_FAILURE );
		} // if
		#ifdef __STATISTICS__
		sbrk_calls += 1;
		sbrk_storage += increase;
		#endif // __STATISTICS__
		#ifdef __CFA_DEBUG__
		// Set new memory to garbage so subsequent uninitialized usages might fail.
		memset( (char *)heapEnd + heapRemaining, '\xde', increase );
		//Memset( (char *)heapEnd + heapRemaining, increase );
		#endif // __CFA_DEBUG__
		rem = heapRemaining + increase - size;
	} // if

	Heap.Storage * block = (Heap.Storage *)heapEnd;
	heapRemaining = rem;
	heapEnd = (char *)heapEnd + size;
	unlock( extlock );
	return block;
} // extend
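// Worked example (illustrative, 4K pages, default heapExpand of 10MB): a carve request of 40K arriving with only
// 8K remaining grows the break by max( 40K, 10MB ) rounded up to a page boundary, i.e., 10MB, so subsequent small
// requests are carved from the new chunk without further sbrk calls.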
static inline void * doMalloc( size_t size ) with( heapManager ) {
	Heap.Storage * block;								// pointer to new block of storage

	// Look up size in the size list. Make sure the user request includes space for the header that must be allocated
	// along with the block and is a multiple of the alignment size.
	size_t tsize = size + sizeof(Heap.Storage);

	if ( likely( tsize < mmapStart ) ) {				// small size => sbrk
		size_t posn;
		#ifdef FASTLOOKUP
		if ( tsize < LookupSizes ) posn = lookup[tsize];
		else
		#endif // FASTLOOKUP
			posn = Bsearchl( (unsigned int)tsize, bucketSizes, (size_t)maxBucketsUsed );
		Heap.FreeHeader * freeElem = &freeLists[posn];
		verify( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ?
		verify( tsize <= freeElem->blockSize );			// search failure ?
		tsize = freeElem->blockSize;					// total space needed for request

		// Spin until the lock is acquired for this particular size of block.
		#if BUCKETLOCK == SPINLOCK
		lock( freeElem->lock __cfaabi_dbg_ctx2 );
		block = freeElem->freeList;						// remove node from stack
		#else
		block = pop( freeElem->freeList );
		#endif // BUCKETLOCK
		if ( unlikely( block == 0p ) ) {				// no free block ?
			#if BUCKETLOCK == SPINLOCK
			unlock( freeElem->lock );
			#endif // BUCKETLOCK

			// Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more
			// and then carve it off.
			block = (Heap.Storage *)extend( tsize );	// mutual exclusion on call
		#if BUCKETLOCK == SPINLOCK
		} else {
			freeElem->freeList = block->header.kind.real.next;
			unlock( freeElem->lock );
		#endif // BUCKETLOCK
		} // if

		block->header.kind.real.home = freeElem;		// pointer back to free list of appropriate size
	} else {											// large size => mmap
		if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p;
		tsize = ceiling2( tsize, __page_size );			// must be multiple of page size
		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.mmap_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &stats.mmap_storage_request, size, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &stats.mmap_storage_alloc, tsize, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		block = (Heap.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
		if ( block == (Heap.Storage *)MAP_FAILED ) {	// failed ?
			if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
			// Do not call strerror( errno ) as it may call malloc.
			abort( "(Heap &)0x%p.doMalloc() : internal error, mmap failure, size:%zu errno:%d.", &heapManager, tsize, errno );
		} // if
		#ifdef __CFA_DEBUG__
		// Set new memory to garbage so subsequent uninitialized usages might fail.
		memset( block, '\xde', tsize );
		//Memset( block, tsize );
		#endif // __CFA_DEBUG__
		block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap
	} // if

	block->header.kind.real.size = size;				// store allocation size
	void * addr = &(block->data);						// adjust off header to user bytes
	verify( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ?

	#ifdef __CFA_DEBUG__
	__atomic_add_fetch( &allocUnfreed, tsize, __ATOMIC_SEQ_CST );
	if ( traceHeap() ) {
		char helpText[64];
		__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
									"%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); // print debug/nodebug
	} // if
	#endif // __CFA_DEBUG__

	return addr;
} // doMalloc
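// Two illustrative paths through doMalloc (with the default mmap crossover of 512K + 1): malloc(100) stays below
// mmapStart and is served from a bucket carved out of the sbrk area, while malloc(1048576) is page-rounded and
// mmapped directly, with its blockSize tagged by MarkMmappedBit so a later free() knows to munmap it.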
static inline void doFree( void * addr ) with( heapManager ) {
	#ifdef __CFA_DEBUG__
	if ( unlikely( heapManager.heapBegin == 0p ) ) {
		abort( "doFree( %p ) : internal error, called before heap is initialized.", addr );
	} // if
	#endif // __CFA_DEBUG__

	Heap.Storage.Header * header;
	Heap.FreeHeader * freeElem;
	size_t size, alignment;								// not used (see realloc)

	if ( headers( "free", addr, header, freeElem, size, alignment ) ) { // mmapped ?
		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.munmap_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &stats.munmap_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &stats.munmap_storage_alloc, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		if ( munmap( header, size ) == -1 ) {
			abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
				   "Possible cause is invalid pointer.",
				   addr );
		} // if
	} else {
		#ifdef __CFA_DEBUG__
		// Set free memory to garbage so subsequent usages might fail.
		memset( ((Heap.Storage *)header)->data, '\xde', freeElem->blockSize - sizeof( Heap.Storage ) );
		//Memset( ((Heap.Storage *)header)->data, freeElem->blockSize - sizeof( Heap.Storage ) );
		#endif // __CFA_DEBUG__

		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.free_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &stats.free_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &stats.free_storage_alloc, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		#if BUCKETLOCK == SPINLOCK
		lock( freeElem->lock __cfaabi_dbg_ctx2 );		// acquire spin lock
		header->kind.real.next = freeElem->freeList;	// push on stack
		freeElem->freeList = (Heap.Storage *)header;
		unlock( freeElem->lock );						// release spin lock
		#else
		push( freeElem->freeList, *(Heap.Storage *)header );
		#endif // BUCKETLOCK
	} // if

	#ifdef __CFA_DEBUG__
	__atomic_add_fetch( &allocUnfreed, -size, __ATOMIC_SEQ_CST );
	if ( traceHeap() ) {
		char helpText[64];
		__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
									"Free( %p ) size:%zu\n", addr, size ); // print debug/nodebug
	} // if
	#endif // __CFA_DEBUG__
} // doFree

size_t prtFree( Heap & manager ) with( manager ) {
	size_t total = 0;
	#ifdef __STATISTICS__
	__cfaabi_bits_acquire();
	__cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" );
	#endif // __STATISTICS__
	for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {
		size_t size = freeLists[i].blockSize;
		#ifdef __STATISTICS__
		unsigned int N = 0;
		#endif // __STATISTICS__

		#if BUCKETLOCK == SPINLOCK
		for ( Heap.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
		#else
		for ( ;; ) {
			// for ( Heap.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {
			// for ( Heap.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */) {
			// Heap.Storage * temp = p->header.kind.real.next.top; // FIX ME: direct assignment fails, initialization works
			// typeof(p) temp = (( p )`next)->top;		// FIX ME: direct assignment fails, initialization works
			// p = temp;
		#endif // BUCKETLOCK
			total += size;
			#ifdef __STATISTICS__
			N += 1;
			#endif // __STATISTICS__
		} // for

		#ifdef __STATISTICS__
		__cfaabi_bits_print_nolock( STDERR_FILENO, "%7zu, %-7u  ", size, N );
		if ( (i + 1) % 8 == 0 ) __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" );
		#endif // __STATISTICS__
	} // for
	#ifdef __STATISTICS__
	__cfaabi_bits_print_nolock( STDERR_FILENO, "\ntotal free blocks:%zu\n", total );
	__cfaabi_bits_release();
	#endif // __STATISTICS__
	return (char *)heapEnd - (char *)heapBegin - total;
} // prtFree
static void ?{}( Heap & manager ) with( manager ) {
	__page_size = sysconf( _SC_PAGESIZE );
	__map_prot = PROT_READ | PROT_WRITE | PROT_EXEC;

	for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
		freeLists[i].blockSize = bucketSizes[i];
	} // for

	#ifdef FASTLOOKUP
	unsigned int idx = 0;
	for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
		if ( i > bucketSizes[idx] ) idx += 1;
		lookup[i] = idx;
	} // for
	#endif // FASTLOOKUP

	if ( ! setMmapStart( malloc_mmap_start() ) ) {
		abort( "Heap : internal error, mmap start initialization failure." );
	} // if
	heapExpand = malloc_expansion();

	char * end = (char *)sbrk( 0 );
	heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, __page_size ) - end ); // move start of heap to multiple of alignment
} // Heap

static void ^?{}( Heap & ) {
	#ifdef __STATISTICS__
	if ( traceHeapTerm() ) {
		printStats();
		// prtUnfreed() called in heapAppStop()
	} // if
	#endif // __STATISTICS__
} // ~Heap

static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_startup( void ) {
	#ifdef __CFA_DEBUG__
	if ( heapBoot ) {									// check for recursion during system boot
		abort( "boot() : internal error, recursively invoked during system boot." );
	} // if
	heapBoot = true;
	#endif // __CFA_DEBUG__

	//verify( heapManager.heapBegin != 0 );
	//heapManager{};
	if ( heapManager.heapBegin == 0p ) heapManager{};	// sanity check
} // memory_startup

static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_shutdown( void ) {
	^heapManager{};
} // memory_shutdown

static inline void * mallocNoStats( size_t size ) {		// necessary for malloc statistics
	verify( heapManager.heapBegin != 0p );				// called before memory_startup ?
	if ( unlikely( size == 0 ) ) return 0p;				// 0 BYTE ALLOCATION RETURNS NULL POINTER

	#if __SIZEOF_POINTER__ == 8
	verify( size < ((typeof(size_t))1 << 48) );
	#endif // __SIZEOF_POINTER__ == 8
	return doMalloc( size );
} // mallocNoStats

static inline void * memalignNoStats( size_t alignment, size_t size ) {
	if ( unlikely( size == 0 ) ) return 0p;				// 0 BYTE ALLOCATION RETURNS NULL POINTER

	#ifdef __CFA_DEBUG__
	checkAlign( alignment );							// check alignment
	#endif // __CFA_DEBUG__

	// if alignment <= default alignment, do normal malloc as two headers are unnecessary
	if ( unlikely( alignment <= libAlign() ) ) return mallocNoStats( size );

	// Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
	// administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
	//      .-------------v-----------------v----------------v----------,
	//      | Real Header | ... padding ... |   Fake Header  | data ... |
	//      `-------------^-----------------^-+--------------^----------'
	//      |<--------------------------------' offset/align |<-- alignment boundary
	// subtract libAlign() because it is already the minimum alignment
	// add sizeof(Storage) for fake header
	char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(Heap.Storage) );

	// address in the block of the "next" alignment address
	char * user = (char *)ceiling2( (uintptr_t)(addr + sizeof(Heap.Storage)), alignment );

	// address of header from malloc
	Heap.Storage.Header * RealHeader = HeaderAddr( addr );
	RealHeader->kind.real.size = size;					// correct size to eliminate above alignment offset
	// address of fake header *before* the alignment location
	Heap.Storage.Header * fakeHeader = HeaderAddr( user );

	// SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
	fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)RealHeader;
	// SKULLDUGGERY: odd alignment implies fake header
	fakeHeader->kind.fake.alignment = MarkAlignmentBit( alignment );

	return user;
} // memalignNoStats
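// Worked example (illustrative, 64-bit): memalignNoStats( 64, 100 ) requests 100 + 64 - 16 + 16 = 164 bytes from
// mallocNoStats, which doMalloc pads to 164 + 16 = 180 total and serves from the 192-byte bucket; any block that
// large is guaranteed to contain a 64-byte boundary with at least sizeof(Heap.Storage) bytes before it for the
// fake header.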
//####################### Memory Allocation Routines ####################


extern "C" {
	// Allocates size bytes and returns a pointer to the allocated memory. The contents are undefined. If size is 0,
	// then malloc() returns a unique pointer value that can later be successfully passed to free().
	void * malloc( size_t size ) {
		#ifdef __STATISTICS__
		if ( likely( size > 0 ) ) {
			__atomic_add_fetch( &stats.malloc_calls, 1, __ATOMIC_SEQ_CST );
			__atomic_add_fetch( &stats.malloc_storage_request, size, __ATOMIC_SEQ_CST );
		} else {
			__atomic_add_fetch( &stats.malloc_0_calls, 1, __ATOMIC_SEQ_CST );
		} // if
		#endif // __STATISTICS__

		return mallocNoStats( size );
	} // malloc

	// Same as malloc() except the allocation is an array of dim elements, each of elemSize bytes.
	void * aalloc( size_t dim, size_t elemSize ) {
		size_t size = dim * elemSize;
		#ifdef __STATISTICS__
		if ( likely( size > 0 ) ) {
			__atomic_add_fetch( &stats.aalloc_calls, 1, __ATOMIC_SEQ_CST );
			__atomic_add_fetch( &stats.aalloc_storage_request, size, __ATOMIC_SEQ_CST );
		} else {
			__atomic_add_fetch( &stats.aalloc_0_calls, 1, __ATOMIC_SEQ_CST );
		} // if
		#endif // __STATISTICS__

		return mallocNoStats( size );
	} // aalloc

	// Same as aalloc() with memory set to zero.
	void * calloc( size_t dim, size_t elemSize ) {
		size_t size = dim * elemSize;
		if ( unlikely( size == 0 ) ) {					// 0 BYTE ALLOCATION RETURNS NULL POINTER
			#ifdef __STATISTICS__
			__atomic_add_fetch( &stats.calloc_0_calls, 1, __ATOMIC_SEQ_CST );
			#endif // __STATISTICS__
			return 0p;
		} // if
		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.calloc_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &stats.calloc_storage_request, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		char * addr = (char *)mallocNoStats( size );

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeElem;
		size_t bsize, alignment;

		#ifndef __CFA_DEBUG__
		bool mapped =
		#endif // __CFA_DEBUG__
			headers( "calloc", addr, header, freeElem, bsize, alignment );

		#ifndef __CFA_DEBUG__
		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		if ( ! mapped )
		#endif // __CFA_DEBUG__
			// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
			// `-header`-addr                      `-size
			memset( addr, '\0', size );					// set to zeros

		MarkZeroFilledBit( header );					// mark as zero fill
		return addr;
	} // calloc
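	// Note (illustrative): the zero-fill mark outlives the call. After p = calloc( 10, 8 ), the allocation's
	// ZeroFillBit is set, so a later realloc( p, 160 ) knows to zero the added 80 bytes of the grown region;
	// a plain malloc/realloc chain never pays that memset cost.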
	// Change the size of the memory block pointed to by oaddr to size bytes. The contents are undefined. If oaddr is
	// 0p, then the call is equivalent to malloc(size), for all values of size; if size is equal to zero, and oaddr is
	// not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier
	// call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done.
	void * resize( void * oaddr, size_t size ) {
		// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
		if ( unlikely( size == 0 ) ) {					// special cases
			#ifdef __STATISTICS__
			__atomic_add_fetch( &stats.resize_0_calls, 1, __ATOMIC_SEQ_CST );
			#endif // __STATISTICS__
			free( oaddr );
			return 0p;
		} // if
		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.resize_calls, 1, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		if ( unlikely( oaddr == 0p ) ) {
			#ifdef __STATISTICS__
			__atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST );
			#endif // __STATISTICS__
			return mallocNoStats( size );
		} // if

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeElem;
		size_t bsize, oalign;
		headers( "resize", oaddr, header, freeElem, bsize, oalign );

		size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
		// same size, DO NOT preserve STICKY PROPERTIES.
		if ( oalign == libAlign() && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
			ClearZeroFillBit( header );					// no alignment and turn off 0 fill
			header->kind.real.size = size;				// reset allocation size
			return oaddr;
		} // if

		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		// change size, DO NOT preserve STICKY PROPERTIES.
		free( oaddr );
		return mallocNoStats( size );					// create new area
	} // resize
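	// Worked example (illustrative, 64-bit): a block from the 128-byte bucket has odsize = 112. resize( p, 60 )
	// reuses it in place because 60 <= 112 <= 120, whereas resize( p, 50 ) fails 112 <= 100 and reallocates,
	// capping internal fragmentation at 50% of the data area.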
	// Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of
	// the old and new sizes.
	void * realloc( void * oaddr, size_t size ) {
		// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
		if ( unlikely( size == 0 ) ) {					// special cases
			#ifdef __STATISTICS__
			__atomic_add_fetch( &stats.realloc_0_calls, 1, __ATOMIC_SEQ_CST );
			#endif // __STATISTICS__
			free( oaddr );
			return 0p;
		} // if
		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		if ( unlikely( oaddr == 0p ) ) {
			#ifdef __STATISTICS__
			__atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST );
			#endif // __STATISTICS__
			return mallocNoStats( size );
		} // if

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeElem;
		size_t bsize, oalign;
		headers( "realloc", oaddr, header, freeElem, bsize, oalign );

		size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
		size_t osize = header->kind.real.size;			// old allocation size
		bool ozfill = ZeroFillBit( header );			// old allocation zero filled
		if ( unlikely( size <= odsize && odsize <= size * 2 ) ) { // allow up to 50% wasted storage
			header->kind.real.size = size;				// reset allocation size
			if ( unlikely( ozfill ) && size > osize ) {	// previous request zero fill and larger ?
				memset( (char *)oaddr + osize, '\0', size - osize ); // initialize added storage
			} // if
			return oaddr;
		} // if

		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		// change size and copy old content to new storage
		void * naddr;
		if ( likely( oalign == libAlign() ) ) {			// previous request not aligned ?
			naddr = mallocNoStats( size );				// create new area
		} else {
			naddr = memalignNoStats( oalign, size );	// create new aligned area
		} // if

		headers( "realloc", naddr, header, freeElem, bsize, oalign );
		memcpy( naddr, oaddr, min( osize, size ) );		// copy bytes
		free( oaddr );

		if ( unlikely( ozfill ) ) {						// previous request zero fill ?
			MarkZeroFilledBit( header );				// mark new request as zero filled
			if ( size > osize ) {						// previous request larger ?
				memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
			} // if
		} // if
		return naddr;
	} // realloc

	// Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize.
	void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) {
		return realloc( oaddr, dim * elemSize );
	} // reallocarray

	// Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete)
	void * memalign( size_t alignment, size_t size ) {
		#ifdef __STATISTICS__
		if ( likely( size > 0 ) ) {
			__atomic_add_fetch( &stats.memalign_calls, 1, __ATOMIC_SEQ_CST );
			__atomic_add_fetch( &stats.memalign_storage_request, size, __ATOMIC_SEQ_CST );
		} else {
			__atomic_add_fetch( &stats.memalign_0_calls, 1, __ATOMIC_SEQ_CST );
		} // if
		#endif // __STATISTICS__

		return memalignNoStats( alignment, size );
	} // memalign

	// Same as aalloc() with memory alignment.
	void * amemalign( size_t alignment, size_t dim, size_t elemSize ) {
		size_t size = dim * elemSize;
		#ifdef __STATISTICS__
		if ( likely( size > 0 ) ) {
			__atomic_add_fetch( &stats.amemalign_calls, 1, __ATOMIC_SEQ_CST );
			__atomic_add_fetch( &stats.amemalign_storage_request, size, __ATOMIC_SEQ_CST );
		} else {
			__atomic_add_fetch( &stats.amemalign_0_calls, 1, __ATOMIC_SEQ_CST );
		} // if
		#endif // __STATISTICS__

		return memalignNoStats( alignment, size );
	} // amemalign

	// Same as calloc() with memory alignment.
	void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) {
		size_t size = dim * elemSize;
		if ( unlikely( size == 0 ) ) {					// 0 BYTE ALLOCATION RETURNS NULL POINTER
			#ifdef __STATISTICS__
			__atomic_add_fetch( &stats.cmemalign_0_calls, 1, __ATOMIC_SEQ_CST );
			#endif // __STATISTICS__
			return 0p;
		} // if
		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.cmemalign_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &stats.cmemalign_storage_request, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		char * addr = (char *)memalignNoStats( alignment, size );

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeElem;
		size_t bsize;

		#ifndef __CFA_DEBUG__
		bool mapped =
		#endif // __CFA_DEBUG__
			headers( "cmemalign", addr, header, freeElem, bsize, alignment );

		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		#ifndef __CFA_DEBUG__
		if ( ! mapped )
		#endif // __CFA_DEBUG__
			// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
			// `-header`-addr                      `-size
			memset( addr, '\0', size );					// set to zeros

		MarkZeroFilledBit( header );					// mark as zero filled
		return addr;
	} // cmemalign

	// Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
	// of alignment. This requirement is universally ignored.
	void * aligned_alloc( size_t alignment, size_t size ) {
		return memalign( alignment, size );
	} // aligned_alloc
	// Allocates size bytes and places the address of the allocated memory in *memptr. The address of the allocated
	// memory shall be a multiple of alignment, which must be a power of two and a multiple of sizeof(void *). If size
	// is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to
	// free(3).
	int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
		if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment
		*memptr = memalign( alignment, size );
		return 0;
	} // posix_memalign

	// Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
	// page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
	void * valloc( size_t size ) {
		return memalign( __page_size, size );
	} // valloc

	// Same as valloc() but rounds size to multiple of page size.
	void * pvalloc( size_t size ) {
		return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size
	} // pvalloc

	// Frees the memory space pointed to by ptr, which must have been returned by a previous call to malloc(), calloc()
	// or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is
	// 0p, no operation is performed.
	void free( void * addr ) {
		if ( unlikely( addr == 0p ) ) {					// special case
			#ifdef __STATISTICS__
			__atomic_add_fetch( &stats.free_null_calls, 1, __ATOMIC_SEQ_CST );
			#endif // __STATISTICS__

			// #ifdef __CFA_DEBUG__
			// if ( traceHeap() ) {
			// 	#define nullmsg "Free( 0x0 ) size:0\n"
			// 	// Do not debug print free( 0p ), as it can cause recursive entry from sprintf.
			// 	__cfaabi_dbg_write( nullmsg, sizeof(nullmsg) - 1 );
			// } // if
			// #endif // __CFA_DEBUG__
			return;
		} // exit

		doFree( addr );
	} // free

	// Returns the alignment of an allocation.
	size_t malloc_alignment( void * addr ) {
		if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
			return ClearAlignmentBit( header );			// clear flag from value
		} else {
			return libAlign();							// minimum alignment
		} // if
	} // malloc_alignment

	// Returns true if the allocation is zero filled, e.g., allocated by calloc().
	bool malloc_zero_fill( void * addr ) {
		if ( unlikely( addr == 0p ) ) return false;		// null allocation is not zero fill
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
			header = RealHeader( header );				// backup from fake to real header
		} // if
		return ZeroFillBit( header );					// zero filled ?
	} // malloc_zero_fill

	// Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T).
	size_t malloc_size( void * addr ) {
		if ( unlikely( addr == 0p ) ) return 0;			// null allocation has zero size
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
			header = RealHeader( header );				// backup from fake to real header
		} // if
		return header->kind.real.size;
	} // malloc_size

	// Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by
	// malloc or a related function.
	size_t malloc_usable_size( void * addr ) {
		if ( unlikely( addr == 0p ) ) return 0;			// null allocation has 0 size
		Heap.Storage.Header * header;
		Heap.FreeHeader * freeElem;
		size_t bsize, alignment;

		headers( "malloc_usable_size", addr, header, freeElem, bsize, alignment );
		return DataStorage( bsize, addr, header );		// data storage in bucket
	} // malloc_usable_size

	// Prints (on default standard error) statistics about memory allocated by malloc and related functions.
	void malloc_stats( void ) {
		#ifdef __STATISTICS__
		printStats();
		if ( prtFree() ) prtFree( heapManager );
		#endif // __STATISTICS__
	} // malloc_stats

	// Changes the file descriptor where malloc_stats() writes statistics.
	int malloc_stats_fd( int fd __attribute__(( unused )) ) {
		#ifdef __STATISTICS__
		int temp = stats_fd;
		stats_fd = fd;
		return temp;
		#else
		return -1;										// unsupported
		#endif // __STATISTICS__
	} // malloc_stats_fd

	// Prints an XML string that describes the current state of the memory-allocation implementation in the caller.
	// The string is printed on the file stream stream. The exported string includes information about all arenas (see
	// malloc).
	int malloc_info( int options, FILE * stream __attribute__(( unused )) ) {
		if ( options != 0 ) { errno = EINVAL; return -1; }
		#ifdef __STATISTICS__
		return printStatsXML( stream );
		#else
		return 0;										// unsupported
		#endif // __STATISTICS__
	} // malloc_info

	// Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The param argument
	// specifies the parameter to be modified, and value specifies the new value for that parameter.
	int mallopt( int option, int value ) {
		if ( value < 0 ) return 0;
		choose( option ) {
			case M_TOP_PAD:
				heapExpand = ceiling2( value, __page_size );
				return 1;
			case M_MMAP_THRESHOLD:
				if ( setMmapStart( value ) ) return 1;
		} // choose
		return 0;										// error, unsupported
	} // mallopt

	// Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument).
	int malloc_trim( size_t ) {
		return 0;										// => impossible to release memory
	} // malloc_trim

	// Records the current state of all malloc internal bookkeeping variables (but not the actual contents of the heap
	// or the state of malloc_hook functions pointers). The state is recorded in a system-dependent opaque data
	// structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function
	// result. (The caller must free this memory.)
	void * malloc_get_state( void ) {
		return 0p;										// unsupported
	} // malloc_get_state

	// Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data
	// structure pointed to by state.
	int malloc_set_state( void * ) {
		return 0;										// unsupported
	} // malloc_set_state

	// Sets the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation.
	__attribute__((weak)) size_t malloc_expansion() { return __CFA_DEFAULT_HEAP_EXPANSION__; }

	// Sets the crossover point between allocations occurring in the sbrk area or separately mmapped.
	__attribute__((weak)) size_t malloc_mmap_start() { return __CFA_DEFAULT_MMAP_START__; }

	// Amount subtracted to adjust for unfreed program storage (debug only).
	__attribute__((weak)) size_t malloc_unfreed() { return __CFA_DEFAULT_HEAP_UNFREED__; }
} // extern "C"
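// Example use of the tuning knobs above (illustrative only; values are hypothetical): raise the mmap crossover so
// medium buffers still come from the bucket heap, and grow the sbrk area in larger steps.
//   mallopt( M_MMAP_THRESHOLD, 2 * 1024 * 1024 );		// allocations below 2MB use buckets
//   mallopt( M_TOP_PAD, 32 * 1024 * 1024 );			// extend the heap 32MB at a time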
// Must have CFA linkage to overload with C linkage realloc.
void * resize( void * oaddr, size_t nalign, size_t size ) {
	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
	if ( unlikely( size == 0 ) ) {						// special cases
		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.resize_0_calls, 1, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		free( oaddr );
		return 0p;
	} // if

	if ( unlikely( nalign < libAlign() ) ) nalign = libAlign(); // reset alignment to minimum
	#ifdef __CFA_DEBUG__
	else checkAlign( nalign );							// check alignment
	#endif // __CFA_DEBUG__

	if ( unlikely( oaddr == 0p ) ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.resize_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		return memalignNoStats( nalign, size );
	} // if

	// Attempt to reuse existing alignment.
	Heap.Storage.Header * header = HeaderAddr( oaddr );
	bool isFakeHeader = AlignmentBit( header );			// old fake header ?
	size_t oalign;

	if ( unlikely( isFakeHeader ) ) {
		oalign = ClearAlignmentBit( header );			// old alignment
		if ( unlikely( (uintptr_t)oaddr % nalign == 0	// lucky match ?
					   && ( oalign <= nalign			// going down
							|| (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
			 ) ) {
			HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
			Heap.FreeHeader * freeElem;
			size_t bsize, oalign;
			headers( "resize", oaddr, header, freeElem, bsize, oalign );
			size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket

			if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted data storage
				HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
				ClearZeroFillBit( header );				// turn off 0 fill
				header->kind.real.size = size;			// reset allocation size
				return oaddr;
			} // if
		} // if
	} else if ( ! isFakeHeader							// old real header (aligned on libAlign) ?
				&& nalign == libAlign() ) {				// new alignment also on libAlign => no fake header needed
		return resize( oaddr, size );					// duplicate special case checks
	} // if

	#ifdef __STATISTICS__
	__atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST );
	#endif // __STATISTICS__

	// change size, DO NOT preserve STICKY PROPERTIES.
	free( oaddr );
	return memalignNoStats( nalign, size );				// create new aligned area
} // resize

void * realloc( void * oaddr, size_t nalign, size_t size ) {
	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
	if ( unlikely( size == 0 ) ) {						// special cases
		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.realloc_0_calls, 1, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		free( oaddr );
		return 0p;
	} // if

	if ( unlikely( nalign < libAlign() ) ) nalign = libAlign(); // reset alignment to minimum
	#ifdef __CFA_DEBUG__
	else checkAlign( nalign );							// check alignment
	#endif // __CFA_DEBUG__

	if ( unlikely( oaddr == 0p ) ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		return memalignNoStats( nalign, size );
	} // if

	// Attempt to reuse existing alignment.
	Heap.Storage.Header * header = HeaderAddr( oaddr );
	bool isFakeHeader = AlignmentBit( header );			// old fake header ?
	size_t oalign;
	if ( unlikely( isFakeHeader ) ) {
		oalign = ClearAlignmentBit( header );			// old alignment
		if ( unlikely( (uintptr_t)oaddr % nalign == 0	// lucky match ?
					   && ( oalign <= nalign			// going down
							|| (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
			 ) ) {
			HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
			return realloc( oaddr, size );				// duplicate special case checks
		} // if
	} else if ( ! isFakeHeader							// old real header (aligned on libAlign) ?
				&& nalign == libAlign() ) {				// new alignment also on libAlign => no fake header needed
		return realloc( oaddr, size );					// duplicate special case checks
	} // if

	#ifdef __STATISTICS__
	__atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST );
	__atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST );
	#endif // __STATISTICS__

	Heap.FreeHeader * freeElem;
	size_t bsize;
	headers( "realloc", oaddr, header, freeElem, bsize, oalign );

	// change size and copy old content to new storage
	size_t osize = header->kind.real.size;				// old allocation size
	bool ozfill = ZeroFillBit( header );				// old allocation zero filled

	void * naddr = memalignNoStats( nalign, size );		// create new aligned area

	headers( "realloc", naddr, header, freeElem, bsize, oalign );
	memcpy( naddr, oaddr, min( osize, size ) );			// copy bytes
	free( oaddr );

	if ( unlikely( ozfill ) ) {							// previous request zero fill ?
		MarkZeroFilledBit( header );					// mark new request as zero filled
		if ( size > osize ) {							// previous request larger ?
			memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
		} // if
	} // if
	return naddr;
} // realloc


// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa -nodebug -O2 heap.cfa" //
// End: //