//
// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// heap.cfa --
//
// Author           : Peter A. Buhr
// Created On       : Tue Dec 19 21:58:35 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Sun Oct 30 20:56:20 2022
// Update Count     : 1584
//

#include <stdio.h>
#include <string.h>										// memset, memcpy
#include <limits.h>										// ULONG_MAX
#include <stdlib.h>										// EXIT_FAILURE
#include <errno.h>										// errno, ENOMEM, EINVAL
#include <unistd.h>										// STDERR_FILENO, sbrk, sysconf
#include <malloc.h>										// memalign, malloc_usable_size
#include <sys/mman.h>									// mmap, munmap
extern "C" {
#include <sys/sysinfo.h>								// get_nprocs
} // extern "C"

#include "bits/align.hfa"								// libAlign
#include "bits/defs.hfa"								// likely, unlikely
#include "concurrency/kernel/fwd.hfa"					// __POLL_PREEMPTION
#include "startup.hfa"									// STARTUP_PRIORITY_MEMORY
#include "math.hfa"										// ceiling, min
#include "bitmanip.hfa"									// is_pow2, ceiling2

// supported mallopt options
#ifndef M_MMAP_THRESHOLD
#define M_MMAP_THRESHOLD (-1)
#endif // M_MMAP_THRESHOLD

#ifndef M_TOP_PAD
#define M_TOP_PAD (-2)
#endif // M_TOP_PAD

#define FASTLOOKUP										// use O(1) table lookup from allocation size to bucket size
#define OWNERSHIP										// return freed memory to owner thread
#define RETURNSPIN										// toggle spinlock / lockfree queue
#if ! defined( OWNERSHIP ) && defined( RETURNSPIN )
#warning "RETURNSPIN is ignored without OWNERSHIP; suggest commenting out RETURNSPIN"
#endif // ! OWNERSHIP && RETURNSPIN

#define CACHE_ALIGN 64
#define CALIGN __attribute__(( aligned(CACHE_ALIGN) ))

#define TLSMODEL __attribute__(( tls_model("initial-exec") ))

//#define __STATISTICS__

enum {
	// The default extension heap amount in units of bytes. When the current heap reaches the brk address, the brk
	// address is extended by the extension amount.
	__CFA_DEFAULT_HEAP_EXPANSION__ = 10 * 1024 * 1024,

	// The mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; values
	// greater than or equal to this value are mmapped from the operating system.
	__CFA_DEFAULT_MMAP_START__ = 512 * 1024 + 1,

	// The default unfreed storage amount in units of bytes. When the Cforall program ends it subtracts this amount from
	// the malloc/free counter to adjust for storage the program does not free.
	__CFA_DEFAULT_HEAP_UNFREED__ = 0
}; // enum


//####################### Heap Trace/Print ####################

static bool traceHeap = false;

inline bool traceHeap() libcfa_public { return traceHeap; }

bool traceHeapOn() libcfa_public {
	bool temp = traceHeap;
	traceHeap = true;
	return temp;
} // traceHeapOn

bool traceHeapOff() libcfa_public {
	bool temp = traceHeap;
	traceHeap = false;
	return temp;
} // traceHeapOff

bool traceHeapTerm() libcfa_public { return false; }


static bool prtFree = false;

bool prtFree() {
	return prtFree;
} // prtFree

bool prtFreeOn() {
	bool temp = prtFree;
	prtFree = true;
	return temp;
} // prtFreeOn

bool prtFreeOff() {
	bool temp = prtFree;
	prtFree = false;
	return temp;
} // prtFreeOff


//######################### Helpers #########################


// generic Bsearchl does not inline, so substitute with hand-coded binary-search.
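// Bsearchl returns the smallest index i in [0, dim) with key <= vals[i], or dim when key is larger than every
// element, i.e., the first bucket large enough to hold a request of key bytes. For example (hypothetical values),
// with vals = { 32, 48, 64 }: Bsearchl( 40, vals, 3 ) => 1, Bsearchl( 64, vals, 3 ) => 2, Bsearchl( 65, vals, 3 ) => 3.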
inline __attribute__((always_inline))
static size_t Bsearchl( unsigned int key, const unsigned int vals[], size_t dim ) {
	size_t l = 0, m, h = dim;
	while ( l < h ) {
		m = (l + h) / 2;
		if ( (unsigned int &)(vals[m]) < key ) {		// cast away const
			l = m + 1;
		} else {
			h = m;
		} // if
	} // while
	return l;
} // Bsearchl


// pause to prevent excess processor bus usage
#if defined( __i386 ) || defined( __x86_64 )
	#define Pause() __asm__ __volatile__ ( "pause" : : : )
#elif defined(__ARM_ARCH)
	#define Pause() __asm__ __volatile__ ( "YIELD" : : : )
#else
	#error unsupported architecture
#endif

typedef volatile uintptr_t SpinLock_t CALIGN;			// aligned addressable word-size

static inline __attribute__((always_inline)) void lock( volatile SpinLock_t & slock ) {
	enum { SPIN_START = 4, SPIN_END = 64 * 1024, };
	unsigned int spin = SPIN_START;

	for ( unsigned int i = 1;; i += 1 ) {
		if ( slock == 0 && __atomic_test_and_set( &slock, __ATOMIC_SEQ_CST ) == 0 ) break; // Fence
		for ( volatile unsigned int s = 0; s < spin; s += 1 ) Pause(); // exponential spin
		spin += spin;									// powers of 2
		//if ( i % 64 == 0 ) spin += spin;				// slowly increase by powers of 2
		if ( spin > SPIN_END ) spin = SPIN_END;			// cap spinning
	} // for
} // spin_lock

static inline __attribute__((always_inline)) void unlock( volatile SpinLock_t & slock ) {
	__atomic_clear( &slock, __ATOMIC_SEQ_CST );			// Fence
} // spin_unlock


//####################### Heap Statistics ####################

#ifdef __STATISTICS__
enum { CntTriples = 12 };								// number of counter triples
enum { MALLOC, AALLOC, CALLOC, MEMALIGN, AMEMALIGN, CMEMALIGN, RESIZE, REALLOC, FREE };

struct StatsOverlay {									// overlay for iteration
	unsigned int calls, calls_0;
	unsigned long long int request, alloc;
};

// Heap statistics counters.
union HeapStatistics {
	struct {											// minimum qualification
		unsigned int malloc_calls, malloc_0_calls;
		unsigned long long int malloc_storage_request, malloc_storage_alloc;
		unsigned int aalloc_calls, aalloc_0_calls;
		unsigned long long int aalloc_storage_request, aalloc_storage_alloc;
		unsigned int calloc_calls, calloc_0_calls;
		unsigned long long int calloc_storage_request, calloc_storage_alloc;
		unsigned int memalign_calls, memalign_0_calls;
		unsigned long long int memalign_storage_request, memalign_storage_alloc;
		unsigned int amemalign_calls, amemalign_0_calls;
		unsigned long long int amemalign_storage_request, amemalign_storage_alloc;
		unsigned int cmemalign_calls, cmemalign_0_calls;
		unsigned long long int cmemalign_storage_request, cmemalign_storage_alloc;
		unsigned int resize_calls, resize_0_calls;
		unsigned long long int resize_storage_request, resize_storage_alloc;
		unsigned int realloc_calls, realloc_0_calls;
		unsigned long long int realloc_storage_request, realloc_storage_alloc;
		unsigned int free_calls, free_null_calls;
		unsigned long long int free_storage_request, free_storage_alloc;
		unsigned int return_pulls, return_pushes;
		unsigned long long int return_storage_request, return_storage_alloc;
		unsigned int mmap_calls, mmap_0_calls;			// no zero calls
		unsigned long long int mmap_storage_request, mmap_storage_alloc;
		unsigned int munmap_calls, munmap_0_calls;		// no zero calls
		unsigned long long int munmap_storage_request, munmap_storage_alloc;
	};
	struct StatsOverlay counters[CntTriples];			// overlay for iteration
}; // HeapStatistics

static_assert( sizeof(HeapStatistics) == CntTriples * sizeof(StatsOverlay),
			   "Heap statistics counter-triples do not match overlay array size" );

static void HeapStatisticsCtor( HeapStatistics & stats ) {
	memset( &stats, '\0', sizeof(stats) );				// very fast
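	// The StatsOverlay union member lets the counter triples be processed generically: counters[MALLOC] overlays the
	// malloc_* fields, counters[AALLOC] the aalloc_* fields, and so on, e.g., stats.counters[MALLOC].calls names the
	// same storage as stats.malloc_calls. The commented-out loop below is the generic (slower) form of the memset.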
	// for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
	// 	stats.counters[i].calls = stats.counters[i].calls_0 = stats.counters[i].request = stats.counters[i].alloc = 0;
	// } // for
} // HeapStatisticsCtor

static HeapStatistics & ?+=?( HeapStatistics & lhs, const HeapStatistics & rhs ) {
	for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
		lhs.counters[i].calls += rhs.counters[i].calls;
		lhs.counters[i].calls_0 += rhs.counters[i].calls_0;
		lhs.counters[i].request += rhs.counters[i].request;
		lhs.counters[i].alloc += rhs.counters[i].alloc;
	} // for
	return lhs;
} // ?+=?
#endif // __STATISTICS__


// Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
// Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
enum { NoBucketSizes = 91 };							// number of bucket sizes

struct Heap {
	struct Storage {
		struct Header {									// header
			union Kind {
				struct RealHeader {
					union {
						struct {						// 4-byte word => 8-byte header, 8-byte word => 16-byte header
							union {						// 2nd low-order bit => zero filled, 3rd low-order bit => mmapped
								// FreeHeader * home;	// allocated block points back to home locations (must overlay alignment)
								void * home;			// allocated block points back to home locations (must overlay alignment)
								size_t blockSize;		// size for munmap (must overlay alignment)
								Storage * next;			// freed block points to next freed block of same size
							};
							size_t size;				// allocation size in bytes
						};
					};
				} real; // RealHeader

				struct FakeHeader {
					uintptr_t alignment;				// 1st low-order bit => fake header & alignment
					uintptr_t offset;
				} fake; // FakeHeader
			} kind; // Kind
		} header; // Header

		char pad[libAlign() - sizeof( Header )];
		char data[0];									// storage
	}; // Storage

	static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" );

	struct __attribute__(( aligned (8) )) FreeHeader {
		size_t blockSize __attribute__(( aligned(8) )); // size of allocations on this list
		#ifdef OWNERSHIP
		#ifdef RETURNSPIN
		SpinLock_t returnLock;
		#endif // RETURNSPIN
		Storage * returnList;							// other thread return list
		#endif // OWNERSHIP

		Storage * freeList;								// thread free list
		Heap * homeManager;								// heap owner (free storage to bucket, from bucket to heap)
	}; // FreeHeader

	FreeHeader freeLists[NoBucketSizes];				// buckets for different allocation sizes
	void * heapBuffer;									// start of free storage in buffer
	size_t heapReserve;									// amount of remaining free storage in buffer

	#if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
	Heap * nextHeapManager;								// intrusive link of existing heaps; traversed to collect statistics or check unfreed storage
	#endif // __STATISTICS__ || __CFA_DEBUG__
	Heap * nextFreeHeapManager;							// intrusive link of free heaps from terminated threads; reused by new threads

	#ifdef __CFA_DEBUG__
	int64_t allocUnfreed;								// running total of allocations minus frees; can be negative
	#endif // __CFA_DEBUG__

	#ifdef __STATISTICS__
	HeapStatistics stats;								// local statistic table for this heap
	#endif // __STATISTICS__
}; // Heap


struct HeapMaster {
	SpinLock_t extLock;									// protects allocation-buffer extension
	SpinLock_t mgrLock;									// protects freeHeapManagersList, heapManagersList, heapManagersStorage, heapManagersStorageEnd

	void * heapBegin;									// start of heap
	void * heapEnd;										// logical end of heap
	size_t heapRemaining;								// amount of storage not allocated in the current chunk
	size_t pageSize;									// architecture pagesize
	size_t heapExpand;									// sbrk advance
	size_t mmapStart;									// cross over point for mmap
	unsigned int maxBucketsUsed;						// maximum number of buckets in use

	Heap * heapManagersList;							// heap-list head
	Heap * freeHeapManagersList;						// free-list head

	// Heap superblocks are not linked; heaps in superblocks are linked via intrusive links.
	Heap * heapManagersStorage;							// next heap to use in heap superblock
	Heap * heapManagersStorageEnd;						// logical heap outside of superblock's end

	#ifdef __STATISTICS__
	HeapStatistics stats;								// global stats for thread-local heaps to add their counters when exiting
	unsigned long int threads_started, threads_exited;	// counts threads that have started and exited
	unsigned long int reused_heap, new_heap;			// counts reusability of heaps
	unsigned int sbrk_calls;
	unsigned long long int sbrk_storage;
	int stats_fd;
	#endif // __STATISTICS__
}; // HeapMaster


#ifdef FASTLOOKUP
enum { LookupSizes = 65_536 + sizeof(Heap.Storage) };	// number of fast lookup sizes
static unsigned char lookup[LookupSizes];				// O(1) lookup for small sizes
#endif // FASTLOOKUP

static volatile bool heapMasterBootFlag = false;		// trigger for first heap
static HeapMaster heapMaster @= {};						// program global

static void heapMasterCtor();
static void heapMasterDtor();
static Heap * getHeap();


// Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16.
// Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size.
// malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed.
static const unsigned int bucketSizes[] @= {			// different bucket sizes
	16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4
	96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3
	160, 192, 224, 256 + sizeof(Heap.Storage), // 4
	320, 384, 448, 512 + sizeof(Heap.Storage), // 4
	640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4
	1_536, 2_048 + sizeof(Heap.Storage), // 2
	2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4
	6_144, 8_192 + sizeof(Heap.Storage), // 2
	9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8
	18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8
	36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8
	73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8
	147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8
	294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8
	655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4
	1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8
	2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4
};

static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0]), "size of bucket array wrong" );


// extern visibility, used by runtime kernel
libcfa_public size_t __page_size;						// architecture pagesize
libcfa_public int __map_prot;							// common mmap/mprotect protection


// Thread-local storage is allocated lazily when the storage is accessed.
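// PAD1/PAD2 surround heapManager so the heavily accessed heap pointer sits alone on its CACHE_ALIGN-byte cache line;
// without the padding, unrelated thread-local variables placed adjacently could false-share the line and cause
// needless coherence traffic on every allocation.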
static __thread size_t PAD1 CALIGN TLSMODEL __attribute__(( unused )); // protect false sharing
static __thread Heap * volatile heapManager CALIGN TLSMODEL;
static __thread size_t PAD2 CALIGN TLSMODEL __attribute__(( unused )); // protect further false sharing


// declare helper functions for HeapMaster
void noMemory();										// forward, called by "builtin_new" when malloc returns 0

void heapMasterCtor() with( heapMaster ) {
	// Singleton pattern to initialize heap master

	verify( bucketSizes[0] == (16 + sizeof(Heap.Storage)) );

	__page_size = sysconf( _SC_PAGESIZE );
	pageSize = __page_size;								// record architecture pagesize for subsequent checks
	__map_prot = PROT_READ | PROT_WRITE | PROT_EXEC;

	extLock = 0;
	mgrLock = 0;

	char * end = (char *)sbrk( 0 );
	heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment
	heapRemaining = 0;
	heapExpand = malloc_expansion();
	mmapStart = malloc_mmap_start();

	// find the closest bucket size less than or equal to the mmapStart size
	maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search

	verify( (mmapStart >= pageSize) && (bucketSizes[NoBucketSizes - 1] >= mmapStart) );
	verify( maxBucketsUsed < NoBucketSizes );			// subscript failure ?
	verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?

	heapManagersList = 0p;
	freeHeapManagersList = 0p;

	heapManagersStorage = 0p;
	heapManagersStorageEnd = 0p;

	#ifdef __STATISTICS__
	HeapStatisticsCtor( stats );						// clear statistic counters
	threads_started = threads_exited = 0;
	reused_heap = new_heap = 0;
	sbrk_calls = sbrk_storage = 0;
	stats_fd = STDERR_FILENO;
	#endif // __STATISTICS__

	#ifdef FASTLOOKUP
	for ( unsigned int i = 0, idx = 0; i < LookupSizes; i += 1 ) {
		if ( i > bucketSizes[idx] ) idx += 1;
		lookup[i] = idx;
		verify( i <= bucketSizes[idx] );
		verify( (i <= 32 && idx == 0) || (i > bucketSizes[idx - 1]) );
	} // for
	#endif // FASTLOOKUP

	heapMasterBootFlag = true;
} // heapMasterCtor


#define NO_MEMORY_MSG "**** Error **** insufficient heap memory available to allocate %zd new bytes."

Heap * getHeap() with( heapMaster ) {
	Heap * heap;
	if ( freeHeapManagersList ) {						// free heap for reuse ?
		heap = freeHeapManagersList;
		freeHeapManagersList = heap->nextFreeHeapManager;

		#ifdef __STATISTICS__
		reused_heap += 1;
		#endif // __STATISTICS__
	} else {											// free heap not found, create new
		// A heap is about 12K: FreeHeader (128 bytes because of cache alignment) * NoBucketSizes (91) ~= 12K, so a
		// superblock of, e.g., 128 heaps is 128 * 12K ~= 1.5M bytes, which handles a medium sized multi-processor server.
		size_t remaining = heapManagersStorageEnd - heapManagersStorage; // remaining free heaps in superblock
		if ( ! heapManagersStorage || remaining == 0 ) { // no superblock or superblock exhausted ?
			// Each block of heaps is a multiple of the number of cores on the computer.
			int HeapDim = get_nprocs();					// get_nprocs_conf does not work
			size_t size = HeapDim * sizeof( Heap );

			heapManagersStorage = (Heap *)mmap( 0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
			if ( unlikely( heapManagersStorage == (Heap *)MAP_FAILED ) ) { // failed ?
				if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, size ); // no memory
				// Do not call strerror( errno ) as it may call malloc.
				abort( "**** Error **** attempt to allocate block of heaps of size %zu bytes and mmap failed with errno %d.", size, errno );
			} // if
			heapManagersStorageEnd = &heapManagersStorage[HeapDim]; // outside array
		} // if

		heap = heapManagersStorage;
		heapManagersStorage = heapManagersStorage + 1;	// bump next heap

		#if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
		heap->nextHeapManager = heapManagersList;
		#endif // __STATISTICS__ || __CFA_DEBUG__
		heapManagersList = heap;

		#ifdef __STATISTICS__
		new_heap += 1;
		#endif // __STATISTICS__

		with( *heap ) {
			for ( unsigned int j = 0; j < NoBucketSizes; j += 1 ) { // initialize free lists
				#ifdef OWNERSHIP
				#ifdef RETURNSPIN
				freeLists[j].returnLock = 0;
				freeLists[j].returnList = 0p;
				#endif // RETURNSPIN
				#endif // OWNERSHIP

				freeLists[j].freeList = 0p;
				freeLists[j].homeManager = heap;
				freeLists[j].blockSize = bucketSizes[j];
			} // for

			heapBuffer = 0p;
			heapReserve = 0;
			nextFreeHeapManager = 0p;
			#ifdef __CFA_DEBUG__
			allocUnfreed = 0;
			#endif // __CFA_DEBUG__
		} // with
	} // if

	return heap;
} // getHeap

void heapManagerCtor() libcfa_public {
	if ( unlikely( ! heapMasterBootFlag ) ) heapMasterCtor();

	lock( heapMaster.mgrLock );							// protect heapMaster counters

	// get storage for heap manager
	heapManager = getHeap();

	#ifdef __STATISTICS__
	HeapStatisticsCtor( heapManager->stats );			// heap local
	heapMaster.threads_started += 1;
	#endif // __STATISTICS__

	unlock( heapMaster.mgrLock );
} // heapManagerCtor

void heapManagerDtor() libcfa_public {
	lock( heapMaster.mgrLock );

	// place heap on list of free heaps for reusability
	heapManager->nextFreeHeapManager = heapMaster.freeHeapManagersList;
	heapMaster.freeHeapManagersList = heapManager;

	#ifdef __STATISTICS__
	heapMaster.threads_exited += 1;
	#endif // __STATISTICS__

	// Do not set heapManager to NULL because it is used after Cforall is shutdown but before the program shuts down.

	unlock( heapMaster.mgrLock );
} // heapManagerDtor


//####################### Memory Allocation Routines Helpers ####################


extern int cfa_main_returned;							// from interpose.cfa
extern "C" {
	void memory_startup( void ) {
		if ( ! heapMasterBootFlag ) heapManagerCtor();	// sanity check
	} // memory_startup

	void memory_shutdown( void ) {
		heapManagerDtor();
	} // memory_shutdown

	void heapAppStart() {								// called by __cfaabi_appready_startup
		verify( heapManager );
		#ifdef __CFA_DEBUG__
		heapManager->allocUnfreed = 0;					// clear prior allocation counts
		#endif // __CFA_DEBUG__

		#ifdef __STATISTICS__
		HeapStatisticsCtor( heapManager->stats );		// clear prior statistic counters
		#endif // __STATISTICS__
	} // heapAppStart

	void heapAppStop() {								// called by __cfaabi_appready_startdown
		fclose( stdin ); fclose( stdout );				// free buffer storage
		if ( ! cfa_main_returned ) return;				// do not check unfreed storage if exit called

		#ifdef __CFA_DEBUG__
		// allocUnfreed is set to 0 when a heap is created and it accumulates any unfreed storage during its multiple thread
		// usages. At the end, add up each heap allocUnfreed value across all heaps to get the total unfreed storage.
		int64_t allocUnfreed = 0;
		for ( Heap * heap = heapMaster.heapManagersList; heap; heap = heap->nextHeapManager ) {
			allocUnfreed += heap->allocUnfreed;
		} // for

		allocUnfreed -= malloc_unfreed();				// subtract any user specified unfreed storage
		if ( allocUnfreed > 0 ) {
			// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
			char helpText[512];
			__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
										"CFA warning (UNIX pid:%ld) : program terminating with %ju(0x%jx) bytes of storage allocated but not freed.\n"
										"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
										(long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid
		} // if
		#endif // __CFA_DEBUG__
	} // heapAppStop
} // extern "C"


#ifdef __STATISTICS__
static HeapStatistics stats;							// zero filled

#define prtFmt \
	"\nHeap statistics: (storage request / allocation)\n" \
	"  malloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  aalloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  calloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  memalign  >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  amemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  cmemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  resize    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  realloc   >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  free      !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n" \
	"  return    pulls %'u; pushes %'u; storage %'llu / %'llu bytes\n" \
	"  sbrk      calls %'u; storage %'llu bytes\n" \
	"  mmap      calls %'u; storage %'llu / %'llu bytes\n" \
	"  munmap    calls %'u; storage %'llu / %'llu bytes\n" \
	"  threads   started %'lu; exited %'lu\n" \
	"  heaps     new %'lu; reused %'lu\n"

// Use "write" because streams may be shutdown when calls are made.
static int printStats( HeapStatistics & stats ) with( heapMaster, stats ) { // see malloc_stats
	char helpText[sizeof(prtFmt) + 1024];				// space for message and values
	return __cfaabi_bits_print_buffer( stats_fd, helpText, sizeof(helpText), prtFmt,
			malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc,
			aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc,
			calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc,
			memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc,
			amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc,
			cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc,
			resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc,
			realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc,
			free_calls, free_null_calls, free_storage_request, free_storage_alloc,
			return_pulls, return_pushes, return_storage_request, return_storage_alloc,
			sbrk_calls, sbrk_storage,
			mmap_calls, mmap_storage_request, mmap_storage_alloc,
			munmap_calls, munmap_storage_request, munmap_storage_alloc,
			threads_started, threads_exited,
			new_heap, reused_heap );
} // printStats

#define prtFmtXML \
	"<malloc version=\"1\">\n" \
	"<heap nr=\"0\">\n" \
	"<sizes>\n" \
	"</sizes>\n" \
	"<total type=\"malloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"aalloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"calloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"memalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"amemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"cmemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"resize\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"free\" !null=\"%'u;\" null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"return\" pulls=\"%'u;\" pushes=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n" \
	"<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"threads\" started=\"%'lu;\" exited=\"%'lu\"/>\n" \
	"<total type=\"heaps\" new=\"%'lu;\" reused=\"%'lu\"/>\n" \
	"</malloc>"
static int printStatsXML( HeapStatistics & stats, FILE * stream ) with( heapMaster, stats ) { // see malloc_info
	char helpText[sizeof(prtFmtXML) + 1024];			// space for message and values
	return __cfaabi_bits_print_buffer( fileno( stream ), helpText, sizeof(helpText), prtFmtXML,
			malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc,
			aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc,
			calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc,
			memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc,
			amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc,
			cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc,
			resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc,
			realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc,
			free_calls, free_null_calls, free_storage_request, free_storage_alloc,
			return_pulls, return_pushes, return_storage_request, return_storage_alloc,
			sbrk_calls, sbrk_storage,
			mmap_calls, mmap_storage_request, mmap_storage_alloc,
			munmap_calls, munmap_storage_request, munmap_storage_alloc,
			threads_started, threads_exited,
			new_heap, reused_heap );
} // printStatsXML

static HeapStatistics & collectStats( HeapStatistics & stats ) with( heapMaster ) {
	lock( mgrLock );

	stats += heapMaster.stats;
	for ( Heap * heap = heapManagersList; heap; heap = heap->nextHeapManager ) {
		stats += heap->stats;
	} // for

	unlock( mgrLock );
	return stats;
} // collectStats
#endif // __STATISTICS__


static bool setMmapStart( size_t value ) with( heapMaster ) { // true => crossover value accepted
	if ( value < __page_size || bucketSizes[NoBucketSizes - 1] < value ) return false;
	mmapStart = value;									// set global

	// find the closest bucket size less than or equal to the mmapStart size
	maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search

	verify( maxBucketsUsed < NoBucketSizes );			// subscript failure ?
	verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
	return true;
} // setMmapStart


// <-------+----------------------------------------------------> bsize (bucket size)
// |header |addr
//==================================================================================
// align/offset |
// <-----------------<------------+-----------------------------> bsize (bucket size)
// |fake-header | addr
#define HeaderAddr( addr ) ((Heap.Storage.Header *)( (char *)addr - sizeof(Heap.Storage) ))
#define RealHeader( header ) ((Heap.Storage.Header *)((char *)header - header->kind.fake.offset))

// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
// |header |addr
//==================================================================================
// align/offset |
// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
// |fake-header |addr
#define DataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))


inline __attribute__((always_inline))
static void checkAlign( size_t alignment ) {
	if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) {
		abort( "**** Error **** alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
	} // if
} // checkAlign

inline __attribute__((always_inline))
static void checkHeader( bool check, const char name[], void * addr ) {
	if ( unlikely( check ) ) {							// bad address ?
		abort( "**** Error **** attempt to %s storage %p with address outside the heap.\n"
			   "Possible cause is duplicate free on same block or overwriting of memory.",
			   name, addr );
	} // if
} // checkHeader

// Manipulate sticky bits stored in unused 3 low-order bits of an address.
//   bit0 => alignment => fake header
//   bit1 => zero filled (calloc)
//   bit2 => mapped allocation versus sbrk
#define StickyBits( header ) (((header)->kind.real.blockSize & 0x7))
#define ClearStickyBits( addr ) (typeof(addr))((uintptr_t)(addr) & ~7)
#define MarkAlignmentBit( align ) ((align) | 1)
#define AlignmentBit( header ) ((((header)->kind.fake.alignment) & 1))
#define ClearAlignmentBit( header ) (((header)->kind.fake.alignment) & ~1)
#define ZeroFillBit( header ) ((((header)->kind.real.blockSize) & 2))
#define ClearZeroFillBit( header ) ((((header)->kind.real.blockSize) &= ~2))
#define MarkZeroFilledBit( header ) ((header)->kind.real.blockSize |= 2)
#define MmappedBit( header ) ((((header)->kind.real.blockSize) & 4))
#define MarkMmappedBit( size ) ((size) | 4)

inline __attribute__((always_inline))
static void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) {
	if ( unlikely( AlignmentBit( header ) ) ) {			// fake header ?
		alignment = ClearAlignmentBit( header );		// clear flag from value
		#ifdef __CFA_DEBUG__
		checkAlign( alignment );						// check alignment
		#endif // __CFA_DEBUG__
		header = RealHeader( header );					// backup from fake to real header
	} else {
		alignment = libAlign();							// => no fake header
	} // if
} // fakeHeader

inline __attribute__((always_inline))
static bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header,
					 Heap.FreeHeader *& freeHead, size_t & size, size_t & alignment ) with( heapMaster, *heapManager ) {
	header = HeaderAddr( addr );

	#ifdef __CFA_DEBUG__
	checkHeader( header < (Heap.Storage.Header *)heapBegin, name, addr ); // bad low address ?
	#endif // __CFA_DEBUG__

	if ( likely( ! StickyBits( header ) ) ) {			// no sticky bits ?
		freeHead = (Heap.FreeHeader *)(header->kind.real.home);
		alignment = libAlign();
	} else {
		fakeHeader( header, alignment );
		if ( unlikely( MmappedBit( header ) ) ) {		// mmapped ?
			verify( addr < heapBegin || heapEnd < addr );
			size = ClearStickyBits( header->kind.real.blockSize ); // mmap size
			return true;
		} // if
		freeHead = (Heap.FreeHeader *)(ClearStickyBits( header->kind.real.home ));
	} // if
	size = freeHead->blockSize;

	#ifdef __CFA_DEBUG__
	checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)

	Heap * homeManager;
	if ( unlikely( freeHead == 0p ||					// freed and only free-list node => null link
				   // freed and link points at another free block not to a bucket in the bucket array.
				   (homeManager = freeHead->homeManager, freeHead < &homeManager->freeLists[0] ||
					&homeManager->freeLists[NoBucketSizes] <= freeHead ) ) ) {
		abort( "**** Error **** attempt to %s storage %p with corrupted header.\n"
			   "Possible cause is duplicate free on same block or overwriting of header information.",
			   name, addr );
	} // if
	#endif // __CFA_DEBUG__
	return false;
} // headers


static void * master_extend( size_t size ) with( heapMaster ) {
	lock( extLock );

	ptrdiff_t rem = heapRemaining - size;
	if ( unlikely( rem < 0 ) ) {
		// If the size requested is bigger than the current remaining storage, increase the size of the heap.
		size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() );
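		// E.g., with the default 10M expansion (see malloc_expansion), a 4K request that outruns the remaining storage
		// still advances sbrk by 10M, amortizing kernel calls; only a request larger than the expansion amount grows
		// the heap by the request size (rounded up to libAlign()).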
		if ( unlikely( sbrk( increase ) == (void *)-1 ) ) { // failed, no memory ?
			unlock( extLock );
			abort( NO_MEMORY_MSG, size );				// give up
		} // if

		// Make storage executable for thunks.
		if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
			unlock( extLock );
			abort( "**** Error **** attempt to make heap storage executable for thunks and mprotect failed with errno %d.", errno );
		} // if

		rem = heapRemaining + increase - size;

		#ifdef __STATISTICS__
		sbrk_calls += 1;
		sbrk_storage += increase;
		#endif // __STATISTICS__
	} // if

	Heap.Storage * block = (Heap.Storage *)heapEnd;
	heapRemaining = rem;
	heapEnd = (char *)heapEnd + size;

	unlock( extLock );
	return block;
} // master_extend

__attribute__(( noinline ))
static void * manager_extend( size_t size ) with( *heapManager ) {
	ptrdiff_t rem = heapReserve - size;

	if ( unlikely( rem < 0 ) ) {						// negative
		// If the size requested is bigger than the current remaining reserve, use the current reserve to populate
		// smaller freeLists, and increase the reserve.
		rem = heapReserve;								// positive

		if ( rem >= bucketSizes[0] ) {					// minimal size ? otherwise ignore
			size_t bucket;
			#ifdef FASTLOOKUP
			if ( likely( rem < LookupSizes ) ) bucket = lookup[rem];
			else
			#endif // FASTLOOKUP
				bucket = Bsearchl( rem, bucketSizes, heapMaster.maxBucketsUsed );
			verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed );
			Heap.FreeHeader * freeHead = &(freeLists[bucket]);

			// The remaining storage may not be bucket size, whereas all other allocations are. Round down to previous
			// bucket size in this case.
			if ( unlikely( freeHead->blockSize > (size_t)rem ) ) freeHead -= 1;
			Heap.Storage * block = (Heap.Storage *)heapBuffer;

			block->header.kind.real.next = freeHead->freeList; // push on stack
			freeHead->freeList = block;
		} // if

		size_t increase = ceiling( size > ( heapMaster.heapExpand / 10 ) ? size : ( heapMaster.heapExpand / 10 ), libAlign() );
		heapBuffer = master_extend( increase );
		rem = increase - size;
	} // if

	Heap.Storage * block = (Heap.Storage *)heapBuffer;
	heapReserve = rem;
	heapBuffer = (char *)heapBuffer + size;

	return block;
} // manager_extend


#define BOOT_HEAP_MANAGER \
	if ( unlikely( ! heapMasterBootFlag ) ) { \
		heapManagerCtor(); /* trigger for first heap */ \
	} /* if */

#ifdef __STATISTICS__
#define STAT_NAME __counter
#define STAT_PARM , unsigned int STAT_NAME
#define STAT_ARG( name ) , name
#define STAT_0_CNT( counter ) stats.counters[counter].calls_0 += 1
#else
#define STAT_NAME
#define STAT_PARM
#define STAT_ARG( name )
#define STAT_0_CNT( counter )
#endif // __STATISTICS__

#define PROLOG( counter, ... ) \
	BOOT_HEAP_MANAGER; \
	if ( unlikely( size == 0 ) ||						/* 0 BYTE ALLOCATION RETURNS NULL POINTER */ \
		 unlikely( size > ULONG_MAX - sizeof(Heap.Storage) ) ) { /* error check */ \
		STAT_0_CNT( counter ); \
		__VA_ARGS__; \
		return 0p; \
	} /* if */

#define SCRUB_SIZE 1024lu
// Do not use '\xfe' for scrubbing because dereferencing an address composed of it causes a SIGSEGV *without* a valid IP
// pointer in the interrupt frame.
#define SCRUB '\xff'

static void * doMalloc( size_t size STAT_PARM ) libcfa_nopreempt with( *heapManager ) {
	PROLOG( STAT_NAME );

	verify( heapManager );

	Heap.Storage * block;								// pointer to new block of storage

	// Look up size in the size list. Make sure the user request includes space for the header that must be allocated
	// along with the block and is a multiple of the alignment size.
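	// For example (assuming libAlign() == 16, so sizeof(Heap.Storage) == 16): malloc( 24 ) gives tsize = 40, which the
	// FASTLOOKUP table maps to the 48-byte bucket (bucketSizes[1] == 32 + 16), leaving 24 data bytes plus 8 spare.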
	size_t tsize = size + sizeof(Heap.Storage);

	#ifdef __STATISTICS__
	stats.counters[STAT_NAME].calls += 1;
	stats.counters[STAT_NAME].request += size;
	#endif // __STATISTICS__

	#ifdef __CFA_DEBUG__
	allocUnfreed += size;
	#endif // __CFA_DEBUG__

	if ( likely( tsize < heapMaster.mmapStart ) ) {		// small size => sbrk
		size_t bucket;
		#ifdef FASTLOOKUP
		if ( likely( tsize < LookupSizes ) ) bucket = lookup[tsize];
		else
		#endif // FASTLOOKUP
			bucket = Bsearchl( tsize, bucketSizes, heapMaster.maxBucketsUsed );
		verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed );
		Heap.FreeHeader * freeHead = &freeLists[bucket];

		verify( freeHead <= &freeLists[heapMaster.maxBucketsUsed] ); // subscripting error ?
		verify( tsize <= freeHead->blockSize );			// search failure ?

		tsize = freeHead->blockSize;					// total space needed for request
		#ifdef __STATISTICS__
		stats.counters[STAT_NAME].alloc += tsize;
		#endif // __STATISTICS__

		block = freeHead->freeList;						// remove node from stack
		if ( unlikely( block == 0p ) ) {				// no free block ?
			// Freelist for this size is empty, so pull from the return list (OWNERSHIP), carve storage out of the heap
			// reserve if enough is left, or extend the heap storage and carve off a block.
			#ifdef OWNERSHIP
			if ( unlikely( freeHead->returnList ) ) {	// race, get next time if lose race
				#ifdef RETURNSPIN
				lock( freeHead->returnLock );
				block = freeHead->returnList;
				freeHead->returnList = 0p;
				unlock( freeHead->returnLock );
				#else
				block = __atomic_exchange_n( &freeHead->returnList, 0p, __ATOMIC_SEQ_CST );
				#endif // RETURNSPIN

				verify( block );
				#ifdef __STATISTICS__
				stats.return_pulls += 1;
				#endif // __STATISTICS__

				// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

				freeHead->freeList = block->header.kind.real.next; // merge returnList into freeHead
			} else {
			#endif // OWNERSHIP
				// Do not leave kernel thread as manager_extend accesses heapManager.
				disable_interrupts();
				block = (Heap.Storage *)manager_extend( tsize ); // mutual exclusion on call
				enable_interrupts( false );

				// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

				#ifdef __CFA_DEBUG__
				// Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first SCRUB_SIZE bytes.
				memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) );
				#endif // __CFA_DEBUG__
			#ifdef OWNERSHIP
			} // if
			#endif // OWNERSHIP
		} else {
			// Memory is scrubbed in doFree.
			freeHead->freeList = block->header.kind.real.next;
		} // if

		block->header.kind.real.home = freeHead;		// pointer back to free list of appropriate size
	} else {											// large size => mmap
		if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p;
		tsize = ceiling2( tsize, __page_size );			// must be multiple of page size
		#ifdef __STATISTICS__
		stats.counters[STAT_NAME].alloc += tsize;
		stats.mmap_calls += 1;
		stats.mmap_storage_request += size;
		stats.mmap_storage_alloc += tsize;
		#endif // __STATISTICS__

		disable_interrupts();
		block = (Heap.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
		enable_interrupts( false );

		// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

		if ( unlikely( block == (Heap.Storage *)MAP_FAILED ) ) { // failed ?
			if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
			// Do not call strerror( errno ) as it may call malloc.
			abort( "**** Error **** attempt to allocate large object (> %zu) of size %zu bytes and mmap failed with errno %d.",
				   heapMaster.mmapStart, size, errno );
		} // if
		block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap

		#ifdef __CFA_DEBUG__
		// Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first SCRUB_SIZE bytes. The
		// rest of the storage is set to 0 by mmap.
		memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) );
		#endif // __CFA_DEBUG__
	} // if

	block->header.kind.real.size = size;				// store allocation size
	void * addr = &(block->data);						// adjust off header to user bytes
	verify( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ?

	#ifdef __CFA_DEBUG__
	if ( traceHeap() ) {
		char helpText[64];
		__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
									"%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); // print debug/nodebug
	} // if
	#endif // __CFA_DEBUG__

	// poll_interrupts();								// call rollforward

	return addr;
} // doMalloc

static void doFree( void * addr ) libcfa_nopreempt with( *heapManager ) {
	verify( addr );

	// detect free after thread-local storage destruction and use global stats in that case

	Heap.Storage.Header * header;
	Heap.FreeHeader * freeHead;
	size_t size, alignment;

	bool mapped = headers( "free", addr, header, freeHead, size, alignment );
	#if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
	size_t rsize = header->kind.real.size;				// optimization
	#endif // __STATISTICS__ || __CFA_DEBUG__

	#ifdef __STATISTICS__
	stats.free_storage_request += rsize;
	stats.free_storage_alloc += size;
	#endif // __STATISTICS__

	#ifdef __CFA_DEBUG__
	allocUnfreed -= rsize;
	#endif // __CFA_DEBUG__

	if ( unlikely( mapped ) ) {							// mmapped ?
		#ifdef __STATISTICS__
		stats.munmap_calls += 1;
		stats.munmap_storage_request += rsize;
		stats.munmap_storage_alloc += size;
		#endif // __STATISTICS__

		// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

		// Does not matter where this storage is freed.
		if ( unlikely( munmap( header, size ) == -1 ) ) {
			// Do not call strerror( errno ) as it may call malloc.
			abort( "**** Error **** attempt to deallocate large object %p and munmap failed with errno %d.\n"
				   "Possible cause is invalid delete pointer: either not allocated or with corrupt header.",
				   addr, errno );
		} // if
	} else {
		#ifdef __CFA_DEBUG__
		// memset is NOT always inlined!
		disable_interrupts();
		// Scrub old memory so subsequent usages might fail. Only scrub the first/last SCRUB_SIZE bytes.
		char * data = ((Heap.Storage *)header)->data;	// data address
		size_t dsize = size - sizeof(Heap.Storage);		// data size
		if ( dsize <= SCRUB_SIZE * 2 ) {
			memset( data, SCRUB, dsize );				// scrub all
		} else {
			memset( data, SCRUB, SCRUB_SIZE );			// scrub front
			memset( data + dsize - SCRUB_SIZE, SCRUB, SCRUB_SIZE ); // scrub back
		} // if
		enable_interrupts( false );
		#endif // __CFA_DEBUG__

		#ifdef OWNERSHIP
		if ( likely( heapManager == freeHead->homeManager ) ) { // belongs to this thread
			header->kind.real.next = freeHead->freeList; // push on stack
			freeHead->freeList = (Heap.Storage *)header;
		} else {										// return to thread owner
			verify( heapManager );

			#ifdef RETURNSPIN
			lock( freeHead->returnLock );
			header->kind.real.next = freeHead->returnList; // push to bucket return list
			freeHead->returnList = (Heap.Storage *)header;
			unlock( freeHead->returnLock );
			#else										// lock free
			header->kind.real.next = freeHead->returnList; // link new node to top node
			// CAS resets header->kind.real.next = freeHead->returnList on failure
			while ( ! __atomic_compare_exchange_n( &freeHead->returnList, &header->kind.real.next, (Heap.Storage *)header,
												   false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) );
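			// This retry loop is a classic lock-free LIFO (Treiber-stack) push: on CAS failure,
			// __atomic_compare_exchange_n reloads header->kind.real.next with the current returnList value, so the
			// link is re-aimed at the new top node before the next attempt.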
			#endif // RETURNSPIN
		} // if

		#else											// no OWNERSHIP

		// kind.real.home is address in owner thread's freeLists, so compute the equivalent position in this thread's freeLists.
		freeHead = &freeLists[ClearStickyBits( (Heap.FreeHeader *)(header->kind.real.home) ) - &freeHead->homeManager->freeLists[0]];
		header->kind.real.next = freeHead->freeList;	// push on stack
		freeHead->freeList = (Heap.Storage *)header;
		#endif // ! OWNERSHIP

		#ifdef __STATISTICS__
		stats.return_pushes += 1;
		stats.return_storage_request += rsize;
		stats.return_storage_alloc += size;
		#endif // __STATISTICS__

		// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.
	} // if

	#ifdef __CFA_DEBUG__
	if ( traceHeap() ) {
		char helpText[64];
		__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
									"Free( %p ) size:%zu\n", addr, size ); // print debug/nodebug
	} // if
	#endif // __CFA_DEBUG__

	// poll_interrupts();								// call rollforward
} // doFree


size_t prtFree( Heap & manager ) with( manager ) {
	size_t total = 0;
	#ifdef __STATISTICS__
	__cfaabi_bits_acquire();
	__cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" );
	#endif // __STATISTICS__
	for ( unsigned int i = 0; i < heapMaster.maxBucketsUsed; i += 1 ) {
		size_t size = freeLists[i].blockSize;
		#ifdef __STATISTICS__
		unsigned int N = 0;
		#endif // __STATISTICS__

		for ( Heap.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
			total += size;
			#ifdef __STATISTICS__
			N += 1;
			#endif // __STATISTICS__
		} // for

		#ifdef __STATISTICS__
		__cfaabi_bits_print_nolock( STDERR_FILENO, "%7zu, %-7u ", size, N );
		if ( (i + 1) % 8 == 0 ) __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" );
		#endif // __STATISTICS__
	} // for
	#ifdef __STATISTICS__
	__cfaabi_bits_print_nolock( STDERR_FILENO, "\ntotal free blocks:%zu\n", total );
	__cfaabi_bits_release();
	#endif // __STATISTICS__
	return (char *)heapMaster.heapEnd - (char *)heapMaster.heapBegin - total;
} // prtFree


#ifdef __STATISTICS__
static void incCalls( intptr_t statName ) libcfa_nopreempt {
	heapManager->stats.counters[statName].calls += 1;
} // incCalls

static void incZeroCalls( intptr_t statName ) libcfa_nopreempt {
	heapManager->stats.counters[statName].calls_0 += 1;
} // incZeroCalls
#endif // __STATISTICS__

#ifdef __CFA_DEBUG__
static void incUnfreed( intptr_t offset ) libcfa_nopreempt {
	heapManager->allocUnfreed += offset;
} // incUnfreed
#endif // __CFA_DEBUG__


static void * memalignNoStats( size_t alignment, size_t size STAT_PARM ) {
	checkAlign( alignment );							// check alignment

	// if alignment <= default alignment or size == 0, do normal malloc as two headers are unnecessary
	if ( unlikely( alignment <= libAlign() || size == 0 ) ) return doMalloc( size STAT_ARG( STAT_NAME ) );

	// Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
	// administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
	//      .-------------v-----------------v----------------v----------,
	//      | Real Header | ... padding ... | Fake Header | data ... |
	//      `-------------^-----------------^-+--------------^----------'
	//      |<--------------------------------' offset/align |<-- alignment boundary
	// subtract libAlign() because it is already the minimum alignment
	// add sizeof(Storage) for fake header
	size_t offset = alignment - libAlign() + sizeof(Heap.Storage);
	char * addr = (char *)doMalloc( size + offset STAT_ARG( STAT_NAME ) );

	// address in the block of the "next" alignment address
	char * user = (char *)ceiling2( (uintptr_t)(addr + sizeof(Heap.Storage)), alignment );

	// address of header from malloc
	Heap.Storage.Header * realHeader = HeaderAddr( addr );
	realHeader->kind.real.size = size;					// correct size to eliminate above alignment offset
	#ifdef __CFA_DEBUG__
	incUnfreed( -offset );								// adjustment off the offset from call to doMalloc
	#endif // __CFA_DEBUG__

	// address of fake header *before* the alignment location
	Heap.Storage.Header * fakeHeader = HeaderAddr( user );

	// SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
	fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
	// SKULLDUGGERY: odd alignment implies fake header
	fakeHeader->kind.fake.alignment = MarkAlignmentBit( alignment );

	return user;
} // memalignNoStats


//####################### Memory Allocation Routines ####################


extern "C" {
	// Allocates size bytes and returns a pointer to the allocated memory. The contents are undefined. If size is 0,
	// then malloc() returns a unique pointer value that can later be successfully passed to free().
	void * malloc( size_t size ) libcfa_public {
		return doMalloc( size STAT_ARG( MALLOC ) );
	} // malloc

	// Same as malloc() except size bytes is an array of dim elements each of elemSize bytes.
	void * aalloc( size_t dim, size_t elemSize ) libcfa_public {
		return doMalloc( dim * elemSize STAT_ARG( AALLOC ) );
	} // aalloc

	// Same as aalloc() with memory set to zero.
	void * calloc( size_t dim, size_t elemSize ) libcfa_public {
		size_t size = dim * elemSize;
		char * addr = (char *)doMalloc( size STAT_ARG( CALLOC ) );
		if ( unlikely( addr == 0p ) ) return 0p;		// stop further processing if 0p is returned

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, alignment;

		#ifndef __CFA_DEBUG__
		bool mapped =
		#endif // __CFA_DEBUG__
			headers( "calloc", addr, header, freeHead, bsize, alignment );

		#ifndef __CFA_DEBUG__
		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		if ( likely( ! mapped ) )
		#endif // __CFA_DEBUG__
			// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
			// `-header`-addr                      `-size
			memset( addr, '\0', size );					// set to zeros

		MarkZeroFilledBit( header );					// mark as zero fill
		return addr;
	} // calloc

	// Change the size of the memory block pointed to by oaddr to size bytes. The contents are undefined. If oaddr is
	// 0p, then the call is equivalent to malloc(size), for all values of size; if size is equal to zero, and oaddr is
	// not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier
	// call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done.
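	// A usage sketch contrasting the two growth routines (hypothetical sizes): resize() may return different storage
	// without copying, whereas realloc() preserves the common prefix.
	//   int * p = malloc( 10 * sizeof(int) );
	//   p = resize( p, 100 * sizeof(int) );			// contents now undefined
	//   int * q = malloc( 10 * sizeof(int) );
	//   q = realloc( q, 100 * sizeof(int) );			// first 10 ints preserved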
	void * resize( void * oaddr, size_t size ) libcfa_public {
		if ( unlikely( oaddr == 0p ) ) {				// => malloc( size )
			return doMalloc( size STAT_ARG( RESIZE ) );
		} // if

		PROLOG( RESIZE, doFree( oaddr ) );				// => free( oaddr )

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, oalign;
		headers( "resize", oaddr, header, freeHead, bsize, oalign );

		size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
		// same size, DO NOT preserve STICKY PROPERTIES.
		if ( oalign == libAlign() && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
			ClearZeroFillBit( header );					// no alignment and turn off 0 fill
			#ifdef __CFA_DEBUG__
			incUnfreed( size - header->kind.real.size ); // adjustment off the size difference
			#endif // __CFA_DEBUG__
			header->kind.real.size = size;				// reset allocation size
			#ifdef __STATISTICS__
			incCalls( RESIZE );
			#endif // __STATISTICS__
			return oaddr;
		} // if

		// change size, DO NOT preserve STICKY PROPERTIES.
		doFree( oaddr );								// free previous storage
		return doMalloc( size STAT_ARG( RESIZE ) );		// create new area
	} // resize

	// Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of
	// the old and new sizes.
	void * realloc( void * oaddr, size_t size ) libcfa_public {
		if ( unlikely( oaddr == 0p ) ) {				// => malloc( size )
			return doMalloc( size STAT_ARG( REALLOC ) );
		} // if

		PROLOG( REALLOC, doFree( oaddr ) );				// => free( oaddr )

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, oalign;
		headers( "realloc", oaddr, header, freeHead, bsize, oalign );

		size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
		size_t osize = header->kind.real.size;			// old allocation size
		bool ozfill = ZeroFillBit( header );			// old allocation zero filled
		if ( unlikely( size <= odsize ) && odsize <= size * 2 ) { // allow up to 50% wasted storage
			#ifdef __CFA_DEBUG__
			incUnfreed( size - header->kind.real.size ); // adjustment off the size difference
			#endif // __CFA_DEBUG__
			header->kind.real.size = size;				// reset allocation size
			if ( unlikely( ozfill ) && size > osize ) {	// previous request zero fill and larger ?
				memset( (char *)oaddr + osize, '\0', size - osize ); // initialize added storage
			} // if
			#ifdef __STATISTICS__
			incCalls( REALLOC );
			#endif // __STATISTICS__
			return oaddr;
		} // if

		// change size and copy old content to new storage
		void * naddr;
		if ( likely( oalign <= libAlign() ) ) {			// previous request not aligned ?
			naddr = doMalloc( size STAT_ARG( REALLOC ) ); // create new area
		} else {
			naddr = memalignNoStats( oalign, size STAT_ARG( REALLOC ) ); // create new aligned area
		} // if

		headers( "realloc", naddr, header, freeHead, bsize, oalign );
		// To preserve prior zero fill, the entire bucket would have to be copied; instead copy the data and re-zero
		// any extension below.
		memcpy( naddr, oaddr, min( osize, size ) );		// copy bytes
		doFree( oaddr );								// free previous storage

		if ( unlikely( ozfill ) ) {						// previous request zero fill ?
			MarkZeroFilledBit( header );				// mark new request as zero filled
			if ( size > osize ) {						// previous request larger ?
				memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
			} // if
		} // if
		return naddr;
	} // realloc

	// Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize.
	void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) libcfa_public {
		return realloc( oaddr, dim * elemSize );
	} // reallocarray

	// Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete)
	void * memalign( size_t alignment, size_t size ) libcfa_public {
		return memalignNoStats( alignment, size STAT_ARG( MEMALIGN ) );
	} // memalign

	// Same as aalloc() with memory alignment.
	void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
		return memalignNoStats( alignment, dim * elemSize STAT_ARG( AMEMALIGN ) );
	} // amemalign

	// Same as calloc() with memory alignment.
	void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
		size_t size = dim * elemSize;
		char * addr = (char *)memalignNoStats( alignment, size STAT_ARG( CMEMALIGN ) );
		if ( unlikely( addr == 0p ) ) return 0p;		// stop further processing if 0p is returned

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize;

		#ifndef __CFA_DEBUG__
		bool mapped =
		#endif // __CFA_DEBUG__
			headers( "cmemalign", addr, header, freeHead, bsize, alignment );

		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		#ifndef __CFA_DEBUG__
		if ( ! mapped )
		#endif // __CFA_DEBUG__
			// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
			// `-header`-addr                      `-size
			memset( addr, '\0', size );					// set to zeros

		MarkZeroFilledBit( header );					// mark as zero filled
		return addr;
	} // cmemalign

	// Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
	// of alignment. This requirement is universally ignored.
	void * aligned_alloc( size_t alignment, size_t size ) libcfa_public {
		return memalign( alignment, size );
	} // aligned_alloc

	// Allocates size bytes and places the address of the allocated memory in *memptr. The address of the allocated
	// memory shall be a multiple of alignment, which must be a power of two and a multiple of sizeof(void *). If size
	// is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to
	// free(3).
	int posix_memalign( void ** memptr, size_t alignment, size_t size ) libcfa_public {
		if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment
		*memptr = memalign( alignment, size );
		return 0;
	} // posix_memalign

	// Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
	// page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
	void * valloc( size_t size ) libcfa_public {
		return memalign( __page_size, size );
	} // valloc

	// Same as valloc but rounds size to multiple of page size.
	void * pvalloc( size_t size ) libcfa_public {
		return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size
	} // pvalloc

	// Frees the memory space pointed to by ptr, which must have been returned by a previous call to malloc(), calloc()
	// or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is
	// 0p, no operation is performed.
	void free( void * addr ) libcfa_public {
		// verify( heapManager );

		if ( unlikely( addr == 0p ) ) {					// special case
			#ifdef __STATISTICS__
			if ( heapManager ) incZeroCalls( FREE );
			#endif // __STATISTICS__
			return;
		} // if

		#ifdef __STATISTICS__
		incCalls( FREE );
		#endif // __STATISTICS__

		doFree( addr );									// handles heapManager == nullptr
	} // free

	// Returns the alignment of an allocation.
	size_t malloc_alignment( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
			return ClearAlignmentBit( header );			// clear flag from value
		} else {
			return libAlign();							// minimum alignment
		} // if
	} // malloc_alignment

	// Returns true if the allocation is zero filled, e.g., allocated by calloc().
	bool malloc_zero_fill( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return false;		// null allocation is not zero fill
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
			header = RealHeader( header );				// backup from fake to real header
		} // if
		return ZeroFillBit( header );					// zero filled ?
	} // malloc_zero_fill

	// Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T).
	size_t malloc_size( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return 0;			// null allocation has zero size
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
			header = RealHeader( header );				// backup from fake to real header
		} // if
		return header->kind.real.size;
	} // malloc_size

	// Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by
	// malloc or a related function.
	size_t malloc_usable_size( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return 0;			// null allocation has 0 size
		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, alignment;

		headers( "malloc_usable_size", addr, header, freeHead, bsize, alignment );
		return DataStorage( bsize, addr, header );		// data storage in bucket
	} // malloc_usable_size

	// Prints (on default standard error) statistics about memory allocated by malloc and related functions.
	void malloc_stats( void ) libcfa_public {
		#ifdef __STATISTICS__
		HeapStatistics stats;
		HeapStatisticsCtor( stats );
		if ( printStats( collectStats( stats ) ) == -1 ) {
		#else
		#define MALLOC_STATS_MSG "malloc_stats statistics disabled.\n"
		if ( write( STDERR_FILENO, MALLOC_STATS_MSG, sizeof( MALLOC_STATS_MSG ) - 1 /* size includes '\0' */ ) == -1 ) {
		#endif // __STATISTICS__
			abort( "**** Error **** write failed in malloc_stats" );
		} // if
	} // malloc_stats

	// Changes the file descriptor where malloc_stats() writes statistics.
	int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public {
		#ifdef __STATISTICS__
		int temp = heapMaster.stats_fd;
		heapMaster.stats_fd = fd;
		return temp;
		#else
		return -1;										// unsupported
		#endif // __STATISTICS__
	} // malloc_stats_fd

	// Prints an XML string that describes the current state of the memory-allocation implementation in the caller.
	// The string is printed on the file stream stream. The exported string includes information about all arenas (see
	// malloc).
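	// Usage sketch: malloc_info( 0, stdout ) writes the XML statistics to standard output and returns the bytes
	// written (0 when statistics are compiled out); any nonzero options value fails with errno set to EINVAL.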
	int malloc_info( int options, FILE * stream __attribute__(( unused )) ) libcfa_public {
		if ( options != 0 ) { errno = EINVAL; return -1; }
		#ifdef __STATISTICS__
		HeapStatistics stats;
		HeapStatisticsCtor( stats );
		return printStatsXML( collectStats( stats ), stream ); // returns bytes written or -1
		#else
		return 0;										// unsupported
		#endif // __STATISTICS__
	} // malloc_info

	// Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The option
	// argument specifies the parameter to be modified, and value specifies the new value for that parameter.
	int mallopt( int option, int value ) libcfa_public {
		if ( value < 0 ) return 0;
		choose( option ) {
		  case M_TOP_PAD:
			heapMaster.heapExpand = ceiling2( value, __page_size );
			return 1;
		  case M_MMAP_THRESHOLD:
			if ( setMmapStart( value ) ) return 1;
		} // choose
		return 0;										// error, unsupported
	} // mallopt

	// Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument).
	int malloc_trim( size_t ) libcfa_public {
		return 0;										// => impossible to release memory
	} // malloc_trim

	// Records the current state of all malloc internal bookkeeping variables (but not the actual contents of the heap
	// or the state of malloc_hook functions pointers). The state is recorded in a system-dependent opaque data
	// structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function
	// result. (The caller must free this memory.)
	void * malloc_get_state( void ) libcfa_public {
		return 0p;										// unsupported
	} // malloc_get_state

	// Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data
	// structure pointed to by state.
	int malloc_set_state( void * ) libcfa_public {
		return 0;										// unsupported
	} // malloc_set_state

	// Sets the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation.
	__attribute__((weak)) size_t malloc_expansion() libcfa_public { return __CFA_DEFAULT_HEAP_EXPANSION__; }

	// Sets the crossover point between allocations occurring in the sbrk area or separately mmapped.
	__attribute__((weak)) size_t malloc_mmap_start() libcfa_public { return __CFA_DEFAULT_MMAP_START__; }

	// Amount subtracted to adjust for unfreed program storage (debug only).
	__attribute__((weak)) size_t malloc_unfreed() libcfa_public { return __CFA_DEFAULT_HEAP_UNFREED__; }
} // extern "C"


// Must have CFA linkage to overload with C linkage realloc.
void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public {
	if ( unlikely( oaddr == 0p ) ) {					// => malloc( size )
		return memalignNoStats( nalign, size STAT_ARG( RESIZE ) );
	} // if

	PROLOG( RESIZE, doFree( oaddr ) );					// => free( oaddr )

	// Attempt to reuse existing alignment.
	Heap.Storage.Header * header = HeaderAddr( oaddr );
	bool isFakeHeader = AlignmentBit( header );			// old fake header ?
	size_t oalign;

	if ( unlikely( isFakeHeader ) ) {
		checkAlign( nalign );							// check alignment
		oalign = ClearAlignmentBit( header );			// old alignment
		if ( unlikely( (uintptr_t)oaddr % nalign == 0	// lucky match ?
			 && ( oalign <= nalign						// going down
				  || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
			) ) {
			HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
			Heap.FreeHeader * freeHead;
			size_t bsize, oalign;
			headers( "resize", oaddr, header, freeHead, bsize, oalign );
			size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket

			if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted data storage
				HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
				ClearZeroFillBit( header );				// turn off 0 fill
				#ifdef __CFA_DEBUG__
				incUnfreed( size - header->kind.real.size ); // adjustment off the size difference
				#endif // __CFA_DEBUG__
				header->kind.real.size = size;			// reset allocation size
				#ifdef __STATISTICS__
				incCalls( RESIZE );
				#endif // __STATISTICS__
				return oaddr;
			} // if
		} // if
	} else if ( ! isFakeHeader							// old real header (aligned on libAlign) ?
				&& nalign == libAlign() ) {				// new alignment also on libAlign => no fake header needed
		return resize( oaddr, size );					// duplicate special case checks
	} // if

	// change size, DO NOT preserve STICKY PROPERTIES.
	doFree( oaddr );									// free previous storage
	return memalignNoStats( nalign, size STAT_ARG( RESIZE ) ); // create new aligned area
} // resize

void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public {
	if ( unlikely( oaddr == 0p ) ) {					// => malloc( size )
		return memalignNoStats( nalign, size STAT_ARG( REALLOC ) );
	} // if

	PROLOG( REALLOC, doFree( oaddr ) );					// => free( oaddr )

	// Attempt to reuse existing alignment.
	Heap.Storage.Header * header = HeaderAddr( oaddr );
	bool isFakeHeader = AlignmentBit( header );			// old fake header ?
	size_t oalign;
	if ( unlikely( isFakeHeader ) ) {
		checkAlign( nalign );							// check alignment
		oalign = ClearAlignmentBit( header );			// old alignment
		if ( unlikely( (uintptr_t)oaddr % nalign == 0	// lucky match ?
			 && ( oalign <= nalign						// going down
				  || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
			) ) {
			HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
			return realloc( oaddr, size );				// duplicate special case checks
		} // if
	} else if ( ! isFakeHeader							// old real header (aligned on libAlign) ?
				&& nalign == libAlign() ) {				// new alignment also on libAlign => no fake header needed
		return realloc( oaddr, size );					// duplicate special case checks
	} // if

	Heap.FreeHeader * freeHead;
	size_t bsize;
	headers( "realloc", oaddr, header, freeHead, bsize, oalign );

	// change size and copy old content to new storage
	size_t osize = header->kind.real.size;				// old allocation size
	bool ozfill = ZeroFillBit( header );				// old allocation zero filled

	void * naddr = memalignNoStats( nalign, size STAT_ARG( REALLOC ) ); // create new aligned area

	headers( "realloc", naddr, header, freeHead, bsize, oalign );
	memcpy( naddr, oaddr, min( osize, size ) );			// copy bytes
	doFree( oaddr );									// free previous storage

	if ( unlikely( ozfill ) ) {							// previous request zero fill ?
		MarkZeroFilledBit( header );					// mark new request as zero filled
		if ( size > osize ) {							// previous request larger ?
			memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
		} // if
	} // if
	return naddr;
} // realloc

void * reallocarray( void * oaddr, size_t nalign, size_t dim, size_t elemSize ) __THROW {
	return realloc( oaddr, nalign, dim * elemSize );
} // reallocarray


// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa -nodebug -O2 heap.cfa" //
// End: //