Changeset 24ceace for libcfa/src
- Timestamp: May 2, 2022, 3:19:03 AM
- Branches: ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children: 12bb5ab1, 49a1684
- Parents: 9e7236f4 (diff), 4b4f95f (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: libcfa/src
- Files: 6 edited
  - bits/align.hfa (modified) (2 diffs)
  - bits/debug.cfa (modified) (5 diffs)
  - bits/debug.hfa (modified) (2 diffs)
  - heap.cfa (modified) (69 diffs)
  - heap.hfa (modified) (3 diffs)
  - stdlib.hfa (modified) (2 diffs)
libcfa/src/bits/align.hfa
r9e7236f4 → r24ceace
  // Created On       : Mon Nov 28 12:27:26 2016
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Sat Nov 16 18:58:22 2019
- // Update Count     : 3
+ // Last Modified On : Fri Apr 29 19:14:43 2022
+ // Update Count     : 4
  //
  // This library is free software; you can redistribute it and/or modify it
…
  //#define libAlign() (sizeof(double))
  // gcc-7 uses xmms instructions, which require 16 byte alignment.
- #define libAlign() ( 16 )
+ #define libAlign() (__BIGGEST_ALIGNMENT__)

  // Check for power of 2
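Switching libAlign() from a hard-coded 16 to __BIGGEST_ALIGNMENT__ ties the heap's minimum alignment to the largest alignment the target ABI actually requires (16 on x86-64 for xmm registers, smaller or larger elsewhere) instead of baking in one architecture's value. A rough standalone illustration in plain C of what the predefined macro provides and of the power-of-2 invariant the following hunk checks (IS_POW2 here is a stand-in, not the is_pow2 from bitmanip.hfa):

#include <stdio.h>

// Stand-in power-of-2 test: a power of 2 has exactly one bit set.
#define IS_POW2( n ) ( (n) > 0 && ((n) & ((n) - 1)) == 0 )

// gcc and clang predefine __BIGGEST_ALIGNMENT__ for the target ABI.
_Static_assert( IS_POW2( __BIGGEST_ALIGNMENT__ ), "libAlign() must be a power of 2" );

int main( void ) {
	printf( "libAlign() on this target: %d bytes\n", __BIGGEST_ALIGNMENT__ );
	return 0;
}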
libcfa/src/bits/debug.cfa
r9e7236f4 → r24ceace
  // Created On       : Thu Mar 30 12:30:01 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Wed Jun 17 11:07:13 2020
- // Update Count     : 12
+ // Last Modified On : Fri Apr 22 18:20:26 2022
+ // Update Count     : 13
  //
…
  void __cfaabi_bits_release() __attribute__((__weak__)) {}

- void __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) {
+ int __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) {
  	va_list args;
…
  	__cfaabi_bits_release();
  	va_end( args );
+ 	return len;
  }

- void __cfaabi_bits_print_nolock( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) {
+ int __cfaabi_bits_print_nolock( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) {
  	va_list args;
…
  	va_end( args );
+ 	return len;
  }

- void __cfaabi_bits_print_vararg( int fd, const char fmt[], va_list args ) {
+ int __cfaabi_bits_print_vararg( int fd, const char fmt[], va_list args ) {
  	int len = vsnprintf( buffer, buffer_size, fmt, args );
  	__cfaabi_bits_write( fd, buffer, len );
+ 	return len;
  }

- void __cfaabi_bits_print_buffer( int fd, char in_buffer[], int in_buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 4, 5) )) {
+ int __cfaabi_bits_print_buffer( int fd, char in_buffer[], int in_buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 4, 5) )) {
  	va_list args;
…
  	va_end( args );
+ 	return len;
  }
  }
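Changing these wrappers from void to int adopts the printf convention of returning the number of characters formatted, so callers can check or propagate the output length. A minimal sketch of the same pattern in portable C (the static buffer and raw write call are simplifications of the library's __cfaabi_bits_write path):

#include <stdarg.h>
#include <stdio.h>
#include <unistd.h>

enum { buffer_size = 512 };
static char buffer[buffer_size];			// stand-in for the library's output buffer

// Format into the buffer, write it to fd, and return the formatted length (printf style).
static int bits_print_vararg( int fd, const char fmt[], va_list args ) {
	int len = vsnprintf( buffer, buffer_size, fmt, args );
	if ( len >= buffer_size ) len = buffer_size - 1;	// vsnprintf returns the untruncated length
	(void)write( fd, buffer, len );			// the real code retries partial writes
	return len;
}

static int bits_print( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) ));
static int bits_print( int fd, const char fmt[], ... ) {
	va_list args;
	va_start( args, fmt );
	int len = bits_print_vararg( fd, fmt, args );
	va_end( args );
	return len;
}

A caller can now write, for example, int n = bits_print( STDERR_FILENO, "pid:%ld\n", (long)getpid() ); and use n exactly like a printf return value.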
libcfa/src/bits/debug.hfa
r9e7236f4 → r24ceace
  // Author           : Thierry Delisle
  // Created On       : Mon Nov 28 12:27:26 2016
- // Last Modified By : Andrew Beach
- // Last Modified On : Mon Apr 27 10:15:00 2020
- // Update Count     : 10
+ // Last Modified By : Peter A. Buhr
+ // Last Modified On : Fri Apr 22 18:21:56 2022
+ // Update Count     : 11
  //
…
  extern void __cfaabi_bits_acquire();
  extern void __cfaabi_bits_release();
- extern void __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) ));
- extern void __cfaabi_bits_print_nolock( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) ));
- extern void __cfaabi_bits_print_vararg( int fd, const char fmt[], va_list arg );
- extern void __cfaabi_bits_print_buffer( int fd, char buffer[], int buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 4, 5) ));
+ extern int __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) ));
+ extern int __cfaabi_bits_print_nolock( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) ));
+ extern int __cfaabi_bits_print_vararg( int fd, const char fmt[], va_list arg );
+ extern int __cfaabi_bits_print_buffer( int fd, char buffer[], int buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 4, 5) ));

  #if defined(__CFA_DEBUG_PRINT__) \
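The new declarations keep gcc's format(printf, N, M) attribute alongside the int return type: N is the position of the format-string parameter and M the first variadic argument, so every call site gets its arguments type-checked against the format at compile time. A small self-contained example of the mechanism (log_to_fd is hypothetical, not a libcfa routine):

#include <stdarg.h>
#include <stdio.h>

// Argument 2 is the format string; checking of the variadic arguments starts at argument 3.
static int log_to_fd( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) ));

static int log_to_fd( int fd, const char fmt[], ... ) {
	va_list args;
	va_start( args, fmt );
	int len = vdprintf( fd, fmt, args );	// POSIX vdprintf: vfprintf directly to a file descriptor
	va_end( args );
	return len;
}

int main( void ) {
	log_to_fd( 2, "allocated %zu bytes\n", (size_t)64 );	// OK: %zu matches size_t
	// log_to_fd( 2, "%s\n", 42 );	// warns at compile time: %s expects char *
	return 0;
}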
libcfa/src/heap.cfa
r9e7236f4 r24ceace 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Jan 2 23:29:41202213 // Update Count : 1 05812 // Last Modified On : Fri Apr 29 19:05:03 2022 13 // Update Count : 1167 14 14 // 15 15 16 #include <unistd.h> // sbrk, sysconf17 #include <stdlib.h> // EXIT_FAILURE18 #include <stdbool.h> // true, false19 #include <stdio.h> // snprintf, fileno20 #include <errno.h> // errno21 16 #include <string.h> // memset, memcpy 22 17 #include <limits.h> // ULONG_MAX 18 #include <stdlib.h> // EXIT_FAILURE 19 #include <errno.h> // errno, ENOMEM, EINVAL 20 #include <unistd.h> // STDERR_FILENO, sbrk, sysconf 23 21 #include <malloc.h> // memalign, malloc_usable_size 24 22 #include <sys/mman.h> // mmap, munmap 23 #include <sys/sysinfo.h> // get_nprocs 25 24 26 25 #include "bits/align.hfa" // libAlign … … 31 30 #include "bitmanip.hfa" // is_pow2, ceiling2 32 31 32 #define FASTLOOKUP 33 #define __STATISTICS__ 34 35 33 36 static bool traceHeap = false; 34 37 … … 70 73 71 74 enum { 72 // Define the default extension heap amount in units of bytes. When the uC++ supplied heap reaches the brk address, 73 // the brk address is extended by the extension amount. 74 __CFA_DEFAULT_HEAP_EXPANSION__ = (10 * 1024 * 1024), 75 76 // Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; 77 // values greater than or equal to this value are mmap from the operating system. 78 __CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1), 75 // The default extension heap amount in units of bytes. When the current heap reaches the brk address, the brk 76 // address is extended by the extension amount. 77 __CFA_DEFAULT_HEAP_EXPANSION__ = 10 * 1024 * 1024, 78 79 // The mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; values 80 // greater than or equal to this value are mmap from the operating system. 81 __CFA_DEFAULT_MMAP_START__ = 512 * 1024 + 1, 82 83 // The default unfreed storage amount in units of bytes. When the uC++ program ends it subtracts this amount from 84 // the malloc/free counter to adjust for storage the program does not free. 85 __CFA_DEFAULT_HEAP_UNFREED__ = 0 86 }; // enum 87 88 89 //####################### Heap Statistics #################### 90 91 92 #ifdef __STATISTICS__ 93 enum { CntTriples = 12 }; // number of counter triples 94 enum { MALLOC, AALLOC, CALLOC, MEMALIGN, AMEMALIGN, CMEMALIGN, RESIZE, REALLOC, FREE }; 95 96 struct StatsOverlay { // overlay for iteration 97 unsigned int calls, calls_0; 98 unsigned long long int request, alloc; 79 99 }; 80 100 81 size_t default_mmap_start() __attribute__(( weak )) { 82 return __CFA_DEFAULT_MMAP_START__; 83 } // default_mmap_start 84 85 size_t default_heap_expansion() __attribute__(( weak )) { 86 return __CFA_DEFAULT_HEAP_EXPANSION__; 87 } // default_heap_expansion 88 89 90 #ifdef __CFA_DEBUG__ 91 static size_t allocUnfreed; // running total of allocations minus frees 92 93 static void prtUnfreed() { 94 if ( allocUnfreed != 0 ) { 95 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 
96 char helpText[512]; 97 int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n" 98 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n", 99 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid 100 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 101 } // if 102 } // prtUnfreed 103 104 extern int cfa_main_returned; // from interpose.cfa 105 extern "C" { 106 void heapAppStart() { // called by __cfaabi_appready_startup 107 allocUnfreed = 0; 108 } // heapAppStart 109 110 void heapAppStop() { // called by __cfaabi_appready_startdown 111 fclose( stdin ); fclose( stdout ); 112 if ( cfa_main_returned ) prtUnfreed(); // do not check unfreed storage if exit called 113 } // heapAppStop 114 } // extern "C" 115 #endif // __CFA_DEBUG__ 116 117 118 // statically allocated variables => zero filled. 119 size_t __page_size; // architecture pagesize 120 int __map_prot; // common mmap/mprotect protection 121 static size_t heapExpand; // sbrk advance 122 static size_t mmapStart; // cross over point for mmap 123 static unsigned int maxBucketsUsed; // maximum number of buckets in use 101 // Heap statistics counters. 102 union HeapStatistics { 103 struct { // minimum qualification 104 unsigned int malloc_calls, malloc_0_calls; 105 unsigned long long int malloc_storage_request, malloc_storage_alloc; 106 unsigned int aalloc_calls, aalloc_0_calls; 107 unsigned long long int aalloc_storage_request, aalloc_storage_alloc; 108 unsigned int calloc_calls, calloc_0_calls; 109 unsigned long long int calloc_storage_request, calloc_storage_alloc; 110 unsigned int memalign_calls, memalign_0_calls; 111 unsigned long long int memalign_storage_request, memalign_storage_alloc; 112 unsigned int amemalign_calls, amemalign_0_calls; 113 unsigned long long int amemalign_storage_request, amemalign_storage_alloc; 114 unsigned int cmemalign_calls, cmemalign_0_calls; 115 unsigned long long int cmemalign_storage_request, cmemalign_storage_alloc; 116 unsigned int resize_calls, resize_0_calls; 117 unsigned long long int resize_storage_request, resize_storage_alloc; 118 unsigned int realloc_calls, realloc_0_calls; 119 unsigned long long int realloc_storage_request, realloc_storage_alloc; 120 unsigned int free_calls, free_null_calls; 121 unsigned long long int free_storage_request, free_storage_alloc; 122 unsigned int away_pulls, away_pushes; 123 unsigned long long int away_storage_request, away_storage_alloc; 124 unsigned int mmap_calls, mmap_0_calls; // no zero calls 125 unsigned long long int mmap_storage_request, mmap_storage_alloc; 126 unsigned int munmap_calls, munmap_0_calls; // no zero calls 127 unsigned long long int munmap_storage_request, munmap_storage_alloc; 128 }; 129 struct StatsOverlay counters[CntTriples]; // overlay for iteration 130 }; // HeapStatistics 131 132 static_assert( sizeof(HeapStatistics) == CntTriples * sizeof(StatsOverlay), 133 "Heap statistics counter-triplets does not match with array size" ); 134 135 static void HeapStatisticsCtor( HeapStatistics & stats ) { 136 memset( &stats, '\0', sizeof(stats) ); // very fast 137 // for ( unsigned int i = 0; i < CntTriples; i += 1 ) { 138 // stats.counters[i].calls = stats.counters[i].calls_0 = stats.counters[i].request = stats.counters[i].alloc = 0; 139 // } // for 140 } // HeapStatisticsCtor 141 142 static HeapStatistics & ?+=?( HeapStatistics & lhs, const 
HeapStatistics & rhs ) { 143 for ( unsigned int i = 0; i < CntTriples; i += 1 ) { 144 lhs.counters[i].calls += rhs.counters[i].calls; 145 lhs.counters[i].calls_0 += rhs.counters[i].calls_0; 146 lhs.counters[i].request += rhs.counters[i].request; 147 lhs.counters[i].alloc += rhs.counters[i].alloc; 148 } // for 149 return lhs; 150 } // ?+=? 151 #endif // __STATISTICS__ 124 152 125 153 … … 135 163 136 164 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage. 137 // Break recu sion by hardcoding number of buckets and statically checking number is correct after bucket array defined.165 // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined. 138 166 enum { NoBucketSizes = 91 }; // number of buckets sizes 139 167 140 struct Heap Manager{168 struct Heap { 141 169 struct Storage { 142 170 struct Header { // header … … 145 173 union { 146 174 struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header 147 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4148 uint64_t padding; // unused, force home/blocksize to overlay alignment in fake header149 #endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4150 151 175 union { 176 // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped 152 177 // FreeHeader * home; // allocated block points back to home locations (must overlay alignment) 153 // 2nd low-order bit => zero filled154 178 void * home; // allocated block points back to home locations (must overlay alignment) 155 179 size_t blockSize; // size for munmap (must overlay alignment) 156 180 #if BUCKETLOCK == SPINLOCK 157 Storage * next; // freed block points next freed block of same size181 Storage * next; // freed block points to next freed block of same size 158 182 #endif // SPINLOCK 159 183 }; 160 184 size_t size; // allocation size in bytes 161 162 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4163 uint64_t padding; // unused, force home/blocksize to overlay alignment in fake header164 #endif // __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4165 185 }; 166 186 #if BUCKETLOCK == LOCKFREE … … 171 191 172 192 struct FakeHeader { 173 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 174 uint32_t alignment; // 1st low-order bit => fake header & alignment 175 #endif // __ORDER_LITTLE_ENDIAN__ 176 177 uint32_t offset; 178 179 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 180 uint32_t alignment; // low-order bits of home/blockSize used for tricks 181 #endif // __ORDER_BIG_ENDIAN__ 193 uintptr_t alignment; // 1st low-order bit => fake header & alignment 194 uintptr_t offset; 182 195 } fake; // FakeHeader 183 196 } kind; // Kind 184 197 } header; // Header 198 185 199 char pad[libAlign() - sizeof( Header )]; 186 200 char data[0]; // storage 187 201 }; // Storage 188 202 189 static_assert( libAlign() >= sizeof( Storage ), " libAlign()< sizeof( Storage )" );203 static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" ); 190 204 191 205 struct FreeHeader { 206 size_t blockSize __attribute__(( aligned (8) )); // size of allocations on this list 192 207 #if BUCKETLOCK == SPINLOCK 193 __spinlock_t lock; // must be first field for alignment208 __spinlock_t lock; 194 209 Storage * freeList; 195 210 #else 196 211 StackLF(Storage) freeList; 197 212 #endif // BUCKETLOCK 198 size_t blockSize; // size of allocations on this list199 }; // FreeHeader 200 201 // must be 
first fields for alignment 213 } __attribute__(( aligned (8) )); // FreeHeader 214 215 FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes 216 202 217 __spinlock_t extlock; // protects allocation-buffer extension 203 FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes204 205 218 void * heapBegin; // start of heap 206 219 void * heapEnd; // logical end of heap 207 220 size_t heapRemaining; // amount of storage not allocated in the current chunk 208 }; // Heap Manager221 }; // Heap 209 222 210 223 #if BUCKETLOCK == LOCKFREE 211 224 static inline { 212 Link(Heap Manager.Storage) * ?`next( HeapManager.Storage * this ) { return &this->header.kind.real.next; }213 void ?{}( Heap Manager.FreeHeader & ) {}214 void ^?{}( Heap Manager.FreeHeader & ) {}225 Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; } 226 void ?{}( Heap.FreeHeader & ) {} 227 void ^?{}( Heap.FreeHeader & ) {} 215 228 } // distribution 216 229 #endif // LOCKFREE 217 230 218 static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return freeheader.blockSize; } 219 220 221 #define FASTLOOKUP 222 #define __STATISTICS__ 231 static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; } 232 233 234 #ifdef FASTLOOKUP 235 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes 236 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 237 #endif // FASTLOOKUP 238 239 static const off_t mmapFd = -1; // fake or actual fd for anonymous file 240 #ifdef __CFA_DEBUG__ 241 static bool heapBoot = 0; // detect recursion during boot 242 #endif // __CFA_DEBUG__ 243 223 244 224 245 // Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16. … … 226 247 // malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed. 
227 248 static const unsigned int bucketSizes[] @= { // different bucket sizes 228 16 + sizeof(Heap Manager.Storage), 32 + sizeof(HeapManager.Storage), 48 + sizeof(HeapManager.Storage), 64 + sizeof(HeapManager.Storage), // 4229 96 + sizeof(Heap Manager.Storage), 112 + sizeof(HeapManager.Storage), 128 + sizeof(HeapManager.Storage), // 3230 160, 192, 224, 256 + sizeof(Heap Manager.Storage), // 4231 320, 384, 448, 512 + sizeof(Heap Manager.Storage), // 4232 640, 768, 896, 1_024 + sizeof(Heap Manager.Storage), // 4233 1_536, 2_048 + sizeof(Heap Manager.Storage), // 2234 2_560, 3_072, 3_584, 4_096 + sizeof(Heap Manager.Storage), // 4235 6_144, 8_192 + sizeof(Heap Manager.Storage), // 2236 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap Manager.Storage), // 8237 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap Manager.Storage), // 8238 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap Manager.Storage), // 8239 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap Manager.Storage), // 8240 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap Manager.Storage), // 8241 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap Manager.Storage), // 8242 655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap Manager.Storage), // 4243 1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap Manager.Storage), // 8244 2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap Manager.Storage), // 4249 16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4 250 96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3 251 160, 192, 224, 256 + sizeof(Heap.Storage), // 4 252 320, 384, 448, 512 + sizeof(Heap.Storage), // 4 253 640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4 254 1_536, 2_048 + sizeof(Heap.Storage), // 2 255 2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4 256 6_144, 8_192 + sizeof(Heap.Storage), // 2 257 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8 258 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8 259 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8 260 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8 261 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8 262 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8 263 655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4 264 1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8 265 2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4 245 266 }; 246 267 247 268 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" ); 248 269 249 #ifdef FASTLOOKUP 250 enum { LookupSizes = 65_536 + sizeof(HeapManager.Storage) }; // number of fast lookup sizes 251 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 252 #endif // FASTLOOKUP 253 254 static const off_t mmapFd = -1; // fake or actual fd for anonymous file 270 // The constructor for heapManager is called explicitly in memory_startup. 
271 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 272 273 274 //####################### Memory Allocation Routines Helpers #################### 275 276 255 277 #ifdef __CFA_DEBUG__ 256 static bool heapBoot = 0; // detect recursion during boot 278 static size_t allocUnfreed; // running total of allocations minus frees 279 280 static void prtUnfreed() { 281 if ( allocUnfreed != 0 ) { 282 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 283 char helpText[512]; 284 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 285 "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n" 286 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n", 287 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid 288 } // if 289 } // prtUnfreed 290 291 extern int cfa_main_returned; // from interpose.cfa 292 extern "C" { 293 void heapAppStart() { // called by __cfaabi_appready_startup 294 allocUnfreed = 0; 295 } // heapAppStart 296 297 void heapAppStop() { // called by __cfaabi_appready_startdown 298 fclose( stdin ); fclose( stdout ); 299 if ( cfa_main_returned ) prtUnfreed(); // do not check unfreed storage if exit called 300 } // heapAppStop 301 } // extern "C" 257 302 #endif // __CFA_DEBUG__ 258 303 259 // The constructor for heapManager is called explicitly in memory_startup.260 static HeapManager heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing261 262 304 263 305 #ifdef __STATISTICS__ 264 // Heap statistics counters. 265 static unsigned int malloc_calls, malloc_0_calls; 266 static unsigned long long int malloc_storage_request, malloc_storage_alloc; 267 static unsigned int aalloc_calls, aalloc_0_calls; 268 static unsigned long long int aalloc_storage_request, aalloc_storage_alloc; 269 static unsigned int calloc_calls, calloc_0_calls; 270 static unsigned long long int calloc_storage_request, calloc_storage_alloc; 271 static unsigned int memalign_calls, memalign_0_calls; 272 static unsigned long long int memalign_storage_request, memalign_storage_alloc; 273 static unsigned int amemalign_calls, amemalign_0_calls; 274 static unsigned long long int amemalign_storage_request, amemalign_storage_alloc; 275 static unsigned int cmemalign_calls, cmemalign_0_calls; 276 static unsigned long long int cmemalign_storage_request, cmemalign_storage_alloc; 277 static unsigned int resize_calls, resize_0_calls; 278 static unsigned long long int resize_storage_request, resize_storage_alloc; 279 static unsigned int realloc_calls, realloc_0_calls; 280 static unsigned long long int realloc_storage_request, realloc_storage_alloc; 281 static unsigned int free_calls, free_null_calls; 282 static unsigned long long int free_storage_request, free_storage_alloc; 283 static unsigned int mmap_calls; 284 static unsigned long long int mmap_storage_request, mmap_storage_alloc; 285 static unsigned int munmap_calls; 286 static unsigned long long int munmap_storage_request, munmap_storage_alloc; 306 static HeapStatistics stats; // zero filled 287 307 static unsigned int sbrk_calls; 288 308 static unsigned long long int sbrk_storage; … … 290 310 static int stats_fd = STDERR_FILENO; // default stderr 291 311 312 #define prtFmt \ 313 "\nHeap statistics: (storage request / allocation)\n" \ 314 " malloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 315 " 
aalloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 316 " calloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 317 " memalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 318 " amemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 319 " cmemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 320 " resize >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 321 " realloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 322 " free !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n" \ 323 " sbrk calls %'u; storage %'llu bytes\n" \ 324 " mmap calls %'u; storage %'llu / %'llu bytes\n" \ 325 " munmap calls %'u; storage %'llu / %'llu bytes\n" \ 326 292 327 // Use "write" because streams may be shutdown when calls are made. 293 static void printStats() { 294 char helpText[1024]; 295 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 296 "\nHeap statistics: (storage request / allocation + header)\n" 297 " malloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 298 " aalloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 299 " calloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 300 " memalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 301 " amemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 302 " cmemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 303 " resize >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 304 " realloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 305 " free !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n" 306 " sbrk calls %'u; storage %'llu bytes\n" 307 " mmap calls %'u; storage %'llu / %'llu bytes\n" 308 " munmap calls %'u; storage %'llu / %'llu bytes\n", 309 malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc, 310 aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc, 311 calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc, 312 memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc, 313 amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc, 314 cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc, 315 resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc, 316 realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc, 317 free_calls, free_null_calls, free_storage_request, free_storage_alloc, 318 sbrk_calls, sbrk_storage, 319 mmap_calls, mmap_storage_request, mmap_storage_alloc, 320 munmap_calls, munmap_storage_request, munmap_storage_alloc 328 static int printStats() { // see malloc_stats 329 char helpText[sizeof(prtFmt) + 1024]; // space for message and values 330 return __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), prtFmt, 331 stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc, 332 stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc, 333 stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc, 334 stats.memalign_calls, stats.memalign_0_calls, stats.memalign_storage_request, stats.memalign_storage_alloc, 335 stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc, 336 stats.cmemalign_calls, stats.cmemalign_0_calls, 
stats.cmemalign_storage_request, stats.cmemalign_storage_alloc, 337 stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc, 338 stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc, 339 stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc, 340 sbrk_calls, sbrk_storage, 341 stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc, 342 stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc 321 343 ); 322 344 } // printStats 323 345 346 #define prtFmtXML \ 347 "<malloc version=\"1\">\n" \ 348 "<heap nr=\"0\">\n" \ 349 "<sizes>\n" \ 350 "</sizes>\n" \ 351 "<total type=\"malloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 352 "<total type=\"aalloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 353 "<total type=\"calloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 354 "<total type=\"memalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 355 "<total type=\"amemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 356 "<total type=\"cmemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 357 "<total type=\"resize\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 358 "<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 359 "<total type=\"free\" !null=\"%'u;\" 0 null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 360 "<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n" \ 361 "<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\" / > bytes\n" \ 362 "<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 363 "</malloc>" 364 324 365 static int printStatsXML( FILE * stream ) { // see malloc_info 325 char helpText[1024]; 326 int len = snprintf( helpText, sizeof(helpText), 327 "<malloc version=\"1\">\n" 328 "<heap nr=\"0\">\n" 329 "<sizes>\n" 330 "</sizes>\n" 331 "<total type=\"malloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 332 "<total type=\"aalloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 333 "<total type=\"calloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 334 "<total type=\"memalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 335 "<total type=\"amemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 336 "<total type=\"cmemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 337 "<total type=\"resize\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 338 "<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 339 "<total type=\"free\" !null=\"%'u;\" 0 null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 340 "<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n" 341 "<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\" / > bytes\n" 342 "<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 343 "</malloc>", 344 malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc, 345 aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc, 346 calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc, 347 memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc, 348 
amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc, 349 cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc, 350 resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc, 351 realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc, 352 free_calls, free_null_calls, free_storage_request, free_storage_alloc, 353 sbrk_calls, sbrk_storage, 354 mmap_calls, mmap_storage_request, mmap_storage_alloc, 355 munmap_calls, munmap_storage_request, munmap_storage_alloc 366 char helpText[sizeof(prtFmtXML) + 1024]; // space for message and values 367 return __cfaabi_bits_print_buffer( fileno( stream ), helpText, sizeof(helpText), prtFmtXML, 368 stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc, 369 stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc, 370 stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc, 371 stats.memalign_calls, stats.memalign_0_calls, stats.memalign_storage_request, stats.memalign_storage_alloc, 372 stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc, 373 stats.cmemalign_calls, stats.cmemalign_0_calls, stats.cmemalign_storage_request, stats.cmemalign_storage_alloc, 374 stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc, 375 stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc, 376 stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc, 377 sbrk_calls, sbrk_storage, 378 stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc, 379 stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc 356 380 ); 357 __cfaabi_bits_write( fileno( stream ), helpText, len ); // ensures all bytes written or exit358 return len;359 381 } // printStatsXML 360 382 #endif // __STATISTICS__ 383 384 385 // statically allocated variables => zero filled. 
386 static size_t heapExpand; // sbrk advance 387 static size_t mmapStart; // cross over point for mmap 388 static unsigned int maxBucketsUsed; // maximum number of buckets in use 389 // extern visibility, used by runtime kernel 390 size_t __page_size; // architecture pagesize 391 int __map_prot; // common mmap/mprotect protection 361 392 362 393 … … 394 425 // <-----------------<------------+-----------------------------> bsize (bucket size) 395 426 // |fake-header | addr 396 #define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))397 #define realHeader( header ) ((HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset))427 #define HeaderAddr( addr ) ((Heap.Storage.Header *)( (char *)addr - sizeof(Heap.Storage) )) 428 #define RealHeader( header ) ((Heap.Storage.Header *)((char *)header - header->kind.fake.offset)) 398 429 399 430 // <-------<<--------------------- dsize ---------------------->> bsize (bucket size) … … 403 434 // <------------------------------<<---------- dsize --------->>> bsize (bucket size) 404 435 // |fake-header |addr 405 #define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))436 #define DataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header )) 406 437 407 438 408 439 static inline void checkAlign( size_t alignment ) { 409 if ( alignment < libAlign() || ! is_pow2( alignment) ) {410 abort( " Alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );440 if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) { 441 abort( "**** Error **** alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() ); 411 442 } // if 412 443 } // checkAlign … … 415 446 static inline void checkHeader( bool check, const char name[], void * addr ) { 416 447 if ( unlikely( check ) ) { // bad address ? 417 abort( " Attempt to %s storage %p with address outside the heap.\n"448 abort( "**** Error **** attempt to %s storage %p with address outside the heap.\n" 418 449 "Possible cause is duplicate free on same block or overwriting of memory.", 419 450 name, addr ); … … 422 453 423 454 424 static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) { 425 if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ? 426 alignment = header->kind.fake.alignment & -2; // remove flag from value 455 // Manipulate sticky bits stored in unused 3 low-order bits of an address. 456 // bit0 => alignment => fake header 457 // bit1 => zero filled (calloc) 458 // bit2 => mapped allocation versus sbrk 459 #define StickyBits( header ) (((header)->kind.real.blockSize & 0x7)) 460 #define ClearStickyBits( addr ) (typeof(addr))((uintptr_t)(addr) & ~7) 461 #define MarkAlignmentBit( align ) ((align) | 1) 462 #define AlignmentBit( header ) ((((header)->kind.fake.alignment) & 1)) 463 #define ClearAlignmentBit( header ) (((header)->kind.fake.alignment) & ~1) 464 #define ZeroFillBit( header ) ((((header)->kind.real.blockSize) & 2)) 465 #define ClearZeroFillBit( header ) ((((header)->kind.real.blockSize) &= ~2)) 466 #define MarkZeroFilledBit( header ) ((header)->kind.real.blockSize |= 2) 467 #define MmappedBit( header ) ((((header)->kind.real.blockSize) & 4)) 468 #define MarkMmappedBit( size ) ((size) | 4) 469 470 471 static inline void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) { 472 if ( unlikely( AlignmentBit( header ) ) ) { // fake header ? 
473 alignment = ClearAlignmentBit( header ); // clear flag from value 427 474 #ifdef __CFA_DEBUG__ 428 475 checkAlign( alignment ); // check alignment 429 476 #endif // __CFA_DEBUG__ 430 header = realHeader( header ); // backup from fake to real header477 header = RealHeader( header ); // backup from fake to real header 431 478 } else { 432 479 alignment = libAlign(); // => no fake header … … 435 482 436 483 437 static inline bool headers( const char name[] __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, 438 size_t & size, size_t & alignment ) with( heapManager ) { 439 header = headerAddr( addr ); 440 441 if ( unlikely( addr < heapBegin || heapEnd < addr ) ) { // mmapped ? 484 static inline bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header, 485 Heap.FreeHeader *& freeHead, size_t & size, size_t & alignment ) with( heapManager ) { 486 header = HeaderAddr( addr ); 487 488 #ifdef __CFA_DEBUG__ 489 checkHeader( header < (Heap.Storage.Header *)heapBegin, name, addr ); // bad low address ? 490 #endif // __CFA_DEBUG__ 491 492 if ( likely( ! StickyBits( header ) ) ) { // no sticky bits ? 493 freeHead = (Heap.FreeHeader *)(header->kind.real.home); 494 alignment = libAlign(); 495 } else { 442 496 fakeHeader( header, alignment ); 443 size = header->kind.real.blockSize & -3; // mmap size 444 return true; 445 } // if 497 if ( unlikely( MmappedBit( header ) ) ) { // mmapped ? 498 verify( addr < heapBegin || heapEnd < addr ); 499 size = ClearStickyBits( header->kind.real.blockSize ); // mmap size 500 return true; 501 } // if 502 503 freeHead = (Heap.FreeHeader *)(ClearStickyBits( header->kind.real.home )); 504 } // if 505 size = freeHead->blockSize; 446 506 447 507 #ifdef __CFA_DEBUG__ 448 checkHeader( header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ? 449 #endif // __CFA_DEBUG__ 450 451 // header may be safe to dereference 452 fakeHeader( header, alignment ); 453 #ifdef __CFA_DEBUG__ 454 checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -) 455 #endif // __CFA_DEBUG__ 456 457 freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3); 458 #ifdef __CFA_DEBUG__ 459 if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) { 460 abort( "Attempt to %s storage %p with corrupted header.\n" 508 checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -) 509 510 Heap * homeManager; 511 if ( unlikely( freeHead == 0p || // freed and only free-list node => null link 512 // freed and link points at another free block not to a bucket in the bucket array. 
513 freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) ) { 514 abort( "**** Error **** attempt to %s storage %p with corrupted header.\n" 461 515 "Possible cause is duplicate free on same block or overwriting of header information.", 462 516 name, addr ); 463 517 } // if 464 518 #endif // __CFA_DEBUG__ 465 size = freeElem->blockSize; 519 466 520 return false; 467 521 } // headers … … 490 544 static inline void * extend( size_t size ) with( heapManager ) { 491 545 lock( extlock __cfaabi_dbg_ctx2 ); 546 492 547 ptrdiff_t rem = heapRemaining - size; 493 if ( rem < 0) {548 if ( unlikely( rem < 0 ) ) { 494 549 // If the size requested is bigger than the current remaining storage, increase the size of the heap. 495 550 … … 501 556 _exit( EXIT_FAILURE ); // give up 502 557 } // if 558 503 559 // Make storage executable for thunks. 504 560 if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) { … … 507 563 _exit( EXIT_FAILURE ); 508 564 } // if 565 509 566 #ifdef __STATISTICS__ 510 567 sbrk_calls += 1; 511 568 sbrk_storage += increase; 512 569 #endif // __STATISTICS__ 570 513 571 #ifdef __CFA_DEBUG__ 514 572 // Set new memory to garbage so subsequent uninitialized usages might fail. … … 516 574 //Memset( (char *)heapEnd + heapRemaining, increase ); 517 575 #endif // __CFA_DEBUG__ 576 518 577 rem = heapRemaining + increase - size; 519 578 } // if 520 579 521 Heap Manager.Storage * block = (HeapManager.Storage *)heapEnd;580 Heap.Storage * block = (Heap.Storage *)heapEnd; 522 581 heapRemaining = rem; 523 582 heapEnd = (char *)heapEnd + size; … … 528 587 529 588 static inline void * doMalloc( size_t size ) with( heapManager ) { 530 Heap Manager.Storage * block; // pointer to new block of storage589 Heap.Storage * block; // pointer to new block of storage 531 590 532 591 // Look up size in the size list. Make sure the user request includes space for the header that must be allocated 533 592 // along with the block and is a multiple of the alignment size. 534 593 535 if ( unlikely( size > ULONG_MAX - sizeof(HeapManager.Storage) ) ) return 0p;536 size_t tsize = size + sizeof(HeapManager.Storage); 594 size_t tsize = size + sizeof(Heap.Storage); 595 537 596 if ( likely( tsize < mmapStart ) ) { // small size => sbrk 538 597 size_t posn; … … 542 601 #endif // FASTLOOKUP 543 602 posn = Bsearchl( (unsigned int)tsize, bucketSizes, (size_t)maxBucketsUsed ); 544 Heap Manager.FreeHeader * freeElem = &freeLists[posn];603 Heap.FreeHeader * freeElem = &freeLists[posn]; 545 604 verify( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ? 546 605 verify( tsize <= freeElem->blockSize ); // search failure ? … … 563 622 // and then carve it off. 
564 623 565 block = (Heap Manager.Storage *)extend( tsize ); // mutual exclusion on call624 block = (Heap.Storage *)extend( tsize ); // mutual exclusion on call 566 625 #if BUCKETLOCK == SPINLOCK 567 626 } else { … … 576 635 tsize = ceiling2( tsize, __page_size ); // must be multiple of page size 577 636 #ifdef __STATISTICS__ 578 __atomic_add_fetch( & mmap_calls, 1, __ATOMIC_SEQ_CST );579 __atomic_add_fetch( & mmap_storage_request, size, __ATOMIC_SEQ_CST );580 __atomic_add_fetch( & mmap_storage_alloc, tsize, __ATOMIC_SEQ_CST );581 #endif // __STATISTICS__ 582 583 block = (Heap Manager.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );584 if ( block == (Heap Manager.Storage *)MAP_FAILED ) { // failed ?637 __atomic_add_fetch( &stats.mmap_calls, 1, __ATOMIC_SEQ_CST ); 638 __atomic_add_fetch( &stats.mmap_storage_request, size, __ATOMIC_SEQ_CST ); 639 __atomic_add_fetch( &stats.mmap_storage_alloc, tsize, __ATOMIC_SEQ_CST ); 640 #endif // __STATISTICS__ 641 642 block = (Heap.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 ); 643 if ( block == (Heap.Storage *)MAP_FAILED ) { // failed ? 585 644 if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory 586 645 // Do not call strerror( errno ) as it may call malloc. 587 abort( "(Heap Manager&)0x%p.doMalloc() : internal error, mmap failure, size:%zu errno:%d.", &heapManager, tsize, errno );646 abort( "(Heap &)0x%p.doMalloc() : internal error, mmap failure, size:%zu errno:%d.", &heapManager, tsize, errno ); 588 647 } //if 589 648 #ifdef __CFA_DEBUG__ … … 592 651 //Memset( block, tsize ); 593 652 #endif // __CFA_DEBUG__ 594 block->header.kind.real.blockSize = tsize;// storage size for munmap653 block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap 595 654 } // if 596 655 … … 602 661 __atomic_add_fetch( &allocUnfreed, tsize, __ATOMIC_SEQ_CST ); 603 662 if ( traceHeap() ) { 604 enum { BufferSize = 64 }; 605 char helpText[BufferSize]; 606 int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); 607 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 663 char helpText[64]; 664 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 665 "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); // print debug/nodebug 608 666 } // if 609 667 #endif // __CFA_DEBUG__ … … 620 678 #endif // __CFA_DEBUG__ 621 679 622 Heap Manager.Storage.Header * header;623 Heap Manager.FreeHeader * freeElem;680 Heap.Storage.Header * header; 681 Heap.FreeHeader * freeElem; 624 682 size_t size, alignment; // not used (see realloc) 625 683 626 684 if ( headers( "free", addr, header, freeElem, size, alignment ) ) { // mmapped ? 627 685 #ifdef __STATISTICS__ 628 __atomic_add_fetch( & munmap_calls, 1, __ATOMIC_SEQ_CST );629 __atomic_add_fetch( & munmap_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST );630 __atomic_add_fetch( & munmap_storage_alloc, size, __ATOMIC_SEQ_CST );686 __atomic_add_fetch( &stats.munmap_calls, 1, __ATOMIC_SEQ_CST ); 687 __atomic_add_fetch( &stats.munmap_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST ); 688 __atomic_add_fetch( &stats.munmap_storage_alloc, size, __ATOMIC_SEQ_CST ); 631 689 #endif // __STATISTICS__ 632 690 if ( munmap( header, size ) == -1 ) { … … 638 696 #ifdef __CFA_DEBUG__ 639 697 // Set free memory to garbage so subsequent usages might fail. 
640 memset( ((Heap Manager.Storage *)header)->data, '\xde', freeElem->blockSize - sizeof( HeapManager.Storage ) );641 //Memset( ((Heap Manager.Storage *)header)->data, freeElem->blockSize - sizeof( HeapManager.Storage ) );698 memset( ((Heap.Storage *)header)->data, '\xde', freeElem->blockSize - sizeof( Heap.Storage ) ); 699 //Memset( ((Heap.Storage *)header)->data, freeElem->blockSize - sizeof( Heap.Storage ) ); 642 700 #endif // __CFA_DEBUG__ 643 701 644 702 #ifdef __STATISTICS__ 645 __atomic_add_fetch( & free_calls, 1, __ATOMIC_SEQ_CST );646 __atomic_add_fetch( & free_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST );647 __atomic_add_fetch( & free_storage_alloc, size, __ATOMIC_SEQ_CST );703 __atomic_add_fetch( &stats.free_calls, 1, __ATOMIC_SEQ_CST ); 704 __atomic_add_fetch( &stats.free_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST ); 705 __atomic_add_fetch( &stats.free_storage_alloc, size, __ATOMIC_SEQ_CST ); 648 706 #endif // __STATISTICS__ 649 707 … … 651 709 lock( freeElem->lock __cfaabi_dbg_ctx2 ); // acquire spin lock 652 710 header->kind.real.next = freeElem->freeList; // push on stack 653 freeElem->freeList = (Heap Manager.Storage *)header;711 freeElem->freeList = (Heap.Storage *)header; 654 712 unlock( freeElem->lock ); // release spin lock 655 713 #else 656 push( freeElem->freeList, *(Heap Manager.Storage *)header );714 push( freeElem->freeList, *(Heap.Storage *)header ); 657 715 #endif // BUCKETLOCK 658 716 } // if … … 662 720 if ( traceHeap() ) { 663 721 char helpText[64]; 664 int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );665 __cfaabi_bits_write( STDERR_FILENO, helpText, len); // print debug/nodebug722 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 723 "Free( %p ) size:%zu\n", addr, size ); // print debug/nodebug 666 724 } // if 667 725 #endif // __CFA_DEBUG__ … … 669 727 670 728 671 size_t prtFree( Heap Manager& manager ) with( manager ) {729 size_t prtFree( Heap & manager ) with( manager ) { 672 730 size_t total = 0; 673 731 #ifdef __STATISTICS__ … … 682 740 683 741 #if BUCKETLOCK == SPINLOCK 684 for ( Heap Manager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {742 for ( Heap.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) { 685 743 #else 686 744 for(;;) { 687 // for ( Heap Manager.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {688 // for ( Heap Manager.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */) {689 // Heap Manager.Storage * temp = p->header.kind.real.next.top; // FIX ME: direct assignent fails, initialization works`745 // for ( Heap.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) { 746 // for ( Heap.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */) { 747 // Heap.Storage * temp = p->header.kind.real.next.top; // FIX ME: direct assignent fails, initialization works` 690 748 // typeof(p) temp = (( p )`next)->top; // FIX ME: direct assignent fails, initialization works` 691 749 // p = temp; … … 710 768 711 769 712 static void ?{}( Heap Manager& manager ) with( manager ) {770 static void ?{}( Heap & manager ) with( manager ) { 713 771 __page_size = sysconf( _SC_PAGESIZE ); 714 772 __map_prot = PROT_READ | PROT_WRITE | PROT_EXEC; … … 726 784 #endif // FASTLOOKUP 727 785 728 if ( ! setMmapStart( default_mmap_start() ) ) {729 abort( "Heap Manager: internal error, mmap start initialization failure." 
);730 } // if 731 heapExpand = default_heap_expansion();786 if ( ! setMmapStart( malloc_mmap_start() ) ) { 787 abort( "Heap : internal error, mmap start initialization failure." ); 788 } // if 789 heapExpand = malloc_expansion(); 732 790 733 791 char * end = (char *)sbrk( 0 ); 734 792 heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, __page_size ) - end ); // move start of heap to multiple of alignment 735 } // Heap Manager736 737 738 static void ^?{}( Heap Manager& ) {793 } // Heap 794 795 796 static void ^?{}( Heap & ) { 739 797 #ifdef __STATISTICS__ 740 798 if ( traceHeapTerm() ) { … … 743 801 } // if 744 802 #endif // __STATISTICS__ 745 } // ~Heap Manager803 } // ~Heap 746 804 747 805 … … 796 854 // subtract libAlign() because it is already the minimum alignment 797 855 // add sizeof(Storage) for fake header 798 char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(Heap Manager.Storage) );856 char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(Heap.Storage) ); 799 857 800 858 // address in the block of the "next" alignment address 801 char * user = (char *)ceiling2( (uintptr_t)(addr + sizeof(Heap Manager.Storage)), alignment );859 char * user = (char *)ceiling2( (uintptr_t)(addr + sizeof(Heap.Storage)), alignment ); 802 860 803 861 // address of header from malloc 804 Heap Manager.Storage.Header * realHeader = headerAddr( addr );805 realHeader->kind.real.size = size; // correct size to eliminate above alignment offset862 Heap.Storage.Header * RealHeader = HeaderAddr( addr ); 863 RealHeader->kind.real.size = size; // correct size to eliminate above alignment offset 806 864 // address of fake header * before* the alignment location 807 Heap Manager.Storage.Header * fakeHeader = headerAddr( user );865 Heap.Storage.Header * fakeHeader = HeaderAddr( user ); 808 866 // SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment 809 fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *) realHeader;810 // SKULLDUGGERY: odd alignment impl es fake header811 fakeHeader->kind.fake.alignment = alignment | 1;867 fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)RealHeader; 868 // SKULLDUGGERY: odd alignment implies fake header 869 fakeHeader->kind.fake.alignment = MarkAlignmentBit( alignment ); 812 870 813 871 return user; 814 872 } // memalignNoStats 873 874 875 //####################### Memory Allocation Routines #################### 815 876 816 877 … … 821 882 #ifdef __STATISTICS__ 822 883 if ( likely( size > 0 ) ) { 823 __atomic_add_fetch( & malloc_calls, 1, __ATOMIC_SEQ_CST );824 __atomic_add_fetch( & malloc_storage_request, size, __ATOMIC_SEQ_CST );884 __atomic_add_fetch( &stats.malloc_calls, 1, __ATOMIC_SEQ_CST ); 885 __atomic_add_fetch( &stats.malloc_storage_request, size, __ATOMIC_SEQ_CST ); 825 886 } else { 826 __atomic_add_fetch( & malloc_0_calls, 1, __ATOMIC_SEQ_CST );887 __atomic_add_fetch( &stats.malloc_0_calls, 1, __ATOMIC_SEQ_CST ); 827 888 } // if 828 889 #endif // __STATISTICS__ … … 837 898 #ifdef __STATISTICS__ 838 899 if ( likely( size > 0 ) ) { 839 __atomic_add_fetch( & aalloc_calls, 1, __ATOMIC_SEQ_CST );840 __atomic_add_fetch( & aalloc_storage_request, size, __ATOMIC_SEQ_CST );900 __atomic_add_fetch( &stats.aalloc_calls, 1, __ATOMIC_SEQ_CST ); 901 __atomic_add_fetch( &stats.aalloc_storage_request, size, __ATOMIC_SEQ_CST ); 841 902 } else { 842 __atomic_add_fetch( & aalloc_0_calls, 1, __ATOMIC_SEQ_CST );903 __atomic_add_fetch( &stats.aalloc_0_calls, 1, 
__ATOMIC_SEQ_CST ); 843 904 } // if 844 905 #endif // __STATISTICS__ … … 853 914 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER 854 915 #ifdef __STATISTICS__ 855 __atomic_add_fetch( & calloc_0_calls, 1, __ATOMIC_SEQ_CST );916 __atomic_add_fetch( &stats.calloc_0_calls, 1, __ATOMIC_SEQ_CST ); 856 917 #endif // __STATISTICS__ 857 918 return 0p; 858 919 } // if 859 920 #ifdef __STATISTICS__ 860 __atomic_add_fetch( & calloc_calls, 1, __ATOMIC_SEQ_CST );861 __atomic_add_fetch( & calloc_storage_request, dim * elemSize, __ATOMIC_SEQ_CST );921 __atomic_add_fetch( &stats.calloc_calls, 1, __ATOMIC_SEQ_CST ); 922 __atomic_add_fetch( &stats.calloc_storage_request, dim * elemSize, __ATOMIC_SEQ_CST ); 862 923 #endif // __STATISTICS__ 863 924 864 925 char * addr = (char *)mallocNoStats( size ); 865 926 866 Heap Manager.Storage.Header * header;867 Heap Manager.FreeHeader * freeElem;927 Heap.Storage.Header * header; 928 Heap.FreeHeader * freeElem; 868 929 size_t bsize, alignment; 869 930 … … 881 942 memset( addr, '\0', size ); // set to zeros 882 943 883 header->kind.real.blockSize |= 2; // mark as zero filled944 MarkZeroFilledBit( header ); // mark as zero fill 884 945 return addr; 885 946 } // calloc … … 894 955 if ( unlikely( size == 0 ) ) { // special cases 895 956 #ifdef __STATISTICS__ 896 __atomic_add_fetch( & resize_0_calls, 1, __ATOMIC_SEQ_CST );957 __atomic_add_fetch( &stats.resize_0_calls, 1, __ATOMIC_SEQ_CST ); 897 958 #endif // __STATISTICS__ 898 959 free( oaddr ); … … 900 961 } // if 901 962 #ifdef __STATISTICS__ 902 __atomic_add_fetch( & resize_calls, 1, __ATOMIC_SEQ_CST );963 __atomic_add_fetch( &stats.resize_calls, 1, __ATOMIC_SEQ_CST ); 903 964 #endif // __STATISTICS__ 904 965 905 966 if ( unlikely( oaddr == 0p ) ) { 906 967 #ifdef __STATISTICS__ 907 __atomic_add_fetch( & resize_storage_request, size, __ATOMIC_SEQ_CST );968 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 908 969 #endif // __STATISTICS__ 909 970 return mallocNoStats( size ); 910 971 } // if 911 972 912 Heap Manager.Storage.Header * header;913 Heap Manager.FreeHeader * freeElem;973 Heap.Storage.Header * header; 974 Heap.FreeHeader * freeElem; 914 975 size_t bsize, oalign; 915 976 headers( "resize", oaddr, header, freeElem, bsize, oalign ); 916 977 917 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket978 size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket 918 979 // same size, DO NOT preserve STICKY PROPERTIES. 
919 980 if ( oalign == libAlign() && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size 920 header->kind.real.blockSize &= -2;// no alignment and turn off 0 fill981 ClearZeroFillBit( header ); // no alignment and turn off 0 fill 921 982 header->kind.real.size = size; // reset allocation size 922 983 return oaddr; … … 924 985 925 986 #ifdef __STATISTICS__ 926 __atomic_add_fetch( & resize_storage_request, size, __ATOMIC_SEQ_CST );987 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 927 988 #endif // __STATISTICS__ 928 989 … … 939 1000 if ( unlikely( size == 0 ) ) { // special cases 940 1001 #ifdef __STATISTICS__ 941 __atomic_add_fetch( & realloc_0_calls, 1, __ATOMIC_SEQ_CST );1002 __atomic_add_fetch( &stats.realloc_0_calls, 1, __ATOMIC_SEQ_CST ); 942 1003 #endif // __STATISTICS__ 943 1004 free( oaddr ); … … 945 1006 } // if 946 1007 #ifdef __STATISTICS__ 947 __atomic_add_fetch( & realloc_calls, 1, __ATOMIC_SEQ_CST );1008 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 948 1009 #endif // __STATISTICS__ 949 1010 950 1011 if ( unlikely( oaddr == 0p ) ) { 951 1012 #ifdef __STATISTICS__ 952 __atomic_add_fetch( & realloc_storage_request, size, __ATOMIC_SEQ_CST );1013 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 953 1014 #endif // __STATISTICS__ 954 1015 return mallocNoStats( size ); 955 1016 } // if 956 1017 957 Heap Manager.Storage.Header * header;958 Heap Manager.FreeHeader * freeElem;1018 Heap.Storage.Header * header; 1019 Heap.FreeHeader * freeElem; 959 1020 size_t bsize, oalign; 960 1021 headers( "realloc", oaddr, header, freeElem, bsize, oalign ); 961 1022 962 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket1023 size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket 963 1024 size_t osize = header->kind.real.size; // old allocation size 964 bool ozfill = (header->kind.real.blockSize & 2);// old allocation zero filled1025 bool ozfill = ZeroFillBit( header ); // old allocation zero filled 965 1026 if ( unlikely( size <= odsize ) && odsize <= size * 2 ) { // allow up to 50% wasted storage 966 1027 header->kind.real.size = size; // reset allocation size … … 972 1033 973 1034 #ifdef __STATISTICS__ 974 __atomic_add_fetch( & realloc_storage_request, size, __ATOMIC_SEQ_CST );1035 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 975 1036 #endif // __STATISTICS__ 976 1037 … … 989 1050 990 1051 if ( unlikely( ozfill ) ) { // previous request zero fill ? 991 header->kind.real.blockSize |= 2;// mark new request as zero filled1052 MarkZeroFilledBit( header ); // mark new request as zero filled 992 1053 if ( size > osize ) { // previous request larger ? 993 1054 memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage … … 998 1059 999 1060 1061 // Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize. 1062 void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) { 1063 return realloc( oaddr, dim * elemSize ); 1064 } // reallocarray 1065 1066 1000 1067 // Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. 
(obsolete) 1001 1068 void * memalign( size_t alignment, size_t size ) { 1002 1069 #ifdef __STATISTICS__ 1003 1070 if ( likely( size > 0 ) ) { 1004 __atomic_add_fetch( & memalign_calls, 1, __ATOMIC_SEQ_CST );1005 __atomic_add_fetch( & memalign_storage_request, size, __ATOMIC_SEQ_CST );1071 __atomic_add_fetch( &stats.memalign_calls, 1, __ATOMIC_SEQ_CST ); 1072 __atomic_add_fetch( &stats.memalign_storage_request, size, __ATOMIC_SEQ_CST ); 1006 1073 } else { 1007 __atomic_add_fetch( & memalign_0_calls, 1, __ATOMIC_SEQ_CST );1074 __atomic_add_fetch( &stats.memalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1008 1075 } // if 1009 1076 #endif // __STATISTICS__ … … 1018 1085 #ifdef __STATISTICS__ 1019 1086 if ( likely( size > 0 ) ) { 1020 __atomic_add_fetch( & cmemalign_calls, 1, __ATOMIC_SEQ_CST );1021 __atomic_add_fetch( & cmemalign_storage_request, size, __ATOMIC_SEQ_CST );1087 __atomic_add_fetch( &stats.cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 1088 __atomic_add_fetch( &stats.cmemalign_storage_request, size, __ATOMIC_SEQ_CST ); 1022 1089 } else { 1023 __atomic_add_fetch( & cmemalign_0_calls, 1, __ATOMIC_SEQ_CST );1090 __atomic_add_fetch( &stats.cmemalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1024 1091 } // if 1025 1092 #endif // __STATISTICS__ … … 1034 1101 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER 1035 1102 #ifdef __STATISTICS__ 1036 __atomic_add_fetch( & cmemalign_0_calls, 1, __ATOMIC_SEQ_CST );1103 __atomic_add_fetch( &stats.cmemalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1037 1104 #endif // __STATISTICS__ 1038 1105 return 0p; 1039 1106 } // if 1040 1107 #ifdef __STATISTICS__ 1041 __atomic_add_fetch( & cmemalign_calls, 1, __ATOMIC_SEQ_CST );1042 __atomic_add_fetch( & cmemalign_storage_request, dim * elemSize, __ATOMIC_SEQ_CST );1108 __atomic_add_fetch( &stats.cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 1109 __atomic_add_fetch( &stats.cmemalign_storage_request, dim * elemSize, __ATOMIC_SEQ_CST ); 1043 1110 #endif // __STATISTICS__ 1044 1111 1045 1112 char * addr = (char *)memalignNoStats( alignment, size ); 1046 1113 1047 Heap Manager.Storage.Header * header;1048 Heap Manager.FreeHeader * freeElem;1114 Heap.Storage.Header * header; 1115 Heap.FreeHeader * freeElem; 1049 1116 size_t bsize; 1050 1117 … … 1062 1129 memset( addr, '\0', size ); // set to zeros 1063 1130 1064 header->kind.real.blockSize |= 2;// mark as zero filled1131 MarkZeroFilledBit( header ); // mark as zero filled 1065 1132 return addr; 1066 1133 } // cmemalign … … 1068 1135 1069 1136 // Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple 1070 // of alignment. This requirement is universally ignored.1137 // of alignment. This requirement is universally ignored. 1071 1138 void * aligned_alloc( size_t alignment, size_t size ) { 1072 1139 return memalign( alignment, size ); … … 1079 1146 // free(3). 1080 1147 int posix_memalign( void ** memptr, size_t alignment, size_t size ) { 1081 if ( alignment < libAlign() || ! is_pow2( alignment) ) return EINVAL; // check alignment1082 * memptr = memalign( alignment, size );1148 if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment 1149 *memptr = memalign( alignment, size ); 1083 1150 return 0; 1084 1151 } // posix_memalign … … 1094 1161 // Same as valloc but rounds size to multiple of page size. 
1095 1162 void * pvalloc( size_t size ) { 1096 return memalign( __page_size, ceiling2( size, __page_size ) ); 1163 return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size 1097 1164 } // pvalloc 1098 1165 … … 1104 1171 if ( unlikely( addr == 0p ) ) { // special case 1105 1172 #ifdef __STATISTICS__ 1106 __atomic_add_fetch( & free_null_calls, 1, __ATOMIC_SEQ_CST );1173 __atomic_add_fetch( &stats.free_null_calls, 1, __ATOMIC_SEQ_CST ); 1107 1174 #endif // __STATISTICS__ 1108 1175 … … 1124 1191 size_t malloc_alignment( void * addr ) { 1125 1192 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment 1126 Heap Manager.Storage.Header * header = headerAddr( addr );1127 if ( (header->kind.fake.alignment & 1) == 1 ) {// fake header ?1128 return header->kind.fake.alignment & -2; // removeflag from value1193 Heap.Storage.Header * header = HeaderAddr( addr ); 1194 if ( unlikely( AlignmentBit( header ) ) ) { // fake header ? 1195 return ClearAlignmentBit( header ); // clear flag from value 1129 1196 } else { 1130 1197 return libAlign(); // minimum alignment 1131 1198 } // if 1132 1199 } // malloc_alignment 1133 1134 1135 // Set the alignment for an the allocation and return previous alignment or 0 if no alignment.1136 size_t malloc_alignment_set$( void * addr, size_t alignment ) {1137 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment1138 size_t ret;1139 HeapManager.Storage.Header * header = headerAddr( addr );1140 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?1141 ret = header->kind.fake.alignment & -2; // remove flag from old value1142 header->kind.fake.alignment = alignment | 1; // add flag to new value1143 } else {1144 ret = 0; // => no alignment to change1145 } // if1146 return ret;1147 } // malloc_alignment_set$1148 1200 1149 1201 … … 1151 1203 bool malloc_zero_fill( void * addr ) { 1152 1204 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 1153 Heap Manager.Storage.Header * header = headerAddr( addr );1154 if ( (header->kind.fake.alignment & 1) == 1 ) {// fake header ?1155 header = realHeader( header ); // backup from fake to real header1156 } // if 1157 return (header->kind.real.blockSize & 2) != 0;// zero filled ?1205 Heap.Storage.Header * header = HeaderAddr( addr ); 1206 if ( unlikely( AlignmentBit( header ) ) ) { // fake header ? 1207 header = RealHeader( header ); // backup from fake to real header 1208 } // if 1209 return ZeroFillBit( header ); // zero filled ? 1158 1210 } // malloc_zero_fill 1159 1211 1160 // Set allocation is zero filled and return previous zero filled. 1161 bool malloc_zero_fill_set$( void * addr ) { 1162 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 1163 HeapManager.Storage.Header * header = headerAddr( addr ); 1164 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 1165 header = realHeader( header ); // backup from fake to real header 1166 } // if 1167 bool ret = (header->kind.real.blockSize & 2) != 0; // zero filled ? 1168 header->kind.real.blockSize |= 2; // mark as zero filled 1169 return ret; 1170 } // malloc_zero_fill_set$ 1171 1172 1173 // Returns original total allocation size (not bucket size) => array size is dimension * sizeif(T). 1212 1213 // Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T). 
1174 1214 size_t malloc_size( void * addr ) { 1175 1215 if ( unlikely( addr == 0p ) ) return 0; // null allocation has zero size 1176 Heap Manager.Storage.Header * header = headerAddr( addr );1177 if ( (header->kind.fake.alignment & 1) == 1 ) {// fake header ?1178 header = realHeader( header ); // backup from fake to real header1216 Heap.Storage.Header * header = HeaderAddr( addr ); 1217 if ( unlikely( AlignmentBit( header ) ) ) { // fake header ? 1218 header = RealHeader( header ); // backup from fake to real header 1179 1219 } // if 1180 1220 return header->kind.real.size; 1181 1221 } // malloc_size 1182 1183 // Set allocation size and return previous size.1184 size_t malloc_size_set$( void * addr, size_t size ) {1185 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size1186 HeapManager.Storage.Header * header = headerAddr( addr );1187 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?1188 header = realHeader( header ); // backup from fake to real header1189 } // if1190 size_t ret = header->kind.real.size;1191 header->kind.real.size = size;1192 return ret;1193 } // malloc_size_set$1194 1222 1195 1223 … … 1198 1226 size_t malloc_usable_size( void * addr ) { 1199 1227 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size 1200 Heap Manager.Storage.Header * header;1201 Heap Manager.FreeHeader * freeElem;1228 Heap.Storage.Header * header; 1229 Heap.FreeHeader * freeElem; 1202 1230 size_t bsize, alignment; 1203 1231 1204 1232 headers( "malloc_usable_size", addr, header, freeElem, bsize, alignment ); 1205 return dataStorage( bsize, addr, header ); // data storage in bucket1233 return DataStorage( bsize, addr, header ); // data storage in bucket 1206 1234 } // malloc_usable_size 1207 1235 … … 1216 1244 1217 1245 1218 // Changes the file descript er where malloc_stats() writes statistics.1246 // Changes the file descriptor where malloc_stats() writes statistics. 1219 1247 int malloc_stats_fd( int fd __attribute__(( unused )) ) { 1220 1248 #ifdef __STATISTICS__ … … 1223 1251 return temp; 1224 1252 #else 1225 return -1; 1253 return -1; // unsupported 1226 1254 #endif // __STATISTICS__ 1227 1255 } // malloc_stats_fd 1256 1257 1258 // Prints an XML string that describes the current state of the memory-allocation implementation in the caller. 1259 // The string is printed on the file stream stream. The exported string includes information about all arenas (see 1260 // malloc). 1261 int malloc_info( int options, FILE * stream __attribute__(( unused )) ) { 1262 if ( options != 0 ) { errno = EINVAL; return -1; } 1263 #ifdef __STATISTICS__ 1264 return printStatsXML( stream ); 1265 #else 1266 return 0; // unsupported 1267 #endif // __STATISTICS__ 1268 } // malloc_info 1228 1269 1229 1270 … … 1231 1272 // specifies the parameter to be modified, and value specifies the new value for that parameter. 
1232 1273 int mallopt( int option, int value ) { 1274 if ( value < 0 ) return 0; 1233 1275 choose( option ) { 1234 1276 case M_TOP_PAD: 1235 heapExpand = ceiling2( value, __page_size ); return 1; 1277 heapExpand = ceiling2( value, __page_size ); 1278 return 1; 1236 1279 case M_MMAP_THRESHOLD: 1237 1280 if ( setMmapStart( value ) ) return 1; 1238 break; 1239 1281 } // switch 1240 1282 return 0; // error, unsupported 1241 1283 } // mallopt … … 1246 1288 return 0; // => impossible to release memory 1247 1289 } // malloc_trim 1248 1249 1250 // Exports an XML string that describes the current state of the memory-allocation implementation in the caller. 1251 // The string is printed on the file stream stream. The exported string includes information about all arenas (see 1252 // malloc). 1253 int malloc_info( int options, FILE * stream __attribute__(( unused )) ) { 1254 if ( options != 0 ) { errno = EINVAL; return -1; } 1255 #ifdef __STATISTICS__ 1256 return printStatsXML( stream ); 1257 #else 1258 return 0; // unsupported 1259 #endif // __STATISTICS__ 1260 } // malloc_info 1261 1290 1262 1291 … … 1275 1304 return 0; // unsupported 1276 1305 } // malloc_set_state 1306 1307 1308 // Sets the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation. 1309 __attribute__((weak)) size_t malloc_expansion() { return __CFA_DEFAULT_HEAP_EXPANSION__; } 1310 1311 // Sets the crossover point between allocations occurring in the sbrk area or separately mmapped. 1312 __attribute__((weak)) size_t malloc_mmap_start() { return __CFA_DEFAULT_MMAP_START__; } 1313 1314 // Amount subtracted to adjust for unfreed program storage (debug only). 1315 __attribute__((weak)) size_t malloc_unfreed() { return __CFA_DEFAULT_HEAP_UNFREED__; } 1277 1316 } // extern "C" 1278 1317 … … 1283 1322 if ( unlikely( size == 0 ) ) { // special cases 1284 1323 #ifdef __STATISTICS__ 1285 __atomic_add_fetch( & resize_0_calls, 1, __ATOMIC_SEQ_CST );1324 __atomic_add_fetch( &stats.resize_0_calls, 1, __ATOMIC_SEQ_CST ); 1286 1325 #endif // __STATISTICS__ 1287 1326 free( oaddr ); … … 1296 1335 if ( unlikely( oaddr == 0p ) ) { 1297 1336 #ifdef __STATISTICS__ 1298 __atomic_add_fetch( & resize_calls, 1, __ATOMIC_SEQ_CST );1337 __atomic_add_fetch( &stats.resize_calls, 1, __ATOMIC_SEQ_CST ); 1299 __atomic_add_fetch( & resize_storage_request, size, __ATOMIC_SEQ_CST );1338 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 1300 1339 #endif // __STATISTICS__ 1301 1340 return memalignNoStats( nalign, size ); … … 1303 1342 1304 1343 // Attempt to reuse existing alignment. 1305 Heap Manager.Storage.Header * header = headerAddr( oaddr );1344 Heap.Storage.Header * header = HeaderAddr( oaddr ); 1306 bool isFakeHeader = header->kind.fake.alignment & 1;// old fake header ? 1345 bool isFakeHeader = AlignmentBit( header ); // old fake header ? 1307 1346 size_t oalign; 1347 1308 if ( isFakeHeader ) { 1348 if ( unlikely( isFakeHeader ) ) { 1309 oalign = header->kind.fake.alignment & -2; // old alignment 1349 oalign = ClearAlignmentBit( header ); // old alignment 1310 if ( (uintptr_t)oaddr % nalign == 0 // lucky match ? 1350 if ( unlikely( (uintptr_t)oaddr % nalign == 0 // lucky match ? 1311 1351 && ( oalign <= nalign // going down 1312 1352 || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
1313 ) {1314 headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)1315 Heap Manager.FreeHeader * freeElem;1353 ) ) { 1354 HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same) 1355 Heap.FreeHeader * freeElem; 1316 1356 size_t bsize, oalign; 1317 1357 headers( "resize", oaddr, header, freeElem, bsize, oalign ); 1318 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket1358 size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket 1319 1359 1320 1360 if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted data storage 1321 headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same) 1322 1323 header->kind.real.blockSize &= -2; // turn off 0 fill 1361 HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same) 1362 ClearZeroFillBit( header ); // turn off 0 fill 1324 1363 header->kind.real.size = size; // reset allocation size 1325 1364 return oaddr; … … 1332 1371 1333 1372 #ifdef __STATISTICS__ 1334 __atomic_add_fetch( & resize_storage_request, size, __ATOMIC_SEQ_CST );1373 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 1335 1374 #endif // __STATISTICS__ 1336 1375 … … 1345 1384 if ( unlikely( size == 0 ) ) { // special cases 1346 1385 #ifdef __STATISTICS__ 1347 __atomic_add_fetch( & realloc_0_calls, 1, __ATOMIC_SEQ_CST );1386 __atomic_add_fetch( &stats.realloc_0_calls, 1, __ATOMIC_SEQ_CST ); 1348 1387 #endif // __STATISTICS__ 1349 1388 free( oaddr ); … … 1358 1397 if ( unlikely( oaddr == 0p ) ) { 1359 1398 #ifdef __STATISTICS__ 1360 __atomic_add_fetch( & realloc_calls, 1, __ATOMIC_SEQ_CST );1361 __atomic_add_fetch( & realloc_storage_request, size, __ATOMIC_SEQ_CST );1399 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 1400 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 1362 1401 #endif // __STATISTICS__ 1363 1402 return memalignNoStats( nalign, size ); … … 1365 1404 1366 1405 // Attempt to reuse existing alignment. 1367 Heap Manager.Storage.Header * header = headerAddr( oaddr );1368 bool isFakeHeader = header->kind.fake.alignment & 1;// old fake header ?1406 Heap.Storage.Header * header = HeaderAddr( oaddr ); 1407 bool isFakeHeader = AlignmentBit( header ); // old fake header ? 1369 1408 size_t oalign; 1370 if ( isFakeHeader) {1371 oalign = header->kind.fake.alignment & -2;// old alignment1372 if ( (uintptr_t)oaddr % nalign == 0// lucky match ?1409 if ( unlikely( isFakeHeader ) ) { 1410 oalign = ClearAlignmentBit( header ); // old alignment 1411 if ( unlikely( (uintptr_t)oaddr % nalign == 0 // lucky match ? 1373 1412 && ( oalign <= nalign // going down 1374 1413 || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ? 1375 ) {1376 headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)1377 return realloc( oaddr, size ); // duplicate alignment andspecial case checks1414 ) ) { 1415 HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same) 1416 return realloc( oaddr, size ); // duplicate special case checks 1378 1417 } // if 1379 1418 } else if ( ! isFakeHeader // old real header (aligned on libAlign) ? 
1380 && nalign == libAlign() ) // new alignment also on libAlign => no fake header needed 1381 return realloc( oaddr, size ); // duplicate alignment and special case checks 1419 && nalign == libAlign() ) { // new alignment also on libAlign => no fake header needed 1420 return realloc( oaddr, size ); // duplicate special case checks 1421 } // if 1382 1422 1383 1423 #ifdef __STATISTICS__ 1384 __atomic_add_fetch( & realloc_calls, 1, __ATOMIC_SEQ_CST );1385 __atomic_add_fetch( & realloc_storage_request, size, __ATOMIC_SEQ_CST );1424 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 1425 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 1386 1426 #endif // __STATISTICS__ 1387 1427 1388 Heap Manager.FreeHeader * freeElem;1428 Heap.FreeHeader * freeElem; 1389 1429 size_t bsize; 1390 1430 headers( "realloc", oaddr, header, freeElem, bsize, oalign ); … … 1393 1433 1394 1434 size_t osize = header->kind.real.size; // old allocation size 1395 bool ozfill = (header->kind.real.blockSize & 2);// old allocation zero filled1435 bool ozfill = ZeroFillBit( header ); // old allocation zero filled 1396 1436 1397 1437 void * naddr = memalignNoStats( nalign, size ); // create new aligned area … … 1402 1442 1403 1443 if ( unlikely( ozfill ) ) { // previous request zero fill ? 1404 header->kind.real.blockSize |= 2;// mark new request as zero filled1444 MarkZeroFilledBit( header ); // mark new request as zero filled 1405 1445 if ( size > osize ) { // previous request larger ? 1406 1446 memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage -
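The net effect of the heap.cfa changes is easiest to see from the caller's side: HeapManager becomes Heap, the raw blockSize bit twiddling is replaced by the AlignmentBit/ZeroFillBit helper macros, and the counters move into a per-heap stats structure, but the sticky-property contract is unchanged. calloc and cmemalign set the zero-fill bit, realloc keeps it sticky and zeroes any storage added past the old size, while resize deliberately drops all sticky properties. A minimal sketch of that contract, assuming a CFA program compiled against this allocator (resize and malloc_zero_fill are the CFA extras declared in heap.hfa below; the sizes are arbitrary sbrk-bucket sizes):

	#include <heap.hfa>					// CFA extras: resize, malloc_zero_fill
	#include <stdlib.h>					// calloc, realloc, free
	#include <assert.h>

	int main() {
		int * p = (int *)calloc( 10, sizeof(int) );	// zero filled => zero-fill bit set
		assert( malloc_zero_fill( p ) );
		p = (int *)realloc( p, 20 * sizeof(int) );	// sticky: bit kept, added storage zeroed
		assert( malloc_zero_fill( p ) );
		p = (int *)resize( p, 40 * sizeof(int) );	// resize does NOT preserve sticky properties
		assert( ! malloc_zero_fill( p ) );		// bit cleared (in place) or absent (new block)
		free( p );
	}

The aligned overloads resize( oaddr, alignment, size ) and realloc( oaddr, alignment, size ) follow the same rules, first trying to reuse a matching fake-header alignment before falling back to a fresh aligned allocation.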
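The revised mallopt() also rejects negative values up front and switches to CFA's choose statement, and malloc_info() now sits ahead of the malloc_get_state/malloc_set_state stubs. A short sketch of both entry points; M_TOP_PAD and M_MMAP_THRESHOLD come from malloc.h, and malloc_info() only produces XML when the allocator is built with __STATISTICS__:

	#include <malloc.h>					// mallopt, M_TOP_PAD, M_MMAP_THRESHOLD, malloc_info
	#include <stdio.h>

	int main() {
		if ( mallopt( M_TOP_PAD, -1 ) == 0 ) {		// negative values are now rejected
			printf( "bad mallopt value\n" );
		} // if
		mallopt( M_TOP_PAD, 512 * 1024 );		// heap expansion, rounded up to a page multiple
		mallopt( M_MMAP_THRESHOLD, 1024 * 1024 );	// sbrk/mmap crossover
		malloc_info( 0, stdout );			// XML statistics, or 0 if unsupported
	}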
libcfa/src/heap.hfa
r9e7236f4 r24ceace 10 10 // Created On : Tue May 26 11:23:55 2020 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Aug 8 17:36:48 202013 // Update Count : 1612 // Last Modified On : Thu Apr 21 22:52:25 2022 13 // Update Count : 21 14 14 // 15 15 16 16 #pragma once 17 17 18 size_t default_mmap_start(); // CFA extras 19 size_t default_heap_expansion(); 18 #include <malloc.h> 20 19 21 20 bool traceHeap(); … … 34 33 #ifndef M_MMAP_THRESHOLD 35 34 #define M_MMAP_THRESHOLD (-1) 36 #endif // M_TOP_PAD 35 #endif // M_MMAP_THRESHOLD 36 37 37 #ifndef M_TOP_PAD 38 38 #define M_TOP_PAD (-2) … … 40 40 41 41 extern "C" { 42 void * aalloc( size_t dim, size_t elemSize ); 43 void * resize( void * oaddr, size_t size ); 44 void * amemalign( size_t align, size_t dim, size_t elemSize ); 45 void * cmemalign( size_t align, size_t dim, size_t elemSize ); 42 // New allocation operations. 43 void * aalloc( size_t dim, size_t elemSize ) __attribute__ ((malloc)); 44 void * resize( void * oaddr, size_t size ) __attribute__ ((malloc)); 45 void * amemalign( size_t align, size_t dim, size_t elemSize ) __attribute__ ((malloc)); 46 void * cmemalign( size_t align, size_t dim, size_t elemSize ) __attribute__ ((malloc)); 46 47 size_t malloc_alignment( void * addr ); 47 48 bool malloc_zero_fill( void * addr ); 48 49 size_t malloc_size( void * addr ); 50 int malloc_stats_fd( int fd ); 49 51 size_t malloc_usable_size( void * addr ); 50 int malloc_stats_fd( int fd ); 52 size_t malloc_expansion(); // heap expansion size (bytes) 53 size_t malloc_mmap_start(); // crossover allocation size from sbrk to mmap 54 size_t malloc_unfreed(); // heap unfreed size (bytes) 51 55 } // extern "C" 52 56 53 void * resize( void * oaddr, size_t nalign, size_t size ); 54 void * realloc( void * oaddr, size_t nalign, size_t size ); 57 void * resize( void * oaddr, size_t alignment, size_t size ); 58 void * realloc( void * oaddr, size_t alignment, size_t size ); 59 void * reallocarray( void * oaddr, size_t nalign, size_t dim, size_t elemSize ); 55 60 56 61 // Local Variables: // -
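Besides the malloc attributes, heap.hfa now exposes the introspection calls next to three tuning knobs. Because the heap.cfa definitions of malloc_expansion(), malloc_mmap_start(), and malloc_unfreed() carry __attribute__((weak)), a program should be able to replace a default simply by providing its own strong definition. A sketch under that assumption (the exact numbers printed depend on the bucket sizes and the __CFA_DEFAULT_* build settings):

	#include <heap.hfa>					// malloc_alignment, malloc_size, malloc_usable_size, ...
	#include <stdio.h>

	size_t malloc_mmap_start() { return 1024 * 1024; }	// strong definition overrides the weak default

	int main() {
		void * p = memalign( 64, 100 );			// memalign comes via the new #include <malloc.h>
		printf( "alignment %zu, request %zu, usable %zu\n",
			malloc_alignment( p ),			// 64, recovered from the fake header
			malloc_size( p ),			// 100, the original request size
			malloc_usable_size( p ) );		// bucket data storage, >= 100
		free( p );
		printf( "expansion %zu, mmap start %zu\n", malloc_expansion(), malloc_mmap_start() );
	}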
libcfa/src/stdlib.hfa
r9e7236f4 r24ceace 10 10 // Created On : Thu Jan 28 17:12:35 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Feb 12 17:22:25202213 // Update Count : 64 312 // Last Modified On : Mon Apr 18 17:57:36 2022 13 // Update Count : 644 14 14 // 15 15 … … 209 209 210 210 forall( TT... | { T * alloc_internal$( void *, T *, size_t, size_t, S_fill(T), TT ); } ) { 211 T * alloc_internal$( void * , T * , size_t Align, size_t Dim, S_fill(T) Fill, T_resize Resize, TT rest) {211 T * alloc_internal$( void *, T *, size_t Align, size_t Dim, S_fill(T) Fill, T_resize Resize, TT rest ) { 212 212 return alloc_internal$( Resize, (T*)0p, Align, Dim, Fill, rest); 213 213 } 214 214 215 T * alloc_internal$( void * , T * , size_t Align, size_t Dim, S_fill(T) Fill, S_realloc(T) Realloc, TT rest) {215 T * alloc_internal$( void *, T *, size_t Align, size_t Dim, S_fill(T) Fill, S_realloc(T) Realloc, TT rest ) { 216 216 return alloc_internal$( (void*)0p, Realloc, Align, Dim, Fill, rest); 217 217 } 218 218 219 T * alloc_internal$( void * Resize, T * Realloc, size_t , size_t Dim, S_fill(T) Fill, T_align Align, TT rest) {219 T * alloc_internal$( void * Resize, T * Realloc, size_t, size_t Dim, S_fill(T) Fill, T_align Align, TT rest ) { 220 220 return alloc_internal$( Resize, Realloc, Align, Dim, Fill, rest); 221 221 } 222 222 223 T * alloc_internal$( void * Resize, T * Realloc, size_t Align, size_t Dim, S_fill(T) , S_fill(T) Fill, TT rest) {224 return alloc_internal$( Resize, Realloc, Align, Dim, Fill, rest );223 T * alloc_internal$( void * Resize, T * Realloc, size_t Align, size_t Dim, S_fill(T), S_fill(T) Fill, TT rest ) { 224 return alloc_internal$( Resize, Realloc, Align, Dim, Fill, rest ); 225 225 } 226 226 227 227 T * alloc( TT all ) { 228 return alloc_internal$( (void*)0p, (T*)0p, (_Alignof(T) > libAlign() ? _Alignof(T) : libAlign()), (size_t)1, (S_fill(T)){'0'}, all );228 return alloc_internal$( (void*)0p, (T*)0p, (_Alignof(T) > libAlign() ? _Alignof(T) : libAlign()), (size_t)1, (S_fill(T)){'0'}, all ); 229 229 } 230 230 231 231 T * alloc( size_t dim, TT all ) { 232 return alloc_internal$( (void*)0p, (T*)0p, (_Alignof(T) > libAlign() ? _Alignof(T) : libAlign()), dim, (S_fill(T)){'0'}, all );232 return alloc_internal$( (void*)0p, (T*)0p, (_Alignof(T) > libAlign() ? _Alignof(T) : libAlign()), dim, (S_fill(T)){'0'}, all ); 233 233 } 234 234 } // distribution TT
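The alloc_internal$ cleanups above only touch spacing, but these overloads are the core of CFA's variadic alloc interface: the Resize, Realloc, Align, and Fill parameters let one call express any combination of dimension, alignment, fill, and reallocation instead of choosing among malloc/calloc/memalign/realloc. A sketch of the call styles this enables, assuming CFA's backtick argument qualifiers (`fill, `align, `realloc) that construct the S_fill, T_align, and S_realloc arguments:

	#include <stdlib.hfa>

	int main() {
		int * ip = alloc();				// one int, as malloc( sizeof(int) )
		int * ap = alloc( 10 );				// 10 ints, as aalloc( 10, sizeof(int) )
		int * zp = alloc( 10, 0`fill );			// 10 ints, each filled with 0
		int * xp = alloc( 10, 64`align );		// 10 ints on a 64-byte boundary
		ap = alloc( 20, ap`realloc );			// grow to 20 ints, contents preserved
		free( ip ); free( ap ); free( zp ); free( xp );
	}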