Changeset 31a5f418
- Timestamp: Apr 22, 2022, 6:36:18 PM (3 years ago)
- Branches: ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children: 58e97d9
- Parents: 16cc9f7
- Location: libcfa/src
- Files: 2 edited
libcfa/src/heap.cfa
r16cc9f7 r31a5f418 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Jan 2 23:29:41202213 // Update Count : 1 05812 // Last Modified On : Fri Apr 22 18:25:23 2022 13 // Update Count : 1121 14 14 // 15 15 16 #include <unistd.h> // sbrk, sysconf17 #include <stdlib.h> // EXIT_FAILURE18 #include <stdbool.h> // true, false19 #include <stdio.h> // snprintf, fileno20 #include <errno.h> // errno21 16 #include <string.h> // memset, memcpy 22 17 #include <limits.h> // ULONG_MAX 18 #include <stdlib.h> // EXIT_FAILURE 19 #include <errno.h> // errno, ENOMEM, EINVAL 20 #include <unistd.h> // STDERR_FILENO, sbrk, sysconf 23 21 #include <malloc.h> // memalign, malloc_usable_size 24 22 #include <sys/mman.h> // mmap, munmap 23 #include <sys/sysinfo.h> // get_nprocs 25 24 26 25 #include "bits/align.hfa" // libAlign … … 31 30 #include "bitmanip.hfa" // is_pow2, ceiling2 32 31 32 #define FASTLOOKUP 33 #define __STATISTICS__ 34 35 33 36 static bool traceHeap = false; 34 37 … … 70 73 71 74 enum { 72 // Define the default extension heap amount in units of bytes. When the uC++ supplied heap reaches the brk address, 73 // the brk address is extended by the extension amount. 74 __CFA_DEFAULT_HEAP_EXPANSION__ = (10 * 1024 * 1024), 75 76 // Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; 77 // values greater than or equal to this value are mmap from the operating system. 78 __CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1), 79 }; 80 81 size_t default_mmap_start() __attribute__(( weak )) { 82 return __CFA_DEFAULT_MMAP_START__; 83 } // default_mmap_start 84 85 size_t default_heap_expansion() __attribute__(( weak )) { 86 return __CFA_DEFAULT_HEAP_EXPANSION__; 87 } // default_heap_expansion 75 // The default extension heap amount in units of bytes. When the current heap reaches the brk address, the brk 76 // address is extended by the extension amount. 77 __CFA_DEFAULT_HEAP_EXPANSION__ = 10 * 1024 * 1024, 78 79 // The mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; values 80 // greater than or equal to this value are mmap from the operating system. 81 __CFA_DEFAULT_MMAP_START__ = 512 * 1024 + 1, 82 83 // The default unfreed storage amount in units of bytes. When the uC++ program ends it subtracts this amount from 84 // the malloc/free counter to adjust for storage the program does not free. 85 __CFA_DEFAULT_HEAP_UNFREED__ = 0 86 }; // enum 88 87 89 88 … … 135 134 136 135 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage. 137 // Break recu sion by hardcoding number of buckets and statically checking number is correct after bucket array defined.136 // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined. 
138 137 enum { NoBucketSizes = 91 }; // number of buckets sizes 139 138 140 struct Heap Manager{139 struct Heap { 141 140 struct Storage { 142 141 struct Header { // header … … 145 144 union { 146 145 struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header 147 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4148 uint64_t padding; // unused, force home/blocksize to overlay alignment in fake header149 #endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4150 151 146 union { 147 // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped 152 148 // FreeHeader * home; // allocated block points back to home locations (must overlay alignment) 153 // 2nd low-order bit => zero filled154 149 void * home; // allocated block points back to home locations (must overlay alignment) 155 150 size_t blockSize; // size for munmap (must overlay alignment) 156 151 #if BUCKETLOCK == SPINLOCK 157 Storage * next; // freed block points next freed block of same size152 Storage * next; // freed block points to next freed block of same size 158 153 #endif // SPINLOCK 159 154 }; 160 155 size_t size; // allocation size in bytes 161 162 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4163 uint64_t padding; // unused, force home/blocksize to overlay alignment in fake header164 #endif // __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4165 156 }; 166 157 #if BUCKETLOCK == LOCKFREE … … 171 162 172 163 struct FakeHeader { 173 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 174 uint32_t alignment; // 1st low-order bit => fake header & alignment 175 #endif // __ORDER_LITTLE_ENDIAN__ 176 177 uint32_t offset; 178 179 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 180 uint32_t alignment; // low-order bits of home/blockSize used for tricks 181 #endif // __ORDER_BIG_ENDIAN__ 164 uintptr_t alignment; // 1st low-order bit => fake header & alignment 165 uintptr_t offset; 182 166 } fake; // FakeHeader 183 167 } kind; // Kind 184 168 } header; // Header 169 185 170 char pad[libAlign() - sizeof( Header )]; 186 171 char data[0]; // storage 187 172 }; // Storage 188 173 189 static_assert( libAlign() >= sizeof( Storage ), " libAlign()< sizeof( Storage )" );174 static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" ); 190 175 191 176 struct FreeHeader { … … 206 191 void * heapEnd; // logical end of heap 207 192 size_t heapRemaining; // amount of storage not allocated in the current chunk 208 }; // Heap Manager193 }; // Heap 209 194 210 195 #if BUCKETLOCK == LOCKFREE 211 196 static inline { 212 Link(Heap Manager.Storage) * ?`next( HeapManager.Storage * this ) { return &this->header.kind.real.next; }213 void ?{}( Heap Manager.FreeHeader & ) {}214 void ^?{}( Heap Manager.FreeHeader & ) {}197 Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; } 198 void ?{}( Heap.FreeHeader & ) {} 199 void ^?{}( Heap.FreeHeader & ) {} 215 200 } // distribution 216 201 #endif // LOCKFREE 217 202 218 static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return freeheader.blockSize; } 219 220 221 #define FASTLOOKUP 222 #define __STATISTICS__ 203 static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; } 204 205 206 #ifdef FASTLOOKUP 207 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes 208 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 209 #endif // FASTLOOKUP 210 211 static 
const off_t mmapFd = -1; // fake or actual fd for anonymous file 212 #ifdef __CFA_DEBUG__ 213 static bool heapBoot = 0; // detect recursion during boot 214 #endif // __CFA_DEBUG__ 215 223 216 224 217 // Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16. … … 226 219 // malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed. 227 220 static const unsigned int bucketSizes[] @= { // different bucket sizes 228 16 + sizeof(Heap Manager.Storage), 32 + sizeof(HeapManager.Storage), 48 + sizeof(HeapManager.Storage), 64 + sizeof(HeapManager.Storage), // 4229 96 + sizeof(Heap Manager.Storage), 112 + sizeof(HeapManager.Storage), 128 + sizeof(HeapManager.Storage), // 3230 160, 192, 224, 256 + sizeof(Heap Manager.Storage), // 4231 320, 384, 448, 512 + sizeof(Heap Manager.Storage), // 4232 640, 768, 896, 1_024 + sizeof(Heap Manager.Storage), // 4233 1_536, 2_048 + sizeof(Heap Manager.Storage), // 2234 2_560, 3_072, 3_584, 4_096 + sizeof(Heap Manager.Storage), // 4235 6_144, 8_192 + sizeof(Heap Manager.Storage), // 2236 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap Manager.Storage), // 8237 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap Manager.Storage), // 8238 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap Manager.Storage), // 8239 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap Manager.Storage), // 8240 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap Manager.Storage), // 8241 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap Manager.Storage), // 8242 655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap Manager.Storage), // 4243 1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap Manager.Storage), // 8244 2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap Manager.Storage), // 4221 16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4 222 96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3 223 160, 192, 224, 256 + sizeof(Heap.Storage), // 4 224 320, 384, 448, 512 + sizeof(Heap.Storage), // 4 225 640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4 226 1_536, 2_048 + sizeof(Heap.Storage), // 2 227 2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4 228 6_144, 8_192 + sizeof(Heap.Storage), // 2 229 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8 230 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8 231 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8 232 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8 233 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8 234 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8 235 655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4 236 1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8 237 2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4 245 238 }; 246 239 247 240 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" ); 248 
241 249 #ifdef FASTLOOKUP250 enum { LookupSizes = 65_536 + sizeof(HeapManager.Storage) }; // number of fast lookup sizes251 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes252 #endif // FASTLOOKUP253 254 static const off_t mmapFd = -1; // fake or actual fd for anonymous file255 #ifdef __CFA_DEBUG__256 static bool heapBoot = 0; // detect recursion during boot257 #endif // __CFA_DEBUG__258 242 259 243 // The constructor for heapManager is called explicitly in memory_startup. 260 static Heap ManagerheapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing244 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 261 245 262 246 263 247 #ifdef __STATISTICS__ 248 enum { CntTriples = 12 }; // number of counter triples 249 enum { MALLOC, AALLOC, CALLOC, MEMALIGN, AMEMALIGN, CMEMALIGN, RESIZE, REALLOC, FREE }; 250 251 struct StatsOverlay { // overlay for iteration 252 unsigned int calls, calls_0; 253 unsigned long long int request, alloc; 254 }; 255 264 256 // Heap statistics counters. 265 static unsigned int malloc_calls, malloc_0_calls; 266 static unsigned long long int malloc_storage_request, malloc_storage_alloc; 267 static unsigned int aalloc_calls, aalloc_0_calls; 268 static unsigned long long int aalloc_storage_request, aalloc_storage_alloc; 269 static unsigned int calloc_calls, calloc_0_calls; 270 static unsigned long long int calloc_storage_request, calloc_storage_alloc; 271 static unsigned int memalign_calls, memalign_0_calls; 272 static unsigned long long int memalign_storage_request, memalign_storage_alloc; 273 static unsigned int amemalign_calls, amemalign_0_calls; 274 static unsigned long long int amemalign_storage_request, amemalign_storage_alloc; 275 static unsigned int cmemalign_calls, cmemalign_0_calls; 276 static unsigned long long int cmemalign_storage_request, cmemalign_storage_alloc; 277 static unsigned int resize_calls, resize_0_calls; 278 static unsigned long long int resize_storage_request, resize_storage_alloc; 279 static unsigned int realloc_calls, realloc_0_calls; 280 static unsigned long long int realloc_storage_request, realloc_storage_alloc; 281 static unsigned int free_calls, free_null_calls; 282 static unsigned long long int free_storage_request, free_storage_alloc; 283 static unsigned int mmap_calls; 284 static unsigned long long int mmap_storage_request, mmap_storage_alloc; 285 static unsigned int munmap_calls; 286 static unsigned long long int munmap_storage_request, munmap_storage_alloc; 257 union HeapStatistics { 258 struct { // minimum qualification 259 unsigned int malloc_calls, malloc_0_calls; 260 unsigned long long int malloc_storage_request, malloc_storage_alloc; 261 unsigned int aalloc_calls, aalloc_0_calls; 262 unsigned long long int aalloc_storage_request, aalloc_storage_alloc; 263 unsigned int calloc_calls, calloc_0_calls; 264 unsigned long long int calloc_storage_request, calloc_storage_alloc; 265 unsigned int memalign_calls, memalign_0_calls; 266 unsigned long long int memalign_storage_request, memalign_storage_alloc; 267 unsigned int amemalign_calls, amemalign_0_calls; 268 unsigned long long int amemalign_storage_request, amemalign_storage_alloc; 269 unsigned int cmemalign_calls, cmemalign_0_calls; 270 unsigned long long int cmemalign_storage_request, cmemalign_storage_alloc; 271 unsigned int resize_calls, resize_0_calls; 272 unsigned long long int resize_storage_request, resize_storage_alloc; 273 unsigned int realloc_calls, 
realloc_0_calls; 274 unsigned long long int realloc_storage_request, realloc_storage_alloc; 275 unsigned int free_calls, free_null_calls; 276 unsigned long long int free_storage_request, free_storage_alloc; 277 unsigned int away_pulls, away_pushes; 278 unsigned long long int away_storage_request, away_storage_alloc; 279 unsigned int mmap_calls, mmap_0_calls; // no zero calls 280 unsigned long long int mmap_storage_request, mmap_storage_alloc; 281 unsigned int munmap_calls, munmap_0_calls; // no zero calls 282 unsigned long long int munmap_storage_request, munmap_storage_alloc; 283 }; 284 struct StatsOverlay counters[CntTriples]; // overlay for iteration 285 }; // HeapStatistics 286 287 static_assert( sizeof(HeapStatistics) == CntTriples * sizeof(StatsOverlay), 288 "Heap statistics counter-triplets does not match with array size" ); 289 290 static void HeapStatisticsCtor( HeapStatistics & stats ) { 291 memset( &stats, '\0', sizeof(stats) ); // very fast 292 // for ( unsigned int i = 0; i < CntTriples; i += 1 ) { 293 // stats.counters[i].calls = stats.counters[i].calls_0 = stats.counters[i].request = stats.counters[i].alloc = 0; 294 // } // for 295 } // HeapStatisticsCtor 296 297 static HeapStatistics & ?+=?( HeapStatistics & lhs, const HeapStatistics & rhs ) { 298 for ( unsigned int i = 0; i < CntTriples; i += 1 ) { 299 lhs.counters[i].calls += rhs.counters[i].calls; 300 lhs.counters[i].calls_0 += rhs.counters[i].calls_0; 301 lhs.counters[i].request += rhs.counters[i].request; 302 lhs.counters[i].alloc += rhs.counters[i].alloc; 303 } // for 304 return lhs; 305 } // ?+=? 306 307 static HeapStatistics stats; // zero filled 287 308 static unsigned int sbrk_calls; 288 309 static unsigned long long int sbrk_storage; … … 290 311 static int stats_fd = STDERR_FILENO; // default stderr 291 312 313 #define prtFmt \ 314 "\nHeap statistics: (storage request / allocation)\n" \ 315 " malloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 316 " aalloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 317 " calloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 318 " memalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 319 " amemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 320 " cmemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 321 " resize >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 322 " realloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 323 " free !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n" \ 324 " sbrk calls %'u; storage %'llu bytes\n" \ 325 " mmap calls %'u; storage %'llu / %'llu bytes\n" \ 326 " munmap calls %'u; storage %'llu / %'llu bytes\n" \ 327 292 328 // Use "write" because streams may be shutdown when calls are made. 
293 static void printStats() { 294 char helpText[1024]; 295 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 296 "\nHeap statistics: (storage request / allocation + header)\n" 297 " malloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 298 " aalloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 299 " calloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 300 " memalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 301 " amemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 302 " cmemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 303 " resize >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 304 " realloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" 305 " free !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n" 306 " sbrk calls %'u; storage %'llu bytes\n" 307 " mmap calls %'u; storage %'llu / %'llu bytes\n" 308 " munmap calls %'u; storage %'llu / %'llu bytes\n", 309 malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc, 310 aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc, 311 calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc, 312 memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc, 313 amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc, 314 cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc, 315 resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc, 316 realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc, 317 free_calls, free_null_calls, free_storage_request, free_storage_alloc, 318 sbrk_calls, sbrk_storage, 319 mmap_calls, mmap_storage_request, mmap_storage_alloc, 320 munmap_calls, munmap_storage_request, munmap_storage_alloc 329 static void printStats() { // see malloc_stats 330 char helpText[sizeof(prtFmt) + 1024]; // space for message and values 331 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), prtFmt, 332 stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc, 333 stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc, 334 stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc, 335 stats.memalign_calls, stats.memalign_0_calls, stats.memalign_storage_request, stats.memalign_storage_alloc, 336 stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc, 337 stats.cmemalign_calls, stats.cmemalign_0_calls, stats.cmemalign_storage_request, stats.cmemalign_storage_alloc, 338 stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc, 339 stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc, 340 stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc, 341 sbrk_calls, sbrk_storage, 342 stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc, 343 stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc 321 344 ); 322 345 } // printStats 323 346 347 #define prtFmtXML \ 348 "<malloc version=\"1\">\n" \ 349 "<heap nr=\"0\">\n" \ 350 "<sizes>\n" \ 351 "</sizes>\n" \ 352 "<total type=\"malloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 353 "<total 
type=\"aalloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 354 "<total type=\"calloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 355 "<total type=\"memalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 356 "<total type=\"amemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 357 "<total type=\"cmemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 358 "<total type=\"resize\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 359 "<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 360 "<total type=\"free\" !null=\"%'u;\" 0 null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 361 "<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n" \ 362 "<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\" / > bytes\n" \ 363 "<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 364 "</malloc>" 365 324 366 static int printStatsXML( FILE * stream ) { // see malloc_info 325 char helpText[1024]; 326 int len = snprintf( helpText, sizeof(helpText), 327 "<malloc version=\"1\">\n" 328 "<heap nr=\"0\">\n" 329 "<sizes>\n" 330 "</sizes>\n" 331 "<total type=\"malloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 332 "<total type=\"aalloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 333 "<total type=\"calloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 334 "<total type=\"memalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 335 "<total type=\"amemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 336 "<total type=\"cmemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 337 "<total type=\"resize\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 338 "<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 339 "<total type=\"free\" !null=\"%'u;\" 0 null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 340 "<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n" 341 "<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\" / > bytes\n" 342 "<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" 343 "</malloc>", 344 malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc, 345 aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc, 346 calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc, 347 memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc, 348 amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc, 349 cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc, 350 resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc, 351 realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc, 352 free_calls, free_null_calls, free_storage_request, free_storage_alloc, 353 sbrk_calls, sbrk_storage, 354 mmap_calls, mmap_storage_request, mmap_storage_alloc, 355 munmap_calls, munmap_storage_request, munmap_storage_alloc 367 char helpText[sizeof(prtFmtXML) + 1024]; // space for message and values 368 return __cfaabi_bits_print_buffer( fileno( stream ), helpText, sizeof(helpText), prtFmtXML, 369 stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, 
stats.malloc_storage_alloc, 370 stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc, 371 stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc, 372 stats.memalign_calls, stats.memalign_0_calls, stats.memalign_storage_request, stats.memalign_storage_alloc, 373 stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc, 374 stats.cmemalign_calls, stats.cmemalign_0_calls, stats.cmemalign_storage_request, stats.cmemalign_storage_alloc, 375 stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc, 376 stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc, 377 stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc, 378 sbrk_calls, sbrk_storage, 379 stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc, 380 stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc 356 381 ); 357 __cfaabi_bits_write( fileno( stream ), helpText, len ); // ensures all bytes written or exit358 return len;359 382 } // printStatsXML 360 383 #endif // __STATISTICS__ … … 394 417 // <-----------------<------------+-----------------------------> bsize (bucket size) 395 418 // |fake-header | addr 396 #define headerAddr( addr ) ((Heap Manager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))397 #define realHeader( header ) ((Heap Manager.Storage.Header *)((char *)header - header->kind.fake.offset))419 #define headerAddr( addr ) ((Heap.Storage.Header *)( (char *)addr - sizeof(Heap.Storage) )) 420 #define realHeader( header ) ((Heap.Storage.Header *)((char *)header - header->kind.fake.offset)) 398 421 399 422 // <-------<<--------------------- dsize ---------------------->> bsize (bucket size) … … 422 445 423 446 424 static inline void fakeHeader( Heap Manager.Storage.Header *& header, size_t & alignment ) {447 static inline void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) { 425 448 if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ? 426 449 alignment = header->kind.fake.alignment & -2; // remove flag from value … … 435 458 436 459 437 static inline bool headers( const char name[] __attribute__(( unused )), void * addr, Heap Manager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem,460 static inline bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header, Heap.FreeHeader *& freeElem, 438 461 size_t & size, size_t & alignment ) with( heapManager ) { 439 462 header = headerAddr( addr ); … … 446 469 447 470 #ifdef __CFA_DEBUG__ 448 checkHeader( header < (Heap Manager.Storage.Header *)heapBegin, name, addr ); // bad low address ?471 checkHeader( header < (Heap.Storage.Header *)heapBegin, name, addr ); // bad low address ? 449 472 #endif // __CFA_DEBUG__ 450 473 … … 452 475 fakeHeader( header, alignment ); 453 476 #ifdef __CFA_DEBUG__ 454 checkHeader( header < (Heap Manager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)477 checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? 
(offset could be + or -) 455 478 #endif // __CFA_DEBUG__ 456 479 457 freeElem = (Heap Manager.FreeHeader *)((size_t)header->kind.real.home & -3);480 freeElem = (Heap.FreeHeader *)((size_t)header->kind.real.home & -3); 458 481 #ifdef __CFA_DEBUG__ 459 482 if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) { … … 519 542 } // if 520 543 521 Heap Manager.Storage * block = (HeapManager.Storage *)heapEnd;544 Heap.Storage * block = (Heap.Storage *)heapEnd; 522 545 heapRemaining = rem; 523 546 heapEnd = (char *)heapEnd + size; … … 528 551 529 552 static inline void * doMalloc( size_t size ) with( heapManager ) { 530 Heap Manager.Storage * block; // pointer to new block of storage553 Heap.Storage * block; // pointer to new block of storage 531 554 532 555 // Look up size in the size list. Make sure the user request includes space for the header that must be allocated 533 556 // along with the block and is a multiple of the alignment size. 534 557 535 if ( unlikely( size > ULONG_MAX - sizeof(Heap Manager.Storage) ) ) return 0p;536 size_t tsize = size + sizeof(Heap Manager.Storage);558 if ( unlikely( size > ULONG_MAX - sizeof(Heap.Storage) ) ) return 0p; 559 size_t tsize = size + sizeof(Heap.Storage); 537 560 if ( likely( tsize < mmapStart ) ) { // small size => sbrk 538 561 size_t posn; … … 542 565 #endif // FASTLOOKUP 543 566 posn = Bsearchl( (unsigned int)tsize, bucketSizes, (size_t)maxBucketsUsed ); 544 Heap Manager.FreeHeader * freeElem = &freeLists[posn];567 Heap.FreeHeader * freeElem = &freeLists[posn]; 545 568 verify( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ? 546 569 verify( tsize <= freeElem->blockSize ); // search failure ? … … 563 586 // and then carve it off. 564 587 565 block = (Heap Manager.Storage *)extend( tsize ); // mutual exclusion on call588 block = (Heap.Storage *)extend( tsize ); // mutual exclusion on call 566 589 #if BUCKETLOCK == SPINLOCK 567 590 } else { … … 576 599 tsize = ceiling2( tsize, __page_size ); // must be multiple of page size 577 600 #ifdef __STATISTICS__ 578 __atomic_add_fetch( & mmap_calls, 1, __ATOMIC_SEQ_CST );579 __atomic_add_fetch( & mmap_storage_request, size, __ATOMIC_SEQ_CST );580 __atomic_add_fetch( & mmap_storage_alloc, tsize, __ATOMIC_SEQ_CST );581 #endif // __STATISTICS__ 582 583 block = (Heap Manager.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );584 if ( block == (Heap Manager.Storage *)MAP_FAILED ) { // failed ?601 __atomic_add_fetch( &stats.mmap_calls, 1, __ATOMIC_SEQ_CST ); 602 __atomic_add_fetch( &stats.mmap_storage_request, size, __ATOMIC_SEQ_CST ); 603 __atomic_add_fetch( &stats.mmap_storage_alloc, tsize, __ATOMIC_SEQ_CST ); 604 #endif // __STATISTICS__ 605 606 block = (Heap.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 ); 607 if ( block == (Heap.Storage *)MAP_FAILED ) { // failed ? 585 608 if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory 586 609 // Do not call strerror( errno ) as it may call malloc. 
587 abort( "(Heap Manager&)0x%p.doMalloc() : internal error, mmap failure, size:%zu errno:%d.", &heapManager, tsize, errno );610 abort( "(Heap &)0x%p.doMalloc() : internal error, mmap failure, size:%zu errno:%d.", &heapManager, tsize, errno ); 588 611 } //if 589 612 #ifdef __CFA_DEBUG__ … … 620 643 #endif // __CFA_DEBUG__ 621 644 622 Heap Manager.Storage.Header * header;623 Heap Manager.FreeHeader * freeElem;645 Heap.Storage.Header * header; 646 Heap.FreeHeader * freeElem; 624 647 size_t size, alignment; // not used (see realloc) 625 648 626 649 if ( headers( "free", addr, header, freeElem, size, alignment ) ) { // mmapped ? 627 650 #ifdef __STATISTICS__ 628 __atomic_add_fetch( & munmap_calls, 1, __ATOMIC_SEQ_CST );629 __atomic_add_fetch( & munmap_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST );630 __atomic_add_fetch( & munmap_storage_alloc, size, __ATOMIC_SEQ_CST );651 __atomic_add_fetch( &stats.munmap_calls, 1, __ATOMIC_SEQ_CST ); 652 __atomic_add_fetch( &stats.munmap_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST ); 653 __atomic_add_fetch( &stats.munmap_storage_alloc, size, __ATOMIC_SEQ_CST ); 631 654 #endif // __STATISTICS__ 632 655 if ( munmap( header, size ) == -1 ) { … … 638 661 #ifdef __CFA_DEBUG__ 639 662 // Set free memory to garbage so subsequent usages might fail. 640 memset( ((Heap Manager.Storage *)header)->data, '\xde', freeElem->blockSize - sizeof( HeapManager.Storage ) );641 //Memset( ((Heap Manager.Storage *)header)->data, freeElem->blockSize - sizeof( HeapManager.Storage ) );663 memset( ((Heap.Storage *)header)->data, '\xde', freeElem->blockSize - sizeof( Heap.Storage ) ); 664 //Memset( ((Heap.Storage *)header)->data, freeElem->blockSize - sizeof( Heap.Storage ) ); 642 665 #endif // __CFA_DEBUG__ 643 666 644 667 #ifdef __STATISTICS__ 645 __atomic_add_fetch( & free_calls, 1, __ATOMIC_SEQ_CST );646 __atomic_add_fetch( & free_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST );647 __atomic_add_fetch( & free_storage_alloc, size, __ATOMIC_SEQ_CST );668 __atomic_add_fetch( &stats.free_calls, 1, __ATOMIC_SEQ_CST ); 669 __atomic_add_fetch( &stats.free_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST ); 670 __atomic_add_fetch( &stats.free_storage_alloc, size, __ATOMIC_SEQ_CST ); 648 671 #endif // __STATISTICS__ 649 672 … … 651 674 lock( freeElem->lock __cfaabi_dbg_ctx2 ); // acquire spin lock 652 675 header->kind.real.next = freeElem->freeList; // push on stack 653 freeElem->freeList = (Heap Manager.Storage *)header;676 freeElem->freeList = (Heap.Storage *)header; 654 677 unlock( freeElem->lock ); // release spin lock 655 678 #else 656 push( freeElem->freeList, *(Heap Manager.Storage *)header );679 push( freeElem->freeList, *(Heap.Storage *)header ); 657 680 #endif // BUCKETLOCK 658 681 } // if … … 669 692 670 693 671 size_t prtFree( Heap Manager& manager ) with( manager ) {694 size_t prtFree( Heap & manager ) with( manager ) { 672 695 size_t total = 0; 673 696 #ifdef __STATISTICS__ … … 682 705 683 706 #if BUCKETLOCK == SPINLOCK 684 for ( Heap Manager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {707 for ( Heap.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) { 685 708 #else 686 709 for(;;) { 687 // for ( Heap Manager.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {688 // for ( Heap Manager.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */) {689 // Heap Manager.Storage * temp = p->header.kind.real.next.top; // FIX ME: direct 
assignent fails, initialization works`710 // for ( Heap.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) { 711 // for ( Heap.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */) { 712 // Heap.Storage * temp = p->header.kind.real.next.top; // FIX ME: direct assignent fails, initialization works` 690 713 // typeof(p) temp = (( p )`next)->top; // FIX ME: direct assignent fails, initialization works` 691 714 // p = temp; … … 710 733 711 734 712 static void ?{}( Heap Manager& manager ) with( manager ) {735 static void ?{}( Heap & manager ) with( manager ) { 713 736 __page_size = sysconf( _SC_PAGESIZE ); 714 737 __map_prot = PROT_READ | PROT_WRITE | PROT_EXEC; … … 726 749 #endif // FASTLOOKUP 727 750 728 if ( ! setMmapStart( default_mmap_start() ) ) {729 abort( "Heap Manager: internal error, mmap start initialization failure." );730 } // if 731 heapExpand = default_heap_expansion();751 if ( ! setMmapStart( malloc_mmap_start() ) ) { 752 abort( "Heap : internal error, mmap start initialization failure." ); 753 } // if 754 heapExpand = malloc_expansion(); 732 755 733 756 char * end = (char *)sbrk( 0 ); 734 757 heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, __page_size ) - end ); // move start of heap to multiple of alignment 735 } // Heap Manager736 737 738 static void ^?{}( Heap Manager& ) {758 } // Heap 759 760 761 static void ^?{}( Heap & ) { 739 762 #ifdef __STATISTICS__ 740 763 if ( traceHeapTerm() ) { … … 743 766 } // if 744 767 #endif // __STATISTICS__ 745 } // ~Heap Manager768 } // ~Heap 746 769 747 770 … … 796 819 // subtract libAlign() because it is already the minimum alignment 797 820 // add sizeof(Storage) for fake header 798 char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(Heap Manager.Storage) );821 char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(Heap.Storage) ); 799 822 800 823 // address in the block of the "next" alignment address 801 char * user = (char *)ceiling2( (uintptr_t)(addr + sizeof(Heap Manager.Storage)), alignment );824 char * user = (char *)ceiling2( (uintptr_t)(addr + sizeof(Heap.Storage)), alignment ); 802 825 803 826 // address of header from malloc 804 Heap Manager.Storage.Header * realHeader = headerAddr( addr );827 Heap.Storage.Header * realHeader = headerAddr( addr ); 805 828 realHeader->kind.real.size = size; // correct size to eliminate above alignment offset 806 829 // address of fake header * before* the alignment location 807 Heap Manager.Storage.Header * fakeHeader = headerAddr( user );830 Heap.Storage.Header * fakeHeader = headerAddr( user ); 808 831 // SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment 809 832 fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader; … … 821 844 #ifdef __STATISTICS__ 822 845 if ( likely( size > 0 ) ) { 823 __atomic_add_fetch( & malloc_calls, 1, __ATOMIC_SEQ_CST );824 __atomic_add_fetch( & malloc_storage_request, size, __ATOMIC_SEQ_CST );846 __atomic_add_fetch( &stats.malloc_calls, 1, __ATOMIC_SEQ_CST ); 847 __atomic_add_fetch( &stats.malloc_storage_request, size, __ATOMIC_SEQ_CST ); 825 848 } else { 826 __atomic_add_fetch( & malloc_0_calls, 1, __ATOMIC_SEQ_CST );849 __atomic_add_fetch( &stats.malloc_0_calls, 1, __ATOMIC_SEQ_CST ); 827 850 } // if 828 851 #endif // __STATISTICS__ … … 837 860 #ifdef __STATISTICS__ 838 861 if ( likely( size > 0 ) ) { 839 __atomic_add_fetch( & aalloc_calls, 1, __ATOMIC_SEQ_CST );840 __atomic_add_fetch( & 
aalloc_storage_request, size, __ATOMIC_SEQ_CST );862 __atomic_add_fetch( &stats.aalloc_calls, 1, __ATOMIC_SEQ_CST ); 863 __atomic_add_fetch( &stats.aalloc_storage_request, size, __ATOMIC_SEQ_CST ); 841 864 } else { 842 __atomic_add_fetch( & aalloc_0_calls, 1, __ATOMIC_SEQ_CST );865 __atomic_add_fetch( &stats.aalloc_0_calls, 1, __ATOMIC_SEQ_CST ); 843 866 } // if 844 867 #endif // __STATISTICS__ … … 853 876 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER 854 877 #ifdef __STATISTICS__ 855 __atomic_add_fetch( & calloc_0_calls, 1, __ATOMIC_SEQ_CST );878 __atomic_add_fetch( &stats.calloc_0_calls, 1, __ATOMIC_SEQ_CST ); 856 879 #endif // __STATISTICS__ 857 880 return 0p; 858 881 } // if 859 882 #ifdef __STATISTICS__ 860 __atomic_add_fetch( & calloc_calls, 1, __ATOMIC_SEQ_CST );861 __atomic_add_fetch( & calloc_storage_request, dim * elemSize, __ATOMIC_SEQ_CST );883 __atomic_add_fetch( &stats.calloc_calls, 1, __ATOMIC_SEQ_CST ); 884 __atomic_add_fetch( &stats.calloc_storage_request, dim * elemSize, __ATOMIC_SEQ_CST ); 862 885 #endif // __STATISTICS__ 863 886 864 887 char * addr = (char *)mallocNoStats( size ); 865 888 866 Heap Manager.Storage.Header * header;867 Heap Manager.FreeHeader * freeElem;889 Heap.Storage.Header * header; 890 Heap.FreeHeader * freeElem; 868 891 size_t bsize, alignment; 869 892 … … 894 917 if ( unlikely( size == 0 ) ) { // special cases 895 918 #ifdef __STATISTICS__ 896 __atomic_add_fetch( & resize_0_calls, 1, __ATOMIC_SEQ_CST );919 __atomic_add_fetch( &stats.resize_0_calls, 1, __ATOMIC_SEQ_CST ); 897 920 #endif // __STATISTICS__ 898 921 free( oaddr ); … … 900 923 } // if 901 924 #ifdef __STATISTICS__ 902 __atomic_add_fetch( & resize_calls, 1, __ATOMIC_SEQ_CST );925 __atomic_add_fetch( &stats.resize_calls, 1, __ATOMIC_SEQ_CST ); 903 926 #endif // __STATISTICS__ 904 927 905 928 if ( unlikely( oaddr == 0p ) ) { 906 929 #ifdef __STATISTICS__ 907 __atomic_add_fetch( & resize_storage_request, size, __ATOMIC_SEQ_CST );930 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 908 931 #endif // __STATISTICS__ 909 932 return mallocNoStats( size ); 910 933 } // if 911 934 912 Heap Manager.Storage.Header * header;913 Heap Manager.FreeHeader * freeElem;935 Heap.Storage.Header * header; 936 Heap.FreeHeader * freeElem; 914 937 size_t bsize, oalign; 915 938 headers( "resize", oaddr, header, freeElem, bsize, oalign ); … … 924 947 925 948 #ifdef __STATISTICS__ 926 __atomic_add_fetch( & resize_storage_request, size, __ATOMIC_SEQ_CST );949 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 927 950 #endif // __STATISTICS__ 928 951 … … 939 962 if ( unlikely( size == 0 ) ) { // special cases 940 963 #ifdef __STATISTICS__ 941 __atomic_add_fetch( & realloc_0_calls, 1, __ATOMIC_SEQ_CST );964 __atomic_add_fetch( &stats.realloc_0_calls, 1, __ATOMIC_SEQ_CST ); 942 965 #endif // __STATISTICS__ 943 966 free( oaddr ); … … 945 968 } // if 946 969 #ifdef __STATISTICS__ 947 __atomic_add_fetch( & realloc_calls, 1, __ATOMIC_SEQ_CST );970 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 948 971 #endif // __STATISTICS__ 949 972 950 973 if ( unlikely( oaddr == 0p ) ) { 951 974 #ifdef __STATISTICS__ 952 __atomic_add_fetch( & realloc_storage_request, size, __ATOMIC_SEQ_CST );975 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 953 976 #endif // __STATISTICS__ 954 977 return mallocNoStats( size ); 955 978 } // if 956 979 957 Heap Manager.Storage.Header * header;958 Heap Manager.FreeHeader * 
freeElem;980 Heap.Storage.Header * header; 981 Heap.FreeHeader * freeElem; 959 982 size_t bsize, oalign; 960 983 headers( "realloc", oaddr, header, freeElem, bsize, oalign ); … … 972 995 973 996 #ifdef __STATISTICS__ 974 __atomic_add_fetch( & realloc_storage_request, size, __ATOMIC_SEQ_CST );997 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 975 998 #endif // __STATISTICS__ 976 999 … … 1002 1025 #ifdef __STATISTICS__ 1003 1026 if ( likely( size > 0 ) ) { 1004 __atomic_add_fetch( & memalign_calls, 1, __ATOMIC_SEQ_CST );1005 __atomic_add_fetch( & memalign_storage_request, size, __ATOMIC_SEQ_CST );1027 __atomic_add_fetch( &stats.memalign_calls, 1, __ATOMIC_SEQ_CST ); 1028 __atomic_add_fetch( &stats.memalign_storage_request, size, __ATOMIC_SEQ_CST ); 1006 1029 } else { 1007 __atomic_add_fetch( & memalign_0_calls, 1, __ATOMIC_SEQ_CST );1030 __atomic_add_fetch( &stats.memalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1008 1031 } // if 1009 1032 #endif // __STATISTICS__ … … 1018 1041 #ifdef __STATISTICS__ 1019 1042 if ( likely( size > 0 ) ) { 1020 __atomic_add_fetch( & cmemalign_calls, 1, __ATOMIC_SEQ_CST );1021 __atomic_add_fetch( & cmemalign_storage_request, size, __ATOMIC_SEQ_CST );1043 __atomic_add_fetch( &stats.cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 1044 __atomic_add_fetch( &stats.cmemalign_storage_request, size, __ATOMIC_SEQ_CST ); 1022 1045 } else { 1023 __atomic_add_fetch( & cmemalign_0_calls, 1, __ATOMIC_SEQ_CST );1046 __atomic_add_fetch( &stats.cmemalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1024 1047 } // if 1025 1048 #endif // __STATISTICS__ … … 1034 1057 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER 1035 1058 #ifdef __STATISTICS__ 1036 __atomic_add_fetch( & cmemalign_0_calls, 1, __ATOMIC_SEQ_CST );1059 __atomic_add_fetch( &stats.cmemalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1037 1060 #endif // __STATISTICS__ 1038 1061 return 0p; 1039 1062 } // if 1040 1063 #ifdef __STATISTICS__ 1041 __atomic_add_fetch( & cmemalign_calls, 1, __ATOMIC_SEQ_CST );1042 __atomic_add_fetch( & cmemalign_storage_request, dim * elemSize, __ATOMIC_SEQ_CST );1064 __atomic_add_fetch( &stats.cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 1065 __atomic_add_fetch( &stats.cmemalign_storage_request, dim * elemSize, __ATOMIC_SEQ_CST ); 1043 1066 #endif // __STATISTICS__ 1044 1067 1045 1068 char * addr = (char *)memalignNoStats( alignment, size ); 1046 1069 1047 Heap Manager.Storage.Header * header;1048 Heap Manager.FreeHeader * freeElem;1070 Heap.Storage.Header * header; 1071 Heap.FreeHeader * freeElem; 1049 1072 size_t bsize; 1050 1073 … … 1104 1127 if ( unlikely( addr == 0p ) ) { // special case 1105 1128 #ifdef __STATISTICS__ 1106 __atomic_add_fetch( & free_null_calls, 1, __ATOMIC_SEQ_CST );1129 __atomic_add_fetch( &stats.free_null_calls, 1, __ATOMIC_SEQ_CST ); 1107 1130 #endif // __STATISTICS__ 1108 1131 … … 1124 1147 size_t malloc_alignment( void * addr ) { 1125 1148 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment 1126 Heap Manager.Storage.Header * header = headerAddr( addr );1149 Heap.Storage.Header * header = headerAddr( addr ); 1127 1150 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 
1128 1151 return header->kind.fake.alignment & -2; // remove flag from value … … 1137 1160 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment 1138 1161 size_t ret; 1139 Heap Manager.Storage.Header * header = headerAddr( addr );1162 Heap.Storage.Header * header = headerAddr( addr ); 1140 1163 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 1141 1164 ret = header->kind.fake.alignment & -2; // remove flag from old value … … 1151 1174 bool malloc_zero_fill( void * addr ) { 1152 1175 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 1153 Heap Manager.Storage.Header * header = headerAddr( addr );1176 Heap.Storage.Header * header = headerAddr( addr ); 1154 1177 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 1155 1178 header = realHeader( header ); // backup from fake to real header … … 1161 1184 bool malloc_zero_fill_set$( void * addr ) { 1162 1185 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 1163 Heap Manager.Storage.Header * header = headerAddr( addr );1186 Heap.Storage.Header * header = headerAddr( addr ); 1164 1187 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 1165 1188 header = realHeader( header ); // backup from fake to real header … … 1174 1197 size_t malloc_size( void * addr ) { 1175 1198 if ( unlikely( addr == 0p ) ) return 0; // null allocation has zero size 1176 Heap Manager.Storage.Header * header = headerAddr( addr );1199 Heap.Storage.Header * header = headerAddr( addr ); 1177 1200 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 1178 1201 header = realHeader( header ); // backup from fake to real header … … 1184 1207 size_t malloc_size_set$( void * addr, size_t size ) { 1185 1208 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size 1186 Heap Manager.Storage.Header * header = headerAddr( addr );1209 Heap.Storage.Header * header = headerAddr( addr ); 1187 1210 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 1188 1211 header = realHeader( header ); // backup from fake to real header … … 1198 1221 size_t malloc_usable_size( void * addr ) { 1199 1222 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size 1200 Heap Manager.Storage.Header * header;1201 Heap Manager.FreeHeader * freeElem;1223 Heap.Storage.Header * header; 1224 Heap.FreeHeader * freeElem; 1202 1225 size_t bsize, alignment; 1203 1226 … … 1275 1298 return 0; // unsupported 1276 1299 } // malloc_set_state 1300 1301 // Sets the amount (bytes) to extend the heap when there is insufficent free storage to service an allocation. 1302 __attribute__((weak)) size_t malloc_expansion() { return __CFA_DEFAULT_HEAP_EXPANSION__; } 1303 1304 // Sets the crossover point between allocations occuring in the sbrk area or separately mmapped. 1305 __attribute__((weak)) size_t malloc_mmap_start() { return __CFA_DEFAULT_MMAP_START__; } 1306 1307 // Amount subtracted to adjust for unfreed program storage (debug only). 
1308 __attribute__((weak)) size_t malloc_unfreed() { return __CFA_DEFAULT_HEAP_UNFREED__; } 1277 1309 } // extern "C" 1278 1310 … … 1283 1315 if ( unlikely( size == 0 ) ) { // special cases 1284 1316 #ifdef __STATISTICS__ 1285 __atomic_add_fetch( & resize_0_calls, 1, __ATOMIC_SEQ_CST );1317 __atomic_add_fetch( &stats.resize_0_calls, 1, __ATOMIC_SEQ_CST ); 1286 1318 #endif // __STATISTICS__ 1287 1319 free( oaddr ); … … 1296 1328 if ( unlikely( oaddr == 0p ) ) { 1297 1329 #ifdef __STATISTICS__ 1298 __atomic_add_fetch( & resize_calls, 1, __ATOMIC_SEQ_CST );1299 __atomic_add_fetch( & resize_storage_request, size, __ATOMIC_SEQ_CST );1330 __atomic_add_fetch( &stats.resize_calls, 1, __ATOMIC_SEQ_CST ); 1331 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 1300 1332 #endif // __STATISTICS__ 1301 1333 return memalignNoStats( nalign, size ); … … 1303 1335 1304 1336 // Attempt to reuse existing alignment. 1305 Heap Manager.Storage.Header * header = headerAddr( oaddr );1337 Heap.Storage.Header * header = headerAddr( oaddr ); 1306 1338 bool isFakeHeader = header->kind.fake.alignment & 1; // old fake header ? 1307 1339 size_t oalign; … … 1313 1345 ) { 1314 1346 headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same) 1315 Heap Manager.FreeHeader * freeElem;1347 Heap.FreeHeader * freeElem; 1316 1348 size_t bsize, oalign; 1317 1349 headers( "resize", oaddr, header, freeElem, bsize, oalign ); … … 1332 1364 1333 1365 #ifdef __STATISTICS__ 1334 __atomic_add_fetch( & resize_storage_request, size, __ATOMIC_SEQ_CST );1366 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 1335 1367 #endif // __STATISTICS__ 1336 1368 … … 1345 1377 if ( unlikely( size == 0 ) ) { // special cases 1346 1378 #ifdef __STATISTICS__ 1347 __atomic_add_fetch( & realloc_0_calls, 1, __ATOMIC_SEQ_CST );1379 __atomic_add_fetch( &stats.realloc_0_calls, 1, __ATOMIC_SEQ_CST ); 1348 1380 #endif // __STATISTICS__ 1349 1381 free( oaddr ); … … 1358 1390 if ( unlikely( oaddr == 0p ) ) { 1359 1391 #ifdef __STATISTICS__ 1360 __atomic_add_fetch( & realloc_calls, 1, __ATOMIC_SEQ_CST );1361 __atomic_add_fetch( & realloc_storage_request, size, __ATOMIC_SEQ_CST );1392 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 1393 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 1362 1394 #endif // __STATISTICS__ 1363 1395 return memalignNoStats( nalign, size ); … … 1365 1397 1366 1398 // Attempt to reuse existing alignment. 1367 Heap Manager.Storage.Header * header = headerAddr( oaddr );1399 Heap.Storage.Header * header = headerAddr( oaddr ); 1368 1400 bool isFakeHeader = header->kind.fake.alignment & 1; // old fake header ? 1369 1401 size_t oalign; … … 1382 1414 1383 1415 #ifdef __STATISTICS__ 1384 __atomic_add_fetch( & realloc_calls, 1, __ATOMIC_SEQ_CST );1385 __atomic_add_fetch( & realloc_storage_request, size, __ATOMIC_SEQ_CST );1416 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 1417 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 1386 1418 #endif // __STATISTICS__ 1387 1419 1388 Heap Manager.FreeHeader * freeElem;1420 Heap.FreeHeader * freeElem; 1389 1421 size_t bsize; 1390 1422 headers( "realloc", oaddr, header, freeElem, bsize, oalign ); -
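The statistics rework above folds the separate per-operation globals into one HeapStatistics union: every operation contributes a calls / calls_0 / request / alloc group by name, while the counters[CntTriples] member overlays the same storage so HeapStatisticsCtor and ?+=? can zero and sum all groups in a single loop, with a static_assert guaranteeing the two views stay the same size. The following standalone C11 sketch illustrates that overlay pattern with just two operations; it is an illustration of the technique only, not code from this changeset.

#include <assert.h>					// static_assert (C11)
#include <string.h>					// memset

enum { CntTriples = 2 };				// number of counter groups in this sketch

struct StatsOverlay {					// overlay for iteration
	unsigned int calls, calls_0;
	unsigned long long int request, alloc;
};

union Stats {
	struct {					// named access (anonymous struct, C11)
		unsigned int malloc_calls, malloc_0_calls;
		unsigned long long int malloc_storage_request, malloc_storage_alloc;
		unsigned int free_calls, free_null_calls;
		unsigned long long int free_storage_request, free_storage_alloc;
	};
	struct StatsOverlay counters[CntTriples];	// iterated access
}; // Stats

static_assert( sizeof(union Stats) == CntTriples * sizeof(struct StatsOverlay),
	"counter groups do not match array size" );

static void add( union Stats * lhs, const union Stats * rhs ) { // accumulate, as ?+=? does above
	for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
		lhs->counters[i].calls += rhs->counters[i].calls;
		lhs->counters[i].calls_0 += rhs->counters[i].calls_0;
		lhs->counters[i].request += rhs->counters[i].request;
		lhs->counters[i].alloc += rhs->counters[i].alloc;
	} // for
} // add

int main() {
	union Stats total, local;
	memset( &total, '\0', sizeof(total) );		// zero both views at once, as HeapStatisticsCtor does
	memset( &local, '\0', sizeof(local) );
	local.malloc_calls = 3;  local.free_calls = 1;
	add( &total, &local );
	return total.malloc_calls == 3 && total.free_calls == 1 ? 0 : 1;
} // main

Because the named fields and the overlay share storage, adding another operation only means extending the named struct in the same calls / calls_0 / request / alloc order and bumping CntTriples; the static_assert catches any mismatch.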
libcfa/src/heap.hfa
r16cc9f7 → r31a5f418

  // Created On : Tue May 26 11:23:55 2020
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Sat Aug 8 17:36:48 2020
- // Update Count : 16
+ // Last Modified On : Thu Apr 21 22:52:25 2022
+ // Update Count : 21
  //

  #pragma once

- size_t default_mmap_start();					// CFA extras
- size_t default_heap_expansion();
+ #include <malloc.h>

  bool traceHeap();
…
  #ifndef M_MMAP_THRESHOLD
  #define M_MMAP_THRESHOLD (-1)
- #endif // M_TOP_PAD
+ #endif // M_MMAP_THRESHOLD
+
  #ifndef M_TOP_PAD
  #define M_TOP_PAD (-2)
…
  extern "C" {
- 	void * aalloc( size_t dim, size_t elemSize );
- 	void * resize( void * oaddr, size_t size );
- 	void * amemalign( size_t align, size_t dim, size_t elemSize );
- 	void * cmemalign( size_t align, size_t dim, size_t elemSize );
+ 	// New allocation operations.
+ 	void * aalloc( size_t dim, size_t elemSize ) __attribute__ ((malloc));
+ 	void * resize( void * oaddr, size_t size ) __attribute__ ((malloc));
+ 	void * amemalign( size_t align, size_t dim, size_t elemSize ) __attribute__ ((malloc));
+ 	void * cmemalign( size_t align, size_t dim, size_t elemSize ) __attribute__ ((malloc));
  	size_t malloc_alignment( void * addr );
  	bool malloc_zero_fill( void * addr );
  	size_t malloc_size( void * addr );
+ 	int malloc_stats_fd( int fd );
  	size_t malloc_usable_size( void * addr );
- 	int malloc_stats_fd( int fd );
+ 	size_t malloc_expansion();				// heap expansion size (bytes)
+ 	size_t malloc_mmap_start();				// crossover allocation size from sbrk to mmap
+ 	size_t malloc_unfreed();				// heap unfreed size (bytes)
  } // extern "C"

- void * resize( void * oaddr, size_t nalign, size_t size );
- void * realloc( void * oaddr, size_t nalign, size_t size );
+ void * resize( void * oaddr, size_t alignment, size_t size );
+ void * realloc( void * oaddr, size_t alignment, size_t size );
+ void * reallocarray( void * oaddr, size_t nalign, size_t dim, size_t elemSize );

  // Local Variables: //
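The prototypes above extend the C allocation interface with array allocations (aalloc, amemalign, cmemalign) and allocator tuning queries (malloc_expansion, malloc_mmap_start, malloc_unfreed). Below is a hedged usage sketch in plain C: the semantics shown (aalloc returns an uninitialized dim * elemSize array, cmemalign a zero-filled array aligned to the given power-of-two boundary) are inferred from the heap.cfa implementation in this changeset, the prototypes are repeated locally rather than including <heap.hfa>, and linking against libcfa is assumed.

#include <stdio.h>
#include <stdlib.h>					// free
#include <stdbool.h>

extern void * aalloc( size_t dim, size_t elemSize );	// dim elements of elemSize bytes, uninitialized
extern void * cmemalign( size_t align, size_t dim, size_t elemSize ); // zero filled and aligned
extern size_t malloc_alignment( void * addr );
extern bool malloc_zero_fill( void * addr );
extern size_t malloc_expansion();			// sbrk extension amount (bytes)

int main() {
	double * a = aalloc( 100, sizeof(double) );		// 100 doubles, contents undefined
	double * z = cmemalign( 64, 100, sizeof(double) );	// 100 doubles, zeroed, 64-byte aligned
	printf( "alignment %zu, zero fill %d, expansion %zu bytes\n",
		malloc_alignment( z ), malloc_zero_fill( z ), malloc_expansion() );
	free( a );
	free( z );
} // main

Because malloc_expansion(), malloc_mmap_start(), and malloc_unfreed() are defined with __attribute__(( weak )) in heap.cfa, a program can also override these defaults by supplying its own strong definitions.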