source: libcfa/src/heap.cfa@ 950c58e

Last change on this file since 950c58e was a47fe52, checked in by Peter A. Buhr <pabuhr@…>, 2 years ago

missing add of terminating thread-heap statistics to master heap, check for environment variable CFA_MALLOC_STATS and print heap statistics at program termination

//
// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// heap.cfa --
//
// Author : Peter A. Buhr
// Created On : Tue Dec 19 21:58:35 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Aug 2 18:48:30 2023
// Update Count : 1614
//

#include <stdio.h>
#include <string.h> // memset, memcpy
#include <limits.h> // ULONG_MAX
#include <errno.h> // errno, ENOMEM, EINVAL
#include <unistd.h> // STDERR_FILENO, sbrk, sysconf, write
#include <sys/mman.h> // mmap, munmap
extern "C" {
#include <sys/sysinfo.h> // get_nprocs
} // extern "C"

#include "heap.hfa"
#include "bits/align.hfa" // libAlign
#include "bits/defs.hfa" // likely, unlikely
#include "concurrency/kernel/fwd.hfa" // __POLL_PREEMPTION
#include "startup.hfa" // STARTUP_PRIORITY_MEMORY
#include "math.hfa" // ceiling, min
#include "bitmanip.hfa" // is_pow2, ceiling2

// supported mallopt options
#ifndef M_MMAP_THRESHOLD
#define M_MMAP_THRESHOLD (-1)
#endif // M_MMAP_THRESHOLD

#ifndef M_TOP_PAD
#define M_TOP_PAD (-2)
#endif // M_TOP_PAD

#define FASTLOOKUP // use O(1) table lookup from allocation size to bucket size
#define OWNERSHIP // return freed memory to owner thread
#define RETURNSPIN // toggle spinlock / lockfree queue
#if ! defined( OWNERSHIP ) && defined( RETURNSPIN )
#warning "RETURNSPIN is ignored without OWNERSHIP; suggest commenting out RETURNSPIN"
#endif // ! OWNERSHIP && RETURNSPIN
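
// Ownership sketch: each storage block records its home bucket (freeList) in the owning thread's heap. When a thread
// frees a block it does not own, the block is pushed onto the home bucket's returnList (spinlock or lock-free CAS,
// per RETURNSPIN); the owner later pulls the entire returnList into its freeList when that freeList runs empty.
// E.g., thread A allocates a block, passes it to thread B, and B frees it: B pushes the block onto A's returnList,
// and A reclaims it on a subsequent allocation of that bucket size.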

#define CACHE_ALIGN 64
#define CALIGN __attribute__(( aligned(CACHE_ALIGN) ))

#define TLSMODEL __attribute__(( tls_model("initial-exec") ))

//#define __STATISTICS__

enum {
    // The default extension heap amount in units of bytes. When the current heap reaches the brk address, the brk
    // address is extended by the extension amount.
    __CFA_DEFAULT_HEAP_EXPANSION__ = 10 * 1024 * 1024,

    // The mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; values
    // greater than or equal to this value are mmapped from the operating system.
    __CFA_DEFAULT_MMAP_START__ = 512 * 1024 + 1,

    // The default unfreed storage amount in units of bytes. When the program ends, this amount is subtracted from
    // the malloc/free counter to adjust for storage the program does not free.
    __CFA_DEFAULT_HEAP_UNFREED__ = 0
}; // enum


//####################### Heap Trace/Print ####################


static bool traceHeap = false;

inline bool traceHeap() libcfa_public { return traceHeap; }

bool traceHeapOn() libcfa_public {
    bool temp = traceHeap;
    traceHeap = true;
    return temp;
} // traceHeapOn

bool traceHeapOff() libcfa_public {
    bool temp = traceHeap;
    traceHeap = false;
    return temp;
} // traceHeapOff

bool traceHeapTerm() libcfa_public { return false; }


static bool prtFree = false;

bool prtFree() {
    return prtFree;
} // prtFree

bool prtFreeOn() {
    bool temp = prtFree;
    prtFree = true;
    return temp;
} // prtFreeOn

bool prtFreeOff() {
    bool temp = prtFree;
    prtFree = false;
    return temp;
} // prtFreeOff


//######################### Helpers #########################


// generic Bsearchl does not inline, so substitute with hand-coded binary-search.
inline __attribute__((always_inline))
static size_t Bsearchl( unsigned int key, const unsigned int vals[], size_t dim ) {
    size_t l = 0, m, h = dim;
    while ( l < h ) {
        m = (l + h) / 2;
        if ( (unsigned int &)(vals[m]) < key ) { // cast away const
            l = m + 1;
        } else {
            h = m;
        } // if
    } // while
    return l;
} // Bsearchl
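
// Bsearchl is a lower-bound search: it returns the index of the first element >= key, or dim if all elements are
// smaller. E.g., Bsearchl( request, bucketSizes, NoBucketSizes ) yields the smallest bucket that can hold the request.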


// pause to prevent excess processor bus usage
#if defined( __i386 ) || defined( __x86_64 )
    #define Pause() __asm__ __volatile__ ( "pause" : : : )
#elif defined(__ARM_ARCH)
    #define Pause() __asm__ __volatile__ ( "YIELD" : : : )
#else
    #error unsupported architecture
#endif

typedef volatile uintptr_t SpinLock_t;

static inline __attribute__((always_inline)) void lock( volatile SpinLock_t & slock ) {
    enum { SPIN_START = 4, SPIN_END = 64 * 1024, };
    unsigned int spin = SPIN_START;

    for ( unsigned int i = 1;; i += 1 ) {
        if ( slock == 0 && __atomic_test_and_set( &slock, __ATOMIC_ACQUIRE ) == 0 ) break; // Fence
        for ( volatile unsigned int s = 0; s < spin; s += 1 ) Pause(); // exponential spin
        spin += spin; // powers of 2
        //if ( i % 64 == 0 ) spin += spin; // slowly increase by powers of 2
        if ( spin > SPIN_END ) spin = SPIN_END; // cap spinning
    } // for
} // lock

static inline __attribute__((always_inline)) void unlock( volatile SpinLock_t & slock ) {
    __atomic_clear( &slock, __ATOMIC_RELEASE ); // Fence
} // unlock
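
// Usage sketch: a test-and-set spinlock with exponential backoff, e.g.,
//   static SpinLock_t exampleLock = 0;
//   lock( exampleLock ); /* critical section */ unlock( exampleLock );
// The acquire/release fences order the critical section with respect to the lock operations.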


//####################### Heap Statistics ####################


#ifdef __STATISTICS__
enum { CntTriples = 12 }; // number of counter triples
enum { MALLOC, AALLOC, CALLOC, MEMALIGN, AMEMALIGN, CMEMALIGN, RESIZE, REALLOC, FREE };

struct StatsOverlay { // overlay for iteration
    unsigned int calls, calls_0;
    unsigned long long int request, alloc;
};

// Heap statistics counters.
union HeapStatistics {
    struct { // minimum qualification
        unsigned int malloc_calls, malloc_0_calls;
        unsigned long long int malloc_storage_request, malloc_storage_alloc;
        unsigned int aalloc_calls, aalloc_0_calls;
        unsigned long long int aalloc_storage_request, aalloc_storage_alloc;
        unsigned int calloc_calls, calloc_0_calls;
        unsigned long long int calloc_storage_request, calloc_storage_alloc;
        unsigned int memalign_calls, memalign_0_calls;
        unsigned long long int memalign_storage_request, memalign_storage_alloc;
        unsigned int amemalign_calls, amemalign_0_calls;
        unsigned long long int amemalign_storage_request, amemalign_storage_alloc;
        unsigned int cmemalign_calls, cmemalign_0_calls;
        unsigned long long int cmemalign_storage_request, cmemalign_storage_alloc;
        unsigned int resize_calls, resize_0_calls;
        unsigned long long int resize_storage_request, resize_storage_alloc;
        unsigned int realloc_calls, realloc_0_calls;
        unsigned long long int realloc_storage_request, realloc_storage_alloc;
        unsigned int free_calls, free_null_calls;
        unsigned long long int free_storage_request, free_storage_alloc;
        unsigned int return_pulls, return_pushes;
        unsigned long long int return_storage_request, return_storage_alloc;
        unsigned int mmap_calls, mmap_0_calls; // no zero calls
        unsigned long long int mmap_storage_request, mmap_storage_alloc;
        unsigned int munmap_calls, munmap_0_calls; // no zero calls
        unsigned long long int munmap_storage_request, munmap_storage_alloc;
    };
    struct StatsOverlay counters[CntTriples]; // overlay for iteration
}; // HeapStatistics

static_assert( sizeof(HeapStatistics) == CntTriples * sizeof(StatsOverlay),
    "Heap statistics counter-triples do not match the array size" );

static void HeapStatisticsCtor( HeapStatistics & stats ) {
    memset( &stats, '\0', sizeof(stats) ); // very fast
    // for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
    //     stats.counters[i].calls = stats.counters[i].calls_0 = stats.counters[i].request = stats.counters[i].alloc = 0;
    // } // for
} // HeapStatisticsCtor

static HeapStatistics & ?+=?( HeapStatistics & lhs, const HeapStatistics & rhs ) {
    for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
        lhs.counters[i].calls += rhs.counters[i].calls;
        lhs.counters[i].calls_0 += rhs.counters[i].calls_0;
        lhs.counters[i].request += rhs.counters[i].request;
        lhs.counters[i].alloc += rhs.counters[i].alloc;
    } // for
    return lhs;
} // ?+=?
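
// The anonymous struct and the counters array overlay the same storage, so the named fields can be read individually
// while ?+=? sums all CntTriples (12) four-field groups in one loop. E.g., lhs.counters[0] aliases the malloc_*
// fields, so lhs.counters[0].calls += rhs.counters[0].calls is equivalent to lhs.malloc_calls += rhs.malloc_calls.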
#endif // __STATISTICS__


// Recursive definitions: HeapManager needs the size of the bucket array, and the bucket area needs the size of the
// HeapManager storage. Break the recursion by hardcoding the number of buckets and statically checking the number is
// correct after the bucket array is defined.
enum { NoBucketSizes = 91 }; // number of bucket sizes

struct Heap {
    struct Storage {
        struct Header { // header
            union Kind {
                struct RealHeader {
                    union {
                        struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header
                            union {
                                // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped
                                // FreeHeader * home; // allocated block points back to home locations (must overlay alignment)
                                void * home; // allocated block points back to home locations (must overlay alignment)
                                size_t blockSize; // size for munmap (must overlay alignment)
                                Storage * next; // freed block points to next freed block of same size
                            };
                            size_t size; // allocation size in bytes
                        };
                    };
                } real; // RealHeader

                struct FakeHeader {
                    uintptr_t alignment; // 1st low-order bit => fake header & alignment
                    uintptr_t offset;
                } fake; // FakeHeader
            } kind; // Kind
        } header; // Header

        char pad[libAlign() - sizeof( Header )];
        char data[0]; // storage
    }; // Storage

    static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" );
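
    // Layout sketch (assuming a 64-bit build where libAlign() is 16): Header is two overlaid 16-byte variants, pad is
    // empty, and data starts at offset 16, so every allocation carries one 16-byte prefix. For an allocated block,
    // home/blockSize/size are live; for a freed block, next threads the bucket free list through the same storage.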

    struct CALIGN FreeHeader {
        size_t blockSize CALIGN; // size of allocations on this list
        #ifdef OWNERSHIP
        #ifdef RETURNSPIN
        SpinLock_t returnLock;
        #endif // RETURNSPIN
        Storage * returnList; // other thread return list
        #endif // OWNERSHIP

        Storage * freeList; // thread free list
        Heap * homeManager; // heap owner (free storage to bucket, from bucket to heap)
    }; // FreeHeader

    FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes
    void * heapBuffer; // start of free storage in buffer
    size_t heapReserve; // amount of remaining free storage in buffer

    #if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
    Heap * nextHeapManager; // intrusive link of existing heaps; traversed to collect statistics or check unfreed storage
    #endif // __STATISTICS__ || __CFA_DEBUG__
    Heap * nextFreeHeapManager; // intrusive link of free heaps from terminated threads; reused by new threads

    #ifdef __CFA_DEBUG__
    ptrdiff_t allocUnfreed; // running total of allocations minus frees; can be negative
    #endif // __CFA_DEBUG__

    #ifdef __STATISTICS__
    HeapStatistics stats; // local statistic table for this heap
    #endif // __STATISTICS__
}; // Heap


struct HeapMaster {
    SpinLock_t extLock; // protects allocation-buffer extension
    SpinLock_t mgrLock; // protects freeHeapManagersList, heapManagersList, heapManagersStorage, heapManagersStorageEnd

    void * heapBegin; // start of heap
    void * heapEnd; // logical end of heap
    size_t heapRemaining; // amount of storage not allocated in the current chunk
    size_t pageSize; // architecture pagesize
    size_t heapExpand; // sbrk advance
    size_t mmapStart; // cross over point for mmap
    unsigned int maxBucketsUsed; // maximum number of buckets in use

    Heap * heapManagersList; // heap-list head
    Heap * freeHeapManagersList; // free-list head

    // Heap superblocks are not linked; heaps in superblocks are linked via intrusive links.
    Heap * heapManagersStorage; // next heap to use in heap superblock
    Heap * heapManagersStorageEnd; // logical heap outside of superblock's end

    #ifdef __STATISTICS__
    HeapStatistics stats; // global stats where thread-local heaps add their counters when exiting
    unsigned long int threads_started, threads_exited; // counts threads that have started and exited
    unsigned long int reused_heap, new_heap; // counts reusability of heaps
    unsigned int sbrk_calls;
    unsigned long long int sbrk_storage;
    int stats_fd;
    #endif // __STATISTICS__
}; // HeapMaster


#ifdef FASTLOOKUP
enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes
static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes
#endif // FASTLOOKUP

static volatile bool heapMasterBootFlag = false; // trigger for first heap
static HeapMaster heapMaster @= {}; // program global

static void heapMasterCtor();
static void heapMasterDtor();
static Heap * getHeap();


// Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16.
// Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size.
// malloc(0) returns 0p, so no bucket is necessary for 0 bytes, i.e., no address is returned that must be freeable.
static const unsigned int bucketSizes[] @= { // different bucket sizes
    16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4
    96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3
    160, 192, 224, 256 + sizeof(Heap.Storage), // 4
    320, 384, 448, 512 + sizeof(Heap.Storage), // 4
    640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4
    1_536, 2_048 + sizeof(Heap.Storage), // 2
    2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4
    6_144, 8_192 + sizeof(Heap.Storage), // 2
    9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8
    18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8
    36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8
    73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8
    147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8
    294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8
    655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4
    1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8
    2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4
};

static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" );
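
// Bucket-mapping sketch (assuming sizeof(Heap.Storage) is 16): a malloc(100) request needs 100 + 16 = 116 total
// bytes; Bsearchl over bucketSizes selects the first bucket >= 116, i.e., 112 + 16 = 128, wasting 12 bytes. The
// "+ sizeof(Heap.Storage)" entries make common power-of-2 requests fit exactly once the header is added.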


// extern visibility, used by runtime kernel
libcfa_public size_t __page_size; // architecture pagesize
libcfa_public int __map_prot; // common mmap/mprotect protection


// Thread-local storage is allocated lazily when the storage is accessed.
static __thread size_t PAD1 CALIGN TLSMODEL __attribute__(( unused )); // protect false sharing
static __thread Heap * heapManager CALIGN TLSMODEL;
static __thread bool heapManagerBootFlag CALIGN TLSMODEL = false;
static __thread size_t PAD2 CALIGN TLSMODEL __attribute__(( unused )); // protect further false sharing


// declare helper functions for HeapMaster
void noMemory(); // forward, called by "builtin_new" when malloc returns 0


void heapMasterCtor() with( heapMaster ) {
    // Singleton pattern to initialize heap master

    verify( bucketSizes[0] == (16 + sizeof(Heap.Storage)) );

    __page_size = sysconf( _SC_PAGESIZE );
    __map_prot = PROT_READ | PROT_WRITE | PROT_EXEC;

    extLock = 0;
    mgrLock = 0;

    char * end = (char *)sbrk( 0 );
    heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment
    heapRemaining = 0;
    heapExpand = malloc_expansion();
    mmapStart = malloc_mmap_start();

    // find the closest bucket size greater than or equal to the mmapStart size
    maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search

    verify( (mmapStart >= pageSize) && (bucketSizes[NoBucketSizes - 1] >= mmapStart) );
    verify( maxBucketsUsed < NoBucketSizes ); // subscript failure ?
    verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?

    heapManagersList = 0p;
    freeHeapManagersList = 0p;

    heapManagersStorage = 0p;
    heapManagersStorageEnd = 0p;

    #ifdef __STATISTICS__
    HeapStatisticsCtor( stats ); // clear statistic counters
    threads_started = threads_exited = 0;
    reused_heap = new_heap = 0;
    sbrk_calls = sbrk_storage = 0;
    stats_fd = STDERR_FILENO;
    #endif // __STATISTICS__

    #ifdef FASTLOOKUP
    for ( unsigned int i = 0, idx = 0; i < LookupSizes; i += 1 ) {
        if ( i > bucketSizes[idx] ) idx += 1;
        lookup[i] = idx;
        verify( i <= bucketSizes[idx] );
        verify( (i <= 32 && idx == 0) || (i > bucketSizes[idx - 1]) );
    } // for
    #endif // FASTLOOKUP

    heapMasterBootFlag = true;
} // heapMasterCtor


#define NO_MEMORY_MSG "**** Error **** insufficient heap memory available to allocate %zd new bytes."

Heap * getHeap() with( heapMaster ) {
    Heap * heap;
    if ( freeHeapManagersList ) { // free heap for reuse ?
        heap = freeHeapManagersList;
        freeHeapManagersList = heap->nextFreeHeapManager;

        #ifdef __STATISTICS__
        reused_heap += 1;
        #endif // __STATISTICS__
    } else { // free heap not found, create new
        // Each heap is about 12K: FreeHeader (128 bytes because of cache alignment) * NoBucketSizes (91) ~= 12K, so a
        // 128-heap superblock is ~1.5M bytes, which handles a medium-sized multiprocessor server.
        size_t remaining = heapManagersStorageEnd - heapManagersStorage; // remaining free heaps in superblock
        if ( ! heapManagersStorage || remaining == 0 ) {
            // Each block of heaps is a multiple of the number of cores on the computer.
            int HeapDim = get_nprocs(); // get_nprocs_conf does not work
            size_t size = HeapDim * sizeof( Heap );

            heapManagersStorage = (Heap *)mmap( 0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
            if ( unlikely( heapManagersStorage == (Heap *)MAP_FAILED ) ) { // failed ?
                if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, size ); // no memory
                // Do not call strerror( errno ) as it may call malloc.
                abort( "**** Error **** attempt to allocate block of heaps of size %zu bytes and mmap failed with errno %d.", size, errno );
            } // if
            heapManagersStorageEnd = &heapManagersStorage[HeapDim]; // outside array
        } // if

        heap = heapManagersStorage;
        heapManagersStorage = heapManagersStorage + 1; // bump next heap

        #if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
        heap->nextHeapManager = heapManagersList;
        #endif // __STATISTICS__ || __CFA_DEBUG__
        heapManagersList = heap;

        #ifdef __STATISTICS__
        new_heap += 1;
        #endif // __STATISTICS__

        with( *heap ) {
            for ( unsigned int j = 0; j < NoBucketSizes; j += 1 ) { // initialize free lists
                #ifdef OWNERSHIP
                #ifdef RETURNSPIN
                freeLists[j].returnLock = 0;
                #endif // RETURNSPIN
                freeLists[j].returnList = 0p; // needed for both locked and lock-free return lists
                #endif // OWNERSHIP

                freeLists[j].freeList = 0p;
                freeLists[j].homeManager = heap;
                freeLists[j].blockSize = bucketSizes[j];
            } // for

            heapBuffer = 0p;
            heapReserve = 0;
            nextFreeHeapManager = 0p;
            #ifdef __CFA_DEBUG__
            allocUnfreed = 0;
            #endif // __CFA_DEBUG__
            heapManagerBootFlag = true;
        } // with
    } // if

    return heap;
} // getHeap


void heapManagerCtor() libcfa_public {
    if ( unlikely( ! heapMasterBootFlag ) ) heapMasterCtor();

    lock( heapMaster.mgrLock ); // protect heapMaster counters

    assert( ! heapManagerBootFlag );

    // get storage for heap manager

    heapManager = getHeap();

    #ifdef __STATISTICS__
    HeapStatisticsCtor( heapManager->stats ); // heap local
    heapMaster.threads_started += 1;
    #endif // __STATISTICS__

    unlock( heapMaster.mgrLock );
} // heapManagerCtor


void heapManagerDtor() libcfa_public {
    if ( unlikely( ! heapManagerBootFlag ) ) return; // thread never used ?

    lock( heapMaster.mgrLock );

    // place heap on list of free heaps for reusability
    heapManager->nextFreeHeapManager = heapMaster.freeHeapManagersList;
    heapMaster.freeHeapManagersList = heapManager;

    #ifdef __STATISTICS__
    heapMaster.stats += heapManager->stats; // retain this heap's statistics
    heapMaster.threads_exited += 1;
    #endif // __STATISTICS__

    // Do not set heapManager to NULL because it is used after Cforall is shut down but before the program terminates.

    heapManagerBootFlag = false;
    unlock( heapMaster.mgrLock );
} // heapManagerDtor


//####################### Memory Allocation Routines Helpers ####################


extern int cfa_main_returned; // from interpose.cfa
extern "C" {
    void memory_startup( void ) { // singleton => called once at start of program
        if ( ! heapMasterBootFlag ) heapManagerCtor(); // sanity check
    } // memory_startup

    void memory_shutdown( void ) {
        heapManagerDtor();
    } // memory_shutdown

    void heapAppStart() { // called by __cfaabi_appready_startup
        verify( heapManager );
        #ifdef __CFA_DEBUG__
        heapManager->allocUnfreed = 0; // clear prior allocation counts
        #endif // __CFA_DEBUG__

        #ifdef __STATISTICS__
        HeapStatisticsCtor( heapManager->stats ); // clear prior statistic counters
        #endif // __STATISTICS__
    } // heapAppStart

    void heapAppStop() { // called by __cfaabi_appready_startdown
        fclose( stdin ); fclose( stdout ); // free buffer storage
        if ( ! cfa_main_returned ) return; // do not check unfreed storage if exit called

        #ifdef __STATISTICS__
        if ( getenv( "CFA_MALLOC_STATS" ) ) { // check for external printing
            malloc_stats();
        } // if
        #endif // __STATISTICS__

        #ifdef __CFA_DEBUG__
        // allocUnfreed is set to 0 when a heap is created, and it accumulates any unfreed storage during its multiple
        // thread usages. At the end, add up each heap's allocUnfreed value across all heaps to get the total unfreed storage.
        ptrdiff_t allocUnfreed = 0;
        for ( Heap * heap = heapMaster.heapManagersList; heap; heap = heap->nextHeapManager ) {
            allocUnfreed += heap->allocUnfreed;
        } // for

        allocUnfreed -= malloc_unfreed(); // subtract any user specified unfreed storage
        if ( allocUnfreed > 0 ) {
            // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
            char helpText[512];
            __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
                "CFA warning (UNIX pid:%ld) : program terminating with %td(%#tx) bytes of storage allocated but not freed.\n"
                "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
                (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid
        } // if
        #endif // __CFA_DEBUG__
    } // heapAppStop
} // extern "C"


#ifdef __STATISTICS__
static HeapStatistics stats; // zero filled

#define prtFmt \
    "\nHeap statistics: (storage request / allocation)\n" \
    "  malloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
    "  aalloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
    "  calloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
    "  memalign  >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
    "  amemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
    "  cmemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
    "  resize    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
    "  realloc   >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
    "  free      !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n" \
    "  return    pulls %'u; pushes %'u; storage %'llu / %'llu bytes\n" \
    "  sbrk      calls %'u; storage %'llu bytes\n" \
    "  mmap      calls %'u; storage %'llu / %'llu bytes\n" \
    "  munmap    calls %'u; storage %'llu / %'llu bytes\n" \
    "  threads   started %'lu; exited %'lu\n" \
    "  heaps     new %'lu; reused %'lu\n"

// Use "write" because streams may be shut down when calls are made.
static int printStats( HeapStatistics & stats ) with( heapMaster, stats ) { // see malloc_stats
    char helpText[sizeof(prtFmt) + 1024]; // space for message and values
    return __cfaabi_bits_print_buffer( stats_fd, helpText, sizeof(helpText), prtFmt,
        malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc,
        aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc,
        calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc,
        memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc,
        amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc,
        cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc,
        resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc,
        realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc,
        free_calls, free_null_calls, free_storage_request, free_storage_alloc,
        return_pulls, return_pushes, return_storage_request, return_storage_alloc,
        sbrk_calls, sbrk_storage,
        mmap_calls, mmap_storage_request, mmap_storage_alloc,
        munmap_calls, munmap_storage_request, munmap_storage_alloc,
        threads_started, threads_exited,
        new_heap, reused_heap
    );
} // printStats

#define prtFmtXML \
    "<malloc version=\"1\">\n" \
    "<heap nr=\"0\">\n" \
    "<sizes>\n" \
    "</sizes>\n" \
    "<total type=\"malloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
    "<total type=\"aalloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
    "<total type=\"calloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
    "<total type=\"memalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
    "<total type=\"amemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
    "<total type=\"cmemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
    "<total type=\"resize\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
    "<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
    "<total type=\"free\" !null=\"%'u;\" 0 null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
    "<total type=\"return\" pulls=\"%'u;\" 0 pushes=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
    "<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n" \
    "<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\" / > bytes\n" \
    "<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
    "<total type=\"threads\" started=\"%'lu;\" exited=\"%'lu\"/>\n" \
    "<total type=\"heaps\" new=\"%'lu;\" reused=\"%'lu\"/>\n" \
    "</malloc>"

static int printStatsXML( HeapStatistics & stats, FILE * stream ) with( heapMaster, stats ) { // see malloc_info
    char helpText[sizeof(prtFmtXML) + 1024]; // space for message and values
    return __cfaabi_bits_print_buffer( fileno( stream ), helpText, sizeof(helpText), prtFmtXML,
        malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc,
        aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc,
        calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc,
        memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc,
        amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc,
        cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc,
        resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc,
        realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc,
        free_calls, free_null_calls, free_storage_request, free_storage_alloc,
        return_pulls, return_pushes, return_storage_request, return_storage_alloc,
        sbrk_calls, sbrk_storage,
        mmap_calls, mmap_storage_request, mmap_storage_alloc,
        munmap_calls, munmap_storage_request, munmap_storage_alloc,
        threads_started, threads_exited,
        new_heap, reused_heap
    );
} // printStatsXML

static HeapStatistics & collectStats( HeapStatistics & stats ) with( heapMaster ) {
    lock( mgrLock );

    stats += heapMaster.stats;
    for ( Heap * heap = heapManagersList; heap; heap = heap->nextHeapManager ) {
        stats += heap->stats;
    } // for

    unlock( mgrLock );
    return stats;
} // collectStats
#endif // __STATISTICS__


static bool setMmapStart( size_t value ) with( heapMaster ) { // true => mmapped, false => sbrk
    if ( value < __page_size || bucketSizes[NoBucketSizes - 1] < value ) return false;
    mmapStart = value; // set global

    // find the closest bucket size greater than or equal to the mmapStart size
    maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search

    verify( maxBucketsUsed < NoBucketSizes ); // subscript failure ?
    verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
    return true;
} // setMmapStart


// <-------+----------------------------------------------------> bsize (bucket size)
// |header |addr
//==================================================================================
// align/offset |
// <-----------------<------------+-----------------------------> bsize (bucket size)
// |fake-header | addr
#define HeaderAddr( addr ) ((Heap.Storage.Header *)( (char *)addr - sizeof(Heap.Storage) ))
#define RealHeader( header ) ((Heap.Storage.Header *)((char *)header - header->kind.fake.offset))

// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
// |header |addr
//==================================================================================
// align/offset |
// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
// |fake-header |addr
#define DataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
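
// E.g., for a real header (no fake header), addr - header == sizeof(Heap.Storage), so DataStorage yields
// bsize - sizeof(Heap.Storage) usable bytes; for an aligned (fake-header) allocation, the padding between the real
// header and addr is subtracted as well.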


inline __attribute__((always_inline))
static void checkAlign( size_t alignment ) {
    if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) {
        abort( "**** Error **** alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
    } // if
} // checkAlign


inline __attribute__((always_inline))
static void checkHeader( bool check, const char name[], void * addr ) {
    if ( unlikely( check ) ) { // bad address ?
        abort( "**** Error **** attempt to %s storage %p with address outside the heap.\n"
            "Possible cause is duplicate free on same block or overwriting of memory.",
            name, addr );
    } // if
} // checkHeader


// Manipulate sticky bits stored in unused 3 low-order bits of an address.
// bit0 => alignment => fake header
// bit1 => zero filled (calloc)
// bit2 => mapped allocation versus sbrk
#define StickyBits( header ) (((header)->kind.real.blockSize & 0x7))
#define ClearStickyBits( addr ) (typeof(addr))((uintptr_t)(addr) & ~7)
#define MarkAlignmentBit( align ) ((align) | 1)
#define AlignmentBit( header ) ((((header)->kind.fake.alignment) & 1))
#define ClearAlignmentBit( header ) (((header)->kind.fake.alignment) & ~1)
#define ZeroFillBit( header ) ((((header)->kind.real.blockSize) & 2))
#define ClearZeroFillBit( header ) ((((header)->kind.real.blockSize) &= ~2))
#define MarkZeroFilledBit( header ) ((header)->kind.real.blockSize |= 2)
#define MmappedBit( header ) ((((header)->kind.real.blockSize) & 4))
#define MarkMmappedBit( size ) ((size) | 4)
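
// Sticky-bit sketch: bucket sizes are multiples of 16 and mmap sizes are page multiples, so the low 3 bits of
// blockSize are free for flags. E.g., a zero-filled mmapped allocation of 8192 bytes stores blockSize = 8192 | 2 | 4,
// and ClearStickyBits recovers 8192; an aligned allocation marks the fake header by storing alignment | 1.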


inline __attribute__((always_inline))
static void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) {
    if ( unlikely( AlignmentBit( header ) ) ) { // fake header ?
        alignment = ClearAlignmentBit( header ); // clear flag from value
        #ifdef __CFA_DEBUG__
        checkAlign( alignment ); // check alignment
        #endif // __CFA_DEBUG__
        header = RealHeader( header ); // backup from fake to real header
    } else {
        alignment = libAlign(); // => no fake header
    } // if
} // fakeHeader


inline __attribute__((always_inline))
static bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header,
        Heap.FreeHeader *& freeHead, size_t & size, size_t & alignment ) with( heapMaster, *heapManager ) {
    header = HeaderAddr( addr );

    #ifdef __CFA_DEBUG__
    checkHeader( header < (Heap.Storage.Header *)heapBegin, name, addr ); // bad low address ?
    #endif // __CFA_DEBUG__

    if ( likely( ! StickyBits( header ) ) ) { // no sticky bits ?
        freeHead = (Heap.FreeHeader *)(header->kind.real.home);
        alignment = libAlign();
    } else {
        fakeHeader( header, alignment );
        if ( unlikely( MmappedBit( header ) ) ) { // mmapped ?
            verify( addr < heapBegin || heapEnd < addr );
            size = ClearStickyBits( header->kind.real.blockSize ); // mmap size
            return true;
        } // if

        freeHead = (Heap.FreeHeader *)(ClearStickyBits( header->kind.real.home ));
    } // if
    size = freeHead->blockSize;

    #ifdef __CFA_DEBUG__
    checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)

    Heap * homeManager;
    if ( unlikely( freeHead == 0p || // freed and only free-list node => null link
            // freed and link points at another free block not to a bucket in the bucket array.
            (homeManager = freeHead->homeManager, freeHead < &homeManager->freeLists[0] ||
            &homeManager->freeLists[NoBucketSizes] <= freeHead ) ) ) {
        abort( "**** Error **** attempt to %s storage %p with corrupted header.\n"
            "Possible cause is duplicate free on same block or overwriting of header information.",
            name, addr );
    } // if
    #endif // __CFA_DEBUG__

    return false;
} // headers


static void * master_extend( size_t size ) with( heapMaster ) {
    lock( extLock );

    ptrdiff_t rem = heapRemaining - size;
    if ( unlikely( rem < 0 ) ) { // negative ?
        // If the size requested is bigger than the current remaining storage, increase the size of the heap.

        size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() );
        if ( unlikely( sbrk( increase ) == (void *)-1 ) ) { // failed, no memory ?
            unlock( extLock );
            abort( NO_MEMORY_MSG, size ); // give up
        } // if

        // Make storage executable for thunks.
        if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
            unlock( extLock );
            abort( "**** Error **** attempt to make heap storage executable for thunks and mprotect failed with errno %d.", errno );
        } // if

        rem = heapRemaining + increase - size;

        #ifdef __STATISTICS__
        sbrk_calls += 1;
        sbrk_storage += increase;
        #endif // __STATISTICS__
    } // if

    Heap.Storage * block = (Heap.Storage *)heapEnd;
    heapRemaining = rem;
    heapEnd = (char *)heapEnd + size;

    unlock( extLock );
    return block;
} // master_extend
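
// E.g., with the default 10M expansion (__CFA_DEFAULT_HEAP_EXPANSION__), a 1K extension request when the heap is
// exhausted advances the brk by 10M, satisfies the 1K, and leaves the remainder as heapRemaining for later requests.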


__attribute__(( noinline ))
static void * manager_extend( size_t size ) with( *heapManager ) {
    ptrdiff_t rem = heapReserve - size;

    if ( unlikely( rem < 0 ) ) { // negative ?
        // If the size requested is bigger than the current remaining reserve, use the current reserve to populate
        // smaller freeLists, and increase the reserve.

        rem = heapReserve; // positive

        if ( (unsigned int)rem >= bucketSizes[0] ) { // minimal size ? otherwise ignore
            size_t bucket;
            #ifdef FASTLOOKUP
            if ( likely( rem < LookupSizes ) ) bucket = lookup[rem];
            else
            #endif // FASTLOOKUP
                bucket = Bsearchl( rem, bucketSizes, heapMaster.maxBucketsUsed );
            verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed );
            Heap.FreeHeader * freeHead = &(freeLists[bucket]);

            // The remaining storage may not be bucket size, whereas all other allocations are. Round down to the
            // previous bucket size in this case.
            if ( unlikely( freeHead->blockSize > (size_t)rem ) ) freeHead -= 1;
            Heap.Storage * block = (Heap.Storage *)heapBuffer;

            block->header.kind.real.next = freeHead->freeList; // push on stack
            freeHead->freeList = block;
        } // if

        size_t increase = ceiling( size > ( heapMaster.heapExpand / 10 ) ? size : ( heapMaster.heapExpand / 10 ), libAlign() );
        heapBuffer = master_extend( increase );
        rem = increase - size;
    } // if

    Heap.Storage * block = (Heap.Storage *)heapBuffer;
    heapReserve = rem;
    heapBuffer = (char *)heapBuffer + size;

    return block;
} // manager_extend


#define BOOT_HEAP_MANAGER \
    if ( unlikely( ! heapMasterBootFlag ) ) { \
        heapManagerCtor(); /* trigger for first heap */ \
    } /* if */

#ifdef __STATISTICS__
#define STAT_NAME __counter
#define STAT_PARM , unsigned int STAT_NAME
#define STAT_ARG( name ) , name
#define STAT_0_CNT( counter ) stats.counters[counter].calls_0 += 1
#else
#define STAT_NAME
#define STAT_PARM
#define STAT_ARG( name )
#define STAT_0_CNT( counter )
#endif // __STATISTICS__
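
// These macros thread the per-routine statistic index through the common allocation path only when statistics are
// enabled. E.g., malloc calls doMalloc( size STAT_ARG( MALLOC ) ), which expands to doMalloc( size, MALLOC ) with
// __STATISTICS__ and to doMalloc( size ) without, so the non-statistics build pays no parameter-passing cost.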

// Uncomment to get allocation addresses for a 0-sized allocation rather than a null pointer.
//#define __NONNULL_0_ALLOC__
#if ! defined( __NONNULL_0_ALLOC__ )
#define __NULL_0_ALLOC__ unlikely( size == 0 ) || /* 0 BYTE ALLOCATION RETURNS NULL POINTER */
#else
#define __NULL_0_ALLOC__
#endif // __NONNULL_0_ALLOC__

#define PROLOG( counter, ... ) \
    BOOT_HEAP_MANAGER; \
    if ( \
        __NULL_0_ALLOC__ \
        unlikely( size > ULONG_MAX - sizeof(Heap.Storage) ) ) { /* error check */ \
        STAT_0_CNT( counter ); \
        __VA_ARGS__; \
        return 0p; \
    } /* if */


#define SCRUB_SIZE 1024lu
// Do not use '\xfe' for scrubbing because dereferencing an address composed of it causes a SIGSEGV *without* a valid IP
// pointer in the interrupt frame.
#define SCRUB '\xff'

static void * doMalloc( size_t size STAT_PARM ) libcfa_nopreempt with( *heapManager ) {
    PROLOG( STAT_NAME );

    verify( heapManager );
    Heap.Storage * block; // pointer to new block of storage

    // Look up size in the size list. Make sure the user request includes space for the header that must be allocated
    // along with the block and is a multiple of the alignment size.
    size_t tsize = size + sizeof(Heap.Storage);

    #ifdef __STATISTICS__
    stats.counters[STAT_NAME].calls += 1;
    stats.counters[STAT_NAME].request += size;
    #endif // __STATISTICS__

    #ifdef __CFA_DEBUG__
    allocUnfreed += size;
    #endif // __CFA_DEBUG__

    if ( likely( tsize < heapMaster.mmapStart ) ) { // small size => sbrk
        size_t bucket;
        #ifdef FASTLOOKUP
        if ( likely( tsize < LookupSizes ) ) bucket = lookup[tsize];
        else
        #endif // FASTLOOKUP
            bucket = Bsearchl( tsize, bucketSizes, heapMaster.maxBucketsUsed );
        verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed );
        Heap.FreeHeader * freeHead = &freeLists[bucket];

        verify( freeHead <= &freeLists[heapMaster.maxBucketsUsed] ); // subscripting error ?
        verify( tsize <= freeHead->blockSize ); // search failure ?

        tsize = freeHead->blockSize; // total space needed for request
        #ifdef __STATISTICS__
        stats.counters[STAT_NAME].alloc += tsize;
        #endif // __STATISTICS__

        block = freeHead->freeList; // remove node from stack
        if ( unlikely( block == 0p ) ) { // no free block ?
            // Freelist for this size is empty, so check return list (OWNERSHIP), or carve it out of the heap if there
            // is enough left, or get some more heap storage and carve it off.
            #ifdef OWNERSHIP
            if ( unlikely( freeHead->returnList ) ) { // race, get next time if lose race
                #ifdef RETURNSPIN
                lock( freeHead->returnLock );
                block = freeHead->returnList;
                freeHead->returnList = 0p;
                unlock( freeHead->returnLock );
                #else
                block = __atomic_exchange_n( &freeHead->returnList, 0p, __ATOMIC_SEQ_CST );
                #endif // RETURNSPIN

                verify( block );
                #ifdef __STATISTICS__
                stats.return_pulls += 1;
                #endif // __STATISTICS__

                // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

                freeHead->freeList = block->header.kind.real.next; // merge returnList into freeHead
            } else {
            #endif // OWNERSHIP
                // Do not leave kernel thread as manager_extend accesses heapManager.
                disable_interrupts();
                block = (Heap.Storage *)manager_extend( tsize ); // mutual exclusion on call
                enable_interrupts( false );

                // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

                #ifdef __CFA_DEBUG__
                // Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first SCRUB_SIZE bytes.
                memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) );
                #endif // __CFA_DEBUG__
            #ifdef OWNERSHIP
            } // if
            #endif // OWNERSHIP
        } else {
            // Memory is scrubbed in doFree.
            freeHead->freeList = block->header.kind.real.next;
        } // if

        block->header.kind.real.home = freeHead; // pointer back to free list of appropriate size
    } else { // large size => mmap
        if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p;
        tsize = ceiling2( tsize, __page_size ); // must be multiple of page size

        #ifdef __STATISTICS__
        stats.counters[STAT_NAME].alloc += tsize;
        stats.mmap_calls += 1;
        stats.mmap_storage_request += size;
        stats.mmap_storage_alloc += tsize;
        #endif // __STATISTICS__

        disable_interrupts();
        block = (Heap.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
        enable_interrupts( false );

        // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

        if ( unlikely( block == (Heap.Storage *)MAP_FAILED ) ) { // failed ?
            if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
            // Do not call strerror( errno ) as it may call malloc.
            abort( "**** Error **** attempt to allocate large object (> %zu) of size %zu bytes and mmap failed with errno %d.",
                heapMaster.mmapStart, size, errno );
        } // if
        block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap

        #ifdef __CFA_DEBUG__
        // Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first SCRUB_SIZE bytes. The
        // rest of the storage is set to 0 by mmap.
        memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) );
        #endif // __CFA_DEBUG__
    } // if

    block->header.kind.real.size = size; // store allocation size
    void * addr = &(block->data); // adjust off header to user bytes
    verify( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ?

    #ifdef __CFA_DEBUG__
    if ( traceHeap() ) {
        char helpText[64];
        __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
            "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); // print debug/nodebug
    } // if
    #endif // __CFA_DEBUG__

//  poll_interrupts(); // call rollforward

    return addr;
} // doMalloc


static void doFree( void * addr ) libcfa_nopreempt with( *heapManager ) {
    verify( addr );

    // detect free after thread-local storage destruction and use global stats in that case

    Heap.Storage.Header * header;
    Heap.FreeHeader * freeHead;
    size_t size, alignment;

    bool mapped = headers( "free", addr, header, freeHead, size, alignment );
    #if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
    size_t rsize = header->kind.real.size; // optimization
    #endif // __STATISTICS__ || __CFA_DEBUG__

    #ifdef __STATISTICS__
    stats.free_storage_request += rsize;
    stats.free_storage_alloc += size;
    #endif // __STATISTICS__

    #ifdef __CFA_DEBUG__
    allocUnfreed -= rsize;
    #endif // __CFA_DEBUG__

    if ( unlikely( mapped ) ) { // mmapped ?
        #ifdef __STATISTICS__
        stats.munmap_calls += 1;
        stats.munmap_storage_request += rsize;
        stats.munmap_storage_alloc += size;
        #endif // __STATISTICS__

        // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

        // Does not matter where this storage is freed.
        if ( unlikely( munmap( header, size ) == -1 ) ) {
            // Do not call strerror( errno ) as it may call malloc.
            abort( "**** Error **** attempt to deallocate large object %p and munmap failed with errno %d.\n"
                "Possible cause is invalid delete pointer: either not allocated or with corrupt header.",
                addr, errno );
        } // if
    } else {
        #ifdef __CFA_DEBUG__
        // memset is NOT always inlined!
        disable_interrupts();
        // Scrub old memory so subsequent usages might fail. Only scrub the first/last SCRUB_SIZE bytes.
        char * data = ((Heap.Storage *)header)->data; // data address
        size_t dsize = size - sizeof(Heap.Storage); // data size
        if ( dsize <= SCRUB_SIZE * 2 ) {
            memset( data, SCRUB, dsize ); // scrub all
        } else {
            memset( data, SCRUB, SCRUB_SIZE ); // scrub front
            memset( data + dsize - SCRUB_SIZE, SCRUB, SCRUB_SIZE ); // scrub back
        } // if
        enable_interrupts( false );
        #endif // __CFA_DEBUG__

        #ifdef OWNERSHIP
        if ( likely( heapManager == freeHead->homeManager ) ) { // belongs to this thread
            header->kind.real.next = freeHead->freeList; // push on stack
            freeHead->freeList = (Heap.Storage *)header;
        } else { // return to thread owner
            verify( heapManager );

            #ifdef RETURNSPIN
            lock( freeHead->returnLock );
            header->kind.real.next = freeHead->returnList; // push to bucket return list
            freeHead->returnList = (Heap.Storage *)header;
            unlock( freeHead->returnLock );
            #else // lock free
            header->kind.real.next = freeHead->returnList; // link new node to top node
            // CAS resets header->kind.real.next = freeHead->returnList on failure
            while ( ! __atomic_compare_exchange_n( &freeHead->returnList, &header->kind.real.next, (Heap.Storage *)header,
                false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) );
            #endif // RETURNSPIN

            #ifdef __STATISTICS__
            stats.return_pushes += 1;
            stats.return_storage_request += rsize;
            stats.return_storage_alloc += size;
            #endif // __STATISTICS__
        } // if

        #else // no OWNERSHIP

        // kind.real.home is address in owner thread's freeLists, so compute the equivalent position in this thread's freeList.
        freeHead = &freeLists[ClearStickyBits( (Heap.FreeHeader *)(header->kind.real.home) ) - &freeHead->homeManager->freeLists[0]];
        header->kind.real.next = freeHead->freeList; // push on stack
        freeHead->freeList = (Heap.Storage *)header;
        #endif // ! OWNERSHIP

        // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.
    } // if

    #ifdef __CFA_DEBUG__
    if ( traceHeap() ) {
        char helpText[64];
        __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
            "Free( %p ) size:%zu\n", addr, size ); // print debug/nodebug
    } // if
    #endif // __CFA_DEBUG__

//  poll_interrupts(); // call rollforward
} // doFree
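
// Free-path sketch: free(p) locates the header at p - sizeof(Heap.Storage); mmapped blocks are munmapped directly,
// owned blocks are pushed on the local freeList without locking, and foreign blocks are pushed on the owner's
// returnList (locked or CAS). Hence the only synchronization on the fast path is for cross-thread frees.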


size_t prtFree( Heap & manager ) with( manager ) {
    size_t total = 0;
    #ifdef __STATISTICS__
    __cfaabi_bits_acquire();
    __cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" );
    #endif // __STATISTICS__
    for ( unsigned int i = 0; i < heapMaster.maxBucketsUsed; i += 1 ) {
        size_t size = freeLists[i].blockSize;
        #ifdef __STATISTICS__
        unsigned int N = 0;
        #endif // __STATISTICS__

        for ( Heap.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
            total += size;
            #ifdef __STATISTICS__
            N += 1;
            #endif // __STATISTICS__
        } // for

        #ifdef __STATISTICS__
        __cfaabi_bits_print_nolock( STDERR_FILENO, "%7zu, %-7u ", size, N );
        if ( (i + 1) % 8 == 0 ) __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" );
        #endif // __STATISTICS__
    } // for
    #ifdef __STATISTICS__
    __cfaabi_bits_print_nolock( STDERR_FILENO, "\ntotal free blocks:%zu\n", total );
    __cfaabi_bits_release();
    #endif // __STATISTICS__
    return (char *)heapMaster.heapEnd - (char *)heapMaster.heapBegin - total;
} // prtFree


#ifdef __STATISTICS__
static void incCalls( size_t statName ) libcfa_nopreempt {
    heapManager->stats.counters[statName].calls += 1;
} // incCalls

static void incZeroCalls( size_t statName ) libcfa_nopreempt {
    heapManager->stats.counters[statName].calls_0 += 1;
} // incZeroCalls
#endif // __STATISTICS__

#ifdef __CFA_DEBUG__
static void incUnfreed( intptr_t offset ) libcfa_nopreempt {
    heapManager->allocUnfreed += offset;
} // incUnfreed
#endif // __CFA_DEBUG__


static void * memalignNoStats( size_t alignment, size_t size STAT_PARM ) {
    checkAlign( alignment ); // check alignment

    // if alignment <= default alignment or size == 0, do normal malloc as two headers are unnecessary
    if ( unlikely( alignment <= libAlign() || size == 0 ) ) return doMalloc( size STAT_ARG( STAT_NAME ) );

    // Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
    // administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
    // .-------------v-----------------v----------------v----------,
    // | Real Header | ... padding ... |  Fake Header   | data ... |
    // `-------------^-----------------^-+--------------^----------'
    // |<--------------------------------' offset/align |<-- alignment boundary

    // subtract libAlign() because it is already the minimum alignment
    // add sizeof(Storage) for fake header
    size_t offset = alignment - libAlign() + sizeof(Heap.Storage);
    char * addr = (char *)doMalloc( size + offset STAT_ARG( STAT_NAME ) );

    // address in the block of the "next" alignment address
    char * user = (char *)ceiling2( (uintptr_t)(addr + sizeof(Heap.Storage)), alignment );

    // address of header from malloc
    Heap.Storage.Header * realHeader = HeaderAddr( addr );
    realHeader->kind.real.size = size; // correct size to eliminate above alignment offset
    #ifdef __CFA_DEBUG__
    incUnfreed( -offset ); // adjustment off the offset from call to doMalloc
    #endif // __CFA_DEBUG__

    // address of fake header *before* the alignment location
    Heap.Storage.Header * fakeHeader = HeaderAddr( user );

    // SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
    fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
    // SKULLDUGGERY: odd alignment implies fake header
    fakeHeader->kind.fake.alignment = MarkAlignmentBit( alignment );

    return user;
} // memalignNoStats
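
// Worked example (assuming libAlign() is 16 and sizeof(Heap.Storage) is 16): memalign( 64, 100 ) computes
// offset = 64 - 16 + 16 = 64 and mallocs 164 bytes; user is the first 64-byte boundary at or after addr + 16, and
// the fake header just below it records the distance back to the real header plus the alignment with its low bit set.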


//####################### Memory Allocation Routines ####################


extern "C" {
    // Allocates size bytes and returns a pointer to the allocated memory. The contents are undefined. If size is 0,
    // then malloc() returns a unique pointer value that can later be successfully passed to free().
    void * malloc( size_t size ) libcfa_public {
        return doMalloc( size STAT_ARG( MALLOC ) );
    } // malloc


    // Same as malloc() except size bytes is an array of dim elements each of elemSize bytes.
    void * aalloc( size_t dim, size_t elemSize ) libcfa_public {
        return doMalloc( dim * elemSize STAT_ARG( AALLOC ) );
    } // aalloc


    // Same as aalloc() with memory set to zero.
    void * calloc( size_t dim, size_t elemSize ) libcfa_public {
        size_t size = dim * elemSize;
        char * addr = (char *)doMalloc( size STAT_ARG( CALLOC ) );

        if ( unlikely( addr == NULL ) ) return NULL; // stop further processing if 0p is returned

        Heap.Storage.Header * header;
        Heap.FreeHeader * freeHead;
        size_t bsize, alignment;

        #ifndef __CFA_DEBUG__
        bool mapped =
        #endif // __CFA_DEBUG__
            headers( "calloc", addr, header, freeHead, bsize, alignment );

        #ifndef __CFA_DEBUG__
        // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
        if ( likely( ! mapped ) )
        #endif // __CFA_DEBUG__
            // <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
            // `-header`-addr                      `-size
            memset( addr, '\0', size ); // set to zeros

        MarkZeroFilledBit( header ); // mark as zero fill
        return addr;
    } // calloc


    // Change the size of the memory block pointed to by oaddr to size bytes. The contents are undefined. If oaddr is
    // 0p, then the call is equivalent to malloc(size), for all values of size; if size is equal to zero, and oaddr is
    // not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier
    // call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done.
    void * resize( void * oaddr, size_t size ) libcfa_public {
        if ( unlikely( oaddr == 0p ) ) { // => malloc( size )
            return doMalloc( size STAT_ARG( RESIZE ) );
        } // if

        PROLOG( RESIZE, doFree( oaddr ) ); // => free( oaddr )

        Heap.Storage.Header * header;
        Heap.FreeHeader * freeHead;
        size_t bsize, oalign;
        headers( "resize", oaddr, header, freeHead, bsize, oalign );

        size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
        // same size, DO NOT preserve STICKY PROPERTIES.
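        // In-place example: an unaligned block in a 1024-byte bucket has roughly 1008 data bytes (odsize); a resize
        // to any size in [odsize / 2, odsize] keeps the address (at most 50% wasted storage), while a smaller or
        // larger size frees the old block and allocates anew.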
        if ( oalign == libAlign() && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
            ClearZeroFillBit( header ); // no alignment and turn off 0 fill
            #ifdef __CFA_DEBUG__
            incUnfreed( size - header->kind.real.size ); // adjustment off the size difference
            #endif // __CFA_DEBUG__
            header->kind.real.size = size; // reset allocation size
            #ifdef __STATISTICS__
            incCalls( RESIZE );
            #endif // __STATISTICS__
            return oaddr;
        } // if

        // change size, DO NOT preserve STICKY PROPERTIES.
        doFree( oaddr ); // free previous storage

        return doMalloc( size STAT_ARG( RESIZE ) ); // create new area
    } // resize
1341
1342
1343 // Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of
1344 // the old and new sizes.
1345 void * realloc( void * oaddr, size_t size ) libcfa_public {
1346 if ( unlikely( oaddr == 0p ) ) { // => malloc( size )
1347 return doMalloc( size STAT_ARG( REALLOC ) );
1348 } // if
1349
1350 PROLOG( REALLOC, doFree( oaddr ) ); // => free( oaddr )
1351
1352 Heap.Storage.Header * header;
1353 Heap.FreeHeader * freeHead;
1354 size_t bsize, oalign;
1355 headers( "realloc", oaddr, header, freeHead, bsize, oalign );
1356
1357 size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
1358 size_t osize = header->kind.real.size; // old allocation size
1359 bool ozfill = ZeroFillBit( header ); // old allocation zero filled
1360 if ( unlikely( size <= odsize ) && odsize <= size * 2 ) { // allow up to 50% wasted storage
1361 #ifdef __CFA_DEBUG__
1362 incUnfreed( size - header->kind.real.size ); // adjustment off the size difference
1363 #endif // __CFA_DEBUG__
1364 header->kind.real.size = size; // reset allocation size
1365 if ( unlikely( ozfill ) && size > osize ) { // previous request zero fill and larger ?
1366 memset( (char *)oaddr + osize, '\0', size - osize ); // initialize added storage
1367 } // if
1368 #ifdef __STATISTICS__
1369 incCalls( REALLOC );
1370 #endif // __STATISTICS__
1371 return oaddr;
1372 } // if
1373
1374 // change size and copy old content to new storage
1375
1376 void * naddr;
1377 if ( likely( oalign <= libAlign() ) ) { // previous request not aligned ?
1378 naddr = doMalloc( size STAT_ARG( REALLOC ) ); // create new area
1379 } else {
1380 naddr = memalignNoStats( oalign, size STAT_ARG( REALLOC ) ); // create new aligned area
1381 } // if
1382
1383 headers( "realloc", naddr, header, freeHead, bsize, oalign );
1384		// To preserve prior zero fill, the entire bucket would have to be copied rather than just the
1385		// allocation size, so added storage is re-zeroed below when necessary.
1385 memcpy( naddr, oaddr, min( osize, size ) ); // copy bytes
1386 doFree( oaddr ); // free previous storage
1387
1388 if ( unlikely( ozfill ) ) { // previous request zero fill ?
1389 MarkZeroFilledBit( header ); // mark new request as zero filled
1390 if ( size > osize ) { // previous request larger ?
1391 memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
1392 } // if
1393 } // if
1394 return naddr;
1395 } // realloc
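	// Usage sketch (illustrative only, not compiled into the allocator): unlike resize, realloc
	// preserves contents and sticky properties, so storage added beyond the old size is zeroed
	// when the original allocation was zero filled.
	#if 0
	int * ip = calloc( 10, sizeof( int ) );			// zero filled
	ip = realloc( ip, 100 * sizeof( int ) );		// first 10 ints preserved
	// ip[10] through ip[99] are zero because the zero-fill sticky property is preserved
	free( ip );
	#endif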
1396
1397
1398	// Same as realloc() except the new allocation size is large enough for an array of dim elements of size elemSize.
1399 void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) libcfa_public {
1400 return realloc( oaddr, dim * elemSize );
1401 } // reallocarray
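	// Note: glibc's reallocarray fails with ENOMEM when dim * elemSize overflows, whereas the
	// forwarding above lets the multiplication wrap. A guarded variant might look like this sketch;
	// __builtin_mul_overflow is a gcc/clang builtin, not part of this allocator.
	#if 0
	void * reallocarray_checked( void * oaddr, size_t dim, size_t elemSize ) {
		size_t bytes;
		if ( __builtin_mul_overflow( dim, elemSize, &bytes ) ) { // product overflows size_t ?
			errno = ENOMEM;
			return 0p;
		} // if
		return realloc( oaddr, bytes );
	} // reallocarray_checked
	#endif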
1402
1403
1404 // Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete)
1405 void * memalign( size_t alignment, size_t size ) libcfa_public {
1406 return memalignNoStats( alignment, size STAT_ARG( MEMALIGN ) );
1407 } // memalign
1408
1409
1410 // Same as aalloc() with memory alignment.
1411 void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
1412 return memalignNoStats( alignment, dim * elemSize STAT_ARG( AMEMALIGN ) );
1413 } // amemalign
1414
1415
1416 // Same as calloc() with memory alignment.
1417 void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
1418 size_t size = dim * elemSize;
1419 char * addr = (char *)memalignNoStats( alignment, size STAT_ARG( CMEMALIGN ) );
1420
1421		if ( unlikely( addr == 0p ) ) return 0p;	// stop further processing if 0p is returned
1422
1423 Heap.Storage.Header * header;
1424 Heap.FreeHeader * freeHead;
1425 size_t bsize;
1426
1427 #ifndef __CFA_DEBUG__
1428 bool mapped =
1429 #endif // __CFA_DEBUG__
1430 headers( "cmemalign", addr, header, freeHead, bsize, alignment );
1431
1432 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
1433 #ifndef __CFA_DEBUG__
1434 if ( ! mapped )
1435 #endif // __CFA_DEBUG__
1436 // <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
1437			// `-header`-addr                      `-size
1438 memset( addr, '\0', size ); // set to zeros
1439
1440 MarkZeroFilledBit( header ); // mark as zero filled
1441 return addr;
1442 } // cmemalign
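	// Usage sketch (illustrative only, not compiled into the allocator): the aligned array routines
	// mirror the array allocations with an extra alignment parameter.
	#if 0
	struct S { int i, j; };
	struct S * sp = amemalign( 64, 10, sizeof( struct S ) ); // 10 elements, 64-byte aligned, uninitialized
	struct S * cp = cmemalign( 64, 10, sizeof( struct S ) ); // 10 elements, 64-byte aligned, zero filled
	free( sp ); free( cp );
	#endif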
1443
1444
1445	// Same as memalign(), but ISO/IEC 9899:2011 (C11) Section 7.22.2 states: the value of size shall be an integral multiple
1446 // of alignment. This requirement is universally ignored.
1447 void * aligned_alloc( size_t alignment, size_t size ) libcfa_public {
1448 return memalign( alignment, size );
1449 } // aligned_alloc
1450
1451
1452 // Allocates size bytes and places the address of the allocated memory in *memptr. The address of the allocated
1453 // memory shall be a multiple of alignment, which must be a power of two and a multiple of sizeof(void *). If size
1454	// is 0, then posix_memalign() places either 0p or a unique pointer value in *memptr that can later be
1455	// successfully passed to free(3).
1456 int posix_memalign( void ** memptr, size_t alignment, size_t size ) libcfa_public {
1457 if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment
1458 *memptr = memalign( alignment, size );
1459 return 0;
1460 } // posix_memalign
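	// Usage sketch (illustrative only, not compiled into the allocator): alignment must be a power
	// of 2 no smaller than libAlign(), otherwise EINVAL is returned and *memptr is left unset.
	#if 0
	void * vp;
	if ( posix_memalign( &vp, 64, 256 ) == 0 ) {	// 64-byte (cache-line) aligned
		free( vp );
	} // if
	// posix_memalign( &vp, 48, 256 ) => EINVAL because 48 is not a power of 2
	#endif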
1461
1462
1463 // Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
1464 // page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
1465 void * valloc( size_t size ) libcfa_public {
1466 return memalign( __page_size, size );
1467 } // valloc
1468
1469
1470 // Same as valloc but rounds size to multiple of page size.
1471 void * pvalloc( size_t size ) libcfa_public {
1472 return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size
1473 } // pvalloc
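	// Usage sketch (illustrative only, not compiled into the allocator): both routines return
	// page-aligned storage; pvalloc additionally rounds the request up to a whole page.
	#if 0
	void * vp = valloc( 100 );						// page aligned, malloc_size( vp ) => 100
	void * pp = pvalloc( 100 );						// page aligned, malloc_size( pp ) => page size, e.g., 4096
	free( vp ); free( pp );
	#endif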
1474
1475
1476 // Frees the memory space pointed to by ptr, which must have been returned by a previous call to malloc(), calloc()
1477 // or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is
1478 // 0p, no operation is performed.
1479 void free( void * addr ) libcfa_public {
1480 if ( unlikely( addr == 0p ) ) { // special case
1481 #ifdef __STATISTICS__
1482 if ( heapManager )
1483 incZeroCalls( FREE );
1484 #endif // __STATISTICS__
1485 return;
1486 } // if
1487
1488 #ifdef __STATISTICS__
1489 incCalls( FREE );
1490 #endif // __STATISTICS__
1491
1492 doFree( addr ); // handles heapManager == nullptr
1493 } // free
1494
1495
1496 // Returns the alignment of an allocation.
1497 size_t malloc_alignment( void * addr ) libcfa_public {
1498 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment
1499 Heap.Storage.Header * header = HeaderAddr( addr );
1500 if ( unlikely( AlignmentBit( header ) ) ) { // fake header ?
1501 return ClearAlignmentBit( header ); // clear flag from value
1502 } else {
1503 return libAlign(); // minimum alignment
1504 } // if
1505 } // malloc_alignment
1506
1507
1508 // Returns true if the allocation is zero filled, e.g., allocated by calloc().
1509 bool malloc_zero_fill( void * addr ) libcfa_public {
1510 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill
1511 Heap.Storage.Header * header = HeaderAddr( addr );
1512 if ( unlikely( AlignmentBit( header ) ) ) { // fake header ?
1513 header = RealHeader( header ); // backup from fake to real header
1514 } // if
1515 return ZeroFillBit( header ); // zero filled ?
1516 } // malloc_zero_fill
1517
1518
1519 // Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T).
1520 size_t malloc_size( void * addr ) libcfa_public {
1521 if ( unlikely( addr == 0p ) ) return 0; // null allocation has zero size
1522 Heap.Storage.Header * header = HeaderAddr( addr );
1523 if ( unlikely( AlignmentBit( header ) ) ) { // fake header ?
1524 header = RealHeader( header ); // backup from fake to real header
1525 } // if
1526 return header->kind.real.size;
1527 } // malloc_size
1528
1529
1530 // Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by
1531 // malloc or a related function.
1532 size_t malloc_usable_size( void * addr ) libcfa_public {
1533 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size
1534 Heap.Storage.Header * header;
1535 Heap.FreeHeader * freeHead;
1536 size_t bsize, alignment;
1537
1538 headers( "malloc_usable_size", addr, header, freeHead, bsize, alignment );
1539 return DataStorage( bsize, addr, header ); // data storage in bucket
1540 } // malloc_usable_size
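	// Usage sketch (illustrative only, not compiled into the allocator): malloc_size returns the
	// requested size, while malloc_usable_size returns the data storage in the underlying bucket,
	// so malloc_size( addr ) <= malloc_usable_size( addr ).
	#if 0
	char * cp = malloc( 100 );
	// malloc_size( cp ) => 100
	// malloc_usable_size( cp ) => bucket data storage, at least 100
	// malloc_alignment( cp ) => libAlign(), the minimum alignment
	free( cp );
	#endif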
1541
1542
1543	// Prints statistics about memory allocated by malloc and related functions (on standard error by default; see malloc_stats_fd).
1544 void malloc_stats( void ) libcfa_public {
1545 #ifdef __STATISTICS__
1546 HeapStatistics stats;
1547 HeapStatisticsCtor( stats );
1548 if ( printStats( collectStats( stats ) ) == -1 ) {
1549 #else
1550 #define MALLOC_STATS_MSG "malloc_stats statistics disabled.\n"
1551 if ( write( STDERR_FILENO, MALLOC_STATS_MSG, sizeof( MALLOC_STATS_MSG ) - 1 /* size includes '\0' */ ) == -1 ) {
1552 #endif // __STATISTICS__
1553 abort( "**** Error **** write failed in malloc_stats" );
1554 } // if
1555 } // malloc_stats
1556
1557
1558 // Changes the file descriptor where malloc_stats() writes statistics.
1559 int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public {
1560 #ifdef __STATISTICS__
1561 int temp = heapMaster.stats_fd;
1562 heapMaster.stats_fd = fd;
1563 return temp;
1564 #else
1565 return -1; // unsupported
1566 #endif // __STATISTICS__
1567 } // malloc_stats_fd
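	// Usage sketch (illustrative only, not compiled into the allocator): redirect statistics to a
	// log file and restore the previous descriptor; only meaningful when compiled with __STATISTICS__.
	#if 0
	int logfd = open( "heap.log", O_WRONLY | O_CREAT | O_TRUNC, 0644 ); // needs <fcntl.h>
	int oldfd = malloc_stats_fd( logfd );			// returns previous descriptor
	malloc_stats();									// statistics now written to heap.log
	malloc_stats_fd( oldfd );						// restore
	#endif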
1568
1569
1570 // Prints an XML string that describes the current state of the memory-allocation implementation in the caller.
1571 // The string is printed on the file stream stream. The exported string includes information about all arenas (see
1572 // malloc).
1573 int malloc_info( int options, FILE * stream __attribute__(( unused )) ) libcfa_public {
1574 if ( options != 0 ) { errno = EINVAL; return -1; }
1575 #ifdef __STATISTICS__
1576 HeapStatistics stats;
1577 HeapStatisticsCtor( stats );
1578 return printStatsXML( collectStats( stats ), stream ); // returns bytes written or -1
1579 #else
1580 return 0; // unsupported
1581 #endif // __STATISTICS__
1582 } // malloc_info
1583
1584
1585 // Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The param argument
1586 // specifies the parameter to be modified, and value specifies the new value for that parameter.
1587 int mallopt( int option, int value ) libcfa_public {
1588 if ( value < 0 ) return 0;
1589 choose( option ) {
1590 case M_TOP_PAD:
1591 heapMaster.heapExpand = ceiling2( value, __page_size );
1592 return 1;
1593 case M_MMAP_THRESHOLD:
1594 if ( setMmapStart( value ) ) return 1;
1595 } // choose
1596 return 0; // error, unsupported
1597 } // mallopt
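	// Usage sketch (illustrative only, not compiled into the allocator): mallopt returns 1 on
	// success and 0 on error, the reverse of most C library routines.
	#if 0
	if ( mallopt( M_TOP_PAD, 16 * 1024 * 1024 ) == 1 ) {} // extend the sbrk area 16M at a time
	if ( mallopt( M_MMAP_THRESHOLD, 1024 * 1024 ) == 1 ) {} // mmap allocations of 1M or more
	#endif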
1598
1599
1600 // Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument).
1601 int malloc_trim( size_t ) libcfa_public {
1602 return 0; // => impossible to release memory
1603 } // malloc_trim
1604
1605
1606 // Records the current state of all malloc internal bookkeeping variables (but not the actual contents of the heap
1607 // or the state of malloc_hook functions pointers). The state is recorded in a system-dependent opaque data
1608 // structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function
1609 // result. (The caller must free this memory.)
1610 void * malloc_get_state( void ) libcfa_public {
1611 return 0p; // unsupported
1612 } // malloc_get_state
1613
1614
1615 // Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data
1616 // structure pointed to by state.
1617 int malloc_set_state( void * ) libcfa_public {
1618 return 0; // unsupported
1619 } // malloc_set_state
1620
1621
1622	// Sets the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation.
1623 __attribute__((weak)) size_t malloc_expansion() libcfa_public { return __CFA_DEFAULT_HEAP_EXPANSION__; }
1624
1625	// Sets the crossover point between allocations occurring in the sbrk area or separately mmapped.
1626 __attribute__((weak)) size_t malloc_mmap_start() libcfa_public { return __CFA_DEFAULT_MMAP_START__; }
1627
1628 // Amount subtracted to adjust for unfreed program storage (debug only).
1629 __attribute__((weak)) size_t malloc_unfreed() libcfa_public { return __CFA_DEFAULT_HEAP_UNFREED__; }
1630} // extern "C"
1631
1632
1633// Must have CFA linkage to overload with C linkage realloc.
1634void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public {
1635 if ( unlikely( oaddr == 0p ) ) { // => malloc( size )
1636 return memalignNoStats( nalign, size STAT_ARG( RESIZE ) );
1637 } // if
1638
1639 PROLOG( RESIZE, doFree( oaddr ) ); // => free( oaddr )
1640
1641 // Attempt to reuse existing alignment.
1642 Heap.Storage.Header * header = HeaderAddr( oaddr );
1643 bool isFakeHeader = AlignmentBit( header ); // old fake header ?
1644 size_t oalign;
1645
1646 if ( unlikely( isFakeHeader ) ) {
1647 checkAlign( nalign ); // check alignment
1648 oalign = ClearAlignmentBit( header ); // old alignment
1649 if ( unlikely( (uintptr_t)oaddr % nalign == 0 // lucky match ?
1650 && ( oalign <= nalign // going down
1651 || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
1652 ) ) {
1654 Heap.FreeHeader * freeHead;
1655 size_t bsize, oalign;
1656 headers( "resize", oaddr, header, freeHead, bsize, oalign );
1657 size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
1658
1659 if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted data storage
1660 HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
1661 ClearZeroFillBit( header ); // turn off 0 fill
1662 #ifdef __CFA_DEBUG__
1663				incUnfreed( size - header->kind.real.size ); // adjust for the size difference
1664 #endif // __CFA_DEBUG__
1665 header->kind.real.size = size; // reset allocation size
1666 #ifdef __STATISTICS__
1667 incCalls( RESIZE );
1668 #endif // __STATISTICS__
1669 return oaddr;
1670 } // if
1671 } // if
1672 } else if ( ! isFakeHeader // old real header (aligned on libAlign) ?
1673 && nalign == libAlign() ) { // new alignment also on libAlign => no fake header needed
1674 return resize( oaddr, size ); // duplicate special case checks
1675 } // if
1676
1677 // change size, DO NOT preserve STICKY PROPERTIES.
1678 doFree( oaddr ); // free previous storage
1679 return memalignNoStats( nalign, size STAT_ARG( RESIZE ) ); // create new aligned area
1680} // resize
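// Usage sketch (illustrative only, not compiled into the allocator): the alignment-taking
// resize reuses the old storage only when the old address already satisfies the new alignment
// and the size fits the existing bucket; otherwise a new aligned area is allocated.
#if 0
void * vp = memalign( 4096, 4000 );				// page aligned
vp = resize( vp, 4096, 3000 );					// alignment matches, may be reused in place
vp = resize( vp, 8192, 3000 );					// new area when the address is not already 8192-aligned
free( vp );
#endif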
1681
1682
1683void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public {
1684 if ( unlikely( oaddr == 0p ) ) { // => malloc( size )
1685 return memalignNoStats( nalign, size STAT_ARG( REALLOC ) );
1686 } // if
1687
1688 PROLOG( REALLOC, doFree( oaddr ) ); // => free( oaddr )
1689
1690 // Attempt to reuse existing alignment.
1691 Heap.Storage.Header * header = HeaderAddr( oaddr );
1692 bool isFakeHeader = AlignmentBit( header ); // old fake header ?
1693 size_t oalign;
1694 if ( unlikely( isFakeHeader ) ) {
1695 checkAlign( nalign ); // check alignment
1696 oalign = ClearAlignmentBit( header ); // old alignment
1697 if ( unlikely( (uintptr_t)oaddr % nalign == 0 // lucky match ?
1698 && ( oalign <= nalign // going down
1699 || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
1700 ) ) {
1701 HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
1702 return realloc( oaddr, size ); // duplicate special case checks
1703 } // if
1704 } else if ( ! isFakeHeader // old real header (aligned on libAlign) ?
1705 && nalign == libAlign() ) { // new alignment also on libAlign => no fake header needed
1706 return realloc( oaddr, size ); // duplicate special case checks
1707 } // if
1708
1709 Heap.FreeHeader * freeHead;
1710 size_t bsize;
1711 headers( "realloc", oaddr, header, freeHead, bsize, oalign );
1712
1713 // change size and copy old content to new storage
1714
1715 size_t osize = header->kind.real.size; // old allocation size
1716 bool ozfill = ZeroFillBit( header ); // old allocation zero filled
1717
1718 void * naddr = memalignNoStats( nalign, size STAT_ARG( REALLOC ) ); // create new aligned area
1719
1720 headers( "realloc", naddr, header, freeHead, bsize, oalign );
1721 memcpy( naddr, oaddr, min( osize, size ) ); // copy bytes
1722 doFree( oaddr ); // free previous storage
1723
1724 if ( unlikely( ozfill ) ) { // previous request zero fill ?
1725 MarkZeroFilledBit( header ); // mark new request as zero filled
1726 if ( size > osize ) { // previous request larger ?
1727 memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
1728 } // if
1729 } // if
1730 return naddr;
1731} // realloc
1732
1733
1734void * reallocarray( void * oaddr, size_t nalign, size_t dim, size_t elemSize ) __THROW {
1735 return realloc( oaddr, nalign, dim * elemSize );
1736} // reallocarray
1737
1738
1739// Local Variables: //
1740// tab-width: 4 //
1741// compile-command: "cfa -nodebug -O2 heap.cfa" //
1742// End: //