source: libcfa/src/heap.cfa@ 6a25b8f

Last change on this file since 6a25b8f was 95eb7cf, checked in by Peter A. Buhr <pabuhr@…>, 6 years ago

major update of heap, especially realloc

//
// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// heap.cfa --
//
// Author           : Peter A. Buhr
// Created On       : Tue Dec 19 21:58:35 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Fri Nov 22 14:16:30 2019
// Update Count     : 626
//

#include <unistd.h>                            // sbrk, sysconf
#include <stdbool.h>                           // true, false
#include <stdio.h>                             // snprintf, fileno
#include <errno.h>                             // errno
extern "C" {
#include <sys/mman.h>                          // mmap, munmap
} // extern "C"

// #comment TD : Many of these should be merged into math I believe
#include "bits/align.hfa"                      // libPow2
#include "bits/defs.hfa"                       // likely, unlikely
#include "bits/locks.hfa"                      // __spinlock_t
#include "startup.hfa"                         // STARTUP_PRIORITY_MEMORY
#include "stdlib.hfa"                          // bsearchl
#include "malloc.h"

#define MIN(x, y) ((x) < (y) ? (x) : (y))      // fully parenthesized to avoid precedence surprises

static bool traceHeap = false;

inline bool traceHeap() {
    return traceHeap;
} // traceHeap

bool traceHeapOn() {
    bool temp = traceHeap;
    traceHeap = true;
    return temp;
} // traceHeapOn

bool traceHeapOff() {
    bool temp = traceHeap;
    traceHeap = false;
    return temp;
} // traceHeapOff


static bool prtFree = false;

inline bool prtFree() {
    return prtFree;
} // prtFree

bool prtFreeOn() {
    bool temp = prtFree;
    prtFree = true;
    return temp;
} // prtFreeOn

bool prtFreeOff() {
    bool temp = prtFree;
    prtFree = false;
    return temp;
} // prtFreeOff


// static bool traceHeapTerm = false;

// inline bool traceHeapTerm() {
//     return traceHeapTerm;
// } // traceHeapTerm

// bool traceHeapTermOn() {
//     bool temp = traceHeapTerm;
//     traceHeapTerm = true;
//     return temp;
// } // traceHeapTermOn

// bool traceHeapTermOff() {
//     bool temp = traceHeapTerm;
//     traceHeapTerm = false;
//     return temp;
// } // traceHeapTermOff


enum {
    __CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1),
    __CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
};

size_t default_mmap_start() __attribute__(( weak )) {
    return __CFA_DEFAULT_MMAP_START__;
} // default_mmap_start

size_t default_heap_expansion() __attribute__(( weak )) {
    return __CFA_DEFAULT_HEAP_EXPANSION__;
} // default_heap_expansion

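// Because these routines have weak linkage, an application can tune the limits
// by supplying its own strong definitions (a hypothetical override; the values
// are illustrative only):
//
//     size_t default_mmap_start() { return 2 * 1024 * 1024 + 1; } // mmap requests above ~2M
//     size_t default_heap_expansion() { return 4 * 1024 * 1024; } // extend sbrk 4M at a time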

#ifdef __CFA_DEBUG__
static unsigned int allocFree;                 // running total of allocations minus frees

static void prtUnfreed() {
    if ( allocFree != 0 ) {
        // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
        // char helpText[512];
        // int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"
        //                     "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
        //                     (long int)getpid(), allocFree, allocFree ); // always print the UNIX pid
        // __cfaabi_dbg_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
    } // if
} // prtUnfreed

extern "C" {
    void heapAppStart() {                      // called by __cfaabi_appready_startup
        allocFree = 0;
    } // heapAppStart

    void heapAppStop() {                       // called by __cfaabi_appready_startdown
        fclose( stdin ); fclose( stdout );
        prtUnfreed();
    } // heapAppStop
} // extern "C"
#endif // __CFA_DEBUG__

// statically allocated variables => zero filled.
static size_t pageSize;                        // architecture pagesize
static size_t heapExpand;                      // sbrk advance
static size_t mmapStart;                       // cross over point for mmap
static unsigned int maxBucketsUsed;            // maximum number of buckets in use


#define SPINLOCK 0
#define LOCKFREE 1
#define BUCKETLOCK SPINLOCK
#if BUCKETLOCK == LOCKFREE
#include <uStackLF.h>
#endif // LOCKFREE

// Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
// Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
enum { NoBucketSizes = 91 };                   // number of bucket sizes

struct HeapManager {
//  struct FreeHeader;                         // forward declaration

    struct Storage {
        struct Header {                        // header
            union Kind {
                struct RealHeader {
                    union {
                        struct {               // 4-byte word => 8-byte header, 8-byte word => 16-byte header
                            #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4
                            uint32_t padding;  // unused, force home/blocksize to overlay alignment in fake header
                            #endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4

                            union {
//                              FreeHeader * home; // allocated block points back to home locations (must overlay alignment)
                                void * home;   // allocated block points back to home locations (must overlay alignment)
                                size_t blockSize; // size for munmap (must overlay alignment)
                                #if BUCKETLOCK == SPINLOCK
                                Storage * next; // freed block points next freed block of same size
                                #endif // SPINLOCK
                            };

                            #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
                            uint32_t padding;  // unused, force home/blocksize to overlay alignment in fake header
                            #endif // __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
                        };
                        // future code
                        #if BUCKETLOCK == LOCKFREE
                        Stack<Storage>::Link next; // freed block points next freed block of same size (double-wide)
                        #endif // LOCKFREE
                    };
                } real; // RealHeader
                struct FakeHeader {
                    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
                    uint32_t alignment;        // low-order bits of home/blockSize used for tricks
                    #endif // __ORDER_LITTLE_ENDIAN__

                    uint32_t offset;

                    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                    uint32_t alignment;        // low-order bits of home/blockSize used for tricks
                    #endif // __ORDER_BIG_ENDIAN__
                } fake; // FakeHeader
            } kind; // Kind
        } header; // Header
        char pad[libAlign() - sizeof( Header )];
        char data[0];                          // storage
    }; // Storage

    static_assert( libAlign() >= sizeof( Storage ), "libAlign() < sizeof( Storage )" );

    struct FreeHeader {
        #if BUCKETLOCK == SPINLOCK
        __spinlock_t lock;                     // must be first field for alignment
        Storage * freeList;
        #elif BUCKETLOCK == LOCKFREE
        // future code
        StackLF<Storage> freeList;
        #else
            #error undefined lock type for bucket lock
        #endif // SPINLOCK
        size_t blockSize;                      // size of allocations on this list
    }; // FreeHeader

    // must be first fields for alignment
    __spinlock_t extlock;                      // protects allocation-buffer extension
    FreeHeader freeLists[NoBucketSizes];       // buckets for different allocation sizes

    void * heapBegin;                          // start of heap
    void * heapEnd;                            // logical end of heap
    size_t heapRemaining;                      // amount of storage not allocated in the current chunk
}; // HeapManager

static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return freeheader.blockSize; }


#define FASTLOOKUP
#define __STATISTICS__

// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
static const unsigned int bucketSizes[] @= {   // different bucket sizes
    16, 32, 48, 64 + sizeof(HeapManager.Storage), // 4
    96, 112, 128 + sizeof(HeapManager.Storage), // 3
    160, 192, 224, 256 + sizeof(HeapManager.Storage), // 4
    320, 384, 448, 512 + sizeof(HeapManager.Storage), // 4
    640, 768, 896, 1_024 + sizeof(HeapManager.Storage), // 4
    1_536, 2_048 + sizeof(HeapManager.Storage), // 2
    2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), // 4
    6_144, 8_192 + sizeof(HeapManager.Storage), // 2
    9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(HeapManager.Storage), // 8
    18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(HeapManager.Storage), // 8
    36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(HeapManager.Storage), // 8
    73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(HeapManager.Storage), // 8
    147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(HeapManager.Storage), // 8
    294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(HeapManager.Storage), // 8
    655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), // 4
    1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), // 8
    2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(HeapManager.Storage), // 4
};

static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0]), "size of bucket array wrong" );

#ifdef FASTLOOKUP
enum { LookupSizes = 65_536 + sizeof(HeapManager.Storage) }; // number of fast lookup sizes
static unsigned char lookup[LookupSizes];      // O(1) lookup for small sizes
#endif // FASTLOOKUP
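// Worked example (assuming a 16-byte HeapManager.Storage header, as on a typical
// 64-bit build): the first buckets are 16, 32, 48, 80, 96, 112, ..., so a total
// request of 100 bytes yields lookup[100] == 5, selecting the 112-byte bucket in O(1).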

static int mmapFd = -1;                        // fake or actual fd for anonymous file
#ifdef __CFA_DEBUG__
static bool heapBoot = 0;                      // detect recursion during boot
#endif // __CFA_DEBUG__
static HeapManager heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing


#ifdef __STATISTICS__
// Heap statistics counters.
static unsigned long long int mmap_storage;
static unsigned int mmap_calls;
static unsigned long long int munmap_storage;
static unsigned int munmap_calls;
static unsigned long long int sbrk_storage;
static unsigned int sbrk_calls;
static unsigned long long int malloc_storage;
static unsigned int malloc_calls;
static unsigned long long int free_storage;
static unsigned int free_calls;
static unsigned long long int calloc_storage;
static unsigned int calloc_calls;
static unsigned long long int memalign_storage;
static unsigned int memalign_calls;
static unsigned long long int cmemalign_storage;
static unsigned int cmemalign_calls;
static unsigned long long int realloc_storage;
static unsigned int realloc_calls;
// Statistics file descriptor (changed by malloc_stats_fd).
static int statfd = STDERR_FILENO;             // default stderr

// Use "write" because streams may be shutdown when calls are made.
static void printStats() {
    char helpText[512];
    __cfaabi_bits_print_buffer( statfd, helpText, sizeof(helpText), // write to statfd so malloc_stats_fd takes effect
        "\nHeap statistics:\n"
        "  malloc: calls %u / storage %llu\n"
        "  calloc: calls %u / storage %llu\n"
        "  memalign: calls %u / storage %llu\n"
        "  cmemalign: calls %u / storage %llu\n"
        "  realloc: calls %u / storage %llu\n"
        "  free: calls %u / storage %llu\n"
        "  mmap: calls %u / storage %llu\n"
        "  munmap: calls %u / storage %llu\n"
        "  sbrk: calls %u / storage %llu\n",
        malloc_calls, malloc_storage,
        calloc_calls, calloc_storage,
        memalign_calls, memalign_storage,
        cmemalign_calls, cmemalign_storage,
        realloc_calls, realloc_storage,
        free_calls, free_storage,
        mmap_calls, mmap_storage,
        munmap_calls, munmap_storage,
        sbrk_calls, sbrk_storage
    );
} // printStats

static int printStatsXML( FILE * stream ) {    // see malloc_info
    char helpText[512];
    int len = snprintf( helpText, sizeof(helpText),
        "<malloc version=\"1\">\n"
        "<heap nr=\"0\">\n"
        "<sizes>\n"
        "</sizes>\n"
        "<total type=\"malloc\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"calloc\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"memalign\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"cmemalign\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"realloc\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"free\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"mmap\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"munmap\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"sbrk\" count=\"%u\" size=\"%llu\"/>\n"
        "</heap>\n"                            // close <heap> so the XML is well formed
        "</malloc>",
        malloc_calls, malloc_storage,
        calloc_calls, calloc_storage,
        memalign_calls, memalign_storage,
        cmemalign_calls, cmemalign_storage,
        realloc_calls, realloc_storage,
        free_calls, free_storage,
        mmap_calls, mmap_storage,
        munmap_calls, munmap_storage,
        sbrk_calls, sbrk_storage
    );
    __cfaabi_bits_write( fileno( stream ), helpText, len ); // ensures all bytes written or exit
    return len;
} // printStatsXML
#endif // __STATISTICS__


// #comment TD : Is this the same thing as Out-of-Memory?
static inline void noMemory() {
    abort( "Heap memory exhausted at %zu bytes.\n"
           "Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
           ((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
} // noMemory


static inline void checkAlign( size_t alignment ) {
    if ( alignment < libAlign() || ! libPow2( alignment ) ) {
        abort( "Alignment %zu for memory allocation is less than %zu and/or not a power of 2.", alignment, libAlign() );
    } // if
} // checkAlign


static inline bool setHeapExpand( size_t value ) {
    if ( value < pageSize ) return true;       // reject expansion smaller than a page
    heapExpand = value;
    return false;
} // setHeapExpand


static inline bool setMmapStart( size_t value ) { // true => failure (invalid value), false => success
    if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
    mmapStart = value;                         // set global

    // find the closest bucket size greater than or equal to the mmapStart size
    maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
    assert( maxBucketsUsed < NoBucketSizes );  // subscript failure ?
    assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
    return false;
} // setMmapStart


static inline void checkHeader( bool check, const char * name, void * addr ) {
    if ( unlikely( check ) ) {                 // bad address ?
        abort( "Attempt to %s storage %p with address outside the heap.\n"
               "Possible cause is duplicate free on same block or overwriting of memory.",
               name, addr );
    } // if
} // checkHeader


static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) {
    if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
        size_t offset = header->kind.fake.offset;
        alignment = header->kind.fake.alignment & -2; // remove flag from value
        #ifdef __CFA_DEBUG__
        checkAlign( alignment );               // check alignment
        #endif // __CFA_DEBUG__
        header = (HeapManager.Storage.Header *)((char *)header - offset);
    } // if
} // fakeHeader
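// Worked example (values illustrative): memalign( 64, ... ) stores the alignment as
// 64 | 1 == 65 in the fake header. On lookup, 65 & 1 flags the header as fake,
// 65 & -2 recovers the alignment 64, and subtracting kind.fake.offset moves the
// header pointer back to the real header created by doMalloc.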


// <-------+----------------------------------------------------> bsize (bucket size)
// |header |addr
//==================================================================================
//                                | alignment
// <-----------------<------------+-----------------------------> bsize (bucket size)
// |fake-header                   | addr
#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))

// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
// |header |addr
//==================================================================================
//                                | alignment
// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
// |fake-header                   |addr
#define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
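// Worked example (assuming a 16-byte HeapManager.Storage header): for an unaligned
// allocation from the 128-byte bucket, the header sits at addr - 16, so
// dataStorage( 128, addr, header ) == 128 - 16 == 112 usable bytes; an aligned
// allocation additionally loses the padding between the real and fake headers.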


static inline bool headers( const char * name __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
    header = headerAddr( addr );

    if ( unlikely( heapEnd < addr ) ) {        // mmapped ?
        fakeHeader( header, alignment );
        size = header->kind.real.blockSize & -3; // mmap size
        return true;
    } // if

    #ifdef __CFA_DEBUG__
    checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
    #endif // __CFA_DEBUG__

    // header may be safe to dereference
    fakeHeader( header, alignment );
    #ifdef __CFA_DEBUG__
    checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
    #endif // __CFA_DEBUG__

    freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
    #ifdef __CFA_DEBUG__
    if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
        abort( "Attempt to %s storage %p with corrupted header.\n"
               "Possible cause is duplicate free on same block or overwriting of header information.",
               name, addr );
    } // if
    #endif // __CFA_DEBUG__
    size = freeElem->blockSize;
    return false;
} // headers


static inline void * extend( size_t size ) with ( heapManager ) {
    lock( extlock __cfaabi_dbg_ctx2 );
    ptrdiff_t rem = heapRemaining - size;
    if ( rem < 0 ) {
        // If the size requested is bigger than the current remaining storage, increase the size of the heap.

        size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
        if ( sbrk( increase ) == (void *)-1 ) {
            unlock( extlock );
            errno = ENOMEM;
            return 0p;
        } // if
        #ifdef __STATISTICS__
        sbrk_calls += 1;
        sbrk_storage += increase;
        #endif // __STATISTICS__
        #ifdef __CFA_DEBUG__
        // Set new memory to garbage so subsequent uninitialized usages might fail.
        memset( (char *)heapEnd + heapRemaining, '\377', increase );
        #endif // __CFA_DEBUG__
        rem = heapRemaining + increase - size;
    } // if

    HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
    heapRemaining = rem;
    heapEnd = (char *)heapEnd + size;
    unlock( extlock );
    return block;
} // extend


size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) {
    size_t l = 0, m, h = dim;
    while ( l < h ) {
        m = (l + h) / 2;
        if ( (unsigned int &)(vals[m]) < key ) { // cast away const
            l = m + 1;
        } else {
            h = m;
        } // if
    } // while
    return l;
} // Bsearchl
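// For example, Bsearchl( 100, bucketSizes, maxBucketsUsed ) returns the index of the
// first bucket size >= 100 (index 5, the 112-byte bucket, under the 16-byte header
// assumption above); if key exceeds every entry the result is dim, so callers must
// bound-check, as the asserts in doMalloc do.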


static inline void * doMalloc( size_t size ) with ( heapManager ) {
    HeapManager.Storage * block;               // pointer to new block of storage

    // Look up size in the size list. Make sure the user request includes space for the header that must be allocated
    // along with the block and is a multiple of the alignment size.

    if ( unlikely( size > ~0ul - sizeof(HeapManager.Storage) ) ) return 0p;
    size_t tsize = size + sizeof(HeapManager.Storage);
    if ( likely( tsize < mmapStart ) ) {       // small size => sbrk
        size_t posn;
        #ifdef FASTLOOKUP
        if ( tsize < LookupSizes ) posn = lookup[tsize];
        else
        #endif // FASTLOOKUP
            posn = Bsearchl( (unsigned int)tsize, bucketSizes, (size_t)maxBucketsUsed );
        HeapManager.FreeHeader * freeElem = &freeLists[posn];
        // #ifdef FASTLOOKUP
        // if ( tsize < LookupSizes )
        //     freeElem = &freeLists[lookup[tsize]];
        // else
        // #endif // FASTLOOKUP
        //     freeElem = bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search
        // HeapManager.FreeHeader * freeElem =
        //     #ifdef FASTLOOKUP
        //     tsize < LookupSizes ? &freeLists[lookup[tsize]] :
        //     #endif // FASTLOOKUP
        //     bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search
        assert( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ?
        assert( tsize <= freeElem->blockSize ); // search failure ?
        tsize = freeElem->blockSize;           // total space needed for request

        // Spin until the lock is acquired for this particular size of block.

        #if BUCKETLOCK == SPINLOCK
        lock( freeElem->lock __cfaabi_dbg_ctx2 );
        block = freeElem->freeList;            // remove node from stack
        #else
        block = freeElem->freeList.pop();
        #endif // SPINLOCK
        if ( unlikely( block == 0p ) ) {       // no free block ?
            #if BUCKETLOCK == SPINLOCK
            unlock( freeElem->lock );
            #endif // SPINLOCK

            // Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more
            // and then carve it off.

            block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call
            if ( unlikely( block == 0p ) ) return 0p;
        #if BUCKETLOCK == SPINLOCK
        } else {
            freeElem->freeList = block->header.kind.real.next;
            unlock( freeElem->lock );
        #endif // SPINLOCK
        } // if

        block->header.kind.real.home = freeElem; // pointer back to free list of appropriate size
    } else {                                   // large size => mmap
        if ( unlikely( size > ~0ul - pageSize ) ) return 0p;
        tsize = libCeiling( tsize, pageSize ); // must be multiple of page size
        #ifdef __STATISTICS__
        __atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &mmap_storage, tsize, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__
        block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
        if ( block == (HeapManager.Storage *)MAP_FAILED ) {
            // Do not call strerror( errno ) as it may call malloc.
            abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu error:%d.", &heapManager, tsize, errno );
        } // if
        #ifdef __CFA_DEBUG__
        // Set new memory to garbage so subsequent uninitialized usages might fail.
        memset( block, '\377', tsize );
        #endif // __CFA_DEBUG__
        block->header.kind.real.blockSize = tsize; // storage size for munmap
    } // if

    void * addr = &(block->data);              // adjust off header to user bytes

    #ifdef __CFA_DEBUG__
    assert( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ?
    __atomic_add_fetch( &allocFree, tsize, __ATOMIC_SEQ_CST );
    if ( traceHeap() ) {
        enum { BufferSize = 64 };
        char helpText[BufferSize];
        int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize );
        // int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", addr, size );
        __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
    } // if
    #endif // __CFA_DEBUG__

    return addr;
} // doMalloc


static inline void doFree( void * addr ) with ( heapManager ) {
    #ifdef __CFA_DEBUG__
    if ( unlikely( heapManager.heapBegin == 0p ) ) {
        abort( "doFree( %p ) : internal error, called before heap is initialized.", addr );
    } // if
    #endif // __CFA_DEBUG__

    HeapManager.Storage.Header * header;
    HeapManager.FreeHeader * freeElem;
    size_t size, alignment;                    // not used (see realloc)

    if ( headers( "free", addr, header, freeElem, size, alignment ) ) { // mmapped ?
        #ifdef __STATISTICS__
        __atomic_add_fetch( &munmap_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &munmap_storage, size, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__
        if ( munmap( header, size ) == -1 ) {
            #ifdef __CFA_DEBUG__
            abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
                   "Possible cause is invalid pointer.",
                   addr );
            #endif // __CFA_DEBUG__
        } // if
    } else {
        #ifdef __CFA_DEBUG__
        // Set free memory to garbage so subsequent usages might fail.
        memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) );
        #endif // __CFA_DEBUG__

        #ifdef __STATISTICS__
        __atomic_add_fetch( &free_storage, size, __ATOMIC_SEQ_CST ); // atomic, like the other counters
        #endif // __STATISTICS__
        #if BUCKETLOCK == SPINLOCK
        lock( freeElem->lock __cfaabi_dbg_ctx2 ); // acquire spin lock
        header->kind.real.next = freeElem->freeList; // push on stack
        freeElem->freeList = (HeapManager.Storage *)header;
        unlock( freeElem->lock );              // release spin lock
        #else
        freeElem->freeList.push( *(HeapManager.Storage *)header );
        #endif // SPINLOCK
    } // if

    #ifdef __CFA_DEBUG__
    __atomic_add_fetch( &allocFree, -size, __ATOMIC_SEQ_CST );
    if ( traceHeap() ) {
        enum { BufferSize = 64 };
        char helpText[BufferSize];
        int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );
        __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
    } // if
    #endif // __CFA_DEBUG__
} // doFree


size_t prtFree( HeapManager & manager ) with ( manager ) {
    size_t total = 0;
    #ifdef __STATISTICS__
    __cfaabi_bits_acquire();
    __cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" );
    #endif // __STATISTICS__
    for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {
        size_t size = freeLists[i].blockSize;
        #ifdef __STATISTICS__
        unsigned int N = 0;
        #endif // __STATISTICS__

        #if BUCKETLOCK == SPINLOCK
        for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
        #else
        for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0p; p = p->header.kind.real.next.top ) {
        #endif // SPINLOCK
            total += size;
            #ifdef __STATISTICS__
            N += 1;
            #endif // __STATISTICS__
        } // for

        #ifdef __STATISTICS__
        __cfaabi_bits_print_nolock( STDERR_FILENO, "%7zu, %-7u  ", size, N );
        if ( (i + 1) % 8 == 0 ) __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" );
        #endif // __STATISTICS__
    } // for
    #ifdef __STATISTICS__
    __cfaabi_bits_print_nolock( STDERR_FILENO, "\ntotal free blocks:%zu\n", total );
    __cfaabi_bits_release();
    #endif // __STATISTICS__
    return (char *)heapEnd - (char *)heapBegin - total;
} // prtFree


static void ?{}( HeapManager & manager ) with ( manager ) {
    pageSize = sysconf( _SC_PAGESIZE );

    for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
        freeLists[i].blockSize = bucketSizes[i];
    } // for

    #ifdef FASTLOOKUP
    unsigned int idx = 0;
    for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
        if ( i > bucketSizes[idx] ) idx += 1;
        lookup[i] = idx;
    } // for
    #endif // FASTLOOKUP

    if ( setMmapStart( default_mmap_start() ) ) {
        abort( "HeapManager : internal error, mmap start initialization failure." );
    } // if
    heapExpand = default_heap_expansion();

    char * End = (char *)sbrk( 0 );
    sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End ); // move start of heap to multiple of alignment
    heapBegin = heapEnd = sbrk( 0 );           // get new start point
} // HeapManager


static void ^?{}( HeapManager & ) {
    #ifdef __STATISTICS__
    // if ( traceHeapTerm() ) {
    //     printStats();
    //     if ( prtFree() ) prtFree( heapManager, true );
    // } // if
    #endif // __STATISTICS__
} // ~HeapManager


static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_startup( void ) {
    #ifdef __CFA_DEBUG__
    if ( unlikely( heapBoot ) ) {              // check for recursion during system boot
        // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
        abort( "boot() : internal error, recursively invoked during system boot." );
    } // if
    heapBoot = true;
    #endif // __CFA_DEBUG__

    //assert( heapManager.heapBegin != 0 );
    //heapManager{};
    if ( heapManager.heapBegin == 0p ) heapManager{};
} // memory_startup

static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_shutdown( void ) {
    ^heapManager{};
} // memory_shutdown


static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics
    //assert( heapManager.heapBegin != 0 );
    if ( unlikely( heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ?
    void * addr = doMalloc( size );
    if ( unlikely( addr == 0p ) ) errno = ENOMEM; // POSIX
    return addr;
} // mallocNoStats


static inline void * callocNoStats( size_t noOfElems, size_t elemSize ) {
    size_t size = noOfElems * elemSize;
    char * addr = (char *)mallocNoStats( size );
    if ( unlikely( addr == 0p ) ) return 0p;

    HeapManager.Storage.Header * header;
    HeapManager.FreeHeader * freeElem;
    size_t bsize, alignment;
    bool mapped __attribute__(( unused )) = headers( "calloc", addr, header, freeElem, bsize, alignment );
    #ifndef __CFA_DEBUG__
    // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
    if ( ! mapped )
    #endif // __CFA_DEBUG__
        // Zero entire data space even when > than size => realloc without a new allocation and zero fill works.
        // <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size)
        // `-header  `-addr                                              `-size
        memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros

    header->kind.real.blockSize |= 2;          // mark as zero filled
    return addr;
} // callocNoStats
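// Flag-bit recap for the header fields used above: the low-order bit of
// kind.fake.alignment marks a fake header (cleared with "& -2"), and bit 1
// (value 2) of kind.real.blockSize marks zero-filled storage; it is set here
// with "|= 2" and stripped when sizes are read with "& -3".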


static inline void * memalignNoStats( size_t alignment, size_t size ) { // necessary for malloc statistics
    #ifdef __CFA_DEBUG__
    checkAlign( alignment );                   // check alignment
    #endif // __CFA_DEBUG__

    // if alignment <= default alignment, do normal malloc as two headers are unnecessary
    if ( unlikely( alignment <= libAlign() ) ) return mallocNoStats( size );

    // Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
    // administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
    //      .-------------v-----------------v----------------v----------,
    //      | Real Header | ... padding ... |   Fake Header  | data ... |
    //      `-------------^-----------------^-+--------------^----------'
    //      |<--------------------------------' offset/align |<-- alignment boundary

    // subtract libAlign() because it is already the minimum alignment
    // add sizeof(Storage) for fake header
    char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
    if ( unlikely( addr == 0p ) ) return addr;

    // address in the block of the "next" alignment address
    char * user = (char *)libCeiling( (uintptr_t)(addr + sizeof(HeapManager.Storage)), alignment );

    // address of header from malloc
    HeapManager.Storage.Header * realHeader = headerAddr( addr );
    // address of fake header *before* the alignment location
    HeapManager.Storage.Header * fakeHeader = headerAddr( user );
    // SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
    fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
    // SKULLDUGGERY: odd alignment implies fake header
    fakeHeader->kind.fake.alignment = alignment | 1;

    return user;
} // memalignNoStats
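// Worked example (assuming libAlign() == 16 and a 16-byte Storage header):
// memalignNoStats( 64, 100 ) requests 100 + 64 - 16 + 16 == 164 bytes, rounds the
// first address past the real header up to a 64-byte boundary for user, and writes
// the fake header just below user with the offset back to the real header and
// alignment 64 | 1.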


static inline void * cmemalignNoStats( size_t alignment, size_t noOfElems, size_t elemSize ) {
    size_t size = noOfElems * elemSize;
    char * addr = (char *)memalignNoStats( alignment, size );
    if ( unlikely( addr == 0p ) ) return 0p;
    HeapManager.Storage.Header * header;
    HeapManager.FreeHeader * freeElem;
    size_t bsize;
    bool mapped __attribute__(( unused )) = headers( "cmemalign", addr, header, freeElem, bsize, alignment );
    #ifndef __CFA_DEBUG__
    // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
    if ( ! mapped )
    #endif // __CFA_DEBUG__
        memset( addr, '\0', dataStorage( bsize, addr, header ) ); // set to zeros
    header->kind.real.blockSize |= 2;          // mark as zero filled

    return addr;
} // cmemalignNoStats


// supported mallopt options
#ifndef M_MMAP_THRESHOLD
#define M_MMAP_THRESHOLD (-1)
#endif // M_MMAP_THRESHOLD
#ifndef M_TOP_PAD
#define M_TOP_PAD (-2)
#endif // M_TOP_PAD


extern "C" {
    // The malloc() function allocates size bytes and returns a pointer to the allocated memory. The memory is not
    // initialized. If size is 0, then malloc() returns either 0p, or a unique pointer value that can later be
    // successfully passed to free().
    void * malloc( size_t size ) {
        #ifdef __STATISTICS__
        __atomic_add_fetch( &malloc_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &malloc_storage, size, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        return mallocNoStats( size );
    } // malloc

    // The calloc() function allocates memory for an array of nmemb elements of size bytes each and returns a pointer to
    // the allocated memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either 0p, or a
    // unique pointer value that can later be successfully passed to free().
    void * calloc( size_t noOfElems, size_t elemSize ) {
        #ifdef __STATISTICS__
        __atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &calloc_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        return callocNoStats( noOfElems, elemSize );
    } // calloc

    // The realloc() function changes the size of the memory block pointed to by ptr to size bytes. The contents will be
    // unchanged in the range from the start of the region up to the minimum of the old and new sizes. If the new size
    // is larger than the old size, the added memory will not be initialized. If ptr is 0p, then the call is
    // equivalent to malloc(size), for all values of size; if size is equal to zero, and ptr is not 0p, then the call
    // is equivalent to free(ptr). Unless ptr is 0p, it must have been returned by an earlier call to malloc(),
    // calloc() or realloc(). If the area pointed to was moved, a free(ptr) is done.
    void * realloc( void * oaddr, size_t size ) {
        #ifdef __STATISTICS__
        __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
        if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );

        HeapManager.Storage.Header * header;
        HeapManager.FreeHeader * freeElem;
        size_t bsize, oalign = 0;
        headers( "realloc", oaddr, header, freeElem, bsize, oalign );

        size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
        if ( size <= odsize && odsize <= size * 2 ) { // allow up to 50% wasted storage in smaller size
            // Do not know size of original allocation => cannot do 0 fill for any additional space because do not know
            // where to start filling, i.e., do not overwrite existing values in space.
            //
            // This case does not result in a new profiler entry because the previous one still exists and it must match with
            // the free for this memory. Hence, this realloc does not appear in the profiler output.
            return oaddr;
        } // if

        #ifdef __STATISTICS__
        __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        // change size and copy old content to new storage

        void * naddr;
        if ( unlikely( oalign != 0 ) ) {       // previous request memalign?
            if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
                naddr = cmemalignNoStats( oalign, 1, size ); // create new aligned area
            } else {
                naddr = memalignNoStats( oalign, size ); // create new aligned area
            } // if
        } else {
            if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
                naddr = callocNoStats( 1, size ); // create new area
            } else {
                naddr = mallocNoStats( size ); // create new area
            } // if
        } // if
        if ( unlikely( naddr == 0p ) ) return 0p;
        headers( "realloc", naddr, header, freeElem, bsize, oalign );
        size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
        // To preserve prior fill, the entire bucket must be copied versus the size.
        memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes
        free( oaddr );
        return naddr;
    } // realloc

    // The obsolete function memalign() allocates size bytes and returns a pointer to the allocated memory. The memory
    // address will be a multiple of alignment, which must be a power of two.
    void * memalign( size_t alignment, size_t size ) {
        #ifdef __STATISTICS__
        __atomic_add_fetch( &memalign_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &memalign_storage, size, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        return memalignNoStats( alignment, size );
    } // memalign


    // The cmemalign() function is the same as calloc() with memory alignment.
    void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {
        #ifdef __STATISTICS__
        __atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &cmemalign_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        return cmemalignNoStats( alignment, noOfElems, elemSize );
    } // cmemalign

    // The function aligned_alloc() is the same as memalign(), except for the added restriction that size should be a
    // multiple of alignment.
    void * aligned_alloc( size_t alignment, size_t size ) {
        return memalign( alignment, size );
    } // aligned_alloc


    // The function posix_memalign() allocates size bytes and places the address of the allocated memory in *memptr. The
    // address of the allocated memory will be a multiple of alignment, which must be a power of two and a multiple of
    // sizeof(void *). If size is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later
    // be successfully passed to free(3).
    int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
        if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
        * memptr = memalign( alignment, size );
        if ( unlikely( * memptr == 0p ) ) return ENOMEM;
        return 0;
    } // posix_memalign

    // The obsolete function valloc() allocates size bytes and returns a pointer to the allocated memory. The memory
    // address will be a multiple of the page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
    void * valloc( size_t size ) {
        return memalign( pageSize, size );
    } // valloc


    // The free() function frees the memory space pointed to by ptr, which must have been returned by a previous call to
    // malloc(), calloc() or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behavior
    // occurs. If ptr is 0p, no operation is performed.
    void free( void * addr ) {
        #ifdef __STATISTICS__
        __atomic_add_fetch( &free_calls, 1, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        if ( unlikely( addr == 0p ) ) {        // special case
            // #ifdef __CFA_DEBUG__
            // if ( traceHeap() ) {
            //     #define nullmsg "Free( 0x0 ) size:0\n"
            //     // Do not debug print free( 0 ), as it can cause recursive entry from sprintf.
            //     __cfaabi_dbg_write( nullmsg, sizeof(nullmsg) - 1 );
            // } // if
            // #endif // __CFA_DEBUG__
            return;
        } // exit

        doFree( addr );
    } // free


    // The malloc_alignment() function returns the alignment of the allocation.
    size_t malloc_alignment( void * addr ) {
        if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment
        HeapManager.Storage.Header * header = headerAddr( addr );
        if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
            return header->kind.fake.alignment & -2; // remove flag from value
        } else {
            return libAlign();                 // minimum alignment
        } // if
    } // malloc_alignment


    // The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc().
    bool malloc_zero_fill( void * addr ) {
        if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill
        HeapManager.Storage.Header * header = headerAddr( addr );
        if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
            header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset);
        } // if
        return (header->kind.real.blockSize & 2) != 0; // zero filled (calloc/cmemalign) ?
    } // malloc_zero_fill


    // The malloc_usable_size() function returns the number of usable bytes in the block pointed to by ptr, a pointer to
    // a block of memory allocated by malloc(3) or a related function.
    size_t malloc_usable_size( void * addr ) {
        if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size
        HeapManager.Storage.Header * header;
        HeapManager.FreeHeader * freeElem;
        size_t bsize, alignment;

        headers( "malloc_usable_size", addr, header, freeElem, bsize, alignment );
        return dataStorage( bsize, addr, header ); // data storage in bucket
    } // malloc_usable_size


    // The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and
    // related functions.
    void malloc_stats( void ) {
        #ifdef __STATISTICS__
        printStats();
        if ( prtFree() ) prtFree( heapManager );
        #endif // __STATISTICS__
    } // malloc_stats

    // The malloc_stats_fd() function changes the file descriptor where malloc_stats() writes the statistics.
    int malloc_stats_fd( int fd __attribute__(( unused )) ) {
        #ifdef __STATISTICS__
        int temp = statfd;
        statfd = fd;
        return temp;
        #else
        return -1;
        #endif // __STATISTICS__
    } // malloc_stats_fd


    // The mallopt() function adjusts parameters that control the behavior of the memory-allocation functions (see
    // malloc(3)). The param argument specifies the parameter to be modified, and value specifies the new value for that
    // parameter.
    int mallopt( int option, int value ) {
        choose( option ) {
          case M_TOP_PAD:
            if ( ! setHeapExpand( value ) ) return 1; // setters return false on success
          case M_MMAP_THRESHOLD:
            if ( ! setMmapStart( value ) ) return 1; // setters return false on success
        } // switch
        return 0;                              // error, unsupported
    } // mallopt
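    // Usage sketch (values illustrative only): send requests of 1M and larger to
    // mmap and extend the sbrk area 2M at a time; each call returns 1 on success:
    //     mallopt( M_MMAP_THRESHOLD, 1024 * 1024 );
    //     mallopt( M_TOP_PAD, 2 * 1024 * 1024 );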

    // The malloc_trim() function attempts to release free memory at the top of the heap (by calling sbrk(2) with a
    // suitable argument).
    int malloc_trim( size_t ) {
        return 0;                              // => impossible to release memory
    } // malloc_trim


    // The malloc_info() function exports an XML string that describes the current state of the memory-allocation
    // implementation in the caller. The string is printed on the file stream stream. The exported string includes
    // information about all arenas (see malloc(3)).
    int malloc_info( int options, FILE * stream ) {
        if ( options != 0 ) { errno = EINVAL; return -1; }
        return printStatsXML( stream );
    } // malloc_info


    // The malloc_get_state() function records the current state of all malloc(3) internal bookkeeping variables (but
    // not the actual contents of the heap or the state of malloc_hook(3) functions pointers). The state is recorded in
    // a system-dependent opaque data structure dynamically allocated via malloc(3), and a pointer to that data
    // structure is returned as the function result. (It is the caller's responsibility to free(3) this memory.)
    void * malloc_get_state( void ) {
        return 0p;                             // unsupported
    } // malloc_get_state


    // The malloc_set_state() function restores the state of all malloc(3) internal bookkeeping variables to the values
    // recorded in the opaque data structure pointed to by state.
    int malloc_set_state( void * ptr ) {
        return 0;                              // unsupported
    } // malloc_set_state
} // extern "C"


// Must have CFA linkage to overload with C linkage realloc.
void * realloc( void * oaddr, size_t nalign, size_t size ) {
    #ifdef __STATISTICS__
    __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
    #endif // __STATISTICS__

    if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
    if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );

    if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum
    #ifdef __CFA_DEBUG__
    else
        checkAlign( nalign );                  // check alignment
    #endif // __CFA_DEBUG__

    HeapManager.Storage.Header * header;
    HeapManager.FreeHeader * freeElem;
    size_t bsize, oalign = 0;
    headers( "realloc", oaddr, header, freeElem, bsize, oalign );

    size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket

    if ( oalign != 0 && (uintptr_t)oaddr % nalign == 0 ) { // has alignment and just happens to work out
        headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
        return realloc( oaddr, size );
    } // if

    #ifdef __STATISTICS__
    __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
    #endif // __STATISTICS__

    // change size and copy old content to new storage

    void * naddr;
    if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
        naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area
    } else {
        naddr = memalignNoStats( nalign, size ); // create new aligned area
    } // if
    if ( unlikely( naddr == 0p ) ) return 0p;  // allocation failed (mirrors the C realloc above)
    headers( "realloc", naddr, header, freeElem, bsize, oalign ); // recompute header/bucket for the new block
    size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
    // To preserve prior fill, the entire bucket must be copied versus the size.
    memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes
    free( oaddr );
    return naddr;
} // realloc


// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa -nodebug -O2 heap.cfa" //
// End: //