Changeset 95eb7cf


Timestamp:
Nov 22, 2019, 3:10:22 PM
Author:
Peter A. Buhr <pabuhr@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
d74369b
Parents:
0a25c34
Message:

major update of heap, especially realloc
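
In outline, the patch renames the checkFree* routines to prtFree*, replaces the literal 0 with 0p for null pointers, reorganizes the bucket-size table, factors the calloc/cmemalign bodies into callocNoStats/cmemalignNoStats, and reworks realloc so the zero-fill and alignment properties of the original block carry over to the resized block. A minimal behavioural sketch of the new realloc semantics, assuming the CFA heap in this changeset (a stock libc allocator guarantees neither property); the code is illustration only, not part of the diff:

	#include <assert.h>
	#include <malloc.h>                                     // memalign
	#include <stdint.h>
	#include <stdlib.h>

	int main() {
		// calloc marks the block zero filled; after growing it, this heap leaves
		// the added space zeroed as well (in-place case or callocNoStats path).
		char * p = calloc( 16, 1 );
		p = realloc( p, 4096 );
		for ( size_t i = 0; i < 4096; i += 1 ) assert( p[i] == '\0' );

		// memalign records the alignment in a fake header; realloc re-reads it,
		// so the resized block keeps 64-byte alignment under this heap.
		char * q = memalign( 64, 100 );
		q = realloc( q, 20000 );
		assert( (uintptr_t)q % 64 == 0 );

		free( p ); free( q );
	} // main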

File:
1 edited

  • libcfa/src/heap.cfa

    r0a25c34 r95eb7cf  
    1010// Created On       : Tue Dec 19 21:58:35 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Oct 18 07:42:09 2019
    13 // Update Count     : 556
     12// Last Modified On : Fri Nov 22 14:16:30 2019
     13// Update Count     : 626
    1414//
    1515
     
    3030#include "malloc.h"
    3131
     32#define MIN(x, y) (y > x ? x : y)
    3233
    3334static bool traceHeap = false;
     
    5051
    5152
    52 static bool checkFree = false;
    53 
    54 inline bool checkFree() {
    55         return checkFree;
    56 } // checkFree
    57 
    58 bool checkFreeOn() {
    59         bool temp = checkFree;
    60         checkFree = true;
     53static bool prtFree = false;
     54
     55inline bool prtFree() {
     56        return prtFree;
     57} // prtFree
     58
     59bool prtFreeOn() {
     60        bool temp = prtFree;
     61        prtFree = true;
    6162        return temp;
    62 } // checkFreeOn
    63 
    64 bool checkFreeOff() {
    65         bool temp = checkFree;
    66         checkFree = false;
     63} // prtFreeOn
     64
     65bool prtFreeOff() {
     66        bool temp = prtFree;
     67        prtFree = false;
    6768        return temp;
    68 } // checkFreeOff
     69} // prtFreeOff
    6970
    7071
     
    105106static unsigned int allocFree;                                                  // running total of allocations minus frees
    106107
    107 static void checkUnfreed() {
     108static void prtUnfreed() {
    108109        if ( allocFree != 0 ) {
    109110                // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
     
    112113                //                                      "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
    113114                //                                      (long int)getpid(), allocFree, allocFree ); // always print the UNIX pid
    114                 // __cfaabi_dbg_bits_write( helpText, len );
    115         } // if
    116 } // checkUnfreed
     115                // __cfaabi_dbg_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
     116        } // if
     117} // prtUnfreed
    117118
    118119extern "C" {
     
    123124        void heapAppStop() {                                                            // called by __cfaabi_appready_startdown
    124125                fclose( stdin ); fclose( stdout );
    125                 checkUnfreed();
     126                prtUnfreed();
    126127        } // heapAppStop
    127128} // extern "C"
     
    134135static unsigned int maxBucketsUsed;                                             // maximum number of buckets in use
    135136
    136 
    137 // #comment TD : This defined is significantly different from the __ALIGN__ define from locks.hfa
    138 #define ALIGN 16
    139137
    140138#define SPINLOCK 0
     
    147145// Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
    148146// Break recusion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
    149 enum { NoBucketSizes = 93 };                                                    // number of buckets sizes
     147enum { NoBucketSizes = 91 };                                                    // number of buckets sizes
    150148
    151149struct HeapManager {
     
    194192                        } kind; // Kind
    195193                } header; // Header
    196                 char pad[ALIGN - sizeof( Header )];
     194                char pad[libAlign() - sizeof( Header )];
    197195                char data[0];                                                                   // storage
    198196        }; // Storage
    199197
    200         static_assert( ALIGN >= sizeof( Storage ), "ALIGN < sizeof( Storage )" );
     198        static_assert( libAlign() >= sizeof( Storage ), "libAlign() < sizeof( Storage )" );
    201199
    202200        struct FreeHeader {
     
    230228// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
    231229static const unsigned int bucketSizes[] @= {                    // different bucket sizes
    232         16, 32, 48, 64,
    233         64 + sizeof(HeapManager.Storage), 96, 112, 128, 128 + sizeof(HeapManager.Storage), 160, 192, 224,
    234         256 + sizeof(HeapManager.Storage), 320, 384, 448, 512 + sizeof(HeapManager.Storage), 640, 768, 896,
    235         1_024 + sizeof(HeapManager.Storage), 1_536, 2_048 + sizeof(HeapManager.Storage), 2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), 6_144,
    236         8_192 + sizeof(HeapManager.Storage), 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360,
    237         16_384 + sizeof(HeapManager.Storage), 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720,
    238         32_768 + sizeof(HeapManager.Storage), 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440,
    239         65_536 + sizeof(HeapManager.Storage), 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880,
    240         131_072 + sizeof(HeapManager.Storage), 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760,
    241         262_144 + sizeof(HeapManager.Storage), 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520,
    242         524_288 + sizeof(HeapManager.Storage), 655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), 1_179_648, 1_310_720, 1_441_792,
    243         1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), 2_621_440, 3_145_728, 3_670_016,
    244         4_194_304 + sizeof(HeapManager.Storage)
     230        16, 32, 48, 64 + sizeof(HeapManager.Storage), // 4
     231        96, 112, 128 + sizeof(HeapManager.Storage), // 3
     232        160, 192, 224, 256 + sizeof(HeapManager.Storage), // 4
     233        320, 384, 448, 512 + sizeof(HeapManager.Storage), // 4
     234        640, 768, 896, 1_024 + sizeof(HeapManager.Storage), // 4
     235        1_536, 2_048 + sizeof(HeapManager.Storage), // 2
     236        2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), // 4
     237        6_144, 8_192 + sizeof(HeapManager.Storage), // 2
     238        9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(HeapManager.Storage), // 8
     239        18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(HeapManager.Storage), // 8
     240        36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(HeapManager.Storage), // 8
     241        73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(HeapManager.Storage), // 8
     242        147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(HeapManager.Storage), // 8
     243        294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(HeapManager.Storage), // 8
     244        655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), // 4
     245        1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), // 8
     246        2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(HeapManager.Storage), // 4
    245247};
    246248
     
    251253static unsigned char lookup[LookupSizes];                               // O(1) lookup for small sizes
    252254#endif // FASTLOOKUP
     255
    253256static int mmapFd = -1;                                                                 // fake or actual fd for anonymous file
    254 
    255 
    256257#ifdef __CFA_DEBUG__
    257258static bool heapBoot = 0;                                                               // detect recursion during boot
     
    259260static HeapManager heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing
    260261
    261 // #comment TD : The return type of this function should be commented
    262 static inline bool setMmapStart( size_t value ) {
    263   if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
    264         mmapStart = value;                                                                      // set global
    265 
    266         // find the closest bucket size less than or equal to the mmapStart size
    267         maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
    268         assert( maxBucketsUsed < NoBucketSizes );                       // subscript failure ?
    269         assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
    270         return false;
    271 } // setMmapStart
    272 
    273 
    274 static void ?{}( HeapManager & manager ) with ( manager ) {
    275         pageSize = sysconf( _SC_PAGESIZE );
    276 
    277         for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
    278                 freeLists[i].blockSize = bucketSizes[i];
    279         } // for
    280 
    281         #ifdef FASTLOOKUP
    282         unsigned int idx = 0;
    283         for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
    284                 if ( i > bucketSizes[idx] ) idx += 1;
    285                 lookup[i] = idx;
    286         } // for
    287         #endif // FASTLOOKUP
    288 
    289         if ( setMmapStart( default_mmap_start() ) ) {
    290                 abort( "HeapManager : internal error, mmap start initialization failure." );
    291         } // if
    292         heapExpand = default_heap_expansion();
    293 
    294         char * End = (char *)sbrk( 0 );
    295         sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End ); // move start of heap to multiple of alignment
    296         heapBegin = heapEnd = sbrk( 0 );                                        // get new start point
    297 } // HeapManager
    298 
    299 
    300 static void ^?{}( HeapManager & ) {
    301         #ifdef __STATISTICS__
    302         // if ( traceHeapTerm() ) {
    303         //      printStats();
    304         //      if ( checkfree() ) checkFree( heapManager, true );
    305         // } // if
    306         #endif // __STATISTICS__
    307 } // ~HeapManager
    308 
    309 
    310 static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
    311 void memory_startup( void ) {
    312         #ifdef __CFA_DEBUG__
    313         if ( unlikely( heapBoot ) ) {                                           // check for recursion during system boot
    314                 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
    315                 abort( "boot() : internal error, recursively invoked during system boot." );
    316         } // if
    317         heapBoot = true;
    318         #endif // __CFA_DEBUG__
    319 
    320         //assert( heapManager.heapBegin != 0 );
    321         //heapManager{};
    322         if ( heapManager.heapBegin == 0 ) heapManager{};
    323 } // memory_startup
    324 
    325 static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
    326 void memory_shutdown( void ) {
    327         ^heapManager{};
    328 } // memory_shutdown
    329 
    330262
    331263#ifdef __STATISTICS__
    332 static unsigned long long int mmap_storage;                             // heap statistics counters
     264// Heap statistics counters.
     265static unsigned long long int mmap_storage;
    333266static unsigned int mmap_calls;
    334267static unsigned long long int munmap_storage;
     
    348281static unsigned long long int realloc_storage;
    349282static unsigned int realloc_calls;
    350 
    351 static int statfd;                                                                              // statistics file descriptor (changed by malloc_stats_fd)
    352 
     283// Statistics file descriptor (changed by malloc_stats_fd).
     284static int statfd = STDERR_FILENO;                                              // default stderr
    353285
    354286// Use "write" because streams may be shutdown when calls are made.
    355287static void printStats() {
    356288        char helpText[512];
    357         __cfaabi_dbg_bits_print_buffer( helpText, sizeof(helpText),
     289        __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
    358290                                                                        "\nHeap statistics:\n"
    359291                                                                        "  malloc: calls %u / storage %llu\n"
     
    405337                                                sbrk_calls, sbrk_storage
    406338                );
    407         return write( fileno( stream ), helpText, len );        // -1 => error
     339        __cfaabi_bits_write( fileno( stream ), helpText, len ); // ensures all bytes written or exit
     340        return len;
    408341} // printStatsXML
    409342#endif // __STATISTICS__
     343
    410344
    411345// #comment TD : Is this the samething as Out-of-Memory?
     
    418352
    419353static inline void checkAlign( size_t alignment ) {
    420         if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) {
    421                 abort( "Alignment %zu for memory allocation is less than sizeof(void *) and/or not a power of 2.", alignment );
     354        if ( alignment < libAlign() || ! libPow2( alignment ) ) {
     355                abort( "Alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
    422356        } // if
    423357} // checkAlign
     
    431365
    432366
     367static inline bool setMmapStart( size_t value ) {               // true => mmapped, false => sbrk
     368  if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
     369        mmapStart = value;                                                                      // set global
     370
     371        // find the closest bucket size less than or equal to the mmapStart size
     372        maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
     373        assert( maxBucketsUsed < NoBucketSizes );                       // subscript failure ?
     374        assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
     375        return false;
     376} // setMmapStart
     377
     378
    433379static inline void checkHeader( bool check, const char * name, void * addr ) {
    434380        if ( unlikely( check ) ) {                                                      // bad address ?
     
    439385} // checkHeader
    440386
    441 // #comment TD : function should be commented and/or have a more evocative name
    442 //               this isn't either a check or a constructor which is what I would expect this function to be
    443 static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & size, size_t & alignment ) {
     387
     388static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) {
    444389        if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
    445390                size_t offset = header->kind.fake.offset;
     
    452397} // fakeHeader
    453398
    454 // #comment TD : Why is this a define
     399
     400// <-------+----------------------------------------------------> bsize (bucket size)
     401// |header |addr
     402//==================================================================================
     403//                                | alignment
     404// <-----------------<------------+-----------------------------> bsize (bucket size)
     405//                   |fake-header | addr
    455406#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))
    456407
    457 static inline bool headers( const char * name, void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
     408// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
     409// |header |addr
     410//==================================================================================
     411//                                | alignment
     412// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
     413//                   |fake-header |addr
     414#define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
     415
     416
     417static inline bool headers( const char * name __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
    458418        header = headerAddr( addr );
    459419
    460420        if ( unlikely( heapEnd < addr ) ) {                                     // mmapped ?
    461                 fakeHeader( header, size, alignment );
     421                fakeHeader( header, alignment );
    462422                size = header->kind.real.blockSize & -3;                // mmap size
    463423                return true;
     
    468428        #endif // __CFA_DEBUG__
    469429
    470         // #comment TD : This code looks weird...
    471         //               It's called as the first statement of both branches of the last if, with the same parameters in all cases
    472 
    473430        // header may be safe to dereference
    474         fakeHeader( header, size, alignment );
     431        fakeHeader( header, alignment );
    475432        #ifdef __CFA_DEBUG__
    476433        checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
     
    500457                        unlock( extlock );
    501458                        errno = ENOMEM;
    502                         return 0;
     459                        return 0p;
    503460                } // if
    504461                #ifdef __STATISTICS__
     
    541498        // along with the block and is a multiple of the alignment size.
    542499
    543   if ( unlikely( size > ~0ul - sizeof(HeapManager.Storage) ) ) return 0;
     500  if ( unlikely( size > ~0ul - sizeof(HeapManager.Storage) ) ) return 0p;
    544501        size_t tsize = size + sizeof(HeapManager.Storage);
    545502        if ( likely( tsize < mmapStart ) ) {                            // small size => sbrk
     
    574531                block = freeElem->freeList.pop();
    575532                #endif // SPINLOCK
    576                 if ( unlikely( block == 0 ) ) {                                 // no free block ?
     533                if ( unlikely( block == 0p ) ) {                                // no free block ?
    577534                        #if defined( SPINLOCK )
    578535                        unlock( freeElem->lock );
     
    583540
    584541                        block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call
    585   if ( unlikely( block == 0 ) ) return 0;
     542  if ( unlikely( block == 0p ) ) return 0p;
    586543                        #if defined( SPINLOCK )
    587544                } else {
     
    593550                block->header.kind.real.home = freeElem;                // pointer back to free list of apropriate size
    594551        } else {                                                                                        // large size => mmap
    595   if ( unlikely( size > ~0ul - pageSize ) ) return 0;
     552  if ( unlikely( size > ~0ul - pageSize ) ) return 0p;
    596553                tsize = libCeiling( tsize, pageSize );                  // must be multiple of page size
    597554                #ifdef __STATISTICS__
     
    611568        } // if
    612569
    613         void * area = &(block->data);                                           // adjust off header to user bytes
     570        void * addr = &(block->data);                                           // adjust off header to user bytes
    614571
    615572        #ifdef __CFA_DEBUG__
    616         assert( ((uintptr_t)area & (libAlign() - 1)) == 0 ); // minimum alignment ?
     573        assert( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ?
    617574        __atomic_add_fetch( &allocFree, tsize, __ATOMIC_SEQ_CST );
    618575        if ( traceHeap() ) {
    619576                enum { BufferSize = 64 };
    620577                char helpText[BufferSize];
    621                 int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", area, size, tsize );
    622                 // int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", area, size );
    623                 __cfaabi_dbg_bits_write( helpText, len );
     578                int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize );
     579                // int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", addr, size );
     580                __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
    624581        } // if
    625582        #endif // __CFA_DEBUG__
    626583
    627         return area;
     584        return addr;
    628585} // doMalloc
    629586
     
    631588static inline void doFree( void * addr ) with ( heapManager ) {
    632589        #ifdef __CFA_DEBUG__
    633         if ( unlikely( heapManager.heapBegin == 0 ) ) {
     590        if ( unlikely( heapManager.heapBegin == 0p ) ) {
    634591                abort( "doFree( %p ) : internal error, called before heap is initialized.", addr );
    635592        } // if
     
    677634                char helpText[BufferSize];
    678635                int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );
    679                 __cfaabi_dbg_bits_write( helpText, len );
     636                __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
    680637        } // if
    681638        #endif // __CFA_DEBUG__
     
    683640
    684641
    685 size_t checkFree( HeapManager & manager ) with ( manager ) {
     642size_t prtFree( HeapManager & manager ) with ( manager ) {
    686643        size_t total = 0;
    687644        #ifdef __STATISTICS__
    688         __cfaabi_dbg_bits_acquire();
    689         __cfaabi_dbg_bits_print_nolock( "\nBin lists (bin size : free blocks on list)\n" );
     645        __cfaabi_bits_acquire();
     646        __cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" );
    690647        #endif // __STATISTICS__
    691648        for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {
     
    696653
    697654                #if defined( SPINLOCK )
    698                 for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0; p = p->header.kind.real.next ) {
     655                for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
    699656                #else
    700                 for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0; p = p->header.kind.real.next.top ) {
     657                for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0p; p = p->header.kind.real.next.top ) {
    701658                #endif // SPINLOCK
    702659                        total += size;
     
    707664
    708665                #ifdef __STATISTICS__
    709                 __cfaabi_dbg_bits_print_nolock( "%7zu, %-7u  ", size, N );
    710                 if ( (i + 1) % 8 == 0 ) __cfaabi_dbg_bits_print_nolock( "\n" );
     666                __cfaabi_bits_print_nolock( STDERR_FILENO, "%7zu, %-7u  ", size, N );
     667                if ( (i + 1) % 8 == 0 ) __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" );
    711668                #endif // __STATISTICS__
    712669        } // for
    713670        #ifdef __STATISTICS__
    714         __cfaabi_dbg_bits_print_nolock( "\ntotal free blocks:%zu\n", total );
    715         __cfaabi_dbg_bits_release();
     671        __cfaabi_bits_print_nolock( STDERR_FILENO, "\ntotal free blocks:%zu\n", total );
     672        __cfaabi_bits_release();
    716673        #endif // __STATISTICS__
    717674        return (char *)heapEnd - (char *)heapBegin - total;
    718 } // checkFree
     675} // prtFree
     676
     677
     678static void ?{}( HeapManager & manager ) with ( manager ) {
     679        pageSize = sysconf( _SC_PAGESIZE );
     680
     681        for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
     682                freeLists[i].blockSize = bucketSizes[i];
     683        } // for
     684
     685        #ifdef FASTLOOKUP
     686        unsigned int idx = 0;
     687        for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
     688                if ( i > bucketSizes[idx] ) idx += 1;
     689                lookup[i] = idx;
     690        } // for
     691        #endif // FASTLOOKUP
     692
     693        if ( setMmapStart( default_mmap_start() ) ) {
     694                abort( "HeapManager : internal error, mmap start initialization failure." );
     695        } // if
     696        heapExpand = default_heap_expansion();
     697
     698        char * End = (char *)sbrk( 0 );
     699        sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End ); // move start of heap to multiple of alignment
     700        heapBegin = heapEnd = sbrk( 0 );                                        // get new start point
     701} // HeapManager
     702
     703
     704static void ^?{}( HeapManager & ) {
     705        #ifdef __STATISTICS__
     706        // if ( traceHeapTerm() ) {
     707        //      printStats();
     708        //      if ( prtfree() ) prtFree( heapManager, true );
     709        // } // if
     710        #endif // __STATISTICS__
     711} // ~HeapManager
     712
     713
     714static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
     715void memory_startup( void ) {
     716        #ifdef __CFA_DEBUG__
     717        if ( unlikely( heapBoot ) ) {                                           // check for recursion during system boot
     718                // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
     719                abort( "boot() : internal error, recursively invoked during system boot." );
     720        } // if
     721        heapBoot = true;
     722        #endif // __CFA_DEBUG__
     723
     724        //assert( heapManager.heapBegin != 0 );
     725        //heapManager{};
     726        if ( heapManager.heapBegin == 0p ) heapManager{};
     727} // memory_startup
     728
     729static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
     730void memory_shutdown( void ) {
     731        ^heapManager{};
     732} // memory_shutdown
    719733
    720734
    721735static inline void * mallocNoStats( size_t size ) {             // necessary for malloc statistics
    722736        //assert( heapManager.heapBegin != 0 );
    723         if ( unlikely( heapManager.heapBegin == 0 ) ) heapManager{}; // called before memory_startup ?
    724         void * area = doMalloc( size );
    725         if ( unlikely( area == 0 ) ) errno = ENOMEM;            // POSIX
    726         return area;
     737        if ( unlikely( heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ?
     738        void * addr = doMalloc( size );
     739        if ( unlikely( addr == 0p ) ) errno = ENOMEM;           // POSIX
     740        return addr;
    727741} // mallocNoStats
     742
     743
     744static inline void * callocNoStats( size_t noOfElems, size_t elemSize ) {
     745        size_t size = noOfElems * elemSize;
     746        char * addr = (char *)mallocNoStats( size );
     747  if ( unlikely( addr == 0p ) ) return 0p;
     748
     749        HeapManager.Storage.Header * header;
     750        HeapManager.FreeHeader * freeElem;
     751        size_t bsize, alignment;
     752        bool mapped __attribute__(( unused )) = headers( "calloc", addr, header, freeElem, bsize, alignment );
     753        #ifndef __CFA_DEBUG__
     754        // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
     755        if ( ! mapped )
     756        #endif // __CFA_DEBUG__
     757            // Zero entire data space even when > than size => realloc without a new allocation and zero fill works.
     758            // <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size)
     759            // `-header`-addr                      `-size
     760                memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros
     761
     762        header->kind.real.blockSize |= 2;                                       // mark as zero filled
     763        return addr;
     764} // callocNoStats
    728765
    729766
     
    745782        // subtract libAlign() because it is already the minimum alignment
    746783        // add sizeof(Storage) for fake header
    747         // #comment TD : this is the only place that calls doMalloc without calling mallocNoStats, why ?
    748         char * area = (char *)doMalloc( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
    749   if ( unlikely( area == 0 ) ) return area;
     784        char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
     785  if ( unlikely( addr == 0p ) ) return addr;
    750786
    751787        // address in the block of the "next" alignment address
    752         char * user = (char *)libCeiling( (uintptr_t)(area + sizeof(HeapManager.Storage)), alignment );
     788        char * user = (char *)libCeiling( (uintptr_t)(addr + sizeof(HeapManager.Storage)), alignment );
    753789
    754790        // address of header from malloc
    755         HeapManager.Storage.Header * realHeader = headerAddr( area );
     791        HeapManager.Storage.Header * realHeader = headerAddr( addr );
    756792        // address of fake header * before* the alignment location
    757793        HeapManager.Storage.Header * fakeHeader = headerAddr( user );
     
    763799        return user;
    764800} // memalignNoStats
     801
     802
     803static inline void * cmemalignNoStats( size_t alignment, size_t noOfElems, size_t elemSize ) {
     804        size_t size = noOfElems * elemSize;
     805        char * addr = (char *)memalignNoStats( alignment, size );
     806  if ( unlikely( addr == 0p ) ) return 0p;
     807        HeapManager.Storage.Header * header;
     808        HeapManager.FreeHeader * freeElem;
     809        size_t bsize;
     810        bool mapped __attribute__(( unused )) = headers( "cmemalign", addr, header, freeElem, bsize, alignment );
     811        #ifndef __CFA_DEBUG__
     812        // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
     813        if ( ! mapped )
     814        #endif // __CFA_DEBUG__
     815                memset( addr, '\0', dataStorage( bsize, addr, header ) ); // set to zeros
     816        header->kind.real.blockSize |= 2;                               // mark as zero filled
     817
     818        return addr;
     819} // cmemalignNoStats
    765820
    766821
     
    776831extern "C" {
    777832        // The malloc() function allocates size bytes and returns a pointer to the allocated memory. The memory is not
    778         // initialized. If size is 0, then malloc() returns either NULL, or a unique pointer value that can later be
     833        // initialized. If size is 0, then malloc() returns either 0p, or a unique pointer value that can later be
    779834        // successfully passed to free().
    780835        void * malloc( size_t size ) {
     
    788843
    789844        // The calloc() function allocates memory for an array of nmemb elements of size bytes each and returns a pointer to
    790         // the allocated memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either NULL, or a
     845        // the allocated memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either 0p, or a
    791846        // unique pointer value that can later be successfully passed to free().
    792847        void * calloc( size_t noOfElems, size_t elemSize ) {
    793                 size_t size = noOfElems * elemSize;
    794848                #ifdef __STATISTICS__
    795849                __atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST );
    796                 __atomic_add_fetch( &calloc_storage, size, __ATOMIC_SEQ_CST );
    797                 #endif // __STATISTICS__
    798 
    799                 char * area = (char *)mallocNoStats( size );
    800           if ( unlikely( area == 0 ) ) return 0;
     850                __atomic_add_fetch( &calloc_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST );
     851                #endif // __STATISTICS__
     852
     853                return callocNoStats( noOfElems, elemSize );
     854        } // calloc
     855
     856        // The realloc() function changes the size of the memory block pointed to by ptr to size bytes. The contents will be
     857        // unchanged in the range from the start of the region up to the minimum of the old and new sizes. If the new size
     858        // is larger than the old size, the added memory will not be initialized.  If ptr is 0p, then the call is
     859        // equivalent to malloc(size), for all values of size; if size is equal to zero, and ptr is not 0p, then the call
     860        // is equivalent to free(ptr). Unless ptr is 0p, it must have been returned by an earlier call to malloc(),
     861        // calloc() or realloc(). If the area pointed to was moved, a free(ptr) is done.
     862        void * realloc( void * oaddr, size_t size ) {
     863                #ifdef __STATISTICS__
     864                __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
     865                #endif // __STATISTICS__
     866
     867          if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
     868          if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
    801869
    802870                HeapManager.Storage.Header * header;
    803871                HeapManager.FreeHeader * freeElem;
    804                 size_t asize, alignment;
    805                 bool mapped __attribute__(( unused )) = headers( "calloc", area, header, freeElem, asize, alignment );
    806                 #ifndef __CFA_DEBUG__
    807                 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
    808                 if ( ! mapped )
    809                 #endif // __CFA_DEBUG__
    810                         memset( area, '\0', asize - sizeof(HeapManager.Storage) ); // set to zeros
    811 
    812                 header->kind.real.blockSize |= 2;                               // mark as zero filled
    813                 return area;
    814         } // calloc
    815 
    816         // #comment TD : Document this function
    817         void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {
    818                 size_t size = noOfElems * elemSize;
    819                 #ifdef __STATISTICS__
    820                 __atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
    821                 __atomic_add_fetch( &cmemalign_storage, size, __ATOMIC_SEQ_CST );
    822                 #endif // __STATISTICS__
    823 
    824                 char * area = (char *)memalignNoStats( alignment, size );
    825           if ( unlikely( area == 0 ) ) return 0;
    826                 HeapManager.Storage.Header * header;
    827                 HeapManager.FreeHeader * freeElem;
    828                 size_t asize;
    829                 bool mapped __attribute__(( unused )) = headers( "cmemalign", area, header, freeElem, asize, alignment );
    830                 #ifndef __CFA_DEBUG__
    831                 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
    832                 if ( ! mapped )
    833                         #endif // __CFA_DEBUG__
    834                         memset( area, '\0', asize - ( (char *)area - (char *)header ) ); // set to zeros
    835                 header->kind.real.blockSize |= 2;                               // mark as zero filled
    836 
    837                 return area;
    838         } // cmemalign
    839 
    840         // The realloc() function changes the size of the memory block pointed to by ptr to size bytes. The contents will be
    841         // unchanged in the range from the start of the region up to the minimum of the old and new sizes. If the new size
    842         // is larger than the old size, the added memory will not be initialized.  If ptr is NULL, then the call is
    843         // equivalent to malloc(size), for all values of size; if size is equal to zero, and ptr is not NULL, then the call
    844         // is equivalent to free(ptr). Unless ptr is NULL, it must have been returned by an earlier call to malloc(),
    845         // calloc() or realloc(). If the area pointed to was moved, a free(ptr) is done.
    846         void * realloc( void * addr, size_t size ) {
    847                 #ifdef __STATISTICS__
    848                 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
    849                 #endif // __STATISTICS__
    850 
    851           if ( unlikely( addr == 0 ) ) return mallocNoStats( size ); // special cases
    852           if ( unlikely( size == 0 ) ) { free( addr ); return 0; }
    853 
    854                 HeapManager.Storage.Header * header;
    855                 HeapManager.FreeHeader * freeElem;
    856                 size_t asize, alignment = 0;
    857                 headers( "realloc", addr, header, freeElem, asize, alignment );
    858 
    859                 size_t usize = asize - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block
    860                 if ( usize >= size ) {                                                  // already sufficient storage
     872                size_t bsize, oalign = 0;
     873                headers( "realloc", oaddr, header, freeElem, bsize, oalign );
     874
     875                size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
     876          if ( size <= odsize && odsize <= size * 2 ) { // allow up to 50% wasted storage in smaller size
     877                        // Do not know size of original allocation => cannot do 0 fill for any additional space because do not know
     878                        // where to start filling, i.e., do not overwrite existing values in space.
     879                        //
    861880                        // This case does not result in a new profiler entry because the previous one still exists and it must match with
    862881                        // the free for this memory.  Hence, this realloc does not appear in the profiler output.
    863                         return addr;
     882                        return oaddr;
    864883                } // if
    865884
     
    868887                #endif // __STATISTICS__
    869888
    870                 void * area;
    871                 if ( unlikely( alignment != 0 ) ) {                             // previous request memalign?
    872                         area = memalign( alignment, size );                     // create new aligned area
     889                // change size and copy old content to new storage
     890
     891                void * naddr;
     892                if ( unlikely( oalign != 0 ) ) {                                // previous request memalign?
     893                        if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
     894                                naddr = cmemalignNoStats( oalign, 1, size ); // create new aligned area
     895                        } else {
     896                                naddr = memalignNoStats( oalign, size ); // create new aligned area
     897                        } // if
    873898                } else {
    874                         area = mallocNoStats( size );                           // create new area
     899                        if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
     900                                naddr = callocNoStats( 1, size );               // create new area
     901                        } else {
     902                                naddr = mallocNoStats( size );                  // create new area
     903                        } // if
    875904                } // if
    876           if ( unlikely( area == 0 ) ) return 0;
    877                 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill (calloc/cmemalign) ?
    878                         assert( (header->kind.real.blockSize & 1) == 0 );
    879                         bool mapped __attribute__(( unused )) = headers( "realloc", area, header, freeElem, asize, alignment );
    880                         #ifndef __CFA_DEBUG__
    881                         // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
    882                         if ( ! mapped )
    883                         #endif // __CFA_DEBUG__
    884                                 memset( (char *)area + usize, '\0', asize - ( (char *)area - (char *)header ) - usize ); // zero-fill back part
    885                         header->kind.real.blockSize |= 2;                       // mark new request as zero fill
    886                 } // if
    887                 memcpy( area, addr, usize );                                    // copy bytes
    888                 free( addr );
    889                 return area;
     905          if ( unlikely( naddr == 0p ) ) return 0p;
     906                headers( "realloc", naddr, header, freeElem, bsize, oalign );
     907                size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket
     908                // To preserve prior fill, the entire bucket must be copied versus the size.
     909                memcpy( naddr, oaddr, MIN( odsize, ndsize ) );  // copy bytes
     910                free( oaddr );
     911                return naddr;
    890912        } // realloc
    891913
     
    898920                #endif // __STATISTICS__
    899921
    900                 void * area = memalignNoStats( alignment, size );
    901 
    902                 return area;
     922                return memalignNoStats( alignment, size );
    903923        } // memalign
     924
     925
     926        // The cmemalign() function is the same as calloc() with memory alignment.
     927        void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {
     928                #ifdef __STATISTICS__
     929                __atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
     930                __atomic_add_fetch( &cmemalign_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST );
     931                #endif // __STATISTICS__
     932
     933                return cmemalignNoStats( alignment, noOfElems, elemSize );
     934        } // cmemalign
    904935
    905936        // The function aligned_alloc() is the same as memalign(), except for the added restriction that size should be a
     
    912943        // The function posix_memalign() allocates size bytes and places the address of the allocated memory in *memptr. The
    913944        // address of the allocated memory will be a multiple of alignment, which must be a power of two and a multiple of
    914         // sizeof(void *). If size is 0, then posix_memalign() returns either NULL, or a unique pointer value that can later
     945        // sizeof(void *). If size is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later
    915946        // be successfully passed to free(3).
    916947        int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
    917948          if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
    918949                * memptr = memalign( alignment, size );
    919           if ( unlikely( * memptr == 0 ) ) return ENOMEM;
     950          if ( unlikely( * memptr == 0p ) ) return ENOMEM;
    920951                return 0;
    921952        } // posix_memalign
     
    930961        // The free() function frees the memory space pointed to by ptr, which must have been returned by a previous call to
    931962        // malloc(), calloc() or realloc().  Otherwise, or if free(ptr) has already been called before, undefined behavior
    932         // occurs. If ptr is NULL, no operation is performed.
     963        // occurs. If ptr is 0p, no operation is performed.
    933964        void free( void * addr ) {
    934965                #ifdef __STATISTICS__
     
    936967                #endif // __STATISTICS__
    937968
    938                 // #comment TD : To decrease nesting I would but the special case in the
    939                 //               else instead, plus it reads more naturally to have the
    940                 //               short / normal case instead
    941                 if ( unlikely( addr == 0 ) ) {                                  // special case
    942                         #ifdef __CFA_DEBUG__
    943                         if ( traceHeap() ) {
    944                                 #define nullmsg "Free( 0x0 ) size:0\n"
    945                                 // Do not debug print free( 0 ), as it can cause recursive entry from sprintf.
    946                                 __cfaabi_dbg_bits_write( nullmsg, sizeof(nullmsg) - 1 );
    947                         } // if
    948                         #endif // __CFA_DEBUG__
     969          if ( unlikely( addr == 0p ) ) {                                       // special case
     970                        // #ifdef __CFA_DEBUG__
     971                        // if ( traceHeap() ) {
     972                        //      #define nullmsg "Free( 0x0 ) size:0\n"
     973                        //      // Do not debug print free( 0 ), as it can cause recursive entry from sprintf.
     974                        //      __cfaabi_dbg_write( nullmsg, sizeof(nullmsg) - 1 );
     975                        // } // if
     976                        // #endif // __CFA_DEBUG__
    949977                        return;
    950978                } // exit
     
    953981        } // free
    954982
    955         // The mallopt() function adjusts parameters that control the behavior of the memory-allocation functions (see
    956         // malloc(3)). The param argument specifies the parameter to be modified, and value specifies the new value for that
    957         // parameter.
    958         int mallopt( int option, int value ) {
    959                 choose( option ) {
    960                   case M_TOP_PAD:
    961                         if ( setHeapExpand( value ) ) fallthru default;
    962                   case M_MMAP_THRESHOLD:
    963                         if ( setMmapStart( value ) ) fallthru default;
    964                   default:
    965                         // #comment TD : 1 for unsopported feels wrong
    966                         return 1;                                                                       // success, or unsupported
    967                 } // switch
    968                 return 0;                                                                               // error
    969         } // mallopt
    970 
    971         // The malloc_trim() function attempts to release free memory at the top of the heap (by calling sbrk(2) with a
    972         // suitable argument).
    973         int malloc_trim( size_t ) {
    974                 return 0;                                                                               // => impossible to release memory
    975         } // malloc_trim
    976 
    977         // The malloc_usable_size() function returns the number of usable bytes in the block pointed to by ptr, a pointer to
    978         // a block of memory allocated by malloc(3) or a related function.
    979         size_t malloc_usable_size( void * addr ) {
    980           if ( unlikely( addr == 0 ) ) return 0;                        // null allocation has 0 size
    981 
    982                 HeapManager.Storage.Header * header;
    983                 HeapManager.FreeHeader * freeElem;
    984                 size_t size, alignment;
    985 
    986                 headers( "malloc_usable_size", addr, header, freeElem, size, alignment );
    987                 size_t usize = size - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block
    988                 return usize;
    989         } // malloc_usable_size
    990 
    991983
    992984    // The malloc_alignment() function returns the alignment of the allocation.
    993985        size_t malloc_alignment( void * addr ) {
    994           if ( unlikely( addr == 0 ) ) return libAlign();       // minimum alignment
     986          if ( unlikely( addr == 0p ) ) return libAlign();      // minimum alignment
    995987                HeapManager.Storage.Header * header = headerAddr( addr );
    996988                if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
     
    1004996    // The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc().
    1005997        bool malloc_zero_fill( void * addr ) {
    1006           if ( unlikely( addr == 0 ) ) return false;            // null allocation is not zero fill
     998          if ( unlikely( addr == 0p ) ) return false;           // null allocation is not zero fill
    1007999                HeapManager.Storage.Header * header = headerAddr( addr );
    10081000                if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
     
    10131005
    10141006
     1007        // The malloc_usable_size() function returns the number of usable bytes in the block pointed to by ptr, a pointer to
     1008        // a block of memory allocated by malloc(3) or a related function.
     1009        size_t malloc_usable_size( void * addr ) {
     1010          if ( unlikely( addr == 0p ) ) return 0;                       // null allocation has 0 size
     1011                HeapManager.Storage.Header * header;
     1012                HeapManager.FreeHeader * freeElem;
     1013                size_t bsize, alignment;
     1014
     1015                headers( "malloc_usable_size", addr, header, freeElem, bsize, alignment );
     1016                return dataStorage( bsize, addr, header );      // data storage in bucket
     1017        } // malloc_usable_size
     1018
     1019
    10151020    // The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and
    10161021    // related functions.
     
    10181023                #ifdef __STATISTICS__
    10191024                printStats();
    1020                 if ( checkFree() ) checkFree( heapManager );
     1025                if ( prtFree() ) prtFree( heapManager );
    10211026                #endif // __STATISTICS__
    10221027        } // malloc_stats
    10231028
    10241029        // The malloc_stats_fd() function changes the file descripter where malloc_stats() writes the statistics.
    1025         int malloc_stats_fd( int fd ) {
     1030        int malloc_stats_fd( int fd __attribute__(( unused )) ) {
    10261031                #ifdef __STATISTICS__
    10271032                int temp = statfd;
     
    10331038        } // malloc_stats_fd
    10341039
     1040
     1041        // The mallopt() function adjusts parameters that control the behavior of the memory-allocation functions (see
     1042        // malloc(3)). The param argument specifies the parameter to be modified, and value specifies the new value for that
     1043        // parameter.
     1044        int mallopt( int option, int value ) {
     1045                choose( option ) {
     1046                  case M_TOP_PAD:
     1047                        if ( setHeapExpand( value ) ) return 1;
     1048                  case M_MMAP_THRESHOLD:
     1049                        if ( setMmapStart( value ) ) return 1;
     1050                } // switch
     1051                return 0;                                                                               // error, unsupported
     1052        } // mallopt
     1053
     1054        // The malloc_trim() function attempts to release free memory at the top of the heap (by calling sbrk(2) with a
     1055        // suitable argument).
     1056        int malloc_trim( size_t ) {
     1057                return 0;                                                                               // => impossible to release memory
     1058        } // malloc_trim
     1059
     1060
    10351061        // The malloc_info() function exports an XML string that describes the current state of the memory-allocation
    10361062        // implementation in the caller.  The string is printed on the file stream stream.  The exported string includes
    10371063        // information about all arenas (see malloc(3)).
    10381064        int malloc_info( int options, FILE * stream ) {
     1065                if ( options != 0 ) { errno = EINVAL; return -1; }
    10391066                return printStatsXML( stream );
    10401067        } // malloc_info
     
    10461073        // structure is returned as the function result.  (It is the caller's responsibility to free(3) this memory.)
    10471074        void * malloc_get_state( void ) {
    1048                 return 0;                                                                               // unsupported
     1075                return 0p;                                                                              // unsupported
    10491076        } // malloc_get_state
    10501077
     
    10581085
    10591086
     1087// Must have CFA linkage to overload with C linkage realloc.
     1088void * realloc( void * oaddr, size_t nalign, size_t size ) {
     1089    #ifdef __STATISTICS__
     1090        __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
     1091    #endif // __STATISTICS__
     1092
     1093  if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
     1094  if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
     1095
     1096    if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum
     1097        #ifdef __CFA_DEBUG__
     1098    else
     1099                checkAlign( nalign );                                                   // check alignment
     1100        #endif // __CFA_DEBUG__
     1101
     1102        HeapManager.Storage.Header * header;
     1103        HeapManager.FreeHeader * freeElem;
     1104        size_t bsize, oalign = 0;
     1105        headers( "realloc", oaddr, header, freeElem, bsize, oalign );
     1106
     1107    size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
     1108
     1109  if ( oalign != 0 && (uintptr_t)oaddr % nalign == 0 ) { // has alignment and just happens to work out
     1110                headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
     1111                return realloc( oaddr, size );
     1112    } // if
     1113
     1114    #ifdef __STATISTICS__
     1115        __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
     1116    #endif // __STATISTICS__
     1117
     1118    // change size and copy old content to new storage
     1119
     1120    void * naddr;
     1121    if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
     1122        naddr = cmemalignNoStats( nalign, 1, size );    // create new aligned area
     1123    } else {
     1124        naddr = memalignNoStats( nalign, size );                // create new aligned area
     1125    } // if
     1126    size_t ndsize = dataStorage( bsize, naddr, header );        // data storage avilable in bucket
     1127        // To preserve prior fill, the entire bucket must be copied versus the size.
     1128    memcpy( naddr, oaddr, MIN( odsize, ndsize ) );              // copy bytes
     1129    free( oaddr );
     1130    return naddr;
     1131} // realloc
     1132
     1133
    10601134// Local Variables: //
    10611135// tab-width: 4 //
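
The final hunk also adds a CFA-linkage realloc( oaddr, nalign, size ) that resizes a block and imposes a (possibly new) alignment in one call, reusing the cmemalignNoStats/memalignNoStats helpers above. A hedged usage sketch; the stdlib.hfa header name and the assumption that this overload is exported to user code are mine, not part of the changeset:

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.hfa>                                   // CFA allocation interface (assumed)

	int main() {
		char * p = (char *)malloc( 100 );                   // plain block, minimum alignment
		// Resize to 4096 bytes with 256-byte alignment: the data is copied to a
		// new, suitably aligned block and the old block is freed.
		p = (char *)realloc( p, 256, 4096 );
		assert( (uintptr_t)p % 256 == 0 );
		free( p );
	} // main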