Changeset 22f94a4 for libcfa/src/heap.cfa
- Timestamp: Aug 11, 2020, 4:40:15 PM (5 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 0d070ca
- Parents: 07d867b (diff), 129674b (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 1 edited
  - libcfa/src/heap.cfa (modified) (58 diffs)
Legend: unmodified context lines are unprefixed; lines removed in this changeset are prefixed with -; added lines are prefixed with +; … marks unchanged lines omitted between hunks.
libcfa/src/heap.cfa
--- libcfa/src/heap.cfa (r07d867b)
+++ libcfa/src/heap.cfa (r22f94a4)

 // file "LICENCE" distributed with Cforall.
 //
-// heap.c --
+// heap.cfa --
 //
 // Author           : Peter A. Buhr
 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Wed May  6 17:29:26 2020
-// Update Count     : 727
+// Last Modified On : Sun Aug  9 12:23:20 2020
+// Update Count     : 894
 //

…
 #include <string.h>                               // memset, memcpy
 #include <limits.h>                               // ULONG_MAX
-extern "C" {
+#include <malloc.h>                               // memalign, malloc_usable_size
 #include <sys/mman.h>                             // mmap, munmap
-} // extern "C"
-
-#include "bits/align.hfa"                         // libPow2
+
+#include "bits/align.hfa"                         // libAlign
 #include "bits/defs.hfa"                          // likely, unlikely
 #include "bits/locks.hfa"                         // __spinlock_t
 #include "startup.hfa"                            // STARTUP_PRIORITY_MEMORY
 //#include "stdlib.hfa"                           // bsearchl
-#include "malloc.h"
 #include "bitmanip.hfa"                           // ceiling
…
 };

+size_t default_mmap_start() __attribute__(( weak )) {
+	return __CFA_DEFAULT_MMAP_START__;
+} // default_mmap_start
+
 size_t default_heap_expansion() __attribute__(( weak )) {
 	return __CFA_DEFAULT_HEAP_EXPANSION__;
 } // default_heap_expansion

-size_t default_mmap_start() __attribute__(( weak )) {
-	return __CFA_DEFAULT_MMAP_START__;
-} // default_mmap_start
-

 #ifdef __CFA_DEBUG__
-static unsigned int allocFree;                    // running total of allocations minus frees
+static size_t allocUnfreed;                       // running total of allocations minus frees

 static void prtUnfreed() {
-	if ( allocFree != 0 ) {
+	if ( allocUnfreed != 0 ) {
 		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
 		char helpText[512];
-		int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"
+		int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n"
 							"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
-							(long int)getpid(), allocFree, allocFree );   // always print the UNIX pid
+							(long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid
 		__cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
 	} // if
…
 extern "C" {
 	void heapAppStart() {                         // called by __cfaabi_appready_startup
-		allocFree = 0;
+		allocUnfreed = 0;
 	} // heapAppStart
…
 #define LOCKFREE 1
 #define BUCKETLOCK SPINLOCK
-#if BUCKETLOCK == LOCKFREE
-#include <uStackLF.h>
+#if   BUCKETLOCK == SPINLOCK
+#elif BUCKETLOCK == LOCKFREE
+#include <stackLockFree.hfa>
+#else
+	#error undefined lock type for bucket lock
 #endif // LOCKFREE

…

 struct HeapManager {
-	// struct FreeHeader;                         // forward declaration
-
 	struct Storage {
 		struct Header {                           // header
…
 			struct {                              // 4-byte word => 8-byte header, 8-byte word => 16-byte header
 				#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4
-				uint32_t padding;                 // unused, force home/blocksize to overlay alignment in fake header
+				uint64_t padding;                 // unused, force home/blocksize to overlay alignment in fake header
 				#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4

 				union {
-					//FreeHeader * home;          // allocated block points back to home locations (must overlay alignment)
+					// FreeHeader * home;         // allocated block points back to home locations (must overlay alignment)
 					// 2nd low-order bit => zero filled
 					void * home;                  // allocated block points back to home locations (must overlay alignment)
 					size_t blockSize;             // size for munmap (must overlay alignment)
-					#if BUCKLOCK == SPINLOCK
+					#if BUCKETLOCK == SPINLOCK
 					Storage * next;               // freed block points next freed block of same size
 					#endif // SPINLOCK
 				};
+				size_t size;                      // allocation size in bytes

 				#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
-				uint32_t padding;                 // unused, force home/blocksize to overlay alignment in fake header
+				uint64_t padding;                 // unused, force home/blocksize to overlay alignment in fake header
 				#endif // __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
 			};
-			// future code
-			#if BUCKLOCK == LOCKFREE
-			Stack<Storage>::Link next;            // freed block points next freed block of same size (double-wide)
+			#if BUCKETLOCK == LOCKFREE
+			Link(Storage) next;                   // freed block points next freed block of same size (double-wide)
 			#endif // LOCKFREE
 		};
 	} real; // RealHeader
+
 	struct FakeHeader {
 		#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-		// 1st low-order bit => fake header & alignment
-		uint32_t alignment;
+		uint32_t alignment;                       // 1st low-order bit => fake header & alignment
 		#endif // __ORDER_LITTLE_ENDIAN__
…
 	} fake; // FakeHeader
 } kind; // Kind
-size_t size;                                      // allocation size in bytes
 } header; // Header
 char pad[libAlign() - sizeof( Header )];
…

 struct FreeHeader {
-	#if BUCKLOCK == SPINLOCK
+	#if BUCKETLOCK == SPINLOCK
 	__spinlock_t lock;                            // must be first field for alignment
 	Storage * freeList;
-	#elif BUCKLOCK == LOCKFREE
-	// future code
-	StackLF<Storage> freeList;
 	#else
-	#error undefined lock type for bucket lock
-	#endif // SPINLOCK
+	StackLF(Storage) freeList;
+	#endif // BUCKETLOCK
 	size_t blockSize;                             // size of allocations on this list
 }; // FreeHeader
…
 }; // HeapManager

+#if BUCKETLOCK == LOCKFREE
+static inline {
+	Link(HeapManager.Storage) * ?`next( HeapManager.Storage * this ) { return &this->header.kind.real.next; }
+	void ?{}( HeapManager.FreeHeader & ) {}
+	void ^?{}( HeapManager.FreeHeader & ) {}
+} // distribution
+#endif // LOCKFREE
+
 static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return freeheader.blockSize; }

…
 #define __STATISTICS__

-// Bucket size must be multiple of 16.
-// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
+// Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16.
+// Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size.
+// malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed.
 static const unsigned int bucketSizes[] @= {      // different bucket sizes
-	16, 32, 48, 64 + sizeof(HeapManager.Storage), // 4
-	96, 112, 128 + sizeof(HeapManager.Storage), // 3
+	16 + sizeof(HeapManager.Storage), 32 + sizeof(HeapManager.Storage), 48 + sizeof(HeapManager.Storage), 64 + sizeof(HeapManager.Storage), // 4
+	96 + sizeof(HeapManager.Storage), 112 + sizeof(HeapManager.Storage), 128 + sizeof(HeapManager.Storage), // 3
 	160, 192, 224, 256 + sizeof(HeapManager.Storage), // 4
 	320, 384, 448, 512 + sizeof(HeapManager.Storage), // 4
…
 };

-static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" );
+static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" );

 #ifdef FASTLOOKUP
…
 static bool heapBoot = 0;                         // detect recursion during boot
 #endif // __CFA_DEBUG__
+
+// The constructor for heapManager is called explicitly in memory_startup.
 static HeapManager heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing

…
 #ifdef __STATISTICS__
 // Heap statistics counters.
+static unsigned int malloc_calls;
+static unsigned long long int malloc_storage;
+static unsigned int aalloc_calls;
+static unsigned long long int aalloc_storage;
+static unsigned int calloc_calls;
+static unsigned long long int calloc_storage;
+static unsigned int memalign_calls;
+static unsigned long long int memalign_storage;
+static unsigned int amemalign_calls;
+static unsigned long long int amemalign_storage;
+static unsigned int cmemalign_calls;
+static unsigned long long int cmemalign_storage;
+static unsigned int resize_calls;
+static unsigned long long int resize_storage;
+static unsigned int realloc_calls;
+static unsigned long long int realloc_storage;
+static unsigned int free_calls;
+static unsigned long long int free_storage;
+static unsigned int mmap_calls;
 static unsigned long long int mmap_storage;
-static unsigned int mmap_calls;
+static unsigned int munmap_calls;
 static unsigned long long int munmap_storage;
-static unsigned int munmap_calls;
+static unsigned int sbrk_calls;
 static unsigned long long int sbrk_storage;
-static unsigned int sbrk_calls;
-static unsigned long long int malloc_storage;
-static unsigned int malloc_calls;
-static unsigned long long int free_storage;
-static unsigned int free_calls;
-static unsigned long long int aalloc_storage;
-static unsigned int aalloc_calls;
-static unsigned long long int calloc_storage;
-static unsigned int calloc_calls;
-static unsigned long long int memalign_storage;
-static unsigned int memalign_calls;
-static unsigned long long int amemalign_storage;
-static unsigned int amemalign_calls;
-static unsigned long long int cmemalign_storage;
-static unsigned int cmemalign_calls;
-static unsigned long long int resize_storage;
-static unsigned int resize_calls;
-static unsigned long long int realloc_storage;
-static unsigned int realloc_calls;
 // Statistics file descriptor (changed by malloc_stats_fd).
-static int statfd = STDERR_FILENO;                // default stderr
+static int stat_fd = STDERR_FILENO;               // default stderr

 // Use "write" because streams may be shutdown when calls are made.
…
 					  "  sbrk: calls %u / storage %llu\n",
 					  malloc_calls, malloc_storage,
-					  aalloc_calls, calloc_storage,
+					  aalloc_calls, aalloc_storage,
 					  calloc_calls, calloc_storage,
 					  memalign_calls, memalign_storage,
…


-// static inline void noMemory() {
-// 	abort( "Heap memory exhausted at %zu bytes.\n"
-// 		   "Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
-// 		   ((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
-// } // noMemory
-
-
 // thunk problem
 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) {
…

 static inline void checkAlign( size_t alignment ) {
-	if ( alignment < libAlign() || ! libPow2( alignment ) ) {
+	if ( alignment < libAlign() || ! is_pow2( alignment ) ) {
 		abort( "Alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
 	} // if
…
 		#endif // __CFA_DEBUG__
 		header = realHeader( header );            // backup from fake to real header
+	} else {
+		alignment = libAlign();                   // => no fake header
 	} // if
 } // fakeHeader

-static inline bool headers( const char name[] __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
+static inline bool headers( const char name[] __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem,
+							size_t & size, size_t & alignment ) with( heapManager ) {
 	header = headerAddr( addr );

-	if ( unlikely( heapEnd < addr ) ) {           // mmapped ?
+	if ( unlikely( heapEnd < addr ) ) {           // mmapped ?
 		fakeHeader( header, alignment );
 		size = header->kind.real.blockSize & -3;  // mmap size
…
 } // headers

-
-static inline void * extend( size_t size ) with ( heapManager ) {
+#define NO_MEMORY_MSG "insufficient heap memory available for allocating %zd new bytes."
+
+static inline void * extend( size_t size ) with( heapManager ) {
 	lock( extlock __cfaabi_dbg_ctx2 );
 	ptrdiff_t rem = heapRemaining - size;
…
 	// If the size requested is bigger than the current remaining storage, increase the size of the heap.

-	size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
-	if ( sbrk( increase ) == (void *)-1 ) {
+	size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() );
+	if ( sbrk( increase ) == (void *)-1 ) {       // failed, no memory ?
 		unlock( extlock );
-		errno = ENOMEM;
-		return 0p;
+		abort( NO_MEMORY_MSG, size );             // give up
 	} // if
 	#ifdef __STATISTICS__
…


-static inline void * doMalloc( size_t size ) with ( heapManager ) {
+static inline void * doMalloc( size_t size ) with( heapManager ) {
 	HeapManager.Storage * block;                  // pointer to new block of storage

…
 		posn = Bsearchl( (unsigned int)tsize, bucketSizes, (size_t)maxBucketsUsed );
 		HeapManager.FreeHeader * freeElem = &freeLists[posn];
-		// #ifdef FASTLOOKUP
-		// if ( tsize < LookupSizes )
-		// 	freeElem = &freeLists[lookup[tsize]];
-		// else
-		// #endif // FASTLOOKUP
-		// 	freeElem = bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search
-		// HeapManager.FreeHeader * freeElem =
-		// 	#ifdef FASTLOOKUP
-		// 	tsize < LookupSizes ? &freeLists[lookup[tsize]] :
-		// 	#endif // FASTLOOKUP
-		// 	bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search
-		assert( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ?
-		assert( tsize <= freeElem->blockSize );   // search failure ?
+		verify( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ?
+		verify( tsize <= freeElem->blockSize );   // search failure ?
 		tsize = freeElem->blockSize;              // total space needed for request

 		// Spin until the lock is acquired for this particular size of block.

-		#if defined( SPINLOCK )
+		#if BUCKETLOCK == SPINLOCK
 		lock( freeElem->lock __cfaabi_dbg_ctx2 );
 		block = freeElem->freeList;               // remove node from stack
 		#else
-		block = freeElem->freeList.pop();
-		#endif // SPINLOCK
+		block = pop( freeElem->freeList );
+		#endif // BUCKETLOCK
 		if ( unlikely( block == 0p ) ) {          // no free block ?
-			#if defined( SPINLOCK )
+			#if BUCKETLOCK == SPINLOCK
 			unlock( freeElem->lock );
-			#endif // SPINLOCK
+			#endif // BUCKETLOCK

 			// Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more
…

 			block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call
-			if ( unlikely( block == 0p ) ) return 0p;
-		#if defined( SPINLOCK )
+		#if BUCKETLOCK == SPINLOCK
 		} else {
 			freeElem->freeList = block->header.kind.real.next;
 			unlock( freeElem->lock );
-		#endif // SPINLOCK
+		#endif // BUCKETLOCK
 		} // if

…
 	} else {                                      // large size => mmap
 		if ( unlikely( size > ULONG_MAX - pageSize ) ) return 0p;
-		tsize = libCeiling( tsize, pageSize );    // must be multiple of page size
+		tsize = ceiling2( tsize, pageSize );      // must be multiple of page size
 		#ifdef __STATISTICS__
 		__atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
 		__atomic_add_fetch( &mmap_storage, tsize, __ATOMIC_SEQ_CST );
 		#endif // __STATISTICS__
+
 		block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
-		if ( block == (HeapManager.Storage *)MAP_FAILED ) {
+		if ( block == (HeapManager.Storage *)MAP_FAILED ) { // failed ?
+			if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
 			// Do not call strerror( errno ) as it may call malloc.
 			abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu error:%d.", &heapManager, tsize, errno );
-		} // if
+		} //if
 		#ifdef __CFA_DEBUG__
 		// Set new memory to garbage so subsequent uninitialized usages might fail.
…
 	} // if

-	block->header.size = size;                    // store allocation size
+	block->header.kind.real.size = size;          // store allocation size
 	void * addr = &(block->data);                 // adjust off header to user bytes
+	verify( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ?

 	#ifdef __CFA_DEBUG__
-	assert( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ?
-	__atomic_add_fetch( &allocFree, tsize, __ATOMIC_SEQ_CST );
+	__atomic_add_fetch( &allocUnfreed, tsize, __ATOMIC_SEQ_CST );
 	if ( traceHeap() ) {
 		enum { BufferSize = 64 };
 		char helpText[BufferSize];
 		int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize );
-		// int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", addr, size );
 		__cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
 	} // if
…


-static inline void doFree( void * addr ) with ( heapManager ) {
+static inline void doFree( void * addr ) with( heapManager ) {
 	#ifdef __CFA_DEBUG__
 	if ( unlikely( heapManager.heapBegin == 0p ) ) {
…
 		free_storage += size;
 		#endif // __STATISTICS__
-		#if defined( SPINLOCK )
+		#if BUCKETLOCK == SPINLOCK
 		lock( freeElem->lock __cfaabi_dbg_ctx2 );  // acquire spin lock
 		header->kind.real.next = freeElem->freeList; // push on stack
…
 		unlock( freeElem->lock );                 // release spin lock
 		#else
-		freeElem->freeList.push( *(HeapManager.Storage *)header );
-		#endif // SPINLOCK
+		push( freeElem->freeList, *(HeapManager.Storage *)header );
+		#endif // BUCKETLOCK
 	} // if

 	#ifdef __CFA_DEBUG__
-	__atomic_add_fetch( &allocFree, -size, __ATOMIC_SEQ_CST );
+	__atomic_add_fetch( &allocUnfreed, -size, __ATOMIC_SEQ_CST );
 	if ( traceHeap() ) {
-		enum { BufferSize = 64 };
-		char helpText[BufferSize];
+		char helpText[64];
 		int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );
 		__cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
…


-size_t prtFree( HeapManager & manager ) with ( manager ) {
+size_t prtFree( HeapManager & manager ) with( manager ) {
 	size_t total = 0;
 	#ifdef __STATISTICS__
…
 		#endif // __STATISTICS__

-		#if defined( SPINLOCK )
+		#if BUCKETLOCK == SPINLOCK
 		for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
 		#else
-		for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0p; p = p->header.kind.real.next.top ) {
-		#endif // SPINLOCK
+		for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {
+		#endif // BUCKETLOCK
 			total += size;
 			#ifdef __STATISTICS__
…


-static void ?{}( HeapManager & manager ) with ( manager ) {
+static void ?{}( HeapManager & manager ) with( manager ) {
 	pageSize = sysconf( _SC_PAGESIZE );
…

 	char * end = (char *)sbrk( 0 );
-	heapBegin = heapEnd = sbrk( (char *)libCeiling( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment
+	heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment
 } // HeapManager

…
 	if ( traceHeapTerm() ) {
 		printStats();
-		// if ( prtfree() ) prtFree( heapManager, true );
+		// prtUnfreed() called in heapAppStop()
 	} // if
 	#endif // __STATISTICS__
…
 void memory_startup( void ) {
 	#ifdef __CFA_DEBUG__
-	if ( unlikely( heapBoot ) ) {                 // check for recursion during system boot
+	if ( heapBoot ) {                             // check for recursion during system boot
 		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
 		abort( "boot() : internal error, recursively invoked during system boot." );
…
 	#endif // __CFA_DEBUG__

-	//assert( heapManager.heapBegin != 0 );
+	//verify( heapManager.heapBegin != 0 );
 	//heapManager{};
 	if ( heapManager.heapBegin == 0p ) heapManager{}; // sanity check
…

 static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics
-	//assert( heapManager.heapBegin != 0 );
-	if ( unlikely( heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ?
+	verify( heapManager.heapBegin != 0p );        // called before memory_startup ?
+	if ( unlikely( size ) == 0 ) return 0p;       // 0 BYTE ALLOCATION RETURNS NULL POINTER
+
 	#if __SIZEOF_POINTER__ == 8
 	verify( size < ((typeof(size_t))1 << 48) );
 	#endif // __SIZEOF_POINTER__ == 8
-	void * addr = doMalloc( size );
-	if ( unlikely( addr == 0p ) ) errno = ENOMEM; // POSIX
-	return addr;
+	return doMalloc( size );
 } // mallocNoStats
…
 static inline void * callocNoStats( size_t dim, size_t elemSize ) {
 	size_t size = dim * elemSize;
+	if ( unlikely( size ) == 0 ) return 0p;       // 0 BYTE ALLOCATION RETURNS NULL POINTER
 	char * addr = (char *)mallocNoStats( size );
-	if ( unlikely( addr == 0p ) ) return 0p;

 	HeapManager.Storage.Header * header;
 	HeapManager.FreeHeader * freeElem;
 	size_t bsize, alignment;
-	bool mapped __attribute__(( unused )) = headers( "calloc", addr, header, freeElem, bsize, alignment );
 	#ifndef __CFA_DEBUG__
+	bool mapped =
+	#endif // __CFA_DEBUG__
+		headers( "calloc", addr, header, freeElem, bsize, alignment );
+	#ifndef __CFA_DEBUG__
+
 	// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
 	if ( ! mapped )
 	#endif // __CFA_DEBUG__
-		// Zero entire data space even when > than size => realloc without a new allocation and zero fill works.
-		// <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size)
+		// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
 		// `-header`-addr                      `-size
-		memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros
+		memset( addr, '\0', size );               // set to zeros

 	header->kind.real.blockSize |= 2;             // mark as zero filled
…


-static inline void * memalignNoStats( size_t alignment, size_t size ) { // necessary for malloc statistics
+static inline void * memalignNoStats( size_t alignment, size_t size ) {
+	if ( unlikely( size ) == 0 ) return 0p;       // 0 BYTE ALLOCATION RETURNS NULL POINTER
+
 	#ifdef __CFA_DEBUG__
 	checkAlign( alignment );                      // check alignment
…
 	// add sizeof(Storage) for fake header
 	char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
-	if ( unlikely( addr == 0p ) ) return addr;

 	// address in the block of the "next" alignment address
-	char * user = (char *)libCeiling( (uintptr_t)(addr + sizeof(HeapManager.Storage)), alignment );
+	char * user = (char *)ceiling2( (uintptr_t)(addr + sizeof(HeapManager.Storage)), alignment );

 	// address of header from malloc
 	HeapManager.Storage.Header * realHeader = headerAddr( addr );
+	realHeader->kind.real.size = size;            // correct size to eliminate above alignment offset
 	// address of fake header * before* the alignment location
 	HeapManager.Storage.Header * fakeHeader = headerAddr( user );
…
 static inline void * cmemalignNoStats( size_t alignment, size_t dim, size_t elemSize ) {
 	size_t size = dim * elemSize;
+	if ( unlikely( size ) == 0 ) return 0p;       // 0 BYTE ALLOCATION RETURNS NULL POINTER
 	char * addr = (char *)memalignNoStats( alignment, size );
-	if ( unlikely( addr == 0p ) ) return 0p;
+
 	HeapManager.Storage.Header * header;
 	HeapManager.FreeHeader * freeElem;
 	size_t bsize;
-	bool mapped __attribute__(( unused )) = headers( "cmemalign", addr, header, freeElem, bsize, alignment );
 	#ifndef __CFA_DEBUG__
+	bool mapped =
+	#endif // __CFA_DEBUG__
+		headers( "cmemalign", addr, header, freeElem, bsize, alignment );
+
 	// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
+	#ifndef __CFA_DEBUG__
 	if ( ! mapped )
 	#endif // __CFA_DEBUG__
-		memset( addr, '\0', dataStorage( bsize, addr, header ) ); // set to zeros
+		// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
+		// `-header`-addr                      `-size
+		memset( addr, '\0', size );               // set to zeros

 	header->kind.real.blockSize |= 2;             // mark as zero filled
 	return addr;
 } // cmemalignNoStats

-// supported mallopt options
-#ifndef M_MMAP_THRESHOLD
-#define M_MMAP_THRESHOLD (-1)
-#endif // M_TOP_PAD
-#ifndef M_TOP_PAD
-#define M_TOP_PAD (-2)
-#endif // M_TOP_PAD


…
 // Same as malloc() except size bytes is an array of dim elements each of elemSize bytes.
 void * aalloc( size_t dim, size_t elemSize ) {
+	size_t size = dim * elemSize;
 	#ifdef __STATISTICS__
 	__atomic_add_fetch( &aalloc_calls, 1, __ATOMIC_SEQ_CST );
-	__atomic_add_fetch( &aalloc_storage, dim * elemSize, __ATOMIC_SEQ_CST );
-	#endif // __STATISTICS__
-
-	return mallocNoStats( dim * elemSize );
+	__atomic_add_fetch( &aalloc_storage, size, __ATOMIC_SEQ_CST );
+	#endif // __STATISTICS__
+
+	return mallocNoStats( size );
 } // aalloc

…
 	return callocNoStats( dim, elemSize );
 } // calloc
+

 // Change the size of the memory block pointed to by oaddr to size bytes. The contents are undefined. If oaddr is
…
 	#ifdef __STATISTICS__
 	__atomic_add_fetch( &resize_calls, 1, __ATOMIC_SEQ_CST );
-	__atomic_add_fetch( &resize_storage, size, __ATOMIC_SEQ_CST );
 	#endif // __STATISTICS__

 	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-	if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
-	if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
+	if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
+	if ( unlikely( oaddr == 0p ) ) {
+		#ifdef __STATISTICS__
+		__atomic_add_fetch( &resize_storage, size, __ATOMIC_SEQ_CST );
+		#endif // __STATISTICS__
+		return mallocNoStats( size );
+	} // if

 	HeapManager.Storage.Header * header;
 	HeapManager.FreeHeader * freeElem;
-	size_t bsize, oalign = 0;
+	size_t bsize, oalign;
 	headers( "resize", oaddr, header, freeElem, bsize, oalign );

 	size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
 	// same size, DO NOT preserve STICKY PROPERTIES.
-	if ( oalign == 0 && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
+	if ( oalign <= libAlign() && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
 		header->kind.real.blockSize &= -2;        // no alignment and turn off 0 fill
+		header->kind.real.size = size;            // reset allocation size
 		return oaddr;
 	} // if

+	#ifdef __STATISTICS__
+	__atomic_add_fetch( &resize_storage, size, __ATOMIC_SEQ_CST );
+	#endif // __STATISTICS__
+
 	// change size, DO NOT preserve STICKY PROPERTIES.
 	free( oaddr );
-	void * naddr = mallocNoStats( size );         // create new area
-	return naddr;
+	return mallocNoStats( size );                 // create new area
 } // resize
…
 	#ifdef __STATISTICS__
 	__atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
-	__atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
 	#endif // __STATISTICS__

 	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-	if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
-	if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
+	if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
+	if ( unlikely( oaddr == 0p ) ) {
+		#ifdef __STATISTICS__
+		__atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
+		#endif // __STATISTICS__
+		return mallocNoStats( size );
+	} // if

 	HeapManager.Storage.Header * header;
 	HeapManager.FreeHeader * freeElem;
-	size_t bsize, oalign = 0;
+	size_t bsize, oalign;
 	headers( "realloc", oaddr, header, freeElem, bsize, oalign );

 	size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
-	if ( size <= odsize && odsize <= size * 2 ) { // allow up to 50% wasted storage in smaller size
-		// Do not know size of original allocation => cannot do 0 fill for any additional space because do not know
-		// where to start filling, i.e., do not overwrite existing values in space.
+	size_t osize = header->kind.real.size;        // old allocation size
+	bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled
+	if ( unlikely( size <= odsize ) && size > odsize / 2 ) { // allow up to 50% wasted storage
+		header->kind.real.size = size;            // reset allocation size
+		if ( unlikely( ozfill ) && size > osize ) { // previous request zero fill and larger ?
+			memset( (char *)oaddr + osize, (int)'\0', size - osize ); // initialize added storage
+		} // if
 		return oaddr;
 	} // if

+	#ifdef __STATISTICS__
+	__atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
+	#endif // __STATISTICS__
+
 	// change size and copy old content to new storage

 	void * naddr;
-	if ( unlikely( oalign != 0 ) ) {              // previous request memalign?
-		if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-			naddr = cmemalignNoStats( oalign, 1, size ); // create new aligned area
-		} else {
-			naddr = memalignNoStats( oalign, size ); // create new aligned area
-		} // if
-	} else {
-		if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-			naddr = callocNoStats( 1, size );     // create new area
-		} else {
-			naddr = mallocNoStats( size );        // create new area
-		} // if
-	} // if
-	if ( unlikely( naddr == 0p ) ) return 0p;
-
-	headers( "realloc", naddr, header, freeElem, bsize, oalign );
-	size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket
-	// To preserve prior fill, the entire bucket must be copied versus the size.
-	memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes
-	free( oaddr );
+	if ( likely( oalign <= libAlign() ) ) {       // previous request not aligned ?
+		naddr = mallocNoStats( size );            // create new area
+	} else {
+		naddr = memalignNoStats( oalign, size );  // create new aligned area
+	} // if
+
+	headers( "realloc", naddr, header, freeElem, bsize, oalign );
+	memcpy( naddr, oaddr, MIN( osize, size ) );   // copy bytes
+	free( oaddr );
+
+	if ( unlikely( ozfill ) ) {                   // previous request zero fill ?
+		header->kind.real.blockSize |= 2;         // mark new request as zero filled
+		if ( size > osize ) {                     // previous request larger ?
+			memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
+		} // if
+	} // if
 	return naddr;
 } // realloc


+
 // Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete)
 void * memalign( size_t alignment, size_t size ) {
…
 // Same as aalloc() with memory alignment.
 void * amemalign( size_t alignment, size_t dim, size_t elemSize ) {
+	size_t size = dim * elemSize;
 	#ifdef __STATISTICS__
 	__atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
-	__atomic_add_fetch( &cmemalign_storage, dim * elemSize, __ATOMIC_SEQ_CST );
-	#endif // __STATISTICS__
-
-	return memalignNoStats( alignment, dim * elemSize );
+	__atomic_add_fetch( &cmemalign_storage, size, __ATOMIC_SEQ_CST );
+	#endif // __STATISTICS__
+
+	return memalignNoStats( alignment, size );
 } // amemalign
…
 // free(3).
 int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
-	if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
+	if ( alignment < libAlign() || ! is_pow2( alignment ) ) return EINVAL; // check alignment
 	* memptr = memalign( alignment, size );
-	if ( unlikely( * memptr == 0p ) ) return ENOMEM;
 	return 0;
 } // posix_memalign
…
 // Same as valloc but rounds size to multiple of page size.
 void * pvalloc( size_t size ) {
-	return memalign( pageSize, libCeiling( size, pageSize ) );
+	return memalign( pageSize, ceiling2( size, pageSize ) );
 } // pvalloc
…
 } // malloc_alignment

+
 // Set the alignment for an the allocation and return previous alignment or 0 if no alignment.
 size_t $malloc_alignment_set( void * addr, size_t alignment ) {
…
 // Returns original total allocation size (not bucket size) => array size is dimension * sizeif(T).
 size_t malloc_size( void * addr ) {
-	if ( unlikely( addr == 0p ) ) return false;   // null allocation is not zero fill
+	if ( unlikely( addr == 0p ) ) return 0;       // null allocation has zero size
 	HeapManager.Storage.Header * header = headerAddr( addr );
 	if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
 		header = realHeader( header );            // backup from fake to real header
 	} // if
-	return header->size;
+	return header->kind.real.size;
 } // malloc_size

 // Set allocation size and return previous size.
 size_t $malloc_size_set( void * addr, size_t size ) {
-	if ( unlikely( addr == 0p ) ) return false;   // null allocation is not zero fill
+	if ( unlikely( addr == 0p ) ) return 0;       // null allocation has 0 size
 	HeapManager.Storage.Header * header = headerAddr( addr );
 	if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
 		header = realHeader( header );            // backup from fake to real header
 	} // if
-	size_t ret = header->size;
-	header->size = size;
+	size_t ret = header->kind.real.size;
+	header->kind.real.size = size;
 	return ret;
 } // $malloc_size_set
…

 	headers( "malloc_usable_size", addr, header, freeElem, bsize, alignment );
-	return dataStorage( bsize, addr, header );    // data storage in bucket
+	return dataStorage( bsize, addr, header );    // data storage in bucket
 } // malloc_usable_size
…
 } // malloc_stats

+
 // Changes the file descripter where malloc_stats() writes statistics.
 int malloc_stats_fd( int fd __attribute__(( unused )) ) {
 	#ifdef __STATISTICS__
-	int temp = statfd;
-	statfd = fd;
+	int temp = stat_fd;
+	stat_fd = fd;
 	return temp;
 	#else
…
 } // mallopt

+
 // Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument).
 int malloc_trim( size_t ) {
…
 // malloc).
 int malloc_info( int options, FILE * stream ) {
-	if ( options != 0 ) { errno = EINVAL; return -1; }
+	if ( options != 0 ) { errno = EINVAL; return -1; }
+	#ifdef __STATISTICS__
 	return printStatsXML( stream );
+	#else
+	return 0;                                     // unsupported
+	#endif // __STATISTICS__
 } // malloc_info
…
 // Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data
 // structure pointed to by state.
-int malloc_set_state( void * ptr ) {
+int malloc_set_state( void * ) {
 	return 0;                                     // unsupported
 } // malloc_set_state
…
 	#ifdef __STATISTICS__
 	__atomic_add_fetch( &resize_calls, 1, __ATOMIC_SEQ_CST );
-	__atomic_add_fetch( &resize_storage, size, __ATOMIC_SEQ_CST );
 	#endif // __STATISTICS__

 	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-	if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases
-	if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size );
-
-
-	if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum
+	if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
+	if ( unlikely( oaddr == 0p ) ) {
+		#ifdef __STATISTICS__
+		__atomic_add_fetch( &resize_storage, size, __ATOMIC_SEQ_CST );
+		#endif // __STATISTICS__
+		return memalignNoStats( nalign, size );
+	} // if
+
+	if ( unlikely( nalign < libAlign() ) ) nalign = libAlign(); // reset alignment to minimum
 	#ifdef __CFA_DEBUG__
 	else
…
 	HeapManager.Storage.Header * header;
 	HeapManager.FreeHeader * freeElem;
-	size_t bsize, oalign = 0;
+	size_t bsize, oalign;
 	headers( "resize", oaddr, header, freeElem, bsize, oalign );
 	size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket

 	if ( oalign <= nalign && (uintptr_t)oaddr % nalign == 0 ) { // <= alignment and new alignment happens to match
-		if ( oalign >= libAlign() ) {             // fake header ?
+		if ( oalign > libAlign() ) {              // fake header ?
 			headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
 		} // if
 		if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
 			header->kind.real.blockSize &= -2;    // turn off 0 fill
+			header->kind.real.size = size;        // reset allocation size
 			return oaddr;
 		} // if
 	} // if

-	// change size
-
-	void * naddr;
-	if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-		naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area
-	} else {
-		naddr = memalignNoStats( nalign, size );  // create new aligned area
-	} // if
-
+	#ifdef __STATISTICS__
+	__atomic_add_fetch( &resize_storage, size, __ATOMIC_SEQ_CST );
+	#endif // __STATISTICS__
+
+	// change size, DO NOT preserve STICKY PROPERTIES.
 	free( oaddr );
-	return naddr;
+	return memalignNoStats( nalign, size );       // create new aligned area
 } // resize


 void * realloc( void * oaddr, size_t nalign, size_t size ) {
-	if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum
+	if ( unlikely( nalign < libAlign() ) ) nalign = libAlign(); // reset alignment to minimum
 	#ifdef __CFA_DEBUG__
 	else
…
 	HeapManager.Storage.Header * header;
 	HeapManager.FreeHeader * freeElem;
-	size_t bsize, oalign = 0;
+	size_t bsize, oalign;
 	headers( "realloc", oaddr, header, freeElem, bsize, oalign );
-	size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket

 	if ( oalign <= nalign && (uintptr_t)oaddr % nalign == 0 ) { // <= alignment and new alignment happens to match
-		if ( oalign >= libAlign() ) {             // fake header ?
+		if ( oalign > libAlign() ) {              // fake header ?
 			headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
 		} // if
…

 	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-	if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases
+	if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
 	if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size );

-	void * naddr;
-	if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-		naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area
-	} else {
-		naddr = memalignNoStats( nalign, size );  // create new aligned area
-	} // if
+	size_t osize = header->kind.real.size;        // old allocation size
+	bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled
+
+	void * naddr = memalignNoStats( nalign, size ); // create new aligned area

 	headers( "realloc", naddr, header, freeElem, bsize, oalign );
-	size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
-	// To preserve prior fill, the entire bucket must be copied versus the size.
-	memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes
+	memcpy( naddr, oaddr, MIN( osize, size ) );   // copy bytes
 	free( oaddr );
+
+	if ( unlikely( ozfill ) ) {                   // previous request zero fill ?
+		header->kind.real.blockSize |= 2;         // mark new request as zero filled
+		if ( size > osize ) {                     // previous request larger ?
+			memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
+		} // if
+	} // if
 	return naddr;
 } // realloc
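The behavioural change most visible in this diff is that the allocator now records the original request size in the real header (kind.real.size) and treats zero fill as a sticky property: when a zero-filled (calloc/cmemalign) object is grown through realloc, the bytes added between the old and new size are zero filled. The small C program below is a hypothetical illustration of that semantics, not part of the changeset; it only reports zero-filled growth when linked against the CFA heap, since a conventional libc realloc leaves the added bytes undefined.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main( void ) {
	// zero-filled allocation => the CFA heap marks the block with the sticky zero-fill property
	char * p = calloc( 16, 1 );
	if ( p == NULL ) return 1;
	memset( p, 'x', 16 );                         // dirty the existing 16 bytes

	// grow the allocation; under the CFA heap bytes 16..63 are zero filled because the
	// old block carried the zero-fill property (with glibc they are undefined)
	p = realloc( p, 64 );
	if ( p == NULL ) return 1;

	int added_zeroed = 1;
	for ( int i = 16; i < 64; i += 1 )
		if ( p[i] != '\0' ) added_zeroed = 0;
	printf( "added bytes zero filled: %s\n", added_zeroed ? "yes" : "no" );

	free( p );
	return 0;
}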