Changeset b6830d74
- Timestamp: Aug 17, 2018, 3:36:21 PM (6 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, no_list, persistent-indexer, pthread-emulation, qualifiedEnum
- Children: 72a5a75
- Parents: 2aa2056f
- File: 1 edited
libcfa/src/heap.cfa
r2aa2056f → rb6830d74

// #comment TD : this file uses both spaces and tabs for indentation

//
// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
…
} // extern "C"

// #comment TD : Many of these should be merged into math I believe
#include "bits/align.hfa"			// libPow2
#include "bits/defs.hfa"			// likely, unlikely
…
size_t default_mmap_start() __attribute__(( weak )) {
	return __CFA_DEFAULT_MMAP_START__;
} // default_mmap_start

size_t default_heap_expansion() __attribute__(( weak )) {
	return __CFA_DEFAULT_HEAP_EXPANSION__;
} // default_heap_expansion
…
#endif // LOCKFREE

// #comment TD : This define is significantly different from the __ALIGN__ define from locks.hfa
#define ALIGN 16
…
static void checkUnfreed() {
	if ( allocFree != 0 ) {
		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
		// char helpText[512];
		…
		// (long int)getpid(), allocFree, allocFree ); // always print the UNIX pid
		// __cfaabi_dbg_bits_write( helpText, len );
	} // if
} // checkUnfreed
…
struct RealHeader {
	union {
		// #comment TD : this code uses byte sizes but the comment uses bit sizes
		struct {				// 32-bit word => 64-bit header, 64-bit word => 128-bit header
		#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4
		…
		};

		// #comment TD : C++ code
		#if BUCKLOCK == LOCKFREE
		Stack<Storage>::Link next;		// freed block points next freed block of same size (double-wide)
		…
	Storage * freeList;
	#elif BUCKLOCK == LOCKFREE
	// #comment TD : C++ code
	StackLF<Storage> freeList;
	#else
	…
static unsigned int maxBucketsUsed;			// maximum number of buckets in use

// #comment TD : This array is not const but it feels like it should be
// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
static unsigned int bucketSizes[NoBucketSizes] @= {	// different bucket sizes
	16, 32, 48, 64,
	64 + sizeof(HeapManager.Storage), 96, 112, 128, 128 + sizeof(HeapManager.Storage), 160, 192, 224,
	256 + sizeof(HeapManager.Storage), 320, 384, 448, 512 + sizeof(HeapManager.Storage), 640, 768, 896,
	1_024 + sizeof(HeapManager.Storage), 1_536, 2_048 + sizeof(HeapManager.Storage), 2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), 6_144,
	8_192 + sizeof(HeapManager.Storage), 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360,
	16_384 + sizeof(HeapManager.Storage), 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720,
	32_768 + sizeof(HeapManager.Storage), 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440,
	65_536 + sizeof(HeapManager.Storage), 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880,
	131_072 + sizeof(HeapManager.Storage), 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760,
	262_144 + sizeof(HeapManager.Storage), 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520,
	524_288 + sizeof(HeapManager.Storage), 655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), 1_179_648, 1_310_720, 1_441_792,
	1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), 2_621_440, 3_145_728, 3_670_016,
	4_194_304 + sizeof(HeapManager.Storage)
};
#ifdef FASTLOOKUP
…
static HeapManager heapManager __attribute__(( aligned (128) )) @= {};	// size of cache line to prevent false sharing

// #comment TD : The return type of this function should be commented
static inline bool setMmapStart( size_t value ) {
	if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
	mmapStart = value;				// set global

	// find the closest bucket size less than or equal to the mmapStart size
	maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
	assert( maxBucketsUsed < NoBucketSizes );	// subscript failure ?
	assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
	return false;
} // setMmapStart


static void ?{}( HeapManager & manager ) with ( manager ) {
	pageSize = sysconf( _SC_PAGESIZE );

	for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
		freeLists[i].blockSize = bucketSizes[i];
	} // for

	#ifdef FASTLOOKUP
	unsigned int idx = 0;
	for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
		if ( i > bucketSizes[idx] ) idx += 1;
		lookup[i] = idx;
	} // for
	#endif // FASTLOOKUP

	if ( setMmapStart( default_mmap_start() ) ) {
		abort( "HeapManager : internal error, mmap start initialization failure." );
	} // if
	heapExpand = default_heap_expansion();

	char * End = (char *)sbrk( 0 );
	sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End ); // move start of heap to multiple of alignment
	heapBegin = heapEnd = sbrk( 0 );		// get new start point
} // HeapManager
…
#endif // __CFA_DEBUG__

// #comment TD : This assertion seems redundant with the above code
assert( heapManager.heapBegin == 0 );
heapManager{};
…
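// Illustrative sketch (not part of this changeset): how a total allocation
// size maps to a free-list index, combining the FASTLOOKUP table built in the
// HeapManager constructor above with the bsearchl binary search used by
// setMmapStart; bsearchl is assumed to return the index of the first bucket
// whose size is >= its argument.
static inline unsigned int sketch_size2bucket( size_t tsize ) {
	#ifdef FASTLOOKUP
	if ( tsize < LookupSizes ) return lookup[tsize]; // O(1) table for small sizes
	#endif // FASTLOOKUP
	return bsearchl( (unsigned int)tsize, bucketSizes, maxBucketsUsed ); // O(log n) otherwise
} // sketch_size2bucket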
// Use "write" because streams may be shutdown when calls are made.
static void printStats() {
	char helpText[512];
	__cfaabi_dbg_bits_print_buffer( helpText, sizeof(helpText),
			"\nHeap statistics:\n"
			…
} // printStats

// #comment TD : Why do we have this?
static int printStatsXML( FILE * stream ) {
	char helpText[512];
	int len = snprintf( helpText, sizeof(helpText),
			"<malloc version=\"1\">\n"
			"<heap nr=\"0\">\n"
			…
			sbrk_calls, sbrk_storage
		);
	return write( fileno( stream ), helpText, len ); // -1 => error
} // printStatsXML
#endif // __STATISTICS__

// #comment TD : Is this the same thing as Out-of-Memory?
static inline void noMemory() {
	abort( "Heap memory exhausted at %zu bytes.\n"
		"Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
		((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
…

static inline void checkAlign( size_t alignment ) {
	if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) {
		abort( "Alignment %zu for memory allocation is less than sizeof(void *) and/or not a power of 2.", alignment );
	} // if
} // checkAlign


static inline bool setHeapExpand( size_t value ) {
	if ( heapExpand < pageSize ) return true;
	heapExpand = value;
	return false;
} // setHeapExpand


static inline void checkHeader( bool check, const char * name, void * addr ) {
	if ( unlikely( check ) ) {			// bad address ?
		abort( "Attempt to %s storage %p with address outside the heap.\n"
			"Possible cause is duplicate free on same block or overwriting of memory.",
			name, addr );
	} // if
} // checkHeader

// #comment TD : function should be commented and/or have a more evocative name;
//               this isn't either a check or a constructor, which is what I would expect this function to be
static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & size, size_t & alignment ) {
	if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
		size_t offset = header->kind.fake.offset;
		alignment = header->kind.fake.alignment & -2;	// remove flag from value
		…
		#endif // __CFA_DEBUG__
		header = (HeapManager.Storage.Header *)((char *)header - offset);
	} // if
} // fakeHeader

// #comment TD : Why is this a define
#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))
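// Illustrative sketch (not part of this changeset), restating what fakeHeader
// above does: a user address preceded by a fake header (odd alignment flag)
// is mapped back to the real header by subtracting the stored offset.
static inline HeapManager.Storage.Header * sketch_realHeaderAddr( void * addr ) {
	HeapManager.Storage.Header * header = headerAddr( addr );
	if ( (header->kind.fake.alignment & 1) == 1 ) {	// low bit set => fake header
		header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset);
	} // if
	return header;
} // sketch_realHeaderAddr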
static inline bool headers( const char * name, void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
	header = headerAddr( addr );

	if ( unlikely( heapEnd < addr ) ) {		// mmapped ?
		fakeHeader( header, size, alignment );
		size = header->kind.real.blockSize & -3; // mmap size
		return true;
	} // if

	#ifdef __CFA_DEBUG__
	checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
	#endif // __CFA_DEBUG__

	// #comment TD : This code looks weird...
	//               It's called as the first statement of both branches of the last if, with the same parameters in all cases

	// header may be safe to dereference
	fakeHeader( header, size, alignment );
	#ifdef __CFA_DEBUG__
	checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
	#endif // __CFA_DEBUG__

	freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
	#ifdef __CFA_DEBUG__
	if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
		abort( "Attempt to %s storage %p with corrupted header.\n"
			"Possible cause is duplicate free on same block or overwriting of header information.",
			name, addr );
	} // if
	#endif // __CFA_DEBUG__
	size = freeElem->blockSize;
	return false;
} // headers


static inline void * extend( size_t size ) with ( heapManager ) {
	lock( extlock __cfaabi_dbg_ctx2 );
	ptrdiff_t rem = heapRemaining - size;
	if ( rem < 0 ) {
		// If the size requested is bigger than the current remaining storage, increase the size of the heap.
		…
		#endif // __CFA_DEBUG__
		rem = heapRemaining + increase - size;
	} // if

	HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
	heapRemaining = rem;
	heapEnd = (char *)heapEnd + size;
	unlock( extlock );
	return block;
} // extend
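// Worked example (illustrative, assuming sizeof(HeapManager.Storage) == 16):
// in doMalloc below, a 100-byte request becomes tsize = 100 + 16 = 116 bytes,
// which is serviced from the 128-byte bucket; a request with tsize >= mmapStart
// instead has tsize rounded up to a page-size multiple and goes to mmap.
static inline size_t sketch_totalSize( size_t size ) with ( heapManager ) {
	size_t tsize = size + sizeof(HeapManager.Storage); // user bytes + header
	if ( unlikely( tsize >= mmapStart ) ) tsize = libCeiling( tsize, pageSize ); // mmap path
	return tsize;
} // sketch_totalSize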
static inline void * doMalloc( size_t size ) with ( heapManager ) {
	HeapManager.Storage * block;

	// Look up size in the size list. Make sure the user request includes space for the header that must be allocated
	// along with the block and is a multiple of the alignment size.

	size_t tsize = size + sizeof(HeapManager.Storage);
	if ( likely( tsize < mmapStart ) ) {		// small size => sbrk
		HeapManager.FreeHeader * freeElem =
			#ifdef FASTLOOKUP
			…

		#if defined( SPINLOCK )
		lock( freeElem->lock __cfaabi_dbg_ctx2 );
		block = freeElem->freeList;		// remove node from stack
		#else
		block = freeElem->freeList.pop();
		#endif // SPINLOCK
		if ( unlikely( block == 0 ) ) {		// no free block ?
			…

		block->header.kind.real.home = freeElem; // pointer back to free list of appropriate size
	} else {					// large size => mmap
		tsize = libCeiling( tsize, pageSize );	// must be multiple of page size
		#ifdef __STATISTICS__
		__atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &mmap_storage, tsize, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
		…
		#endif // __CFA_DEBUG__
		block->header.kind.real.blockSize = tsize; // storage size for munmap
	} // if

	void * area = &(block->data);			// adjust off header to user bytes

	#ifdef __CFA_DEBUG__
	assert( ((uintptr_t)area & (libAlign() - 1)) == 0 ); // minimum alignment ?
	__atomic_add_fetch( &allocFree, tsize, __ATOMIC_SEQ_CST );
	if ( traceHeap() ) {
		enum { BufferSize = 64 };
		char helpText[BufferSize];
		int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", area, size, tsize );
		// int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", area, size );
		__cfaabi_dbg_bits_write( helpText, len );
	} // if
	#endif // __CFA_DEBUG__

	return area;
} // doMalloc
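// Illustrative note (inferred from the masks used in this file, not part of
// this changeset): because headers are at least 4-byte aligned, the low-order
// bits of header fields double as flags: bit 0 of kind.fake.alignment marks a
// fake header (cleared with & -2), and bit 1 of kind.real.blockSize marks
// zero-filled storage (set with |= 2, cleared with & -3).
static inline bool sketch_isZeroFill( HeapManager.Storage.Header * header ) {
	return (header->kind.real.blockSize & 2) != 0;	// bit 1 set by calloc/cmemalign
} // sketch_isZeroFill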
static inline void doFree( void * addr ) with ( heapManager ) {
	#ifdef __CFA_DEBUG__
	if ( unlikely( heapManager.heapBegin == 0 ) ) {
		abort( "doFree( %p ) : internal error, called before heap is initialized.", addr );
	} // if
	#endif // __CFA_DEBUG__

	HeapManager.Storage.Header * header;
	HeapManager.FreeHeader * freeElem;
	size_t size, alignment;				// not used (see realloc)

	if ( headers( "free", addr, header, freeElem, size, alignment ) ) { // mmapped ?
		#ifdef __STATISTICS__
		__atomic_add_fetch( &munmap_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &munmap_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		if ( munmap( header, size ) == -1 ) {
			…
		#endif // __CFA_DEBUG__
		} // if
	} else {
		#ifdef __CFA_DEBUG__
		// Set free memory to garbage so subsequent usages might fail.
		memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) );
		#endif // __CFA_DEBUG__

		#ifdef __STATISTICS__
		free_storage += size;
		#endif // __STATISTICS__
		#if defined( SPINLOCK )
		lock( freeElem->lock __cfaabi_dbg_ctx2 );	// acquire spin lock
		header->kind.real.next = freeElem->freeList;	// push on stack
		freeElem->freeList = (HeapManager.Storage *)header;
		unlock( freeElem->lock );		// release spin lock
		#else
		freeElem->freeList.push( *(HeapManager.Storage *)header );
		#endif // SPINLOCK
	} // if

	#ifdef __CFA_DEBUG__
	__atomic_add_fetch( &allocFree, -size, __ATOMIC_SEQ_CST );
	if ( traceHeap() ) {
		char helpText[64];
		int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );
		__cfaabi_dbg_bits_write( helpText, len );
	} // if
	#endif // __CFA_DEBUG__
} // doFree


size_t checkFree( HeapManager & manager ) with ( manager ) {
	size_t total = 0;
	#ifdef __STATISTICS__
	__cfaabi_dbg_bits_acquire();
	__cfaabi_dbg_bits_print_nolock( "\nBin lists (bin size : free blocks on list)\n" );
	#endif // __STATISTICS__
	for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {
		size_t size = freeLists[i].blockSize;
		#ifdef __STATISTICS__
		unsigned int N = 0;
		#endif // __STATISTICS__

		#if defined( SPINLOCK )
		for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0; p = p->header.kind.real.next ) {
			…
			N += 1;
			#endif // __STATISTICS__
		} // for

		#ifdef __STATISTICS__
		__cfaabi_dbg_bits_print_nolock( "%7zu, %-7u  ", size, N );
		if ( (i + 1) % 8 == 0 ) __cfaabi_dbg_bits_print_nolock( "\n" );
		#endif // __STATISTICS__
	} // for
	#ifdef __STATISTICS__
	__cfaabi_dbg_bits_print_nolock( "\ntotal free blocks:%zu\n", total );
	__cfaabi_dbg_bits_release();
	#endif // __STATISTICS__
	return (char *)heapEnd - (char *)heapBegin - total;
} // checkFree

// #comment TD : This is not a good name, plus this feels like it could easily be folded into doMalloc
static inline void * malloc2( size_t size ) {		// necessary for malloc statistics
	assert( heapManager.heapBegin != 0 );
	void * area = doMalloc( size );
	if ( unlikely( area == 0 ) ) errno = ENOMEM;	// POSIX
	return area;
} // malloc2
static inline void * memalign2( size_t alignment, size_t size ) { // necessary for malloc statistics
	#ifdef __CFA_DEBUG__
	checkAlign( alignment );			// check alignment
	#endif // __CFA_DEBUG__

	// if alignment <= default alignment, do normal malloc as two headers are unnecessary
	if ( unlikely( alignment <= libAlign() ) ) return malloc2( size );

	// Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
	// administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
	//      .-------------v-----------------v----------------v----------,
	//      | Real Header | ... padding ... |   Fake Header  | data ... |
	//      `-------------^-----------------^-+--------------^----------'
	//      |<--------------------------------' offset/align |<-- alignment boundary

	// subtract libAlign() because it is already the minimum alignment
	// add sizeof(Storage) for fake header
	// #comment TD : this is the only place that calls doMalloc without calling malloc2, why ?
	char * area = (char *)doMalloc( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
	if ( unlikely( area == 0 ) ) return area;

	// address in the block of the "next" alignment address
	char * user = (char *)libCeiling( (uintptr_t)(area + sizeof(HeapManager.Storage)), alignment );

	// address of header from malloc
	HeapManager.Storage.Header * realHeader = headerAddr( area );
	// address of fake header *before* the alignment location
	HeapManager.Storage.Header * fakeHeader = headerAddr( user );
	// SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
	fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
	// SKULLDUGGERY: odd alignment implies fake header
	fakeHeader->kind.fake.alignment = alignment | 1;

	return user;
} // memalign2
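// Worked example (illustrative, assuming libAlign() == 16 and
// sizeof(HeapManager.Storage) == 16): memalign2( 64, 100 ) calls
// doMalloc( 100 + 64 - 16 + 16 ) = doMalloc( 164 ), which is enough storage to
// find a 64-byte boundary with at least sizeof(HeapManager.Storage) bytes
// before it for the fake header; the returned user pointer then satisfies
//   ((uintptr_t)user & (64 - 1)) == 0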
extern "C" {
	// The malloc() function allocates size bytes and returns a pointer to the
	// allocated memory. The memory is not initialized. If size is 0, then malloc()
	// returns either NULL, or a unique pointer value that can later be successfully
	// passed to free().
	void * malloc( size_t size ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &malloc_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &malloc_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		return malloc2( size );
	} // malloc
	// The calloc() function allocates memory for an array of nmemb elements of
	// size bytes each and returns a pointer to the allocated memory. The memory
	// is set to zero. If nmemb or size is 0, then calloc() returns either NULL,
	// or a unique pointer value that can later be successfully passed to free().
	void * calloc( size_t noOfElems, size_t elemSize ) {
		size_t size = noOfElems * elemSize;
		#ifdef __STATISTICS__
		__atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &calloc_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		char * area = (char *)malloc2( size );
		if ( unlikely( area == 0 ) ) return 0;

		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		…
		#endif // __CFA_DEBUG__
		memset( area, '\0', asize - sizeof(HeapManager.Storage) ); // set to zeros

		header->kind.real.blockSize |= 2;	// mark as zero filled
		return area;
	} // calloc

	// #comment TD : Document this function
	void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {
		size_t size = noOfElems * elemSize;
		#ifdef __STATISTICS__
		__atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &cmemalign_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		…
		return area;
	} // cmemalign

	// The realloc() function changes the size of the memory block pointed to by
	// ptr to size bytes. The contents will be unchanged in the range from the
	// start of the region up to the minimum of the old and new sizes. If the new
	// size is larger than the old size, the added memory will not be initialized.
	// If ptr is NULL, then the call is equivalent to malloc(size), for all values
	// of size; if size is equal to zero, and ptr is not NULL, then the call is
	// equivalent to free(ptr). Unless ptr is NULL, it must have been returned by
	// an earlier call to malloc(), calloc() or realloc(). If the area pointed to
	// was moved, a free(ptr) is done.
	void * realloc( void * addr, size_t size ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		…
		#ifdef __STATISTICS__
		__atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		…
		free( addr );
		return area;
	} // realloc


	// The obsolete function memalign() allocates size bytes and returns
	// a pointer to the allocated memory. The memory address will be a
	// multiple of alignment, which must be a power of two.
	void * memalign( size_t alignment, size_t size ) __attribute__ ((deprecated));
	void * memalign( size_t alignment, size_t size ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &memalign_calls, 1, __ATOMIC_SEQ_CST );
		…
		return area;
	} // memalign
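	// Illustrative usage (not part of this changeset) of the realloc semantics
	// documented above:
	//   char * p = (char *)realloc( 0, 32 );	// addr == NULL => acts like malloc( 32 )
	//   p = (char *)realloc( p, 64 );		// grow; the first 32 bytes are preserved
	//   realloc( p, 0 );				// non-NULL addr, size == 0 => acts like free( p )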
	// The function aligned_alloc() is the same as memalign(), except for
	// the added restriction that size should be a multiple of alignment.
	void * aligned_alloc( size_t alignment, size_t size ) {
		return memalign( alignment, size );
	} // aligned_alloc


	// The function posix_memalign() allocates size bytes and places the address
	// of the allocated memory in *memptr. The address of the allocated memory
	// will be a multiple of alignment, which must be a power of two and a multiple
	// of sizeof(void *). If size is 0, then posix_memalign() returns either NULL,
	// or a unique pointer value that can later be successfully passed to free(3).
	int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
		if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
		* memptr = memalign( alignment, size );
		if ( unlikely( * memptr == 0 ) ) return ENOMEM;
		return 0;
	} // posix_memalign

	// The obsolete function valloc() allocates size bytes and returns a pointer
	// to the allocated memory. The memory address will be a multiple of the page size.
	// It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
	void * valloc( size_t size ) __attribute__ ((deprecated));
	void * valloc( size_t size ) {
		return memalign( pageSize, size );
	} // valloc


	// The free() function frees the memory space pointed to by ptr, which must
	// have been returned by a previous call to malloc(), calloc() or realloc().
	// Otherwise, or if free(ptr) has already been called before, undefined
	// behavior occurs. If ptr is NULL, no operation is performed.
	void free( void * addr ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &free_calls, 1, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		// #comment TD : To decrease nesting I would put the special case in the
		//               else instead, plus it reads more naturally to have the
		//               short / normal case first
		if ( unlikely( addr == 0 ) ) {		// special case
			#ifdef __CFA_DEBUG__
			if ( traceHeap() ) {
				#define nullmsg "Free( 0x0 ) size:0\n"
				// Do not debug print free( 0 ), as it can cause recursive entry from sprintf.
				__cfaabi_dbg_bits_write( nullmsg, sizeof(nullmsg) - 1 );
			} // if
			#endif // __CFA_DEBUG__
			return;
		…

		doFree( addr );
	} // free
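	// Illustrative usage (not part of this changeset) of the contracts documented
	// above: posix_memalign reports errors through its return value rather than
	// errno, and free( NULL ) is a no-op.
	//   void * buf;
	//   if ( posix_memalign( &buf, 64, 1024 ) == 0 ) { // 64: power of 2 and >= sizeof(void *)
	//   	// ... use buf ...
	//   	free( buf );
	//   } // if
	//   free( 0 );					// special case: no operation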
	// The mallopt() function adjusts parameters that control the behavior of the
	// memory-allocation functions (see malloc(3)). The param argument specifies
	// the parameter to be modified, and value specifies the new value for that
	// parameter.
	int mallopt( int option, int value ) {
		choose( option ) {
		  case M_TOP_PAD:
			if ( setHeapExpand( value ) ) fallthru default;
		  case M_MMAP_THRESHOLD:
			if ( setMmapStart( value ) ) fallthru default;
		  default:
			// #comment TD : 1 for unsupported feels wrong
			return 1;			// success, or unsupported
		} // switch
		return 0;				// error
	} // mallopt

	// The malloc_trim() function attempts to release free memory at the top
	// of the heap (by calling sbrk(2) with a suitable argument).
	int malloc_trim( size_t ) {
		return 0;				// => impossible to release memory
	} // malloc_trim

	// The malloc_usable_size() function returns the number of usable bytes in the
	// block pointed to by ptr, a pointer to a block of memory allocated by
	// malloc(3) or a related function.
	size_t malloc_usable_size( void * addr ) {
		if ( unlikely( addr == 0 ) ) return 0;	// null allocation has 0 size

		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		…
		size_t usize = size - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block
		return usize;
	} // malloc_usable_size


	// #comment TD : Document this function
	size_t malloc_alignment( void * addr ) {
		if ( unlikely( addr == 0 ) ) return libAlign(); // minimum alignment
		HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );
		…
			return libAlign();		// minimum alignment
		} // if
	} // malloc_alignment
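	// Worked example (illustrative, assuming sizeof(HeapManager.Storage) == 16):
	// malloc( 100 ) is serviced from the 128-byte bucket, so malloc_usable_size
	// above reports 128 - 16 = 112 bytes; the bucket slack beyond the 100
	// requested bytes is usable storage.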
	// #comment TD : Document this function
	bool malloc_zero_fill( void * addr ) {
		if ( unlikely( addr == 0 ) ) return false; // null allocation is not zero fill

		HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );
		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
			…
		} // if
		return (header->kind.real.blockSize & 2) != 0; // zero filled (calloc/cmemalign) ?
	} // malloc_zero_fill


	// #comment TD : Document this function
	void malloc_stats( void ) {
		#ifdef __STATISTICS__
		printStats();
		if ( checkFree() ) checkFree( heapManager );
		#endif // __STATISTICS__
	} // malloc_stats

	// #comment TD : Document this function
	int malloc_stats_fd( int fd ) {
		#ifdef __STATISTICS__
		int temp = statfd;
		statfd = fd;
		return temp;
		#else
		return -1;
		#endif // __STATISTICS__
	} // malloc_stats_fd


	// #comment TD : Document this function
	int malloc_info( int options, FILE * stream ) {
		return printStatsXML( stream );
	…


	// #comment TD : What are these two functions for?
	void * malloc_get_state( void ) {
		return 0;
	} // malloc_get_state


	int malloc_set_state( void * ptr ) {