Changes in libcfa/src/heap.cfa [9c438546:1076d05]
File: 1 edited
libcfa/src/heap.cfa (modified) (24 diffs)
Legend (unified-diff notation):
  ' '  Unmodified
  '+'  Added
  '-'  Removed
libcfa/src/heap.cfa
--- libcfa/src/heap.cfa	(r9c438546)
+++ libcfa/src/heap.cfa	(r1076d05)
@@ -10,5 +10,5 @@
 // Created On : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Sun May 17 20:58:17 2020
-// Update Count : 762
+// Last Modified On : Wed May 6 17:29:26 2020
+// Update Count : 727
 //
@@ -128,9 +128,6 @@
 #define LOCKFREE 1
 #define BUCKETLOCK SPINLOCK
-#if BUCKETLOCK == SPINLOCK
-#elif BUCKETLOCK == LOCKFREE
-#include <stackLockFree.hfa>
-#else
-#error undefined lock type for bucket lock
+#if BUCKETLOCK == LOCKFREE
+#include <uStackLF.h>
 #endif // LOCKFREE
 
@@ -140,4 +137,6 @@
 
 struct HeapManager {
+	// struct FreeHeader;				// forward declaration
+
 	struct Storage {
 		struct Header {				// header
@@ -147,31 +146,31 @@
 			struct {			// 4-byte word => 8-byte header, 8-byte word => 16-byte header
 				#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4
-				uint64_t padding;	// unused, force home/blocksize to overlay alignment in fake header
+				uint32_t padding;	// unused, force home/blocksize to overlay alignment in fake header
 				#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4
 
 				union {
-					//FreeHeader * home;	// allocated block points back to home locations (must overlay alignment)
+					// FreeHeader * home;	// allocated block points back to home locations (must overlay alignment)
 					// 2nd low-order bit => zero filled
 					void * home;		// allocated block points back to home locations (must overlay alignment)
 					size_t blockSize;	// size for munmap (must overlay alignment)
-					#if BUCKETLOCK == SPINLOCK
+					#if BUCKLOCK == SPINLOCK
 					Storage * next;		// freed block points next freed block of same size
 					#endif // SPINLOCK
 				};
-				size_t size;		// allocation size in bytes
 
 				#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
-				uint64_t padding;	// unused, force home/blocksize to overlay alignment in fake header
+				uint32_t padding;	// unused, force home/blocksize to overlay alignment in fake header
 				#endif // __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
 			};
-			#if BUCKETLOCK == LOCKFREE
-			Link(Storage) next;		// freed block points next freed block of same size (double-wide)
+			// future code
+			#if BUCKLOCK == LOCKFREE
+			Stack<Storage>::Link next;	// freed block points next freed block of same size (double-wide)
 			#endif // LOCKFREE
 		};
 	} real; // RealHeader
-
 	struct FakeHeader {
 		#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-		uint32_t alignment;		// 1st low-order bit => fake header & alignment
+		// 1st low-order bit => fake header & alignment
+		uint32_t alignment;
 		#endif // __ORDER_LITTLE_ENDIAN__
 
@@ -183,4 +182,5 @@
 			} fake; // FakeHeader
 		} kind; // Kind
+		size_t size;				// allocation size in bytes
 	} header; // Header
 	char pad[libAlign() - sizeof( Header )];
@@ -191,10 +191,13 @@
 
 struct FreeHeader {
-#if BUCKETLOCK == SPINLOCK
+#if BUCKLOCK == SPINLOCK
 	__spinlock_t lock;			// must be first field for alignment
 	Storage * freeList;
+#elif BUCKLOCK == LOCKFREE
+	// future code
+	StackLF<Storage> freeList;
 #else
-	StackLF(Storage) freeList;
-#endif // BUCKETLOCK
+	#error undefined lock type for bucket lock
+#endif // SPINLOCK
 	size_t blockSize;			// size of allocations on this list
 }; // FreeHeader
@@ -208,10 +211,4 @@
 	size_t heapRemaining;			// amount of storage not allocated in the current chunk
 }; // HeapManager
-
-#if BUCKETLOCK == LOCKFREE
-static inline Link(HeapManager.Storage) * getNext( HeapManager.Storage * this ) { return &this->header.kind.real.next; }
-static inline void ?{}( HeapManager.FreeHeader & ) {}
-static inline void ^?{}( HeapManager.FreeHeader & ) {}
-#endif // LOCKFREE
 
 static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return freeheader.blockSize; }
@@ -254,6 +251,4 @@
 static bool heapBoot = 0;			// detect recursion during boot
 #endif // __CFA_DEBUG__
-
-// The constructor for heapManager is called explicitly in memory_startup.
 static HeapManager heapManager __attribute__(( aligned (128) )) @= {};	// size of cache line to prevent false sharing
 
@@ -359,4 +354,11 @@
 
 
+// static inline void noMemory() {
+// 	abort( "Heap memory exhausted at %zu bytes.\n"
+// 		"Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
+// 		((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
+// } // noMemory
+
+
 // thunk problem
 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) {
@@ -404,11 +406,4 @@
 
 
-// static inline void noMemory() {
-// 	abort( "Heap memory exhausted at %zu bytes.\n"
-// 		"Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
-// 		((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
-// } // noMemory
-
-
 static inline void checkAlign( size_t alignment ) {
 	if ( alignment < libAlign() || ! libPow2( alignment ) ) {
@@ -438,5 +433,4 @@
 
 
-static inline bool headers( const char name[] __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem,
-							size_t & size, size_t & alignment ) with( heapManager ) {
+static inline bool headers( const char name[] __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
 	header = headerAddr( addr );
@@ -471,4 +465,4 @@
 
 
-static inline void * extend( size_t size ) with( heapManager ) {
+static inline void * extend( size_t size ) with ( heapManager ) {
 	lock( extlock __cfaabi_dbg_ctx2 );
@@ -502,4 +496,4 @@
 
 
-static inline void * doMalloc( size_t size ) with( heapManager ) {
+static inline void * doMalloc( size_t size ) with ( heapManager ) {
 	HeapManager.Storage * block;		// pointer to new block of storage
@@ -535,14 +529,14 @@
 		// Spin until the lock is acquired for this particular size of block.
 
-		#if BUCKETLOCK == SPINLOCK
+		#if defined( SPINLOCK )
 		lock( freeElem->lock __cfaabi_dbg_ctx2 );
 		block = freeElem->freeList;		// remove node from stack
 		#else
-		block = pop( freeElem->freeList );
-		#endif // BUCKETLOCK
+		block = freeElem->freeList.pop();
+		#endif // SPINLOCK
 		if ( unlikely( block == 0p ) ) {	// no free block ?
-			#if BUCKETLOCK == SPINLOCK
+			#if defined( SPINLOCK )
 			unlock( freeElem->lock );
-			#endif // BUCKETLOCK
+			#endif // SPINLOCK
 
 			// Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more
@@ -550,9 +544,9 @@
 
 			block = (HeapManager.Storage *)extend( tsize );	// mutual exclusion on call
-			if ( unlikely( block == 0p ) ) return 0p;
-		#if BUCKETLOCK == SPINLOCK
+  if ( unlikely( block == 0p ) ) return 0p;
+		#if defined( SPINLOCK )
 		} else {
 			freeElem->freeList = block->header.kind.real.next;
 			unlock( freeElem->lock );
-		#endif // BUCKETLOCK
+		#endif // SPINLOCK
 		} // if
@@ -578,4 +572,4 @@
 	} // if
 
-	block->header.kind.real.size = size;	// store allocation size
+	block->header.size = size;		// store allocation size
 	void * addr = &(block->data);		// adjust off header to user bytes
@@ -597,5 +591,5 @@
 
 
-static inline void doFree( void * addr ) with( heapManager ) {
+static inline void doFree( void * addr ) with ( heapManager ) {
 	#ifdef __CFA_DEBUG__
 	if ( unlikely( heapManager.heapBegin == 0p ) ) {
@@ -629,5 +623,5 @@
 		free_storage += size;
 		#endif // __STATISTICS__
-		#if BUCKETLOCK == SPINLOCK
+		#if defined( SPINLOCK )
 		lock( freeElem->lock __cfaabi_dbg_ctx2 );	// acquire spin lock
 		header->kind.real.next = freeElem->freeList;	// push on stack
@@ -635,5 +629,5 @@
 		unlock( freeElem->lock );		// release spin lock
 		#else
-		push( freeElem->freeList, *(HeapManager.Storage *)header );
-		#endif // BUCKETLOCK
+		freeElem->freeList.push( *(HeapManager.Storage *)header );
+		#endif // SPINLOCK
 	} // if
@@ -651,5 +645,5 @@
 
 
-size_t prtFree( HeapManager & manager ) with( manager ) {
+size_t prtFree( HeapManager & manager ) with ( manager ) {
 	size_t total = 0;
 	#ifdef __STATISTICS__
@@ -663,11 +657,9 @@
 	#endif // __STATISTICS__
 
-		#if BUCKETLOCK == SPINLOCK
+		#if defined( SPINLOCK )
 		for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
 		#else
-		for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */ ) {
-			typeof(p) temp = getNext( p )->top;	// FIX ME: direct assignent fails, initialization works
-			p = temp;
-		#endif // BUCKETLOCK
+		for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0p; p = p->header.kind.real.next.top ) {
+		#endif // SPINLOCK
 			total += size;
 			#ifdef __STATISTICS__
@@ -689,5 +681,5 @@
 
 
-static void ?{}( HeapManager & manager ) with( manager ) {
+static void ?{}( HeapManager & manager ) with ( manager ) {
 	pageSize = sysconf( _SC_PAGESIZE );
 
@@ -1103,5 +1095,5 @@
 		header = realHeader( header );		// backup from fake to real header
 	} // if
-	return header->kind.real.size;
+	return header->size;
 } // malloc_size
 
@@ -1113,6 +1105,6 @@
 		header = realHeader( header );		// backup from fake to real header
 	} // if
-	size_t ret = header->kind.real.size;
-	header->kind.real.size = size;
+	size_t ret = header->size;
+	header->size = size;
 	return ret;
 } // $malloc_size_set
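The recurring BUCKETLOCK/BUCKLOCK conditionals above select, per size class (bucket), between a spinlock-guarded intrusive stack of freed blocks and a lock-free stack. The sketch below restates the spinlock variant in plain C so the discipline is visible in one place; it is not the CFA heap's actual code, and the names Block, Bucket, bucket_init, bucket_pop, and bucket_push are hypothetical.

#include <pthread.h>
#include <stddef.h>

typedef struct Block {
	struct Block * next;			// freed block points at next freed block of the same size
} Block;

typedef struct Bucket {
	pthread_spinlock_t lock;		// must be first field for alignment (mirrors FreeHeader)
	Block * freeList;			// intrusive stack of freed blocks
	size_t blockSize;			// size of allocations on this list
} Bucket;

static void bucket_init( Bucket * b, size_t blockSize ) {
	pthread_spin_init( &b->lock, PTHREAD_PROCESS_PRIVATE );
	b->freeList = NULL;
	b->blockSize = blockSize;
}

// doMalloc's fast path: pop a previously freed block for reuse, or NULL if the bucket is empty.
static Block * bucket_pop( Bucket * b ) {
	pthread_spin_lock( &b->lock );
	Block * blk = b->freeList;
	if ( blk != NULL ) b->freeList = blk->next;	// unlink top of stack
	pthread_spin_unlock( &b->lock );
	return blk;
}

// doFree's path: push a freed block back onto its home bucket.
static void bucket_push( Bucket * b, Block * blk ) {
	pthread_spin_lock( &b->lock );
	blk->next = b->freeList;		// link above current top
	b->freeList = blk;
	pthread_spin_unlock( &b->lock );
}

In the LOCKFREE configuration the lock and the Storage * next link are replaced by a lock-free stack (StackLF) with a "double-wide" link, named Link(Storage) on the left revision and Stack<Storage>::Link on the right; per the diff's own comments this is a Treiber-style stack, and the double-wide link is presumably a pointer-plus-counter pair so that pop can avoid the ABA problem.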