Changeset 9c438546
- Timestamp:
- May 17, 2020, 9:06:28 PM (4 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 893da07
- Parents:
- 2223c80
- Location:
- libcfa/src
- Files:
- 1 added
- 1 edited
Legend:
- Unmodified
- Added
- Removed
libcfa/src/heap.cfa
r2223c80 r9c438546 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed May 6 17:29:26202013 // Update Count : 7 2712 // Last Modified On : Sun May 17 20:58:17 2020 13 // Update Count : 762 14 14 // 15 15 … … 128 128 #define LOCKFREE 1 129 129 #define BUCKETLOCK SPINLOCK 130 #if BUCKETLOCK == LOCKFREE 131 #include <uStackLF.h> 130 #if BUCKETLOCK == SPINLOCK 131 #elif BUCKETLOCK == LOCKFREE 132 #include <stackLockFree.hfa> 133 #else 134 #error undefined lock type for bucket lock 132 135 #endif // LOCKFREE 133 136 … … 137 140 138 141 struct HeapManager { 139 // struct FreeHeader; // forward declaration140 141 142 struct Storage { 142 143 struct Header { // header … … 146 147 struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header 147 148 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4 148 uint 32_t padding; // unused, force home/blocksize to overlay alignment in fake header149 uint64_t padding; // unused, force home/blocksize to overlay alignment in fake header 149 150 #endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4 150 151 151 152 union { 152 //FreeHeader * home; // allocated block points back to home locations (must overlay alignment)153 // FreeHeader * home; // allocated block points back to home locations (must overlay alignment) 153 154 // 2nd low-order bit => zero filled 154 155 void * home; // allocated block points back to home locations (must overlay alignment) 155 156 size_t blockSize; // size for munmap (must overlay alignment) 156 #if BUCK LOCK == SPINLOCK157 #if BUCKETLOCK == SPINLOCK 157 158 Storage * next; // freed block points next freed block of same size 158 159 #endif // SPINLOCK 159 160 }; 161 size_t size; // allocation size in bytes 160 162 161 163 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4 162 uint 32_t padding; // unused, force home/blocksize to overlay alignment in fake header164 
uint64_t padding; // unused, force home/blocksize to overlay alignment in fake header 163 165 #endif // __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4 164 166 }; 165 // future code 166 #if BUCKLOCK == LOCKFREE 167 Stack<Storage>::Link next; // freed block points next freed block of same size (double-wide) 167 #if BUCKETLOCK == LOCKFREE 168 Link(Storage) next; // freed block points next freed block of same size (double-wide) 168 169 #endif // LOCKFREE 169 170 }; 170 171 } real; // RealHeader 172 171 173 struct FakeHeader { 172 174 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 173 // 1st low-order bit => fake header & alignment 174 uint32_t alignment; 175 uint32_t alignment; // 1st low-order bit => fake header & alignment 175 176 #endif // __ORDER_LITTLE_ENDIAN__ 176 177 … … 182 183 } fake; // FakeHeader 183 184 } kind; // Kind 184 size_t size; // allocation size in bytes185 185 } header; // Header 186 186 char pad[libAlign() - sizeof( Header )]; … … 191 191 192 192 struct FreeHeader { 193 #if BUCK LOCK == SPINLOCK193 #if BUCKETLOCK == SPINLOCK 194 194 __spinlock_t lock; // must be first field for alignment 195 195 Storage * freeList; 196 #elif BUCKLOCK == LOCKFREE197 // future code198 StackLF<Storage> freeList;199 196 #else 200 #error undefined lock type for bucket lock201 #endif // SPINLOCK197 StackLF(Storage) freeList; 198 #endif // BUCKETLOCK 202 199 size_t blockSize; // size of allocations on this list 203 200 }; // FreeHeader … … 211 208 size_t heapRemaining; // amount of storage not allocated in the current chunk 212 209 }; // HeapManager 210 211 #if BUCKETLOCK == LOCKFREE 212 static inline Link(HeapManager.Storage) * getNext( HeapManager.Storage * this ) { return &this->header.kind.real.next; } 213 static inline void ?{}( HeapManager.FreeHeader & ) {} 214 static inline void ^?{}( HeapManager.FreeHeader & ) {} 215 #endif // LOCKFREE 213 216 214 217 static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return 
freeheader.blockSize; } … … 251 254 static bool heapBoot = 0; // detect recursion during boot 252 255 #endif // __CFA_DEBUG__ 256 257 // The constructor for heapManager is called explicitly in memory_startup. 253 258 static HeapManager heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 254 259 … … 354 359 355 360 356 // static inline void noMemory() {357 // abort( "Heap memory exhausted at %zu bytes.\n"358 // "Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",359 // ((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );360 // } // noMemory361 362 363 361 // thunk problem 364 362 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) { … … 406 404 407 405 406 // static inline void noMemory() { 407 // abort( "Heap memory exhausted at %zu bytes.\n" 408 // "Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.", 409 // ((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) ); 410 // } // noMemory 411 412 408 413 static inline void checkAlign( size_t alignment ) { 409 414 if ( alignment < libAlign() || ! 
libPow2( alignment ) ) { … … 433 438 434 439 435 static inline bool headers( const char name[] __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) { 440 static inline bool headers( const char name[] __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, 441 size_t & size, size_t & alignment ) with( heapManager ) { 436 442 header = headerAddr( addr ); 437 443 … … 465 471 466 472 467 static inline void * extend( size_t size ) with 473 static inline void * extend( size_t size ) with( heapManager ) { 468 474 lock( extlock __cfaabi_dbg_ctx2 ); 469 475 ptrdiff_t rem = heapRemaining - size; … … 496 502 497 503 498 static inline void * doMalloc( size_t size ) with 504 static inline void * doMalloc( size_t size ) with( heapManager ) { 499 505 HeapManager.Storage * block; // pointer to new block of storage 500 506 … … 529 535 // Spin until the lock is acquired for this particular size of block. 530 536 531 #if defined( SPINLOCK )537 #if BUCKETLOCK == SPINLOCK 532 538 lock( freeElem->lock __cfaabi_dbg_ctx2 ); 533 539 block = freeElem->freeList; // remove node from stack 534 540 #else 535 block = freeElem->freeList.pop();536 #endif // SPINLOCK541 block = pop( freeElem->freeList ); 542 #endif // BUCKETLOCK 537 543 if ( unlikely( block == 0p ) ) { // no free block ? 
538 #if defined( SPINLOCK )544 #if BUCKETLOCK == SPINLOCK 539 545 unlock( freeElem->lock ); 540 #endif // SPINLOCK546 #endif // BUCKETLOCK 541 547 542 548 // Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more … … 544 550 545 551 block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call 546 547 #if defined( SPINLOCK )552 if ( unlikely( block == 0p ) ) return 0p; 553 #if BUCKETLOCK == SPINLOCK 548 554 } else { 549 555 freeElem->freeList = block->header.kind.real.next; 550 556 unlock( freeElem->lock ); 551 #endif // SPINLOCK557 #endif // BUCKETLOCK 552 558 } // if 553 559 … … 572 578 } // if 573 579 574 block->header. size = size;// store allocation size580 block->header.kind.real.size = size; // store allocation size 575 581 void * addr = &(block->data); // adjust off header to user bytes 576 582 … … 591 597 592 598 593 static inline void doFree( void * addr ) with 599 static inline void doFree( void * addr ) with( heapManager ) { 594 600 #ifdef __CFA_DEBUG__ 595 601 if ( unlikely( heapManager.heapBegin == 0p ) ) { … … 623 629 free_storage += size; 624 630 #endif // __STATISTICS__ 625 #if defined( SPINLOCK )631 #if BUCKETLOCK == SPINLOCK 626 632 lock( freeElem->lock __cfaabi_dbg_ctx2 ); // acquire spin lock 627 633 header->kind.real.next = freeElem->freeList; // push on stack … … 629 635 unlock( freeElem->lock ); // release spin lock 630 636 #else 631 freeElem->freeList.push(*(HeapManager.Storage *)header );632 #endif // SPINLOCK637 push( freeElem->freeList, *(HeapManager.Storage *)header ); 638 #endif // BUCKETLOCK 633 639 } // if 634 640 … … 645 651 646 652 647 size_t prtFree( HeapManager & manager ) with 653 size_t prtFree( HeapManager & manager ) with( manager ) { 648 654 size_t total = 0; 649 655 #ifdef __STATISTICS__ … … 657 663 #endif // __STATISTICS__ 658 664 659 #if defined( SPINLOCK )665 #if BUCKETLOCK == SPINLOCK 660 666 for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = 
p->header.kind.real.next ) { 661 667 #else 662 for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0p; p = p->header.kind.real.next.top ) { 663 #endif // SPINLOCK 668 for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */) { 669 typeof(p) temp = getNext( p )->top; // FIX ME: direct assignent fails, initialization works 670 p = temp; 671 #endif // BUCKETLOCK 664 672 total += size; 665 673 #ifdef __STATISTICS__ … … 681 689 682 690 683 static void ?{}( HeapManager & manager ) with 691 static void ?{}( HeapManager & manager ) with( manager ) { 684 692 pageSize = sysconf( _SC_PAGESIZE ); 685 693 … … 1095 1103 header = realHeader( header ); // backup from fake to real header 1096 1104 } // if 1097 return header-> size;1105 return header->kind.real.size; 1098 1106 } // malloc_size 1099 1107 … … 1105 1113 header = realHeader( header ); // backup from fake to real header 1106 1114 } // if 1107 size_t ret = header-> size;1108 header-> size = size;1115 size_t ret = header->kind.real.size; 1116 header->kind.real.size = size; 1109 1117 return ret; 1110 1118 } // $malloc_size_set
Note: See TracChangeset for help on using the changeset viewer.