- Timestamp:
- Jul 24, 2019, 10:40:28 AM (6 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 77d2432, 96ac72c
- Parents:
- 6130304 (diff), 8fc15cf (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- libcfa/src
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/heap.cfa
r6130304 r83b52f1 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T hu May 9 16:29:12201913 // Update Count : 5 1612 // Last Modified On : Tue Jul 23 14:13:13 2019 13 // Update Count : 549 14 14 // 15 15 … … 31 31 32 32 33 enum {34 __CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1),35 __CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),36 };37 38 size_t default_mmap_start() __attribute__(( weak )) {39 return __CFA_DEFAULT_MMAP_START__;40 } // default_mmap_start41 42 size_t default_heap_expansion() __attribute__(( weak )) {43 return __CFA_DEFAULT_HEAP_EXPANSION__;44 } // default_heap_expansion45 46 47 // supported mallopt options48 #ifndef M_MMAP_THRESHOLD49 #define M_MMAP_THRESHOLD (-1)50 #endif // M_TOP_PAD51 #ifndef M_TOP_PAD52 #define M_TOP_PAD (-2)53 #endif // M_TOP_PAD54 55 #define FASTLOOKUP56 #define __STATISTICS__57 58 #define SPINLOCK 059 #define LOCKFREE 160 #define BUCKETLOCK SPINLOCK61 #if BUCKETLOCK == LOCKFREE62 #include <uStackLF.h>63 #endif // LOCKFREE64 65 // #comment TD : This defined is significantly different from the __ALIGN__ define from locks.hfa66 #define ALIGN 1667 68 // enum { NoBucketSizes = 93, // number of buckets sizes69 // #ifdef FASTLOOKUP70 // LookupSizes = 65536, // number of fast lookup sizes71 // #endif // FASTLOOKUP72 // };73 #define NoBucketSizes 93 // number of buckets sizes74 #ifdef FASTLOOKUP75 #define LookupSizes 65536 // number of fast lookup sizes76 #endif // FASTLOOKUP77 78 79 33 static bool traceHeap = false; 80 34 … … 132 86 // return temp; 133 87 // } // traceHeapTermOff 88 89 90 enum { 91 __CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1), 92 __CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024), 93 }; 94 95 size_t default_mmap_start() __attribute__(( weak )) { 96 return __CFA_DEFAULT_MMAP_START__; 97 } // default_mmap_start 98 99 size_t default_heap_expansion() __attribute__(( weak )) { 100 return __CFA_DEFAULT_HEAP_EXPANSION__; 101 } // default_heap_expansion 134 
102 135 103 … … 160 128 #endif // __CFA_DEBUG__ 161 129 130 // statically allocated variables => zero filled. 131 static size_t pageSize; // architecture pagesize 132 static size_t heapExpand; // sbrk advance 133 static size_t mmapStart; // cross over point for mmap 134 static unsigned int maxBucketsUsed; // maximum number of buckets in use 135 136 137 // #comment TD : This defined is significantly different from the __ALIGN__ define from locks.hfa 138 #define ALIGN 16 139 140 #define SPINLOCK 0 141 #define LOCKFREE 1 142 #define BUCKETLOCK SPINLOCK 143 #if BUCKETLOCK == LOCKFREE 144 #include <uStackLF.h> 145 #endif // LOCKFREE 146 147 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage. 148 // Break recusion by hardcoding number of buckets and statically checking number is correct after bucket array defined. 149 enum { NoBucketSizes = 93 }; // number of buckets sizes 162 150 163 151 struct HeapManager { … … 234 222 }; // HeapManager 235 223 236 237 224 static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return freeheader.blockSize; } 238 225 239 // statically allocated variables => zero filled. 240 static size_t pageSize; // architecture pagesize 241 static size_t heapExpand; // sbrk advance 242 static size_t mmapStart; // cross over point for mmap 243 static unsigned int maxBucketsUsed; // maximum number of buckets in use 226 227 #define FASTLOOKUP 228 #define __STATISTICS__ 244 229 245 230 // Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size. 
246 static const unsigned int bucketSizes[ NoBucketSizes] @= {// different bucket sizes231 static const unsigned int bucketSizes[] @= { // different bucket sizes 247 232 16, 32, 48, 64, 248 233 64 + sizeof(HeapManager.Storage), 96, 112, 128, 128 + sizeof(HeapManager.Storage), 160, 192, 224, … … 259 244 4_194_304 + sizeof(HeapManager.Storage) 260 245 }; 246 247 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0]), "size of bucket array wrong" ); 248 261 249 #ifdef FASTLOOKUP 250 static_assert( 16 == sizeof(HeapManager.Storage), "size of HeapManager Storage wrong" ); // FIX ME 251 enum { LookupSizes = 65_536 + 16 }; // number of fast lookup sizes 262 252 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 263 253 #endif // FASTLOOKUP … … 532 522 533 523 524 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) { 525 size_t l = 0, m, h = dim; 526 while ( l < h ) { 527 m = (l + h) / 2; 528 if ( (unsigned int &)(vals[m]) < key ) { // cast away const 529 l = m + 1; 530 } else { 531 h = m; 532 } // if 533 } // while 534 return l; 535 } // Bsearchl 536 537 534 538 static inline void * doMalloc( size_t size ) with ( heapManager ) { 535 539 HeapManager.Storage * block; // pointer to new block of storage … … 540 544 size_t tsize = size + sizeof(HeapManager.Storage); 541 545 if ( likely( tsize < mmapStart ) ) { // small size => sbrk 542 HeapManager.FreeHeader * freeElem = 543 #ifdef FASTLOOKUP 544 tsize < LookupSizes ? 
&freeLists[lookup[tsize]] : 545 #endif // FASTLOOKUP 546 bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search 546 size_t posn; 547 #ifdef FASTLOOKUP 548 if ( tsize < LookupSizes ) posn = lookup[tsize]; 549 else 550 #endif // FASTLOOKUP 551 posn = Bsearchl( (unsigned int)tsize, bucketSizes, (size_t)maxBucketsUsed ); 552 HeapManager.FreeHeader * freeElem = &freeLists[posn]; 553 // #ifdef FASTLOOKUP 554 // if ( tsize < LookupSizes ) 555 // freeElem = &freeLists[lookup[tsize]]; 556 // else 557 // #endif // FASTLOOKUP 558 // freeElem = bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search 559 // HeapManager.FreeHeader * freeElem = 560 // #ifdef FASTLOOKUP 561 // tsize < LookupSizes ? &freeLists[lookup[tsize]] : 562 // #endif // FASTLOOKUP 563 // bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search 547 564 assert( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ? 548 565 assert( tsize <= freeElem->blockSize ); // search failure ? … … 747 764 748 765 766 // supported mallopt options 767 #ifndef M_MMAP_THRESHOLD 768 #define M_MMAP_THRESHOLD (-1) 769 #endif // M_TOP_PAD 770 #ifndef M_TOP_PAD 771 #define M_TOP_PAD (-2) 772 #endif // M_TOP_PAD 773 774 749 775 extern "C" { 750 776 // The malloc() function allocates size bytes and returns a pointer to the allocated memory. The memory is not … … 843 869 void * area; 844 870 if ( unlikely( alignment != 0 ) ) { // previous request memalign? 845 area = memalign( alignment, size ); // create new a rea871 area = memalign( alignment, size ); // create new aligned area 846 872 } else { 847 873 area = mallocNoStats( size ); // create new area -
libcfa/src/stdlib.hfa
r6130304 r83b52f1 10 10 // Created On : Thu Jan 28 17:12:35 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Apr 24 17:35:43201913 // Update Count : 3 5212 // Last Modified On : Tue Jul 23 14:14:59 2019 13 // Update Count : 373 14 14 // 15 15 … … 17 17 18 18 #include "bits/defs.hfa" 19 #include "bits/align.hfa" 19 20 20 21 #include <stdlib.h> // *alloc, strto*, ato* 22 21 23 extern "C" { 22 24 void * memalign( size_t align, size_t size ); // malloc.h … … 39 41 40 42 T * malloc( void ) { 41 return (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 43 if ( _Alignof(T) <= libAlign() ) return (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 44 else return (T *)memalign( _Alignof(T), sizeof(T) ); 42 45 } // malloc 43 46 44 47 T * calloc( size_t dim ) { 45 return (T *)(void *)calloc( dim, sizeof(T) ); // C calloc 48 if ( _Alignof(T) <= libAlign() )return (T *)(void *)calloc( dim, sizeof(T) ); // C calloc 49 else return (T *)cmemalign( _Alignof(T), dim, sizeof(T) ); 46 50 } // calloc 47 51 48 52 T * realloc( T * ptr, size_t size ) { 53 if ( unlikely( ptr == 0 ) ) return malloc(); 49 54 return (T *)(void *)realloc( (void *)ptr, size ); 50 55 } // realloc … … 66 71 67 72 T * alloc( void ) { 68 return (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc73 return malloc(); 69 74 } // alloc 70 75 71 76 T * alloc( char fill ) { 72 T * ptr = (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 77 T * ptr; 78 if ( _Alignof(T) <= libAlign() ) ptr = (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 79 else ptr = (T *)memalign( _Alignof(T), sizeof(T) ); 73 80 return (T *)memset( ptr, (int)fill, sizeof(T) ); // initialize with fill value 74 81 } // alloc 75 82 76 83 T * alloc( size_t dim ) { 77 return (T *)(void *)malloc( dim * (size_t)sizeof(T) ); // C malloc 84 if ( _Alignof(T) <= libAlign() ) return (T *)(void *)malloc( dim * (size_t)sizeof(T) ); // C malloc 85 else return (T *)memalign( _Alignof(T), dim * sizeof(T) ); 78 86 } // alloc 79 
87 80 88 T * alloc( size_t dim, char fill ) { 81 T * ptr = (T *)(void *)malloc( dim * (size_t)sizeof(T) ); // C calloc 82 return (T *)memset( ptr, (int)fill, dim * sizeof(T) ); // initialize with fill value 89 return (T *)memset( (T *)alloc( dim ), (int)fill, dim * sizeof(T) ); // initialize with fill value 83 90 } // alloc 84 91 85 92 T * alloc( T ptr[], size_t dim ) { 86 return (T *)(void *)realloc( (void *)ptr, dim * (size_t)sizeof(T) ); // C realloc 87 } // alloc 88 } // distribution 89 90 91 forall( dtype T | sized(T) ) T * alloc( T ptr[], size_t dim, char fill ); 93 return realloc( ptr, dim * sizeof(T) ); 94 } // alloc 95 } // distribution 92 96 93 97 … … 107 111 108 112 T * align_alloc( size_t align, size_t dim, char fill ) { 109 T * ptr;110 113 if ( fill == '\0' ) { 111 ptr =(T *)cmemalign( align, dim, sizeof(T) );114 return (T *)cmemalign( align, dim, sizeof(T) ); 112 115 } else { 113 ptr = (T *)memalign( align, dim * sizeof(T) ); 114 return (T *)memset( ptr, (int)fill, dim * sizeof(T) ); 116 return (T *)memset( (T *)memalign( align, dim * sizeof(T) ), (int)fill, dim * sizeof(T) ); 115 117 } // if 116 return ptr; 117 } // align_alloc 118 } // distribution 118 } // align_alloc 119 } // distribution 120 121 forall( dtype T | sized(T) ) T * alloc( T ptr[], size_t dim, char fill ); 119 122 120 123
Note: See TracChangeset for help on using the changeset viewer.