Changeset 74ec742 for libcfa/src/heap.cfa
- Timestamp:
- May 20, 2022, 10:36:45 AM (4 years ago)
- Branches:
- ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children:
- 25fa20a
- Parents:
- 29d8c02 (diff), 7831e8fb (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - File:
-
- 1 edited
-
libcfa/src/heap.cfa (modified) (42 diffs)
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/heap.cfa
r29d8c02 r74ec742 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Apr 25 18:51:36202213 // Update Count : 11 4712 // Last Modified On : Fri Apr 29 19:05:03 2022 13 // Update Count : 1167 14 14 // 15 15 … … 36 36 static bool traceHeap = false; 37 37 38 inline bool traceHeap() { return traceHeap; }39 40 bool traceHeapOn() {38 inline bool traceHeap() libcfa_public { return traceHeap; } 39 40 bool traceHeapOn() libcfa_public { 41 41 bool temp = traceHeap; 42 42 traceHeap = true; … … 44 44 } // traceHeapOn 45 45 46 bool traceHeapOff() {46 bool traceHeapOff() libcfa_public { 47 47 bool temp = traceHeap; 48 48 traceHeap = false; … … 50 50 } // traceHeapOff 51 51 52 bool traceHeapTerm() { return false; }52 bool traceHeapTerm() libcfa_public { return false; } 53 53 54 54 55 55 static bool prtFree = false; 56 56 57 bool prtFree() {57 static bool prtFree() { 58 58 return prtFree; 59 59 } // prtFree 60 60 61 bool prtFreeOn() {61 static bool prtFreeOn() { 62 62 bool temp = prtFree; 63 63 prtFree = true; … … 65 65 } // prtFreeOn 66 66 67 bool prtFreeOff() {67 static bool prtFreeOff() { 68 68 bool temp = prtFree; 69 69 prtFree = false; … … 87 87 88 88 89 #ifdef __CFA_DEBUG__ 90 static size_t allocUnfreed; // running total of allocations minus frees 91 92 static void prtUnfreed() { 93 if ( allocUnfreed != 0 ) { 94 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 
95 char helpText[512]; 96 int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n" 97 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n", 98 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid 99 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 100 } // if 101 } // prtUnfreed 102 103 extern int cfa_main_returned; // from interpose.cfa 104 extern "C" { 105 void heapAppStart() { // called by __cfaabi_appready_startup 106 allocUnfreed = 0; 107 } // heapAppStart 108 109 void heapAppStop() { // called by __cfaabi_appready_startdown 110 fclose( stdin ); fclose( stdout ); 111 if ( cfa_main_returned ) prtUnfreed(); // do not check unfreed storage if exit called 112 } // heapAppStop 113 } // extern "C" 114 #endif // __CFA_DEBUG__ 115 116 117 // statically allocated variables => zero filled. 118 static size_t heapExpand; // sbrk advance 119 static size_t mmapStart; // cross over point for mmap 120 static unsigned int maxBucketsUsed; // maximum number of buckets in use 121 // extern visibility, used by runtime kernel 122 size_t __page_size; // architecture pagesize 123 int __map_prot; // common mmap/mprotect protection 124 125 126 #define SPINLOCK 0 127 #define LOCKFREE 1 128 #define BUCKETLOCK SPINLOCK 129 #if BUCKETLOCK == SPINLOCK 130 #elif BUCKETLOCK == LOCKFREE 131 #include <stackLockFree.hfa> 132 #else 133 #error undefined lock type for bucket lock 134 #endif // LOCKFREE 135 136 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage. 137 // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined. 
138 enum { NoBucketSizes = 91 }; // number of buckets sizes 139 140 struct Heap { 141 struct Storage { 142 struct Header { // header 143 union Kind { 144 struct RealHeader { 145 union { 146 struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header 147 union { 148 // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped 149 // FreeHeader * home; // allocated block points back to home locations (must overlay alignment) 150 void * home; // allocated block points back to home locations (must overlay alignment) 151 size_t blockSize; // size for munmap (must overlay alignment) 152 #if BUCKETLOCK == SPINLOCK 153 Storage * next; // freed block points to next freed block of same size 154 #endif // SPINLOCK 155 }; 156 size_t size; // allocation size in bytes 157 }; 158 #if BUCKETLOCK == LOCKFREE 159 Link(Storage) next; // freed block points next freed block of same size (double-wide) 160 #endif // LOCKFREE 161 }; 162 } real; // RealHeader 163 164 struct FakeHeader { 165 uintptr_t alignment; // 1st low-order bit => fake header & alignment 166 uintptr_t offset; 167 } fake; // FakeHeader 168 } kind; // Kind 169 } header; // Header 170 171 char pad[libAlign() - sizeof( Header )]; 172 char data[0]; // storage 173 }; // Storage 174 175 static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" ); 176 177 struct FreeHeader { 178 #if BUCKETLOCK == SPINLOCK 179 __spinlock_t lock; // must be first field for alignment 180 Storage * freeList; 181 #else 182 StackLF(Storage) freeList; 183 #endif // BUCKETLOCK 184 size_t blockSize; // size of allocations on this list 185 }; // FreeHeader 186 187 // must be first fields for alignment 188 __spinlock_t extlock; // protects allocation-buffer extension 189 FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes 190 191 void * heapBegin; // start of heap 192 void * heapEnd; // logical end of heap 193 size_t heapRemaining; // amount of storage not allocated in the current 
chunk 194 }; // Heap 195 196 #if BUCKETLOCK == LOCKFREE 197 static inline { 198 Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; } 199 void ?{}( Heap.FreeHeader & ) {} 200 void ^?{}( Heap.FreeHeader & ) {} 201 } // distribution 202 #endif // LOCKFREE 203 204 static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; } 205 206 207 #ifdef FASTLOOKUP 208 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes 209 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 210 #endif // FASTLOOKUP 211 212 static const off_t mmapFd = -1; // fake or actual fd for anonymous file 213 #ifdef __CFA_DEBUG__ 214 static bool heapBoot = 0; // detect recursion during boot 215 #endif // __CFA_DEBUG__ 216 217 218 // Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16. 219 // Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size. 220 // malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed. 
221 static const unsigned int bucketSizes[] @= { // different bucket sizes 222 16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4 223 96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3 224 160, 192, 224, 256 + sizeof(Heap.Storage), // 4 225 320, 384, 448, 512 + sizeof(Heap.Storage), // 4 226 640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4 227 1_536, 2_048 + sizeof(Heap.Storage), // 2 228 2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4 229 6_144, 8_192 + sizeof(Heap.Storage), // 2 230 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8 231 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8 232 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8 233 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8 234 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8 235 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8 236 655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4 237 1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8 238 2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4 239 }; 240 241 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" ); 242 243 // The constructor for heapManager is called explicitly in memory_startup. 244 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 245 246 247 //####################### Memory Allocation Routines Helpers #################### 89 //####################### Heap Statistics #################### 248 90 249 91 … … 307 149 return lhs; 308 150 } // ?+=? 
309 151 #endif // __STATISTICS__ 152 153 154 #define SPINLOCK 0 155 #define LOCKFREE 1 156 #define BUCKETLOCK SPINLOCK 157 #if BUCKETLOCK == SPINLOCK 158 #elif BUCKETLOCK == LOCKFREE 159 #include <stackLockFree.hfa> 160 #else 161 #error undefined lock type for bucket lock 162 #endif // LOCKFREE 163 164 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage. 165 // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined. 166 enum { NoBucketSizes = 91 }; // number of buckets sizes 167 168 struct Heap { 169 struct Storage { 170 struct Header { // header 171 union Kind { 172 struct RealHeader { 173 union { 174 struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header 175 union { 176 // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped 177 // FreeHeader * home; // allocated block points back to home locations (must overlay alignment) 178 void * home; // allocated block points back to home locations (must overlay alignment) 179 size_t blockSize; // size for munmap (must overlay alignment) 180 #if BUCKETLOCK == SPINLOCK 181 Storage * next; // freed block points to next freed block of same size 182 #endif // SPINLOCK 183 }; 184 size_t size; // allocation size in bytes 185 }; 186 #if BUCKETLOCK == LOCKFREE 187 Link(Storage) next; // freed block points next freed block of same size (double-wide) 188 #endif // LOCKFREE 189 }; 190 } real; // RealHeader 191 192 struct FakeHeader { 193 uintptr_t alignment; // 1st low-order bit => fake header & alignment 194 uintptr_t offset; 195 } fake; // FakeHeader 196 } kind; // Kind 197 } header; // Header 198 199 char pad[libAlign() - sizeof( Header )]; 200 char data[0]; // storage 201 }; // Storage 202 203 static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" ); 204 205 struct FreeHeader { 206 size_t blockSize __attribute__(( aligned (8) )); // size of 
allocations on this list 207 #if BUCKETLOCK == SPINLOCK 208 __spinlock_t lock; 209 Storage * freeList; 210 #else 211 StackLF(Storage) freeList; 212 #endif // BUCKETLOCK 213 } __attribute__(( aligned (8) )); // FreeHeader 214 215 FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes 216 217 __spinlock_t extlock; // protects allocation-buffer extension 218 void * heapBegin; // start of heap 219 void * heapEnd; // logical end of heap 220 size_t heapRemaining; // amount of storage not allocated in the current chunk 221 }; // Heap 222 223 #if BUCKETLOCK == LOCKFREE 224 static inline { 225 Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; } 226 void ?{}( Heap.FreeHeader & ) {} 227 void ^?{}( Heap.FreeHeader & ) {} 228 } // distribution 229 #endif // LOCKFREE 230 231 static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; } 232 233 234 #ifdef FASTLOOKUP 235 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes 236 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 237 #endif // FASTLOOKUP 238 239 static const off_t mmapFd = -1; // fake or actual fd for anonymous file 240 #ifdef __CFA_DEBUG__ 241 static bool heapBoot = 0; // detect recursion during boot 242 #endif // __CFA_DEBUG__ 243 244 245 // Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16. 246 // Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size. 247 // malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed. 
248 static const unsigned int bucketSizes[] @= { // different bucket sizes 249 16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4 250 96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3 251 160, 192, 224, 256 + sizeof(Heap.Storage), // 4 252 320, 384, 448, 512 + sizeof(Heap.Storage), // 4 253 640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4 254 1_536, 2_048 + sizeof(Heap.Storage), // 2 255 2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4 256 6_144, 8_192 + sizeof(Heap.Storage), // 2 257 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8 258 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8 259 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8 260 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8 261 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8 262 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8 263 655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4 264 1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8 265 2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4 266 }; 267 268 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" ); 269 270 // The constructor for heapManager is called explicitly in memory_startup. 
271 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 272 273 274 //####################### Memory Allocation Routines Helpers #################### 275 276 277 #ifdef __CFA_DEBUG__ 278 static size_t allocUnfreed; // running total of allocations minus frees 279 280 static void prtUnfreed() { 281 if ( allocUnfreed != 0 ) { 282 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 283 char helpText[512]; 284 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 285 "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n" 286 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n", 287 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid 288 } // if 289 } // prtUnfreed 290 291 extern int cfa_main_returned; // from interpose.cfa 292 extern "C" { 293 void heapAppStart() { // called by __cfaabi_appready_startup 294 allocUnfreed = 0; 295 } // heapAppStart 296 297 void heapAppStop() { // called by __cfaabi_appready_startdown 298 fclose( stdin ); fclose( stdout ); 299 if ( cfa_main_returned ) prtUnfreed(); // do not check unfreed storage if exit called 300 } // heapAppStop 301 } // extern "C" 302 #endif // __CFA_DEBUG__ 303 304 305 #ifdef __STATISTICS__ 310 306 static HeapStatistics stats; // zero filled 311 307 static unsigned int sbrk_calls; … … 387 383 388 384 385 // statically allocated variables => zero filled. 
386 static size_t heapExpand; // sbrk advance 387 static size_t mmapStart; // cross over point for mmap 388 static unsigned int maxBucketsUsed; // maximum number of buckets in use 389 // extern visibility, used by runtime kernel 390 // would be cool to remove libcfa_public but it's needed for libcfathread 391 libcfa_public size_t __page_size; // architecture pagesize 392 libcfa_public int __map_prot; // common mmap/mprotect protection 393 394 389 395 // thunk problem 390 396 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) { … … 490 496 } else { 491 497 fakeHeader( header, alignment ); 492 if ( unlikely( MmappedBit( header ) ) ) { 493 assert( addr < heapBegin || heapEnd < addr );498 if ( unlikely( MmappedBit( header ) ) ) { // mmapped ? 499 verify( addr < heapBegin || heapEnd < addr ); 494 500 size = ClearStickyBits( header->kind.real.blockSize ); // mmap size 495 501 return true; … … 503 509 checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -) 504 510 505 if ( freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) { 506 abort( "Attempt to %s storage %p with corrupted header.\n" 507 "Possible cause is duplicate free on same block or overwriting of header information.", 508 name, addr ); 509 } // if 511 Heap * homeManager; 512 if ( unlikely( freeHead == 0p || // freed and only free-list node => null link 513 // freed and link points at another free block not to a bucket in the bucket array. 
514 freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) ) { 515 abort( "**** Error **** attempt to %s storage %p with corrupted header.\n" 516 "Possible cause is duplicate free on same block or overwriting of header information.", 517 name, addr ); 518 } // if 510 519 #endif // __CFA_DEBUG__ 511 520 … … 560 569 sbrk_storage += increase; 561 570 #endif // __STATISTICS__ 571 562 572 #ifdef __CFA_DEBUG__ 563 573 // Set new memory to garbage so subsequent uninitialized usages might fail. … … 565 575 //Memset( (char *)heapEnd + heapRemaining, increase ); 566 576 #endif // __CFA_DEBUG__ 577 567 578 rem = heapRemaining + increase - size; 568 579 } // if … … 651 662 __atomic_add_fetch( &allocUnfreed, tsize, __ATOMIC_SEQ_CST ); 652 663 if ( traceHeap() ) { 653 enum { BufferSize = 64 }; 654 char helpText[BufferSize]; 655 int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); 656 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 664 char helpText[64]; 665 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 666 "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); // print debug/nodebug 657 667 } // if 658 668 #endif // __CFA_DEBUG__ … … 711 721 if ( traceHeap() ) { 712 722 char helpText[64]; 713 int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );714 __cfaabi_bits_write( STDERR_FILENO, helpText, len); // print debug/nodebug723 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 724 "Free( %p ) size:%zu\n", addr, size ); // print debug/nodebug 715 725 } // if 716 726 #endif // __CFA_DEBUG__ … … 718 728 719 729 720 s ize_t prtFree( Heap & manager ) with( manager ) {730 static size_t prtFree( Heap & manager ) with( manager ) { 721 731 size_t total = 0; 722 732 #ifdef __STATISTICS__ … … 870 880 // Allocates size bytes and returns a pointer to the allocated memory. The contents are undefined. 
If size is 0, 871 881 // then malloc() returns a unique pointer value that can later be successfully passed to free(). 872 void * malloc( size_t size ) {882 void * malloc( size_t size ) libcfa_public { 873 883 #ifdef __STATISTICS__ 874 884 if ( likely( size > 0 ) ) { … … 885 895 886 896 // Same as malloc() except size bytes is an array of dim elements each of elemSize bytes. 887 void * aalloc( size_t dim, size_t elemSize ) {897 void * aalloc( size_t dim, size_t elemSize ) libcfa_public { 888 898 size_t size = dim * elemSize; 889 899 #ifdef __STATISTICS__ … … 901 911 902 912 // Same as aalloc() with memory set to zero. 903 void * calloc( size_t dim, size_t elemSize ) {913 void * calloc( size_t dim, size_t elemSize ) libcfa_public { 904 914 size_t size = dim * elemSize; 905 915 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER … … 942 952 // not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier 943 953 // call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done. 944 void * resize( void * oaddr, size_t size ) {954 void * resize( void * oaddr, size_t size ) libcfa_public { 945 955 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 946 956 if ( unlikely( size == 0 ) ) { // special cases … … 987 997 // Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of 988 998 // the old and new sizes. 989 void * realloc( void * oaddr, size_t size ) {999 void * realloc( void * oaddr, size_t size ) libcfa_public { 990 1000 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 991 1001 if ( unlikely( size == 0 ) ) { // special cases … … 1051 1061 1052 1062 // Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize. 
1053 void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) {1063 void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) libcfa_public { 1054 1064 return realloc( oaddr, dim * elemSize ); 1055 1065 } // reallocarray … … 1057 1067 1058 1068 // Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete) 1059 void * memalign( size_t alignment, size_t size ) {1069 void * memalign( size_t alignment, size_t size ) libcfa_public { 1060 1070 #ifdef __STATISTICS__ 1061 1071 if ( likely( size > 0 ) ) { … … 1072 1082 1073 1083 // Same as aalloc() with memory alignment. 1074 void * amemalign( size_t alignment, size_t dim, size_t elemSize ) {1084 void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public { 1075 1085 size_t size = dim * elemSize; 1076 1086 #ifdef __STATISTICS__ … … 1088 1098 1089 1099 // Same as calloc() with memory alignment. 1090 void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) {1100 void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public { 1091 1101 size_t size = dim * elemSize; 1092 1102 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER … … 1127 1137 // Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple 1128 1138 // of alignment. This requirement is universally ignored. 1129 void * aligned_alloc( size_t alignment, size_t size ) {1139 void * aligned_alloc( size_t alignment, size_t size ) libcfa_public { 1130 1140 return memalign( alignment, size ); 1131 1141 } // aligned_alloc … … 1136 1146 // is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to 1137 1147 // free(3). 1138 int posix_memalign( void ** memptr, size_t alignment, size_t size ) {1148 int posix_memalign( void ** memptr, size_t alignment, size_t size ) libcfa_public { 1139 1149 if ( unlikely( alignment < libAlign() || ! 
is_pow2( alignment ) ) ) return EINVAL; // check alignment 1140 1150 *memptr = memalign( alignment, size ); … … 1145 1155 // Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the 1146 1156 // page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size). 1147 void * valloc( size_t size ) {1157 void * valloc( size_t size ) libcfa_public { 1148 1158 return memalign( __page_size, size ); 1149 1159 } // valloc … … 1151 1161 1152 1162 // Same as valloc but rounds size to multiple of page size. 1153 void * pvalloc( size_t size ) {1163 void * pvalloc( size_t size ) libcfa_public { 1154 1164 return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size 1155 1165 } // pvalloc … … 1159 1169 // or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is 1160 1170 // 0p, no operation is performed. 1161 void free( void * addr ) {1171 void free( void * addr ) libcfa_public { 1162 1172 if ( unlikely( addr == 0p ) ) { // special case 1163 1173 #ifdef __STATISTICS__ … … 1180 1190 1181 1191 // Returns the alignment of an allocation. 1182 size_t malloc_alignment( void * addr ) {1192 size_t malloc_alignment( void * addr ) libcfa_public { 1183 1193 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment 1184 1194 Heap.Storage.Header * header = HeaderAddr( addr ); … … 1192 1202 1193 1203 // Returns true if the allocation is zero filled, e.g., allocated by calloc(). 1194 bool malloc_zero_fill( void * addr ) {1204 bool malloc_zero_fill( void * addr ) libcfa_public { 1195 1205 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 1196 1206 Heap.Storage.Header * header = HeaderAddr( addr ); … … 1203 1213 1204 1214 // Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T). 
1205 size_t malloc_size( void * addr ) {1215 size_t malloc_size( void * addr ) libcfa_public { 1206 1216 if ( unlikely( addr == 0p ) ) return 0; // null allocation has zero size 1207 1217 Heap.Storage.Header * header = HeaderAddr( addr ); … … 1215 1225 // Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by 1216 1226 // malloc or a related function. 1217 size_t malloc_usable_size( void * addr ) {1227 size_t malloc_usable_size( void * addr ) libcfa_public { 1218 1228 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size 1219 1229 Heap.Storage.Header * header; … … 1227 1237 1228 1238 // Prints (on default standard error) statistics about memory allocated by malloc and related functions. 1229 void malloc_stats( void ) {1239 void malloc_stats( void ) libcfa_public { 1230 1240 #ifdef __STATISTICS__ 1231 1241 printStats(); … … 1236 1246 1237 1247 // Changes the file descriptor where malloc_stats() writes statistics. 1238 int malloc_stats_fd( int fd __attribute__(( unused )) ) {1248 int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public { 1239 1249 #ifdef __STATISTICS__ 1240 1250 int temp = stats_fd; … … 1250 1260 // The string is printed on the file stream stream. The exported string includes information about all arenas (see 1251 1261 // malloc). 1252 int malloc_info( int options, FILE * stream __attribute__(( unused )) ) {1262 int malloc_info( int options, FILE * stream __attribute__(( unused )) ) libcfa_public { 1253 1263 if ( options != 0 ) { errno = EINVAL; return -1; } 1254 1264 #ifdef __STATISTICS__ … … 1262 1272 // Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The param argument 1263 1273 // specifies the parameter to be modified, and value specifies the new value for that parameter. 
1264 int mallopt( int option, int value ) {1274 int mallopt( int option, int value ) libcfa_public { 1265 1275 if ( value < 0 ) return 0; 1266 1276 choose( option ) { … … 1276 1286 1277 1287 // Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument). 1278 int malloc_trim( size_t ) {1288 int malloc_trim( size_t ) libcfa_public { 1279 1289 return 0; // => impossible to release memory 1280 1290 } // malloc_trim … … 1285 1295 // structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function 1286 1296 // result. (The caller must free this memory.) 1287 void * malloc_get_state( void ) {1297 void * malloc_get_state( void ) libcfa_public { 1288 1298 return 0p; // unsupported 1289 1299 } // malloc_get_state … … 1292 1302 // Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data 1293 1303 // structure pointed to by state. 1294 int malloc_set_state( void * ) {1304 int malloc_set_state( void * ) libcfa_public { 1295 1305 return 0; // unsupported 1296 1306 } // malloc_set_state … … 1298 1308 1299 1309 // Sets the amount (bytes) to extend the heap when there is insufficent free storage to service an allocation. 1300 __attribute__((weak)) size_t malloc_expansion() { return __CFA_DEFAULT_HEAP_EXPANSION__; }1310 __attribute__((weak)) size_t malloc_expansion() libcfa_public { return __CFA_DEFAULT_HEAP_EXPANSION__; } 1301 1311 1302 1312 // Sets the crossover point between allocations occuring in the sbrk area or separately mmapped. 1303 __attribute__((weak)) size_t malloc_mmap_start() { return __CFA_DEFAULT_MMAP_START__; }1313 __attribute__((weak)) size_t malloc_mmap_start() libcfa_public { return __CFA_DEFAULT_MMAP_START__; } 1304 1314 1305 1315 // Amount subtracted to adjust for unfreed program storage (debug only). 
1306 __attribute__((weak)) size_t malloc_unfreed() { return __CFA_DEFAULT_HEAP_UNFREED__; }1316 __attribute__((weak)) size_t malloc_unfreed() libcfa_public { return __CFA_DEFAULT_HEAP_UNFREED__; } 1307 1317 } // extern "C" 1308 1318 1309 1319 1310 1320 // Must have CFA linkage to overload with C linkage realloc. 1311 void * resize( void * oaddr, size_t nalign, size_t size ) {1321 void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public { 1312 1322 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 1313 1323 if ( unlikely( size == 0 ) ) { // special cases … … 1371 1381 1372 1382 1373 void * realloc( void * oaddr, size_t nalign, size_t size ) {1383 void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public { 1374 1384 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 1375 1385 if ( unlikely( size == 0 ) ) { // special cases
Note:
See TracChangeset
for help on using the changeset viewer.