Changeset 116a2ea
- Timestamp: Oct 11, 2022, 8:23:26 PM (3 years ago)
- Branches: ADT, ast-experimental, master
- Children: 4f102fa
- Parents: 301071a
- Files: 8 edited
libcfa/src/concurrency/kernel/startup.cfa
r301071a → r116a2ea

 	…
+	extern void heapManagerCtor();
+	extern void heapManagerDtor();
+
 	//=============================================================================================
 	// Kernel Setup logic
 	…
 	proc->local_data = &__cfaabi_tls;
+
+	heapManagerCtor();								// initialize heap
+
 	__cfa_io_start( proc );
 	register_tls( proc );
 	…
 	unregister_tls( proc );
 	__cfa_io_stop( proc );
+
+	heapManagerDtor();								// de-initialize heap

 	return 0p;
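The two calls added above bracket a kernel thread's lifetime, so each kernel thread acquires a thread-local heap on startup and returns it for reuse on shutdown. A minimal sketch of that pattern in plain C with pthreads follows; heap_t, acquire_heap, release_heap and thread_main are illustrative stand-ins for this example, not the CFA runtime API.

#include <pthread.h>
#include <stdlib.h>

typedef struct heap_t {
	struct heap_t * next_free;              // intrusive link on the global free-heap list
	// per-thread bucket free lists would live here
} heap_t;

static pthread_mutex_t mgr_lock = PTHREAD_MUTEX_INITIALIZER;
static heap_t * free_heaps = NULL;          // heaps left behind by exited threads
static __thread heap_t * my_heap = NULL;    // thread-local heap pointer

static void acquire_heap( void ) {          // plays the role of heapManagerCtor()
	pthread_mutex_lock( &mgr_lock );
	if ( free_heaps != NULL ) {             // reuse a heap from a terminated thread
		my_heap = free_heaps;
		free_heaps = free_heaps->next_free;
	} else {                                // otherwise create a new one
		my_heap = calloc( 1, sizeof(heap_t) );
	}
	pthread_mutex_unlock( &mgr_lock );
}

static void release_heap( void ) {          // plays the role of heapManagerDtor()
	pthread_mutex_lock( &mgr_lock );
	my_heap->next_free = free_heaps;        // keep the heap for reuse; its storage is not destroyed
	free_heaps = my_heap;
	pthread_mutex_unlock( &mgr_lock );
}

static void * thread_main( void * arg ) {
	(void)arg;
	acquire_heap();                         // same position as heapManagerCtor() in startup.cfa
	// ... thread body allocates from my_heap ...
	release_heap();                         // same position as heapManagerDtor()
	return NULL;
}

int main( void ) {
	pthread_t t;
	pthread_create( &t, NULL, thread_main, NULL );
	pthread_join( t, NULL );
	return 0;
}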
libcfa/src/heap.cfa
r301071a r116a2ea 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Apr 29 19:05:03 202213 // Update Count : 1 16712 // Last Modified On : Tue Oct 11 15:08:33 2022 13 // Update Count : 1525 14 14 // 15 15 16 #include <stdio.h> 16 17 #include <string.h> // memset, memcpy 17 18 #include <limits.h> // ULONG_MAX … … 21 22 #include <malloc.h> // memalign, malloc_usable_size 22 23 #include <sys/mman.h> // mmap, munmap 24 extern "C" { 23 25 #include <sys/sysinfo.h> // get_nprocs 26 } // extern "C" 24 27 25 28 #include "bits/align.hfa" // libAlign 26 29 #include "bits/defs.hfa" // likely, unlikely 27 30 #include "bits/locks.hfa" // __spinlock_t 31 #include "concurrency/kernel/fwd.hfa" // __POLL_PREEMPTION 28 32 #include "startup.hfa" // STARTUP_PRIORITY_MEMORY 29 #include "math.hfa" // min33 #include "math.hfa" // ceiling, min 30 34 #include "bitmanip.hfa" // is_pow2, ceiling2 31 35 32 #define FASTLOOKUP 33 #define __STATISTICS__ 36 // supported mallopt options 37 #ifndef M_MMAP_THRESHOLD 38 #define M_MMAP_THRESHOLD (-1) 39 #endif // M_MMAP_THRESHOLD 40 41 #ifndef M_TOP_PAD 42 #define M_TOP_PAD (-2) 43 #endif // M_TOP_PAD 44 45 #define FASTLOOKUP // use O(1) table lookup from allocation size to bucket size 46 #define RETURNSPIN // toggle spinlock / lockfree stack 47 #define OWNERSHIP // return freed memory to owner thread 48 49 #define CACHE_ALIGN 64 50 #define CALIGN __attribute__(( aligned(CACHE_ALIGN) )) 51 52 #define TLSMODEL __attribute__(( tls_model("initial-exec") )) 53 54 //#define __STATISTICS__ 55 56 enum { 57 // The default extension heap amount in units of bytes. When the current heap reaches the brk address, the brk 58 // address is extended by the extension amount. 59 __CFA_DEFAULT_HEAP_EXPANSION__ = 10 * 1024 * 1024, 60 61 // The mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; values 62 // greater than or equal to this value are mmap from the operating system. 63 __CFA_DEFAULT_MMAP_START__ = 512 * 1024 + 1, 64 65 // The default unfreed storage amount in units of bytes. When the uC++ program ends it subtracts this amount from 66 // the malloc/free counter to adjust for storage the program does not free. 67 __CFA_DEFAULT_HEAP_UNFREED__ = 0 68 }; // enum 69 70 71 //####################### Heap Trace/Print #################### 34 72 35 73 … … 55 93 static bool prtFree = false; 56 94 57 staticbool prtFree() {95 bool prtFree() { 58 96 return prtFree; 59 97 } // prtFree 60 98 61 staticbool prtFreeOn() {99 bool prtFreeOn() { 62 100 bool temp = prtFree; 63 101 prtFree = true; … … 65 103 } // prtFreeOn 66 104 67 staticbool prtFreeOff() {105 bool prtFreeOff() { 68 106 bool temp = prtFree; 69 107 prtFree = false; … … 72 110 73 111 74 enum { 75 // The default extension heap amount in units of bytes. When the current heap reaches the brk address, the brk 76 // address is extended by the extension amount. 77 __CFA_DEFAULT_HEAP_EXPANSION__ = 10 * 1024 * 1024, 78 79 // The mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; values 80 // greater than or equal to this value are mmap from the operating system. 81 __CFA_DEFAULT_MMAP_START__ = 512 * 1024 + 1, 82 83 // The default unfreed storage amount in units of bytes. When the uC++ program ends it subtracts this amount from 84 // the malloc/free counter to adjust for storage the program does not free. 
85 __CFA_DEFAULT_HEAP_UNFREED__ = 0 86 }; // enum 112 //######################### Spin Lock ######################### 113 114 115 // pause to prevent excess processor bus usage 116 #if defined( __i386 ) || defined( __x86_64 ) 117 #define Pause() __asm__ __volatile__ ( "pause" : : : ) 118 #elif defined(__ARM_ARCH) 119 #define Pause() __asm__ __volatile__ ( "YIELD" : : : ) 120 #else 121 #error unsupported architecture 122 #endif 123 124 typedef volatile uintptr_t SpinLock_t CALIGN; // aligned addressable word-size 125 126 static inline __attribute__((always_inline)) void lock( volatile SpinLock_t & slock ) { 127 enum { SPIN_START = 4, SPIN_END = 64 * 1024, }; 128 unsigned int spin = SPIN_START; 129 130 for ( unsigned int i = 1;; i += 1 ) { 131 if ( slock == 0 && __atomic_test_and_set( &slock, __ATOMIC_SEQ_CST ) == 0 ) break; // Fence 132 for ( volatile unsigned int s = 0; s < spin; s += 1 ) Pause(); // exponential spin 133 spin += spin; // powers of 2 134 //if ( i % 64 == 0 ) spin += spin; // slowly increase by powers of 2 135 if ( spin > SPIN_END ) spin = SPIN_END; // cap spinning 136 } // for 137 } // spin_lock 138 139 static inline __attribute__((always_inline)) void unlock( volatile SpinLock_t & slock ) { 140 __atomic_clear( &slock, __ATOMIC_SEQ_CST ); // Fence 141 } // spin_unlock 87 142 88 143 … … 120 175 unsigned int free_calls, free_null_calls; 121 176 unsigned long long int free_storage_request, free_storage_alloc; 122 unsigned int away_pulls, away_pushes;123 unsigned long long int away_storage_request, away_storage_alloc;177 unsigned int return_pulls, return_pushes; 178 unsigned long long int return_storage_request, return_storage_alloc; 124 179 unsigned int mmap_calls, mmap_0_calls; // no zero calls 125 180 unsigned long long int mmap_storage_request, mmap_storage_alloc; … … 131 186 132 187 static_assert( sizeof(HeapStatistics) == CntTriples * sizeof(StatsOverlay), 133 188 "Heap statistics counter-triplets does not match with array size" ); 134 189 135 190 static void HeapStatisticsCtor( HeapStatistics & stats ) { … … 203 258 static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" ); 204 259 205 struct FreeHeader {206 size_t blockSize __attribute__(( aligned 260 struct __attribute__(( aligned (8) )) FreeHeader { 261 size_t blockSize __attribute__(( aligned(8) )); // size of allocations on this list 207 262 #if BUCKETLOCK == SPINLOCK 208 __spinlock_t lock; 209 Storage * freeList; 263 #ifdef OWNERSHIP 264 #ifdef RETURNSPIN 265 SpinLock_t returnLock; 266 #endif // RETURNSPIN 267 Storage * returnList; // other thread return list 268 #endif // OWNERSHIP 269 Storage * freeList; // thread free list 210 270 #else 211 271 StackLF(Storage) freeList; 212 272 #endif // BUCKETLOCK 213 } __attribute__(( aligned (8) )); // FreeHeader 273 Heap * homeManager; // heap owner (free storage to bucket, from bucket to heap) 274 }; // FreeHeader 214 275 215 276 FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes 216 217 __spinlock_t extlock; // protects allocation-buffer extension 218 void * heapBegin; // start of heap 219 void * heapEnd; // logical end of heap 220 size_t heapRemaining; // amount of storage not allocated in the current chunk 277 void * heapBuffer; // start of free storage in buffer 278 size_t heapReserve; // amount of remaining free storage in buffer 279 280 #if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ ) 281 Heap * nextHeapManager; // intrusive link of existing heaps; traversed to collect statistics or check unfreed 
storage 282 #endif // __STATISTICS__ || __CFA_DEBUG__ 283 Heap * nextFreeHeapManager; // intrusive link of free heaps from terminated threads; reused by new threads 284 285 #ifdef __CFA_DEBUG__ 286 int64_t allocUnfreed; // running total of allocations minus frees; can be negative 287 #endif // __CFA_DEBUG__ 288 289 #ifdef __STATISTICS__ 290 HeapStatistics stats; // local statistic table for this heap 291 #endif // __STATISTICS__ 221 292 }; // Heap 222 293 223 294 #if BUCKETLOCK == LOCKFREE 224 static inline { 295 inline __attribute__((always_inline)) 296 static { 225 297 Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; } 226 298 void ?{}( Heap.FreeHeader & ) {} … … 229 301 #endif // LOCKFREE 230 302 231 static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; } 303 304 struct HeapMaster { 305 SpinLock_t extLock; // protects allocation-buffer extension 306 SpinLock_t mgrLock; // protects freeHeapManagersList, heapManagersList, heapManagersStorage, heapManagersStorageEnd 307 308 void * heapBegin; // start of heap 309 void * heapEnd; // logical end of heap 310 size_t heapRemaining; // amount of storage not allocated in the current chunk 311 size_t pageSize; // architecture pagesize 312 size_t heapExpand; // sbrk advance 313 size_t mmapStart; // cross over point for mmap 314 unsigned int maxBucketsUsed; // maximum number of buckets in use 315 316 Heap * heapManagersList; // heap-list head 317 Heap * freeHeapManagersList; // free-list head 318 319 // Heap superblocks are not linked; heaps in superblocks are linked via intrusive links. 320 Heap * heapManagersStorage; // next heap to use in heap superblock 321 Heap * heapManagersStorageEnd; // logical heap outside of superblock's end 322 323 #ifdef __STATISTICS__ 324 HeapStatistics stats; // global stats for thread-local heaps to add there counters when exiting 325 unsigned long int threads_started, threads_exited; // counts threads that have started and exited 326 unsigned long int reused_heap, new_heap; // counts reusability of heaps 327 unsigned int sbrk_calls; 328 unsigned long long int sbrk_storage; 329 int stats_fd; 330 #endif // __STATISTICS__ 331 }; // HeapMaster 232 332 233 333 234 334 #ifdef FASTLOOKUP 235 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; 335 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes 236 336 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 237 337 #endif // FASTLOOKUP 238 338 239 static const off_t mmapFd = -1; // fake or actual fd for anonymous file 240 #ifdef __CFA_DEBUG__ 241 static bool heapBoot = 0; // detect recursion during boot 242 #endif // __CFA_DEBUG__ 339 static volatile bool heapMasterBootFlag = false; // trigger for first heap 340 static HeapMaster heapMaster @= {}; // program global 341 342 static void heapMasterCtor(); 343 static void heapMasterDtor(); 344 static Heap * getHeap(); 243 345 244 346 … … 268 370 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" ); 269 371 270 // The constructor for heapManager is called explicitly in memory_startup. 
271 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 372 373 // extern visibility, used by runtime kernel 374 libcfa_public size_t __page_size; // architecture pagesize 375 libcfa_public int __map_prot; // common mmap/mprotect protection 376 377 378 // Thread-local storage is allocated lazily when the storage is accessed. 379 static __thread size_t PAD1 CALIGN TLSMODEL __attribute__(( unused )); // protect false sharing 380 static __thread Heap * volatile heapManager CALIGN TLSMODEL; 381 static __thread size_t PAD2 CALIGN TLSMODEL __attribute__(( unused )); // protect further false sharing 382 383 384 // declare helper functions for HeapMaster 385 void noMemory(); // forward, called by "builtin_new" when malloc returns 0 386 387 388 // generic Bsearchl does not inline, so substitute with hand-coded binary-search. 389 inline __attribute__((always_inline)) 390 static size_t Bsearchl( unsigned int key, const unsigned int vals[], size_t dim ) { 391 size_t l = 0, m, h = dim; 392 while ( l < h ) { 393 m = (l + h) / 2; 394 if ( (unsigned int &)(vals[m]) < key ) { // cast away const 395 l = m + 1; 396 } else { 397 h = m; 398 } // if 399 } // while 400 return l; 401 } // Bsearchl 402 403 404 void heapMasterCtor() with( heapMaster ) { 405 // Singleton pattern to initialize heap master 406 407 verify( bucketSizes[0] == (16 + sizeof(Heap.Storage)) ); 408 409 __page_size = sysconf( _SC_PAGESIZE ); 410 __map_prot = PROT_READ | PROT_WRITE | PROT_EXEC; 411 412 ?{}( extLock ); 413 ?{}( mgrLock ); 414 415 char * end = (char *)sbrk( 0 ); 416 heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment 417 heapRemaining = 0; 418 heapExpand = malloc_expansion(); 419 mmapStart = malloc_mmap_start(); 420 421 // find the closest bucket size less than or equal to the mmapStart size 422 maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search 423 424 verify( (mmapStart >= pageSize) && (bucketSizes[NoBucketSizes - 1] >= mmapStart) ); 425 verify( maxBucketsUsed < NoBucketSizes ); // subscript failure ? 426 verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ? 427 428 heapManagersList = 0p; 429 freeHeapManagersList = 0p; 430 431 heapManagersStorage = 0p; 432 heapManagersStorageEnd = 0p; 433 434 #ifdef __STATISTICS__ 435 HeapStatisticsCtor( stats ); // clear statistic counters 436 threads_started = threads_exited = 0; 437 reused_heap = new_heap = 0; 438 sbrk_calls = sbrk_storage = 0; 439 stats_fd = STDERR_FILENO; 440 #endif // __STATISTICS__ 441 442 #ifdef FASTLOOKUP 443 for ( unsigned int i = 0, idx = 0; i < LookupSizes; i += 1 ) { 444 if ( i > bucketSizes[idx] ) idx += 1; 445 lookup[i] = idx; 446 verify( i <= bucketSizes[idx] ); 447 verify( (i <= 32 && idx == 0) || (i > bucketSizes[idx - 1]) ); 448 } // for 449 #endif // FASTLOOKUP 450 451 heapMasterBootFlag = true; 452 } // heapMasterCtor 453 454 455 #define NO_MEMORY_MSG "insufficient heap memory available to allocate %zd new bytes." 456 457 Heap * getHeap() with( heapMaster ) { 458 Heap * heap; 459 if ( freeHeapManagersList ) { // free heap for reused ? 
460 heap = freeHeapManagersList; 461 freeHeapManagersList = heap->nextFreeHeapManager; 462 463 #ifdef __STATISTICS__ 464 reused_heap += 1; 465 #endif // __STATISTICS__ 466 } else { // free heap not found, create new 467 // Heap size is about 12K, FreeHeader (128 bytes because of cache alignment) * NoBucketSizes (91) => 128 heaps * 468 // 12K ~= 120K byte superblock. Where 128-heap superblock handles a medium sized multi-processor server. 469 size_t remaining = heapManagersStorageEnd - heapManagersStorage; // remaining free heaps in superblock 470 if ( ! heapManagersStorage || remaining != 0 ) { 471 // Each block of heaps is a multiple of the number of cores on the computer. 472 int HeapDim = get_nprocs(); // get_nprocs_conf does not work 473 size_t size = HeapDim * sizeof( Heap ); 474 475 heapManagersStorage = (Heap *)mmap( 0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 ); 476 if ( unlikely( heapManagersStorage == (Heap *)MAP_FAILED ) ) { // failed ? 477 if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, size ); // no memory 478 // Do not call strerror( errno ) as it may call malloc. 479 abort( "attempt to allocate block of heaps of size %zu bytes and mmap failed with errno %d.", size, errno ); 480 } // if 481 heapManagersStorageEnd = &heapManagersStorage[HeapDim]; // outside array 482 } // if 483 484 heap = heapManagersStorage; 485 heapManagersStorage = heapManagersStorage + 1; // bump next heap 486 487 #if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ ) 488 heap->nextHeapManager = heapManagersList; 489 #endif // __STATISTICS__ || __CFA_DEBUG__ 490 heapManagersList = heap; 491 492 #ifdef __STATISTICS__ 493 new_heap += 1; 494 #endif // __STATISTICS__ 495 496 with( *heap ) { 497 for ( unsigned int j = 0; j < NoBucketSizes; j += 1 ) { // initialize free lists 498 #ifdef OWNERSHIP 499 #ifdef RETURNSPIN 500 ?{}( freeLists[j].returnLock ); 501 #endif // RETURNSPIN 502 freeLists[j].returnList = 0p; 503 #endif // OWNERSHIP 504 freeLists[j].freeList = 0p; 505 freeLists[j].homeManager = heap; 506 freeLists[j].blockSize = bucketSizes[j]; 507 } // for 508 509 heapBuffer = 0p; 510 heapReserve = 0; 511 nextFreeHeapManager = 0p; 512 #ifdef __CFA_DEBUG__ 513 allocUnfreed = 0; 514 #endif // __CFA_DEBUG__ 515 } // with 516 } // if 517 return heap; 518 } // getHeap 519 520 521 void heapManagerCtor() libcfa_public { 522 if ( unlikely( ! heapMasterBootFlag ) ) heapMasterCtor(); 523 524 lock( heapMaster.mgrLock ); // protect heapMaster counters 525 526 // get storage for heap manager 527 528 heapManager = getHeap(); 529 530 #ifdef __STATISTICS__ 531 HeapStatisticsCtor( heapManager->stats ); // heap local 532 heapMaster.threads_started += 1; 533 #endif // __STATISTICS__ 534 535 unlock( heapMaster.mgrLock ); 536 } // heapManagerCtor 537 538 539 void heapManagerDtor() libcfa_public { 540 lock( heapMaster.mgrLock ); 541 542 // place heap on list of free heaps for reusability 543 heapManager->nextFreeHeapManager = heapMaster.freeHeapManagersList; 544 heapMaster.freeHeapManagersList = heapManager; 545 546 #ifdef __STATISTICS__ 547 heapMaster.threads_exited += 1; 548 #endif // __STATISTICS__ 549 550 // Do not set heapManager to NULL because it is used after Cforall is shutdown but before the program shuts down. 
551 552 unlock( heapMaster.mgrLock ); 553 } // heapManagerDtor 272 554 273 555 274 556 //####################### Memory Allocation Routines Helpers #################### 275 557 276 277 #ifdef __CFA_DEBUG__278 static size_t allocUnfreed; // running total of allocations minus frees279 280 static void prtUnfreed() {281 if ( allocUnfreed != 0 ) {282 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.283 char helpText[512];284 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),285 "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n"286 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",287 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid288 } // if289 } // prtUnfreed290 558 291 559 extern int cfa_main_returned; // from interpose.cfa 292 560 extern "C" { 561 void memory_startup( void ) { 562 if ( ! heapMasterBootFlag ) heapManagerCtor(); // sanity check 563 } // memory_startup 564 565 void memory_shutdown( void ) { 566 heapManagerDtor(); 567 } // memory_shutdown 568 293 569 void heapAppStart() { // called by __cfaabi_appready_startup 294 allocUnfreed = 0; 570 verify( heapManager ); 571 #ifdef __CFA_DEBUG__ 572 heapManager->allocUnfreed = 0; // clear prior allocation counts 573 #endif // __CFA_DEBUG__ 574 575 #ifdef __STATISTICS__ 576 HeapStatisticsCtor( heapManager->stats ); // clear prior statistic counters 577 #endif // __STATISTICS__ 295 578 } // heapAppStart 296 579 297 580 void heapAppStop() { // called by __cfaabi_appready_startdown 298 fclose( stdin ); fclose( stdout ); 299 if ( cfa_main_returned ) prtUnfreed(); // do not check unfreed storage if exit called 581 fclose( stdin ); fclose( stdout ); // free buffer storage 582 if ( ! cfa_main_returned ) return; // do not check unfreed storage if exit called 583 584 #ifdef __CFA_DEBUG__ 585 // allocUnfreed is set to 0 when a heap is created and it accumulates any unfreed storage during its multiple thread 586 // usages. At the end, add up each heap allocUnfreed value across all heaps to get the total unfreed storage. 587 long long int allocUnfreed = 0; 588 for ( Heap * heap = heapMaster.heapManagersList; heap; heap = heap->nextHeapManager ) { 589 allocUnfreed += heap->allocUnfreed; 590 } // for 591 592 allocUnfreed -= malloc_unfreed(); // subtract any user specified unfreed storage 593 if ( allocUnfreed > 0 ) { 594 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 
595 char helpText[512]; 596 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 597 "CFA warning (UNIX pid:%ld) : program terminating with %llu(0x%llx) bytes of storage allocated but not freed.\n" 598 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n", 599 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid 600 } // if 601 #endif // __CFA_DEBUG__ 300 602 } // heapAppStop 301 603 } // extern "C" 302 #endif // __CFA_DEBUG__303 604 304 605 305 606 #ifdef __STATISTICS__ 306 607 static HeapStatistics stats; // zero filled 307 static unsigned int sbrk_calls;308 static unsigned long long int sbrk_storage;309 // Statistics file descriptor (changed by malloc_stats_fd).310 static int stats_fd = STDERR_FILENO; // default stderr311 608 312 609 #define prtFmt \ … … 321 618 " realloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 322 619 " free !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n" \ 323 " sbrk calls %'u; storage %'llu bytes\n" \ 324 " mmap calls %'u; storage %'llu / %'llu bytes\n" \ 325 " munmap calls %'u; storage %'llu / %'llu bytes\n" \ 620 " return pulls %'u; pushes %'u; storage %'llu / %'llu bytes\n" \ 621 " sbrk calls %'u; storage %'llu bytes\n" \ 622 " mmap calls %'u; storage %'llu / %'llu bytes\n" \ 623 " munmap calls %'u; storage %'llu / %'llu bytes\n" \ 624 " threads started %'lu; exited %'lu\n" \ 625 " heaps new %'lu; reused %'lu\n" 326 626 327 627 // Use "write" because streams may be shutdown when calls are made. 328 static int printStats( ) {// see malloc_stats628 static int printStats( HeapStatistics & stats ) with( heapMaster, stats ) { // see malloc_stats 329 629 char helpText[sizeof(prtFmt) + 1024]; // space for message and values 330 return __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), prtFmt, 331 stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc, 332 stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc, 333 stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc, 334 stats.memalign_calls, stats.memalign_0_calls, stats.memalign_storage_request, stats.memalign_storage_alloc, 335 stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc, 336 stats.cmemalign_calls, stats.cmemalign_0_calls, stats.cmemalign_storage_request, stats.cmemalign_storage_alloc, 337 stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc, 338 stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc, 339 stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc, 630 return __cfaabi_bits_print_buffer( stats_fd, helpText, sizeof(helpText), prtFmt, 631 malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc, 632 aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc, 633 calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc, 634 memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc, 635 amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc, 636 cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc, 637 resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc, 638 
realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc, 639 free_calls, free_null_calls, free_storage_request, free_storage_alloc, 640 return_pulls, return_pushes, return_storage_request, return_storage_alloc, 340 641 sbrk_calls, sbrk_storage, 341 stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc, 342 stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc 642 mmap_calls, mmap_storage_request, mmap_storage_alloc, 643 munmap_calls, munmap_storage_request, munmap_storage_alloc, 644 threads_started, threads_exited, 645 new_heap, reused_heap 343 646 ); 344 647 } // printStats … … 358 661 "<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 359 662 "<total type=\"free\" !null=\"%'u;\" 0 null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 663 "<total type=\"return\" pulls=\"%'u;\" 0 pushes=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 360 664 "<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n" \ 361 665 "<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\" / > bytes\n" \ 362 666 "<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 667 "<total type=\"threads\" started=\"%'lu;\" exited=\"%'lu\"/>\n" \ 668 "<total type=\"heaps\" new=\"%'lu;\" reused=\"%'lu\"/>\n" \ 363 669 "</malloc>" 364 670 365 static int printStatsXML( FILE * stream ) {// see malloc_info671 static int printStatsXML( HeapStatistics & stats, FILE * stream ) with( heapMaster, stats ) { // see malloc_info 366 672 char helpText[sizeof(prtFmtXML) + 1024]; // space for message and values 367 673 return __cfaabi_bits_print_buffer( fileno( stream ), helpText, sizeof(helpText), prtFmtXML, 368 stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc, 369 stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc, 370 stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc, 371 stats.memalign_calls, stats.memalign_0_calls, stats.memalign_storage_request, stats.memalign_storage_alloc, 372 stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc, 373 stats.cmemalign_calls, stats.cmemalign_0_calls, stats.cmemalign_storage_request, stats.cmemalign_storage_alloc, 374 stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc, 375 stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc, 376 stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc, 674 malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc, 675 aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc, 676 calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc, 677 memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc, 678 amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc, 679 cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc, 680 resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc, 681 realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc, 682 free_calls, free_null_calls, free_storage_request, free_storage_alloc, 683 return_pulls, return_pushes, return_storage_request, return_storage_alloc, 377 684 sbrk_calls, sbrk_storage, 378 
stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc, 379 stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc 685 mmap_calls, mmap_storage_request, mmap_storage_alloc, 686 munmap_calls, munmap_storage_request, munmap_storage_alloc, 687 threads_started, threads_exited, 688 new_heap, reused_heap 380 689 ); 381 690 } // printStatsXML 691 692 static HeapStatistics & collectStats( HeapStatistics & stats ) with( heapMaster ) { 693 lock( mgrLock ); 694 695 stats += heapMaster.stats; 696 for ( Heap * heap = heapManagersList; heap; heap = heap->nextHeapManager ) { 697 stats += heap->stats; 698 } // for 699 700 unlock( mgrLock ); 701 return stats; 702 } // collectStats 382 703 #endif // __STATISTICS__ 383 704 384 705 385 // statically allocated variables => zero filled. 386 static size_t heapExpand; // sbrk advance 387 static size_t mmapStart; // cross over point for mmap 388 static unsigned int maxBucketsUsed; // maximum number of buckets in use 389 // extern visibility, used by runtime kernel 390 // would be cool to remove libcfa_public but it's needed for libcfathread 391 libcfa_public size_t __page_size; // architecture pagesize 392 libcfa_public int __map_prot; // common mmap/mprotect protection 393 394 395 // thunk problem 396 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) { 397 size_t l = 0, m, h = dim; 398 while ( l < h ) { 399 m = (l + h) / 2; 400 if ( (unsigned int &)(vals[m]) < key ) { // cast away const 401 l = m + 1; 402 } else { 403 h = m; 404 } // if 405 } // while 406 return l; 407 } // Bsearchl 408 409 410 static inline bool setMmapStart( size_t value ) { // true => mmapped, false => sbrk 706 static bool setMmapStart( size_t value ) with( heapMaster ) { // true => mmapped, false => sbrk 411 707 if ( value < __page_size || bucketSizes[NoBucketSizes - 1] < value ) return false; 412 708 mmapStart = value; // set global 413 709 414 710 // find the closest bucket size less than or equal to the mmapStart size 415 maxBucketsUsed = Bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search416 assert( maxBucketsUsed < NoBucketSizes ); // subscript failure ?417 assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?711 maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search 712 verify( maxBucketsUsed < NoBucketSizes ); // subscript failure ? 713 verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ? 418 714 return true; 419 715 } // setMmapStart … … 438 734 439 735 440 static inline void checkAlign( size_t alignment ) { 736 inline __attribute__((always_inline)) 737 static void checkAlign( size_t alignment ) { 441 738 if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) { 442 739 abort( "**** Error **** alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() ); … … 445 742 446 743 447 static inline void checkHeader( bool check, const char name[], void * addr ) { 744 inline __attribute__((always_inline)) 745 static void checkHeader( bool check, const char name[], void * addr ) { 448 746 if ( unlikely( check ) ) { // bad address ? 
449 747 abort( "**** Error **** attempt to %s storage %p with address outside the heap.\n" … … 470 768 471 769 472 static inline void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) { 770 inline __attribute__((always_inline)) 771 static void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) { 473 772 if ( unlikely( AlignmentBit( header ) ) ) { // fake header ? 474 773 alignment = ClearAlignmentBit( header ); // clear flag from value … … 483 782 484 783 485 static inline bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header, 486 Heap.FreeHeader *& freeHead, size_t & size, size_t & alignment ) with( heapManager ) { 784 inline __attribute__((always_inline)) 785 static bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header, 786 Heap.FreeHeader *& freeHead, size_t & size, size_t & alignment ) with( heapMaster, *heapManager ) { 487 787 header = HeaderAddr( addr ); 488 788 … … 509 809 checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -) 510 810 811 Heap * homeManager; 511 812 if ( unlikely( freeHead == 0p || // freed and only free-list node => null link 512 813 // freed and link points at another free block not to a bucket in the bucket array. 513 freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) ) { 814 (homeManager = freeHead->homeManager, freeHead < &homeManager->freeLists[0] || 815 &homeManager->freeLists[NoBucketSizes] <= freeHead ) ) ) { 514 816 abort( "**** Error **** attempt to %s storage %p with corrupted header.\n" 515 817 "Possible cause is duplicate free on same block or overwriting of header information.", … … 521 823 } // headers 522 824 523 // #ifdef __CFA_DEBUG__ 524 // #if __SIZEOF_POINTER__ == 4 525 // #define MASK 0xdeadbeef 526 // #else 527 // #define MASK 0xdeadbeefdeadbeef 528 // #endif 529 // #define STRIDE size_t 530 531 // static void * Memset( void * addr, STRIDE size ) { // debug only 532 // if ( size % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, size %zd not multiple of %zd.", size, sizeof(STRIDE) ); 533 // if ( (STRIDE)addr % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, addr %p not multiple of %zd.", addr, sizeof(STRIDE) ); 534 535 // STRIDE * end = (STRIDE *)addr + size / sizeof(STRIDE); 536 // for ( STRIDE * p = (STRIDE *)addr; p < end; p += 1 ) *p = MASK; 537 // return addr; 538 // } // Memset 539 // #endif // __CFA_DEBUG__ 540 541 542 #define NO_MEMORY_MSG "insufficient heap memory available for allocating %zd new bytes." 543 544 static inline void * extend( size_t size ) with( heapManager ) { 545 lock( extlock __cfaabi_dbg_ctx2 ); 825 826 static void * master_extend( size_t size ) with( heapMaster ) { 827 lock( extLock ); 546 828 547 829 ptrdiff_t rem = heapRemaining - size; … … 549 831 // If the size requested is bigger than the current remaining storage, increase the size of the heap. 550 832 551 size_t increase = ceiling2( size > heapExpand ? size : heapExpand, __page_size);833 size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() ); 552 834 // Do not call abort or strerror( errno ) as they may call malloc. 553 if ( sbrk( increase ) == (void *)-1 ) {// failed, no memory ?554 unlock( ext lock );835 if ( unlikely( sbrk( increase ) == (void *)-1 ) ) { // failed, no memory ? 
836 unlock( extLock ); 555 837 __cfaabi_bits_print_nolock( STDERR_FILENO, NO_MEMORY_MSG, size ); 556 838 _exit( EXIT_FAILURE ); // give up 557 839 } // if 558 559 // Make storage executable for thunks. 560 if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) { 561 unlock( extlock ); 562 __cfaabi_bits_print_nolock( STDERR_FILENO, "extend() : internal error, mprotect failure, heapEnd:%p size:%zd, errno:%d.\n", heapEnd, increase, errno ); 563 _exit( EXIT_FAILURE ); 564 } // if 840 rem = heapRemaining + increase - size; 565 841 566 842 #ifdef __STATISTICS__ … … 568 844 sbrk_storage += increase; 569 845 #endif // __STATISTICS__ 570 571 #ifdef __CFA_DEBUG__572 // Set new memory to garbage so subsequent uninitialized usages might fail.573 memset( (char *)heapEnd + heapRemaining, '\xde', increase );574 //Memset( (char *)heapEnd + heapRemaining, increase );575 #endif // __CFA_DEBUG__576 577 rem = heapRemaining + increase - size;578 846 } // if 579 847 … … 581 849 heapRemaining = rem; 582 850 heapEnd = (char *)heapEnd + size; 583 unlock( extlock ); 851 852 unlock( extLock ); 584 853 return block; 585 } // extend 586 587 588 static inline void * doMalloc( size_t size ) with( heapManager ) { 589 Heap.Storage * block; // pointer to new block of storage 854 } // master_extend 855 856 857 __attribute__(( noinline )) 858 static void * manager_extend( size_t size ) with( *heapManager ) { 859 ptrdiff_t rem = heapReserve - size; 860 861 if ( unlikely( rem < 0 ) ) { // negative 862 // If the size requested is bigger than the current remaining reserve, use the current reserve to populate 863 // smaller freeLists, and increase the reserve. 864 865 rem = heapReserve; // positive 866 867 if ( rem >= bucketSizes[0] ) { // minimal size ? otherwise ignore 868 size_t bucket; 869 #ifdef FASTLOOKUP 870 if ( likely( rem < LookupSizes ) ) bucket = lookup[rem]; 871 #endif // FASTLOOKUP 872 bucket = Bsearchl( rem, bucketSizes, heapMaster.maxBucketsUsed ); 873 verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed ); 874 Heap.FreeHeader * freeHead = &(freeLists[bucket]); 875 876 // The remaining storage many not be bucket size, whereas all other allocations are. Round down to previous 877 // bucket size in this case. 878 if ( unlikely( freeHead->blockSize > (size_t)rem ) ) freeHead -= 1; 879 Heap.Storage * block = (Heap.Storage *)heapBuffer; 880 881 block->header.kind.real.next = freeHead->freeList; // push on stack 882 freeHead->freeList = block; 883 } // if 884 885 size_t increase = ceiling( size > ( heapMaster.heapExpand / 10 ) ? size : ( heapMaster.heapExpand / 10 ), libAlign() ); 886 heapBuffer = master_extend( increase ); 887 rem = increase - size; 888 } // if 889 890 Heap.Storage * block = (Heap.Storage *)heapBuffer; 891 heapReserve = rem; 892 heapBuffer = (char *)heapBuffer + size; 893 894 return block; 895 } // manager_extend 896 897 898 #define BOOT_HEAP_MANAGER \ 899 if ( unlikely( ! heapMasterBootFlag ) ) { \ 900 heapManagerCtor(); /* trigger for first heap */ \ 901 } /* if */ 902 903 #ifdef __STATISTICS__ 904 #define STAT_NAME __counter 905 #define STAT_PARM , unsigned int STAT_NAME 906 #define STAT_ARG( name ) , name 907 #define STAT_0_CNT( counter ) stats.counters[counter].calls_0 += 1 908 #else 909 #define STAT_NAME 910 #define STAT_PARM 911 #define STAT_ARG( name ) 912 #define STAT_0_CNT( counter ) 913 #endif // __STATISTICS__ 914 915 #define PROLOG( counter, ... 
) \ 916 BOOT_HEAP_MANAGER; \ 917 if ( unlikely( size == 0 ) || /* 0 BYTE ALLOCATION RETURNS NULL POINTER */ \ 918 unlikely( size > ULONG_MAX - sizeof(Heap.Storage) ) ) { /* error check */ \ 919 STAT_0_CNT( counter ); \ 920 __VA_ARGS__; \ 921 return 0p; \ 922 } /* if */ 923 924 925 #define SCRUB_SIZE 1024lu 926 // Do not use '\xfe' for scrubbing because dereferencing an address composed of it causes a SIGSEGV *without* a valid IP 927 // pointer in the interrupt frame. 928 #define SCRUB '\xff' 929 930 static void * doMalloc( size_t size STAT_PARM ) libcfa_nopreempt with( *heapManager ) { 931 PROLOG( STAT_NAME ); 932 933 verify( heapManager ); 934 Heap.Storage * block; // pointer to new block of storage 590 935 591 936 // Look up size in the size list. Make sure the user request includes space for the header that must be allocated 592 937 // along with the block and is a multiple of the alignment size. 593 594 938 size_t tsize = size + sizeof(Heap.Storage); 595 939 596 if ( likely( tsize < mmapStart ) ) { // small size => sbrk 597 size_t posn; 940 #ifdef __STATISTICS__ 941 stats.counters[STAT_NAME].calls += 1; 942 stats.counters[STAT_NAME].request += size; 943 #endif // __STATISTICS__ 944 945 #ifdef __CFA_DEBUG__ 946 allocUnfreed += size; 947 #endif // __CFA_DEBUG__ 948 949 if ( likely( tsize < heapMaster.mmapStart ) ) { // small size => sbrk 950 size_t bucket; 598 951 #ifdef FASTLOOKUP 599 if ( tsize < LookupSizes ) posn= lookup[tsize];952 if ( likely( tsize < LookupSizes ) ) bucket = lookup[tsize]; 600 953 else 601 954 #endif // FASTLOOKUP 602 posn = Bsearchl( (unsigned int)tsize, bucketSizes, (size_t)maxBucketsUsed ); 603 Heap.FreeHeader * freeElem = &freeLists[posn]; 604 verify( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ? 605 verify( tsize <= freeElem->blockSize ); // search failure ? 606 tsize = freeElem->blockSize; // total space needed for request 955 bucket = Bsearchl( tsize, bucketSizes, heapMaster.maxBucketsUsed ); 956 verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed ); 957 Heap.FreeHeader * freeHead = &freeLists[bucket]; 958 959 verify( freeHead <= &freeLists[heapMaster.maxBucketsUsed] ); // subscripting error ? 960 verify( tsize <= freeHead->blockSize ); // search failure ? 961 962 tsize = freeHead->blockSize; // total space needed for request 963 #ifdef __STATISTICS__ 964 stats.counters[STAT_NAME].alloc += tsize; 965 #endif // __STATISTICS__ 607 966 608 967 // Spin until the lock is acquired for this particular size of block. 609 968 610 969 #if BUCKETLOCK == SPINLOCK 611 lock( freeElem->lock __cfaabi_dbg_ctx2 ); 612 block = freeElem->freeList; // remove node from stack 970 block = freeHead->freeList; // remove node from stack 613 971 #else 614 block = pop( free Elem->freeList );972 block = pop( freeHead->freeList ); 615 973 #endif // BUCKETLOCK 616 974 if ( unlikely( block == 0p ) ) { // no free block ? 975 #ifdef OWNERSHIP 976 // Freelist for that size is empty, so carve it out of the heap, if there is enough left, or get some more 977 // and then carve it off. 978 #ifdef RETURNSPIN 617 979 #if BUCKETLOCK == SPINLOCK 618 unlock( freeElem->lock ); 980 lock( freeHead->returnLock ); 981 block = freeHead->returnList; 982 freeHead->returnList = 0p; 983 unlock( freeHead->returnLock ); 984 #else 985 block = __atomic_exchange_n( &freeHead->returnList, nullptr, __ATOMIC_SEQ_CST ); 986 #endif // RETURNSPIN 987 988 if ( likely( block == 0p ) ) { // return list also empty? 
989 #endif // OWNERSHIP 990 // Do not leave kernel thread as manager_extend accesses heapManager. 991 disable_interrupts(); 992 block = (Heap.Storage *)manager_extend( tsize ); // mutual exclusion on call 993 enable_interrupts( false ); 994 995 // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED. 996 997 #ifdef __CFA_DEBUG__ 998 // Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first 1024 bytes. 999 memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) ); 1000 #endif // __CFA_DEBUG__ 619 1001 #endif // BUCKETLOCK 620 621 // Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more 622 // and then carve it off. 623 624 block = (Heap.Storage *)extend( tsize ); // mutual exclusion on call 625 #if BUCKETLOCK == SPINLOCK 1002 #ifdef OWNERSHIP 1003 } else { // merge returnList into freeHead 1004 #ifdef __STATISTICS__ 1005 stats.return_pulls += 1; 1006 #endif // __STATISTICS__ 1007 1008 // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED. 1009 1010 freeHead->freeList = block->header.kind.real.next; 1011 } // if 1012 #endif // OWNERSHIP 626 1013 } else { 627 freeElem->freeList = block->header.kind.real.next; 628 unlock( freeElem->lock ); 629 #endif // BUCKETLOCK 630 } // if 631 632 block->header.kind.real.home = freeElem; // pointer back to free list of apropriate size 1014 // Memory is scrubbed in doFree. 1015 freeHead->freeList = block->header.kind.real.next; 1016 } // if 1017 1018 block->header.kind.real.home = freeHead; // pointer back to free list of apropriate size 633 1019 } else { // large size => mmap 634 1020 if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p; 635 1021 tsize = ceiling2( tsize, __page_size ); // must be multiple of page size 636 1022 #ifdef __STATISTICS__ 637 __atomic_add_fetch( &stats.mmap_calls, 1, __ATOMIC_SEQ_CST ); 638 __atomic_add_fetch( &stats.mmap_storage_request, size, __ATOMIC_SEQ_CST ); 639 __atomic_add_fetch( &stats.mmap_storage_alloc, tsize, __ATOMIC_SEQ_CST ); 1023 stats.counters[STAT_NAME].alloc += tsize; 1024 stats.mmap_calls += 1; 1025 stats.mmap_storage_request += size; 1026 stats.mmap_storage_alloc += tsize; 640 1027 #endif // __STATISTICS__ 641 1028 642 block = (Heap.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 ); 643 if ( block == (Heap.Storage *)MAP_FAILED ) { // failed ? 1029 disable_interrupts(); 1030 block = (Heap.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 ); 1031 enable_interrupts( false ); 1032 1033 // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED. 1034 1035 if ( unlikely( block == (Heap.Storage *)MAP_FAILED ) ) { // failed ? 644 1036 if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory 645 1037 // Do not call strerror( errno ) as it may call malloc. 646 abort( "(Heap &)0x%p.doMalloc() : internal error, mmap failure, size:%zu errno:%d.", &heapManager, tsize, errno ); 647 } //if 1038 abort( "attempt to allocate large object (> %zu) of size %zu bytes and mmap failed with errno %d.", size, heapMaster.mmapStart, errno ); 1039 } // if 1040 block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap 1041 648 1042 #ifdef __CFA_DEBUG__ 649 // S et new memory to garbage so subsequent uninitialized usages might fail.650 memset( block, '\xde', tsize );651 //Memset( block, tsize);1043 // Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first 1024 bytes. 
The rest of 1044 // the storage set to 0 by mmap. 1045 memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) ); 652 1046 #endif // __CFA_DEBUG__ 653 block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap654 1047 } // if 655 1048 … … 659 1052 660 1053 #ifdef __CFA_DEBUG__ 661 __atomic_add_fetch( &allocUnfreed, tsize, __ATOMIC_SEQ_CST );662 1054 if ( traceHeap() ) { 663 1055 char helpText[64]; … … 667 1059 #endif // __CFA_DEBUG__ 668 1060 1061 // poll_interrupts(); // call rollforward 1062 669 1063 return addr; 670 1064 } // doMalloc 671 1065 672 1066 673 static inline void doFree( void * addr ) with( heapManager ) { 1067 static void doFree( void * addr ) libcfa_nopreempt with( *heapManager ) { 1068 verify( addr ); 1069 1070 // detect free after thread-local storage destruction and use global stats in that case 1071 1072 Heap.Storage.Header * header; 1073 Heap.FreeHeader * freeHead; 1074 size_t size, alignment; 1075 1076 bool mapped = headers( "free", addr, header, freeHead, size, alignment ); 1077 #if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ ) 1078 size_t rsize = header->kind.real.size; // optimization 1079 #endif // __STATISTICS__ || __CFA_DEBUG__ 1080 1081 #ifdef __STATISTICS__ 1082 stats.free_storage_request += rsize; 1083 stats.free_storage_alloc += size; 1084 #endif // __STATISTICS__ 1085 674 1086 #ifdef __CFA_DEBUG__ 675 if ( unlikely( heapManager.heapBegin == 0p ) ) { 676 abort( "doFree( %p ) : internal error, called before heap is initialized.", addr ); 677 } // if 1087 allocUnfreed -= rsize; 678 1088 #endif // __CFA_DEBUG__ 679 1089 680 Heap.Storage.Header * header; 681 Heap.FreeHeader * freeElem; 682 size_t size, alignment; // not used (see realloc) 683 684 if ( headers( "free", addr, header, freeElem, size, alignment ) ) { // mmapped ? 1090 if ( unlikely( mapped ) ) { // mmapped ? 685 1091 #ifdef __STATISTICS__ 686 __atomic_add_fetch( &stats.munmap_calls, 1, __ATOMIC_SEQ_CST );687 __atomic_add_fetch( &stats.munmap_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST );688 __atomic_add_fetch( &stats.munmap_storage_alloc, size, __ATOMIC_SEQ_CST );1092 stats.munmap_calls += 1; 1093 stats.munmap_storage_request += rsize; 1094 stats.munmap_storage_alloc += size; 689 1095 #endif // __STATISTICS__ 690 if ( munmap( header, size ) == -1 ) { 691 abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n" 692 "Possible cause is invalid pointer.", 693 addr ); 1096 1097 // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED. 1098 1099 // Does not matter where this storage is freed. 1100 if ( unlikely( munmap( header, size ) == -1 ) ) { 1101 // Do not call strerror( errno ) as it may call malloc. 1102 abort( "attempt to deallocate large object %p and munmap failed with errno %d.\n" 1103 "Possible cause is invalid delete pointer: either not allocated or with corrupt header.", 1104 addr, errno ); 694 1105 } // if 695 1106 } else { 696 1107 #ifdef __CFA_DEBUG__ 697 // Set free memory to garbage so subsequent usages might fail. 698 memset( ((Heap.Storage *)header)->data, '\xde', freeElem->blockSize - sizeof( Heap.Storage ) ); 699 //Memset( ((Heap.Storage *)header)->data, freeElem->blockSize - sizeof( Heap.Storage ) ); 1108 // memset is NOT always inlined! 1109 disable_interrupts(); 1110 // Scrub old memory so subsequent usages might fail. Only scrub the first/last SCRUB_SIZE bytes. 
1111 char * data = ((Heap.Storage *)header)->data; // data address 1112 size_t dsize = size - sizeof(Heap.Storage); // data size 1113 if ( dsize <= SCRUB_SIZE * 2 ) { 1114 memset( data, SCRUB, dsize ); // scrub all 1115 } else { 1116 memset( data, SCRUB, SCRUB_SIZE ); // scrub front 1117 memset( data + dsize - SCRUB_SIZE, SCRUB, SCRUB_SIZE ); // scrub back 1118 } // if 1119 enable_interrupts( false ); 700 1120 #endif // __CFA_DEBUG__ 701 1121 702 #ifdef __STATISTICS__ 703 __atomic_add_fetch( &stats.free_calls, 1, __ATOMIC_SEQ_CST ); 704 __atomic_add_fetch( &stats.free_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST ); 705 __atomic_add_fetch( &stats.free_storage_alloc, size, __ATOMIC_SEQ_CST ); 706 #endif // __STATISTICS__ 707 708 #if BUCKETLOCK == SPINLOCK 709 lock( freeElem->lock __cfaabi_dbg_ctx2 ); // acquire spin lock 710 header->kind.real.next = freeElem->freeList; // push on stack 711 freeElem->freeList = (Heap.Storage *)header; 712 unlock( freeElem->lock ); // release spin lock 713 #else 714 push( freeElem->freeList, *(Heap.Storage *)header ); 715 #endif // BUCKETLOCK 1122 if ( likely( heapManager == freeHead->homeManager ) ) { // belongs to this thread 1123 header->kind.real.next = freeHead->freeList; // push on stack 1124 freeHead->freeList = (Heap.Storage *)header; 1125 } else { // return to thread owner 1126 verify( heapManager ); 1127 1128 #ifdef OWNERSHIP 1129 #ifdef RETURNSPIN 1130 lock( freeHead->returnLock ); 1131 header->kind.real.next = freeHead->returnList; // push to bucket return list 1132 freeHead->returnList = (Heap.Storage *)header; 1133 unlock( freeHead->returnLock ); 1134 #else // lock free 1135 header->kind.real.next = freeHead->returnList; // link new node to top node 1136 // CAS resets header->kind.real.next = freeHead->returnList on failure 1137 while ( ! __atomic_compare_exchange_n( &freeHead->returnList, &header->kind.real.next, header, 1138 false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ); 1139 #endif // RETURNSPIN 1140 1141 #else // no OWNERSHIP 1142 1143 freeHead = &heap->freeLists[ClearStickyBits( header->kind.real.home ) - &freeHead->homeManager->freeLists[0]]; 1144 header->kind.real.next = freeHead->freeList; // push on stack 1145 freeHead->freeList = (Heap.Storage *)header; 1146 #endif // ! OWNERSHIP 1147 1148 #ifdef __U_STATISTICS__ 1149 stats.return_pushes += 1; 1150 stats.return_storage_request += rsize; 1151 stats.return_storage_alloc += size; 1152 #endif // __U_STATISTICS__ 1153 1154 // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED. 
1155 } // if 716 1156 } // if 717 1157 718 1158 #ifdef __CFA_DEBUG__ 719 __atomic_add_fetch( &allocUnfreed, -size, __ATOMIC_SEQ_CST );720 1159 if ( traceHeap() ) { 721 1160 char helpText[64]; … … 724 1163 } // if 725 1164 #endif // __CFA_DEBUG__ 1165 1166 // poll_interrupts(); // call rollforward 726 1167 } // doFree 727 1168 728 1169 729 s tatic size_t prtFree( Heap & manager ) with( manager ) {1170 size_t prtFree( Heap & manager ) with( manager ) { 730 1171 size_t total = 0; 731 1172 #ifdef __STATISTICS__ … … 733 1174 __cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" ); 734 1175 #endif // __STATISTICS__ 735 for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {1176 for ( unsigned int i = 0; i < heapMaster.maxBucketsUsed; i += 1 ) { 736 1177 size_t size = freeLists[i].blockSize; 737 1178 #ifdef __STATISTICS__ … … 764 1205 __cfaabi_bits_release(); 765 1206 #endif // __STATISTICS__ 766 return (char *)heap End - (char *)heapBegin - total;1207 return (char *)heapMaster.heapEnd - (char *)heapMaster.heapBegin - total; 767 1208 } // prtFree 768 1209 769 1210 770 static void ?{}( Heap & manager ) with( manager ) { 771 __page_size = sysconf( _SC_PAGESIZE ); 772 __map_prot = PROT_READ | PROT_WRITE | PROT_EXEC; 773 774 for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists 775 freeLists[i].blockSize = bucketSizes[i]; 776 } // for 777 778 #ifdef FASTLOOKUP 779 unsigned int idx = 0; 780 for ( unsigned int i = 0; i < LookupSizes; i += 1 ) { 781 if ( i > bucketSizes[idx] ) idx += 1; 782 lookup[i] = idx; 783 } // for 784 #endif // FASTLOOKUP 785 786 if ( ! setMmapStart( malloc_mmap_start() ) ) { 787 abort( "Heap : internal error, mmap start initialization failure." ); 788 } // if 789 heapExpand = malloc_expansion(); 790 791 char * end = (char *)sbrk( 0 ); 792 heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, __page_size ) - end ); // move start of heap to multiple of alignment 793 } // Heap 794 795 796 static void ^?{}( Heap & ) { 797 #ifdef __STATISTICS__ 798 if ( traceHeapTerm() ) { 799 printStats(); 800 // prtUnfreed() called in heapAppStop() 801 } // if 802 #endif // __STATISTICS__ 803 } // ~Heap 804 805 806 static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) )); 807 void memory_startup( void ) { 808 #ifdef __CFA_DEBUG__ 809 if ( heapBoot ) { // check for recursion during system boot 810 abort( "boot() : internal error, recursively invoked during system boot." ); 811 } // if 812 heapBoot = true; 813 #endif // __CFA_DEBUG__ 814 815 //verify( heapManager.heapBegin != 0 ); 816 //heapManager{}; 817 if ( heapManager.heapBegin == 0p ) heapManager{}; // sanity check 818 } // memory_startup 819 820 static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) )); 821 void memory_shutdown( void ) { 822 ^heapManager{}; 823 } // memory_shutdown 824 825 826 static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics 827 verify( heapManager.heapBegin != 0p ); // called before memory_startup ? 
828 if ( unlikely( size ) == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 829 830 #if __SIZEOF_POINTER__ == 8 831 verify( size < ((typeof(size_t))1 << 48) ); 832 #endif // __SIZEOF_POINTER__ == 8 833 return doMalloc( size ); 834 } // mallocNoStats 835 836 837 static inline void * memalignNoStats( size_t alignment, size_t size ) { 838 if ( unlikely( size ) == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 839 840 #ifdef __CFA_DEBUG__ 1211 #ifdef __STATISTICS__ 1212 static void incCalls( long int statName ) libcfa_nopreempt { 1213 heapManager->stats.counters[statName].calls += 1; 1214 } // incCalls 1215 1216 static void incZeroCalls( long int statName ) libcfa_nopreempt { 1217 heapManager->stats.counters[statName].calls_0 += 1; 1218 } // incZeroCalls 1219 #endif // __STATISTICS__ 1220 1221 #ifdef __CFA_DEBUG__ 1222 static void incUnfreed( size_t offset ) libcfa_nopreempt { 1223 heapManager->allocUnfreed += offset; 1224 } // incUnfreed 1225 #endif // __CFA_DEBUG__ 1226 1227 1228 static void * memalignNoStats( size_t alignment, size_t size STAT_PARM ) { 841 1229 checkAlign( alignment ); // check alignment 842 #endif // __CFA_DEBUG__ 843 844 // if alignment <= default alignment, do normal malloc as two headers are unnecessary 845 if ( unlikely( alignment <= libAlign() ) ) return mallocNoStats( size ); 1230 1231 // if alignment <= default alignment or size == 0, do normal malloc as two headers are unnecessary 1232 if ( unlikely( alignment <= libAlign() || size == 0 ) ) return doMalloc( size STAT_ARG( STAT_NAME ) ); 846 1233 847 1234 // Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for … … 854 1241 // subtract libAlign() because it is already the minimum alignment 855 1242 // add sizeof(Storage) for fake header 856 char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(Heap.Storage) ); 1243 size_t offset = alignment - libAlign() + sizeof(Heap.Storage); 1244 char * addr = (char *)doMalloc( size + offset STAT_ARG( STAT_NAME ) ); 857 1245 858 1246 // address in the block of the "next" alignment address … … 860 1248 861 1249 // address of header from malloc 862 Heap.Storage.Header * RealHeader = HeaderAddr( addr ); 863 RealHeader->kind.real.size = size; // correct size to eliminate above alignment offset 864 // address of fake header * before* the alignment location 1250 Heap.Storage.Header * realHeader = HeaderAddr( addr ); 1251 realHeader->kind.real.size = size; // correct size to eliminate above alignment offset 1252 #ifdef __CFA_DEBUG__ 1253 incUnfreed( -offset ); // adjustment off the offset from call to doMalloc 1254 #endif // __CFA_DEBUG__ 1255 1256 // address of fake header *before* the alignment location 865 1257 Heap.Storage.Header * fakeHeader = HeaderAddr( user ); 1258 866 1259 // SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment 867 fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *) RealHeader;1260 fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader; 868 1261 // SKULLDUGGERY: odd alignment implies fake header 869 1262 fakeHeader->kind.fake.alignment = MarkAlignmentBit( alignment ); … … 880 1273 // then malloc() returns a unique pointer value that can later be successfully passed to free(). 
881 1274 void * malloc( size_t size ) libcfa_public { 882 #ifdef __STATISTICS__ 883 if ( likely( size > 0 ) ) { 884 __atomic_add_fetch( &stats.malloc_calls, 1, __ATOMIC_SEQ_CST ); 885 __atomic_add_fetch( &stats.malloc_storage_request, size, __ATOMIC_SEQ_CST ); 886 } else { 887 __atomic_add_fetch( &stats.malloc_0_calls, 1, __ATOMIC_SEQ_CST ); 888 } // if 889 #endif // __STATISTICS__ 890 891 return mallocNoStats( size ); 1275 return doMalloc( size STAT_ARG( MALLOC ) ); 892 1276 } // malloc 893 1277 … … 895 1279 // Same as malloc() except size bytes is an array of dim elements each of elemSize bytes. 896 1280 void * aalloc( size_t dim, size_t elemSize ) libcfa_public { 897 size_t size = dim * elemSize; 898 #ifdef __STATISTICS__ 899 if ( likely( size > 0 ) ) { 900 __atomic_add_fetch( &stats.aalloc_calls, 1, __ATOMIC_SEQ_CST ); 901 __atomic_add_fetch( &stats.aalloc_storage_request, size, __ATOMIC_SEQ_CST ); 902 } else { 903 __atomic_add_fetch( &stats.aalloc_0_calls, 1, __ATOMIC_SEQ_CST ); 904 } // if 905 #endif // __STATISTICS__ 906 907 return mallocNoStats( size ); 1281 return doMalloc( dim * elemSize STAT_ARG( AALLOC ) ); 908 1282 } // aalloc 909 1283 … … 912 1286 void * calloc( size_t dim, size_t elemSize ) libcfa_public { 913 1287 size_t size = dim * elemSize; 914 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER 915 #ifdef __STATISTICS__ 916 __atomic_add_fetch( &stats.calloc_0_calls, 1, __ATOMIC_SEQ_CST ); 917 #endif // __STATISTICS__ 918 return 0p; 919 } // if 920 #ifdef __STATISTICS__ 921 __atomic_add_fetch( &stats.calloc_calls, 1, __ATOMIC_SEQ_CST ); 922 __atomic_add_fetch( &stats.calloc_storage_request, dim * elemSize, __ATOMIC_SEQ_CST ); 923 #endif // __STATISTICS__ 924 925 char * addr = (char *)mallocNoStats( size ); 1288 char * addr = (char *)doMalloc( size STAT_ARG( CALLOC ) ); 1289 1290 if ( unlikely( addr == NULL ) ) return NULL; // stop further processing if 0p is returned 926 1291 927 1292 Heap.Storage.Header * header; 928 Heap.FreeHeader * free Elem;1293 Heap.FreeHeader * freeHead; 929 1294 size_t bsize, alignment; 930 1295 … … 932 1297 bool mapped = 933 1298 #endif // __CFA_DEBUG__ 934 headers( "calloc", addr, header, free Elem, bsize, alignment );1299 headers( "calloc", addr, header, freeHead, bsize, alignment ); 935 1300 936 1301 #ifndef __CFA_DEBUG__ 937 1302 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 938 if ( ! mapped)1303 if ( likely( ! mapped ) ) 939 1304 #endif // __CFA_DEBUG__ 940 1305 // <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined … … 952 1317 // call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done. 953 1318 void * resize( void * oaddr, size_t size ) libcfa_public { 954 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
955 if ( unlikely( size == 0 ) ) { // special cases 956 #ifdef __STATISTICS__ 957 __atomic_add_fetch( &stats.resize_0_calls, 1, __ATOMIC_SEQ_CST ); 958 #endif // __STATISTICS__ 959 free( oaddr ); 960 return 0p; 961 } // if 962 #ifdef __STATISTICS__ 963 __atomic_add_fetch( &stats.resize_calls, 1, __ATOMIC_SEQ_CST ); 964 #endif // __STATISTICS__ 965 966 if ( unlikely( oaddr == 0p ) ) { 967 #ifdef __STATISTICS__ 968 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 969 #endif // __STATISTICS__ 970 return mallocNoStats( size ); 971 } // if 1319 if ( unlikely( oaddr == 0p ) ) { // => malloc( size ) 1320 return doMalloc( size STAT_ARG( RESIZE ) ); 1321 } // if 1322 1323 PROLOG( RESIZE, doFree( oaddr ) ); // => free( oaddr ) 972 1324 973 1325 Heap.Storage.Header * header; 974 Heap.FreeHeader * free Elem;1326 Heap.FreeHeader * freeHead; 975 1327 size_t bsize, oalign; 976 headers( "resize", oaddr, header, free Elem, bsize, oalign );1328 headers( "resize", oaddr, header, freeHead, bsize, oalign ); 977 1329 978 1330 size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket … … 980 1332 if ( oalign == libAlign() && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size 981 1333 ClearZeroFillBit( header ); // no alignment and turn off 0 fill 1334 #ifdef __CFA_DEBUG__ 1335 incUnfreed( size - header->kind.real.size ); // adjustment off the size difference 1336 #endif // __CFA_DEBUG__ 982 1337 header->kind.real.size = size; // reset allocation size 1338 #ifdef __STATISTICS__ 1339 incCalls( RESIZE ); 1340 #endif // __STATISTICS__ 983 1341 return oaddr; 984 1342 } // if 985 1343 986 #ifdef __STATISTICS__987 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST );988 #endif // __STATISTICS__989 990 1344 // change size, DO NOT preserve STICKY PROPERTIES. 991 free( oaddr ); 992 return mallocNoStats( size ); // create new area 1345 doFree( oaddr ); // free previous storage 1346 1347 return doMalloc( size STAT_ARG( RESIZE ) ); // create new area 993 1348 } // resize 994 1349 … … 997 1352 // the old and new sizes. 998 1353 void * realloc( void * oaddr, size_t size ) libcfa_public { 999 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
1000 if ( unlikely( size == 0 ) ) { // special cases 1001 #ifdef __STATISTICS__ 1002 __atomic_add_fetch( &stats.realloc_0_calls, 1, __ATOMIC_SEQ_CST ); 1003 #endif // __STATISTICS__ 1004 free( oaddr ); 1005 return 0p; 1006 } // if 1007 #ifdef __STATISTICS__ 1008 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 1009 #endif // __STATISTICS__ 1010 1011 if ( unlikely( oaddr == 0p ) ) { 1012 #ifdef __STATISTICS__ 1013 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 1014 #endif // __STATISTICS__ 1015 return mallocNoStats( size ); 1016 } // if 1354 if ( unlikely( oaddr == 0p ) ) { // => malloc( size ) 1355 return doMalloc( size STAT_ARG( REALLOC ) ); 1356 } // if 1357 1358 PROLOG( REALLOC, doFree( oaddr ) ); // => free( oaddr ) 1017 1359 1018 1360 Heap.Storage.Header * header; 1019 Heap.FreeHeader * free Elem;1361 Heap.FreeHeader * freeHead; 1020 1362 size_t bsize, oalign; 1021 headers( "realloc", oaddr, header, free Elem, bsize, oalign );1363 headers( "realloc", oaddr, header, freeHead, bsize, oalign ); 1022 1364 1023 1365 size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket … … 1025 1367 bool ozfill = ZeroFillBit( header ); // old allocation zero filled 1026 1368 if ( unlikely( size <= odsize ) && odsize <= size * 2 ) { // allow up to 50% wasted storage 1027 header->kind.real.size = size; // reset allocation size 1369 #ifdef __CFA_DEBUG__ 1370 incUnfreed( size - header->kind.real.size ); // adjustment off the size difference 1371 #endif // __CFA_DEBUG__ 1372 header->kind.real.size = size; // reset allocation size 1028 1373 if ( unlikely( ozfill ) && size > osize ) { // previous request zero fill and larger ? 1029 1374 memset( (char *)oaddr + osize, '\0', size - osize ); // initialize added storage 1030 1375 } // if 1376 #ifdef __STATISTICS__ 1377 incCalls( REALLOC ); 1378 #endif // __STATISTICS__ 1031 1379 return oaddr; 1032 1380 } // if 1033 1381 1034 #ifdef __STATISTICS__1035 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST );1036 #endif // __STATISTICS__1037 1038 1382 // change size and copy old content to new storage 1039 1383 1040 1384 void * naddr; 1041 if ( likely( oalign == libAlign() ) ) { // previous request not aligned ?1042 naddr = mallocNoStats( size );// create new area1385 if ( likely( oalign <= libAlign() ) ) { // previous request not aligned ? 1386 naddr = doMalloc( size STAT_ARG( REALLOC ) ); // create new area 1043 1387 } else { 1044 naddr = memalignNoStats( oalign, size ); // create new aligned area 1045 } // if 1046 1047 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 1388 naddr = memalignNoStats( oalign, size STAT_ARG( REALLOC ) ); // create new aligned area 1389 } // if 1390 1391 headers( "realloc", naddr, header, freeHead, bsize, oalign ); 1392 // To preserve prior fill, the entire bucket must be copied versus the size. 1048 1393 memcpy( naddr, oaddr, min( osize, size ) ); // copy bytes 1049 free( oaddr );1394 doFree( oaddr ); // free previous storage 1050 1395 1051 1396 if ( unlikely( ozfill ) ) { // previous request zero fill ? … … 1067 1412 // Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. 
(obsolete) 1068 1413 void * memalign( size_t alignment, size_t size ) libcfa_public { 1069 #ifdef __STATISTICS__ 1070 if ( likely( size > 0 ) ) { 1071 __atomic_add_fetch( &stats.memalign_calls, 1, __ATOMIC_SEQ_CST ); 1072 __atomic_add_fetch( &stats.memalign_storage_request, size, __ATOMIC_SEQ_CST ); 1073 } else { 1074 __atomic_add_fetch( &stats.memalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1075 } // if 1076 #endif // __STATISTICS__ 1077 1078 return memalignNoStats( alignment, size ); 1414 return memalignNoStats( alignment, size STAT_ARG( MEMALIGN ) ); 1079 1415 } // memalign 1080 1416 … … 1082 1418 // Same as aalloc() with memory alignment. 1083 1419 void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public { 1084 size_t size = dim * elemSize; 1085 #ifdef __STATISTICS__ 1086 if ( likely( size > 0 ) ) { 1087 __atomic_add_fetch( &stats.cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 1088 __atomic_add_fetch( &stats.cmemalign_storage_request, size, __ATOMIC_SEQ_CST ); 1089 } else { 1090 __atomic_add_fetch( &stats.cmemalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1091 } // if 1092 #endif // __STATISTICS__ 1093 1094 return memalignNoStats( alignment, size ); 1420 return memalignNoStats( alignment, dim * elemSize STAT_ARG( AMEMALIGN ) ); 1095 1421 } // amemalign 1096 1422 … … 1099 1425 void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public { 1100 1426 size_t size = dim * elemSize; 1101 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER 1102 #ifdef __STATISTICS__ 1103 __atomic_add_fetch( &stats.cmemalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1104 #endif // __STATISTICS__ 1105 return 0p; 1106 } // if 1107 #ifdef __STATISTICS__ 1108 __atomic_add_fetch( &stats.cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 1109 __atomic_add_fetch( &stats.cmemalign_storage_request, dim * elemSize, __ATOMIC_SEQ_CST ); 1110 #endif // __STATISTICS__ 1111 1112 char * addr = (char *)memalignNoStats( alignment, size ); 1427 char * addr = (char *)memalignNoStats( alignment, size STAT_ARG( CMEMALIGN ) ); 1428 1429 if ( unlikely( addr == NULL ) ) return NULL; // stop further processing if 0p is returned 1113 1430 1114 1431 Heap.Storage.Header * header; 1115 Heap.FreeHeader * free Elem;1432 Heap.FreeHeader * freeHead; 1116 1433 size_t bsize; 1117 1434 … … 1119 1436 bool mapped = 1120 1437 #endif // __CFA_DEBUG__ 1121 headers( "cmemalign", addr, header, free Elem, bsize, alignment );1438 headers( "cmemalign", addr, header, freeHead, bsize, alignment ); 1122 1439 1123 1440 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. … … 1169 1486 // 0p, no operation is performed. 
1170 1487 void free( void * addr ) libcfa_public { 1488 // verify( heapManager ); 1489 1171 1490 if ( unlikely( addr == 0p ) ) { // special case 1172 1491 #ifdef __STATISTICS__ 1173 __atomic_add_fetch( &stats.free_null_calls, 1, __ATOMIC_SEQ_CST ); 1492 if ( heapManager ) 1493 incZeroCalls( FREE ); 1174 1494 #endif // __STATISTICS__ 1175 1176 // #ifdef __CFA_DEBUG__1177 // if ( traceHeap() ) {1178 // #define nullmsg "Free( 0x0 ) size:0\n"1179 // // Do not debug print free( 0p ), as it can cause recursive entry from sprintf.1180 // __cfaabi_dbg_write( nullmsg, sizeof(nullmsg) - 1 );1181 // } // if1182 // #endif // __CFA_DEBUG__1183 1495 return; 1184 } // exit 1185 1186 doFree( addr ); 1496 } // if 1497 1498 #ifdef __STATISTICS__ 1499 incCalls( FREE ); 1500 #endif // __STATISTICS__ 1501 1502 doFree( addr ); // handles heapManager == nullptr 1187 1503 } // free 1188 1504 … … 1227 1543 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size 1228 1544 Heap.Storage.Header * header; 1229 Heap.FreeHeader * free Elem;1545 Heap.FreeHeader * freeHead; 1230 1546 size_t bsize, alignment; 1231 1547 1232 headers( "malloc_usable_size", addr, header, free Elem, bsize, alignment );1548 headers( "malloc_usable_size", addr, header, freeHead, bsize, alignment ); 1233 1549 return DataStorage( bsize, addr, header ); // data storage in bucket 1234 1550 } // malloc_usable_size … … 1238 1554 void malloc_stats( void ) libcfa_public { 1239 1555 #ifdef __STATISTICS__ 1240 printStats(); 1241 if ( prtFree() ) prtFree( heapManager ); 1556 HeapStatistics stats; 1557 HeapStatisticsCtor( stats ); 1558 if ( printStats( collectStats( stats ) ) == -1 ) { 1559 #else 1560 #define MALLOC_STATS_MSG "malloc_stats statistics disabled.\n" 1561 if ( write( STDERR_FILENO, MALLOC_STATS_MSG, sizeof( MALLOC_STATS_MSG ) - 1 /* size includes '\0' */ ) == -1 ) { 1242 1562 #endif // __STATISTICS__ 1563 abort( "write failed in malloc_stats" ); 1564 } // if 1243 1565 } // malloc_stats 1244 1566 … … 1247 1569 int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public { 1248 1570 #ifdef __STATISTICS__ 1249 int temp = stats_fd;1250 stats_fd = fd;1571 int temp = heapMaster.stats_fd; 1572 heapMaster.stats_fd = fd; 1251 1573 return temp; 1252 1574 #else … … 1262 1584 if ( options != 0 ) { errno = EINVAL; return -1; } 1263 1585 #ifdef __STATISTICS__ 1264 return printStatsXML( stream ); 1586 HeapStatistics stats; 1587 HeapStatisticsCtor( stats ); 1588 return printStatsXML( collectStats( stats ), stream ); // returns bytes written or -1 1265 1589 #else 1266 1590 return 0; // unsupported … … 1275 1599 choose( option ) { 1276 1600 case M_TOP_PAD: 1277 heap Expand = ceiling2( value, __page_size );1601 heapMaster.heapExpand = ceiling2( value, __page_size ); 1278 1602 return 1; 1279 1603 case M_MMAP_THRESHOLD: … … 1319 1643 // Must have CFA linkage to overload with C linkage realloc. 1320 1644 void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public { 1321 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
1322 if ( unlikely( size == 0 ) ) { // special cases 1323 #ifdef __STATISTICS__ 1324 __atomic_add_fetch( &stats.resize_0_calls, 1, __ATOMIC_SEQ_CST ); 1325 #endif // __STATISTICS__ 1326 free( oaddr ); 1327 return 0p; 1645 if ( unlikely( oaddr == 0p ) ) { // => malloc( size ) 1646 return memalignNoStats( nalign, size STAT_ARG( RESIZE ) ); 1328 1647 } // if 1329 1648 1330 if ( unlikely( nalign < libAlign() ) ) nalign = libAlign(); // reset alignment to minimum 1331 #ifdef __CFA_DEBUG__ 1332 else checkAlign( nalign ); // check alignment 1333 #endif // __CFA_DEBUG__ 1334 1335 if ( unlikely( oaddr == 0p ) ) { 1336 #ifdef __STATISTICS__ 1337 __atomic_add_fetch( &stats.resize_calls, 1, __ATOMIC_SEQ_CST ); 1338 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 1339 #endif // __STATISTICS__ 1340 return memalignNoStats( nalign, size ); 1341 } // if 1649 PROLOG( RESIZE, doFree( oaddr ) ); // => free( oaddr ) 1342 1650 1343 1651 // Attempt to reuse existing alignment. … … 1347 1655 1348 1656 if ( unlikely( isFakeHeader ) ) { 1657 checkAlign( nalign ); // check alignment 1349 1658 oalign = ClearAlignmentBit( header ); // old alignment 1350 1659 if ( unlikely( (uintptr_t)oaddr % nalign == 0 // lucky match ? … … 1353 1662 ) ) { 1354 1663 HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same) 1355 Heap.FreeHeader * free Elem;1664 Heap.FreeHeader * freeHead; 1356 1665 size_t bsize, oalign; 1357 headers( "resize", oaddr, header, free Elem, bsize, oalign );1666 headers( "resize", oaddr, header, freeHead, bsize, oalign ); 1358 1667 size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket 1359 1668 … … 1361 1670 HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same) 1362 1671 ClearZeroFillBit( header ); // turn off 0 fill 1672 #ifdef __CFA_DEBUG__ 1673 incUnfreed( size - header->kind.real.size ); // adjustment off the size difference 1674 #endif // __CFA_DEBUG__ 1363 1675 header->kind.real.size = size; // reset allocation size 1676 #ifdef __STATISTICS__ 1677 incCalls( RESIZE ); 1678 #endif // __STATISTICS__ 1364 1679 return oaddr; 1365 1680 } // if … … 1370 1685 } // if 1371 1686 1372 #ifdef __STATISTICS__1373 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST );1374 #endif // __STATISTICS__1375 1376 1687 // change size, DO NOT preserve STICKY PROPERTIES. 1377 free( oaddr );1378 return memalignNoStats( nalign, size );// create new aligned area1688 doFree( oaddr ); // free previous storage 1689 return memalignNoStats( nalign, size STAT_ARG( RESIZE ) ); // create new aligned area 1379 1690 } // resize 1380 1691 1381 1692 1382 1693 void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public { 1383 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
1384 if ( unlikely( size == 0 ) ) { // special cases 1385 #ifdef __STATISTICS__ 1386 __atomic_add_fetch( &stats.realloc_0_calls, 1, __ATOMIC_SEQ_CST ); 1387 #endif // __STATISTICS__ 1388 free( oaddr ); 1389 return 0p; 1694 if ( unlikely( oaddr == 0p ) ) { // => malloc( size ) 1695 return memalignNoStats( nalign, size STAT_ARG( REALLOC ) ); 1390 1696 } // if 1391 1697 1392 if ( unlikely( nalign < libAlign() ) ) nalign = libAlign(); // reset alignment to minimum 1393 #ifdef __CFA_DEBUG__ 1394 else checkAlign( nalign ); // check alignment 1395 #endif // __CFA_DEBUG__ 1396 1397 if ( unlikely( oaddr == 0p ) ) { 1398 #ifdef __STATISTICS__ 1399 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 1400 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 1401 #endif // __STATISTICS__ 1402 return memalignNoStats( nalign, size ); 1403 } // if 1698 PROLOG( REALLOC, doFree( oaddr ) ); // => free( oaddr ) 1404 1699 1405 1700 // Attempt to reuse existing alignment. … … 1408 1703 size_t oalign; 1409 1704 if ( unlikely( isFakeHeader ) ) { 1705 checkAlign( nalign ); // check alignment 1410 1706 oalign = ClearAlignmentBit( header ); // old alignment 1411 1707 if ( unlikely( (uintptr_t)oaddr % nalign == 0 // lucky match ? … … 1421 1717 } // if 1422 1718 1423 #ifdef __STATISTICS__ 1424 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 1425 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 1426 #endif // __STATISTICS__ 1427 1428 Heap.FreeHeader * freeElem; 1719 Heap.FreeHeader * freeHead; 1429 1720 size_t bsize; 1430 headers( "realloc", oaddr, header, free Elem, bsize, oalign );1721 headers( "realloc", oaddr, header, freeHead, bsize, oalign ); 1431 1722 1432 1723 // change size and copy old content to new storage … … 1435 1726 bool ozfill = ZeroFillBit( header ); // old allocation zero filled 1436 1727 1437 void * naddr = memalignNoStats( nalign, size );// create new aligned area1438 1439 headers( "realloc", naddr, header, free Elem, bsize, oalign );1728 void * naddr = memalignNoStats( nalign, size STAT_ARG( REALLOC ) ); // create new aligned area 1729 1730 headers( "realloc", naddr, header, freeHead, bsize, oalign ); 1440 1731 memcpy( naddr, oaddr, min( osize, size ) ); // copy bytes 1441 free( oaddr );1732 doFree( oaddr ); // free previous storage 1442 1733 1443 1734 if ( unlikely( ozfill ) ) { // previous request zero fill ? … … 1451 1742 1452 1743 1744 void * reallocarray( void * oaddr, size_t nalign, size_t dim, size_t elemSize ) __THROW { 1745 return realloc( oaddr, nalign, dim * elemSize ); 1746 } // reallocarray 1747 1748 1453 1749 // Local Variables: // 1454 1750 // tab-width: 4 // -
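The heap.cfa hunks above revolve around the fake-header scheme used by memalignNoStats: over-allocate by alignment - libAlign() + sizeof(Heap.Storage), bump the returned address up to the next alignment boundary, and record the offset back to the real header just before the user address so a later free() can recover the original block. Below is a minimal stand-alone sketch of that trick, not the library code; the names (FakeHeader, aligned_malloc, aligned_free) are made up for illustration, and alignment is assumed to be a power of two no smaller than the word size. The CFA heap additionally marks the fake header with an odd alignment bit (MarkAlignmentBit) so headers() can tell real headers from fake ones; the sketch omits that.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { size_t offset; } FakeHeader;               // stand-in for the fake Heap.Storage.Header

// alignment must be a power of two, at least as large as the natural word alignment
static void * aligned_malloc( size_t alignment, size_t size ) {
    char * real = malloc( size + alignment + sizeof(FakeHeader) );  // room for padding + fake header
    if ( real == NULL ) return NULL;
    uintptr_t user = ((uintptr_t)real + sizeof(FakeHeader) + alignment - 1) & ~(uintptr_t)(alignment - 1);
    ((FakeHeader *)user)[-1].offset = user - (uintptr_t)real;       // remember where the real block starts
    return (void *)user;
} // aligned_malloc

static void aligned_free( void * addr ) {
    free( (char *)addr - ((FakeHeader *)addr)[-1].offset );         // walk back to the real block
} // aligned_free

int main( void ) {
    int * ip = aligned_malloc( 64, 10 * sizeof(int) );
    printf( "%p mod 64 = %zu\n", (void *)ip, (size_t)((uintptr_t)ip % 64) );
    aligned_free( ip );
} // main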
TabularUnified libcfa/src/heap.hfa ¶
r301071a r116a2ea 10 10 // Created On : Tue May 26 11:23:55 2020 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T hu Apr 21 22:52:25 202213 // Update Count : 2 112 // Last Modified On : Tue Oct 4 19:08:55 2022 13 // Update Count : 23 14 14 // 15 15 … … 30 30 bool checkFreeOff(); 31 31 32 // supported mallopt options33 #ifndef M_MMAP_THRESHOLD34 #define M_MMAP_THRESHOLD (-1)35 #endif // M_MMAP_THRESHOLD36 37 #ifndef M_TOP_PAD38 #define M_TOP_PAD (-2)39 #endif // M_TOP_PAD40 41 32 extern "C" { 42 33 // New allocation operations. … … 49 40 size_t malloc_size( void * addr ); 50 41 int malloc_stats_fd( int fd ); 51 size_t malloc_usable_size( void * addr );52 42 size_t malloc_expansion(); // heap expansion size (bytes) 53 43 size_t malloc_mmap_start(); // crossover allocation size from sbrk to mmap -
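The M_MMAP_THRESHOLD and M_TOP_PAD fallback definitions removed from heap.hfa here now live in heap.cfa, where mallopt maps them onto the heap expansion increment and the sbrk/mmap crossover point. A small, hedged example of how an application might drive those two knobs through the standard mallopt interface (the values are illustrative only; mallopt returns 1 on success):

#include <malloc.h>     // mallopt, M_TOP_PAD, M_MMAP_THRESHOLD
#include <stdio.h>

int main( void ) {
    if ( mallopt( M_TOP_PAD, 16 * 1024 * 1024 ) != 1 )      // heap (sbrk) expansion increment
        printf( "M_TOP_PAD not adjusted\n" );
    if ( mallopt( M_MMAP_THRESHOLD, 1024 * 1024 ) != 1 )    // crossover size from buckets to mmap
        printf( "M_MMAP_THRESHOLD not adjusted\n" );
} // main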
TabularUnified libcfa/src/startup.cfa ¶
r301071a r116a2ea 10 10 // Created On : Tue Jul 24 16:21:57 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 17 16:41:54202213 // Update Count : 5 512 // Last Modified On : Thu Oct 6 13:51:57 2022 13 // Update Count : 57 14 14 // 15 15 … … 24 24 25 25 extern "C" { 26 void __cfaabi_memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) )); 27 void __cfaabi_memory_startup( void ) { 28 extern void memory_startup(); 29 memory_startup(); 30 } // __cfaabi_memory_startup 31 32 void __cfaabi_memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) )); 33 void __cfaabi_memory_shutdown( void ) { 34 extern void memory_shutdown(); 35 memory_shutdown(); 36 } // __cfaabi_memory_shutdown 37 26 38 void __cfaabi_appready_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_APPREADY ) )); 27 39 void __cfaabi_appready_startup( void ) { 28 40 tzset(); // initialize time global variables 29 #ifdef __CFA_DEBUG__30 41 extern void heapAppStart(); 31 42 heapAppStart(); 32 #endif // __CFA_DEBUG__33 43 } // __cfaabi_appready_startup 34 44 35 45 void __cfaabi_appready_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_APPREADY ) )); 36 46 void __cfaabi_appready_shutdown( void ) { 37 #ifdef __CFA_DEBUG__38 47 extern void heapAppStop(); 39 48 heapAppStop(); 40 #endif // __CFA_DEBUG__41 49 } // __cfaabi_appready_shutdown 42 50 -
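The startup.cfa change registers heap construction and destruction through GCC/Clang constructor and destructor priorities (STARTUP_PRIORITY_MEMORY runs before STARTUP_PRIORITY_APPREADY) and drops the __CFA_DEBUG__ guard around heapAppStart/heapAppStop. A minimal sketch of the underlying mechanism, with made-up priorities and function names rather than the CFA ones: lower-numbered constructors run earlier at program start, and destructors run in the reverse order at program exit.

#include <stdio.h>

__attribute__(( constructor( 200 ) )) static void init_memory( void )   { puts( "memory up" ); }
__attribute__(( constructor( 300 ) )) static void init_appready( void ) { puts( "app ready" ); }
__attribute__(( destructor( 300 ) ))  static void fini_appready( void ) { puts( "app done" ); }
__attribute__(( destructor( 200 ) ))  static void fini_memory( void )   { puts( "memory down" ); }

int main( void ) {
    puts( "main" );     // output order: memory up, app ready, main, app done, memory down
} // main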
TabularUnified tests/.expect/alloc.txt ¶
r301071a r116a2ea 35 35 CFA realloc array alloc, fill 36 36 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 37 CFA realloc array alloc, 5 38 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 39 CFA realloc array alloc, 5 40 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 41 CFA realloc array alloc, 5 42 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 37 43 38 44 C memalign 42 42.5 -
TabularUnified tests/alloc.cfa ¶
r301071a r116a2ea 10 10 // Created On : Wed Feb 3 07:56:22 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Apr 18 17:13:52 202213 // Update Count : 43 312 // Last Modified On : Fri Jul 29 10:57:02 2022 13 // Update Count : 436 14 14 // 15 15 … … 154 154 printf( "\n" ); 155 155 // do not free 156 #if 0 // FIX ME 156 157 157 ip = alloc( 5 * dim, ip`realloc, 5`fill ); // CFA realloc array alloc, 5 158 158 printf( "CFA realloc array alloc, 5\n" ); … … 171 171 for ( i; 5 * dim ) { printf( "%#x ", ip[i] ); } 172 172 printf( "\n" ); 173 #endif // 0 173 174 174 free( ip ); 175 175 -
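The block re-enabled above grows the array with CFA's alloc( 5 * dim, ip`realloc, 5`fill ) form, which preserves the existing elements and fills only the newly added ones, matching the new lines in tests/.expect/alloc.txt. A rough plain-C equivalent of that grow-and-fill step, as a sketch only (it does not reproduce CFA's named-argument syntax):

#include <stdio.h>
#include <stdlib.h>

int main( void ) {
    size_t dim = 10;
    unsigned int * ip = malloc( dim * sizeof(unsigned int) );
    for ( size_t i = 0; i < dim; i += 1 ) ip[i] = 0xdeadbeef;

    size_t ndim = 5 * dim;
    ip = realloc( ip, ndim * sizeof(unsigned int) );             // old elements preserved
    for ( size_t i = dim; i < ndim; i += 1 ) ip[i] = 0x5;        // fill only the added tail

    for ( size_t i = 0; i < ndim; i += 1 ) printf( "%#x ", ip[i] );
    printf( "\n" );
    free( ip );
} // main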
TabularUnified tests/alloc2.cfa ¶
r301071a r116a2ea 1 #include <fstream.hfa> // sout 1 2 #include <malloc.h> // malloc_usable_size 2 3 #include <stdint.h> // uintptr_t … … 4 5 #include <string.h> // memcmp 5 6 6 int last_failed;7 7 int tests_total; 8 8 int tests_failed; … … 13 13 void test_base( void * ip, size_t size, size_t align ) { 14 14 tests_total += 1; 15 // printf( "DEBUG: starting test %d\n", tests_total);15 // sout | "DEBUG: starting test" | tests_total; 16 16 bool passed = (malloc_size( ip ) == size) && (malloc_usable_size( ip ) >= size) && (malloc_alignment( ip ) == align) && ((uintptr_t)ip % align == 0); 17 17 if ( ! passed ) { 18 printf( "failed test %3d: %4zu %4zu but got %4zu ( %3zu ) %4zu\n", tests_total, size, align, malloc_size( ip ), malloc_usable_size( ip ), malloc_alignment( ip ));18 sout | "base failed test" | tests_total | "ip" | ip | "size" | size | "align" | align | "but got size" | malloc_size( ip ) | "usable" | malloc_usable_size( ip ) | "align" | malloc_alignment( ip ); 19 19 tests_failed += 1; 20 20 } // if 21 // printf( "DEBUG: done test %d\n", tests_total);21 // sout | "DEBUG: done test" | tests_total; 22 22 } 23 23 24 24 void test_fill( void * ip_, size_t start, size_t end, char fill ) { 25 25 tests_total += 1; 26 // printf( "DEBUG: starting test %d\n", tests_total );26 // sout | "DEBUG: starting test" | tests_total; 27 27 bool passed = true; 28 28 char * ip = (char *) ip_; 29 29 for ( i; start ~ end ) passed = passed && (ip[i] == fill); 30 30 if ( ! passed ) { 31 printf( "failed test %3d: fill C\n", tests_total );31 sout | "fill1 failed test" | tests_total | "fill C"; 32 32 tests_failed += 1; 33 33 } // if 34 // printf( "DEBUG: done test %d\n", tests_total );34 // sout | "DEBUG: done test" | tests_total; 35 35 } 36 36 37 37 void test_fill( void * ip_, size_t start, size_t end, int fill ) { 38 38 tests_total += 1; 39 // printf( "DEBUG: starting test %d\n", tests_total );39 // sout | "DEBUG: starting test" tests_total; 40 40 bool passed = true; 41 41 int * ip = (int *)ip_; 42 for ( i; start ~ end ) passed = passed && (ip[i] == fill);42 for ( i; start ~ end ) passed = passed && (ip[i] == fill); 43 43 if ( ! passed ) { 44 printf( "failed test %3d: fill int\n", tests_total );44 sout | "fill2 failed test" | tests_total | "fill int"; 45 45 tests_failed += 1; 46 46 } // if 47 // printf( "DEBUG: done test %d\n", tests_total );47 // sout | "DEBUG: done test" | tests_total; 48 48 } 49 49 50 50 void test_fill( void * ip_, size_t start, size_t end, int * fill ) { 51 51 tests_total += 1; 52 // printf( "DEBUG: starting test %d\n", tests_total );52 // sout | "DEBUG: starting test" | tests_total; 53 53 bool passed = memcmp((void*)((uintptr_t )ip_ + start ), (void*)fill, end ) == 0; 54 54 if ( ! passed ) { 55 printf( "failed test %3d: fill int A\n", tests_total );55 sout | "fill3 failed test" | tests_total | "fill int A"; 56 56 tests_failed += 1; 57 57 } // if 58 // printf( "DEBUG: done test %d\n", tests_total );58 // sout | "DEBUG: done test" | tests_total; 59 59 } 60 60 61 61 void test_fill( void * ip_, size_t start, size_t end, T1 fill ) { 62 62 tests_total += 1; 63 // printf( "DEBUG: starting test %d\n", tests_total );63 // sout | "DEBUG: starting test" | tests_total; 64 64 bool passed = true; 65 65 T1 * ip = (T1 *) ip_; 66 66 for ( i; start ~ end ) passed = passed && (ip[i].data == fill.data ); 67 67 if ( ! 
passed ) { 68 printf( "failed test %3d: fill T1\n", tests_total );68 sout | "fill4 failed test" | tests_total | "fill T1"; 69 69 tests_failed += 1; 70 70 } // if 71 // printf( "DEBUG: done test %d\n", tests_total );71 // sout | "DEBUG: done test" | tests_total; 72 72 } 73 73 74 74 void test_fill( void * ip_, size_t start, size_t end, T1 * fill ) { 75 75 tests_total += 1; 76 // printf( "DEBUG: starting test %d\n", tests_total );76 // sout | "DEBUG: starting test" | tests_total; 77 77 bool passed = memcmp( (void*)((uintptr_t )ip_ + start ), (void*)fill, end ) == 0; 78 78 if ( ! passed ) { 79 printf( "failed test %3d: fill T1 A\n", tests_total );79 sout | "fill5 failed test" | tests_total | "fill T1 A"; 80 80 tests_failed += 1; 81 81 } // if 82 // printf( "DEBUG: done test %d\n", tests_total );82 // sout | "DEBUG: done test" | tests_total; 83 83 } 84 84 85 85 void test_use( int * ip, size_t dim ) { 86 86 tests_total += 1; 87 // printf( "DEBUG: starting test %d\n", tests_total );87 // sout | "DEBUG: starting test" | tests_total; 88 88 bool passed = true; 89 89 for ( i; 0 ~ dim ) ip[i] = 0xdeadbeef; 90 90 for ( i; 0 ~ dim ) passed = passed && (ip[i] == 0xdeadbeef); 91 91 if ( ! passed ) { 92 printf( "failed test %3d: use int\n", tests_total );92 sout | "use1 failed test" | tests_total | "use int"; 93 93 tests_failed += 1; 94 94 } // if 95 // printf( "DEBUG: done test %d\n", tests_total );95 // sout | "DEBUG: done test" | tests_total; 96 96 } 97 97 98 98 void test_use( T1 * ip, size_t dim ) { 99 99 tests_total += 1; 100 // printf( "DEBUG: starting test %d\n", tests_total );100 // sout | "DEBUG: starting test" | tests_total; 101 101 bool passed = true; 102 102 for ( i; 0 ~ dim ) ip[i].data = 0xdeadbeef; 103 103 for ( i; 0 ~ dim ) passed = passed && (ip[i].data == 0xdeadbeef); 104 104 if ( ! 
passed ) { 105 printf( "failed test %3d: use T1\n", tests_total );105 sout | "use2 failed test" | tests_total | "use T1"; 106 106 tests_failed += 1; 107 107 } // if 108 // printf( "DEBUG: done test %d\n", tests_total );108 // sout | "DEBUG: done test" | tests_total; 109 109 } 110 110 … … 117 117 char FillC = 'a'; 118 118 int * FillA = calloc( dim / 4 ); 119 119 120 T1 FillT1 = { FillT }; 120 121 T1 * FillT1A = (T1 *)(void *) malloc( (dim / 4) * sizeof(T1) ); … … 129 130 // testing alloc 130 131 131 last_failed = -1;132 132 tests_total = 0; 133 133 tests_failed = 0; … … 153 153 free( ip ); 154 154 155 ip = alloc( ((double *)0p)`resize );155 ip = alloc( 0p`resize ); 156 156 test_base( ip, elemSize, libAlign ); 157 157 test_use( ip, elemSize / elemSize ); … … 495 495 free( ip ); 496 496 497 if ( tests_failed == 0 ) printf( "PASSED alloc tests\n\n" );498 else printf( "failed alloc tests : %d/%d\n\n", tests_failed, tests_total );499 500 // testing alloc ( aligned struct)497 if ( tests_failed == 0 ) sout | "PASSED alloc tests" | nl | nl; 498 else sout | "failed alloc tests :" | tests_failed | tests_total | nl | nl; 499 500 // testing alloc (aligned struct) 501 501 502 502 elemSize = sizeof(T1); 503 503 size = dim * elemSize; 504 last_failed = -1;505 504 tests_total = 0; 506 505 tests_failed = 0; … … 868 867 free( t1p ); 869 868 870 if ( tests_failed == 0) printf( "PASSED alloc tests (aligned struct)\n\n");871 else printf( "failed alloc tests ( aligned struct ) : %d/%d\n\n", tests_failed, tests_total );872 873 printf( "(if applicable) alignment error below indicates memory trashing caused by test_use.\n\n");869 if ( tests_failed == 0) sout | "PASSED alloc tests (aligned struct)" | nl | nl; 870 else sout | "failed alloc tests ( aligned struct ) :" | tests_failed | tests_total | nl; 871 872 sout | "(if applicable) alignment error below indicates memory trashing caused by test_use." | nl | nl; 874 873 free( FillA ); 875 874 free( FillT1A ); -
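The helpers above (test_base, test_fill, test_use) all follow the same pattern: check the reported size and alignment, then exercise every byte the allocator claims is usable. malloc_size and malloc_alignment are CFA extensions, so the hedged sketch below uses only glibc's malloc_usable_size to show the shape of the check; the helper name and the 0xdeadbeef pattern mirror the test, but the code itself is illustrative rather than the test source.

#include <malloc.h>     // malloc_usable_size (glibc)
#include <stdio.h>
#include <stdlib.h>

static int test_use( unsigned int * ip, size_t request ) {
    size_t usable = malloc_usable_size( ip );
    if ( usable < request ) return 0;                            // usable storage must cover the request
    for ( size_t i = 0; i < usable / sizeof(unsigned int); i += 1 ) ip[i] = 0xdeadbeef;
    for ( size_t i = 0; i < usable / sizeof(unsigned int); i += 1 )
        if ( ip[i] != 0xdeadbeef ) return 0;                     // storage must hold what was written
    return 1;
} // test_use

int main( void ) {
    size_t request = 8 * sizeof(unsigned int);
    unsigned int * ip = malloc( request );
    printf( "test_use %s\n", test_use( ip, request ) ? "passed" : "failed" );
    free( ip );
} // main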
TabularUnified tests/malloc.cfa ¶
r301071a r116a2ea 1 #include < assert.h>1 #include <fstream.hfa> // sout 2 2 #include <malloc.h> // malloc_usable_size 3 3 #include <stdint.h> // uintptr_t 4 #include <stdlib.h> // posix_memalign5 #include <fstream.hfa>6 4 #include <stdlib.hfa> // access C malloc, realloc 7 5 #include <unistd.h> // getpagesize … … 10 8 int tests_failed; 11 9 size_t tAlign = 32; 12 struct S1 { int d 1; } __attribute__((aligned(32)));10 struct S1 { int data; } __attribute__(( aligned(32))); 13 11 typedef struct S1 T1; 14 12 15 void test_base( void * ip, size_t size, size_t align ) {13 void test_base( void * ip, size_t size, size_t align ) { 16 14 tests_total += 1; 17 bool passed = (malloc_size(ip) == size) && (malloc_usable_size(ip) >= size) && (malloc_alignment(ip) == align) && ((uintptr_t)ip % align == 0); 18 if (!passed) { 19 printf("failed test %2d: %4lu %4lu but got %4lu ( %3lu ) %4lu\n", tests_total, size, align, malloc_size(ip), malloc_usable_size(ip), malloc_alignment(ip)); 15 bool passed = (malloc_size( ip ) == size) && (malloc_usable_size( ip ) >= size) && (malloc_alignment( ip ) == align) && ((uintptr_t)ip % align == 0); 16 if ( ! passed ) { 17 sout | "base failed test" | tests_total | "ip" | ip | "size" | size | "align" | align | "but got size" | malloc_size( ip ) | "usable" | malloc_usable_size( ip ) | "align" | malloc_alignment( ip ); 18 tests_failed += 1; 19 } // if 20 } 21 22 void test_fill( void * ip_, size_t start, size_t end, char fill ) { 23 tests_total += 1; 24 bool passed = true; 25 char * ip = (char *) ip_; 26 for ( i; start ~ end ) passed = passed && (ip[i] == fill); 27 if ( ! passed ) { 28 sout | "fill1 failed test" | tests_total | "fill C"; 29 tests_failed += 1; 30 } // if 31 } 32 33 void test_use( void * ip_ ) { 34 tests_total += 1; 35 bool passed = true; 36 int * ip = (int *) ip_; 37 size_t size = malloc_size( ip ); 38 for ( i; 0 ~ size ~ sizeof(int)) ip[i/sizeof(int)] = 0xdeadbeef; 39 for ( i; 0 ~ size ~ sizeof(int)) passed = passed && (ip[i / sizeof(int)] == 0xdeadbeef); 40 size_t usize = malloc_usable_size( ip ); 41 for ( i; size ~ usize ~ sizeof(int)) ip[i / sizeof(int)] = -1; 42 for ( i; size ~ usize ~ sizeof(int)) passed = passed && (ip[i / sizeof(int)] == -1); 43 if ( ! 
passed ) { 44 sout | "failed test" | tests_total | "use"; 20 45 tests_failed += 1; 21 46 } 22 47 } 23 48 24 void test_fill( void * ip_, size_t start, size_t end, char fill) {25 tests_total += 1;26 bool passed = true;27 char * ip = (char *) ip_;28 for (i; start ~ end) passed = passed && (ip[i] == fill);29 if (!passed) {30 printf("failed test %2d: fill\n", tests_total);31 tests_failed += 1;32 }33 }34 35 void test_use( void * ip_) {36 tests_total += 1;37 bool passed = true;38 int * ip = (int *) ip_;39 size_t size = malloc_size(ip);40 for (i; 0 ~ size ~ sizeof(int)) ip[i/sizeof(int)] = 0xdeadbeef;41 for (i; 0 ~ size ~ sizeof(int)) passed = passed && (ip[i/sizeof(int)] == 0xdeadbeef);42 size_t usize = malloc_usable_size(ip);43 for (i; size ~ usize ~ sizeof(int)) ip[i/sizeof(int)] = -1;44 for (i; size ~ usize ~ sizeof(int)) passed = passed && (ip[i/sizeof(int)] == -1);45 if (!passed) {46 printf("failed test %2d: use\n", tests_total);47 tests_failed += 1;48 }49 }50 51 49 int main( void ) { 50 enum { dim = 8, align = 64, libAlign = libAlign() }; 52 51 size_t elemSize = sizeof(int); 53 size_t dim = 8;54 52 size_t size = dim * elemSize; 55 size_t align = 64;56 const size_t libAlign = libAlign();57 53 char fill = '\xde'; 58 54 int * ip; 59 55 T1 * tp; 60 56 61 // testing C 57 // testing C malloc 62 58 63 59 tests_total = 0; 64 60 tests_failed = 0; 65 61 66 ip = (int *) (void *)malloc( size );67 test_base( ip, size, libAlign);68 test_use( ip);69 free( ip);70 71 ip = (int *) (void *)malloc( 0 );72 test_base( ip, 0, libAlign);73 test_use( ip);74 free( ip);75 76 ip = (int *) (void *)aalloc( dim, elemSize );77 test_base( ip, size, libAlign);78 test_use( ip);79 free( ip);80 81 ip = (int *) (void *)aalloc( 0, elemSize );82 test_base( ip, 0, libAlign);83 test_use( ip);84 free( ip);85 86 ip = (int *) (void *)aalloc( dim, 0 );87 test_base( ip, 0, libAlign);88 test_use( ip);89 free( ip);90 91 ip = (int *) (void *)aalloc( 0, 0 );92 test_base( ip, 0, libAlign);93 test_use( ip);94 free( ip);95 96 ip = (int *) (void *)calloc( dim, elemSize );97 test_base( ip, size, libAlign);98 test_fill( ip, 0, size, '\0');99 test_use( ip);100 free( ip);101 102 ip = (int *) (void *)calloc( 0, elemSize );103 test_base( ip, 0, libAlign);104 test_fill( ip, 0, 0, '\0');105 test_use( ip);106 free( ip);107 108 ip = (int *) (void *)calloc( dim, 0 );109 test_base( ip, 0, libAlign);110 test_fill( ip, 0, 0, '\0');111 test_use( ip);112 free( ip);113 114 ip = (int *) (void *)malloc( size );115 ip = (int *) (void *) resize( (void *)ip, size / 4 );116 test_base( ip, size / 4, libAlign);117 test_use( ip);118 free( ip);119 120 ip = (int *) (void *)malloc( size );121 ip = (int *) (void *) resize( (void *)ip, size * 4 );122 test_base( ip, size * 4, libAlign);123 test_use( ip);124 free( ip);125 126 ip = (int *) (void *)malloc( size );127 ip = (int *) (void *) resize( (void *)ip, 0 );128 test_base( ip, 0, libAlign);129 test_use( ip);130 free( ip);131 132 ip = (int *) (void *)resize( NULL, size );133 test_base( ip, size, libAlign);134 test_use( ip);135 free( ip);136 137 ip = (int *) (void *)resize( 0p, size );138 test_base( ip, size, libAlign);139 test_use( ip);140 free( ip);141 142 ip = (int *) (void *)calloc( dim, elemSize );143 ip = (int *) (void *) realloc( (void *)ip, size / 4 );144 test_base( ip, size / 4, libAlign);145 test_fill( ip, 0, size / 4, '\0');146 test_use( ip);147 free( ip);148 149 ip = (int *) (void *)calloc( dim, elemSize );150 ip = (int *) (void *) realloc( (void *)ip, size * 4 );151 test_base( ip, size * 4, libAlign);152 
test_fill( ip, 0, size * 4, '\0');153 test_use( ip);154 free( ip);155 156 ip = (int *) (void *)calloc( dim, elemSize );157 ip = (int *) (void *) realloc( (void *)ip, 0 );158 test_base( ip, 0, libAlign);159 test_use( ip);160 free( ip);161 162 ip = (int *) (void *)realloc( NULL, size );163 test_base( ip, size , libAlign);164 test_use( ip);165 free( ip);166 167 ip = (int *) (void *)realloc( 0p, size );168 test_base( ip, size, libAlign);169 test_use( ip);170 free( ip);171 172 ip = (int *) (void *)memalign( align, size );173 test_base( ip, size, align);174 test_use( ip);175 free( ip);176 177 ip = (int *) (void *)memalign( align, 0 );178 test_base( ip, 0, libAlign);179 test_use( ip);180 free( ip);181 182 ip = (int *) (void *)amemalign( align, dim, elemSize );183 test_base( ip, size, align);184 test_use( ip);185 free( ip);186 187 ip = (int *) (void *)amemalign( align, 0, elemSize );188 test_base( ip, 0, libAlign);189 test_use( ip);190 free( ip);191 192 ip = (int *) (void *)amemalign( align, dim, 0 );193 test_base( ip, 0, libAlign);194 test_use( ip);195 free( ip);196 197 ip = (int *) (void *)cmemalign( align, dim, elemSize );198 test_base( ip, size, align);199 test_fill( ip, 0, size, '\0');200 test_use( ip);201 free( ip);202 203 ip = (int *) (void *)cmemalign( align, 0, elemSize );204 test_base( ip, 0, libAlign);205 test_use( ip);206 free( ip);207 208 ip = (int *) (void *)cmemalign( align, dim, 0 );209 test_base( ip, 0, libAlign);210 test_use( ip);211 free( ip);212 213 ip = (int *) (void *)aligned_alloc( align, size );214 test_base( ip, size, align);215 test_use( ip);216 free( ip);217 218 ip = (int *) (void *)aligned_alloc( align, 0 );219 test_base( ip, 0, libAlign);220 test_use( ip);221 free( ip);222 223 (int)posix_memalign( (void **) &ip, align, size );224 test_base( ip, size, align);225 test_use( ip);226 free( ip);227 228 (int)posix_memalign( (void **) &ip, align, 0 );229 test_base( ip, 0, libAlign);230 test_use( ip);231 free( ip);232 233 ip = (int *) (void *)valloc( size );234 test_base( ip, size, getpagesize());235 test_use( ip);236 free( ip);237 238 ip = (int *) (void *)valloc( 0 );239 test_base( ip, 0, libAlign);240 test_use( ip);241 free( ip);242 243 ip = (int *) (void *)pvalloc( getpagesize() * 3 / 2 );244 test_base( ip, getpagesize() * 2, getpagesize());245 test_use( ip);246 free( ip);247 248 ip = (int *) (void *)pvalloc( 0 );249 test_base( ip, 0, libAlign);250 test_use( ip);251 free( ip);252 253 ip = (int *) (void *)malloc( size );254 ip = (int *) (void *) resize( (void *)ip, libAlign, size / 2 );255 test_base( ip, size / 2, libAlign);256 test_use( ip);257 free( ip);258 259 ip = (int *) (void *)aligned_alloc( align, size );260 ip = (int *) (void *) resize( (void *)ip, align, size / 2 );261 test_base( ip, size / 2, align);262 test_use( ip);263 free( ip);264 265 ip = (int *) (void *)malloc( size );266 ip = (int *) (void *) resize( (void *)ip, align, size / 4 );267 test_base( ip, size / 4, align);268 test_use( ip);269 free( ip);270 271 ip = (int *) (void *)malloc( size );272 ip = (int *) (void *) resize( (void *)ip, align, 0 );273 test_base( ip, 0, libAlign);274 test_use( ip);275 free( ip);276 277 ip = (int *) (void *)resize( NULL, align, size );278 test_base( ip, size, align);279 test_use( ip);280 free( ip);281 282 ip = (int *) (void *)resize( 0p, align, size );283 test_base( ip, size, align);284 test_use( ip);285 free( ip);286 287 ip = (int *) (void *)calloc( dim, elemSize );288 ip = (int *) (void *) realloc( (void *)ip, libAlign, size / 2 );289 test_base( ip, size / 2, libAlign);290 
test_fill( ip, 0, size / 2, '\0');291 test_use( ip);292 free( ip);293 294 ip = (int *) (void *)cmemalign( align, dim, elemSize );295 ip = (int *) (void *) realloc( (void *)ip, align, size / 2 );296 test_base( ip, size / 2, align);297 test_fill( ip, 0, size / 2, '\0');298 test_use( ip);299 free( ip);300 301 ip = (int *) (void *)calloc( dim, elemSize );302 ip = (int *) (void *) realloc( (void *)ip, align, size / 4 );303 test_base( ip, size / 4, align);304 test_fill( ip, 0, size / 4, '\0');305 test_use( ip);306 free( ip);307 308 ip = (int *) (void *)calloc( dim, elemSize );309 ip = (int *) (void *) realloc( (void *) ip, 0, size * 4 );310 test_base( ip, size * 4, libAlign);311 test_fill( ip, 0, size * 4, '\0');312 test_use( ip);313 free( ip);314 315 ip = (int *) (void *)calloc( dim, elemSize );316 ip = (int *) (void *) realloc( (void *)ip, align, 0 );317 test_base( ip, 0, libAlign);318 test_use( ip);319 free( ip);320 321 free( 0p ); 322 free( NULL ); 323 324 if (tests_failed == 0) printf("PASSED C malloc tests\n\n");325 else printf("failed C malloc tests : %d/%d\n\n", tests_failed, tests_total);62 ip = (int *)malloc( size ); 63 test_base( ip, size, libAlign ); 64 test_use( ip ); 65 free( ip ); 66 67 ip = (int *)malloc( 0 ); 68 test_base( ip, 0, libAlign ); 69 test_use( ip ); 70 free( ip ); 71 72 ip = (int *)aalloc( dim, elemSize ); 73 test_base( ip, size, libAlign ); 74 test_use( ip ); 75 free( ip ); 76 77 ip = (int *)aalloc( 0, elemSize ); 78 test_base( ip, 0, libAlign ); 79 test_use( ip ); 80 free( ip ); 81 82 ip = (int *)aalloc( dim, 0 ); 83 test_base( ip, 0, libAlign ); 84 test_use( ip ); 85 free( ip ); 86 87 ip = (int *)aalloc( 0, 0 ); 88 test_base( ip, 0, libAlign ); 89 test_use( ip ); 90 free( ip ); 91 92 ip = (int *)calloc( dim, elemSize ); 93 test_base( ip, size, libAlign ); 94 test_fill( ip, 0, size, '\0' ); 95 test_use( ip ); 96 free( ip ); 97 98 ip = (int *)calloc( 0, elemSize ); 99 test_base( ip, 0, libAlign ); 100 test_fill( ip, 0, 0, '\0' ); 101 test_use( ip ); 102 free( ip ); 103 104 ip = (int *)calloc( dim, 0 ); 105 test_base( ip, 0, libAlign ); 106 test_fill( ip, 0, 0, '\0' ); 107 test_use( ip ); 108 free( ip ); 109 110 ip = (int *)malloc( size ); 111 ip = (int *)resize( ip, size / 4 ); 112 test_base( ip, size / 4, libAlign ); 113 test_use( ip ); 114 free( ip ); 115 116 ip = (int *)malloc( size ); 117 ip = (int *)resize( ip, size * 4 ); 118 test_base( ip, size * 4, libAlign ); 119 test_use( ip ); 120 free( ip ); 121 122 ip = (int *)malloc( size ); 123 ip = (int *)resize( ip, 0 ); 124 test_base( ip, 0, libAlign ); 125 test_use( ip ); 126 free( ip ); 127 128 ip = (int *)resize( NULL, size ); 129 test_base( ip, size, libAlign ); 130 test_use( ip ); 131 free( ip ); 132 133 ip = (int *)resize( 0p, size ); 134 test_base( ip, size, libAlign ); 135 test_use( ip ); 136 free( ip ); 137 138 ip = (int *)calloc( dim, elemSize ); 139 ip = (int *)realloc( ip, size / 4 ); 140 test_base( ip, size / 4, libAlign ); 141 test_fill( ip, 0, size / 4, '\0' ); 142 test_use( ip ); 143 free( ip ); 144 145 ip = (int *)calloc( dim, elemSize ); 146 ip = (int *)realloc( ip, size * 4 ); 147 test_base( ip, size * 4, libAlign ); 148 test_fill( ip, 0, size * 4, '\0' ); 149 test_use( ip ); 150 free( ip ); 151 152 ip = (int *)calloc( dim, elemSize ); 153 ip = (int *)realloc( ip, 0 ); 154 test_base( ip, 0, libAlign ); 155 test_use( ip ); 156 free( ip ); 157 158 ip = (int *)realloc( NULL, size ); 159 test_base( ip, size , libAlign ); 160 test_use( ip ); 161 free( ip ); 162 163 ip = (int *)realloc( 0p, size ); 164 
test_base( ip, size, libAlign ); 165 test_use( ip ); 166 free( ip ); 167 168 ip = (int *)memalign( align, size ); 169 test_base( ip, size, align ); 170 test_use( ip ); 171 free( ip ); 172 173 ip = (int *)memalign( align, 0 ); 174 test_base( ip, 0, libAlign ); 175 test_use( ip ); 176 free( ip ); 177 178 ip = (int *)amemalign( align, dim, elemSize ); 179 test_base( ip, size, align ); 180 test_use( ip ); 181 free( ip ); 182 183 ip = (int *)amemalign( align, 0, elemSize ); 184 test_base( ip, 0, libAlign ); 185 test_use( ip ); 186 free( ip ); 187 188 ip = (int *)amemalign( align, dim, 0 ); 189 test_base( ip, 0, libAlign ); 190 test_use( ip ); 191 free( ip ); 192 193 ip = (int *)cmemalign( align, dim, elemSize ); 194 test_base( ip, size, align ); 195 test_fill( ip, 0, size, '\0' ); 196 test_use( ip ); 197 free( ip ); 198 199 ip = (int *)cmemalign( align, 0, elemSize ); 200 test_base( ip, 0, libAlign ); 201 test_use( ip ); 202 free( ip ); 203 204 ip = (int *)cmemalign( align, dim, 0 ); 205 test_base( ip, 0, libAlign ); 206 test_use( ip ); 207 free( ip ); 208 209 ip = (int *)aligned_alloc( align, size ); 210 test_base( ip, size, align ); 211 test_use( ip ); 212 free( ip ); 213 214 ip = (int *)aligned_alloc( align, 0 ); 215 test_base( ip, 0, libAlign ); 216 test_use( ip ); 217 free( ip ); 218 219 posix_memalign( (void **) &ip, align, size ); 220 test_base( ip, size, align ); 221 test_use( ip ); 222 free( ip ); 223 224 posix_memalign( (void **) &ip, align, 0 ); 225 test_base( ip, 0, libAlign ); 226 test_use( ip ); 227 free( ip ); 228 229 ip = (int *)valloc( size ); 230 test_base( ip, size, getpagesize() ); 231 test_use( ip ); 232 free( ip ); 233 234 ip = (int *)valloc( 0 ); 235 test_base( ip, 0, libAlign ); 236 test_use( ip ); 237 free( ip ); 238 239 ip = (int *)pvalloc( getpagesize() * 3 / 2 ); 240 test_base( ip, getpagesize() * 2, getpagesize() ); 241 test_use( ip ); 242 free( ip ); 243 244 ip = (int *)pvalloc( 0 ); 245 test_base( ip, 0, libAlign ); 246 test_use( ip ); 247 free( ip ); 248 249 ip = (int *)malloc( size ); 250 ip = (int *)resize( ip, libAlign, size / 2 ); 251 test_base( ip, size / 2, libAlign ); 252 test_use( ip ); 253 free( ip ); 254 255 ip = (int *)aligned_alloc( align, size ); 256 ip = (int *)resize( ip, align, size / 2 ); 257 test_base( ip, size / 2, align ); 258 test_use( ip ); 259 free( ip ); 260 261 ip = (int *)malloc( size ); 262 ip = (int *)resize( ip, align, size / 4 ); 263 test_base( ip, size / 4, align ); 264 test_use( ip ); 265 free( ip ); 266 267 ip = (int *)malloc( size ); 268 ip = (int *)resize( ip, align, 0 ); 269 test_base( ip, 0, libAlign ); 270 test_use( ip ); 271 free( ip ); 272 273 ip = (int *)resize( NULL, align, size ); 274 test_base( ip, size, align ); 275 test_use( ip ); 276 free( ip ); 277 278 ip = (int *)resize( 0p, align, size ); 279 test_base( ip, size, align ); 280 test_use( ip ); 281 free( ip ); 282 283 ip = (int *)calloc( dim, elemSize ); 284 ip = (int *)realloc( ip, libAlign, size / 2 ); 285 test_base( ip, size / 2, libAlign ); 286 test_fill( ip, 0, size / 2, '\0' ); 287 test_use( ip ); 288 free( ip ); 289 290 ip = (int *)cmemalign( align, dim, elemSize ); 291 ip = (int *)realloc( ip, align, size / 2 ); 292 test_base( ip, size / 2, align ); 293 test_fill( ip, 0, size / 2, '\0' ); 294 test_use( ip ); 295 free( ip ); 296 297 ip = (int *)calloc( dim, elemSize ); 298 ip = (int *)realloc( ip, align, size / 4 ); 299 test_base( ip, size / 4, align ); 300 test_fill( ip, 0, size / 4, '\0' ); 301 test_use( ip ); 302 free( ip ); 303 304 ip = (int *)calloc( dim, 
elemSize ); 305 ip = (int *)realloc( ip, libAlign, size * 4 ); 306 test_base( ip, size * 4, libAlign ); 307 test_fill( ip, 0, size * 4, '\0' ); 308 test_use( ip ); 309 free( ip ); 310 311 ip = (int *)calloc( dim, elemSize ); 312 ip = (int *)realloc( ip, align, 0 ); 313 test_base( ip, 0, libAlign ); 314 test_use( ip ); 315 free( ip ); 316 317 free( 0p ); // sanity check 318 free( NULL ); // sanity check 319 320 if (tests_failed == 0) sout | "PASSED C malloc tests" | nl | nl; 321 else sout | "failed C malloc tests" | tests_failed | tests_total | nl | nl; 326 322 327 323 // testing CFA malloc … … 331 327 332 328 ip = malloc(); 333 test_base( ip, elemSize, libAlign);334 test_use( ip);335 free( ip);329 test_base( ip, elemSize, libAlign ); 330 test_use( ip ); 331 free( ip ); 336 332 337 333 ip = aalloc( dim ); 338 test_base( ip, size, libAlign);339 test_use( ip);340 free( ip);334 test_base( ip, size, libAlign ); 335 test_use( ip ); 336 free( ip ); 341 337 342 338 ip = aalloc( 0 ); 343 test_base( ip, 0, libAlign);344 test_use( ip);345 free( ip);339 test_base( ip, 0, libAlign ); 340 test_use( ip ); 341 free( ip ); 346 342 347 343 ip = calloc( dim ); 348 test_base( ip, size, libAlign);349 test_fill( ip, 0, size, '\0');350 test_use( ip);351 free( ip);344 test_base( ip, size, libAlign ); 345 test_fill( ip, 0, size, '\0' ); 346 test_use( ip ); 347 free( ip ); 352 348 353 349 ip = calloc( 0 ); 354 test_base( ip, 0, libAlign);355 test_use( ip);356 free( ip);350 test_base( ip, 0, libAlign ); 351 test_use( ip ); 352 free( ip ); 357 353 358 354 ip = aalloc( dim ); 359 355 ip = resize( ip, size / 4 ); 360 test_base( ip, size / 4, libAlign);361 test_use( ip);362 free( ip);356 test_base( ip, size / 4, libAlign ); 357 test_use( ip ); 358 free( ip ); 363 359 364 360 ip = aalloc( dim ); 365 361 ip = resize( ip, size * 4 ); 366 test_base( ip, size * 4, libAlign);367 test_use( ip);368 free( ip);362 test_base( ip, size * 4, libAlign ); 363 test_use( ip ); 364 free( ip ); 369 365 370 366 ip = aalloc( dim ); 371 367 ip = resize( ip, 0 ); 372 test_base( ip, 0, libAlign);373 test_use( ip);374 free( ip);375 376 ip = resize( (int*)0p, size );377 test_base( ip, size, libAlign);378 test_use( ip);379 free( ip);380 381 ip = resize( (int*)0p, size );382 test_base( ip, size, libAlign);383 test_use( ip);384 free( ip);368 test_base( ip, 0, libAlign ); 369 test_use( ip ); 370 free( ip ); 371 372 ip = resize( 0p, size ); 373 test_base( ip, size, libAlign ); 374 test_use( ip ); 375 free( ip ); 376 377 ip = resize( 0p, size ); 378 test_base( ip, size, libAlign ); 379 test_use( ip ); 380 free( ip ); 385 381 386 382 ip = calloc( dim ); 387 383 ip = realloc( ip, size / 4 ); 388 test_base( ip, size / 4, libAlign);389 test_fill( ip, 0, size / 4, '\0');390 test_use( ip);391 free( ip);384 test_base( ip, size / 4, libAlign ); 385 test_fill( ip, 0, size / 4, '\0' ); 386 test_use( ip ); 387 free( ip ); 392 388 393 389 ip = calloc( dim ); 394 390 ip = realloc( ip, size * 4 ); 395 test_base( ip, size * 4, libAlign);396 test_fill( ip, 0, size, '\0');397 test_use( ip);398 free( ip);391 test_base( ip, size * 4, libAlign ); 392 test_fill( ip, 0, size, '\0' ); 393 test_use( ip ); 394 free( ip ); 399 395 400 396 ip = calloc( dim ); 401 397 ip = realloc( ip, 0 ); 402 test_base( ip, 0, libAlign);403 test_use( ip);404 free( ip);405 406 ip = realloc( (int*)0p, size );407 test_base( ip, size , libAlign);408 test_use( ip);409 free( ip);410 411 ip = realloc( (int*)0p, size );412 test_base( ip, size, libAlign);413 test_use( ip);414 free( ip);398 test_base( 
ip, 0, libAlign ); 399 test_use( ip ); 400 free( ip ); 401 402 ip = realloc( 0p, size ); 403 test_base( ip, size , libAlign ); 404 test_use( ip ); 405 free( ip ); 406 407 ip = realloc( 0p, size ); 408 test_base( ip, size, libAlign ); 409 test_use( ip ); 410 free( ip ); 415 411 416 412 ip = memalign( align ); 417 test_base( ip, elemSize, align);418 test_use( ip);419 free( ip);413 test_base( ip, elemSize, align ); 414 test_use( ip ); 415 free( ip ); 420 416 421 417 ip = amemalign( align, dim ); 422 test_base( ip, size, align);423 test_use( ip);424 free( ip);418 test_base( ip, size, align ); 419 test_use( ip ); 420 free( ip ); 425 421 426 422 ip = amemalign( align, 0 ); 427 test_base( ip, 0, libAlign);428 test_use( ip);429 free( ip);423 test_base( ip, 0, libAlign ); 424 test_use( ip ); 425 free( ip ); 430 426 431 427 ip = cmemalign( align, dim ); 432 test_base( ip, size, align);433 test_fill( ip, 0, size, '\0');434 test_use( ip);435 free( ip);428 test_base( ip, size, align ); 429 test_fill( ip, 0, size, '\0' ); 430 test_use( ip ); 431 free( ip ); 436 432 437 433 ip = cmemalign( align, 0 ); 438 test_base( ip, 0, libAlign);439 test_use( ip);440 free( ip);434 test_base( ip, 0, libAlign ); 435 test_use( ip ); 436 free( ip ); 441 437 442 438 ip = aligned_alloc( align ); 443 test_base( ip, elemSize, align);444 test_use( ip);445 free( ip);446 447 (int)posix_memalign( (int **) &ip, align );448 test_base( ip, elemSize, align);449 test_use( ip);450 free( ip);439 test_base( ip, elemSize, align ); 440 test_use( ip ); 441 free( ip ); 442 443 posix_memalign( (int **) &ip, align ); 444 test_base( ip, elemSize, align ); 445 test_use( ip ); 446 free( ip ); 451 447 452 448 ip = valloc(); 453 test_base( ip, elemSize, getpagesize());454 test_use( ip);455 free( ip);449 test_base( ip, elemSize, getpagesize() ); 450 test_use( ip ); 451 free( ip ); 456 452 457 453 ip = pvalloc(); 458 test_base( ip, getpagesize(), getpagesize());459 test_use( ip);460 free( ip);461 462 if (tests_failed == 0) printf("PASSED CFA malloc tests\n\n");463 else printf("failed CFA malloc tests : %d/%d\n\n", tests_failed, tests_total);454 test_base( ip, getpagesize(), getpagesize() ); 455 test_use( ip ); 456 free( ip ); 457 458 if (tests_failed == 0) sout | "PASSED CFA malloc tests" | nl | nl; 459 else sout | "failed CFA malloc tests" | tests_failed | tests_total | nl | nl; 464 460 465 461 // testing CFA malloc with aligned struct … … 471 467 472 468 tp = malloc(); 473 test_base( tp, elemSize, tAlign);474 test_use( tp);475 free( tp);469 test_base( tp, elemSize, tAlign ); 470 test_use( tp ); 471 free( tp ); 476 472 477 473 tp = aalloc( dim ); 478 test_base( tp, size, tAlign);479 test_use( tp);480 free( tp);474 test_base( tp, size, tAlign ); 475 test_use( tp ); 476 free( tp ); 481 477 482 478 tp = aalloc( 0 ); 483 test_base( tp, 0, libAlign);484 test_use( tp);485 free( tp);479 test_base( tp, 0, libAlign ); 480 test_use( tp ); 481 free( tp ); 486 482 487 483 tp = calloc( dim ); 488 test_base( tp, size, tAlign);489 test_fill( tp, 0, size, '\0');490 test_use( tp);491 free( tp);484 test_base( tp, size, tAlign ); 485 test_fill( tp, 0, size, '\0' ); 486 test_use( tp ); 487 free( tp ); 492 488 493 489 tp = calloc( 0 ); 494 test_base( tp, 0, libAlign);495 test_use( tp);496 free( tp);490 test_base( tp, 0, libAlign ); 491 test_use( tp ); 492 free( tp ); 497 493 498 494 tp = aalloc( dim ); 499 495 tp = resize( tp, size / 4 ); 500 test_base( tp, size / 4, tAlign);501 test_use( tp);502 free( tp);496 test_base( tp, size / 4, tAlign ); 497 test_use( tp ); 498 
free( tp ); 503 499 504 500 tp = malloc(); 505 501 tp = resize( tp, size * 4 ); 506 test_base( tp, size * 4, tAlign);507 test_use( tp);508 free( tp);502 test_base( tp, size * 4, tAlign ); 503 test_use( tp ); 504 free( tp ); 509 505 510 506 tp = aalloc( dim ); 511 507 tp = resize( tp, 0 ); 512 test_base( tp, 0, libAlign);513 test_use( tp);514 free( tp);508 test_base( tp, 0, libAlign ); 509 test_use( tp ); 510 free( tp ); 515 511 516 512 tp = resize( (T1*)0p, size ); 517 test_base( tp, size, tAlign);518 test_use( tp);519 free( tp);513 test_base( tp, size, tAlign ); 514 test_use( tp ); 515 free( tp ); 520 516 521 517 tp = resize( (T1*)0p, size ); 522 test_base( tp, size, tAlign);523 test_use( tp);524 free( tp);518 test_base( tp, size, tAlign ); 519 test_use( tp ); 520 free( tp ); 525 521 526 522 tp = calloc( dim ); 527 523 tp = realloc( tp, size / 4 ); 528 test_base( tp, size / 4, tAlign);529 test_fill( tp, 0, size / 4, '\0');530 test_use( tp);531 free( tp);524 test_base( tp, size / 4, tAlign ); 525 test_fill( tp, 0, size / 4, '\0' ); 526 test_use( tp ); 527 free( tp ); 532 528 533 529 tp = calloc( dim ); 534 530 tp = realloc( tp, size * 4 ); 535 test_base( tp, size * 4, tAlign);536 test_fill( tp, 0, size, '\0');537 test_use( tp);538 free( tp);531 test_base( tp, size * 4, tAlign ); 532 test_fill( tp, 0, size, '\0' ); 533 test_use( tp ); 534 free( tp ); 539 535 540 536 tp = calloc( dim ); 541 537 tp = realloc( tp, 0 ); 542 test_base( tp, 0, libAlign);543 test_use( tp);544 free( tp);538 test_base( tp, 0, libAlign ); 539 test_use( tp ); 540 free( tp ); 545 541 546 542 tp = realloc( (T1*)0p, size ); 547 test_base( tp, size , tAlign);548 test_use( tp);549 free( tp);543 test_base( tp, size , tAlign ); 544 test_use( tp ); 545 free( tp ); 550 546 551 547 tp = realloc( (T1*)0p, size ); 552 test_base( tp, size, tAlign);553 test_use( tp);554 free( tp);548 test_base( tp, size, tAlign ); 549 test_use( tp ); 550 free( tp ); 555 551 556 552 tp = memalign( align ); 557 test_base( tp, elemSize, align);558 test_use( tp);559 free( tp);553 test_base( tp, elemSize, align ); 554 test_use( tp ); 555 free( tp ); 560 556 561 557 tp = amemalign( align, dim ); 562 test_base( tp, size, align);563 test_use( tp);564 free( tp);558 test_base( tp, size, align ); 559 test_use( tp ); 560 free( tp ); 565 561 566 562 tp = amemalign( align, 0 ); 567 test_base( tp, 0, libAlign);568 test_use( tp);569 free( tp);563 test_base( tp, 0, libAlign ); 564 test_use( tp ); 565 free( tp ); 570 566 571 567 tp = cmemalign( align, dim ); 572 test_base( tp, size, align);573 test_fill( tp, 0, size, '\0');574 test_use( tp);575 free( tp);568 test_base( tp, size, align ); 569 test_fill( tp, 0, size, '\0' ); 570 test_use( tp ); 571 free( tp ); 576 572 577 573 tp = cmemalign( align, 0 ); 578 test_base( tp, 0, libAlign);579 test_use( tp);580 free( tp);574 test_base( tp, 0, libAlign ); 575 test_use( tp ); 576 free( tp ); 581 577 582 578 tp = aligned_alloc( align ); 583 test_base( tp, elemSize, align);584 test_use( tp);585 free( tp);586 587 (int) posix_memalign( (T1 **)&tp, align );588 test_base( tp, elemSize, align);589 test_use( tp);590 free( tp);579 test_base( tp, elemSize, align ); 580 test_use( tp ); 581 free( tp ); 582 583 posix_memalign( (T1 **)&tp, align ); 584 test_base( tp, elemSize, align ); 585 test_use( tp ); 586 free( tp ); 591 587 592 588 tp = valloc(); 593 test_base( tp, elemSize, getpagesize());594 test_use( tp);595 free( tp);589 test_base( tp, elemSize, getpagesize() ); 590 test_use( tp ); 591 free( tp ); 596 592 597 593 tp = pvalloc(); 
598 test_base(tp, getpagesize(), getpagesize()); 599 test_use(tp); 600 free(tp); 601 602 if (tests_failed == 0) printf("PASSED CFA malloc tests (aligned struct)\n\n"); 603 else printf("failed CFA malloc tests (aligned struct) : %d/%d\n\n", tests_failed, tests_total); 604 605 return 0; 594 test_base( tp, getpagesize(), getpagesize() ); 595 test_use( tp ); 596 free( tp ); 597 598 if ( tests_failed == 0 ) sout | "PASSED CFA malloc tests (aligned struct)" | nl | nl; 599 else sout | "failed CFA malloc tests (aligned struct)" | tests_failed | tests_total | nl | nl; 606 600 } 607 601