Changeset c2b3243
- Timestamp:
- Oct 18, 2022, 9:13:33 PM (3 years ago)
- Branches:
- ADT, ast-experimental, master
- Children:
- 9511841
- Parents:
- 5408b59 (diff), ce7d197 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Files:
- 2 added
- 28 edited
libcfa/src/bits/locks.hfa
r5408b59 rc2b3243
  // Created On       : Tue Oct 31 15:14:38 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Mon Sep 19 18:51:53 2022
- // Update Count     : 17
+ // Last Modified On : Tue Sep 20 22:09:50 2022
+ // Update Count     : 18
  //
  
… …
  #ifndef NOEXPBACK
  	// exponential spin
- 	for ( volatile unsigned int s; 0 ~ spin ) Pause();
+ 	for ( volatile unsigned int s; 0 ~ spin ) Pause();
  
  	// slowly increase by powers of 2
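The locks.hfa hunk touches the exponential-backoff spin used while waiting for a lock. Below is a minimal plain-C sketch of that pattern; the constants SPIN_START/SPIN_END and the Pause() macro mirror the lock() routine added to heap.cfa later in this changeset, but the function name spin_lock_sketch and the use of C11 atomic_flag are illustrative, not the library's code.

#include <stdatomic.h>

#define Pause() __asm__ __volatile__ ( "pause" : : : )     // x86; ARM would use "YIELD"

// Acquire a test-and-set lock, backing off with an exponentially growing spin.
static void spin_lock_sketch( volatile atomic_flag * slock ) {
	enum { SPIN_START = 4, SPIN_END = 64 * 1024 };
	unsigned int spin = SPIN_START;
	while ( atomic_flag_test_and_set( slock ) ) {           // true => lock already held
		for ( volatile unsigned int s = 0; s < spin; s += 1 ) Pause(); // keep off the bus while waiting
		spin += spin;                                       // slowly increase by powers of 2
		if ( spin > SPIN_END ) spin = SPIN_END;             // cap spinning
	}
}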
libcfa/src/concurrency/kernel/fwd.hfa
r5408b59 rc2b3243
  // intented to be use by wait, wait_any, waitfor, etc. rather than used directly
  bool retract( future_t & this, oneshot & wait_ctx ) {
- 	struct oneshot * expected = this.ptr;
+ 	struct oneshot * expected = &wait_ctx;
  
  	// attempt to remove the context so it doesn't get consumed.
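The fwd.hfa change swaps the expected value used when retracting a waiter: the caller's own context rather than whatever the future currently points at. A hedged C sketch of that idea follows; future_t and oneshot are opaque stand-ins and the compare-and-swap call is an assumption, since the hunk shows only two lines of context.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct oneshot;                                            // opaque waiter context
struct future_t { _Atomic(struct oneshot *) ptr; };        // assumed layout, for illustration only

// Remove wait_ctx from the future only if it is still the registered waiter.
static bool retract_sketch( struct future_t * f, struct oneshot * wait_ctx ) {
	struct oneshot * expected = wait_ctx;                  // expect *our* context specifically
	struct oneshot * empty = NULL;
	return atomic_compare_exchange_strong( &f->ptr, &expected, empty );
	// Loading f->ptr as the expected value instead would accept whatever waiter is
	// currently registered, not necessarily the one being retracted.
}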
libcfa/src/concurrency/kernel/startup.cfa
r5408b59 rc2b3243
  
  
+ extern void heapManagerCtor();
+ extern void heapManagerDtor();
+ 
  //=============================================================================================
  // Kernel Setup logic
… …
  	proc->local_data = &__cfaabi_tls;
  
+ 	heapManagerCtor();									// initialize heap
+ 
  	__cfa_io_start( proc );
  	register_tls( proc );
… …
  	unregister_tls( proc );
  	__cfa_io_stop( proc );
+ 
+ 	heapManagerDtor();									// de-initialize heap
  
  	return 0p;
libcfa/src/concurrency/preemption.cfa
r5408b59 rc2b3243
  // available.
  
- //-----------------------------------------------------------------------------
- // Some assembly required
- #define __cfaasm_label(label, when) when: asm volatile goto(".global __cfaasm_" #label "_" #when "\n" "__cfaasm_" #label "_" #when ":":::"memory":when)
- 
  //----------
  // special case for preemption since used often
- __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_nopreempt libcfa_public {
- 	// create a assembler label before
- 	// marked as clobber all to avoid movement
- 	__cfaasm_label(check, before);
- 
+ bool __preemption_enabled() libcfa_nopreempt libcfa_public {
  	// access tls as normal
- 	bool enabled = __cfaabi_tls.preemption_state.enabled;
- 
- 	// Check if there is a pending preemption
- 	processor * proc = __cfaabi_tls.this_processor;
- 	bool pending = proc ? proc->pending_preemption : false;
- 	if( enabled && pending ) proc->pending_preemption = false;
- 
- 	// create a assembler label after
- 	// marked as clobber all to avoid movement
- 	__cfaasm_label(check, after);
- 
- 	// If we can preempt and there is a pending one
- 	// this is a good time to yield
- 	if( enabled && pending ) {
- 		force_yield( __POLL_PREEMPTION );
- 	}
- 	return enabled;
- }
- 
- struct asm_region {
- 	void * before;
- 	void * after;
- };
- 
- static inline bool __cfaasm_in( void * ip, struct asm_region & region ) {
- 	return ip >= region.before && ip <= region.after;
+ 	return __cfaabi_tls.preemption_state.enabled;
  }
  
… …
  uintptr_t __cfatls_get( unsigned long int offset ) libcfa_nopreempt libcfa_public; //no inline to avoid problems
  uintptr_t __cfatls_get( unsigned long int offset ) {
- 	// create a assembler label before
- 	// marked as clobber all to avoid movement
- 	__cfaasm_label(get, before);
- 
  	// access tls as normal (except for pointer arithmetic)
  	uintptr_t val = *(uintptr_t*)((uintptr_t)&__cfaabi_tls + offset);
  
- 	// create a assembler label after
- 	// marked as clobber all to avoid movement
- 	__cfaasm_label(get, after);
- 
  	// This is used everywhere, to avoid cost, we DO NOT poll pending preemption
  	return val;
… …
  extern "C" {
  	// Disable interrupts by incrementing the counter
- 	void disable_interrupts() libcfa_nopreempt libcfa_public {
- 		// create a assembler label before
- 		// marked as clobber all to avoid movement
- 		__cfaasm_label(dsable, before);
- 
- 		with( __cfaabi_tls.preemption_state ) {
- 			#if GCC_VERSION > 50000
- 			static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
- 			#endif
- 
- 			// Set enabled flag to false
- 			// should be atomic to avoid preemption in the middle of the operation.
- 			// use memory order RELAXED since there is no inter-thread on this variable requirements
- 			__atomic_store_n(&enabled, false, __ATOMIC_RELAXED);
- 
- 			// Signal the compiler that a fence is needed but only for signal handlers
- 			__atomic_signal_fence(__ATOMIC_ACQUIRE);
- 
- 			__attribute__((unused)) unsigned short new_val = disable_count + 1;
- 			disable_count = new_val;
- 			verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them
- 		}
- 
- 		// create a assembler label after
- 		// marked as clobber all to avoid movement
- 		__cfaasm_label(dsable, after);
- 
+ 	void disable_interrupts() libcfa_nopreempt libcfa_public with( __cfaabi_tls.preemption_state ) {
+ 		#if GCC_VERSION > 50000
+ 		static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
+ 		#endif
+ 
+ 		// Set enabled flag to false
+ 		// should be atomic to avoid preemption in the middle of the operation.
+ 		// use memory order RELAXED since there is no inter-thread on this variable requirements
+ 		__atomic_store_n(&enabled, false, __ATOMIC_RELAXED);
+ 
+ 		// Signal the compiler that a fence is needed but only for signal handlers
+ 		__atomic_signal_fence(__ATOMIC_ACQUIRE);
+ 
+ 		__attribute__((unused)) unsigned short new_val = disable_count + 1;
+ 		disable_count = new_val;
+ 		verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them
  	}
  
… …
  	// i.e. on a real processor and not in the kernel
  	// (can return true even if no preemption was pending)
- 	bool poll_interrupts() libcfa_public {
+ 	bool poll_interrupts() libcfa_nopreempt libcfa_public {
  		// Cache the processor now since interrupts can start happening after the atomic store
- 		processor * proc = publicTLS_get( this_processor );
+ 		processor * proc = __cfaabi_tls.this_processor;
  		if ( ! proc ) return false;
- 		if ( ! __preemption_enabled() ) return false;
- 
- 		with( __cfaabi_tls.preemption_state ){
- 			// Signal the compiler that a fence is needed but only for signal handlers
- 			__atomic_signal_fence(__ATOMIC_RELEASE);
- 			if( proc->pending_preemption ) {
- 				proc->pending_preemption = false;
- 				force_yield( __POLL_PREEMPTION );
- 			}
+ 		if ( ! __cfaabi_tls.preemption_state.enabled ) return false;
+ 
+ 		// Signal the compiler that a fence is needed but only for signal handlers
+ 		__atomic_signal_fence(__ATOMIC_RELEASE);
+ 		if( unlikely( proc->pending_preemption ) ) {
+ 			proc->pending_preemption = false;
+ 			force_yield( __POLL_PREEMPTION );
  		}
  
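For readers outside the CFA runtime, here is a compact C sketch of the disable/poll discipline that survives this cleanup; the thread-local names below are made up for illustration, and the real code relies on the libcfa_nopreempt attribute rather than the removed assembler labels.

#include <stdatomic.h>
#include <stdbool.h>

// Hypothetical per-thread preemption state, mirroring __cfaabi_tls.preemption_state.
static _Thread_local struct {
	volatile bool enabled;                       // is preemption currently allowed?
	unsigned short disable_count;                // nesting depth of disable_interrupts()
	volatile bool pending;                       // a preemption arrived while disabled
} preemption_state = { true, 0, false };

static void disable_interrupts_sketch( void ) {
	preemption_state.enabled = false;            // relaxed store: only this thread (and its signal handler) reads it
	atomic_signal_fence( memory_order_acquire ); // order against this thread's own signal handler
	preemption_state.disable_count += 1;
}

static bool poll_interrupts_sketch( void ) {
	if ( ! preemption_state.enabled ) return false;
	atomic_signal_fence( memory_order_release ); // make prior writes visible to the signal handler
	if ( preemption_state.pending ) {            // a timer signal was deferred while disabled
		preemption_state.pending = false;
		// the real runtime calls force_yield( __POLL_PREEMPTION ) here
		return true;
	}
	return false;
}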
libcfa/src/heap.cfa
r5408b59 rc2b3243 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Apr 29 19:05:03202213 // Update Count : 1 16712 // Last Modified On : Thu Oct 13 22:21:52 2022 13 // Update Count : 1557 14 14 // 15 15 16 #include <stdio.h> 16 17 #include <string.h> // memset, memcpy 17 18 #include <limits.h> // ULONG_MAX … … 21 22 #include <malloc.h> // memalign, malloc_usable_size 22 23 #include <sys/mman.h> // mmap, munmap 24 extern "C" { 23 25 #include <sys/sysinfo.h> // get_nprocs 26 } // extern "C" 24 27 25 28 #include "bits/align.hfa" // libAlign 26 29 #include "bits/defs.hfa" // likely, unlikely 27 #include " bits/locks.hfa" // __spinlock_t30 #include "concurrency/kernel/fwd.hfa" // __POLL_PREEMPTION 28 31 #include "startup.hfa" // STARTUP_PRIORITY_MEMORY 29 #include "math.hfa" // min32 #include "math.hfa" // ceiling, min 30 33 #include "bitmanip.hfa" // is_pow2, ceiling2 31 34 32 #define FASTLOOKUP 33 #define __STATISTICS__ 35 // supported mallopt options 36 #ifndef M_MMAP_THRESHOLD 37 #define M_MMAP_THRESHOLD (-1) 38 #endif // M_MMAP_THRESHOLD 39 40 #ifndef M_TOP_PAD 41 #define M_TOP_PAD (-2) 42 #endif // M_TOP_PAD 43 44 #define FASTLOOKUP // use O(1) table lookup from allocation size to bucket size 45 #define RETURNSPIN // toggle spinlock / lockfree stack 46 #define OWNERSHIP // return freed memory to owner thread 47 48 #define CACHE_ALIGN 64 49 #define CALIGN __attribute__(( aligned(CACHE_ALIGN) )) 50 51 #define TLSMODEL __attribute__(( tls_model("initial-exec") )) 52 53 //#define __STATISTICS__ 54 55 enum { 56 // The default extension heap amount in units of bytes. When the current heap reaches the brk address, the brk 57 // address is extended by the extension amount. 58 __CFA_DEFAULT_HEAP_EXPANSION__ = 10 * 1024 * 1024, 59 60 // The mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; values 61 // greater than or equal to this value are mmap from the operating system. 62 __CFA_DEFAULT_MMAP_START__ = 512 * 1024 + 1, 63 64 // The default unfreed storage amount in units of bytes. When the uC++ program ends it subtracts this amount from 65 // the malloc/free counter to adjust for storage the program does not free. 66 __CFA_DEFAULT_HEAP_UNFREED__ = 0 67 }; // enum 68 69 70 //####################### Heap Trace/Print #################### 34 71 35 72 … … 55 92 static bool prtFree = false; 56 93 57 staticbool prtFree() {94 bool prtFree() { 58 95 return prtFree; 59 96 } // prtFree 60 97 61 staticbool prtFreeOn() {98 bool prtFreeOn() { 62 99 bool temp = prtFree; 63 100 prtFree = true; … … 65 102 } // prtFreeOn 66 103 67 staticbool prtFreeOff() {104 bool prtFreeOff() { 68 105 bool temp = prtFree; 69 106 prtFree = false; … … 72 109 73 110 74 enum { 75 // The default extension heap amount in units of bytes. When the current heap reaches the brk address, the brk 76 // address is extended by the extension amount. 77 __CFA_DEFAULT_HEAP_EXPANSION__ = 10 * 1024 * 1024, 78 79 // The mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; values 80 // greater than or equal to this value are mmap from the operating system. 81 __CFA_DEFAULT_MMAP_START__ = 512 * 1024 + 1, 82 83 // The default unfreed storage amount in units of bytes. When the uC++ program ends it subtracts this amount from 84 // the malloc/free counter to adjust for storage the program does not free. 
85 __CFA_DEFAULT_HEAP_UNFREED__ = 0 86 }; // enum 111 //######################### Spin Lock ######################### 112 113 114 // pause to prevent excess processor bus usage 115 #if defined( __i386 ) || defined( __x86_64 ) 116 #define Pause() __asm__ __volatile__ ( "pause" : : : ) 117 #elif defined(__ARM_ARCH) 118 #define Pause() __asm__ __volatile__ ( "YIELD" : : : ) 119 #else 120 #error unsupported architecture 121 #endif 122 123 typedef volatile uintptr_t SpinLock_t CALIGN; // aligned addressable word-size 124 125 static inline __attribute__((always_inline)) void lock( volatile SpinLock_t & slock ) { 126 enum { SPIN_START = 4, SPIN_END = 64 * 1024, }; 127 unsigned int spin = SPIN_START; 128 129 for ( unsigned int i = 1;; i += 1 ) { 130 if ( slock == 0 && __atomic_test_and_set( &slock, __ATOMIC_SEQ_CST ) == 0 ) break; // Fence 131 for ( volatile unsigned int s = 0; s < spin; s += 1 ) Pause(); // exponential spin 132 spin += spin; // powers of 2 133 //if ( i % 64 == 0 ) spin += spin; // slowly increase by powers of 2 134 if ( spin > SPIN_END ) spin = SPIN_END; // cap spinning 135 } // for 136 } // spin_lock 137 138 static inline __attribute__((always_inline)) void unlock( volatile SpinLock_t & slock ) { 139 __atomic_clear( &slock, __ATOMIC_SEQ_CST ); // Fence 140 } // spin_unlock 87 141 88 142 … … 120 174 unsigned int free_calls, free_null_calls; 121 175 unsigned long long int free_storage_request, free_storage_alloc; 122 unsigned int away_pulls, away_pushes;123 unsigned long long int away_storage_request, away_storage_alloc;176 unsigned int return_pulls, return_pushes; 177 unsigned long long int return_storage_request, return_storage_alloc; 124 178 unsigned int mmap_calls, mmap_0_calls; // no zero calls 125 179 unsigned long long int mmap_storage_request, mmap_storage_alloc; … … 131 185 132 186 static_assert( sizeof(HeapStatistics) == CntTriples * sizeof(StatsOverlay), 133 187 "Heap statistics counter-triplets does not match with array size" ); 134 188 135 189 static void HeapStatisticsCtor( HeapStatistics & stats ) { … … 203 257 static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" ); 204 258 205 struct FreeHeader {206 size_t blockSize __attribute__(( aligned 259 struct __attribute__(( aligned (8) )) FreeHeader { 260 size_t blockSize __attribute__(( aligned(8) )); // size of allocations on this list 207 261 #if BUCKETLOCK == SPINLOCK 208 __spinlock_t lock; 209 Storage * freeList; 262 #ifdef OWNERSHIP 263 #ifdef RETURNSPIN 264 SpinLock_t returnLock; 265 #endif // RETURNSPIN 266 Storage * returnList; // other thread return list 267 #endif // OWNERSHIP 268 Storage * freeList; // thread free list 210 269 #else 211 270 StackLF(Storage) freeList; 212 271 #endif // BUCKETLOCK 213 } __attribute__(( aligned (8) )); // FreeHeader 272 Heap * homeManager; // heap owner (free storage to bucket, from bucket to heap) 273 }; // FreeHeader 214 274 215 275 FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes 216 217 __spinlock_t extlock; // protects allocation-buffer extension 218 void * heapBegin; // start of heap 219 void * heapEnd; // logical end of heap 220 size_t heapRemaining; // amount of storage not allocated in the current chunk 276 void * heapBuffer; // start of free storage in buffer 277 size_t heapReserve; // amount of remaining free storage in buffer 278 279 #if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ ) 280 Heap * nextHeapManager; // intrusive link of existing heaps; traversed to collect statistics or check unfreed 
storage 281 #endif // __STATISTICS__ || __CFA_DEBUG__ 282 Heap * nextFreeHeapManager; // intrusive link of free heaps from terminated threads; reused by new threads 283 284 #ifdef __CFA_DEBUG__ 285 int64_t allocUnfreed; // running total of allocations minus frees; can be negative 286 #endif // __CFA_DEBUG__ 287 288 #ifdef __STATISTICS__ 289 HeapStatistics stats; // local statistic table for this heap 290 #endif // __STATISTICS__ 221 291 }; // Heap 222 292 223 293 #if BUCKETLOCK == LOCKFREE 224 static inline { 294 inline __attribute__((always_inline)) 295 static { 225 296 Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; } 226 297 void ?{}( Heap.FreeHeader & ) {} … … 229 300 #endif // LOCKFREE 230 301 231 static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; } 302 303 struct HeapMaster { 304 SpinLock_t extLock; // protects allocation-buffer extension 305 SpinLock_t mgrLock; // protects freeHeapManagersList, heapManagersList, heapManagersStorage, heapManagersStorageEnd 306 307 void * heapBegin; // start of heap 308 void * heapEnd; // logical end of heap 309 size_t heapRemaining; // amount of storage not allocated in the current chunk 310 size_t pageSize; // architecture pagesize 311 size_t heapExpand; // sbrk advance 312 size_t mmapStart; // cross over point for mmap 313 unsigned int maxBucketsUsed; // maximum number of buckets in use 314 315 Heap * heapManagersList; // heap-list head 316 Heap * freeHeapManagersList; // free-list head 317 318 // Heap superblocks are not linked; heaps in superblocks are linked via intrusive links. 319 Heap * heapManagersStorage; // next heap to use in heap superblock 320 Heap * heapManagersStorageEnd; // logical heap outside of superblock's end 321 322 #ifdef __STATISTICS__ 323 HeapStatistics stats; // global stats for thread-local heaps to add there counters when exiting 324 unsigned long int threads_started, threads_exited; // counts threads that have started and exited 325 unsigned long int reused_heap, new_heap; // counts reusability of heaps 326 unsigned int sbrk_calls; 327 unsigned long long int sbrk_storage; 328 int stats_fd; 329 #endif // __STATISTICS__ 330 }; // HeapMaster 232 331 233 332 234 333 #ifdef FASTLOOKUP 235 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; 334 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes 236 335 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 237 336 #endif // FASTLOOKUP 238 337 239 static const off_t mmapFd = -1; // fake or actual fd for anonymous file 240 #ifdef __CFA_DEBUG__ 241 static bool heapBoot = 0; // detect recursion during boot 242 #endif // __CFA_DEBUG__ 338 static volatile bool heapMasterBootFlag = false; // trigger for first heap 339 static HeapMaster heapMaster @= {}; // program global 340 341 static void heapMasterCtor(); 342 static void heapMasterDtor(); 343 static Heap * getHeap(); 243 344 244 345 … … 268 369 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" ); 269 370 270 // The constructor for heapManager is called explicitly in memory_startup. 
271 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 371 372 // extern visibility, used by runtime kernel 373 libcfa_public size_t __page_size; // architecture pagesize 374 libcfa_public int __map_prot; // common mmap/mprotect protection 375 376 377 // Thread-local storage is allocated lazily when the storage is accessed. 378 static __thread size_t PAD1 CALIGN TLSMODEL __attribute__(( unused )); // protect false sharing 379 static __thread Heap * volatile heapManager CALIGN TLSMODEL; 380 static __thread size_t PAD2 CALIGN TLSMODEL __attribute__(( unused )); // protect further false sharing 381 382 383 // declare helper functions for HeapMaster 384 void noMemory(); // forward, called by "builtin_new" when malloc returns 0 385 386 387 // generic Bsearchl does not inline, so substitute with hand-coded binary-search. 388 inline __attribute__((always_inline)) 389 static size_t Bsearchl( unsigned int key, const unsigned int vals[], size_t dim ) { 390 size_t l = 0, m, h = dim; 391 while ( l < h ) { 392 m = (l + h) / 2; 393 if ( (unsigned int &)(vals[m]) < key ) { // cast away const 394 l = m + 1; 395 } else { 396 h = m; 397 } // if 398 } // while 399 return l; 400 } // Bsearchl 401 402 403 void heapMasterCtor() with( heapMaster ) { 404 // Singleton pattern to initialize heap master 405 406 verify( bucketSizes[0] == (16 + sizeof(Heap.Storage)) ); 407 408 __page_size = sysconf( _SC_PAGESIZE ); 409 __map_prot = PROT_READ | PROT_WRITE | PROT_EXEC; 410 411 ?{}( extLock ); 412 ?{}( mgrLock ); 413 414 char * end = (char *)sbrk( 0 ); 415 heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment 416 heapRemaining = 0; 417 heapExpand = malloc_expansion(); 418 mmapStart = malloc_mmap_start(); 419 420 // find the closest bucket size less than or equal to the mmapStart size 421 maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search 422 423 verify( (mmapStart >= pageSize) && (bucketSizes[NoBucketSizes - 1] >= mmapStart) ); 424 verify( maxBucketsUsed < NoBucketSizes ); // subscript failure ? 425 verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ? 426 427 heapManagersList = 0p; 428 freeHeapManagersList = 0p; 429 430 heapManagersStorage = 0p; 431 heapManagersStorageEnd = 0p; 432 433 #ifdef __STATISTICS__ 434 HeapStatisticsCtor( stats ); // clear statistic counters 435 threads_started = threads_exited = 0; 436 reused_heap = new_heap = 0; 437 sbrk_calls = sbrk_storage = 0; 438 stats_fd = STDERR_FILENO; 439 #endif // __STATISTICS__ 440 441 #ifdef FASTLOOKUP 442 for ( unsigned int i = 0, idx = 0; i < LookupSizes; i += 1 ) { 443 if ( i > bucketSizes[idx] ) idx += 1; 444 lookup[i] = idx; 445 verify( i <= bucketSizes[idx] ); 446 verify( (i <= 32 && idx == 0) || (i > bucketSizes[idx - 1]) ); 447 } // for 448 #endif // FASTLOOKUP 449 450 heapMasterBootFlag = true; 451 } // heapMasterCtor 452 453 454 #define NO_MEMORY_MSG "**** Error **** insufficient heap memory available to allocate %zd new bytes." 455 456 Heap * getHeap() with( heapMaster ) { 457 Heap * heap; 458 if ( freeHeapManagersList ) { // free heap for reused ? 
459 heap = freeHeapManagersList; 460 freeHeapManagersList = heap->nextFreeHeapManager; 461 462 #ifdef __STATISTICS__ 463 reused_heap += 1; 464 #endif // __STATISTICS__ 465 } else { // free heap not found, create new 466 // Heap size is about 12K, FreeHeader (128 bytes because of cache alignment) * NoBucketSizes (91) => 128 heaps * 467 // 12K ~= 120K byte superblock. Where 128-heap superblock handles a medium sized multi-processor server. 468 size_t remaining = heapManagersStorageEnd - heapManagersStorage; // remaining free heaps in superblock 469 if ( ! heapManagersStorage || remaining != 0 ) { 470 // Each block of heaps is a multiple of the number of cores on the computer. 471 int HeapDim = get_nprocs(); // get_nprocs_conf does not work 472 size_t size = HeapDim * sizeof( Heap ); 473 474 heapManagersStorage = (Heap *)mmap( 0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 ); 475 if ( unlikely( heapManagersStorage == (Heap *)MAP_FAILED ) ) { // failed ? 476 if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, size ); // no memory 477 // Do not call strerror( errno ) as it may call malloc. 478 abort( "**** Error **** attempt to allocate block of heaps of size %zu bytes and mmap failed with errno %d.", size, errno ); 479 } // if 480 heapManagersStorageEnd = &heapManagersStorage[HeapDim]; // outside array 481 } // if 482 483 heap = heapManagersStorage; 484 heapManagersStorage = heapManagersStorage + 1; // bump next heap 485 486 #if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ ) 487 heap->nextHeapManager = heapManagersList; 488 #endif // __STATISTICS__ || __CFA_DEBUG__ 489 heapManagersList = heap; 490 491 #ifdef __STATISTICS__ 492 new_heap += 1; 493 #endif // __STATISTICS__ 494 495 with( *heap ) { 496 for ( unsigned int j = 0; j < NoBucketSizes; j += 1 ) { // initialize free lists 497 #ifdef OWNERSHIP 498 #ifdef RETURNSPIN 499 ?{}( freeLists[j].returnLock ); 500 #endif // RETURNSPIN 501 freeLists[j].returnList = 0p; 502 #endif // OWNERSHIP 503 freeLists[j].freeList = 0p; 504 freeLists[j].homeManager = heap; 505 freeLists[j].blockSize = bucketSizes[j]; 506 } // for 507 508 heapBuffer = 0p; 509 heapReserve = 0; 510 nextFreeHeapManager = 0p; 511 #ifdef __CFA_DEBUG__ 512 allocUnfreed = 0; 513 #endif // __CFA_DEBUG__ 514 } // with 515 } // if 516 517 return heap; 518 } // getHeap 519 520 521 void heapManagerCtor() libcfa_public { 522 if ( unlikely( ! heapMasterBootFlag ) ) heapMasterCtor(); 523 524 lock( heapMaster.mgrLock ); // protect heapMaster counters 525 526 // get storage for heap manager 527 528 heapManager = getHeap(); 529 530 #ifdef __STATISTICS__ 531 HeapStatisticsCtor( heapManager->stats ); // heap local 532 heapMaster.threads_started += 1; 533 #endif // __STATISTICS__ 534 535 unlock( heapMaster.mgrLock ); 536 } // heapManagerCtor 537 538 539 void heapManagerDtor() libcfa_public { 540 lock( heapMaster.mgrLock ); 541 542 // place heap on list of free heaps for reusability 543 heapManager->nextFreeHeapManager = heapMaster.freeHeapManagersList; 544 heapMaster.freeHeapManagersList = heapManager; 545 546 #ifdef __STATISTICS__ 547 heapMaster.threads_exited += 1; 548 #endif // __STATISTICS__ 549 550 // Do not set heapManager to NULL because it is used after Cforall is shutdown but before the program shuts down. 
551 552 unlock( heapMaster.mgrLock ); 553 } // heapManagerDtor 272 554 273 555 274 556 //####################### Memory Allocation Routines Helpers #################### 275 557 276 277 #ifdef __CFA_DEBUG__278 static size_t allocUnfreed; // running total of allocations minus frees279 280 static void prtUnfreed() {281 if ( allocUnfreed != 0 ) {282 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.283 char helpText[512];284 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),285 "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n"286 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",287 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid288 } // if289 } // prtUnfreed290 558 291 559 extern int cfa_main_returned; // from interpose.cfa 292 560 extern "C" { 561 void memory_startup( void ) { 562 if ( ! heapMasterBootFlag ) heapManagerCtor(); // sanity check 563 } // memory_startup 564 565 void memory_shutdown( void ) { 566 heapManagerDtor(); 567 } // memory_shutdown 568 293 569 void heapAppStart() { // called by __cfaabi_appready_startup 294 allocUnfreed = 0; 570 verify( heapManager ); 571 #ifdef __CFA_DEBUG__ 572 heapManager->allocUnfreed = 0; // clear prior allocation counts 573 #endif // __CFA_DEBUG__ 574 575 #ifdef __STATISTICS__ 576 HeapStatisticsCtor( heapManager->stats ); // clear prior statistic counters 577 #endif // __STATISTICS__ 295 578 } // heapAppStart 296 579 297 580 void heapAppStop() { // called by __cfaabi_appready_startdown 298 fclose( stdin ); fclose( stdout ); 299 if ( cfa_main_returned ) prtUnfreed(); // do not check unfreed storage if exit called 581 fclose( stdin ); fclose( stdout ); // free buffer storage 582 if ( ! cfa_main_returned ) return; // do not check unfreed storage if exit called 583 584 #ifdef __CFA_DEBUG__ 585 // allocUnfreed is set to 0 when a heap is created and it accumulates any unfreed storage during its multiple thread 586 // usages. At the end, add up each heap allocUnfreed value across all heaps to get the total unfreed storage. 587 int64_t allocUnfreed = 0; 588 for ( Heap * heap = heapMaster.heapManagersList; heap; heap = heap->nextHeapManager ) { 589 allocUnfreed += heap->allocUnfreed; 590 } // for 591 592 allocUnfreed -= malloc_unfreed(); // subtract any user specified unfreed storage 593 if ( allocUnfreed > 0 ) { 594 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 
595 char helpText[512]; 596 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 597 "CFA warning (UNIX pid:%ld) : program terminating with %ju(0x%jx) bytes of storage allocated but not freed.\n" 598 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n", 599 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid 600 } // if 601 #endif // __CFA_DEBUG__ 300 602 } // heapAppStop 301 603 } // extern "C" 302 #endif // __CFA_DEBUG__303 604 304 605 305 606 #ifdef __STATISTICS__ 306 607 static HeapStatistics stats; // zero filled 307 static unsigned int sbrk_calls;308 static unsigned long long int sbrk_storage;309 // Statistics file descriptor (changed by malloc_stats_fd).310 static int stats_fd = STDERR_FILENO; // default stderr311 608 312 609 #define prtFmt \ … … 321 618 " realloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \ 322 619 " free !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n" \ 323 " sbrk calls %'u; storage %'llu bytes\n" \ 324 " mmap calls %'u; storage %'llu / %'llu bytes\n" \ 325 " munmap calls %'u; storage %'llu / %'llu bytes\n" \ 620 " return pulls %'u; pushes %'u; storage %'llu / %'llu bytes\n" \ 621 " sbrk calls %'u; storage %'llu bytes\n" \ 622 " mmap calls %'u; storage %'llu / %'llu bytes\n" \ 623 " munmap calls %'u; storage %'llu / %'llu bytes\n" \ 624 " threads started %'lu; exited %'lu\n" \ 625 " heaps new %'lu; reused %'lu\n" 326 626 327 627 // Use "write" because streams may be shutdown when calls are made. 328 static int printStats( ) {// see malloc_stats628 static int printStats( HeapStatistics & stats ) with( heapMaster, stats ) { // see malloc_stats 329 629 char helpText[sizeof(prtFmt) + 1024]; // space for message and values 330 return __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), prtFmt, 331 stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc, 332 stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc, 333 stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc, 334 stats.memalign_calls, stats.memalign_0_calls, stats.memalign_storage_request, stats.memalign_storage_alloc, 335 stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc, 336 stats.cmemalign_calls, stats.cmemalign_0_calls, stats.cmemalign_storage_request, stats.cmemalign_storage_alloc, 337 stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc, 338 stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc, 339 stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc, 630 return __cfaabi_bits_print_buffer( stats_fd, helpText, sizeof(helpText), prtFmt, 631 malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc, 632 aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc, 633 calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc, 634 memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc, 635 amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc, 636 cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc, 637 resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc, 638 
realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc, 639 free_calls, free_null_calls, free_storage_request, free_storage_alloc, 640 return_pulls, return_pushes, return_storage_request, return_storage_alloc, 340 641 sbrk_calls, sbrk_storage, 341 stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc, 342 stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc 642 mmap_calls, mmap_storage_request, mmap_storage_alloc, 643 munmap_calls, munmap_storage_request, munmap_storage_alloc, 644 threads_started, threads_exited, 645 new_heap, reused_heap 343 646 ); 344 647 } // printStats … … 358 661 "<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 359 662 "<total type=\"free\" !null=\"%'u;\" 0 null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 663 "<total type=\"return\" pulls=\"%'u;\" 0 pushes=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 360 664 "<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n" \ 361 665 "<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\" / > bytes\n" \ 362 666 "<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \ 667 "<total type=\"threads\" started=\"%'lu;\" exited=\"%'lu\"/>\n" \ 668 "<total type=\"heaps\" new=\"%'lu;\" reused=\"%'lu\"/>\n" \ 363 669 "</malloc>" 364 670 365 static int printStatsXML( FILE * stream ) {// see malloc_info671 static int printStatsXML( HeapStatistics & stats, FILE * stream ) with( heapMaster, stats ) { // see malloc_info 366 672 char helpText[sizeof(prtFmtXML) + 1024]; // space for message and values 367 673 return __cfaabi_bits_print_buffer( fileno( stream ), helpText, sizeof(helpText), prtFmtXML, 368 stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc, 369 stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc, 370 stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc, 371 stats.memalign_calls, stats.memalign_0_calls, stats.memalign_storage_request, stats.memalign_storage_alloc, 372 stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc, 373 stats.cmemalign_calls, stats.cmemalign_0_calls, stats.cmemalign_storage_request, stats.cmemalign_storage_alloc, 374 stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc, 375 stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc, 376 stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc, 674 malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc, 675 aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc, 676 calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc, 677 memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc, 678 amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc, 679 cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc, 680 resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc, 681 realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc, 682 free_calls, free_null_calls, free_storage_request, free_storage_alloc, 683 return_pulls, return_pushes, return_storage_request, return_storage_alloc, 377 684 sbrk_calls, sbrk_storage, 378 
stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc, 379 stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc 685 mmap_calls, mmap_storage_request, mmap_storage_alloc, 686 munmap_calls, munmap_storage_request, munmap_storage_alloc, 687 threads_started, threads_exited, 688 new_heap, reused_heap 380 689 ); 381 690 } // printStatsXML 691 692 static HeapStatistics & collectStats( HeapStatistics & stats ) with( heapMaster ) { 693 lock( mgrLock ); 694 695 stats += heapMaster.stats; 696 for ( Heap * heap = heapManagersList; heap; heap = heap->nextHeapManager ) { 697 stats += heap->stats; 698 } // for 699 700 unlock( mgrLock ); 701 return stats; 702 } // collectStats 382 703 #endif // __STATISTICS__ 383 704 384 705 385 // statically allocated variables => zero filled. 386 static size_t heapExpand; // sbrk advance 387 static size_t mmapStart; // cross over point for mmap 388 static unsigned int maxBucketsUsed; // maximum number of buckets in use 389 // extern visibility, used by runtime kernel 390 // would be cool to remove libcfa_public but it's needed for libcfathread 391 libcfa_public size_t __page_size; // architecture pagesize 392 libcfa_public int __map_prot; // common mmap/mprotect protection 393 394 395 // thunk problem 396 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) { 397 size_t l = 0, m, h = dim; 398 while ( l < h ) { 399 m = (l + h) / 2; 400 if ( (unsigned int &)(vals[m]) < key ) { // cast away const 401 l = m + 1; 402 } else { 403 h = m; 404 } // if 405 } // while 406 return l; 407 } // Bsearchl 408 409 410 static inline bool setMmapStart( size_t value ) { // true => mmapped, false => sbrk 706 static bool setMmapStart( size_t value ) with( heapMaster ) { // true => mmapped, false => sbrk 411 707 if ( value < __page_size || bucketSizes[NoBucketSizes - 1] < value ) return false; 412 708 mmapStart = value; // set global 413 709 414 710 // find the closest bucket size less than or equal to the mmapStart size 415 maxBucketsUsed = Bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search416 assert( maxBucketsUsed < NoBucketSizes ); // subscript failure ?417 assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?711 maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search 712 verify( maxBucketsUsed < NoBucketSizes ); // subscript failure ? 713 verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ? 418 714 return true; 419 715 } // setMmapStart … … 438 734 439 735 440 static inline void checkAlign( size_t alignment ) { 736 inline __attribute__((always_inline)) 737 static void checkAlign( size_t alignment ) { 441 738 if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) { 442 739 abort( "**** Error **** alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() ); … … 445 742 446 743 447 static inline void checkHeader( bool check, const char name[], void * addr ) { 744 inline __attribute__((always_inline)) 745 static void checkHeader( bool check, const char name[], void * addr ) { 448 746 if ( unlikely( check ) ) { // bad address ? 
449 747 abort( "**** Error **** attempt to %s storage %p with address outside the heap.\n" … … 470 768 471 769 472 static inline void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) { 770 inline __attribute__((always_inline)) 771 static void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) { 473 772 if ( unlikely( AlignmentBit( header ) ) ) { // fake header ? 474 773 alignment = ClearAlignmentBit( header ); // clear flag from value … … 483 782 484 783 485 static inline bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header, 486 Heap.FreeHeader *& freeHead, size_t & size, size_t & alignment ) with( heapManager ) { 784 inline __attribute__((always_inline)) 785 static bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header, 786 Heap.FreeHeader *& freeHead, size_t & size, size_t & alignment ) with( heapMaster, *heapManager ) { 487 787 header = HeaderAddr( addr ); 488 788 … … 509 809 checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -) 510 810 811 Heap * homeManager; 511 812 if ( unlikely( freeHead == 0p || // freed and only free-list node => null link 512 813 // freed and link points at another free block not to a bucket in the bucket array. 513 freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) ) { 814 (homeManager = freeHead->homeManager, freeHead < &homeManager->freeLists[0] || 815 &homeManager->freeLists[NoBucketSizes] <= freeHead ) ) ) { 514 816 abort( "**** Error **** attempt to %s storage %p with corrupted header.\n" 515 817 "Possible cause is duplicate free on same block or overwriting of header information.", … … 521 823 } // headers 522 824 523 // #ifdef __CFA_DEBUG__ 524 // #if __SIZEOF_POINTER__ == 4 525 // #define MASK 0xdeadbeef 526 // #else 527 // #define MASK 0xdeadbeefdeadbeef 528 // #endif 529 // #define STRIDE size_t 530 531 // static void * Memset( void * addr, STRIDE size ) { // debug only 532 // if ( size % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, size %zd not multiple of %zd.", size, sizeof(STRIDE) ); 533 // if ( (STRIDE)addr % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, addr %p not multiple of %zd.", addr, sizeof(STRIDE) ); 534 535 // STRIDE * end = (STRIDE *)addr + size / sizeof(STRIDE); 536 // for ( STRIDE * p = (STRIDE *)addr; p < end; p += 1 ) *p = MASK; 537 // return addr; 538 // } // Memset 539 // #endif // __CFA_DEBUG__ 540 541 542 #define NO_MEMORY_MSG "insufficient heap memory available for allocating %zd new bytes." 543 544 static inline void * extend( size_t size ) with( heapManager ) { 545 lock( extlock __cfaabi_dbg_ctx2 ); 825 826 static void * master_extend( size_t size ) with( heapMaster ) { 827 lock( extLock ); 546 828 547 829 ptrdiff_t rem = heapRemaining - size; … … 549 831 // If the size requested is bigger than the current remaining storage, increase the size of the heap. 550 832 551 size_t increase = ceiling2( size > heapExpand ? size : heapExpand, __page_size);833 size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() ); 552 834 // Do not call abort or strerror( errno ) as they may call malloc. 553 if ( sbrk( increase ) == (void *)-1 ) { // failed, no memory ? 
554 unlock( extlock ); 555 __cfaabi_bits_print_nolock( STDERR_FILENO, NO_MEMORY_MSG, size ); 556 _exit( EXIT_FAILURE ); // give up 835 if ( unlikely( sbrk( increase ) == (void *)-1 ) ) { // failed, no memory ? 836 unlock( extLock ); 837 abort( NO_MEMORY_MSG, size ); // no memory 557 838 } // if 558 839 559 840 // Make storage executable for thunks. 560 841 if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) { 561 unlock( extlock ); 562 __cfaabi_bits_print_nolock( STDERR_FILENO, "extend() : internal error, mprotect failure, heapEnd:%p size:%zd, errno:%d.\n", heapEnd, increase, errno ); 563 _exit( EXIT_FAILURE ); 564 } // if 842 unlock( extLock ); 843 abort( "**** Error **** attempt to make heap storage executable for thunks and mprotect failed with errno %d.", errno ); 844 } // if 845 846 rem = heapRemaining + increase - size; 565 847 566 848 #ifdef __STATISTICS__ … … 568 850 sbrk_storage += increase; 569 851 #endif // __STATISTICS__ 570 571 #ifdef __CFA_DEBUG__572 // Set new memory to garbage so subsequent uninitialized usages might fail.573 memset( (char *)heapEnd + heapRemaining, '\xde', increase );574 //Memset( (char *)heapEnd + heapRemaining, increase );575 #endif // __CFA_DEBUG__576 577 rem = heapRemaining + increase - size;578 852 } // if 579 853 … … 581 855 heapRemaining = rem; 582 856 heapEnd = (char *)heapEnd + size; 583 unlock( extlock ); 857 858 unlock( extLock ); 584 859 return block; 585 } // extend 586 587 588 static inline void * doMalloc( size_t size ) with( heapManager ) { 589 Heap.Storage * block; // pointer to new block of storage 860 } // master_extend 861 862 863 __attribute__(( noinline )) 864 static void * manager_extend( size_t size ) with( *heapManager ) { 865 ptrdiff_t rem = heapReserve - size; 866 867 if ( unlikely( rem < 0 ) ) { // negative 868 // If the size requested is bigger than the current remaining reserve, use the current reserve to populate 869 // smaller freeLists, and increase the reserve. 870 871 rem = heapReserve; // positive 872 873 if ( rem >= bucketSizes[0] ) { // minimal size ? otherwise ignore 874 size_t bucket; 875 #ifdef FASTLOOKUP 876 if ( likely( rem < LookupSizes ) ) bucket = lookup[rem]; 877 #endif // FASTLOOKUP 878 bucket = Bsearchl( rem, bucketSizes, heapMaster.maxBucketsUsed ); 879 verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed ); 880 Heap.FreeHeader * freeHead = &(freeLists[bucket]); 881 882 // The remaining storage many not be bucket size, whereas all other allocations are. Round down to previous 883 // bucket size in this case. 884 if ( unlikely( freeHead->blockSize > (size_t)rem ) ) freeHead -= 1; 885 Heap.Storage * block = (Heap.Storage *)heapBuffer; 886 887 block->header.kind.real.next = freeHead->freeList; // push on stack 888 freeHead->freeList = block; 889 } // if 890 891 size_t increase = ceiling( size > ( heapMaster.heapExpand / 10 ) ? size : ( heapMaster.heapExpand / 10 ), libAlign() ); 892 heapBuffer = master_extend( increase ); 893 rem = increase - size; 894 } // if 895 896 Heap.Storage * block = (Heap.Storage *)heapBuffer; 897 heapReserve = rem; 898 heapBuffer = (char *)heapBuffer + size; 899 900 return block; 901 } // manager_extend 902 903 904 #define BOOT_HEAP_MANAGER \ 905 if ( unlikely( ! 
heapMasterBootFlag ) ) { \ 906 heapManagerCtor(); /* trigger for first heap */ \ 907 } /* if */ 908 909 #ifdef __STATISTICS__ 910 #define STAT_NAME __counter 911 #define STAT_PARM , unsigned int STAT_NAME 912 #define STAT_ARG( name ) , name 913 #define STAT_0_CNT( counter ) stats.counters[counter].calls_0 += 1 914 #else 915 #define STAT_NAME 916 #define STAT_PARM 917 #define STAT_ARG( name ) 918 #define STAT_0_CNT( counter ) 919 #endif // __STATISTICS__ 920 921 #define PROLOG( counter, ... ) \ 922 BOOT_HEAP_MANAGER; \ 923 if ( unlikely( size == 0 ) || /* 0 BYTE ALLOCATION RETURNS NULL POINTER */ \ 924 unlikely( size > ULONG_MAX - sizeof(Heap.Storage) ) ) { /* error check */ \ 925 STAT_0_CNT( counter ); \ 926 __VA_ARGS__; \ 927 return 0p; \ 928 } /* if */ 929 930 931 #define SCRUB_SIZE 1024lu 932 // Do not use '\xfe' for scrubbing because dereferencing an address composed of it causes a SIGSEGV *without* a valid IP 933 // pointer in the interrupt frame. 934 #define SCRUB '\xff' 935 936 static void * doMalloc( size_t size STAT_PARM ) libcfa_nopreempt with( *heapManager ) { 937 PROLOG( STAT_NAME ); 938 939 verify( heapManager ); 940 Heap.Storage * block; // pointer to new block of storage 590 941 591 942 // Look up size in the size list. Make sure the user request includes space for the header that must be allocated 592 943 // along with the block and is a multiple of the alignment size. 593 594 944 size_t tsize = size + sizeof(Heap.Storage); 595 945 596 if ( likely( tsize < mmapStart ) ) { // small size => sbrk 597 size_t posn; 946 #ifdef __STATISTICS__ 947 stats.counters[STAT_NAME].calls += 1; 948 stats.counters[STAT_NAME].request += size; 949 #endif // __STATISTICS__ 950 951 #ifdef __CFA_DEBUG__ 952 allocUnfreed += size; 953 #endif // __CFA_DEBUG__ 954 955 if ( likely( tsize < heapMaster.mmapStart ) ) { // small size => sbrk 956 size_t bucket; 598 957 #ifdef FASTLOOKUP 599 if ( tsize < LookupSizes ) posn= lookup[tsize];958 if ( likely( tsize < LookupSizes ) ) bucket = lookup[tsize]; 600 959 else 601 960 #endif // FASTLOOKUP 602 posn = Bsearchl( (unsigned int)tsize, bucketSizes, (size_t)maxBucketsUsed ); 603 Heap.FreeHeader * freeElem = &freeLists[posn]; 604 verify( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ? 605 verify( tsize <= freeElem->blockSize ); // search failure ? 606 tsize = freeElem->blockSize; // total space needed for request 961 bucket = Bsearchl( tsize, bucketSizes, heapMaster.maxBucketsUsed ); 962 verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed ); 963 Heap.FreeHeader * freeHead = &freeLists[bucket]; 964 965 verify( freeHead <= &freeLists[heapMaster.maxBucketsUsed] ); // subscripting error ? 966 verify( tsize <= freeHead->blockSize ); // search failure ? 967 968 tsize = freeHead->blockSize; // total space needed for request 969 #ifdef __STATISTICS__ 970 stats.counters[STAT_NAME].alloc += tsize; 971 #endif // __STATISTICS__ 607 972 608 973 // Spin until the lock is acquired for this particular size of block. 609 974 610 975 #if BUCKETLOCK == SPINLOCK 611 lock( freeElem->lock __cfaabi_dbg_ctx2 ); 612 block = freeElem->freeList; // remove node from stack 976 block = freeHead->freeList; // remove node from stack 613 977 #else 614 block = pop( free Elem->freeList );978 block = pop( freeHead->freeList ); 615 979 #endif // BUCKETLOCK 616 980 if ( unlikely( block == 0p ) ) { // no free block ? 981 #ifdef OWNERSHIP 982 // Freelist for that size is empty, so carve it out of the heap, if there is enough left, or get some more 983 // and then carve it off. 
984 #ifdef RETURNSPIN 617 985 #if BUCKETLOCK == SPINLOCK 618 unlock( freeElem->lock ); 986 lock( freeHead->returnLock ); 987 block = freeHead->returnList; 988 freeHead->returnList = 0p; 989 unlock( freeHead->returnLock ); 990 #else 991 block = __atomic_exchange_n( &freeHead->returnList, nullptr, __ATOMIC_SEQ_CST ); 992 #endif // RETURNSPIN 993 994 if ( likely( block == 0p ) ) { // return list also empty? 995 #endif // OWNERSHIP 996 // Do not leave kernel thread as manager_extend accesses heapManager. 997 disable_interrupts(); 998 block = (Heap.Storage *)manager_extend( tsize ); // mutual exclusion on call 999 enable_interrupts( false ); 1000 1001 // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED. 1002 1003 #ifdef __CFA_DEBUG__ 1004 // Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first 1024 bytes. 1005 memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) ); 1006 #endif // __CFA_DEBUG__ 619 1007 #endif // BUCKETLOCK 620 621 // Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more 622 // and then carve it off. 623 624 block = (Heap.Storage *)extend( tsize ); // mutual exclusion on call 625 #if BUCKETLOCK == SPINLOCK 1008 #ifdef OWNERSHIP 1009 } else { // merge returnList into freeHead 1010 #ifdef __STATISTICS__ 1011 stats.return_pulls += 1; 1012 #endif // __STATISTICS__ 1013 1014 // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED. 1015 1016 freeHead->freeList = block->header.kind.real.next; 1017 } // if 1018 #endif // OWNERSHIP 626 1019 } else { 627 freeElem->freeList = block->header.kind.real.next; 628 unlock( freeElem->lock ); 629 #endif // BUCKETLOCK 630 } // if 631 632 block->header.kind.real.home = freeElem; // pointer back to free list of apropriate size 1020 // Memory is scrubbed in doFree. 1021 freeHead->freeList = block->header.kind.real.next; 1022 } // if 1023 1024 block->header.kind.real.home = freeHead; // pointer back to free list of apropriate size 633 1025 } else { // large size => mmap 634 1026 if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p; 635 1027 tsize = ceiling2( tsize, __page_size ); // must be multiple of page size 636 1028 #ifdef __STATISTICS__ 637 __atomic_add_fetch( &stats.mmap_calls, 1, __ATOMIC_SEQ_CST ); 638 __atomic_add_fetch( &stats.mmap_storage_request, size, __ATOMIC_SEQ_CST ); 639 __atomic_add_fetch( &stats.mmap_storage_alloc, tsize, __ATOMIC_SEQ_CST ); 1029 stats.counters[STAT_NAME].alloc += tsize; 1030 stats.mmap_calls += 1; 1031 stats.mmap_storage_request += size; 1032 stats.mmap_storage_alloc += tsize; 640 1033 #endif // __STATISTICS__ 641 1034 642 block = (Heap.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 ); 643 if ( block == (Heap.Storage *)MAP_FAILED ) { // failed ? 1035 disable_interrupts(); 1036 block = (Heap.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 ); 1037 enable_interrupts( false ); 1038 1039 // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED. 1040 1041 if ( unlikely( block == (Heap.Storage *)MAP_FAILED ) ) { // failed ? 644 1042 if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory 645 1043 // Do not call strerror( errno ) as it may call malloc. 
646 abort( "(Heap &)0x%p.doMalloc() : internal error, mmap failure, size:%zu errno:%d.", &heapManager, tsize, errno ); 647 } //if 1044 abort( "**** Error **** attempt to allocate large object (> %zu) of size %zu bytes and mmap failed with errno %d.", size, heapMaster.mmapStart, errno ); 1045 } // if 1046 block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap 1047 648 1048 #ifdef __CFA_DEBUG__ 649 // S et new memory to garbage so subsequent uninitialized usages might fail.650 memset( block, '\xde', tsize );651 //Memset( block, tsize);1049 // Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first 1024 bytes. The rest of 1050 // the storage set to 0 by mmap. 1051 memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) ); 652 1052 #endif // __CFA_DEBUG__ 653 block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap654 1053 } // if 655 1054 … … 659 1058 660 1059 #ifdef __CFA_DEBUG__ 661 __atomic_add_fetch( &allocUnfreed, tsize, __ATOMIC_SEQ_CST );662 1060 if ( traceHeap() ) { 663 1061 char helpText[64]; … … 667 1065 #endif // __CFA_DEBUG__ 668 1066 1067 // poll_interrupts(); // call rollforward 1068 669 1069 return addr; 670 1070 } // doMalloc 671 1071 672 1072 673 static inline void doFree( void * addr ) with( heapManager ) { 1073 static void doFree( void * addr ) libcfa_nopreempt with( *heapManager ) { 1074 verify( addr ); 1075 1076 // detect free after thread-local storage destruction and use global stats in that case 1077 1078 Heap.Storage.Header * header; 1079 Heap.FreeHeader * freeHead; 1080 size_t size, alignment; 1081 1082 bool mapped = headers( "free", addr, header, freeHead, size, alignment ); 1083 #if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ ) 1084 size_t rsize = header->kind.real.size; // optimization 1085 #endif // __STATISTICS__ || __CFA_DEBUG__ 1086 1087 #ifdef __STATISTICS__ 1088 stats.free_storage_request += rsize; 1089 stats.free_storage_alloc += size; 1090 #endif // __STATISTICS__ 1091 674 1092 #ifdef __CFA_DEBUG__ 675 if ( unlikely( heapManager.heapBegin == 0p ) ) { 676 abort( "doFree( %p ) : internal error, called before heap is initialized.", addr ); 677 } // if 1093 allocUnfreed -= rsize; 678 1094 #endif // __CFA_DEBUG__ 679 1095 680 Heap.Storage.Header * header; 681 Heap.FreeHeader * freeElem; 682 size_t size, alignment; // not used (see realloc) 683 684 if ( headers( "free", addr, header, freeElem, size, alignment ) ) { // mmapped ? 1096 if ( unlikely( mapped ) ) { // mmapped ? 685 1097 #ifdef __STATISTICS__ 686 __atomic_add_fetch( &stats.munmap_calls, 1, __ATOMIC_SEQ_CST );687 __atomic_add_fetch( &stats.munmap_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST );688 __atomic_add_fetch( &stats.munmap_storage_alloc, size, __ATOMIC_SEQ_CST );1098 stats.munmap_calls += 1; 1099 stats.munmap_storage_request += rsize; 1100 stats.munmap_storage_alloc += size; 689 1101 #endif // __STATISTICS__ 690 if ( munmap( header, size ) == -1 ) { 691 abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n" 692 "Possible cause is invalid pointer.", 693 addr ); 1102 1103 // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED. 1104 1105 // Does not matter where this storage is freed. 1106 if ( unlikely( munmap( header, size ) == -1 ) ) { 1107 // Do not call strerror( errno ) as it may call malloc. 
1108 abort( "**** Error **** attempt to deallocate large object %p and munmap failed with errno %d.\n" 1109 "Possible cause is invalid delete pointer: either not allocated or with corrupt header.", 1110 addr, errno ); 694 1111 } // if 695 1112 } else { 696 1113 #ifdef __CFA_DEBUG__ 697 // Set free memory to garbage so subsequent usages might fail. 698 memset( ((Heap.Storage *)header)->data, '\xde', freeElem->blockSize - sizeof( Heap.Storage ) ); 699 //Memset( ((Heap.Storage *)header)->data, freeElem->blockSize - sizeof( Heap.Storage ) ); 1114 // memset is NOT always inlined! 1115 disable_interrupts(); 1116 // Scrub old memory so subsequent usages might fail. Only scrub the first/last SCRUB_SIZE bytes. 1117 char * data = ((Heap.Storage *)header)->data; // data address 1118 size_t dsize = size - sizeof(Heap.Storage); // data size 1119 if ( dsize <= SCRUB_SIZE * 2 ) { 1120 memset( data, SCRUB, dsize ); // scrub all 1121 } else { 1122 memset( data, SCRUB, SCRUB_SIZE ); // scrub front 1123 memset( data + dsize - SCRUB_SIZE, SCRUB, SCRUB_SIZE ); // scrub back 1124 } // if 1125 enable_interrupts( false ); 700 1126 #endif // __CFA_DEBUG__ 701 1127 702 #ifdef __STATISTICS__ 703 __atomic_add_fetch( &stats.free_calls, 1, __ATOMIC_SEQ_CST ); 704 __atomic_add_fetch( &stats.free_storage_request, header->kind.real.size, __ATOMIC_SEQ_CST ); 705 __atomic_add_fetch( &stats.free_storage_alloc, size, __ATOMIC_SEQ_CST ); 706 #endif // __STATISTICS__ 707 708 #if BUCKETLOCK == SPINLOCK 709 lock( freeElem->lock __cfaabi_dbg_ctx2 ); // acquire spin lock 710 header->kind.real.next = freeElem->freeList; // push on stack 711 freeElem->freeList = (Heap.Storage *)header; 712 unlock( freeElem->lock ); // release spin lock 713 #else 714 push( freeElem->freeList, *(Heap.Storage *)header ); 715 #endif // BUCKETLOCK 1128 if ( likely( heapManager == freeHead->homeManager ) ) { // belongs to this thread 1129 header->kind.real.next = freeHead->freeList; // push on stack 1130 freeHead->freeList = (Heap.Storage *)header; 1131 } else { // return to thread owner 1132 verify( heapManager ); 1133 1134 #ifdef OWNERSHIP 1135 #ifdef RETURNSPIN 1136 lock( freeHead->returnLock ); 1137 header->kind.real.next = freeHead->returnList; // push to bucket return list 1138 freeHead->returnList = (Heap.Storage *)header; 1139 unlock( freeHead->returnLock ); 1140 #else // lock free 1141 header->kind.real.next = freeHead->returnList; // link new node to top node 1142 // CAS resets header->kind.real.next = freeHead->returnList on failure 1143 while ( ! __atomic_compare_exchange_n( &freeHead->returnList, &header->kind.real.next, header, 1144 false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ); 1145 #endif // RETURNSPIN 1146 1147 #else // no OWNERSHIP 1148 1149 freeHead = &heap->freeLists[ClearStickyBits( header->kind.real.home ) - &freeHead->homeManager->freeLists[0]]; 1150 header->kind.real.next = freeHead->freeList; // push on stack 1151 freeHead->freeList = (Heap.Storage *)header; 1152 #endif // ! OWNERSHIP 1153 1154 #ifdef __U_STATISTICS__ 1155 stats.return_pushes += 1; 1156 stats.return_storage_request += rsize; 1157 stats.return_storage_alloc += size; 1158 #endif // __U_STATISTICS__ 1159 1160 // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED. 
1161 } // if 716 1162 } // if 717 1163 718 1164 #ifdef __CFA_DEBUG__ 719 __atomic_add_fetch( &allocUnfreed, -size, __ATOMIC_SEQ_CST );720 1165 if ( traceHeap() ) { 721 1166 char helpText[64]; … … 724 1169 } // if 725 1170 #endif // __CFA_DEBUG__ 1171 1172 // poll_interrupts(); // call rollforward 726 1173 } // doFree 727 1174 728 1175 729 s tatic size_t prtFree( Heap & manager ) with( manager ) {1176 size_t prtFree( Heap & manager ) with( manager ) { 730 1177 size_t total = 0; 731 1178 #ifdef __STATISTICS__ … … 733 1180 __cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" ); 734 1181 #endif // __STATISTICS__ 735 for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {1182 for ( unsigned int i = 0; i < heapMaster.maxBucketsUsed; i += 1 ) { 736 1183 size_t size = freeLists[i].blockSize; 737 1184 #ifdef __STATISTICS__ … … 764 1211 __cfaabi_bits_release(); 765 1212 #endif // __STATISTICS__ 766 return (char *)heap End - (char *)heapBegin - total;1213 return (char *)heapMaster.heapEnd - (char *)heapMaster.heapBegin - total; 767 1214 } // prtFree 768 1215 769 1216 770 static void ?{}( Heap & manager ) with( manager ) { 771 __page_size = sysconf( _SC_PAGESIZE ); 772 __map_prot = PROT_READ | PROT_WRITE | PROT_EXEC; 773 774 for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists 775 freeLists[i].blockSize = bucketSizes[i]; 776 } // for 777 778 #ifdef FASTLOOKUP 779 unsigned int idx = 0; 780 for ( unsigned int i = 0; i < LookupSizes; i += 1 ) { 781 if ( i > bucketSizes[idx] ) idx += 1; 782 lookup[i] = idx; 783 } // for 784 #endif // FASTLOOKUP 785 786 if ( ! setMmapStart( malloc_mmap_start() ) ) { 787 abort( "Heap : internal error, mmap start initialization failure." ); 788 } // if 789 heapExpand = malloc_expansion(); 790 791 char * end = (char *)sbrk( 0 ); 792 heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, __page_size ) - end ); // move start of heap to multiple of alignment 793 } // Heap 794 795 796 static void ^?{}( Heap & ) { 797 #ifdef __STATISTICS__ 798 if ( traceHeapTerm() ) { 799 printStats(); 800 // prtUnfreed() called in heapAppStop() 801 } // if 802 #endif // __STATISTICS__ 803 } // ~Heap 804 805 806 static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) )); 807 void memory_startup( void ) { 808 #ifdef __CFA_DEBUG__ 809 if ( heapBoot ) { // check for recursion during system boot 810 abort( "boot() : internal error, recursively invoked during system boot." ); 811 } // if 812 heapBoot = true; 813 #endif // __CFA_DEBUG__ 814 815 //verify( heapManager.heapBegin != 0 ); 816 //heapManager{}; 817 if ( heapManager.heapBegin == 0p ) heapManager{}; // sanity check 818 } // memory_startup 819 820 static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) )); 821 void memory_shutdown( void ) { 822 ^heapManager{}; 823 } // memory_shutdown 824 825 826 static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics 827 verify( heapManager.heapBegin != 0p ); // called before memory_startup ? 
828 if ( unlikely( size ) == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 829 830 #if __SIZEOF_POINTER__ == 8 831 verify( size < ((typeof(size_t))1 << 48) ); 832 #endif // __SIZEOF_POINTER__ == 8 833 return doMalloc( size ); 834 } // mallocNoStats 835 836 837 static inline void * memalignNoStats( size_t alignment, size_t size ) { 838 if ( unlikely( size ) == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 839 840 #ifdef __CFA_DEBUG__ 1217 #ifdef __STATISTICS__ 1218 static void incCalls( intptr_t statName ) libcfa_nopreempt { 1219 heapManager->stats.counters[statName].calls += 1; 1220 } // incCalls 1221 1222 static void incZeroCalls( intptr_t statName ) libcfa_nopreempt { 1223 heapManager->stats.counters[statName].calls_0 += 1; 1224 } // incZeroCalls 1225 #endif // __STATISTICS__ 1226 1227 #ifdef __CFA_DEBUG__ 1228 static void incUnfreed( intptr_t offset ) libcfa_nopreempt { 1229 heapManager->allocUnfreed += offset; 1230 } // incUnfreed 1231 #endif // __CFA_DEBUG__ 1232 1233 1234 static void * memalignNoStats( size_t alignment, size_t size STAT_PARM ) { 841 1235 checkAlign( alignment ); // check alignment 842 #endif // __CFA_DEBUG__ 843 844 // if alignment <= default alignment, do normal malloc as two headers are unnecessary 845 if ( unlikely( alignment <= libAlign() ) ) return mallocNoStats( size ); 1236 1237 // if alignment <= default alignment or size == 0, do normal malloc as two headers are unnecessary 1238 if ( unlikely( alignment <= libAlign() || size == 0 ) ) return doMalloc( size STAT_ARG( STAT_NAME ) ); 846 1239 847 1240 // Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for … … 854 1247 // subtract libAlign() because it is already the minimum alignment 855 1248 // add sizeof(Storage) for fake header 856 char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(Heap.Storage) ); 1249 size_t offset = alignment - libAlign() + sizeof(Heap.Storage); 1250 char * addr = (char *)doMalloc( size + offset STAT_ARG( STAT_NAME ) ); 857 1251 858 1252 // address in the block of the "next" alignment address … … 860 1254 861 1255 // address of header from malloc 862 Heap.Storage.Header * RealHeader = HeaderAddr( addr ); 863 RealHeader->kind.real.size = size; // correct size to eliminate above alignment offset 864 // address of fake header * before* the alignment location 1256 Heap.Storage.Header * realHeader = HeaderAddr( addr ); 1257 realHeader->kind.real.size = size; // correct size to eliminate above alignment offset 1258 #ifdef __CFA_DEBUG__ 1259 incUnfreed( -offset ); // adjustment off the offset from call to doMalloc 1260 #endif // __CFA_DEBUG__ 1261 1262 // address of fake header *before* the alignment location 865 1263 Heap.Storage.Header * fakeHeader = HeaderAddr( user ); 1264 866 1265 // SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment 867 fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *) RealHeader;1266 fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader; 868 1267 // SKULLDUGGERY: odd alignment implies fake header 869 1268 fakeHeader->kind.fake.alignment = MarkAlignmentBit( alignment ); … … 880 1279 // then malloc() returns a unique pointer value that can later be successfully passed to free(). 
881 1280 void * malloc( size_t size ) libcfa_public { 882 #ifdef __STATISTICS__ 883 if ( likely( size > 0 ) ) { 884 __atomic_add_fetch( &stats.malloc_calls, 1, __ATOMIC_SEQ_CST ); 885 __atomic_add_fetch( &stats.malloc_storage_request, size, __ATOMIC_SEQ_CST ); 886 } else { 887 __atomic_add_fetch( &stats.malloc_0_calls, 1, __ATOMIC_SEQ_CST ); 888 } // if 889 #endif // __STATISTICS__ 890 891 return mallocNoStats( size ); 1281 return doMalloc( size STAT_ARG( MALLOC ) ); 892 1282 } // malloc 893 1283 … … 895 1285 // Same as malloc() except size bytes is an array of dim elements each of elemSize bytes. 896 1286 void * aalloc( size_t dim, size_t elemSize ) libcfa_public { 897 size_t size = dim * elemSize; 898 #ifdef __STATISTICS__ 899 if ( likely( size > 0 ) ) { 900 __atomic_add_fetch( &stats.aalloc_calls, 1, __ATOMIC_SEQ_CST ); 901 __atomic_add_fetch( &stats.aalloc_storage_request, size, __ATOMIC_SEQ_CST ); 902 } else { 903 __atomic_add_fetch( &stats.aalloc_0_calls, 1, __ATOMIC_SEQ_CST ); 904 } // if 905 #endif // __STATISTICS__ 906 907 return mallocNoStats( size ); 1287 return doMalloc( dim * elemSize STAT_ARG( AALLOC ) ); 908 1288 } // aalloc 909 1289 … … 912 1292 void * calloc( size_t dim, size_t elemSize ) libcfa_public { 913 1293 size_t size = dim * elemSize; 914 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER 915 #ifdef __STATISTICS__ 916 __atomic_add_fetch( &stats.calloc_0_calls, 1, __ATOMIC_SEQ_CST ); 917 #endif // __STATISTICS__ 918 return 0p; 919 } // if 920 #ifdef __STATISTICS__ 921 __atomic_add_fetch( &stats.calloc_calls, 1, __ATOMIC_SEQ_CST ); 922 __atomic_add_fetch( &stats.calloc_storage_request, dim * elemSize, __ATOMIC_SEQ_CST ); 923 #endif // __STATISTICS__ 924 925 char * addr = (char *)mallocNoStats( size ); 1294 char * addr = (char *)doMalloc( size STAT_ARG( CALLOC ) ); 1295 1296 if ( unlikely( addr == NULL ) ) return NULL; // stop further processing if 0p is returned 926 1297 927 1298 Heap.Storage.Header * header; 928 Heap.FreeHeader * free Elem;1299 Heap.FreeHeader * freeHead; 929 1300 size_t bsize, alignment; 930 1301 … … 932 1303 bool mapped = 933 1304 #endif // __CFA_DEBUG__ 934 headers( "calloc", addr, header, free Elem, bsize, alignment );1305 headers( "calloc", addr, header, freeHead, bsize, alignment ); 935 1306 936 1307 #ifndef __CFA_DEBUG__ 937 1308 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 938 if ( ! mapped)1309 if ( likely( ! mapped ) ) 939 1310 #endif // __CFA_DEBUG__ 940 1311 // <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined … … 952 1323 // call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done. 953 1324 void * resize( void * oaddr, size_t size ) libcfa_public { 954 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
955 if ( unlikely( size == 0 ) ) { // special cases 956 #ifdef __STATISTICS__ 957 __atomic_add_fetch( &stats.resize_0_calls, 1, __ATOMIC_SEQ_CST ); 958 #endif // __STATISTICS__ 959 free( oaddr ); 960 return 0p; 961 } // if 962 #ifdef __STATISTICS__ 963 __atomic_add_fetch( &stats.resize_calls, 1, __ATOMIC_SEQ_CST ); 964 #endif // __STATISTICS__ 965 966 if ( unlikely( oaddr == 0p ) ) { 967 #ifdef __STATISTICS__ 968 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 969 #endif // __STATISTICS__ 970 return mallocNoStats( size ); 971 } // if 1325 if ( unlikely( oaddr == 0p ) ) { // => malloc( size ) 1326 return doMalloc( size STAT_ARG( RESIZE ) ); 1327 } // if 1328 1329 PROLOG( RESIZE, doFree( oaddr ) ); // => free( oaddr ) 972 1330 973 1331 Heap.Storage.Header * header; 974 Heap.FreeHeader * free Elem;1332 Heap.FreeHeader * freeHead; 975 1333 size_t bsize, oalign; 976 headers( "resize", oaddr, header, free Elem, bsize, oalign );1334 headers( "resize", oaddr, header, freeHead, bsize, oalign ); 977 1335 978 1336 size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket … … 980 1338 if ( oalign == libAlign() && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size 981 1339 ClearZeroFillBit( header ); // no alignment and turn off 0 fill 1340 #ifdef __CFA_DEBUG__ 1341 incUnfreed( size - header->kind.real.size ); // adjustment off the size difference 1342 #endif // __CFA_DEBUG__ 982 1343 header->kind.real.size = size; // reset allocation size 1344 #ifdef __STATISTICS__ 1345 incCalls( RESIZE ); 1346 #endif // __STATISTICS__ 983 1347 return oaddr; 984 1348 } // if 985 1349 986 #ifdef __STATISTICS__987 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST );988 #endif // __STATISTICS__989 990 1350 // change size, DO NOT preserve STICKY PROPERTIES. 991 free( oaddr ); 992 return mallocNoStats( size ); // create new area 1351 doFree( oaddr ); // free previous storage 1352 1353 return doMalloc( size STAT_ARG( RESIZE ) ); // create new area 993 1354 } // resize 994 1355 … … 997 1358 // the old and new sizes. 998 1359 void * realloc( void * oaddr, size_t size ) libcfa_public { 999 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
1000 if ( unlikely( size == 0 ) ) { // special cases 1001 #ifdef __STATISTICS__ 1002 __atomic_add_fetch( &stats.realloc_0_calls, 1, __ATOMIC_SEQ_CST ); 1003 #endif // __STATISTICS__ 1004 free( oaddr ); 1005 return 0p; 1006 } // if 1007 #ifdef __STATISTICS__ 1008 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 1009 #endif // __STATISTICS__ 1010 1011 if ( unlikely( oaddr == 0p ) ) { 1012 #ifdef __STATISTICS__ 1013 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 1014 #endif // __STATISTICS__ 1015 return mallocNoStats( size ); 1016 } // if 1360 if ( unlikely( oaddr == 0p ) ) { // => malloc( size ) 1361 return doMalloc( size STAT_ARG( REALLOC ) ); 1362 } // if 1363 1364 PROLOG( REALLOC, doFree( oaddr ) ); // => free( oaddr ) 1017 1365 1018 1366 Heap.Storage.Header * header; 1019 Heap.FreeHeader * free Elem;1367 Heap.FreeHeader * freeHead; 1020 1368 size_t bsize, oalign; 1021 headers( "realloc", oaddr, header, free Elem, bsize, oalign );1369 headers( "realloc", oaddr, header, freeHead, bsize, oalign ); 1022 1370 1023 1371 size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket … … 1025 1373 bool ozfill = ZeroFillBit( header ); // old allocation zero filled 1026 1374 if ( unlikely( size <= odsize ) && odsize <= size * 2 ) { // allow up to 50% wasted storage 1027 header->kind.real.size = size; // reset allocation size 1375 #ifdef __CFA_DEBUG__ 1376 incUnfreed( size - header->kind.real.size ); // adjustment off the size difference 1377 #endif // __CFA_DEBUG__ 1378 header->kind.real.size = size; // reset allocation size 1028 1379 if ( unlikely( ozfill ) && size > osize ) { // previous request zero fill and larger ? 1029 1380 memset( (char *)oaddr + osize, '\0', size - osize ); // initialize added storage 1030 1381 } // if 1382 #ifdef __STATISTICS__ 1383 incCalls( REALLOC ); 1384 #endif // __STATISTICS__ 1031 1385 return oaddr; 1032 1386 } // if 1033 1387 1034 #ifdef __STATISTICS__1035 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST );1036 #endif // __STATISTICS__1037 1038 1388 // change size and copy old content to new storage 1039 1389 1040 1390 void * naddr; 1041 if ( likely( oalign == libAlign() ) ) { // previous request not aligned ?1042 naddr = mallocNoStats( size );// create new area1391 if ( likely( oalign <= libAlign() ) ) { // previous request not aligned ? 1392 naddr = doMalloc( size STAT_ARG( REALLOC ) ); // create new area 1043 1393 } else { 1044 naddr = memalignNoStats( oalign, size ); // create new aligned area 1045 } // if 1046 1047 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 1394 naddr = memalignNoStats( oalign, size STAT_ARG( REALLOC ) ); // create new aligned area 1395 } // if 1396 1397 headers( "realloc", naddr, header, freeHead, bsize, oalign ); 1398 // To preserve prior fill, the entire bucket must be copied versus the size. 1048 1399 memcpy( naddr, oaddr, min( osize, size ) ); // copy bytes 1049 free( oaddr );1400 doFree( oaddr ); // free previous storage 1050 1401 1051 1402 if ( unlikely( ozfill ) ) { // previous request zero fill ? … … 1067 1418 // Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. 
(obsolete) 1068 1419 void * memalign( size_t alignment, size_t size ) libcfa_public { 1069 #ifdef __STATISTICS__ 1070 if ( likely( size > 0 ) ) { 1071 __atomic_add_fetch( &stats.memalign_calls, 1, __ATOMIC_SEQ_CST ); 1072 __atomic_add_fetch( &stats.memalign_storage_request, size, __ATOMIC_SEQ_CST ); 1073 } else { 1074 __atomic_add_fetch( &stats.memalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1075 } // if 1076 #endif // __STATISTICS__ 1077 1078 return memalignNoStats( alignment, size ); 1420 return memalignNoStats( alignment, size STAT_ARG( MEMALIGN ) ); 1079 1421 } // memalign 1080 1422 … … 1082 1424 // Same as aalloc() with memory alignment. 1083 1425 void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public { 1084 size_t size = dim * elemSize; 1085 #ifdef __STATISTICS__ 1086 if ( likely( size > 0 ) ) { 1087 __atomic_add_fetch( &stats.cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 1088 __atomic_add_fetch( &stats.cmemalign_storage_request, size, __ATOMIC_SEQ_CST ); 1089 } else { 1090 __atomic_add_fetch( &stats.cmemalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1091 } // if 1092 #endif // __STATISTICS__ 1093 1094 return memalignNoStats( alignment, size ); 1426 return memalignNoStats( alignment, dim * elemSize STAT_ARG( AMEMALIGN ) ); 1095 1427 } // amemalign 1096 1428 … … 1099 1431 void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public { 1100 1432 size_t size = dim * elemSize; 1101 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER 1102 #ifdef __STATISTICS__ 1103 __atomic_add_fetch( &stats.cmemalign_0_calls, 1, __ATOMIC_SEQ_CST ); 1104 #endif // __STATISTICS__ 1105 return 0p; 1106 } // if 1107 #ifdef __STATISTICS__ 1108 __atomic_add_fetch( &stats.cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 1109 __atomic_add_fetch( &stats.cmemalign_storage_request, dim * elemSize, __ATOMIC_SEQ_CST ); 1110 #endif // __STATISTICS__ 1111 1112 char * addr = (char *)memalignNoStats( alignment, size ); 1433 char * addr = (char *)memalignNoStats( alignment, size STAT_ARG( CMEMALIGN ) ); 1434 1435 if ( unlikely( addr == NULL ) ) return NULL; // stop further processing if 0p is returned 1113 1436 1114 1437 Heap.Storage.Header * header; 1115 Heap.FreeHeader * free Elem;1438 Heap.FreeHeader * freeHead; 1116 1439 size_t bsize; 1117 1440 … … 1119 1442 bool mapped = 1120 1443 #endif // __CFA_DEBUG__ 1121 headers( "cmemalign", addr, header, free Elem, bsize, alignment );1444 headers( "cmemalign", addr, header, freeHead, bsize, alignment ); 1122 1445 1123 1446 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. … … 1169 1492 // 0p, no operation is performed. 
1170 1493 void free( void * addr ) libcfa_public { 1494 // verify( heapManager ); 1495 1171 1496 if ( unlikely( addr == 0p ) ) { // special case 1172 1497 #ifdef __STATISTICS__ 1173 __atomic_add_fetch( &stats.free_null_calls, 1, __ATOMIC_SEQ_CST ); 1498 if ( heapManager ) 1499 incZeroCalls( FREE ); 1174 1500 #endif // __STATISTICS__ 1175 1176 // #ifdef __CFA_DEBUG__1177 // if ( traceHeap() ) {1178 // #define nullmsg "Free( 0x0 ) size:0\n"1179 // // Do not debug print free( 0p ), as it can cause recursive entry from sprintf.1180 // __cfaabi_dbg_write( nullmsg, sizeof(nullmsg) - 1 );1181 // } // if1182 // #endif // __CFA_DEBUG__1183 1501 return; 1184 } // exit 1185 1186 doFree( addr ); 1502 } // if 1503 1504 #ifdef __STATISTICS__ 1505 incCalls( FREE ); 1506 #endif // __STATISTICS__ 1507 1508 doFree( addr ); // handles heapManager == nullptr 1187 1509 } // free 1188 1510 … … 1227 1549 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size 1228 1550 Heap.Storage.Header * header; 1229 Heap.FreeHeader * free Elem;1551 Heap.FreeHeader * freeHead; 1230 1552 size_t bsize, alignment; 1231 1553 1232 headers( "malloc_usable_size", addr, header, free Elem, bsize, alignment );1554 headers( "malloc_usable_size", addr, header, freeHead, bsize, alignment ); 1233 1555 return DataStorage( bsize, addr, header ); // data storage in bucket 1234 1556 } // malloc_usable_size … … 1238 1560 void malloc_stats( void ) libcfa_public { 1239 1561 #ifdef __STATISTICS__ 1240 printStats(); 1241 if ( prtFree() ) prtFree( heapManager ); 1562 HeapStatistics stats; 1563 HeapStatisticsCtor( stats ); 1564 if ( printStats( collectStats( stats ) ) == -1 ) { 1565 #else 1566 #define MALLOC_STATS_MSG "malloc_stats statistics disabled.\n" 1567 if ( write( STDERR_FILENO, MALLOC_STATS_MSG, sizeof( MALLOC_STATS_MSG ) - 1 /* size includes '\0' */ ) == -1 ) { 1242 1568 #endif // __STATISTICS__ 1569 abort( "**** Error **** write failed in malloc_stats" ); 1570 } // if 1243 1571 } // malloc_stats 1244 1572 … … 1247 1575 int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public { 1248 1576 #ifdef __STATISTICS__ 1249 int temp = stats_fd;1250 stats_fd = fd;1577 int temp = heapMaster.stats_fd; 1578 heapMaster.stats_fd = fd; 1251 1579 return temp; 1252 1580 #else … … 1262 1590 if ( options != 0 ) { errno = EINVAL; return -1; } 1263 1591 #ifdef __STATISTICS__ 1264 return printStatsXML( stream ); 1592 HeapStatistics stats; 1593 HeapStatisticsCtor( stats ); 1594 return printStatsXML( collectStats( stats ), stream ); // returns bytes written or -1 1265 1595 #else 1266 1596 return 0; // unsupported … … 1275 1605 choose( option ) { 1276 1606 case M_TOP_PAD: 1277 heap Expand = ceiling2( value, __page_size );1607 heapMaster.heapExpand = ceiling2( value, __page_size ); 1278 1608 return 1; 1279 1609 case M_MMAP_THRESHOLD: … … 1319 1649 // Must have CFA linkage to overload with C linkage realloc. 1320 1650 void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public { 1321 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
1322 if ( unlikely( size == 0 ) ) { // special cases 1323 #ifdef __STATISTICS__ 1324 __atomic_add_fetch( &stats.resize_0_calls, 1, __ATOMIC_SEQ_CST ); 1325 #endif // __STATISTICS__ 1326 free( oaddr ); 1327 return 0p; 1651 if ( unlikely( oaddr == 0p ) ) { // => malloc( size ) 1652 return memalignNoStats( nalign, size STAT_ARG( RESIZE ) ); 1328 1653 } // if 1329 1654 1330 if ( unlikely( nalign < libAlign() ) ) nalign = libAlign(); // reset alignment to minimum 1331 #ifdef __CFA_DEBUG__ 1332 else checkAlign( nalign ); // check alignment 1333 #endif // __CFA_DEBUG__ 1334 1335 if ( unlikely( oaddr == 0p ) ) { 1336 #ifdef __STATISTICS__ 1337 __atomic_add_fetch( &stats.resize_calls, 1, __ATOMIC_SEQ_CST ); 1338 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST ); 1339 #endif // __STATISTICS__ 1340 return memalignNoStats( nalign, size ); 1341 } // if 1655 PROLOG( RESIZE, doFree( oaddr ) ); // => free( oaddr ) 1342 1656 1343 1657 // Attempt to reuse existing alignment. … … 1347 1661 1348 1662 if ( unlikely( isFakeHeader ) ) { 1663 checkAlign( nalign ); // check alignment 1349 1664 oalign = ClearAlignmentBit( header ); // old alignment 1350 1665 if ( unlikely( (uintptr_t)oaddr % nalign == 0 // lucky match ? … … 1353 1668 ) ) { 1354 1669 HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same) 1355 Heap.FreeHeader * free Elem;1670 Heap.FreeHeader * freeHead; 1356 1671 size_t bsize, oalign; 1357 headers( "resize", oaddr, header, free Elem, bsize, oalign );1672 headers( "resize", oaddr, header, freeHead, bsize, oalign ); 1358 1673 size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket 1359 1674 … … 1361 1676 HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same) 1362 1677 ClearZeroFillBit( header ); // turn off 0 fill 1678 #ifdef __CFA_DEBUG__ 1679 incUnfreed( size - header->kind.real.size ); // adjustment off the size difference 1680 #endif // __CFA_DEBUG__ 1363 1681 header->kind.real.size = size; // reset allocation size 1682 #ifdef __STATISTICS__ 1683 incCalls( RESIZE ); 1684 #endif // __STATISTICS__ 1364 1685 return oaddr; 1365 1686 } // if … … 1370 1691 } // if 1371 1692 1372 #ifdef __STATISTICS__1373 __atomic_add_fetch( &stats.resize_storage_request, size, __ATOMIC_SEQ_CST );1374 #endif // __STATISTICS__1375 1376 1693 // change size, DO NOT preserve STICKY PROPERTIES. 1377 free( oaddr );1378 return memalignNoStats( nalign, size );// create new aligned area1694 doFree( oaddr ); // free previous storage 1695 return memalignNoStats( nalign, size STAT_ARG( RESIZE ) ); // create new aligned area 1379 1696 } // resize 1380 1697 1381 1698 1382 1699 void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public { 1383 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
1384 if ( unlikely( size == 0 ) ) { // special cases 1385 #ifdef __STATISTICS__ 1386 __atomic_add_fetch( &stats.realloc_0_calls, 1, __ATOMIC_SEQ_CST ); 1387 #endif // __STATISTICS__ 1388 free( oaddr ); 1389 return 0p; 1700 if ( unlikely( oaddr == 0p ) ) { // => malloc( size ) 1701 return memalignNoStats( nalign, size STAT_ARG( REALLOC ) ); 1390 1702 } // if 1391 1703 1392 if ( unlikely( nalign < libAlign() ) ) nalign = libAlign(); // reset alignment to minimum 1393 #ifdef __CFA_DEBUG__ 1394 else checkAlign( nalign ); // check alignment 1395 #endif // __CFA_DEBUG__ 1396 1397 if ( unlikely( oaddr == 0p ) ) { 1398 #ifdef __STATISTICS__ 1399 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 1400 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 1401 #endif // __STATISTICS__ 1402 return memalignNoStats( nalign, size ); 1403 } // if 1704 PROLOG( REALLOC, doFree( oaddr ) ); // => free( oaddr ) 1404 1705 1405 1706 // Attempt to reuse existing alignment. … … 1408 1709 size_t oalign; 1409 1710 if ( unlikely( isFakeHeader ) ) { 1711 checkAlign( nalign ); // check alignment 1410 1712 oalign = ClearAlignmentBit( header ); // old alignment 1411 1713 if ( unlikely( (uintptr_t)oaddr % nalign == 0 // lucky match ? … … 1421 1723 } // if 1422 1724 1423 #ifdef __STATISTICS__ 1424 __atomic_add_fetch( &stats.realloc_calls, 1, __ATOMIC_SEQ_CST ); 1425 __atomic_add_fetch( &stats.realloc_storage_request, size, __ATOMIC_SEQ_CST ); 1426 #endif // __STATISTICS__ 1427 1428 Heap.FreeHeader * freeElem; 1725 Heap.FreeHeader * freeHead; 1429 1726 size_t bsize; 1430 headers( "realloc", oaddr, header, free Elem, bsize, oalign );1727 headers( "realloc", oaddr, header, freeHead, bsize, oalign ); 1431 1728 1432 1729 // change size and copy old content to new storage … … 1435 1732 bool ozfill = ZeroFillBit( header ); // old allocation zero filled 1436 1733 1437 void * naddr = memalignNoStats( nalign, size );// create new aligned area1438 1439 headers( "realloc", naddr, header, free Elem, bsize, oalign );1734 void * naddr = memalignNoStats( nalign, size STAT_ARG( REALLOC ) ); // create new aligned area 1735 1736 headers( "realloc", naddr, header, freeHead, bsize, oalign ); 1440 1737 memcpy( naddr, oaddr, min( osize, size ) ); // copy bytes 1441 free( oaddr );1738 doFree( oaddr ); // free previous storage 1442 1739 1443 1740 if ( unlikely( ozfill ) ) { // previous request zero fill ? … … 1451 1748 1452 1749 1750 void * reallocarray( void * oaddr, size_t nalign, size_t dim, size_t elemSize ) __THROW { 1751 return realloc( oaddr, nalign, dim * elemSize ); 1752 } // reallocarray 1753 1754 1453 1755 // Local Variables: // 1454 1756 // tab-width: 4 // -
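The doFree change above gives each free-list bucket two stacks: a private freeList touched only by the owning thread, and a returnList that other threads push onto, either under a spin lock (RETURNSPIN) or with a lock-free compare-and-swap push. A minimal stand-alone sketch of that lock-free push, in plain C with the GCC atomic builtins; the node and stack types here are illustrative, not the heap's actual Heap.Storage structures:

    #include <stdbool.h>

    struct node { struct node * next; };
    struct stack { struct node * top; };                    // Treiber-style LIFO

    // Push one node; safe against concurrent pushers.
    // On failure, __atomic_compare_exchange_n copies the current top back into
    // n->next, so the loop retries with a refreshed link (the same behaviour the
    // "CAS resets header->kind.real.next" comment in doFree relies on).
    static void lockfree_push( struct stack * s, struct node * n ) {
        n->next = s->top;                                   // link node to current top
        while ( ! __atomic_compare_exchange_n( &s->top, &n->next, n,
                    false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) );
    }

Only the cross-thread producer side is shown; presumably the owning thread later detaches the whole returnList in one step and splices it back onto its private freeList, where no CAS is needed.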
libcfa/src/heap.hfa
r5408b59 rc2b3243 10 10 // Created On : Tue May 26 11:23:55 2020 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T hu Apr 21 22:52:25 202213 // Update Count : 2 112 // Last Modified On : Tue Oct 4 19:08:55 2022 13 // Update Count : 23 14 14 // 15 15 … … 30 30 bool checkFreeOff(); 31 31 32 // supported mallopt options33 #ifndef M_MMAP_THRESHOLD34 #define M_MMAP_THRESHOLD (-1)35 #endif // M_MMAP_THRESHOLD36 37 #ifndef M_TOP_PAD38 #define M_TOP_PAD (-2)39 #endif // M_TOP_PAD40 41 32 extern "C" { 42 33 // New allocation operations. … … 49 40 size_t malloc_size( void * addr ); 50 41 int malloc_stats_fd( int fd ); 51 size_t malloc_usable_size( void * addr );52 42 size_t malloc_expansion(); // heap expansion size (bytes) 53 43 size_t malloc_mmap_start(); // crossover allocation size from sbrk to mmap -
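The heap.hfa diff drops its fallback definitions of the mallopt option constants (presumably they are now taken from <malloc.h>) and keeps the query routines that pair with the mallopt handling in the heap.cfa diff above, where M_TOP_PAD is rounded up to a multiple of the page size and stored as the sbrk expansion amount. A hedged usage sketch in plain C; the extern declarations simply restate the heap.hfa prototypes:

    #include <malloc.h>                          // mallopt, M_TOP_PAD
    #include <stdio.h>

    extern size_t malloc_expansion( void );      // heap expansion size (bytes)
    extern size_t malloc_mmap_start( void );     // crossover allocation size from sbrk to mmap

    int main( void ) {
        // Request roughly 1 MiB of sbrk growth per heap extension; mallopt returns 1
        // on success and the allocator rounds the value up to a page multiple.
        if ( mallopt( M_TOP_PAD, 1024 * 1024 ) != 1 ) return 1;   // option rejected

        printf( "expansion %zu, mmap crossover %zu\n",
                malloc_expansion(), malloc_mmap_start() );        // observe current settings
        return 0;
    }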
libcfa/src/parseargs.cfa
r5408b59 rc2b3243 1 // 2 // Cforall Version 1.0.0 Copyright (C) 2022 University of Waterloo 3 // 4 // The contents of this file are covered under the licence agreement in the 5 // file "LICENCE" distributed with Cforall. 6 // 7 // parseargs.cfa 8 // implementation of arguments parsing (argc, argv) 9 // 10 // Author : Thierry Delisle 11 // Created On : Wed Oct 12 15:28:01 2022 12 // Last Modified By : 13 // Last Modified On : 14 // Update Count : 15 // 16 1 17 #include "parseargs.hfa" 2 18 19 #include <assert.h> 3 20 #include <ctype.h> 4 21 #include <stdint.h> … … 146 163 } 147 164 165 static inline int next_newline(const char * str) { 166 int ret; 167 const char * ptr = strstr(str, "\n"); 168 if(!ptr) return MAX; 169 170 /* paranoid */ verify( str <= ptr); 171 intptr_t low = (intptr_t)str; 172 intptr_t hi = (intptr_t)ptr; 173 ret = hi - low; 174 175 return ret; 176 } 177 148 178 //----------------------------------------------------------------------------- 149 179 // Print usage 150 180 static void printopt(FILE * out, int width, int max, char sn, const char * ln, const char * help) { 181 // check how wide we should be printing 182 // this includes all options and the help message 151 183 int hwidth = max - (11 + width); 152 184 if(hwidth <= 0) hwidth = max; 153 185 154 char sname[4] = { ' ', ' ', ' ', '\0' }; 155 if(sn != '\0') { 156 sname[0] = '-'; 157 sname[1] = sn; 158 sname[2] = ','; 159 } 160 161 fprintf(out, " %s --%-*s %.*s\n", sname, width, ln, hwidth, help); 162 for() { 163 help += min(strlen(help), hwidth); 164 if('\0' == *help) break; 165 fprintf(out, "%*s%.*s\n", width + 11, "", hwidth, help); 166 } 186 // check which pieces we have 187 bool has_ln = ln && strcmp("", ln); 188 bool has_help = help && strcmp("", help); 189 190 // print the small name if present 191 if(sn != '\0') fprintf(out, " -%c", sn); 192 else fprintf(out, " "); 193 194 // print a comma if we have both short and long names 195 if(sn != '\0' && has_ln) fprintf(out, ", "); 196 else fprintf(out, " "); 197 198 // print the long name if present 199 if(has_ln) fprintf(out, "--%-*s", width, ln); 200 else if(has_help) fprintf(out, " %-*s", width, ""); 201 202 if(has_help) { 203 // print the help 204 // We need to wrap at the max width, and also indent newlines so everything is nice and pretty 205 206 // for each line to print 207 for() { 208 //find out if there is a newline 209 int nextnl = next_newline(help); 210 int real = min(min(strlen(help), hwidth), nextnl); 211 212 fprintf(out, " %.*s", real, help); 213 // printf("%d %d\n", real, nextnl); 214 help += real; 215 if( nextnl == real ) help++; 216 if('\0' == *help) break; 217 fprintf(out, "\n%*s", width + 8, ""); 218 } 219 } 220 fprintf(out, "\n"); 167 221 } 168 222 -
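The rewritten printopt prints the short and long option names column-aligned, then wraps the help text at the remaining width, restarting the wrap at any embedded newline (found by next_newline) and indenting every continuation line under the help column. A reduced plain-C sketch of just that wrap-and-indent loop; the function and parameter names are illustrative, not the parseargs API:

    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    // Print 'help' wrapped at 'hwidth' columns; continuation lines are indented
    // by 'indent' spaces, and an embedded '\n' forces an early break.
    static void print_wrapped( FILE * out, const char * help, int hwidth, int indent ) {
        for ( ;; ) {
            const char * nl = strchr( help, '\n' );
            int to_nl = nl ? (int)(nl - help) : INT_MAX;    // distance to the next newline
            int len = (int)strlen( help );
            int real = len < hwidth ? len : hwidth;         // min( strlen, width ) ...
            if ( to_nl < real ) real = to_nl;               // ... and the newline wins

            fprintf( out, "%.*s", real, help );
            help += real;
            if ( to_nl == real ) help += 1;                 // consume the newline itself
            if ( *help == '\0' ) break;
            fprintf( out, "\n%*s", indent, "" );            // indent the next chunk
        }
        fprintf( out, "\n" );
    }

For example, print_wrapped( stdout, "wraps at the width\nand at newlines", 12, 8 ) breaks once at the 12-column width and once at the explicit newline, the two cases the new printopt handles.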
libcfa/src/parseargs.hfa
r5408b59 rc2b3243 1 // 2 // Cforall Version 1.0.0 Copyright (C) 2022 University of Waterloo 3 // 4 // The contents of this file are covered under the licence agreement in the 5 // file "LICENCE" distributed with Cforall. 6 // 7 // parseargs.cfa -- PUBLIC 8 // API for arguments parsing (argc, argv) 9 // 10 // Author : Thierry Delisle 11 // Created On : Wed Oct 12 15:28:01 2022 12 // Last Modified By : 13 // Last Modified On : 14 // Update Count : 15 // 1 16 #pragma once 2 17 -
libcfa/src/startup.cfa
r5408b59 rc2b3243 10 10 // Created On : Tue Jul 24 16:21:57 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 17 16:41:54202213 // Update Count : 5 512 // Last Modified On : Thu Oct 6 13:51:57 2022 13 // Update Count : 57 14 14 // 15 15 … … 24 24 25 25 extern "C" { 26 void __cfaabi_memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) )); 27 void __cfaabi_memory_startup( void ) { 28 extern void memory_startup(); 29 memory_startup(); 30 } // __cfaabi_memory_startup 31 32 void __cfaabi_memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) )); 33 void __cfaabi_memory_shutdown( void ) { 34 extern void memory_shutdown(); 35 memory_shutdown(); 36 } // __cfaabi_memory_shutdown 37 26 38 void __cfaabi_appready_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_APPREADY ) )); 27 39 void __cfaabi_appready_startup( void ) { 28 40 tzset(); // initialize time global variables 29 #ifdef __CFA_DEBUG__30 41 extern void heapAppStart(); 31 42 heapAppStart(); 32 #endif // __CFA_DEBUG__33 43 } // __cfaabi_appready_startup 34 44 35 45 void __cfaabi_appready_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_APPREADY ) )); 36 46 void __cfaabi_appready_shutdown( void ) { 37 #ifdef __CFA_DEBUG__38 47 extern void heapAppStop(); 39 48 heapAppStop(); 40 #endif // __CFA_DEBUG__41 49 } // __cfaabi_appready_shutdown 42 50 -
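startup.cfa now calls the heap's memory_startup/memory_shutdown through __cfaabi_memory_* wrappers attached to STARTUP_PRIORITY_MEMORY, and runs heapAppStart/heapAppStop unconditionally at STARTUP_PRIORITY_APPREADY. The ordering comes from GCC constructor/destructor priorities: lower-numbered constructors run earlier, and destructors run in the reverse order. A self-contained plain-C illustration with made-up priority values (the real STARTUP_PRIORITY_* constants are defined elsewhere in the runtime):

    #include <stdio.h>

    // Hypothetical priorities: 101 is the lowest value GCC reserves for user code.
    #define PRIORITY_MEMORY   101
    #define PRIORITY_APPREADY 102

    __attribute__(( constructor( PRIORITY_MEMORY ) ))
    static void memory_startup( void ) { puts( "memory startup" ); }        // runs first

    __attribute__(( constructor( PRIORITY_APPREADY ) ))
    static void appready_startup( void ) { puts( "appready startup" ); }    // runs second

    __attribute__(( destructor( PRIORITY_APPREADY ) ))
    static void appready_shutdown( void ) { puts( "appready shutdown" ); }  // runs before ...

    __attribute__(( destructor( PRIORITY_MEMORY ) ))
    static void memory_shutdown( void ) { puts( "memory shutdown" ); }      // ... memory teardown

    int main( void ) { puts( "main" ); }

Compiled with gcc, this prints memory startup, appready startup, main, appready shutdown, memory shutdown, giving the bracketing the runtime needs so the heap exists before, and outlives, the application-ready phase.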
libcfa/src/stdhdr/assert.h
r5408b59 rc2b3243 10 10 // Created On : Mon Jul 4 23:25:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Feb 4 12:58:49 202013 // Update Count : 1 512 // Last Modified On : Sun Oct 9 21:28:22 2022 13 // Update Count : 16 14 14 // 15 15 … … 31 31 #endif 32 32 33 #if ! defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__))33 #if ! defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__)) 34 34 #define __CFA_WITH_VERIFY__ 35 35 #define verify(x) assert(x) -
src/AST/Pass.hpp
r5408b59 rc2b3243 167 167 const ast::Expr * visit( const ast::UntypedExpr * ) override final; 168 168 const ast::Expr * visit( const ast::NameExpr * ) override final; 169 const ast::Expr * 169 const ast::Expr * visit( const ast::QualifiedNameExpr * ) override final; 170 170 const ast::Expr * visit( const ast::AddressExpr * ) override final; 171 171 const ast::Expr * visit( const ast::LabelAddressExpr * ) override final; -
src/GenPoly/ScrubTyVars.h
r5408b59 rc2b3243 116 116 node_t const * target, const TypeVarMap & typeVars ) { 117 117 return strict_dynamic_cast<node_t const *>( 118 scrubTypeVars<ast::Node>( target ) );118 scrubTypeVars<ast::Node>( target, typeVars ) ); 119 119 } 120 120 -
src/Parser/ParseNode.h
r5408b59 rc2b3243 10 10 // Created On : Sat May 16 13:28:16 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Feb 2 09:15:49202213 // Update Count : 9 0512 // Last Modified On : Tue Oct 18 14:15:37 2022 13 // Update Count : 936 14 14 // 15 15 … … 465 465 errors.append( e ); 466 466 } // try 467 cur = dynamic_cast< NodeType * >( cur->get_next() ); 467 const ParseNode * temp = (cur->get_next()); 468 cur = dynamic_cast< const NodeType * >( temp ); // should not return nullptr 469 if ( ! cur && temp ) { // non-homogeneous nodes ? 470 SemanticError( cur->location, "internal error, non-homogeneous nodes founds in buildList processing." ); 471 } // if 468 472 } // while 469 473 if ( ! errors.isEmpty() ) { -
src/Parser/lex.ll
r5408b59 rc2b3243 10 10 * Created On : Sat Sep 22 08:58:10 2001 11 11 * Last Modified By : Peter A. Buhr 12 * Last Modified On : T ue Sep 20 21:18:55202213 * Update Count : 76 212 * Last Modified On : Thu Oct 13 20:46:04 2022 13 * Update Count : 764 14 14 */ 15 15 … … 331 331 __uint128_t { KEYWORD_RETURN(UINT128); } // GCC 332 332 unsigned { KEYWORD_RETURN(UNSIGNED); } 333 __builtin_va_list { KEYWORD_RETURN(VALIST); } // GCC 333 __builtin_va_arg { KEYWORD_RETURN(VA_ARG); } // GCC 334 __builtin_va_list { KEYWORD_RETURN(VA_LIST); } // GCC 334 335 virtual { KEYWORD_RETURN(VIRTUAL); } // CFA 335 336 void { KEYWORD_RETURN(VOID); } -
src/Parser/parser.yy
r5408b59 rc2b3243 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Oct 8 08:21:18202213 // Update Count : 57 0912 // Last Modified On : Fri Oct 14 14:04:43 2022 13 // Update Count : 5751 14 14 // 15 15 … … 305 305 %token TYPEDEF 306 306 %token EXTERN STATIC AUTO REGISTER 307 %token THREADLOCALGCC THREADLOCALC11 307 %token THREADLOCALGCC THREADLOCALC11 // GCC, C11 308 308 %token INLINE FORTRAN // C99, extension ISO/IEC 9899:1999 Section J.5.9(1) 309 309 %token NORETURN // C11 … … 318 318 %token DECIMAL32 DECIMAL64 DECIMAL128 // GCC 319 319 %token ZERO_T ONE_T // CFA 320 %token SIZEOF TYPEOF VA LIST AUTO_TYPE// GCC320 %token SIZEOF TYPEOF VA_LIST VA_ARG AUTO_TYPE // GCC 321 321 %token OFFSETOF BASETYPEOF TYPEID // CFA 322 322 %token ENUM STRUCT UNION … … 409 409 // declarations 410 410 %type<decl> abstract_declarator abstract_ptr abstract_array abstract_function array_dimension multi_array_dimension 411 %type<decl> abstract_parameter_declarator abstract_parameter_ptr abstract_parameter_array abstract_parameter_function array_parameter_dimension array_parameter_1st_dimension411 %type<decl> abstract_parameter_declarator_opt abstract_parameter_declarator abstract_parameter_ptr abstract_parameter_array abstract_parameter_function array_parameter_dimension array_parameter_1st_dimension 412 412 %type<decl> abstract_parameter_declaration 413 413 … … 698 698 primary_expression 699 699 | postfix_expression '[' assignment_expression ',' tuple_expression_list ']' 700 701 702 700 // Historic, transitional: Disallow commas in subscripts. 701 // Switching to this behaviour may help check if a C compatibilty case uses comma-exprs in subscripts. 702 // Current: Commas in subscripts make tuples. 703 703 { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, new ExpressionNode( build_tuple( (ExpressionNode *)($3->set_last( $5 ) ) )) ) ); } 704 704 | postfix_expression '[' assignment_expression ']' … … 720 720 | postfix_expression '(' argument_expression_list_opt ')' 721 721 { $$ = new ExpressionNode( build_func( $1, $3 ) ); } 722 | VA_ARG '(' primary_expression ',' declaration_specifier_nobody abstract_parameter_declarator_opt ')' 723 // { SemanticError( yylloc, "va_arg is currently unimplemented." ); $$ = nullptr; } 724 { $$ = new ExpressionNode( build_func( new ExpressionNode( build_varref( new string( "__builtin_va_arg") ) ), 725 (ExpressionNode *)($3->set_last( (ExpressionNode *)($6 ? $6->addType( $5 ) : $5) )) ) ); } 722 726 | postfix_expression '`' identifier // CFA, postfix call 723 727 { $$ = new ExpressionNode( build_func( new ExpressionNode( build_varref( build_postfix_name( $3 ) ) ), $1 ) ); } … … 2156 2160 | LONG 2157 2161 { $$ = DeclarationNode::newLength( DeclarationNode::Long ); } 2158 | VA LIST // GCC, __builtin_va_list2162 | VA_LIST // GCC, __builtin_va_list 2159 2163 { $$ = DeclarationNode::newBuiltinType( DeclarationNode::Valist ); } 2160 2164 | AUTO_TYPE … … 3676 3680 // functions. 3677 3681 3682 abstract_parameter_declarator_opt: 3683 // empty 3684 { $$ = nullptr; } 3685 | abstract_parameter_declarator 3686 ; 3687 3678 3688 abstract_parameter_declarator: 3679 3689 abstract_parameter_ptr -
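The lex.ll and parser.yy changes above rename the old VALIST token to VA_LIST and add a VA_ARG token for __builtin_va_arg, so a use of C's va_arg macro (which GCC expands to __builtin_va_arg( ap, type )) now parses as an ordinary call built from the list expression and the parsed type. For reference, the plain-C construct being accepted, nothing CFA-specific:

    #include <stdarg.h>
    #include <stdio.h>

    // Sum 'count' int arguments passed through the ellipsis.
    static int sum_ints( int count, ... ) {
        va_list ap;                         // GCC: __builtin_va_list
        va_start( ap, count );
        int total = 0;
        for ( int i = 0; i < count; i += 1 ) {
            total += va_arg( ap, int );     // GCC: __builtin_va_arg( ap, int )
        }
        va_end( ap );
        return total;
    }

    int main( void ) {
        printf( "%d\n", sum_ints( 3, 1, 2, 3 ) );   // prints 6
    }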
tests/.expect/alloc.txt
r5408b59 rc2b3243 35 35 CFA realloc array alloc, fill 36 36 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 37 CFA realloc array alloc, 5 38 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 39 CFA realloc array alloc, 5 40 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 41 CFA realloc array alloc, 5 42 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 0x5 37 43 38 44 C memalign 42 42.5 -
tests/Makefile.am
r5408b59 rc2b3243 76 76 pybin/tools.py \ 77 77 long_tests.hfa \ 78 avltree/avl-private.h \ 79 avltree/avl.h \ 80 concurrent/clib_tls.c \ 81 concurrent/clib.c \ 78 82 configs/.in/parseconfig-all.txt \ 79 83 configs/.in/parseconfig-errors.txt \ 80 84 configs/.in/parseconfig-missing.txt \ 85 exceptions/except-io.hfa \ 86 exceptions/with-threads.hfa \ 81 87 io/.in/io.data \ 82 88 io/.in/many_read.data \ 83 avltree/avl.h \ 84 avltree/avl-private.h \ 85 concurrent/clib.c \ 86 concurrent/clib_tls.c \ 87 exceptions/with-threads.hfa \ 88 exceptions/except-io.hfa \ 89 meta/fork+exec.hfa \ 89 90 unified_locking/mutex_test.hfa 90 91 -
tests/alloc.cfa
r5408b59 rc2b3243 10 10 // Created On : Wed Feb 3 07:56:22 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Apr 18 17:13:52202213 // Update Count : 4 3312 // Last Modified On : Fri Oct 14 09:31:39 2022 13 // Update Count : 491 14 14 // 15 15 … … 34 34 ip = (int *)malloc( sizeof(*ip) ); // C malloc, type unsafe 35 35 *ip = 0xdeadbeef; 36 printf( "C malloc %#x\n", *ip);36 sout | "C malloc" | hex(*ip); 37 37 free( ip ); 38 38 39 39 ip = malloc(); // CFA malloc, type safe 40 40 *ip = 0xdeadbeef; 41 printf( "CFA malloc %#x\n", *ip);41 sout | "CFA malloc" | hex(*ip); 42 42 free( ip ); 43 43 44 44 ip = alloc(); // CFA alloc, type safe 45 45 *ip = 0xdeadbeef; 46 printf( "CFA alloc %#x\n", *ip);46 sout | "CFA alloc" | hex(*ip); 47 47 free( ip ); 48 48 49 49 ip = alloc( fill`fill ); // CFA alloc, fill 50 printf( "CFA alloc, fill %08x\n", *ip);50 sout | "CFA alloc, fill" | wd(8, nobase(hex(*ip))); 51 51 free( ip ); 52 52 53 53 ip = alloc( 3`fill ); // CFA alloc, fill 54 printf( "CFA alloc, fill %d\n", *ip );54 sout | "CFA alloc, fill" | *ip; 55 55 free( ip ); 56 56 57 57 58 58 // allocation, array types 59 printf( "\n" );59 sout | nl; 60 60 61 61 ip = (int *)calloc( dim, sizeof( *ip ) ); // C array calloc, type unsafe 62 printf( "C array calloc, fill 0\n" );63 for ( i; dim ) { printf( "%#x ", ip[i] ); }64 printf( "\n" );62 sout | "C array calloc, fill 0"; 63 for ( i; dim ) { sout | hex(ip[i]) | ' ' | nonl; } 64 sout | nl; 65 65 free( ip ); 66 66 67 67 ip = calloc( dim ); // CFA array calloc, type safe 68 printf( "CFA array calloc, fill 0\n" );69 for ( i; dim ) { printf( "%#x ", ip[i] ); }70 printf( "\n" );68 sout | "CFA array calloc, fill 0"; 69 for ( i; dim ) { sout | hex(ip[i]) | ' ' | nonl; } 70 sout | nl; 71 71 free( ip ); 72 72 73 73 ip = alloc( dim ); // CFA array alloc, type safe 74 74 for ( i; dim ) { ip[i] = 0xdeadbeef; } 75 printf( "CFA array alloc, no fill\n" );76 for ( i; dim ) { printf( "%#x ", ip[i] ); }77 printf( "\n" );75 sout | "CFA array alloc, no fill"; 76 for ( i; dim ) { sout | hex(ip[i]) | ' ' | nonl; } 77 sout | nl; 78 78 free( ip ); 79 79 80 80 ip = alloc( 2 * dim, fill`fill ); // CFA array alloc, fill 81 printf( "CFA array alloc, fill %#hhx\n", fill);82 for ( i; 2 * dim ) { printf( "%#x ", ip[i] ); }83 printf( "\n" );81 sout | "CFA array alloc, fill" | hex(fill); 82 for ( i; 2 * dim ) { sout | hex(ip[i]) | ' ' | nonl; } 83 sout | nl; 84 84 free( ip ); 85 85 86 86 ip = alloc( 2 * dim, ((int)0xdeadbeef)`fill ); // CFA array alloc, fill 87 printf( "CFA array alloc, fill %#hhx\n", 0xdeadbeef);88 for ( i; 2 * dim ) { printf( "%#x ", ip[i] ); }89 printf( "\n" );87 sout | "CFA array alloc, fill" | hex((char)0xdeadbeef); 88 for ( i; 2 * dim ) { sout | hex(ip[i]) | ' ' | nonl; } 89 sout | nl; 90 90 // do not free 91 91 92 92 ip1 = alloc( 2 * dim, [ip, 2 * dim]`fill ); // CFA array alloc, fill 93 printf( "CFA array alloc, fill from array\n" );94 for ( i; 2 * dim ) { printf( "%#x %#x, ", ip[i], ip1[i] ); }93 sout | "CFA array alloc, fill from array"; 94 for ( i; 2 * dim ) { sout | hex(ip[i]) | hex(ip1[i]) | ", " | nonl; } 95 95 free( ip1 ); 96 printf( "\n" );96 sout | nl; 97 97 98 98 99 99 // realloc, non-array types 100 printf( "\n" );100 sout | nl; 101 101 102 102 ip = (int *)realloc( ip, dim * sizeof(*ip) ); // C realloc 103 printf( "C realloc\n" );104 for ( i; dim ) { printf( "%#x ", ip[i] ); }105 printf( "\n" );103 sout | "C realloc"; 104 for ( i; dim ) { sout | hex(ip[i]) | ' ' | nonl; } 105 sout | nl; 106 106 // do not free 107 107 108 108 ip = realloc( 
ip, 2 * dim * sizeof(*ip) ); // CFA realloc 109 109 for ( i; dim ~ 2 * dim ) { ip[i] = 0x1010101; } 110 printf( "CFA realloc\n" );111 for ( i; 2 * dim ) { printf( "%#x ", ip[i] ); }112 printf( "\n" );110 sout | "CFA realloc"; 111 for ( i; 2 * dim ) { sout | hex(ip[i]) | ' ' | nonl; } 112 sout | nl; 113 113 // do not free 114 114 115 115 116 116 // realloc, array types 117 printf( "\n" );117 sout | nl; 118 118 119 119 ip = alloc( dim, ip`realloc ); // CFA realloc array alloc 120 120 for ( i; dim ) { ip[i] = 0xdeadbeef; } 121 printf( "CFA realloc array alloc\n" );122 for ( i; dim ) { printf( "%#x ", ip[i] ); }123 printf( "\n" );121 sout | "CFA realloc array alloc"; 122 for ( i; dim ) { sout | hex(ip[i]) | ' ' | nonl; } 123 sout | nl; 124 124 // do not free 125 125 126 126 ip = alloc( 2 * dim, ip`realloc ); // CFA realloc array alloc 127 127 for ( i; dim ~ 2 * dim ) { ip[i] = 0x1010101; } // fill upper part 128 printf( "CFA realloc array alloc\n" );129 for ( i; 2 * dim ) { printf( "%#x ", ip[i] ); }130 printf( "\n" );128 sout | "CFA realloc array alloc"; 129 for ( i; 2 * dim ) { sout | hex(ip[i]) | ' ' | nonl; } 130 sout | nl; 131 131 // do not free 132 132 133 133 ip = alloc( dim, ip`realloc ); // CFA realloc array alloc 134 printf( "CFA realloc array alloc\n" );135 for ( i; dim ) { printf( "%#x ", ip[i] ); }136 printf( "\n" );134 sout | "CFA realloc array alloc"; 135 for ( i; dim ) { sout | hex(ip[i]) | ' ' | nonl; } 136 sout | nl; 137 137 // do not free 138 138 139 139 ip = alloc( 3 * dim, ip`realloc, fill`fill ); // CFA realloc array alloc, fill 140 printf( "CFA realloc array alloc, fill\n" );141 for ( i; 3 * dim ) { printf( "%#x ", ip[i] ); }142 printf( "\n" );140 sout | "CFA realloc array alloc, fill"; 141 for ( i; 3 * dim ) { sout | hex(ip[i]) | ' ' | nonl; } 142 sout | nl; 143 143 // do not free 144 144 145 145 ip = alloc( dim, ip`realloc, fill`fill ); // CFA realloc array alloc, fill 146 printf( "CFA realloc array alloc, fill\n" );147 for ( i; dim ) { printf( "%#x ", ip[i] ); }148 printf( "\n" );146 sout | "CFA realloc array alloc, fill"; 147 for ( i; dim ) { sout | hex(ip[i]) | ' ' | nonl; } 148 sout | nl; 149 149 // do not free 150 150 151 151 ip = alloc( 3 * dim, ip`realloc, fill`fill ); // CFA realloc array alloc, fill 152 printf( "CFA realloc array alloc, fill\n" );153 for ( i; 3 * dim ) { printf( "%#x ", ip[i] ); }154 printf( "\n" );155 // do not free 156 #if 0 // FIX ME 152 sout | "CFA realloc array alloc, fill"; 153 for ( i; 3 * dim ) { sout | hex(ip[i]) | ' ' | nonl; } 154 sout | nl; 155 // do not free 156 157 157 ip = alloc( 5 * dim, ip`realloc, 5`fill ); // CFA realloc array alloc, 5 158 printf( "CFA realloc array alloc, 5\n" );159 for ( i; 5 * dim ) { printf( "%#x ", ip[i] ); }160 printf( "\n" );158 sout | "CFA realloc array alloc, 5"; 159 for ( i; 5 * dim ) { sout | hex(ip[i]) | ' ' | nonl; } 160 sout | nl; 161 161 // do not free 162 162 163 163 ip = alloc( dim, ip`realloc, 5`fill ); // CFA realloc array alloc, 5 164 printf( "CFA realloc array alloc, 5\n" );165 for ( i; dim ) { printf( "%#x ", ip[i] ); }166 printf( "\n" );164 sout | "CFA realloc array alloc, 5"; 165 for ( i; dim ) { sout | hex(ip[i]) | ' ' | nonl; } 166 sout | nl; 167 167 // do not free 168 168 169 169 ip = alloc( 5 * dim, ip`realloc, 5`fill ); // CFA realloc array alloc, 5 170 printf( "CFA realloc array alloc, 5\n" );171 for ( i; 5 * dim ) { printf( "%#x ", ip[i] ); }172 printf( "\n" );173 #endif // 0 170 sout | "CFA realloc array alloc, 5"; 171 for ( i; 5 * dim ) { sout | hex(ip[i]) | ' ' | nonl; } 172 
sout | nl; 173 174 174 free( ip ); 175 175 … … 180 180 }; 181 181 182 182 ip = alloc(); 183 183 *ip = 5; 184 184 double * dp = alloc( ip`resize ); 185 185 *dp = 5.5; 186 186 S * sp = alloc( dp`resize ); 187 187 *sp = (S){ {0, 1, 2, 3, 4} }; 188 188 ip = alloc( sp`resize ); 189 189 *ip = 3; 190 190 free( ip ); 191 191 192 192 193 193 // resize, array types 194 194 195 195 ip = alloc( 5 ); 196 196 for ( i; 5 ) { ip[i] = 5; } 197 197 dp = alloc( 5, ip`resize ); 198 198 for ( i; 5 ) { dp[i] = 5.5; } 199 199 sp = alloc( 5, dp`resize ); 200 200 for ( i; 5 ) { sp[i] = (S){ {0, 1, 2, 3, 4} }; } 201 201 ip = alloc( 3, sp`resize ); 202 202 for ( i; 3 ) { ip[i] = 3; } 203 203 ip = alloc( 7, ip`realloc ); 204 204 for ( i; 7 ) { ip[i] = 7; } 205 205 ip = alloc( 7, ip`resize ); 206 206 for ( i; 7 ) { ip[i] = 7; } 207 207 free( ip ); 208 208 209 209 … … 217 217 const_count++; 218 218 } 219 void ^?{}( Struct & a ) { 219 void ^?{}( Struct & a ) { dest_count++; } // destruct 220 220 Struct st, st1, sta[dim], sta1[dim], * stp, * stp1; 221 221 222 222 // alignment, non-array types 223 printf( "\n" );223 sout | nl; 224 224 enum { Alignment = 128 }; 225 225 226 226 stp = &(*(Struct*)memalign( Alignment, sizeof( *stp ) ) ){ 42, 42.5 }; // C memalign 227 227 assert( (uintptr_t)stp % Alignment == 0 ); 228 printf( "C memalign %d %g\n", stp->x, stp->y );228 sout | "C memalign " | stp->x | stp->y; 229 229 free( stp ); 230 230 231 231 stp = &(*memalign( Alignment )){ 42, 42.5 }; // CFA memalign 232 232 assert( (uintptr_t)stp % Alignment == 0 ); 233 printf( "CFA memalign %d %g\n", stp->x, stp->y );233 sout | "CFA memalign" | stp->x | stp->y; 234 234 free( stp ); 235 235 … … 237 237 *stp = (Struct){ 42, 42.5 }; 238 238 assert( (uintptr_t)stp % Alignment == 0 ); 239 printf( "CFA posix_memalign %d %g\n", stp->x, stp->y );239 sout | "CFA posix_memalign" | stp->x | stp->y; 240 240 free( stp ); 241 241 … … 243 243 *stp = (Struct){ 42, 42.5 }; 244 244 assert( (uintptr_t)stp % Alignment == 0 ); 245 printf( "CFA posix_memalign %d %g\n", stp->x, stp->y );245 sout | "CFA posix_memalign" | stp->x | stp->y; 246 246 free( stp ); 247 247 248 248 stp = &(*alloc( Alignment`align)){ 42, 42.5 }; // CFA alloc_align 249 249 assert( (uintptr_t)stp % Alignment == 0 ); 250 printf( "CFA alloc_align %d %g\n", stp->x, stp->y );250 sout | "CFA alloc_align" | stp->x | stp->y; 251 251 free( stp ); 252 252 253 253 stp = &(*alloc( Alignment`align )){ 42, 42.5 }; // CFA alloc_align 254 254 assert( (uintptr_t)stp % Alignment == 0 ); 255 printf( "CFA alloc_align %d %g\n", stp->x, stp->y );255 sout | "CFA alloc_align" | stp->x | stp->y; 256 256 free( stp ); 257 257 258 258 stp = alloc( Alignment`align, fill`fill ); // CFA memalign, fill 259 259 assert( (uintptr_t)stp % Alignment == 0 ); 260 printf( "CFA alloc_align fill %#x %a\n", stp->x, stp->y);260 sout | "CFA alloc_align fill" | hex(stp->x) | hex(stp->y); 261 261 free( stp ); 262 262 263 263 stp = alloc( Alignment`align, (Struct){ 42, 42.5 }`fill ); // CFA memalign, fill 264 264 assert( (uintptr_t)stp % Alignment == 0 ); 265 printf( "CFA alloc_align fill %d %g\n", stp->x, stp->y );265 sout | "CFA alloc_align fill" | stp->x | stp->y; 266 266 // do not free 267 267 268 268 stp = &(*alloc( stp`realloc, 4096`align )){ 42, 42.5 }; // CFA realign 269 269 assert( (uintptr_t)stp % 4096 == 0 ); 270 printf( "CFA alloc_align %d %g\n", stp->x, stp->y );270 sout | "CFA alloc_align" | stp->x | stp->y; 271 271 free( stp ); 272 272 273 273 274 274 // alignment, array types 275 printf( "\n" );275 sout | nl; 276 276 
277 277 stp = alloc( dim, Alignment`align ); // CFA array memalign 278 278 assert( (uintptr_t)stp % Alignment == 0 ); 279 279 for ( i; dim ) { stp[i] = (Struct){ 42, 42.5 }; } 280 printf( "CFA array alloc_align\n" );281 for ( i; dim ) { printf( "%d %g, ", stp[i].x, stp[i].y ); }282 printf( "\n" );280 sout | "CFA array alloc_align"; 281 for ( i; dim ) { sout | stp[i].x | stp[i].y | ", " | nonl; } 282 sout | nl; 283 283 free( stp ); 284 284 285 285 stp = alloc( dim, Alignment`align, fill`fill ); // CFA array memalign, fill 286 286 assert( (uintptr_t)stp % Alignment == 0 ); 287 printf( "CFA array alloc_align, fill\n" );288 for ( i; dim ) { printf( "%#x %a, ", stp[i].x, stp[i].y ); }289 printf( "\n" );287 sout | "CFA array alloc_align, fill"; 288 for ( i; dim ) { sout | hex(stp[i].x) | hex(stp[i].y) | ", " | nonl; } 289 sout | nl; 290 290 free( stp ); 291 291 292 292 stp = alloc( dim, Alignment`align, ((Struct){ 42, 42.5 })`fill ); // CFA array memalign, fill 293 293 assert( (uintptr_t)stp % Alignment == 0 ); 294 printf( "CFA array alloc_align, fill\n" );295 for ( i; dim ) { printf( "%d %g, ", stp[i].x, stp[i].y ); }296 printf( "\n" );294 sout | "CFA array alloc_align, fill"; 295 for ( i; dim ) { sout | stp[i].x | stp[i].y | ", " | nonl; } 296 sout | nl; 297 297 // do not free 298 298 299 299 stp1 = alloc( dim, Alignment`align, [stp, dim]`fill ); // CFA array memalign, fill 300 300 assert( (uintptr_t)stp % Alignment == 0 ); 301 printf( "CFA array alloc_align, fill array\n" );302 for ( i; dim ) { printf( "%d %g, ", stp1[i].x, stp1[i].y ); }303 printf( "\n" );301 sout | "CFA array alloc_align, fill array"; 302 for ( i; dim ) { sout | stp1[i].x | stp1[i].y | ", " | nonl; } 303 sout | nl; 304 304 free( stp1 ); 305 305 … … 307 307 assert( (uintptr_t)stp % 4096 == 0 ); 308 308 for ( i; dim ) { stp[i] = (Struct){ 42, 42.5 }; } 309 printf( "CFA realloc array alloc_align\n" );310 for ( i; dim ) { printf( "%d %g, ", stp[i].x, stp[i].y ); }311 printf( "\n" );309 sout | "CFA realloc array alloc_align"; 310 for ( i; dim ) { sout | stp[i].x | stp[i].y | ", " | nonl; } 311 sout | nl; 312 312 free( stp ); 313 313 314 314 315 315 // data, non-array types 316 printf( "\n" );316 sout | nl; 317 317 318 318 memset( &st, fill ); // CFA memset, type safe 319 printf( "CFA memset %#x %a\n", st.x, st.y);319 sout | "CFA memset" | hex(st.x) | hex(st.y); 320 320 memcpy( &st1, &st ); // CFA memcpy, type safe 321 printf( "CFA memcpy %#x %a\n", st1.x, st1.y);321 sout | "CFA memcpy" | hex(st1.x) | hex(st1.y); 322 322 323 323 324 324 // data, array types 325 printf( "\n" );325 sout | nl; 326 326 327 327 amemset( sta, fill, dim ); // CFA array memset, type safe 328 printf( "CFA array memset\n" );329 for ( i; dim ) { printf( "%#x %a, ", sta[i].x, sta[i].y ); }330 printf( "\n" );328 sout | "CFA array memset"; 329 for ( i; dim ) { sout | hex(sta[i].x) | hex(sta[i].y) | ", " | nonl; } 330 sout | nl; 331 331 332 332 amemcpy( sta1, sta, dim ); // CFA array memcpy, type safe 333 printf( "CFA array memcpy\n" );334 for ( i; dim ) { printf( "%#x %a, ", sta1[i].x, sta1[i].y ); }335 printf( "\n" );333 sout | "CFA array memcpy"; 334 for ( i; dim ) { sout | hex(sta1[i].x) | hex(sta1[i].y) | ", " | nonl; } 335 sout | nl; 336 336 337 337 // new, non-array types 338 printf( "\n" );338 sout | nl; 339 339 340 340 const_count = dest_count = 0; … … 344 344 assert( const_count == 2 && dest_count == 0 ); // assertion for testing 345 345 346 printf( "CFA new initialize\n%d %g %d %g\n", stp->x, stp->y, stp1->x, stp1->y );346 sout | "CFA new initialize" 
| nl | stp->x | stp->y | stp1->x | stp1->y; 347 347 delete( stp, stp1 ); 348 348 assert( const_count == 2 && dest_count == 2 ); // assertion for testing … … 351 351 stp = anew( dim, 42, 42.5 ); 352 352 assert( const_count == 2 + dim && dest_count == 2 ); // assertion for testing 353 printf( "CFA array new initialize\n" );354 for ( i; dim ) { printf( "%d %g, ", stp[i].x, stp[i].y ); }355 printf( "\n" );353 sout | "CFA array new initialize"; 354 for ( i; dim ) { sout | stp[i].x | stp[i].y | ", " | nonl; } 355 sout | nl; 356 356 357 357 stp1 = anew( dim, 42, 42.5 ); 358 358 assert( const_count == 2 + 2 * dim && dest_count == 2 ); // assertion for testing 359 for ( i; dim ) { printf( "%d %g, ", stp1[i].x, stp1[i].y ); }360 printf( "\n" );359 for ( i; dim ) { sout | stp1[i].x | stp1[i].y | ", " | nonl; } 360 sout | nl; 361 361 adelete( stp, stp1 ); 362 362 assert( const_count == 2 + 2 * dim && dest_count == 2 + 2 * dim); // assertion for testing 363 363 364 364 // extras 365 printf( "\n" );365 sout | nl; 366 366 367 367 float * fp = malloc() + 1; 368 printf( "pointer arithmetic %d\n", fp == fp - 1 );368 sout | "pointer arithmetic" | fp == fp - 1; 369 369 free( fp - 1 ); 370 370 371 371 ip = foo( bar( baz( malloc(), 0 ), 0 ), 0 ); 372 372 *ip = 0xdeadbeef; 373 printf( "CFA deep malloc %#x\n", *ip);373 sout | "CFA deep malloc" | hex(*ip); 374 374 375 375 dp = alloc(5.0`fill); // just for testing multiple free … … 379 379 #ifdef ERR1 380 380 stp = malloc(); 381 printf( "\nSHOULD FAIL\n" );381 sout | "\nSHOULD FAIL"; 382 382 ip = realloc( stp, dim * sizeof( *stp ) ); 383 383 ip = memset( stp, 10 ); -
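The alignment section of the converted test exercises both the C interfaces (memalign, posix_memalign) and the CFA alignment forms shown above. The C half in isolation behaves like the following stand-alone plain-C sketch, which mirrors the test's Struct and 128-byte alignment but is otherwise independent of the test harness:

    #include <assert.h>
    #include <malloc.h>     // memalign
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>     // posix_memalign, free

    typedef struct { int x; double y; } Struct;

    int main( void ) {
        enum { Alignment = 128 };
        Struct * stp;

        // C memalign: aligned storage, then initialized in place.
        stp = memalign( Alignment, sizeof( *stp ) );
        *stp = (Struct){ 42, 42.5 };
        assert( (uintptr_t)stp % Alignment == 0 );
        printf( "C memalign %d %g\n", stp->x, stp->y );
        free( stp );

        // POSIX form: returns 0 on success and stores the pointer through the out-parameter.
        if ( posix_memalign( (void **)&stp, Alignment, sizeof( *stp ) ) == 0 ) {
            *stp = (Struct){ 42, 42.5 };
            assert( (uintptr_t)stp % Alignment == 0 );
            printf( "posix_memalign %d %g\n", stp->x, stp->y );
            free( stp );
        }
    }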
tests/alloc2.cfa
r5408b59 rc2b3243 1 #include <fstream.hfa> // sout 1 2 #include <malloc.h> // malloc_usable_size 2 3 #include <stdint.h> // uintptr_t … … 4 5 #include <string.h> // memcmp 5 6 6 int last_failed;7 7 int tests_total; 8 8 int tests_failed; … … 13 13 void test_base( void * ip, size_t size, size_t align ) { 14 14 tests_total += 1; 15 // printf( "DEBUG: starting test %d\n", tests_total);15 // sout | "DEBUG: starting test" | tests_total; 16 16 bool passed = (malloc_size( ip ) == size) && (malloc_usable_size( ip ) >= size) && (malloc_alignment( ip ) == align) && ((uintptr_t)ip % align == 0); 17 17 if ( ! passed ) { 18 printf( "failed test %3d: %4zu %4zu but got %4zu ( %3zu ) %4zu\n", tests_total, size, align, malloc_size( ip ), malloc_usable_size( ip ), malloc_alignment( ip ));18 sout | "base failed test" | tests_total | "ip" | ip | "size" | size | "align" | align | "but got size" | malloc_size( ip ) | "usable" | malloc_usable_size( ip ) | "align" | malloc_alignment( ip ); 19 19 tests_failed += 1; 20 20 } // if 21 // printf( "DEBUG: done test %d\n", tests_total);21 // sout | "DEBUG: done test" | tests_total; 22 22 } 23 23 24 24 void test_fill( void * ip_, size_t start, size_t end, char fill ) { 25 25 tests_total += 1; 26 // printf( "DEBUG: starting test %d\n", tests_total );26 // sout | "DEBUG: starting test" | tests_total; 27 27 bool passed = true; 28 28 char * ip = (char *) ip_; 29 29 for ( i; start ~ end ) passed = passed && (ip[i] == fill); 30 30 if ( ! passed ) { 31 printf( "failed test %3d: fill C\n", tests_total );31 sout | "fill1 failed test" | tests_total | "fill C"; 32 32 tests_failed += 1; 33 33 } // if 34 // printf( "DEBUG: done test %d\n", tests_total );34 // sout | "DEBUG: done test" | tests_total; 35 35 } 36 36 37 37 void test_fill( void * ip_, size_t start, size_t end, int fill ) { 38 38 tests_total += 1; 39 // printf( "DEBUG: starting test %d\n", tests_total );39 // sout | "DEBUG: starting test" tests_total; 40 40 bool passed = true; 41 41 int * ip = (int *)ip_; 42 for ( i; start ~ end ) passed = passed && (ip[i] == fill);42 for ( i; start ~ end ) passed = passed && (ip[i] == fill); 43 43 if ( ! passed ) { 44 printf( "failed test %3d: fill int\n", tests_total );44 sout | "fill2 failed test" | tests_total | "fill int"; 45 45 tests_failed += 1; 46 46 } // if 47 // printf( "DEBUG: done test %d\n", tests_total );47 // sout | "DEBUG: done test" | tests_total; 48 48 } 49 49 50 50 void test_fill( void * ip_, size_t start, size_t end, int * fill ) { 51 51 tests_total += 1; 52 // printf( "DEBUG: starting test %d\n", tests_total );52 // sout | "DEBUG: starting test" | tests_total; 53 53 bool passed = memcmp((void*)((uintptr_t )ip_ + start ), (void*)fill, end ) == 0; 54 54 if ( ! passed ) { 55 printf( "failed test %3d: fill int A\n", tests_total );55 sout | "fill3 failed test" | tests_total | "fill int A"; 56 56 tests_failed += 1; 57 57 } // if 58 // printf( "DEBUG: done test %d\n", tests_total );58 // sout | "DEBUG: done test" | tests_total; 59 59 } 60 60 61 61 void test_fill( void * ip_, size_t start, size_t end, T1 fill ) { 62 62 tests_total += 1; 63 // printf( "DEBUG: starting test %d\n", tests_total );63 // sout | "DEBUG: starting test" | tests_total; 64 64 bool passed = true; 65 65 T1 * ip = (T1 *) ip_; 66 66 for ( i; start ~ end ) passed = passed && (ip[i].data == fill.data ); 67 67 if ( ! 
passed ) { 68 printf( "failed test %3d: fill T1\n", tests_total );68 sout | "fill4 failed test" | tests_total | "fill T1"; 69 69 tests_failed += 1; 70 70 } // if 71 // printf( "DEBUG: done test %d\n", tests_total );71 // sout | "DEBUG: done test" | tests_total; 72 72 } 73 73 74 74 void test_fill( void * ip_, size_t start, size_t end, T1 * fill ) { 75 75 tests_total += 1; 76 // printf( "DEBUG: starting test %d\n", tests_total );76 // sout | "DEBUG: starting test" | tests_total; 77 77 bool passed = memcmp( (void*)((uintptr_t )ip_ + start ), (void*)fill, end ) == 0; 78 78 if ( ! passed ) { 79 printf( "failed test %3d: fill T1 A\n", tests_total );79 sout | "fill5 failed test" | tests_total | "fill T1 A"; 80 80 tests_failed += 1; 81 81 } // if 82 // printf( "DEBUG: done test %d\n", tests_total );82 // sout | "DEBUG: done test" | tests_total; 83 83 } 84 84 85 85 void test_use( int * ip, size_t dim ) { 86 86 tests_total += 1; 87 // printf( "DEBUG: starting test %d\n", tests_total );87 // sout | "DEBUG: starting test" | tests_total; 88 88 bool passed = true; 89 89 for ( i; 0 ~ dim ) ip[i] = 0xdeadbeef; 90 90 for ( i; 0 ~ dim ) passed = passed && (ip[i] == 0xdeadbeef); 91 91 if ( ! passed ) { 92 printf( "failed test %3d: use int\n", tests_total );92 sout | "use1 failed test" | tests_total | "use int"; 93 93 tests_failed += 1; 94 94 } // if 95 // printf( "DEBUG: done test %d\n", tests_total );95 // sout | "DEBUG: done test" | tests_total; 96 96 } 97 97 98 98 void test_use( T1 * ip, size_t dim ) { 99 99 tests_total += 1; 100 // printf( "DEBUG: starting test %d\n", tests_total );100 // sout | "DEBUG: starting test" | tests_total; 101 101 bool passed = true; 102 102 for ( i; 0 ~ dim ) ip[i].data = 0xdeadbeef; 103 103 for ( i; 0 ~ dim ) passed = passed && (ip[i].data == 0xdeadbeef); 104 104 if ( ! 
passed ) { 105 printf( "failed test %3d: use T1\n", tests_total );105 sout | "use2 failed test" | tests_total | "use T1"; 106 106 tests_failed += 1; 107 107 } // if 108 // printf( "DEBUG: done test %d\n", tests_total );108 // sout | "DEBUG: done test" | tests_total; 109 109 } 110 110 … … 117 117 char FillC = 'a'; 118 118 int * FillA = calloc( dim / 4 ); 119 119 120 T1 FillT1 = { FillT }; 120 121 T1 * FillT1A = (T1 *)(void *) malloc( (dim / 4) * sizeof(T1) ); … … 129 130 // testing alloc 130 131 131 last_failed = -1;132 132 tests_total = 0; 133 133 tests_failed = 0; … … 153 153 free( ip ); 154 154 155 ip = alloc( ((double *)0p)`resize );155 ip = alloc( 0p`resize ); 156 156 test_base( ip, elemSize, libAlign ); 157 157 test_use( ip, elemSize / elemSize ); … … 495 495 free( ip ); 496 496 497 if ( tests_failed == 0 ) printf( "PASSED alloc tests\n\n" );498 else printf( "failed alloc tests : %d/%d\n\n", tests_failed, tests_total );499 500 // testing alloc ( aligned struct)497 if ( tests_failed == 0 ) sout | "PASSED alloc tests" | nl | nl; 498 else sout | "failed alloc tests :" | tests_failed | tests_total | nl | nl; 499 500 // testing alloc (aligned struct) 501 501 502 502 elemSize = sizeof(T1); 503 503 size = dim * elemSize; 504 last_failed = -1;505 504 tests_total = 0; 506 505 tests_failed = 0; … … 868 867 free( t1p ); 869 868 870 if ( tests_failed == 0) printf( "PASSED alloc tests (aligned struct)\n\n");871 else printf( "failed alloc tests ( aligned struct ) : %d/%d\n\n", tests_failed, tests_total );872 873 printf( "(if applicable) alignment error below indicates memory trashing caused by test_use.\n\n");869 if ( tests_failed == 0) sout | "PASSED alloc tests (aligned struct)" | nl | nl; 870 else sout | "failed alloc tests ( aligned struct ) :" | tests_failed | tests_total | nl; 871 872 sout | "(if applicable) alignment error below indicates memory trashing caused by test_use." | nl | nl; 874 873 free( FillA ); 875 874 free( FillT1A ); -
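A change repeated across alloc.cfa, alloc2.cfa, and malloc.cfa in this changeset is replacing printf formatting with CFA stream I/O from fstream.hfa. Below is a minimal sketch of that output style, using only the operators that appear in the hunks above (sout, nl, nonl); the variable names are illustrative only:

#include <fstream.hfa>                  // sout, nl, nonl

int main() {
	int failed = 0, total = 12;
	sout | "failed tests" | failed | total;   // items are space-separated and a newline is appended
	sout | "no newline yet" | nonl;           // nonl suppresses the automatic newline
	sout | " ...continued";
	sout | "PASSED" | nl | nl;                // trailing nl manipulators reproduce the old printf's "\n\n"
}

This is why the hunks can drop the explicit \n and the %d/%g/%zu conversions: separators, conversions, and line endings all come from the stream operators.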
tests/configs/.expect/parsebools.txt
r5408b59 rc2b3243 7 7 set false :true 8 8 Child status: 9 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 09 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 10 10 11 11 all true/set arg: … … 17 17 set false :false 18 18 Child status: 19 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 019 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 20 20 21 21 all false/unset arg: … … 27 27 set false :true 28 28 Child status: 29 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 029 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 30 30 31 31 gibberish arg 1: … … 43 43 -h, --help print this help message 44 44 Child status: 45 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 045 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 46 46 47 47 gibberish arg 2: … … 59 59 -h, --help print this help message 60 60 Child status: 61 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 061 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 62 62 63 63 gibberish arg 3: … … 74 74 -h, --help print this help message 75 75 Child status: 76 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 076 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 77 77 78 78 gibberish arg 4: … … 89 89 -h, --help print this help message 90 90 Child status: 91 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 091 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 92 92 93 93 All Done! -
tests/configs/.expect/parsenums.x64.txt
r5408b59 rc2b3243 6 6 double :3.3 7 7 Child status: 8 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 08 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 9 9 10 10 all 0 arg: … … 15 15 double :0. 16 16 Child status: 17 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 017 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 18 18 19 19 negative vals arg: … … 24 24 double :-1. 25 25 Child status: 26 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 026 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 27 27 28 28 funky notation arg: … … 33 33 double :5000000. 34 34 Child status: 35 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 035 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 36 36 37 37 big values arg: … … 42 42 double :5000000. 43 43 Child status: 44 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 044 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 45 45 46 46 too big values arg: … … 57 57 -h, --help print this help message 58 58 Child status: 59 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 059 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 60 60 61 61 Argument '4294967296' for option u could not be parsed … … 71 71 -h, --help print this help message 72 72 Child status: 73 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 073 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 74 74 75 75 Argument '18446744073709551616' for option l could not be parsed … … 85 85 -h, --help print this help message 86 86 Child status: 87 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 087 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 88 88 89 89 Argument '18446744073709551616' for option L could not be parsed … … 99 99 -h, --help print this help message 100 100 Child status: 101 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 0101 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 102 102 103 103 negative errors arg: … … 114 114 -h, --help print this help message 115 115 Child status: 116 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 0116 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 117 117 118 118 Argument '-1' for option l could not be parsed … … 128 128 -h, --help print this help message 129 129 Child status: 130 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 
WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 0130 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 131 131 132 132 Argument '-1' for option L could not be parsed … … 142 142 -h, --help print this help message 143 143 Child status: 144 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 0144 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 145 145 146 146 All Done! -
tests/configs/.expect/parsenums.x86.txt
r5408b59 rc2b3243 6 6 double :3.3 7 7 Child status: 8 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 08 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 9 9 10 10 all 0 arg: … … 15 15 double :0. 16 16 Child status: 17 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 017 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 18 18 19 19 negative vals arg: … … 24 24 double :-1. 25 25 Child status: 26 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 026 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 27 27 28 28 funky notation arg: … … 33 33 double :5000000. 34 34 Child status: 35 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 035 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 36 36 37 37 big values arg: … … 42 42 double :5000000. 43 43 Child status: 44 WIFEXITED : 1 WEXITSTATUS : 0 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 0 WIFCONTINUED: 044 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 45 45 46 46 too big values arg: … … 57 57 -h, --help print this help message 58 58 Child status: 59 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 059 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 60 60 61 61 Argument '4294967296' for option u could not be parsed … … 71 71 -h, --help print this help message 72 72 Child status: 73 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 073 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 74 74 75 75 Argument '4294967296' for option l could not be parsed … … 85 85 -h, --help print this help message 86 86 Child status: 87 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 087 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 88 88 89 89 Argument '18446744073709551616' for option L could not be parsed … … 99 99 -h, --help print this help message 100 100 Child status: 101 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 0101 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 102 102 103 103 negative errors arg: … … 114 114 -h, --help print this help message 115 115 Child status: 116 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 0116 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 117 117 118 118 Argument '-1' for option l could not be parsed … … 128 128 -h, --help print this help message 129 129 Child status: 130 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED 
: 0 WSTOPSIG : 1 WIFCONTINUED: 0130 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 131 131 132 132 Argument '-1' for option L could not be parsed … … 142 142 -h, --help print this help message 143 143 Child status: 144 WIFEXITED : 1 WEXITSTATUS : 1 WIFSIGNALED : 0 WTERMSIG : 0 WCOREDUMP : 0 WIFSTOPPED : 0 WSTOPSIG : 1 WIFCONTINUED: 0144 IFEXITED : 1, EXITSTATUS : 1, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 1, IFCONTINUED: 0 145 145 146 146 All Done! -
tests/configs/parsebools.cfa
r5408b59 rc2b3243 1 #include <stdlib.h> 2 #include <stdio.h> 3 #include <string.h> 4 5 #include <errno.h> 6 #include <signal.h> 7 8 extern "C" { 9 #include <sys/types.h> 10 #include <sys/wait.h> 11 #include <unistd.h> 12 } 1 // 2 // Cforall Version 1.0.0 Copyright (C) 2022 University of Waterloo 3 // 4 // The contents of this file are covered under the licence agreement in the 5 // file "LICENCE" distributed with Cforall. 6 // 7 // configs/parsebools.cfa 8 // Testing parsing of boolean arguments 9 // 10 // Author : Thierry Delisle 11 // Created On : Wed Oct 12 15:28:01 2022 12 // Last Modified By : 13 // Last Modified On : 14 // Update Count : 15 // 13 16 14 17 #include <parseargs.hfa> 15 18 #include <fstream.hfa> 16 19 17 int true_main(const char * exec); 20 #include "../meta/fork+exec.hfa" 18 21 19 22 int main(int argc, char * argv[]) { 20 if(!getenv("CFATEST_FORK_EXEC_TEXT")) return true_main(argv[0]);23 check_main(argv[0]); 21 24 22 25 bool YN = false; … … 48 51 } 49 52 50 int do_wait(pid_t pid) { 51 int wstatus; 52 int options = 0; 53 pid_t ret = waitpid(pid, &wstatus, options); 54 fflush(stdout); 55 if(ret < 0) { 56 fprintf(stderr, "Fork returned with error: %d '%s'\n", errno, strerror(errno)); 57 exit(1); 58 } 59 return wstatus; 60 } 61 62 pid_t strict_fork(void) { 63 fflush(stdout); 64 pid_t ret = fork(); 65 if(ret < 0) { 66 fprintf(stderr, "Fork returned with error: %d '%s'\n", errno, strerror(errno)); 67 exit(1); 68 } 69 return ret; 70 } 71 72 void print_status(int wstatus) { 73 printf("Child status:\n"); 74 printf(" WIFEXITED : %d", WIFEXITED(wstatus)); 75 printf(" WEXITSTATUS : %d", WEXITSTATUS(wstatus)); 76 printf(" WIFSIGNALED : %d", WIFSIGNALED(wstatus)); 77 printf(" WTERMSIG : %d", WTERMSIG(wstatus)); 78 printf(" WCOREDUMP : %d", WCOREDUMP(wstatus)); 79 printf(" WIFSTOPPED : %d", WIFSTOPPED(wstatus)); 80 printf(" WSTOPSIG : %d", WSTOPSIG(wstatus)); 81 printf(" WIFCONTINUED: %d\n", WIFCONTINUED(wstatus)); 82 } 83 84 int true_main(const char * path) { 85 char * env[] = { "CFATEST_FORK_EXEC_TEXT=1", 0p }; 86 53 int true_main(const char * path, char * env[]) { 87 54 printf("no arg:\n"); 88 55 if(pid_t child = strict_fork(); child == 0) { … … 97 64 print_status(status); 98 65 } 99 printf("\n");100 66 101 67 printf("all true/set arg:\n"); … … 111 77 print_status(status); 112 78 } 113 printf("\n");114 79 115 80 printf("all false/unset arg:\n"); … … 125 90 print_status(status); 126 91 } 127 printf("\n");128 92 129 93 printf("gibberish arg 1:\n"); … … 139 103 print_status(status); 140 104 } 141 printf("\n");142 105 143 106 printf("gibberish arg 2:\n"); … … 153 116 print_status(status); 154 117 } 155 printf("\n");156 118 157 119 printf("gibberish arg 3:\n"); … … 167 129 print_status(status); 168 130 } 169 printf("\n");170 131 171 132 printf("gibberish arg 4:\n"); … … 181 142 print_status(status); 182 143 } 183 printf("\n");184 144 185 145 printf("All Done!\n"); -
tests/configs/parsenums.cfa
r5408b59 rc2b3243 1 #include <stdlib.h> 2 #include <stdio.h> 3 #include <string.h> 4 5 #include <errno.h> 6 #include <signal.h> 7 8 extern "C" { 9 #include <sys/types.h> 10 #include <sys/wait.h> 11 #include <unistd.h> 12 } 1 // 2 // Cforall Version 1.0.0 Copyright (C) 2022 University of Waterloo 3 // 4 // The contents of this file are covered under the licence agreement in the 5 // file "LICENCE" distributed with Cforall. 6 // 7 // configs/parsenums.cfa 8 // Testing parsing of integer arguments 9 // 10 // Author : Thierry Delisle 11 // Created On : Wed Oct 12 15:28:01 2022 12 // Last Modified By : 13 // Last Modified On : 14 // Update Count : 15 // 13 16 14 17 #include <parseargs.hfa> 15 18 #include <fstream.hfa> 19 20 #include "../meta/fork+exec.hfa" 16 21 17 22 #if __SIZEOF_LONG__ == 4 … … 28 33 29 34 int main(int argc, char * argv[]) { 30 if(!getenv("CFATEST_FORK_EXEC_TEXT")) return true_main(argv[0]);35 check_main(argv[0]); 31 36 32 37 int i = -3; … … 56 61 } 57 62 58 int do_wait(pid_t pid) { 59 int wstatus; 60 int options = 0; 61 pid_t ret = waitpid(pid, &wstatus, options); 62 fflush(stdout); 63 if(ret < 0) { 64 fprintf(stderr, "Fork returned with error: %d '%s'\n", errno, strerror(errno)); 65 exit(1); 66 } 67 return wstatus; 68 } 69 70 pid_t strict_fork(void) { 71 fflush(stdout); 72 pid_t ret = fork(); 73 if(ret < 0) { 74 fprintf(stderr, "Fork returned with error: %d '%s'\n", errno, strerror(errno)); 75 exit(1); 76 } 77 return ret; 78 } 79 80 void print_status(int wstatus) { 81 printf("Child status:\n"); 82 printf(" WIFEXITED : %d", WIFEXITED(wstatus)); 83 printf(" WEXITSTATUS : %d", WEXITSTATUS(wstatus)); 84 printf(" WIFSIGNALED : %d", WIFSIGNALED(wstatus)); 85 printf(" WTERMSIG : %d", WTERMSIG(wstatus)); 86 printf(" WCOREDUMP : %d", WCOREDUMP(wstatus)); 87 printf(" WIFSTOPPED : %d", WIFSTOPPED(wstatus)); 88 printf(" WSTOPSIG : %d", WSTOPSIG(wstatus)); 89 printf(" WIFCONTINUED: %d\n", WIFCONTINUED(wstatus)); 90 } 91 92 int true_main(const char * path) { 93 char * env[] = { "CFATEST_FORK_EXEC_TEXT=1", 0p }; 94 63 int true_main(const char * path, char * env[]) { 95 64 printf("no arg:\n"); 96 65 if(pid_t child = strict_fork(); child == 0) { … … 105 74 print_status(status); 106 75 } 107 printf("\n");108 76 109 77 printf("all 0 arg:\n"); … … 119 87 print_status(status); 120 88 } 121 printf("\n");122 89 123 90 printf("negative vals arg:\n"); … … 133 100 print_status(status); 134 101 } 135 printf("\n");136 102 137 103 printf("funky notation arg:\n"); … … 147 113 print_status(status); 148 114 } 149 printf("\n");150 115 151 116 printf("big values arg:\n"); … … 161 126 print_status(status); 162 127 } 163 printf("\n");164 128 165 129 printf("too big values arg:\n"); … … 175 139 print_status(status); 176 140 } 177 printf("\n");178 141 179 142 if(pid_t child = strict_fork(); child == 0) { … … 188 151 print_status(status); 189 152 } 190 printf("\n");191 153 192 154 if(pid_t child = strict_fork(); child == 0) { … … 201 163 print_status(status); 202 164 } 203 printf("\n");204 165 205 166 if(pid_t child = strict_fork(); child == 0) { … … 214 175 print_status(status); 215 176 } 216 printf("\n");217 177 218 178 printf("negative errors arg:\n"); … … 228 188 print_status(status); 229 189 } 230 printf("\n");231 190 232 191 if(pid_t child = strict_fork(); child == 0) { … … 241 200 print_status(status); 242 201 } 243 printf("\n");244 202 245 203 if(pid_t child = strict_fork(); child == 0) { … … 254 212 print_status(status); 255 213 } 256 printf("\n");257 214 258 215 printf("All Done!\n"); -
tests/malloc.cfa
r5408b59 rc2b3243 1 #include < assert.h>1 #include <fstream.hfa> // sout 2 2 #include <malloc.h> // malloc_usable_size 3 3 #include <stdint.h> // uintptr_t 4 #include <stdlib.h> // posix_memalign5 #include <fstream.hfa>6 4 #include <stdlib.hfa> // access C malloc, realloc 7 5 #include <unistd.h> // getpagesize … … 10 8 int tests_failed; 11 9 size_t tAlign = 32; 12 struct S1 { int d 1; } __attribute__((aligned(32)));10 struct S1 { int data; } __attribute__(( aligned(32))); 13 11 typedef struct S1 T1; 14 12 15 void test_base( void * ip, size_t size, size_t align ) {13 void test_base( void * ip, size_t size, size_t align ) { 16 14 tests_total += 1; 17 bool passed = (malloc_size(ip) == size) && (malloc_usable_size(ip) >= size) && (malloc_alignment(ip) == align) && ((uintptr_t)ip % align == 0); 18 if (!passed) { 19 printf("failed test %2d: %4lu %4lu but got %4lu ( %3lu ) %4lu\n", tests_total, size, align, malloc_size(ip), malloc_usable_size(ip), malloc_alignment(ip)); 15 bool passed = (malloc_size( ip ) == size) && (malloc_usable_size( ip ) >= size) && (malloc_alignment( ip ) == align) && ((uintptr_t)ip % align == 0); 16 if ( ! passed ) { 17 sout | "base failed test" | tests_total | "ip" | ip | "size" | size | "align" | align | "but got size" | malloc_size( ip ) | "usable" | malloc_usable_size( ip ) | "align" | malloc_alignment( ip ); 18 tests_failed += 1; 19 } // if 20 } 21 22 void test_fill( void * ip_, size_t start, size_t end, char fill ) { 23 tests_total += 1; 24 bool passed = true; 25 char * ip = (char *) ip_; 26 for ( i; start ~ end ) passed = passed && (ip[i] == fill); 27 if ( ! passed ) { 28 sout | "fill1 failed test" | tests_total | "fill C"; 29 tests_failed += 1; 30 } // if 31 } 32 33 void test_use( void * ip_ ) { 34 tests_total += 1; 35 bool passed = true; 36 int * ip = (int *) ip_; 37 size_t size = malloc_size( ip ); 38 for ( i; 0 ~ size ~ sizeof(int)) ip[i/sizeof(int)] = 0xdeadbeef; 39 for ( i; 0 ~ size ~ sizeof(int)) passed = passed && (ip[i / sizeof(int)] == 0xdeadbeef); 40 size_t usize = malloc_usable_size( ip ); 41 for ( i; size ~ usize ~ sizeof(int)) ip[i / sizeof(int)] = -1; 42 for ( i; size ~ usize ~ sizeof(int)) passed = passed && (ip[i / sizeof(int)] == -1); 43 if ( ! 
passed ) { 44 sout | "failed test" | tests_total | "use"; 20 45 tests_failed += 1; 21 46 } 22 47 } 23 48 24 void test_fill( void * ip_, size_t start, size_t end, char fill) {25 tests_total += 1;26 bool passed = true;27 char * ip = (char *) ip_;28 for (i; start ~ end) passed = passed && (ip[i] == fill);29 if (!passed) {30 printf("failed test %2d: fill\n", tests_total);31 tests_failed += 1;32 }33 }34 35 void test_use( void * ip_) {36 tests_total += 1;37 bool passed = true;38 int * ip = (int *) ip_;39 size_t size = malloc_size(ip);40 for (i; 0 ~ size ~ sizeof(int)) ip[i/sizeof(int)] = 0xdeadbeef;41 for (i; 0 ~ size ~ sizeof(int)) passed = passed && (ip[i/sizeof(int)] == 0xdeadbeef);42 size_t usize = malloc_usable_size(ip);43 for (i; size ~ usize ~ sizeof(int)) ip[i/sizeof(int)] = -1;44 for (i; size ~ usize ~ sizeof(int)) passed = passed && (ip[i/sizeof(int)] == -1);45 if (!passed) {46 printf("failed test %2d: use\n", tests_total);47 tests_failed += 1;48 }49 }50 51 49 int main( void ) { 50 enum { dim = 8, align = 64, libAlign = libAlign() }; 52 51 size_t elemSize = sizeof(int); 53 size_t dim = 8;54 52 size_t size = dim * elemSize; 55 size_t align = 64;56 const size_t libAlign = libAlign();57 53 char fill = '\xde'; 58 54 int * ip; 59 55 T1 * tp; 60 56 61 // testing C 57 // testing C malloc 62 58 63 59 tests_total = 0; 64 60 tests_failed = 0; 65 61 66 ip = (int *) (void *)malloc( size );67 test_base( ip, size, libAlign);68 test_use( ip);69 free( ip);70 71 ip = (int *) (void *)malloc( 0 );72 test_base( ip, 0, libAlign);73 test_use( ip);74 free( ip);75 76 ip = (int *) (void *)aalloc( dim, elemSize );77 test_base( ip, size, libAlign);78 test_use( ip);79 free( ip);80 81 ip = (int *) (void *)aalloc( 0, elemSize );82 test_base( ip, 0, libAlign);83 test_use( ip);84 free( ip);85 86 ip = (int *) (void *)aalloc( dim, 0 );87 test_base( ip, 0, libAlign);88 test_use( ip);89 free( ip);90 91 ip = (int *) (void *)aalloc( 0, 0 );92 test_base( ip, 0, libAlign);93 test_use( ip);94 free( ip);95 96 ip = (int *) (void *)calloc( dim, elemSize );97 test_base( ip, size, libAlign);98 test_fill( ip, 0, size, '\0');99 test_use( ip);100 free( ip);101 102 ip = (int *) (void *)calloc( 0, elemSize );103 test_base( ip, 0, libAlign);104 test_fill( ip, 0, 0, '\0');105 test_use( ip);106 free( ip);107 108 ip = (int *) (void *)calloc( dim, 0 );109 test_base( ip, 0, libAlign);110 test_fill( ip, 0, 0, '\0');111 test_use( ip);112 free( ip);113 114 ip = (int *) (void *)malloc( size );115 ip = (int *) (void *) resize( (void *)ip, size / 4 );116 test_base( ip, size / 4, libAlign);117 test_use( ip);118 free( ip);119 120 ip = (int *) (void *)malloc( size );121 ip = (int *) (void *) resize( (void *)ip, size * 4 );122 test_base( ip, size * 4, libAlign);123 test_use( ip);124 free( ip);125 126 ip = (int *) (void *)malloc( size );127 ip = (int *) (void *) resize( (void *)ip, 0 );128 test_base( ip, 0, libAlign);129 test_use( ip);130 free( ip);131 132 ip = (int *) (void *)resize( NULL, size );133 test_base( ip, size, libAlign);134 test_use( ip);135 free( ip);136 137 ip = (int *) (void *)resize( 0p, size );138 test_base( ip, size, libAlign);139 test_use( ip);140 free( ip);141 142 ip = (int *) (void *)calloc( dim, elemSize );143 ip = (int *) (void *) realloc( (void *)ip, size / 4 );144 test_base( ip, size / 4, libAlign);145 test_fill( ip, 0, size / 4, '\0');146 test_use( ip);147 free( ip);148 149 ip = (int *) (void *)calloc( dim, elemSize );150 ip = (int *) (void *) realloc( (void *)ip, size * 4 );151 test_base( ip, size * 4, libAlign);152 
test_fill( ip, 0, size * 4, '\0');153 test_use( ip);154 free( ip);155 156 ip = (int *) (void *)calloc( dim, elemSize );157 ip = (int *) (void *) realloc( (void *)ip, 0 );158 test_base( ip, 0, libAlign);159 test_use( ip);160 free( ip);161 162 ip = (int *) (void *)realloc( NULL, size );163 test_base( ip, size , libAlign);164 test_use( ip);165 free( ip);166 167 ip = (int *) (void *)realloc( 0p, size );168 test_base( ip, size, libAlign);169 test_use( ip);170 free( ip);171 172 ip = (int *) (void *)memalign( align, size );173 test_base( ip, size, align);174 test_use( ip);175 free( ip);176 177 ip = (int *) (void *)memalign( align, 0 );178 test_base( ip, 0, libAlign);179 test_use( ip);180 free( ip);181 182 ip = (int *) (void *)amemalign( align, dim, elemSize );183 test_base( ip, size, align);184 test_use( ip);185 free( ip);186 187 ip = (int *) (void *)amemalign( align, 0, elemSize );188 test_base( ip, 0, libAlign);189 test_use( ip);190 free( ip);191 192 ip = (int *) (void *)amemalign( align, dim, 0 );193 test_base( ip, 0, libAlign);194 test_use( ip);195 free( ip);196 197 ip = (int *) (void *)cmemalign( align, dim, elemSize );198 test_base( ip, size, align);199 test_fill( ip, 0, size, '\0');200 test_use( ip);201 free( ip);202 203 ip = (int *) (void *)cmemalign( align, 0, elemSize );204 test_base( ip, 0, libAlign);205 test_use( ip);206 free( ip);207 208 ip = (int *) (void *)cmemalign( align, dim, 0 );209 test_base( ip, 0, libAlign);210 test_use( ip);211 free( ip);212 213 ip = (int *) (void *)aligned_alloc( align, size );214 test_base( ip, size, align);215 test_use( ip);216 free( ip);217 218 ip = (int *) (void *)aligned_alloc( align, 0 );219 test_base( ip, 0, libAlign);220 test_use( ip);221 free( ip);222 223 (int)posix_memalign( (void **) &ip, align, size );224 test_base( ip, size, align);225 test_use( ip);226 free( ip);227 228 (int)posix_memalign( (void **) &ip, align, 0 );229 test_base( ip, 0, libAlign);230 test_use( ip);231 free( ip);232 233 ip = (int *) (void *)valloc( size );234 test_base( ip, size, getpagesize());235 test_use( ip);236 free( ip);237 238 ip = (int *) (void *)valloc( 0 );239 test_base( ip, 0, libAlign);240 test_use( ip);241 free( ip);242 243 ip = (int *) (void *)pvalloc( getpagesize() * 3 / 2 );244 test_base( ip, getpagesize() * 2, getpagesize());245 test_use( ip);246 free( ip);247 248 ip = (int *) (void *)pvalloc( 0 );249 test_base( ip, 0, libAlign);250 test_use( ip);251 free( ip);252 253 ip = (int *) (void *)malloc( size );254 ip = (int *) (void *) resize( (void *)ip, libAlign, size / 2 );255 test_base( ip, size / 2, libAlign);256 test_use( ip);257 free( ip);258 259 ip = (int *) (void *)aligned_alloc( align, size );260 ip = (int *) (void *) resize( (void *)ip, align, size / 2 );261 test_base( ip, size / 2, align);262 test_use( ip);263 free( ip);264 265 ip = (int *) (void *)malloc( size );266 ip = (int *) (void *) resize( (void *)ip, align, size / 4 );267 test_base( ip, size / 4, align);268 test_use( ip);269 free( ip);270 271 ip = (int *) (void *)malloc( size );272 ip = (int *) (void *) resize( (void *)ip, align, 0 );273 test_base( ip, 0, libAlign);274 test_use( ip);275 free( ip);276 277 ip = (int *) (void *)resize( NULL, align, size );278 test_base( ip, size, align);279 test_use( ip);280 free( ip);281 282 ip = (int *) (void *)resize( 0p, align, size );283 test_base( ip, size, align);284 test_use( ip);285 free( ip);286 287 ip = (int *) (void *)calloc( dim, elemSize );288 ip = (int *) (void *) realloc( (void *)ip, libAlign, size / 2 );289 test_base( ip, size / 2, libAlign);290 
test_fill( ip, 0, size / 2, '\0');291 test_use( ip);292 free( ip);293 294 ip = (int *) (void *)cmemalign( align, dim, elemSize );295 ip = (int *) (void *) realloc( (void *)ip, align, size / 2 );296 test_base( ip, size / 2, align);297 test_fill( ip, 0, size / 2, '\0');298 test_use( ip);299 free( ip);300 301 ip = (int *) (void *)calloc( dim, elemSize );302 ip = (int *) (void *) realloc( (void *)ip, align, size / 4 );303 test_base( ip, size / 4, align);304 test_fill( ip, 0, size / 4, '\0');305 test_use( ip);306 free( ip);307 308 ip = (int *) (void *)calloc( dim, elemSize );309 ip = (int *) (void *) realloc( (void *) ip, 0, size * 4 );310 test_base( ip, size * 4, libAlign);311 test_fill( ip, 0, size * 4, '\0');312 test_use( ip);313 free( ip);314 315 ip = (int *) (void *)calloc( dim, elemSize );316 ip = (int *) (void *) realloc( (void *)ip, align, 0 );317 test_base( ip, 0, libAlign);318 test_use( ip);319 free( ip);320 321 free( 0p ); 322 free( NULL ); 323 324 if (tests_failed == 0) printf("PASSED C malloc tests\n\n");325 else printf("failed C malloc tests : %d/%d\n\n", tests_failed, tests_total);62 ip = (int *)malloc( size ); 63 test_base( ip, size, libAlign ); 64 test_use( ip ); 65 free( ip ); 66 67 ip = (int *)malloc( 0 ); 68 test_base( ip, 0, libAlign ); 69 test_use( ip ); 70 free( ip ); 71 72 ip = (int *)aalloc( dim, elemSize ); 73 test_base( ip, size, libAlign ); 74 test_use( ip ); 75 free( ip ); 76 77 ip = (int *)aalloc( 0, elemSize ); 78 test_base( ip, 0, libAlign ); 79 test_use( ip ); 80 free( ip ); 81 82 ip = (int *)aalloc( dim, 0 ); 83 test_base( ip, 0, libAlign ); 84 test_use( ip ); 85 free( ip ); 86 87 ip = (int *)aalloc( 0, 0 ); 88 test_base( ip, 0, libAlign ); 89 test_use( ip ); 90 free( ip ); 91 92 ip = (int *)calloc( dim, elemSize ); 93 test_base( ip, size, libAlign ); 94 test_fill( ip, 0, size, '\0' ); 95 test_use( ip ); 96 free( ip ); 97 98 ip = (int *)calloc( 0, elemSize ); 99 test_base( ip, 0, libAlign ); 100 test_fill( ip, 0, 0, '\0' ); 101 test_use( ip ); 102 free( ip ); 103 104 ip = (int *)calloc( dim, 0 ); 105 test_base( ip, 0, libAlign ); 106 test_fill( ip, 0, 0, '\0' ); 107 test_use( ip ); 108 free( ip ); 109 110 ip = (int *)malloc( size ); 111 ip = (int *)resize( ip, size / 4 ); 112 test_base( ip, size / 4, libAlign ); 113 test_use( ip ); 114 free( ip ); 115 116 ip = (int *)malloc( size ); 117 ip = (int *)resize( ip, size * 4 ); 118 test_base( ip, size * 4, libAlign ); 119 test_use( ip ); 120 free( ip ); 121 122 ip = (int *)malloc( size ); 123 ip = (int *)resize( ip, 0 ); 124 test_base( ip, 0, libAlign ); 125 test_use( ip ); 126 free( ip ); 127 128 ip = (int *)resize( NULL, size ); 129 test_base( ip, size, libAlign ); 130 test_use( ip ); 131 free( ip ); 132 133 ip = (int *)resize( 0p, size ); 134 test_base( ip, size, libAlign ); 135 test_use( ip ); 136 free( ip ); 137 138 ip = (int *)calloc( dim, elemSize ); 139 ip = (int *)realloc( ip, size / 4 ); 140 test_base( ip, size / 4, libAlign ); 141 test_fill( ip, 0, size / 4, '\0' ); 142 test_use( ip ); 143 free( ip ); 144 145 ip = (int *)calloc( dim, elemSize ); 146 ip = (int *)realloc( ip, size * 4 ); 147 test_base( ip, size * 4, libAlign ); 148 test_fill( ip, 0, size * 4, '\0' ); 149 test_use( ip ); 150 free( ip ); 151 152 ip = (int *)calloc( dim, elemSize ); 153 ip = (int *)realloc( ip, 0 ); 154 test_base( ip, 0, libAlign ); 155 test_use( ip ); 156 free( ip ); 157 158 ip = (int *)realloc( NULL, size ); 159 test_base( ip, size , libAlign ); 160 test_use( ip ); 161 free( ip ); 162 163 ip = (int *)realloc( 0p, size ); 164 
test_base( ip, size, libAlign ); 165 test_use( ip ); 166 free( ip ); 167 168 ip = (int *)memalign( align, size ); 169 test_base( ip, size, align ); 170 test_use( ip ); 171 free( ip ); 172 173 ip = (int *)memalign( align, 0 ); 174 test_base( ip, 0, libAlign ); 175 test_use( ip ); 176 free( ip ); 177 178 ip = (int *)amemalign( align, dim, elemSize ); 179 test_base( ip, size, align ); 180 test_use( ip ); 181 free( ip ); 182 183 ip = (int *)amemalign( align, 0, elemSize ); 184 test_base( ip, 0, libAlign ); 185 test_use( ip ); 186 free( ip ); 187 188 ip = (int *)amemalign( align, dim, 0 ); 189 test_base( ip, 0, libAlign ); 190 test_use( ip ); 191 free( ip ); 192 193 ip = (int *)cmemalign( align, dim, elemSize ); 194 test_base( ip, size, align ); 195 test_fill( ip, 0, size, '\0' ); 196 test_use( ip ); 197 free( ip ); 198 199 ip = (int *)cmemalign( align, 0, elemSize ); 200 test_base( ip, 0, libAlign ); 201 test_use( ip ); 202 free( ip ); 203 204 ip = (int *)cmemalign( align, dim, 0 ); 205 test_base( ip, 0, libAlign ); 206 test_use( ip ); 207 free( ip ); 208 209 ip = (int *)aligned_alloc( align, size ); 210 test_base( ip, size, align ); 211 test_use( ip ); 212 free( ip ); 213 214 ip = (int *)aligned_alloc( align, 0 ); 215 test_base( ip, 0, libAlign ); 216 test_use( ip ); 217 free( ip ); 218 219 posix_memalign( (void **) &ip, align, size ); 220 test_base( ip, size, align ); 221 test_use( ip ); 222 free( ip ); 223 224 posix_memalign( (void **) &ip, align, 0 ); 225 test_base( ip, 0, libAlign ); 226 test_use( ip ); 227 free( ip ); 228 229 ip = (int *)valloc( size ); 230 test_base( ip, size, getpagesize() ); 231 test_use( ip ); 232 free( ip ); 233 234 ip = (int *)valloc( 0 ); 235 test_base( ip, 0, libAlign ); 236 test_use( ip ); 237 free( ip ); 238 239 ip = (int *)pvalloc( getpagesize() * 3 / 2 ); 240 test_base( ip, getpagesize() * 2, getpagesize() ); 241 test_use( ip ); 242 free( ip ); 243 244 ip = (int *)pvalloc( 0 ); 245 test_base( ip, 0, libAlign ); 246 test_use( ip ); 247 free( ip ); 248 249 ip = (int *)malloc( size ); 250 ip = (int *)resize( ip, libAlign, size / 2 ); 251 test_base( ip, size / 2, libAlign ); 252 test_use( ip ); 253 free( ip ); 254 255 ip = (int *)aligned_alloc( align, size ); 256 ip = (int *)resize( ip, align, size / 2 ); 257 test_base( ip, size / 2, align ); 258 test_use( ip ); 259 free( ip ); 260 261 ip = (int *)malloc( size ); 262 ip = (int *)resize( ip, align, size / 4 ); 263 test_base( ip, size / 4, align ); 264 test_use( ip ); 265 free( ip ); 266 267 ip = (int *)malloc( size ); 268 ip = (int *)resize( ip, align, 0 ); 269 test_base( ip, 0, libAlign ); 270 test_use( ip ); 271 free( ip ); 272 273 ip = (int *)resize( NULL, align, size ); 274 test_base( ip, size, align ); 275 test_use( ip ); 276 free( ip ); 277 278 ip = (int *)resize( 0p, align, size ); 279 test_base( ip, size, align ); 280 test_use( ip ); 281 free( ip ); 282 283 ip = (int *)calloc( dim, elemSize ); 284 ip = (int *)realloc( ip, libAlign, size / 2 ); 285 test_base( ip, size / 2, libAlign ); 286 test_fill( ip, 0, size / 2, '\0' ); 287 test_use( ip ); 288 free( ip ); 289 290 ip = (int *)cmemalign( align, dim, elemSize ); 291 ip = (int *)realloc( ip, align, size / 2 ); 292 test_base( ip, size / 2, align ); 293 test_fill( ip, 0, size / 2, '\0' ); 294 test_use( ip ); 295 free( ip ); 296 297 ip = (int *)calloc( dim, elemSize ); 298 ip = (int *)realloc( ip, align, size / 4 ); 299 test_base( ip, size / 4, align ); 300 test_fill( ip, 0, size / 4, '\0' ); 301 test_use( ip ); 302 free( ip ); 303 304 ip = (int *)calloc( dim, 
elemSize ); 305 ip = (int *)realloc( ip, libAlign, size * 4 ); 306 test_base( ip, size * 4, libAlign ); 307 test_fill( ip, 0, size * 4, '\0' ); 308 test_use( ip ); 309 free( ip ); 310 311 ip = (int *)calloc( dim, elemSize ); 312 ip = (int *)realloc( ip, align, 0 ); 313 test_base( ip, 0, libAlign ); 314 test_use( ip ); 315 free( ip ); 316 317 free( 0p ); // sanity check 318 free( NULL ); // sanity check 319 320 if (tests_failed == 0) sout | "PASSED C malloc tests" | nl | nl; 321 else sout | "failed C malloc tests" | tests_failed | tests_total | nl | nl; 326 322 327 323 // testing CFA malloc … … 331 327 332 328 ip = malloc(); 333 test_base( ip, elemSize, libAlign);334 test_use( ip);335 free( ip);329 test_base( ip, elemSize, libAlign ); 330 test_use( ip ); 331 free( ip ); 336 332 337 333 ip = aalloc( dim ); 338 test_base( ip, size, libAlign);339 test_use( ip);340 free( ip);334 test_base( ip, size, libAlign ); 335 test_use( ip ); 336 free( ip ); 341 337 342 338 ip = aalloc( 0 ); 343 test_base( ip, 0, libAlign);344 test_use( ip);345 free( ip);339 test_base( ip, 0, libAlign ); 340 test_use( ip ); 341 free( ip ); 346 342 347 343 ip = calloc( dim ); 348 test_base( ip, size, libAlign);349 test_fill( ip, 0, size, '\0');350 test_use( ip);351 free( ip);344 test_base( ip, size, libAlign ); 345 test_fill( ip, 0, size, '\0' ); 346 test_use( ip ); 347 free( ip ); 352 348 353 349 ip = calloc( 0 ); 354 test_base( ip, 0, libAlign);355 test_use( ip);356 free( ip);350 test_base( ip, 0, libAlign ); 351 test_use( ip ); 352 free( ip ); 357 353 358 354 ip = aalloc( dim ); 359 355 ip = resize( ip, size / 4 ); 360 test_base( ip, size / 4, libAlign);361 test_use( ip);362 free( ip);356 test_base( ip, size / 4, libAlign ); 357 test_use( ip ); 358 free( ip ); 363 359 364 360 ip = aalloc( dim ); 365 361 ip = resize( ip, size * 4 ); 366 test_base( ip, size * 4, libAlign);367 test_use( ip);368 free( ip);362 test_base( ip, size * 4, libAlign ); 363 test_use( ip ); 364 free( ip ); 369 365 370 366 ip = aalloc( dim ); 371 367 ip = resize( ip, 0 ); 372 test_base( ip, 0, libAlign);373 test_use( ip);374 free( ip);375 376 ip = resize( (int*)0p, size );377 test_base( ip, size, libAlign);378 test_use( ip);379 free( ip);380 381 ip = resize( (int*)0p, size );382 test_base( ip, size, libAlign);383 test_use( ip);384 free( ip);368 test_base( ip, 0, libAlign ); 369 test_use( ip ); 370 free( ip ); 371 372 ip = resize( 0p, size ); 373 test_base( ip, size, libAlign ); 374 test_use( ip ); 375 free( ip ); 376 377 ip = resize( 0p, size ); 378 test_base( ip, size, libAlign ); 379 test_use( ip ); 380 free( ip ); 385 381 386 382 ip = calloc( dim ); 387 383 ip = realloc( ip, size / 4 ); 388 test_base( ip, size / 4, libAlign);389 test_fill( ip, 0, size / 4, '\0');390 test_use( ip);391 free( ip);384 test_base( ip, size / 4, libAlign ); 385 test_fill( ip, 0, size / 4, '\0' ); 386 test_use( ip ); 387 free( ip ); 392 388 393 389 ip = calloc( dim ); 394 390 ip = realloc( ip, size * 4 ); 395 test_base( ip, size * 4, libAlign);396 test_fill( ip, 0, size, '\0');397 test_use( ip);398 free( ip);391 test_base( ip, size * 4, libAlign ); 392 test_fill( ip, 0, size, '\0' ); 393 test_use( ip ); 394 free( ip ); 399 395 400 396 ip = calloc( dim ); 401 397 ip = realloc( ip, 0 ); 402 test_base( ip, 0, libAlign);403 test_use( ip);404 free( ip);405 406 ip = realloc( (int*)0p, size );407 test_base( ip, size , libAlign);408 test_use( ip);409 free( ip);410 411 ip = realloc( (int*)0p, size );412 test_base( ip, size, libAlign);413 test_use( ip);414 free( ip);398 test_base( 
ip, 0, libAlign ); 399 test_use( ip ); 400 free( ip ); 401 402 ip = realloc( 0p, size ); 403 test_base( ip, size , libAlign ); 404 test_use( ip ); 405 free( ip ); 406 407 ip = realloc( 0p, size ); 408 test_base( ip, size, libAlign ); 409 test_use( ip ); 410 free( ip ); 415 411 416 412 ip = memalign( align ); 417 test_base( ip, elemSize, align);418 test_use( ip);419 free( ip);413 test_base( ip, elemSize, align ); 414 test_use( ip ); 415 free( ip ); 420 416 421 417 ip = amemalign( align, dim ); 422 test_base( ip, size, align);423 test_use( ip);424 free( ip);418 test_base( ip, size, align ); 419 test_use( ip ); 420 free( ip ); 425 421 426 422 ip = amemalign( align, 0 ); 427 test_base( ip, 0, libAlign);428 test_use( ip);429 free( ip);423 test_base( ip, 0, libAlign ); 424 test_use( ip ); 425 free( ip ); 430 426 431 427 ip = cmemalign( align, dim ); 432 test_base( ip, size, align);433 test_fill( ip, 0, size, '\0');434 test_use( ip);435 free( ip);428 test_base( ip, size, align ); 429 test_fill( ip, 0, size, '\0' ); 430 test_use( ip ); 431 free( ip ); 436 432 437 433 ip = cmemalign( align, 0 ); 438 test_base( ip, 0, libAlign);439 test_use( ip);440 free( ip);434 test_base( ip, 0, libAlign ); 435 test_use( ip ); 436 free( ip ); 441 437 442 438 ip = aligned_alloc( align ); 443 test_base( ip, elemSize, align);444 test_use( ip);445 free( ip);446 447 (int)posix_memalign( (int **) &ip, align );448 test_base( ip, elemSize, align);449 test_use( ip);450 free( ip);439 test_base( ip, elemSize, align ); 440 test_use( ip ); 441 free( ip ); 442 443 posix_memalign( (int **) &ip, align ); 444 test_base( ip, elemSize, align ); 445 test_use( ip ); 446 free( ip ); 451 447 452 448 ip = valloc(); 453 test_base( ip, elemSize, getpagesize());454 test_use( ip);455 free( ip);449 test_base( ip, elemSize, getpagesize() ); 450 test_use( ip ); 451 free( ip ); 456 452 457 453 ip = pvalloc(); 458 test_base( ip, getpagesize(), getpagesize());459 test_use( ip);460 free( ip);461 462 if (tests_failed == 0) printf("PASSED CFA malloc tests\n\n");463 else printf("failed CFA malloc tests : %d/%d\n\n", tests_failed, tests_total);454 test_base( ip, getpagesize(), getpagesize() ); 455 test_use( ip ); 456 free( ip ); 457 458 if (tests_failed == 0) sout | "PASSED CFA malloc tests" | nl | nl; 459 else sout | "failed CFA malloc tests" | tests_failed | tests_total | nl | nl; 464 460 465 461 // testing CFA malloc with aligned struct … … 471 467 472 468 tp = malloc(); 473 test_base( tp, elemSize, tAlign);474 test_use( tp);475 free( tp);469 test_base( tp, elemSize, tAlign ); 470 test_use( tp ); 471 free( tp ); 476 472 477 473 tp = aalloc( dim ); 478 test_base( tp, size, tAlign);479 test_use( tp);480 free( tp);474 test_base( tp, size, tAlign ); 475 test_use( tp ); 476 free( tp ); 481 477 482 478 tp = aalloc( 0 ); 483 test_base( tp, 0, libAlign);484 test_use( tp);485 free( tp);479 test_base( tp, 0, libAlign ); 480 test_use( tp ); 481 free( tp ); 486 482 487 483 tp = calloc( dim ); 488 test_base( tp, size, tAlign);489 test_fill( tp, 0, size, '\0');490 test_use( tp);491 free( tp);484 test_base( tp, size, tAlign ); 485 test_fill( tp, 0, size, '\0' ); 486 test_use( tp ); 487 free( tp ); 492 488 493 489 tp = calloc( 0 ); 494 test_base( tp, 0, libAlign);495 test_use( tp);496 free( tp);490 test_base( tp, 0, libAlign ); 491 test_use( tp ); 492 free( tp ); 497 493 498 494 tp = aalloc( dim ); 499 495 tp = resize( tp, size / 4 ); 500 test_base( tp, size / 4, tAlign);501 test_use( tp);502 free( tp);496 test_base( tp, size / 4, tAlign ); 497 test_use( tp ); 498 
free( tp ); 503 499 504 500 tp = malloc(); 505 501 tp = resize( tp, size * 4 ); 506 test_base( tp, size * 4, tAlign);507 test_use( tp);508 free( tp);502 test_base( tp, size * 4, tAlign ); 503 test_use( tp ); 504 free( tp ); 509 505 510 506 tp = aalloc( dim ); 511 507 tp = resize( tp, 0 ); 512 test_base( tp, 0, libAlign);513 test_use( tp);514 free( tp);508 test_base( tp, 0, libAlign ); 509 test_use( tp ); 510 free( tp ); 515 511 516 512 tp = resize( (T1*)0p, size ); 517 test_base( tp, size, tAlign);518 test_use( tp);519 free( tp);513 test_base( tp, size, tAlign ); 514 test_use( tp ); 515 free( tp ); 520 516 521 517 tp = resize( (T1*)0p, size ); 522 test_base( tp, size, tAlign);523 test_use( tp);524 free( tp);518 test_base( tp, size, tAlign ); 519 test_use( tp ); 520 free( tp ); 525 521 526 522 tp = calloc( dim ); 527 523 tp = realloc( tp, size / 4 ); 528 test_base( tp, size / 4, tAlign);529 test_fill( tp, 0, size / 4, '\0');530 test_use( tp);531 free( tp);524 test_base( tp, size / 4, tAlign ); 525 test_fill( tp, 0, size / 4, '\0' ); 526 test_use( tp ); 527 free( tp ); 532 528 533 529 tp = calloc( dim ); 534 530 tp = realloc( tp, size * 4 ); 535 test_base( tp, size * 4, tAlign);536 test_fill( tp, 0, size, '\0');537 test_use( tp);538 free( tp);531 test_base( tp, size * 4, tAlign ); 532 test_fill( tp, 0, size, '\0' ); 533 test_use( tp ); 534 free( tp ); 539 535 540 536 tp = calloc( dim ); 541 537 tp = realloc( tp, 0 ); 542 test_base( tp, 0, libAlign);543 test_use( tp);544 free( tp);538 test_base( tp, 0, libAlign ); 539 test_use( tp ); 540 free( tp ); 545 541 546 542 tp = realloc( (T1*)0p, size ); 547 test_base( tp, size , tAlign);548 test_use( tp);549 free( tp);543 test_base( tp, size , tAlign ); 544 test_use( tp ); 545 free( tp ); 550 546 551 547 tp = realloc( (T1*)0p, size ); 552 test_base( tp, size, tAlign);553 test_use( tp);554 free( tp);548 test_base( tp, size, tAlign ); 549 test_use( tp ); 550 free( tp ); 555 551 556 552 tp = memalign( align ); 557 test_base( tp, elemSize, align);558 test_use( tp);559 free( tp);553 test_base( tp, elemSize, align ); 554 test_use( tp ); 555 free( tp ); 560 556 561 557 tp = amemalign( align, dim ); 562 test_base( tp, size, align);563 test_use( tp);564 free( tp);558 test_base( tp, size, align ); 559 test_use( tp ); 560 free( tp ); 565 561 566 562 tp = amemalign( align, 0 ); 567 test_base( tp, 0, libAlign);568 test_use( tp);569 free( tp);563 test_base( tp, 0, libAlign ); 564 test_use( tp ); 565 free( tp ); 570 566 571 567 tp = cmemalign( align, dim ); 572 test_base( tp, size, align);573 test_fill( tp, 0, size, '\0');574 test_use( tp);575 free( tp);568 test_base( tp, size, align ); 569 test_fill( tp, 0, size, '\0' ); 570 test_use( tp ); 571 free( tp ); 576 572 577 573 tp = cmemalign( align, 0 ); 578 test_base( tp, 0, libAlign);579 test_use( tp);580 free( tp);574 test_base( tp, 0, libAlign ); 575 test_use( tp ); 576 free( tp ); 581 577 582 578 tp = aligned_alloc( align ); 583 test_base( tp, elemSize, align);584 test_use( tp);585 free( tp);586 587 (int) posix_memalign( (T1 **)&tp, align );588 test_base( tp, elemSize, align);589 test_use( tp);590 free( tp);579 test_base( tp, elemSize, align ); 580 test_use( tp ); 581 free( tp ); 582 583 posix_memalign( (T1 **)&tp, align ); 584 test_base( tp, elemSize, align ); 585 test_use( tp ); 586 free( tp ); 591 587 592 588 tp = valloc(); 593 test_base( tp, elemSize, getpagesize());594 test_use( tp);595 free( tp);589 test_base( tp, elemSize, getpagesize() ); 590 test_use( tp ); 591 free( tp ); 596 592 597 593 tp = pvalloc(); 
598 test_base(tp, getpagesize(), getpagesize()); 599 test_use(tp); 600 free(tp); 601 602 if (tests_failed == 0) printf("PASSED CFA malloc tests (aligned struct)\n\n"); 603 else printf("failed CFA malloc tests (aligned struct) : %d/%d\n\n", tests_failed, tests_total); 604 605 return 0; 594 test_base( tp, getpagesize(), getpagesize() ); 595 test_use( tp ); 596 free( tp ); 597 598 if ( tests_failed == 0 ) sout | "PASSED CFA malloc tests (aligned struct)" | nl | nl; 599 else sout | "failed CFA malloc tests (aligned struct)" | tests_failed | tests_total | nl | nl; 606 600 } 607 601 -
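Besides the stream I/O conversion, the rewritten malloc.cfa leans on CFA's type-driven allocators, where the element size is inferred from the pointer's type rather than passed as a sizeof expression. A small sketch of the calls exercised above, kept deliberately simpler than the test (which also verifies the results with malloc_size, malloc_usable_size, and malloc_alignment):

#include <stdlib.hfa>                   // CFA malloc, aalloc, calloc, memalign, free

int main() {
	int * ip = malloc();                // one int; the size comes from the pointer type
	int * ap = aalloc( 8 );             // array of 8 ints, uninitialized
	int * cp = calloc( 8 );             // array of 8 ints, zero-filled
	int * mp = memalign( 64 );          // one int at 64-byte alignment
	*ip = 1; ap[7] = 2; mp[0] = 3;      // cp[0] is already 0
	free( ip ); free( ap ); free( cp ); free( mp );
}

The same pattern carries over to the aligned struct T1 in the test, where tp = malloc() is checked against tAlign: the 32-byte alignment attribute is respected because the allocators see the type.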
tests/meta/.expect/fork+exec.txt
r5408b59 rc2b3243 4 4 Success! 5 5 Child status: 6 WIFEXITED : 1 7 WEXITSTATUS : 0 8 WIFSIGNALED : 0 9 WTERMSIG : 0 10 WCOREDUMP : 0 11 WIFSTOPPED : 0 12 WSTOPSIG : 0 13 WIFCONTINUED: 0 6 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 7 14 8 1 arg: 15 9 arguments are: … … 17 11 Success! 18 12 Child status: 19 WIFEXITED : 1 20 WEXITSTATUS : 0 21 WIFSIGNALED : 0 22 WTERMSIG : 0 23 WCOREDUMP : 0 24 WIFSTOPPED : 0 25 WSTOPSIG : 0 26 WIFCONTINUED: 0 13 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 14 27 15 5 arg: 28 16 arguments are: … … 34 22 Success! 35 23 Child status: 36 WIFEXITED : 1 37 WEXITSTATUS : 0 38 WIFSIGNALED : 0 39 WTERMSIG : 0 40 WCOREDUMP : 0 41 WIFSTOPPED : 0 42 WSTOPSIG : 0 43 WIFCONTINUED: 0 24 IFEXITED : 1, EXITSTATUS : 0, IFSIGNALED : 0, TERMSIG : 0, COREDUMP : 0, IFSTOPPED : 0, STOPSIG : 0, IFCONTINUED: 0 25 44 26 All Done! -
tests/meta/fork+exec.hfa
r5408b59 rc2b3243 28 28 } 29 29 30 static int true_main(const char * exec, char * env[]);30 static int true_main(const char * path, char * env[]); 31 31 32 32 static int do_wait(pid_t pid) { … … 55 55 static void print_status(int wstatus) { 56 56 printf("Child status:\n"); 57 printf(" WIFEXITED : %d\n", WIFEXITED(wstatus)); 58 printf(" WEXITSTATUS : %d\n", WEXITSTATUS(wstatus)); 59 printf(" WIFSIGNALED : %d\n", WIFSIGNALED(wstatus)); 60 printf(" WTERMSIG : %d\n", WTERMSIG(wstatus)); 61 printf(" WCOREDUMP : %d\n", WCOREDUMP(wstatus)); 62 printf(" WIFSTOPPED : %d\n", WIFSTOPPED(wstatus)); 63 printf(" WSTOPSIG : %d\n", WSTOPSIG(wstatus)); 64 printf(" WIFCONTINUED: %d\n", WIFCONTINUED(wstatus)); 57 printf("IFEXITED : %d, ", WIFEXITED(wstatus)); 58 printf("EXITSTATUS : %d, ", WEXITSTATUS(wstatus)); 59 printf("IFSIGNALED : %d, ", WIFSIGNALED(wstatus)); 60 printf("TERMSIG : %d, ", WTERMSIG(wstatus)); 61 printf("COREDUMP : %d, ", WCOREDUMP(wstatus)); 62 printf("IFSTOPPED : %d, ", WIFSTOPPED(wstatus)); 63 printf("STOPSIG : %d, ", WSTOPSIG(wstatus)); 64 printf("IFCONTINUED: %d", WIFCONTINUED(wstatus)); 65 printf("\n"); 66 printf("\n"); 65 67 } 66 68 -
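The new tests/meta/fork+exec.hfa factors the fork/wait/status-reporting boilerplate out of the parseargs tests, which is why the parsebools.cfa and parsenums.cfa hunks above shrink. Here is a minimal sketch of the underlying POSIX pattern, using only calls visible in the hunks; the real helpers are strict_fork, do_wait, and print_status, plus the check_main entry hook:

#include <stdio.h>
#include <stdlib.h>

extern "C" {
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>
}

int main() {
	fflush( stdout );                                  // flush before fork so the child does not re-emit buffered output
	pid_t child = fork();
	if ( child < 0 ) { perror( "fork" ); exit( 1 ); }
	if ( child == 0 ) {                                // child: the tests exec the target program here
		exit( 0 );
	}
	int wstatus;
	if ( waitpid( child, &wstatus, 0 ) < 0 ) { perror( "waitpid" ); exit( 1 ); }
	printf( "IFEXITED : %d, EXITSTATUS : %d\n",        // same one-line style as the new print_status
		WIFEXITED( wstatus ), WEXITSTATUS( wstatus ) );
}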
tests/test.py
r5408b59 rc2b3243 72 72 # this is a valid name, let's check if it already exists 73 73 found = [test for test in all_tests if canonical_path( test.target() ) == testname] 74 setup = itertools.product(settings.all_arch if options.arch else [None])74 setup = settings.all_arch if options.arch else [None] 75 75 if not found: 76 76 # it's a new name, create it according to the name and specified architecture