source: libcfa/src/heap.cfa @ 099f5bd

Last change on this file since 099f5bd was 1aa6ecb, checked in by Peter A. Buhr <pabuhr@…>, 6 years ago

check for maximum allocation and use macro to get header

1//
2// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// heap.c --
8//
9// Author : Peter A. Buhr
10// Created On : Tue Dec 19 21:58:35 2017
11// Last Modified By : Peter A. Buhr
12// Last Modified On : Fri Oct 18 07:42:09 2019
13// Update Count : 556
14//
15
16#include <unistd.h> // sbrk, sysconf
17#include <stdbool.h> // true, false
18#include <stdio.h> // snprintf, fileno
19#include <errno.h> // errno
20extern "C" {
21#include <sys/mman.h> // mmap, munmap
22} // extern "C"
23
24// #comment TD : Many of these should be merged into math I believe
25#include "bits/align.hfa" // libPow2
26#include "bits/defs.hfa" // likely, unlikely
27#include "bits/locks.hfa" // __spinlock_t
28#include "startup.hfa" // STARTUP_PRIORITY_MEMORY
29#include "stdlib.hfa" // bsearchl
30#include "malloc.h"
31
32
33static bool traceHeap = false;
34
35inline bool traceHeap() {
36 return traceHeap;
37} // traceHeap
38
39bool traceHeapOn() {
40 bool temp = traceHeap;
41 traceHeap = true;
42 return temp;
43} // traceHeapOn
44
45bool traceHeapOff() {
46 bool temp = traceHeap;
47 traceHeap = false;
48 return temp;
49} // traceHeapOff
50
51
52static bool checkFree = false;
53
54inline bool checkFree() {
55 return checkFree;
56} // checkFree
57
58bool checkFreeOn() {
59 bool temp = checkFree;
60 checkFree = true;
61 return temp;
62} // checkFreeOn
63
64bool checkFreeOff() {
65 bool temp = checkFree;
66 checkFree = false;
67 return temp;
68} // checkFreeOff
69
70
71// static bool traceHeapTerm = false;
72
73// inline bool traceHeapTerm() {
74// return traceHeapTerm;
75// } // traceHeapTerm
76
77// bool traceHeapTermOn() {
78// bool temp = traceHeapTerm;
79// traceHeapTerm = true;
80// return temp;
81// } // traceHeapTermOn
82
83// bool traceHeapTermOff() {
84// bool temp = traceHeapTerm;
85// traceHeapTerm = false;
86// return temp;
87// } // traceHeapTermOff
88
89
90enum {
91 __CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1),
92 __CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
93};
94
95size_t default_mmap_start() __attribute__(( weak )) {
96 return __CFA_DEFAULT_MMAP_START__;
97} // default_mmap_start
98
99size_t default_heap_expansion() __attribute__(( weak )) {
100 return __CFA_DEFAULT_HEAP_EXPANSION__;
101} // default_heap_expansion
102
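// Added note (sketch): because the two routines above are weak symbols, an application can
// tune the allocator without rebuilding it by supplying strong definitions, e.g. (hypothetical
// values, subject to the range checks in setMmapStart/setHeapExpand below):
// size_t default_mmap_start() { return 2 * 1024 * 1024 + 1; } // raise the mmap crossover
// size_t default_heap_expansion() { return 4 * 1024 * 1024; } // grow the sbrk area 4M at a time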
103
104#ifdef __CFA_DEBUG__
105static unsigned int allocFree; // running total of allocations minus frees
106
107static void checkUnfreed() {
108 if ( allocFree != 0 ) {
109 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
110 // char helpText[512];
111 // int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"
112 // "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
113 // (long int)getpid(), allocFree, allocFree ); // always print the UNIX pid
114 // __cfaabi_dbg_bits_write( helpText, len );
115 } // if
116} // checkUnfreed
117
118extern "C" {
119 void heapAppStart() { // called by __cfaabi_appready_startup
120 allocFree = 0;
121 } // heapAppStart
122
123 void heapAppStop() { // called by __cfaabi_appready_startdown
124 fclose( stdin ); fclose( stdout );
125 checkUnfreed();
126 } // heapAppStop
127} // extern "C"
128#endif // __CFA_DEBUG__
129
130// statically allocated variables => zero filled.
131static size_t pageSize; // architecture pagesize
132static size_t heapExpand; // sbrk advance
133static size_t mmapStart; // cross over point for mmap
134static unsigned int maxBucketsUsed; // maximum number of buckets in use
135
136
 137// #comment TD : This define is significantly different from the __ALIGN__ define in locks.hfa
138#define ALIGN 16
139
140#define SPINLOCK 0
141#define LOCKFREE 1
142#define BUCKETLOCK SPINLOCK
143#if BUCKETLOCK == LOCKFREE
144#include <uStackLF.h>
145#endif // LOCKFREE
146
147// Recursive definitions: HeapManager needs the size of the bucket array, and the bucket sizes need sizeof( HeapManager.Storage ).
148// Break the recursion by hardcoding the number of buckets and statically checking the number is correct after the bucket array is defined.
149enum { NoBucketSizes = 93 }; // number of bucket sizes
150
151struct HeapManager {
152// struct FreeHeader; // forward declaration
153
154 struct Storage {
155 struct Header { // header
156 union Kind {
157 struct RealHeader {
158 union {
159 struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header
160 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4
161 uint32_t padding; // unused, force home/blocksize to overlay alignment in fake header
162 #endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4
163
164 union {
165// FreeHeader * home; // allocated block points back to home locations (must overlay alignment)
166 void * home; // allocated block points back to home locations (must overlay alignment)
167 size_t blockSize; // size for munmap (must overlay alignment)
 168 #if BUCKETLOCK == SPINLOCK
169 Storage * next; // freed block points next freed block of same size
170 #endif // SPINLOCK
171 };
172
173 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
174 uint32_t padding; // unused, force home/blocksize to overlay alignment in fake header
175 #endif // __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
176 };
177 // future code
 178 #if BUCKETLOCK == LOCKFREE
179 Stack<Storage>::Link next; // freed block points next freed block of same size (double-wide)
180 #endif // LOCKFREE
181 };
182 } real; // RealHeader
183 struct FakeHeader {
184 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
185 uint32_t alignment; // low-order bits of home/blockSize used for tricks
186 #endif // __ORDER_LITTLE_ENDIAN__
187
188 uint32_t offset;
189
190 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
191 uint32_t alignment; // low-order bits of home/blockSize used for tricks
192 #endif // __ORDER_BIG_ENDIAN__
193 } fake; // FakeHeader
194 } kind; // Kind
195 } header; // Header
196 char pad[ALIGN - sizeof( Header )];
197 char data[0]; // storage
198 }; // Storage
199
200 static_assert( ALIGN >= sizeof( Storage ), "ALIGN < sizeof( Storage )" );
201
202 struct FreeHeader {
 203 #if BUCKETLOCK == SPINLOCK
204 __spinlock_t lock; // must be first field for alignment
205 Storage * freeList;
 206 #elif BUCKETLOCK == LOCKFREE
207 // future code
208 StackLF<Storage> freeList;
209 #else
210 #error undefined lock type for bucket lock
211 #endif // SPINLOCK
212 size_t blockSize; // size of allocations on this list
213 }; // FreeHeader
214
215 // must be first fields for alignment
216 __spinlock_t extlock; // protects allocation-buffer extension
217 FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes
218
219 void * heapBegin; // start of heap
220 void * heapEnd; // logical end of heap
221 size_t heapRemaining; // amount of storage not allocated in the current chunk
222}; // HeapManager
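// Added sketch: the pointer handed to the program is &Storage.data, which sits immediately
// after the header area padded to ALIGN (16) bytes, so a user address maps back to its header
// with the same arithmetic the headerAddr macro uses later in this file:
// HeapManager.Storage * block = ...; // some allocated block (hypothetical)
// void * area = &(block->data); // address returned to the program
// HeapManager.Storage.Header * h = (HeapManager.Storage.Header *)((char *)area - sizeof(HeapManager.Storage));
// // h now points at block->header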
223
224static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return freeheader.blockSize; }
225
226
227#define FASTLOOKUP
228#define __STATISTICS__
229
230// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
231static const unsigned int bucketSizes[] @= { // different bucket sizes
232 16, 32, 48, 64,
233 64 + sizeof(HeapManager.Storage), 96, 112, 128, 128 + sizeof(HeapManager.Storage), 160, 192, 224,
234 256 + sizeof(HeapManager.Storage), 320, 384, 448, 512 + sizeof(HeapManager.Storage), 640, 768, 896,
235 1_024 + sizeof(HeapManager.Storage), 1_536, 2_048 + sizeof(HeapManager.Storage), 2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), 6_144,
236 8_192 + sizeof(HeapManager.Storage), 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360,
237 16_384 + sizeof(HeapManager.Storage), 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720,
238 32_768 + sizeof(HeapManager.Storage), 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440,
239 65_536 + sizeof(HeapManager.Storage), 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880,
240 131_072 + sizeof(HeapManager.Storage), 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760,
241 262_144 + sizeof(HeapManager.Storage), 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520,
242 524_288 + sizeof(HeapManager.Storage), 655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), 1_179_648, 1_310_720, 1_441_792,
243 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), 2_621_440, 3_145_728, 3_670_016,
244 4_194_304 + sizeof(HeapManager.Storage)
245};
246
247static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0]), "size of bucket array wrong" );
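// Added worked example: doMalloc (below) adds sizeof(HeapManager.Storage) to the user request
// before choosing a bucket, so a 1024-byte request needs 1024 + sizeof(HeapManager.Storage)
// bytes and lands exactly in the "1_024 + sizeof(HeapManager.Storage)" bucket with no waste,
// while a 1025-byte request rolls over to the next bucket (1_536).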
248
249#ifdef FASTLOOKUP
250enum { LookupSizes = 65_536 + sizeof(HeapManager.Storage) }; // number of fast lookup sizes
251static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes
252#endif // FASTLOOKUP
253static int mmapFd = -1; // fake or actual fd for anonymous file
254
255
256#ifdef __CFA_DEBUG__
257static bool heapBoot = 0; // detect recursion during boot
258#endif // __CFA_DEBUG__
259static HeapManager heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing
260
261// #comment TD : The return type of this function should be commented
262static inline bool setMmapStart( size_t value ) {
263 if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
264 mmapStart = value; // set global
265
266 // find the closest bucket size less than or equal to the mmapStart size
267 maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
268 assert( maxBucketsUsed < NoBucketSizes ); // subscript failure ?
269 assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
270 return false;
271} // setMmapStart
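// Added usage sketch: like several helpers in this file, setMmapStart returns true on failure
// (value below pageSize or above the largest bucket size) and false on success, e.g.:
// if ( setMmapStart( default_mmap_start() ) ) abort( "..." ); // as in the constructor below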
272
273
274static void ?{}( HeapManager & manager ) with ( manager ) {
275 pageSize = sysconf( _SC_PAGESIZE );
276
277 for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
278 freeLists[i].blockSize = bucketSizes[i];
279 } // for
280
281 #ifdef FASTLOOKUP
282 unsigned int idx = 0;
283 for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
284 if ( i > bucketSizes[idx] ) idx += 1;
285 lookup[i] = idx;
286 } // for
287 #endif // FASTLOOKUP
288
289 if ( setMmapStart( default_mmap_start() ) ) {
290 abort( "HeapManager : internal error, mmap start initialization failure." );
291 } // if
292 heapExpand = default_heap_expansion();
293
294 char * End = (char *)sbrk( 0 );
295 sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End ); // move start of heap to multiple of alignment
296 heapBegin = heapEnd = sbrk( 0 ); // get new start point
297} // HeapManager
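// Added note (sketch of the intent, not an exact trace): libCeiling rounds its first argument
// up to a multiple of the second, so if sbrk( 0 ) initially returns an address, say, 6 bytes
// below the next libAlign() boundary, the second sbrk call above advances the break by those
// 6 bytes and heapBegin/heapEnd start on an aligned boundary.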
298
299
300static void ^?{}( HeapManager & ) {
301 #ifdef __STATISTICS__
302 // if ( traceHeapTerm() ) {
303 // printStats();
304 // if ( checkfree() ) checkFree( heapManager, true );
305 // } // if
306 #endif // __STATISTICS__
307} // ~HeapManager
308
309
310static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
311void memory_startup( void ) {
312 #ifdef __CFA_DEBUG__
313 if ( unlikely( heapBoot ) ) { // check for recursion during system boot
314 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
315 abort( "boot() : internal error, recursively invoked during system boot." );
316 } // if
317 heapBoot = true;
318 #endif // __CFA_DEBUG__
319
320 //assert( heapManager.heapBegin != 0 );
321 //heapManager{};
322 if ( heapManager.heapBegin == 0 ) heapManager{};
323} // memory_startup
324
325static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
326void memory_shutdown( void ) {
327 ^heapManager{};
328} // memory_shutdown
329
330
331#ifdef __STATISTICS__
332static unsigned long long int mmap_storage; // heap statistics counters
333static unsigned int mmap_calls;
334static unsigned long long int munmap_storage;
335static unsigned int munmap_calls;
336static unsigned long long int sbrk_storage;
337static unsigned int sbrk_calls;
338static unsigned long long int malloc_storage;
339static unsigned int malloc_calls;
340static unsigned long long int free_storage;
341static unsigned int free_calls;
342static unsigned long long int calloc_storage;
343static unsigned int calloc_calls;
344static unsigned long long int memalign_storage;
345static unsigned int memalign_calls;
346static unsigned long long int cmemalign_storage;
347static unsigned int cmemalign_calls;
348static unsigned long long int realloc_storage;
349static unsigned int realloc_calls;
350
351static int statfd; // statistics file descriptor (changed by malloc_stats_fd)
352
353
354// Use "write" because streams may be shutdown when calls are made.
355static void printStats() {
356 char helpText[512];
357 __cfaabi_dbg_bits_print_buffer( helpText, sizeof(helpText),
358 "\nHeap statistics:\n"
359 " malloc: calls %u / storage %llu\n"
360 " calloc: calls %u / storage %llu\n"
361 " memalign: calls %u / storage %llu\n"
362 " cmemalign: calls %u / storage %llu\n"
363 " realloc: calls %u / storage %llu\n"
364 " free: calls %u / storage %llu\n"
365 " mmap: calls %u / storage %llu\n"
366 " munmap: calls %u / storage %llu\n"
367 " sbrk: calls %u / storage %llu\n",
368 malloc_calls, malloc_storage,
369 calloc_calls, calloc_storage,
370 memalign_calls, memalign_storage,
371 cmemalign_calls, cmemalign_storage,
372 realloc_calls, realloc_storage,
373 free_calls, free_storage,
374 mmap_calls, mmap_storage,
375 munmap_calls, munmap_storage,
376 sbrk_calls, sbrk_storage
377 );
378} // printStats
379
380static int printStatsXML( FILE * stream ) { // see malloc_info
381 char helpText[512];
382 int len = snprintf( helpText, sizeof(helpText),
383 "<malloc version=\"1\">\n"
384 "<heap nr=\"0\">\n"
385 "<sizes>\n"
386 "</sizes>\n"
387 "<total type=\"malloc\" count=\"%u\" size=\"%llu\"/>\n"
388 "<total type=\"calloc\" count=\"%u\" size=\"%llu\"/>\n"
389 "<total type=\"memalign\" count=\"%u\" size=\"%llu\"/>\n"
390 "<total type=\"cmemalign\" count=\"%u\" size=\"%llu\"/>\n"
391 "<total type=\"realloc\" count=\"%u\" size=\"%llu\"/>\n"
392 "<total type=\"free\" count=\"%u\" size=\"%llu\"/>\n"
393 "<total type=\"mmap\" count=\"%u\" size=\"%llu\"/>\n"
394 "<total type=\"munmap\" count=\"%u\" size=\"%llu\"/>\n"
395 "<total type=\"sbrk\" count=\"%u\" size=\"%llu\"/>\n"
396 "</malloc>",
397 malloc_calls, malloc_storage,
398 calloc_calls, calloc_storage,
399 memalign_calls, memalign_storage,
400 cmemalign_calls, cmemalign_storage,
401 realloc_calls, realloc_storage,
402 free_calls, free_storage,
403 mmap_calls, mmap_storage,
404 munmap_calls, munmap_storage,
405 sbrk_calls, sbrk_storage
406 );
407 return write( fileno( stream ), helpText, len ); // -1 => error
408} // printStatsXML
409#endif // __STATISTICS__
410
411// #comment TD : Is this the same thing as Out-of-Memory?
412static inline void noMemory() {
413 abort( "Heap memory exhausted at %zu bytes.\n"
414 "Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
415 ((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
416} // noMemory
417
418
419static inline void checkAlign( size_t alignment ) {
420 if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) {
421 abort( "Alignment %zu for memory allocation is less than sizeof(void *) and/or not a power of 2.", alignment );
422 } // if
423} // checkAlign
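// Added example: checkAlign( 32 ) passes (a power of 2 and >= sizeof(void *)), whereas
// checkAlign( 24 ) or, on a 64-bit build, checkAlign( 4 ) aborts with the message above.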
424
425
426static inline bool setHeapExpand( size_t value ) {
 427 if ( value < pageSize ) return true;
428 heapExpand = value;
429 return false;
430} // setHeapExpand
431
432
433static inline void checkHeader( bool check, const char * name, void * addr ) {
434 if ( unlikely( check ) ) { // bad address ?
435 abort( "Attempt to %s storage %p with address outside the heap.\n"
436 "Possible cause is duplicate free on same block or overwriting of memory.",
437 name, addr );
438 } // if
439} // checkHeader
440
441// #comment TD : function should be commented and/or have a more evocative name
442// this is neither a check nor a constructor, which is what I would expect this function to be
443static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & size, size_t & alignment ) {
444 if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
445 size_t offset = header->kind.fake.offset;
446 alignment = header->kind.fake.alignment & -2; // remove flag from value
447 #ifdef __CFA_DEBUG__
448 checkAlign( alignment ); // check alignment
449 #endif // __CFA_DEBUG__
450 header = (HeapManager.Storage.Header *)((char *)header - offset);
451 } // if
452} // fakeHeader
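// Added decoding sketch: memalignNoStats (below) stores (alignment | 1) in the fake header,
// so an odd alignment field marks a fake header; the code above strips the flag with "& -2"
// and backs up "offset" bytes to the real header. For instance, with alignment 64 and the
// fake header 48 bytes past the real one (hypothetical layout):
// header->kind.fake.alignment == 65 // odd => fake header
// header->kind.fake.alignment & -2 == 64 // true alignment
// (char *)header - header->kind.fake.offset // real header created by doMalloc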
453
454// #comment TD : Why is this a define
455#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))
456
457static inline bool headers( const char * name, void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
458 header = headerAddr( addr );
459
460 if ( unlikely( heapEnd < addr ) ) { // mmapped ?
461 fakeHeader( header, size, alignment );
462 size = header->kind.real.blockSize & -3; // mmap size
463 return true;
464 } // if
465
466 #ifdef __CFA_DEBUG__
467 checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
468 #endif // __CFA_DEBUG__
469
470 // #comment TD : This code looks weird...
471 // It's called as the first statement of both branches of the last if, with the same parameters in all cases
472
473 // header may be safe to dereference
474 fakeHeader( header, size, alignment );
475 #ifdef __CFA_DEBUG__
476 checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
477 #endif // __CFA_DEBUG__
478
479 freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
480 #ifdef __CFA_DEBUG__
481 if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
482 abort( "Attempt to %s storage %p with corrupted header.\n"
483 "Possible cause is duplicate free on same block or overwriting of header information.",
484 name, addr );
485 } // if
486 #endif // __CFA_DEBUG__
487 size = freeElem->blockSize;
488 return false;
489} // headers
490
491
492static inline void * extend( size_t size ) with ( heapManager ) {
493 lock( extlock __cfaabi_dbg_ctx2 );
494 ptrdiff_t rem = heapRemaining - size;
495 if ( rem < 0 ) {
496 // If the size requested is bigger than the current remaining storage, increase the size of the heap.
497
498 size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
499 if ( sbrk( increase ) == (void *)-1 ) {
500 unlock( extlock );
501 errno = ENOMEM;
502 return 0;
503 } // if
504 #ifdef __STATISTICS__
505 sbrk_calls += 1;
506 sbrk_storage += increase;
507 #endif // __STATISTICS__
508 #ifdef __CFA_DEBUG__
509 // Set new memory to garbage so subsequent uninitialized usages might fail.
510 memset( (char *)heapEnd + heapRemaining, '\377', increase );
511 #endif // __CFA_DEBUG__
512 rem = heapRemaining + increase - size;
513 } // if
514
515 HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
516 heapRemaining = rem;
517 heapEnd = (char *)heapEnd + size;
518 unlock( extlock );
519 return block;
520} // extend
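// Added worked example: with the default 1M heapExpand, a 64K request arriving when only 4K
// remains triggers sbrk( 1M ) (the larger of size and heapExpand, rounded up to libAlign()),
// leaving rem = 4K + 1M - 64K of storage for later allocations.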
521
522
523size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) {
524 size_t l = 0, m, h = dim;
525 while ( l < h ) {
526 m = (l + h) / 2;
527 if ( (unsigned int &)(vals[m]) < key ) { // cast away const
528 l = m + 1;
529 } else {
530 h = m;
531 } // if
532 } // while
533 return l;
534} // Bsearchl
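// Added note: Bsearchl is a lower-bound search, returning the index of the first element >= key;
// e.g. Bsearchl( 100, bucketSizes, maxBucketsUsed ) yields the index of the 112-byte bucket,
// the smallest bucket size that can hold a 100-byte total request.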
535
536
537static inline void * doMalloc( size_t size ) with ( heapManager ) {
538 HeapManager.Storage * block; // pointer to new block of storage
539
540 // Look up size in the size list. Make sure the user request includes space for the header that must be allocated
541 // along with the block and is a multiple of the alignment size.
542
543 if ( unlikely( size > ~0ul - sizeof(HeapManager.Storage) ) ) return 0;
544 size_t tsize = size + sizeof(HeapManager.Storage);
545 if ( likely( tsize < mmapStart ) ) { // small size => sbrk
546 size_t posn;
547 #ifdef FASTLOOKUP
548 if ( tsize < LookupSizes ) posn = lookup[tsize];
549 else
550 #endif // FASTLOOKUP
551 posn = Bsearchl( (unsigned int)tsize, bucketSizes, (size_t)maxBucketsUsed );
552 HeapManager.FreeHeader * freeElem = &freeLists[posn];
553 // #ifdef FASTLOOKUP
554 // if ( tsize < LookupSizes )
555 // freeElem = &freeLists[lookup[tsize]];
556 // else
557 // #endif // FASTLOOKUP
558 // freeElem = bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search
559 // HeapManager.FreeHeader * freeElem =
560 // #ifdef FASTLOOKUP
561 // tsize < LookupSizes ? &freeLists[lookup[tsize]] :
562 // #endif // FASTLOOKUP
563 // bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search
564 assert( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ?
565 assert( tsize <= freeElem->blockSize ); // search failure ?
566 tsize = freeElem->blockSize; // total space needed for request
567
568 // Spin until the lock is acquired for this particular size of block.
569
570 #if defined( SPINLOCK )
571 lock( freeElem->lock __cfaabi_dbg_ctx2 );
572 block = freeElem->freeList; // remove node from stack
573 #else
574 block = freeElem->freeList.pop();
575 #endif // SPINLOCK
576 if ( unlikely( block == 0 ) ) { // no free block ?
577 #if defined( SPINLOCK )
578 unlock( freeElem->lock );
579 #endif // SPINLOCK
580
581 // Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more
582 // and then carve it off.
583
584 block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call
585 if ( unlikely( block == 0 ) ) return 0;
586 #if defined( SPINLOCK )
587 } else {
588 freeElem->freeList = block->header.kind.real.next;
589 unlock( freeElem->lock );
590 #endif // SPINLOCK
591 } // if
592
 593 block->header.kind.real.home = freeElem; // pointer back to free list of appropriate size
594 } else { // large size => mmap
595 if ( unlikely( size > ~0ul - pageSize ) ) return 0;
596 tsize = libCeiling( tsize, pageSize ); // must be multiple of page size
597 #ifdef __STATISTICS__
598 __atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
599 __atomic_add_fetch( &mmap_storage, tsize, __ATOMIC_SEQ_CST );
600 #endif // __STATISTICS__
601 block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
602 if ( block == (HeapManager.Storage *)MAP_FAILED ) {
603 // Do not call strerror( errno ) as it may call malloc.
604 abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu error:%d.", &heapManager, tsize, errno );
605 } // if
606 #ifdef __CFA_DEBUG__
607 // Set new memory to garbage so subsequent uninitialized usages might fail.
608 memset( block, '\377', tsize );
609 #endif // __CFA_DEBUG__
610 block->header.kind.real.blockSize = tsize; // storage size for munmap
611 } // if
612
613 void * area = &(block->data); // adjust off header to user bytes
614
615 #ifdef __CFA_DEBUG__
616 assert( ((uintptr_t)area & (libAlign() - 1)) == 0 ); // minimum alignment ?
617 __atomic_add_fetch( &allocFree, tsize, __ATOMIC_SEQ_CST );
618 if ( traceHeap() ) {
619 enum { BufferSize = 64 };
620 char helpText[BufferSize];
621 int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", area, size, tsize );
622 // int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", area, size );
623 __cfaabi_dbg_bits_write( helpText, len );
624 } // if
625 #endif // __CFA_DEBUG__
626
627 return area;
628} // doMalloc
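// Added walkthrough (sketch): for malloc( 100 ), the path above computes tsize = 100 +
// sizeof(HeapManager.Storage), selects the 128-byte bucket (via lookup[] or Bsearchl), pops a
// block from that free list or extends the heap, records the bucket in header.kind.real.home,
// and returns &block->data; requests whose tsize reaches mmapStart bypass the buckets and are
// mmapped instead.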
629
630
631static inline void doFree( void * addr ) with ( heapManager ) {
632 #ifdef __CFA_DEBUG__
633 if ( unlikely( heapManager.heapBegin == 0 ) ) {
634 abort( "doFree( %p ) : internal error, called before heap is initialized.", addr );
635 } // if
636 #endif // __CFA_DEBUG__
637
638 HeapManager.Storage.Header * header;
639 HeapManager.FreeHeader * freeElem;
640 size_t size, alignment; // not used (see realloc)
641
642 if ( headers( "free", addr, header, freeElem, size, alignment ) ) { // mmapped ?
643 #ifdef __STATISTICS__
644 __atomic_add_fetch( &munmap_calls, 1, __ATOMIC_SEQ_CST );
645 __atomic_add_fetch( &munmap_storage, size, __ATOMIC_SEQ_CST );
646 #endif // __STATISTICS__
647 if ( munmap( header, size ) == -1 ) {
648 #ifdef __CFA_DEBUG__
649 abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
650 "Possible cause is invalid pointer.",
651 addr );
652 #endif // __CFA_DEBUG__
653 } // if
654 } else {
655 #ifdef __CFA_DEBUG__
656 // Set free memory to garbage so subsequent usages might fail.
657 memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) );
658 #endif // __CFA_DEBUG__
659
660 #ifdef __STATISTICS__
661 free_storage += size;
662 #endif // __STATISTICS__
663 #if defined( SPINLOCK )
664 lock( freeElem->lock __cfaabi_dbg_ctx2 ); // acquire spin lock
665 header->kind.real.next = freeElem->freeList; // push on stack
666 freeElem->freeList = (HeapManager.Storage *)header;
667 unlock( freeElem->lock ); // release spin lock
668 #else
669 freeElem->freeList.push( *(HeapManager.Storage *)header );
670 #endif // SPINLOCK
671 } // if
672
673 #ifdef __CFA_DEBUG__
674 __atomic_add_fetch( &allocFree, -size, __ATOMIC_SEQ_CST );
675 if ( traceHeap() ) {
676 enum { BufferSize = 64 };
677 char helpText[BufferSize];
678 int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );
679 __cfaabi_dbg_bits_write( helpText, len );
680 } // if
681 #endif // __CFA_DEBUG__
682} // doFree
683
684
685size_t checkFree( HeapManager & manager ) with ( manager ) {
686 size_t total = 0;
687 #ifdef __STATISTICS__
688 __cfaabi_dbg_bits_acquire();
689 __cfaabi_dbg_bits_print_nolock( "\nBin lists (bin size : free blocks on list)\n" );
690 #endif // __STATISTICS__
691 for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {
692 size_t size = freeLists[i].blockSize;
693 #ifdef __STATISTICS__
694 unsigned int N = 0;
695 #endif // __STATISTICS__
696
697 #if defined( SPINLOCK )
698 for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0; p = p->header.kind.real.next ) {
699 #else
700 for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0; p = p->header.kind.real.next.top ) {
701 #endif // SPINLOCK
702 total += size;
703 #ifdef __STATISTICS__
704 N += 1;
705 #endif // __STATISTICS__
706 } // for
707
708 #ifdef __STATISTICS__
709 __cfaabi_dbg_bits_print_nolock( "%7zu, %-7u ", size, N );
710 if ( (i + 1) % 8 == 0 ) __cfaabi_dbg_bits_print_nolock( "\n" );
711 #endif // __STATISTICS__
712 } // for
713 #ifdef __STATISTICS__
714 __cfaabi_dbg_bits_print_nolock( "\ntotal free blocks:%zu\n", total );
715 __cfaabi_dbg_bits_release();
716 #endif // __STATISTICS__
717 return (char *)heapEnd - (char *)heapBegin - total;
718} // checkFree
719
720
721static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics
722 //assert( heapManager.heapBegin != 0 );
723 if ( unlikely( heapManager.heapBegin == 0 ) ) heapManager{}; // called before memory_startup ?
724 void * area = doMalloc( size );
725 if ( unlikely( area == 0 ) ) errno = ENOMEM; // POSIX
726 return area;
727} // mallocNoStats
728
729
730static inline void * memalignNoStats( size_t alignment, size_t size ) { // necessary for malloc statistics
731 #ifdef __CFA_DEBUG__
732 checkAlign( alignment ); // check alignment
733 #endif // __CFA_DEBUG__
734
735 // if alignment <= default alignment, do normal malloc as two headers are unnecessary
736 if ( unlikely( alignment <= libAlign() ) ) return mallocNoStats( size );
737
738 // Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
739 // administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
740 // .-------------v-----------------v----------------v----------,
741 // | Real Header | ... padding ... | Fake Header | data ... |
742 // `-------------^-----------------^-+--------------^----------'
743 // |<--------------------------------' offset/align |<-- alignment boundary
744
745 // subtract libAlign() because it is already the minimum alignment
746 // add sizeof(Storage) for fake header
747 // #comment TD : this is the only place that calls doMalloc without calling mallocNoStats, why ?
748 char * area = (char *)doMalloc( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
749 if ( unlikely( area == 0 ) ) return area;
750
751 // address in the block of the "next" alignment address
752 char * user = (char *)libCeiling( (uintptr_t)(area + sizeof(HeapManager.Storage)), alignment );
753
754 // address of header from malloc
755 HeapManager.Storage.Header * realHeader = headerAddr( area );
 756 // address of fake header *before* the alignment location
757 HeapManager.Storage.Header * fakeHeader = headerAddr( user );
758 // SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
759 fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
 760 // SKULLDUGGERY: odd alignment implies fake header
761 fakeHeader->kind.fake.alignment = alignment | 1;
762
763 return user;
764} // memalignNoStats
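// Added worked example (hypothetical addresses, 64-bit build with a 16-byte Storage header):
// memalignNoStats( 64, 200 ) asks doMalloc for 200 + 64 - libAlign() + sizeof(HeapManager.Storage)
// bytes; if area is 0x...010, then user = libCeiling( 0x...020, 64 ) = 0x...040, the fake header
// sits at 0x...040 - 16 = 0x...030, its offset is 0x...030 - 0x...000 = 48 bytes back to the
// real header, and its alignment field is 64 | 1 = 65 so headers()/fakeHeader() detect it on free.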
765
766
767// supported mallopt options
768#ifndef M_MMAP_THRESHOLD
769#define M_MMAP_THRESHOLD (-1)
770#endif // M_MMAP_THRESHOLD
771#ifndef M_TOP_PAD
772#define M_TOP_PAD (-2)
773#endif // M_TOP_PAD
774
775
776extern "C" {
777 // The malloc() function allocates size bytes and returns a pointer to the allocated memory. The memory is not
778 // initialized. If size is 0, then malloc() returns either NULL, or a unique pointer value that can later be
779 // successfully passed to free().
780 void * malloc( size_t size ) {
781 #ifdef __STATISTICS__
782 __atomic_add_fetch( &malloc_calls, 1, __ATOMIC_SEQ_CST );
783 __atomic_add_fetch( &malloc_storage, size, __ATOMIC_SEQ_CST );
784 #endif // __STATISTICS__
785
786 return mallocNoStats( size );
787 } // malloc
788
789 // The calloc() function allocates memory for an array of nmemb elements of size bytes each and returns a pointer to
790 // the allocated memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either NULL, or a
791 // unique pointer value that can later be successfully passed to free().
792 void * calloc( size_t noOfElems, size_t elemSize ) {
793 size_t size = noOfElems * elemSize;
794 #ifdef __STATISTICS__
795 __atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST );
796 __atomic_add_fetch( &calloc_storage, size, __ATOMIC_SEQ_CST );
797 #endif // __STATISTICS__
798
799 char * area = (char *)mallocNoStats( size );
800 if ( unlikely( area == 0 ) ) return 0;
801
802 HeapManager.Storage.Header * header;
803 HeapManager.FreeHeader * freeElem;
804 size_t asize, alignment;
805 bool mapped __attribute__(( unused )) = headers( "calloc", area, header, freeElem, asize, alignment );
806 #ifndef __CFA_DEBUG__
807 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
808 if ( ! mapped )
809 #endif // __CFA_DEBUG__
810 memset( area, '\0', asize - sizeof(HeapManager.Storage) ); // set to zeros
811
812 header->kind.real.blockSize |= 2; // mark as zero filled
813 return area;
814 } // calloc
815
816 // #comment TD : Document this function
817 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {
818 size_t size = noOfElems * elemSize;
819 #ifdef __STATISTICS__
820 __atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
821 __atomic_add_fetch( &cmemalign_storage, size, __ATOMIC_SEQ_CST );
822 #endif // __STATISTICS__
823
824 char * area = (char *)memalignNoStats( alignment, size );
825 if ( unlikely( area == 0 ) ) return 0;
826 HeapManager.Storage.Header * header;
827 HeapManager.FreeHeader * freeElem;
828 size_t asize;
829 bool mapped __attribute__(( unused )) = headers( "cmemalign", area, header, freeElem, asize, alignment );
830 #ifndef __CFA_DEBUG__
831 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
832 if ( ! mapped )
833 #endif // __CFA_DEBUG__
834 memset( area, '\0', asize - ( (char *)area - (char *)header ) ); // set to zeros
835 header->kind.real.blockSize |= 2; // mark as zero filled
836
837 return area;
838 } // cmemalign
839
840 // The realloc() function changes the size of the memory block pointed to by ptr to size bytes. The contents will be
841 // unchanged in the range from the start of the region up to the minimum of the old and new sizes. If the new size
842 // is larger than the old size, the added memory will not be initialized. If ptr is NULL, then the call is
843 // equivalent to malloc(size), for all values of size; if size is equal to zero, and ptr is not NULL, then the call
844 // is equivalent to free(ptr). Unless ptr is NULL, it must have been returned by an earlier call to malloc(),
845 // calloc() or realloc(). If the area pointed to was moved, a free(ptr) is done.
846 void * realloc( void * addr, size_t size ) {
847 #ifdef __STATISTICS__
848 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
849 #endif // __STATISTICS__
850
851 if ( unlikely( addr == 0 ) ) return mallocNoStats( size ); // special cases
852 if ( unlikely( size == 0 ) ) { free( addr ); return 0; }
853
854 HeapManager.Storage.Header * header;
855 HeapManager.FreeHeader * freeElem;
856 size_t asize, alignment = 0;
857 headers( "realloc", addr, header, freeElem, asize, alignment );
858
859 size_t usize = asize - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block
860 if ( usize >= size ) { // already sufficient storage
861 // This case does not result in a new profiler entry because the previous one still exists and it must match with
862 // the free for this memory. Hence, this realloc does not appear in the profiler output.
863 return addr;
864 } // if
865
866 #ifdef __STATISTICS__
867 __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
868 #endif // __STATISTICS__
869
870 void * area;
871 if ( unlikely( alignment != 0 ) ) { // previous request memalign?
872 area = memalign( alignment, size ); // create new aligned area
873 } else {
874 area = mallocNoStats( size ); // create new area
875 } // if
876 if ( unlikely( area == 0 ) ) return 0;
877 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill (calloc/cmemalign) ?
878 assert( (header->kind.real.blockSize & 1) == 0 );
879 bool mapped __attribute__(( unused )) = headers( "realloc", area, header, freeElem, asize, alignment );
880 #ifndef __CFA_DEBUG__
881 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
882 if ( ! mapped )
883 #endif // __CFA_DEBUG__
884 memset( (char *)area + usize, '\0', asize - ( (char *)area - (char *)header ) - usize ); // zero-fill back part
885 header->kind.real.blockSize |= 2; // mark new request as zero fill
886 } // if
887 memcpy( area, addr, usize ); // copy bytes
888 free( addr );
889 return area;
890 } // realloc
891
892 // The obsolete function memalign() allocates size bytes and returns a pointer to the allocated memory. The memory
893 // address will be a multiple of alignment, which must be a power of two.
894 void * memalign( size_t alignment, size_t size ) {
895 #ifdef __STATISTICS__
896 __atomic_add_fetch( &memalign_calls, 1, __ATOMIC_SEQ_CST );
897 __atomic_add_fetch( &memalign_storage, size, __ATOMIC_SEQ_CST );
898 #endif // __STATISTICS__
899
900 void * area = memalignNoStats( alignment, size );
901
902 return area;
903 } // memalign
904
905 // The function aligned_alloc() is the same as memalign(), except for the added restriction that size should be a
906 // multiple of alignment.
907 void * aligned_alloc( size_t alignment, size_t size ) {
908 return memalign( alignment, size );
909 } // aligned_alloc
910
911
912 // The function posix_memalign() allocates size bytes and places the address of the allocated memory in *memptr. The
913 // address of the allocated memory will be a multiple of alignment, which must be a power of two and a multiple of
914 // sizeof(void *). If size is 0, then posix_memalign() returns either NULL, or a unique pointer value that can later
915 // be successfully passed to free(3).
916 int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
917 if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
918 * memptr = memalign( alignment, size );
919 if ( unlikely( * memptr == 0 ) ) return ENOMEM;
920 return 0;
921 } // posix_memalign
922
923 // The obsolete function valloc() allocates size bytes and returns a pointer to the allocated memory. The memory
924 // address will be a multiple of the page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
925 void * valloc( size_t size ) {
926 return memalign( pageSize, size );
927 } // valloc
928
929
930 // The free() function frees the memory space pointed to by ptr, which must have been returned by a previous call to
931 // malloc(), calloc() or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behavior
932 // occurs. If ptr is NULL, no operation is performed.
933 void free( void * addr ) {
934 #ifdef __STATISTICS__
935 __atomic_add_fetch( &free_calls, 1, __ATOMIC_SEQ_CST );
936 #endif // __STATISTICS__
937
 938 // #comment TD : To decrease nesting I would put the special case in the
 939 // else branch instead, plus it reads more naturally to have the
 940 // short / normal case first
941 if ( unlikely( addr == 0 ) ) { // special case
942 #ifdef __CFA_DEBUG__
943 if ( traceHeap() ) {
944 #define nullmsg "Free( 0x0 ) size:0\n"
945 // Do not debug print free( 0 ), as it can cause recursive entry from sprintf.
946 __cfaabi_dbg_bits_write( nullmsg, sizeof(nullmsg) - 1 );
947 } // if
948 #endif // __CFA_DEBUG__
949 return;
950 } // exit
951
952 doFree( addr );
953 } // free
954
955 // The mallopt() function adjusts parameters that control the behavior of the memory-allocation functions (see
956 // malloc(3)). The param argument specifies the parameter to be modified, and value specifies the new value for that
957 // parameter.
958 int mallopt( int option, int value ) {
959 choose( option ) {
960 case M_TOP_PAD:
961 if ( setHeapExpand( value ) ) fallthru default;
962 case M_MMAP_THRESHOLD:
963 if ( setMmapStart( value ) ) fallthru default;
964 default:
 965 // #comment TD : 1 for unsupported feels wrong
966 return 1; // success, or unsupported
967 } // switch
968 return 0; // error
969 } // mallopt
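// Added usage sketch: only the two options above are supported and they mirror glibc's names:
// mallopt( M_MMAP_THRESHOLD, 1024 * 1024 ); // route requests of about 1M and up to mmap
// mallopt( M_TOP_PAD, 2 * 1024 * 1024 ); // advance sbrk 2M at a time when the heap grows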
970
971 // The malloc_trim() function attempts to release free memory at the top of the heap (by calling sbrk(2) with a
972 // suitable argument).
973 int malloc_trim( size_t ) {
974 return 0; // => impossible to release memory
975 } // malloc_trim
976
977 // The malloc_usable_size() function returns the number of usable bytes in the block pointed to by ptr, a pointer to
978 // a block of memory allocated by malloc(3) or a related function.
979 size_t malloc_usable_size( void * addr ) {
980 if ( unlikely( addr == 0 ) ) return 0; // null allocation has 0 size
981
982 HeapManager.Storage.Header * header;
983 HeapManager.FreeHeader * freeElem;
984 size_t size, alignment;
985
986 headers( "malloc_usable_size", addr, header, freeElem, size, alignment );
987 size_t usize = size - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block
988 return usize;
989 } // malloc_usable_size
990
991
992 // The malloc_alignment() function returns the alignment of the allocation.
993 size_t malloc_alignment( void * addr ) {
994 if ( unlikely( addr == 0 ) ) return libAlign(); // minimum alignment
995 HeapManager.Storage.Header * header = headerAddr( addr );
996 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
997 return header->kind.fake.alignment & -2; // remove flag from value
998 } else {
 999 return libAlign(); // minimum alignment
1000 } // if
1001 } // malloc_alignment
1002
1003
1004 // The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc().
1005 bool malloc_zero_fill( void * addr ) {
1006 if ( unlikely( addr == 0 ) ) return false; // null allocation is not zero fill
1007 HeapManager.Storage.Header * header = headerAddr( addr );
1008 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
1009 header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset);
1010 } // if
1011 return (header->kind.real.blockSize & 2) != 0; // zero filled (calloc/cmemalign) ?
1012 } // malloc_zero_fill
1013
1014
1015 // The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and
1016 // related functions.
1017 void malloc_stats( void ) {
1018 #ifdef __STATISTICS__
1019 printStats();
1020 if ( checkFree() ) checkFree( heapManager );
1021 #endif // __STATISTICS__
1022 } // malloc_stats
1023
 1024 // The malloc_stats_fd() function changes the file descriptor to which malloc_stats() writes the statistics.
1025 int malloc_stats_fd( int fd ) {
1026 #ifdef __STATISTICS__
1027 int temp = statfd;
1028 statfd = fd;
1029 return temp;
1030 #else
1031 return -1;
1032 #endif // __STATISTICS__
1033 } // malloc_stats_fd
1034
1035 // The malloc_info() function exports an XML string that describes the current state of the memory-allocation
1036 // implementation in the caller. The string is printed on the file stream stream. The exported string includes
1037 // information about all arenas (see malloc(3)).
1038 int malloc_info( int options, FILE * stream ) {
1039 return printStatsXML( stream );
1040 } // malloc_info
1041
1042
1043 // The malloc_get_state() function records the current state of all malloc(3) internal bookkeeping variables (but
1044 // not the actual contents of the heap or the state of malloc_hook(3) functions pointers). The state is recorded in
1045 // a system-dependent opaque data structure dynamically allocated via malloc(3), and a pointer to that data
1046 // structure is returned as the function result. (It is the caller's responsibility to free(3) this memory.)
1047 void * malloc_get_state( void ) {
1048 return 0; // unsupported
1049 } // malloc_get_state
1050
1051
1052 // The malloc_set_state() function restores the state of all malloc(3) internal bookkeeping variables to the values
1053 // recorded in the opaque data structure pointed to by state.
1054 int malloc_set_state( void * ptr ) {
1055 return 0; // unsupported
1056 } // malloc_set_state
1057} // extern "C"
1058
1059
1060// Local Variables: //
1061// tab-width: 4 //
1062// compile-command: "cfa -nodebug -O2 heap.cfa" //
1063// End: //