source: src/libcfa/heap.c @ 02559df (last change f0b3f51 by Peter A. Buhr: update heap)

//
// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// heap.c --
//
// Author           : Peter A. Buhr
// Created On       : Tue Dec 19 21:58:35 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Thu Jul 26 22:28:23 2018
// Update Count     : 449
//

#include <unistd.h>								// sbrk, sysconf
#include <stdbool.h>							// true, false
#include <stdio.h>								// snprintf, fileno
#include <errno.h>								// errno
extern "C" {
#include <sys/mman.h>							// mmap, munmap
} // extern "C"

#include "bits/align.h"							// libPow2
#include "bits/defs.h"							// likely, unlikely
#include "bits/locks.h"							// __spinlock_t
#include "startup.h"							// STARTUP_PRIORITY_MEMORY
#include "stdlib"								// bsearchl
#include "malloc.h"


enum {
	__CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1),
	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
};

size_t default_mmap_start() __attribute__(( weak )) {
	return __CFA_DEFAULT_MMAP_START__;
} // default_mmap_start

size_t default_heap_expansion() __attribute__(( weak )) {
	return __CFA_DEFAULT_HEAP_EXPANSION__;
} // default_heap_expansion
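
// Since both routines are weak symbols, an application can tune the allocator by linking strong
// definitions that override these defaults. A minimal sketch, with illustrative values:
//
//   size_t default_mmap_start() { return 1024 * 1024 + 1; }		// mmap crossover at 1 MiB
//   size_t default_heap_expansion() { return 4 * 1024 * 1024; }	// grow sbrk region 4 MiB at a time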


// supported mallopt options
#ifndef M_MMAP_THRESHOLD
#define M_MMAP_THRESHOLD (-1)
#endif // M_MMAP_THRESHOLD
#ifndef M_TOP_PAD
#define M_TOP_PAD (-2)
#endif // M_TOP_PAD

#define FASTLOOKUP
#define __STATISTICS__

#define SPINLOCK 0
#define LOCKFREE 1
#define BUCKETLOCK SPINLOCK
#if BUCKETLOCK == LOCKFREE
#include <uStackLF.h>
#endif // LOCKFREE

#define ALIGN 16

// enum { NoBucketSizes = 93,					// number of bucket sizes
// #ifdef FASTLOOKUP
//	LookupSizes = 65536,						// number of fast lookup sizes
// #endif // FASTLOOKUP
// };
#define NoBucketSizes 93						// number of bucket sizes
#ifdef FASTLOOKUP
#define LookupSizes 65536						// number of fast lookup sizes
#endif // FASTLOOKUP


static _Bool traceHeap = false;

inline _Bool traceHeap() {
	return traceHeap;
} // traceHeap

_Bool traceHeapOn() {
	_Bool temp = traceHeap;
	traceHeap = true;
	return temp;
} // traceHeapOn

_Bool traceHeapOff() {
	_Bool temp = traceHeap;
	traceHeap = false;
	return temp;
} // traceHeapOff


// static _Bool prtHeapTerm = false;

// inline _Bool prtHeapTerm() {
//	return prtHeapTerm;
// } // prtHeapTerm

// _Bool prtHeapTermOn() {
//	_Bool temp = prtHeapTerm;
//	prtHeapTerm = true;
//	return temp;
// } // prtHeapTermOn

// _Bool prtHeapTermOff() {
//	_Bool temp = prtHeapTerm;
//	prtHeapTerm = false;
//	return temp;
// } // prtHeapTermOff


#ifdef __CFA_DEBUG__
static unsigned int allocfree;					// running total of allocations minus frees
static unsigned int appStart;					// storage allocation when application starts

static void checkUnfreed() {
	unsigned int total = allocfree - appStart;
	if ( total != 0 ) {
		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
		// char helpText[512];
		// int len = snprintf( helpText, 512, "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"
		//					"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
		//					(long int)getpid(), total, total ); // always print the UNIX pid
		// __cfaabi_dbg_bits_write( helpText, len );
	} // if
} // checkUnfreed

extern "C" {
void heapAppStart() {							// called by __cfaabi_appready_startup
	appStart = allocfree;
} // heapAppStart

void heapAppStop() {							// called by __cfaabi_appready_startdown
	checkUnfreed();
} // heapAppStop
} // extern "C"
#endif // __CFA_DEBUG__


// statically allocated variables => zero filled.

static size_t pageSize;							// architecture pagesize
static size_t heapExpand;						// sbrk advance
static size_t mmapStart;						// cross over point for mmap
static unsigned int maxBucketsUsed;				// maximum number of buckets in use
static unsigned int bucketSizes[NoBucketSizes] = { // different bucket sizes
	16, 32, 48, 64,
	80, 96, 112, 128, 144, 160, 192, 224,
	256, 320, 384, 448, 512, 640, 768, 896,
	1024, 1536, 2048, 2560, 3072, 3584, 4096, 6144,
	8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360,
	16384, 18432, 20480, 22528, 24576, 26624, 28672, 30720,
	32768, 36864, 40960, 45056, 49152, 53248, 57344, 61440,
	65536, 73728, 81920, 90112, 98304, 106496, 114688, 122880,
	131072, 147456, 163840, 180224, 196608, 212992, 229376, 245760,
	262144, 294912, 327680, 360448, 393216, 425984, 458752, 491520,
	524288, 655360, 786432, 917504, 1048576, 1179648, 1310720, 1441792,
	1572864, 1703936, 1835008, 1966080, 2097152, 2621440, 3145728, 3670016,
	4194304
};
#ifdef FASTLOOKUP
static unsigned char lookup[LookupSizes];		// O(1) lookup for small sizes
#endif // FASTLOOKUP
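
// The lookup table provides the O(1) size-to-bucket mapping: it is filled in the HeapManager
// constructor so that lookup[s] is the index of the smallest bucket with blockSize >= s, for any
// total size s below LookupSizes; larger sbrk-range sizes fall back to a binary search of the
// buckets in doMalloc. Worked example (assuming the 16-byte Storage header defined below):
// malloc( 100 ) needs 100 + 16 = 116 total bytes, and lookup[116] selects the 128-byte bucket.
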
static int mmapFd = -1;							// fake or actual fd for anonymous file


struct HeapManager {
//	struct FreeHeader;							// forward declaration

	struct Storage {
		struct Header {							// header
			union Kind {
				struct RealHeader {
					union {
						struct {				// 32-bit word => 64-bit header, 64-bit word => 128-bit header
							#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4
							uint32_t padding;	// unused, force home/blocksize to overlay alignment in fake header
							#endif // __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4

							union {
//								FreeHeader * home; // allocated block points back to home locations (must overlay alignment)
								void * home;	// allocated block points back to home locations (must overlay alignment)
								size_t blockSize; // size for munmap (must overlay alignment)
								#if BUCKETLOCK == SPINLOCK
								Storage * next;	// freed block points next freed block of same size
								#endif // SPINLOCK
							};

							#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
							uint32_t padding;	// unused, force home/blocksize to overlay alignment in fake header
							#endif // __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4

						};
						#if BUCKETLOCK == LOCKFREE
						Stack<Storage>::Link next; // freed block points next freed block of same size (double-wide)
						#endif // LOCKFREE
					};
				} real;
				struct FakeHeader {
					#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
					uint32_t alignment;			// low-order bits of home/blockSize used for tricks
					#endif // __ORDER_LITTLE_ENDIAN__

					uint32_t offset;

					#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
					uint32_t alignment;			// low-order bits of home/blockSize used for tricks
					#endif // __ORDER_BIG_ENDIAN__
				} fake;
			} kind;
		} header;								// Header
		char pad[ALIGN - sizeof( Header )];
		char data[0];							// storage
	}; // Storage

	static_assert( ALIGN >= sizeof( Storage ), "ALIGN < sizeof( Storage )" );

	struct FreeHeader {
		#if BUCKETLOCK == SPINLOCK
		__spinlock_t lock;						// must be first field for alignment
		Storage * freeList;
		#elif BUCKETLOCK == LOCKFREE
		StackLF<Storage> freeList;
		#else
		#error undefined lock type for bucket lock
		#endif // SPINLOCK
		size_t blockSize;						// size of allocations on this list
	}; // FreeHeader

	// must be first fields for alignment
	__spinlock_t extlock;						// protects allocation-buffer extension
	FreeHeader freeLists[NoBucketSizes];		// buckets for different allocation sizes

	void * heapBegin;							// start of heap
	void * heapEnd;								// logical end of heap
	size_t heapRemaining;						// amount of storage not allocated in the current chunk
}; // HeapManager
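
// Layout notes (inferred from the code below): every allocation is preceded by a 16-byte Storage
// header. For sbrk blocks, kind.real.home points back at the owning FreeHeader bucket; for mmap
// blocks, kind.real.blockSize holds the mapping size. Because both values are at least 4-byte
// aligned, the two low-order bits of this word are free for flags: bit 0 set in
// kind.fake.alignment marks a fake (memalign) header, and bit 1 set in kind.real.blockSize marks
// zero-filled storage (calloc/cmemalign). Masks like & -2 and & -3 strip these flags when the
// real value is needed.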


static inline _Bool setMmapStart( size_t value ) {
  if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
	mmapStart = value;							// set global

	// find the closest bucket size greater than or equal to the mmapStart size
	maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
	assert( maxBucketsUsed < NoBucketSizes );	// subscript failure ?
	assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
	return false;
} // setMmapStart


static void ?{}( HeapManager & manager ) with ( manager ) {
	pageSize = sysconf( _SC_PAGESIZE );

	for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
		freeLists[i].blockSize = bucketSizes[i];
	} // for

	#ifdef FASTLOOKUP
	unsigned int idx = 0;
	for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
		if ( i > bucketSizes[idx] ) idx += 1;
		lookup[i] = idx;
	} // for
	#endif // FASTLOOKUP

	if ( setMmapStart( default_mmap_start() ) ) {
		abort( "HeapManager : internal error, mmap start initialization failure." );
	} // if
	heapExpand = default_heap_expansion();

	char * End = (char *)sbrk( 0 );
	sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End ); // move start of heap to multiple of alignment
	heapBegin = heapEnd = sbrk( 0 );			// get new start point
} // HeapManager


static void ^?{}( HeapManager & ) {
	#ifdef __STATISTICS__
	// if ( prtHeapTerm() ) {
	//	printStats();
	//	checkFree( heapManager, true );
	// } // if
	#endif // __STATISTICS__
} // ~HeapManager


#ifdef __CFA_DEBUG__
static _Bool heapBoot = 0;						// detect recursion during boot
#endif // __CFA_DEBUG__
static HeapManager heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing

static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_startup( void ) {
	#ifdef __CFA_DEBUG__
	if ( unlikely( heapBoot ) ) {				// check for recursion during system boot
		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
		abort( "boot() : internal error, recursively invoked during system boot." );
	} // if
	heapBoot = true;
	#endif // __CFA_DEBUG__

	assert( heapManager.heapBegin == 0 );
	heapManager{};
} // memory_startup

static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_shutdown( void ) {
	^heapManager{};
} // memory_shutdown

static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return freeheader.blockSize; }


#ifdef __STATISTICS__
static unsigned long long int mmap_storage;		// heap statistics counters
static unsigned int mmap_calls;
static unsigned long long int munmap_storage;
static unsigned int munmap_calls;
static unsigned long long int sbrk_storage;
static unsigned int sbrk_calls;
static unsigned long long int malloc_storage;
static unsigned int malloc_calls;
static unsigned long long int free_storage;
static unsigned int free_calls;
static unsigned long long int calloc_storage;
static unsigned int calloc_calls;
static unsigned long long int memalign_storage;
static unsigned int memalign_calls;
static unsigned long long int cmemalign_storage;
static unsigned int cmemalign_calls;
static unsigned long long int realloc_storage;
static unsigned int realloc_calls;

static int statfd = STDERR_FILENO;				// statistics file descriptor (default stderr, changed by malloc_stats_fd)


// Use "write" because streams may be shutdown when calls are made.
static void printStats() {
	char helpText[512];
	int len = snprintf( helpText, 512,
			"\nHeap statistics:\n"
			"  malloc: calls %u / storage %llu\n"
			"  calloc: calls %u / storage %llu\n"
			"  memalign: calls %u / storage %llu\n"
			"  cmemalign: calls %u / storage %llu\n"
			"  realloc: calls %u / storage %llu\n"
			"  free: calls %u / storage %llu\n"
			"  mmap: calls %u / storage %llu\n"
			"  munmap: calls %u / storage %llu\n"
			"  sbrk: calls %u / storage %llu\n",
			malloc_calls, malloc_storage,
			calloc_calls, calloc_storage,
			memalign_calls, memalign_storage,
			cmemalign_calls, cmemalign_storage,
			realloc_calls, realloc_storage,
			free_calls, free_storage,
			mmap_calls, mmap_storage,
			munmap_calls, munmap_storage,
			sbrk_calls, sbrk_storage
		);
	write( statfd, helpText, len );
} // printStats


static int printStatsXML( FILE * stream ) {
	char helpText[512];
	int len = snprintf( helpText, 512,
			"<malloc version=\"1\">\n"
			"<heap nr=\"0\">\n"
			"<sizes>\n"
			"</sizes>\n"
			"<total type=\"malloc\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"calloc\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"memalign\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"cmemalign\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"realloc\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"free\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"mmap\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"munmap\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"sbrk\" count=\"%u\" size=\"%llu\"/>\n"
			"</malloc>",
			malloc_calls, malloc_storage,
			calloc_calls, calloc_storage,
			memalign_calls, memalign_storage,
			cmemalign_calls, cmemalign_storage,
			realloc_calls, realloc_storage,
			free_calls, free_storage,
			mmap_calls, mmap_storage,
			munmap_calls, munmap_storage,
			sbrk_calls, sbrk_storage
		);
	return write( fileno( stream ), helpText, len ); // -1 => error
} // printStatsXML
#endif // __STATISTICS__


static inline void noMemory() {
	abort( "Heap memory exhausted at %zu bytes.\n"
		   "Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
		   ((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
} // noMemory


static inline void checkAlign( size_t alignment ) {
	if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) {
		abort( "Alignment %zu for memory allocation is less than sizeof(void *) and/or not a power of 2.", alignment );
	} // if
} // checkAlign


static inline _Bool setHeapExpand( size_t value ) {
  if ( value < pageSize ) return true;			// validate the new value, not the old heapExpand
	heapExpand = value;
	return false;
} // setHeapExpand


static inline void checkHeader( _Bool check, const char * name, void * addr ) {
	if ( unlikely( check ) ) {					// bad address ?
		abort( "Attempt to %s storage %p with address outside the heap.\n"
			   "Possible cause is duplicate free on same block or overwriting of memory.",
			   name, addr );
	} // if
} // checkHeader


static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & size, size_t & alignment ) {
	if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
		size_t offset = header->kind.fake.offset;
		alignment = header->kind.fake.alignment & -2; // remove flag from value
		#ifdef __CFA_DEBUG__
		checkAlign( alignment );				// check alignment
		#endif // __CFA_DEBUG__
		header = (HeapManager.Storage.Header *)((char *)header - offset);
	} // if
} // fakeHeader
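
// Worked example (values illustrative): after memalign( 64, 100 ), the fake header just below the
// aligned user address holds alignment == (64 | 1) == 65, so the (alignment & 1) test above
// succeeds, alignment & -2 recovers 64, and stepping back by offset lands on the real header
// created by doMalloc.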


#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))

static inline _Bool headers( const char * name, void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
	header = headerAddr( addr );

	if ( unlikely( heapEnd < addr ) ) {			// mmapped ?
		fakeHeader( header, size, alignment );
		size = header->kind.real.blockSize & -3; // mmap size
		return true;
	} // if

	#ifdef __CFA_DEBUG__
	checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
	#endif // __CFA_DEBUG__
	// header may be safe to dereference
	fakeHeader( header, size, alignment );
	#ifdef __CFA_DEBUG__
	checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
	#endif // __CFA_DEBUG__

	freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
	#ifdef __CFA_DEBUG__
	if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
		abort( "Attempt to %s storage %p with corrupted header.\n"
			   "Possible cause is duplicate free on same block or overwriting of header information.",
			   name, addr );
	} // if
	#endif // __CFA_DEBUG__
	size = freeElem->blockSize;
	return false;
} // headers
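
// Summary: headers() normalizes any user address to its real header. The return value
// distinguishes the two storage kinds: true => mmapped block (size is the mapping size and the
// caller must munmap), false => sbrk block (freeElem is the home bucket and size its blockSize).
// An address above heapEnd can only come from mmap, which is why that test is safe before
// dereferencing the header.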


static inline void * extend( size_t size ) with ( heapManager ) {
	lock( extlock __cfaabi_dbg_ctx2 );
	ptrdiff_t rem = heapRemaining - size;
	if ( rem < 0 ) {
		// If the size requested is bigger than the current remaining storage, increase the size of the heap.

		size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
		if ( sbrk( increase ) == (void *)-1 ) {
			unlock( extlock );
			errno = ENOMEM;
			return 0;
		} // if
		#ifdef __STATISTICS__
		sbrk_calls += 1;
		sbrk_storage += increase;
		#endif // __STATISTICS__
		#ifdef __CFA_DEBUG__
		// Set new memory to garbage so subsequent uninitialized usages might fail.
		memset( (char *)heapEnd + heapRemaining, '\377', increase );
		#endif // __CFA_DEBUG__
		rem = heapRemaining + increase - size;
	} // if

	HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
	heapRemaining = rem;
	heapEnd = (char *)heapEnd + size;
	unlock( extlock );
	return block;
} // extend
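
// Growth sketch (illustrative numbers): with heapExpand at its 1 MiB default, a 4 KiB carve
// request that overruns the current chunk advances sbrk by the full 1 MiB, so roughly 256
// subsequent carves of that size need no further system call; a single request larger than
// heapExpand advances the break by exactly that (aligned) amount instead.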


static inline void * doMalloc( size_t size ) with ( heapManager ) {
	HeapManager.Storage * block;

	// Look up size in the size list. Make sure the user request includes space for the header that must be allocated
	// along with the block and is a multiple of the alignment size.

	size_t tsize = size + sizeof(HeapManager.Storage);
	if ( likely( tsize < mmapStart ) ) {		// small size => sbrk
		HeapManager.FreeHeader * freeElem =
			#ifdef FASTLOOKUP
			tsize < LookupSizes ? &freeLists[lookup[tsize]] :
			#endif // FASTLOOKUP
			bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search
		assert( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ?
		assert( tsize <= freeElem->blockSize );	// search failure ?
		tsize = freeElem->blockSize;			// total space needed for request

		// Spin until the lock is acquired for this particular size of block.

		#if BUCKETLOCK == SPINLOCK
		lock( freeElem->lock __cfaabi_dbg_ctx2 );
		block = freeElem->freeList;				// remove node from stack
		#else
		block = freeElem->freeList.pop();
		#endif // SPINLOCK
		if ( unlikely( block == 0 ) ) {			// no free block ?
			#if BUCKETLOCK == SPINLOCK
			unlock( freeElem->lock );
			#endif // SPINLOCK
			// Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more
			// and then carve it off.

			block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call
			if ( unlikely( block == 0 ) ) return 0;
		#if BUCKETLOCK == SPINLOCK
		} else {
			freeElem->freeList = block->header.kind.real.next;
			unlock( freeElem->lock );
		#endif // SPINLOCK
		} // if

		block->header.kind.real.home = freeElem; // pointer back to free list of appropriate size
	} else {									// large size => mmap
		tsize = libCeiling( tsize, pageSize );	// must be multiple of page size
		#ifdef __STATISTICS__
		__atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &mmap_storage, tsize, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
		if ( block == (HeapManager.Storage *)MAP_FAILED ) {
			// Do not call strerror( errno ) as it may call malloc.
			abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu error:%d.", &heapManager, tsize, errno );
		} // if
		#ifdef __CFA_DEBUG__
		// Set new memory to garbage so subsequent uninitialized usages might fail.
		memset( block, '\377', tsize );
		#endif // __CFA_DEBUG__
		block->header.kind.real.blockSize = tsize; // storage size for munmap
	} // if

	void * area = &(block->data);				// adjust off header to user bytes

	#ifdef __CFA_DEBUG__
	assert( ((uintptr_t)area & (libAlign() - 1)) == 0 ); // minimum alignment ?
	__atomic_add_fetch( &allocfree, tsize, __ATOMIC_SEQ_CST );
	if ( traceHeap() ) {
		enum { BufferSize = 64 };
		char helpText[BufferSize];
		int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", area, size, tsize );
		// int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", area, size );
		__cfaabi_dbg_bits_write( helpText, len );
	} // if
	#endif // __CFA_DEBUG__

	return area;
} // doMalloc
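
// Worked example (sizes follow from this file's constants): malloc( 100 ) gives tsize = 116, the
// 128-byte bucket is selected, and the user pointer is block plus the 16-byte header, leaving 12
// bytes of internal slack. malloc( 1024 * 1024 ) exceeds the default mmapStart, so tsize is
// rounded up to a page-size multiple, the block comes from mmap, and doFree later releases it
// with munmap rather than pushing it on a free list.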


static inline void doFree( void * addr ) with ( heapManager ) {
	#ifdef __CFA_DEBUG__
	if ( unlikely( heapManager.heapBegin == 0 ) ) {
		abort( "doFree( %p ) : internal error, called before heap is initialized.", addr );
	} // if
	#endif // __CFA_DEBUG__

	HeapManager.Storage.Header * header;
	HeapManager.FreeHeader * freeElem;
	size_t size, alignment;						// not used (see realloc)

	if ( headers( "free", addr, header, freeElem, size, alignment ) ) { // mmapped ?
		#ifdef __STATISTICS__
		__atomic_add_fetch( &munmap_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &munmap_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		if ( munmap( header, size ) == -1 ) {
			#ifdef __CFA_DEBUG__
			abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
				   "Possible cause is invalid pointer.",
				   addr );
			#endif // __CFA_DEBUG__
		} // if
	} else {
		#ifdef __CFA_DEBUG__
		// Set free memory to garbage so subsequent usages might fail.
		memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) );
		#endif // __CFA_DEBUG__

		#ifdef __STATISTICS__
		__atomic_add_fetch( &free_storage, size, __ATOMIC_SEQ_CST ); // atomic, like the other counters
		#endif // __STATISTICS__
		#if BUCKETLOCK == SPINLOCK
		lock( freeElem->lock __cfaabi_dbg_ctx2 );	// acquire spin lock
		header->kind.real.next = freeElem->freeList; // push on stack
		freeElem->freeList = (HeapManager.Storage *)header;
		unlock( freeElem->lock );				// release spin lock
		#else
		freeElem->freeList.push( *(HeapManager.Storage *)header );
		#endif // SPINLOCK
	} // if

	#ifdef __CFA_DEBUG__
	__atomic_add_fetch( &allocfree, -size, __ATOMIC_SEQ_CST );
	if ( traceHeap() ) {
		enum { BufferSize = 64 };
		char helpText[BufferSize];
		int len = snprintf( helpText, BufferSize, "Free( %p ) size:%zu\n", addr, size );
		__cfaabi_dbg_bits_write( helpText, len );
	} // if
	#endif // __CFA_DEBUG__
} // doFree


size_t checkFree( HeapManager & manager, _Bool prt ) with ( manager ) {
	size_t total = 0;
	#ifdef __STATISTICS__
	__cfaabi_dbg_bits_acquire();
	if ( prt ) __cfaabi_dbg_bits_print_nolock( "\nBin lists (bin size : free blocks on list)\n" );
	#endif // __STATISTICS__
	for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {
		size_t size = freeLists[i].blockSize;
		#ifdef __STATISTICS__
		unsigned int N = 0;
		#endif // __STATISTICS__
		#if BUCKETLOCK == SPINLOCK
		for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0; p = p->header.kind.real.next ) {
		#else
		for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0; p = p->header.kind.real.next.top ) {
		#endif // SPINLOCK
			total += size;
			#ifdef __STATISTICS__
			N += 1;
			#endif // __STATISTICS__
		} // for
		#ifdef __STATISTICS__
		if ( prt ) __cfaabi_dbg_bits_print_nolock( "%7zu, %-7u ", size, N );
		if ( (i + 1) % 8 == 0 ) __cfaabi_dbg_bits_print_nolock( "\n" );
		#endif // __STATISTICS__
	} // for
	#ifdef __STATISTICS__
	if ( prt ) __cfaabi_dbg_bits_print_nolock( "\ntotal free blocks:%zu\n", total );
	__cfaabi_dbg_bits_release();
	#endif // __STATISTICS__
	return (char *)heapEnd - (char *)heapBegin - total;
} // checkFree


static inline void * malloc2( size_t size ) {	// necessary for malloc statistics
	assert( heapManager.heapBegin != 0 );
	void * area = doMalloc( size );
	if ( unlikely( area == 0 ) ) errno = ENOMEM; // POSIX
	return area;
} // malloc2


static inline void * memalign2( size_t alignment, size_t size ) { // necessary for malloc statistics
	#ifdef __CFA_DEBUG__
	checkAlign( alignment );					// check alignment
	#endif // __CFA_DEBUG__

	// if alignment <= default alignment, do normal malloc as two headers are unnecessary
	if ( unlikely( alignment <= libAlign() ) ) return malloc2( size );

	// Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
	// administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
	//      .-------------v-----------------v----------------v----------,
	//      | Real Header | ... padding ... |   Fake Header  | data ... |
	//      `-------------^-----------------^-+--------------^----------'
	//      |<--------------------------------' offset/align |<-- alignment boundary

	// subtract libAlign() because it is already the minimum alignment
	// add sizeof(Storage) for fake header
	char * area = (char *)doMalloc( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
	if ( unlikely( area == 0 ) ) return area;

	// address in the block of the "next" alignment address
	char * user = (char *)libCeiling( (uintptr_t)(area + sizeof(HeapManager.Storage)), alignment );

	// address of header from malloc
	HeapManager.Storage.Header * realHeader = headerAddr( area );
	// address of fake header *before* the alignment location
	HeapManager.Storage.Header * fakeHeader = headerAddr( user );
	// SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
	fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
	// SKULLDUGGERY: odd alignment implies fake header
	fakeHeader->kind.fake.alignment = alignment | 1;

	return user;
} // memalign2
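
// Worked example (assuming libAlign() == 16 and the 16-byte Storage header): memalign2( 64, 100 )
// calls doMalloc( 100 + 64 - 16 + 16 ) = doMalloc( 164 ), which selects the 192-byte bucket.
// user is the first 64-byte boundary at or after area + 16, so the fake header written just below
// user always falls inside the block, and offset records how far back the real header lies.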


extern "C" {
	void * malloc( size_t size ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &malloc_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &malloc_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		return malloc2( size );
	} // malloc


	void * calloc( size_t noOfElems, size_t elemSize ) {
		size_t size = noOfElems * elemSize;
	  if ( unlikely( noOfElems != 0 && size / noOfElems != elemSize ) ) { errno = ENOMEM; return 0; } // guard against multiplication overflow
		#ifdef __STATISTICS__
		__atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &calloc_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		char * area = (char *)malloc2( size );
	  if ( unlikely( area == 0 ) ) return 0;
		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		size_t asize, alignment;
		_Bool mapped __attribute__(( unused )) = headers( "calloc", area, header, freeElem, asize, alignment );
		#ifndef __CFA_DEBUG__
		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		if ( ! mapped )
		#endif // __CFA_DEBUG__
			memset( area, '\0', asize - sizeof(HeapManager.Storage) ); // set to zeros
		header->kind.real.blockSize |= 2;		// mark as zero filled
		return area;
	} // calloc


	void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {
		size_t size = noOfElems * elemSize;
	  if ( unlikely( noOfElems != 0 && size / noOfElems != elemSize ) ) { errno = ENOMEM; return 0; } // guard against multiplication overflow
		#ifdef __STATISTICS__
		__atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &cmemalign_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		char * area = (char *)memalign2( alignment, size );
	  if ( unlikely( area == 0 ) ) return 0;
		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		size_t asize;
		_Bool mapped __attribute__(( unused )) = headers( "cmemalign", area, header, freeElem, asize, alignment );
		#ifndef __CFA_DEBUG__
		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		if ( ! mapped )
		#endif // __CFA_DEBUG__
			memset( area, '\0', asize - ( (char *)area - (char *)header ) ); // set to zeros
		header->kind.real.blockSize |= 2;		// mark as zero filled

		return area;
	} // cmemalign


	void * realloc( void * addr, size_t size ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

	  if ( unlikely( addr == 0 ) ) return malloc2( size ); // special cases
	  if ( unlikely( size == 0 ) ) { free( addr ); return 0; }

		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		size_t asize, alignment = 0;
		headers( "realloc", addr, header, freeElem, asize, alignment );

		size_t usize = asize - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block
		if ( usize >= size ) {					// already sufficient storage
			// This case does not result in a new profiler entry because the previous one still exists and it must match with
			// the free for this memory. Hence, this realloc does not appear in the profiler output.
			return addr;
		} // if

		#ifdef __STATISTICS__
		__atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		void * area;
		if ( unlikely( alignment != 0 ) ) {		// previous request memalign?
			area = memalign( alignment, size );	// create new area
		} else {
			area = malloc2( size );				// create new area
		} // if
	  if ( unlikely( area == 0 ) ) return 0;
		if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill (calloc/cmemalign) ?
			assert( (header->kind.real.blockSize & 1) == 0 );
			_Bool mapped __attribute__(( unused )) = headers( "realloc", area, header, freeElem, asize, alignment );
			#ifndef __CFA_DEBUG__
			// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
			if ( ! mapped )
			#endif // __CFA_DEBUG__
				memset( (char *)area + usize, '\0', asize - ( (char *)area - (char *)header ) - usize ); // zero-fill back part
			header->kind.real.blockSize |= 2;	// mark new request as zero fill
		} // if
		memcpy( area, addr, usize );			// copy bytes
		free( addr );
		return area;
	} // realloc
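
	// Note on the logic above: a shrinking or same-size realloc returns the original address
	// unchanged; a growing realloc of calloc/cmemalign storage re-marks the new block and zero
	// fills only the bytes beyond the old user size, so the zero-fill guarantee survives the move.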


	void * memalign( size_t alignment, size_t size ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &memalign_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &memalign_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		void * area = memalign2( alignment, size );

		return area;
	} // memalign


	void * aligned_alloc( size_t alignment, size_t size ) {
		return memalign( alignment, size );
	} // aligned_alloc


	int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
	  if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
		* memptr = memalign( alignment, size );
	  if ( unlikely( * memptr == 0 ) ) return ENOMEM;
		return 0;
	} // posix_memalign


	void * valloc( size_t size ) {
		return memalign( pageSize, size );
	} // valloc


	void free( void * addr ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &free_calls, 1, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		if ( unlikely( addr == 0 ) ) {			// special case
			#ifdef __CFA_DEBUG__
			if ( traceHeap() ) {
				#define nullmsg "Free( 0x0 ) size:0\n"
				// Do not debug print free( 0 ), as it can cause recursive entry from sprintf.
				__cfaabi_dbg_bits_write( nullmsg, sizeof(nullmsg) - 1 );
			} // if
			#endif // __CFA_DEBUG__
			return;
		} // if

		doFree( addr );
	} // free

	int mallopt( int option, int value ) {
		choose( option ) {
		  case M_TOP_PAD:
			if ( setHeapExpand( value ) ) fallthru default;
		  case M_MMAP_THRESHOLD:
			if ( setMmapStart( value ) ) fallthru default;
		  default:
			return 0;							// error: bad value or unsupported option
		} // choose
		return 1;								// success
	} // mallopt
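
	// Usage sketch (values illustrative): mallopt( M_TOP_PAD, 2 * 1024 * 1024 ) makes each sbrk
	// extension grab at least 2 MiB, and mallopt( M_MMAP_THRESHOLD, 256 * 1024 ) lowers the size
	// at which requests bypass the buckets and go directly to mmap. Both return 1 on success and
	// 0 when the value is out of range (below pageSize, or above the largest bucket for the
	// threshold).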


	int malloc_trim( size_t ) {
		return 0;								// => impossible to release memory
	} // malloc_trim

	size_t malloc_usable_size( void * addr ) {
	  if ( unlikely( addr == 0 ) ) return 0;	// null allocation has 0 size
		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		size_t size, alignment;

		headers( "malloc_usable_size", addr, header, freeElem, size, alignment );
		size_t usize = size - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block
		return usize;
	} // malloc_usable_size


	size_t malloc_alignment( void * addr ) {
	  if ( unlikely( addr == 0 ) ) return libAlign(); // minimum alignment
		HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );
		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
			return header->kind.fake.alignment & -2; // remove flag from value
		} else {
			return libAlign();					// minimum alignment
		} // if
	} // malloc_alignment


	_Bool malloc_zero_fill( void * addr ) {
	  if ( unlikely( addr == 0 ) ) return false; // null allocation is not zero fill
		HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );
		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
			header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset);
		} // if
		return (header->kind.real.blockSize & 2) != 0; // zero filled (calloc/cmemalign) ?
	} // malloc_zero_fill


	void malloc_stats( void ) {
		#ifdef __STATISTICS__
		printStats();
		checkFree( heapManager, true );
		#endif // __STATISTICS__
	} // malloc_stats


	int malloc_stats_fd( int fd ) {
		#ifdef __STATISTICS__
		int temp = statfd;
		statfd = fd;
		return temp;
		#else
		return -1;
		#endif // __STATISTICS__
	} // malloc_stats_fd
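
	// Usage sketch (illustrative, file name hypothetical): route the statistics to a log file
	// instead of the default descriptor:
	//
	//   int fd = open( "heap.log", O_WRONLY | O_CREAT, 0644 );
	//   int old = malloc_stats_fd( fd );		// returns previous descriptor
	//   malloc_stats();						// report now written to heap.log
	//   malloc_stats_fd( old );				// restore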


	int malloc_info( int options, FILE * stream ) {
		return printStatsXML( stream );
	} // malloc_info


	void * malloc_get_state( void ) {
		return 0;								// unsupported
	} // malloc_get_state


	int malloc_set_state( void * ptr ) {
		return 0;								// unsupported
	} // malloc_set_state
} // extern "C"


// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa -nodebug -O2 heap.c" //
// End: //