source: libcfa/src/heap.c@ bf71cfd

Last change on this file since bf71cfd was bf71cfd, checked in by Thierry Delisle <tdelisle@…>: "Moved up many directories in source"

//
// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// heap.c --
//
// Author           : Peter A. Buhr
// Created On       : Tue Dec 19 21:58:35 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Jul 31 18:08:50 2018
// Update Count     : 470
//

#include <unistd.h>                                     // sbrk, sysconf
#include <stdbool.h>                                    // true, false
#include <stdio.h>                                      // snprintf, fileno
#include <errno.h>                                      // errno
extern "C" {
#include <sys/mman.h>                                   // mmap, munmap
} // extern "C"

#include "bits/align.h"                                 // libPow2
#include "bits/defs.h"                                  // likely, unlikely
#include "bits/locks.h"                                 // __spinlock_t
#include "startup.h"                                    // STARTUP_PRIORITY_MEMORY
#include "stdlib"                                       // bsearchl
#include "malloc.h"


enum {
    __CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1),
    __CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
};

size_t default_mmap_start() __attribute__(( weak )) {
    return __CFA_DEFAULT_MMAP_START__;
} // default_mmap_start

size_t default_heap_expansion() __attribute__(( weak )) {
    return __CFA_DEFAULT_HEAP_EXPANSION__;
} // default_heap_expansion

// supported mallopt options
#ifndef M_MMAP_THRESHOLD
#define M_MMAP_THRESHOLD (-1)
#endif // M_MMAP_THRESHOLD
#ifndef M_TOP_PAD
#define M_TOP_PAD (-2)
#endif // M_TOP_PAD

#define FASTLOOKUP
#define __STATISTICS__

#define SPINLOCK 0
#define LOCKFREE 1
#define BUCKETLOCK SPINLOCK
#if BUCKETLOCK == LOCKFREE
#include <uStackLF.h>
#endif // LOCKFREE

#define ALIGN 16

// enum { NoBucketSizes = 93,                           // number of bucket sizes
// #ifdef FASTLOOKUP
//        LookupSizes = 65536,                          // number of fast lookup sizes
// #endif // FASTLOOKUP
// };
#define NoBucketSizes 93                                // number of bucket sizes
#ifdef FASTLOOKUP
#define LookupSizes 65536                               // number of fast lookup sizes
#endif // FASTLOOKUP


static _Bool traceHeap = false;

inline _Bool traceHeap() {
    return traceHeap;
} // traceHeap

_Bool traceHeapOn() {
    _Bool temp = traceHeap;
    traceHeap = true;
    return temp;
} // traceHeapOn

_Bool traceHeapOff() {
    _Bool temp = traceHeap;
    traceHeap = false;
    return temp;
} // traceHeapOff


static _Bool checkFree = false;

inline _Bool checkFree() {
    return checkFree;
} // checkFree

_Bool checkFreeOn() {
    _Bool temp = checkFree;
    checkFree = true;
    return temp;
} // checkFreeOn

_Bool checkFreeOff() {
    _Bool temp = checkFree;
    checkFree = false;
    return temp;
} // checkFreeOff

// static _Bool traceHeapTerm = false;

// inline _Bool traceHeapTerm() {
//     return traceHeapTerm;
// } // traceHeapTerm

// _Bool traceHeapTermOn() {
//     _Bool temp = traceHeapTerm;
//     traceHeapTerm = true;
//     return temp;
// } // traceHeapTermOn

// _Bool traceHeapTermOff() {
//     _Bool temp = traceHeapTerm;
//     traceHeapTerm = false;
//     return temp;
// } // traceHeapTermOff


#ifdef __CFA_DEBUG__
static unsigned int allocfree;                          // running total of allocations minus frees
static unsigned int appStart;                           // storage allocation when application starts

static void checkUnfreed() {
    unsigned int total = allocfree - appStart;
    if ( total != 0 ) {
        // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
        // char helpText[512];
        // int len = snprintf( helpText, 512, "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"
        //                     "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
        //                     (long int)getpid(), total, total ); // always print the UNIX pid
        // __cfaabi_dbg_bits_write( helpText, len );
    } // if
} // checkUnfreed

extern "C" {
    void heapAppStart() {                               // called by __cfaabi_appready_startup
        appStart = allocfree;
    } // heapAppStart

    void heapAppStop() {                                // called by __cfaabi_appready_startdown
        checkUnfreed();
    } // heapAppStop
} // extern "C"
#endif // __CFA_DEBUG__


struct HeapManager {
    // struct FreeHeader;                               // forward declaration

    struct Storage {
        struct Header {                                 // header
            union Kind {
                struct RealHeader {
                    union {
                        struct {                        // 32-bit word => 64-bit header, 64-bit word => 128-bit header
                            #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4
                            uint32_t padding;           // unused, force home/blocksize to overlay alignment in fake header
                            #endif // __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4

                            union {
                                // FreeHeader * home;   // allocated block points back to home locations (must overlay alignment)
                                void * home;            // allocated block points back to home locations (must overlay alignment)
                                size_t blockSize;       // size for munmap (must overlay alignment)
                                #if BUCKETLOCK == SPINLOCK
                                Storage * next;         // freed block points to next freed block of same size
                                #endif // SPINLOCK
                            };

                            #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
                            uint32_t padding;           // unused, force home/blocksize to overlay alignment in fake header
                            #endif // __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4

                        };
                        #if BUCKETLOCK == LOCKFREE
                        Stack<Storage>::Link next;      // freed block points to next freed block of same size (double-wide)
                        #endif // LOCKFREE
                    };
                } real;
                struct FakeHeader {
                    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
                    uint32_t alignment;                 // low-order bits of home/blockSize used for tricks
                    #endif // __ORDER_LITTLE_ENDIAN__

                    uint32_t offset;

                    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                    uint32_t alignment;                 // low-order bits of home/blockSize used for tricks
                    #endif // __ORDER_BIG_ENDIAN__
                } fake;
            } kind;
        } header;                                       // Header
        char pad[ALIGN - sizeof( Header )];
        char data[0];                                   // storage
    }; // Storage

    static_assert( ALIGN >= sizeof( Storage ), "ALIGN < sizeof( Storage )" );

    struct FreeHeader {
        #if BUCKETLOCK == SPINLOCK
        __spinlock_t lock;                              // must be first field for alignment
        Storage * freeList;
        #elif BUCKETLOCK == LOCKFREE
        StackLF<Storage> freeList;
        #else
        #error undefined lock type for bucket lock
        #endif // SPINLOCK
        size_t blockSize;                               // size of allocations on this list
    }; // FreeHeader

    // must be first fields for alignment
    __spinlock_t extlock;                               // protects allocation-buffer extension
    FreeHeader freeLists[NoBucketSizes];                // buckets for different allocation sizes

    void * heapBegin;                                   // start of heap
    void * heapEnd;                                     // logical end of heap
    size_t heapRemaining;                               // amount of storage not allocated in the current chunk
}; // HeapManager
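
// Header flag-bit layout (editor's note, inferred from the masks used below): the "fake" header
// overlays the low-order 32 bits of the real header's home/blockSize field. Because home pointers
// and block sizes are always multiples of the minimum alignment, their low-order bits are free to
// carry flags:
//   bit 0 of kind.fake.alignment : 1 => this is a fake header created by memalign2 (odd alignment)
//   bit 1 of kind.real.blockSize : 1 => storage is zero filled (calloc/cmemalign)
// Readers strip the flags with "& -2" (clear bit 0) and "& -3" (clear bit 1) before using the value.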

static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return freeheader.blockSize; }


// statically allocated variables => zero filled
static size_t pageSize;                                 // architecture pagesize
static size_t heapExpand;                               // sbrk advance
static size_t mmapStart;                                // cross over point for mmap
static unsigned int maxBucketsUsed;                     // maximum number of buckets in use

// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
static unsigned int bucketSizes[NoBucketSizes] @= {     // different bucket sizes
    16, 32, 48, 64,
    64 + sizeof(HeapManager.Storage), 96, 112, 128, 128 + sizeof(HeapManager.Storage), 160, 192, 224,
    256 + sizeof(HeapManager.Storage), 320, 384, 448, 512 + sizeof(HeapManager.Storage), 640, 768, 896,
    1_024 + sizeof(HeapManager.Storage), 1_536, 2_048 + sizeof(HeapManager.Storage), 2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), 6_144,
    8_192 + sizeof(HeapManager.Storage), 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360,
    16_384 + sizeof(HeapManager.Storage), 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720,
    32_768 + sizeof(HeapManager.Storage), 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440,
    65_536 + sizeof(HeapManager.Storage), 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880,
    131_072 + sizeof(HeapManager.Storage), 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760,
    262_144 + sizeof(HeapManager.Storage), 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520,
    524_288 + sizeof(HeapManager.Storage), 655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), 1_179_648, 1_310_720, 1_441_792,
    1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), 2_621_440, 3_145_728, 3_670_016,
    4_194_304 + sizeof(HeapManager.Storage)
};
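// Worked example (editor's note; numbers assume a 64-bit build where sizeof(HeapManager.Storage) == 16):
// malloc( 100 ) needs tsize = 100 + 16 = 116 bytes including the header. With FASTLOOKUP,
// lookup[116] indexes the first bucket whose blockSize >= 116, i.e. the 128-byte bucket, so the
// request is served from freeLists[7] and malloc_usable_size() reports 128 - 16 = 112 bytes.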
#ifdef FASTLOOKUP
static unsigned char lookup[LookupSizes];               // O(1) lookup for small sizes
#endif // FASTLOOKUP
static int mmapFd = -1;                                 // fake or actual fd for anonymous file


#ifdef __CFA_DEBUG__
static _Bool heapBoot = 0;                              // detect recursion during boot
#endif // __CFA_DEBUG__
static HeapManager heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing


static inline _Bool setMmapStart( size_t value ) {      // true => bad value
    if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
    mmapStart = value;                                  // set global

    // find the closest bucket size greater than or equal to the mmapStart size
    maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
    assert( maxBucketsUsed < NoBucketSizes );           // subscript failure ?
    assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
    return false;
} // setMmapStart


static void ?{}( HeapManager & manager ) with ( manager ) {
    pageSize = sysconf( _SC_PAGESIZE );

    for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
        freeLists[i].blockSize = bucketSizes[i];
    } // for

    #ifdef FASTLOOKUP
    unsigned int idx = 0;
    for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
        if ( i > bucketSizes[idx] ) idx += 1;
        lookup[i] = idx;
    } // for
    #endif // FASTLOOKUP

    if ( setMmapStart( default_mmap_start() ) ) {
        abort( "HeapManager : internal error, mmap start initialization failure." );
    } // if
    heapExpand = default_heap_expansion();

    char * End = (char *)sbrk( 0 );
    sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End ); // move start of heap to multiple of alignment
    heapBegin = heapEnd = sbrk( 0 );                    // get new start point
} // HeapManager


static void ^?{}( HeapManager & ) {
    #ifdef __STATISTICS__
    // if ( traceHeapTerm() ) {
    //     printStats();
    //     if ( checkFree() ) checkFree( heapManager, true );
    // } // if
    #endif // __STATISTICS__
} // ~HeapManager


static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_startup( void ) {
    #ifdef __CFA_DEBUG__
    if ( unlikely( heapBoot ) ) {                       // check for recursion during system boot
        // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
        abort( "boot() : internal error, recursively invoked during system boot." );
    } // if
    heapBoot = true;
    #endif // __CFA_DEBUG__

    assert( heapManager.heapBegin == 0 );
    heapManager{};
} // memory_startup

static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_shutdown( void ) {
    ^heapManager{};
} // memory_shutdown


#ifdef __STATISTICS__
static unsigned long long int mmap_storage;             // heap statistics counters
static unsigned int mmap_calls;
static unsigned long long int munmap_storage;
static unsigned int munmap_calls;
static unsigned long long int sbrk_storage;
static unsigned int sbrk_calls;
static unsigned long long int malloc_storage;
static unsigned int malloc_calls;
static unsigned long long int free_storage;
static unsigned int free_calls;
static unsigned long long int calloc_storage;
static unsigned int calloc_calls;
static unsigned long long int memalign_storage;
static unsigned int memalign_calls;
static unsigned long long int cmemalign_storage;
static unsigned int cmemalign_calls;
static unsigned long long int realloc_storage;
static unsigned int realloc_calls;

static int statfd;                                      // statistics file descriptor (changed by malloc_stats_fd)


// Use "write" because streams may be shutdown when calls are made.
static void printStats() {
    char helpText[512];
    __cfaabi_dbg_bits_print_buffer( helpText, 512,
        "\nHeap statistics:\n"
        "  malloc: calls %u / storage %llu\n"
        "  calloc: calls %u / storage %llu\n"
        "  memalign: calls %u / storage %llu\n"
        "  cmemalign: calls %u / storage %llu\n"
        "  realloc: calls %u / storage %llu\n"
        "  free: calls %u / storage %llu\n"
        "  mmap: calls %u / storage %llu\n"
        "  munmap: calls %u / storage %llu\n"
        "  sbrk: calls %u / storage %llu\n",
        malloc_calls, malloc_storage,
        calloc_calls, calloc_storage,
        memalign_calls, memalign_storage,
        cmemalign_calls, cmemalign_storage,
        realloc_calls, realloc_storage,
        free_calls, free_storage,
        mmap_calls, mmap_storage,
        munmap_calls, munmap_storage,
        sbrk_calls, sbrk_storage
    );
} // printStats


static int printStatsXML( FILE * stream ) {
    char helpText[512];
    int len = snprintf( helpText, 512,
        "<malloc version=\"1\">\n"
        "<heap nr=\"0\">\n"
        "<sizes>\n"
        "</sizes>\n"
        "</heap>\n"
        "<total type=\"malloc\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"calloc\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"memalign\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"cmemalign\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"realloc\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"free\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"mmap\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"munmap\" count=\"%u\" size=\"%llu\"/>\n"
        "<total type=\"sbrk\" count=\"%u\" size=\"%llu\"/>\n"
        "</malloc>",
        malloc_calls, malloc_storage,
        calloc_calls, calloc_storage,
        memalign_calls, memalign_storage,
        cmemalign_calls, cmemalign_storage,
        realloc_calls, realloc_storage,
        free_calls, free_storage,
        mmap_calls, mmap_storage,
        munmap_calls, munmap_storage,
        sbrk_calls, sbrk_storage
    );
    return write( fileno( stream ), helpText, len );    // -1 => error
} // printStatsXML
#endif // __STATISTICS__


static inline void noMemory() {
    abort( "Heap memory exhausted at %zu bytes.\n"
           "Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
           ((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
} // noMemory


static inline void checkAlign( size_t alignment ) {
    if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) {
        abort( "Alignment %zu for memory allocation is less than sizeof(void *) and/or not a power of 2.", alignment );
    } // if
} // checkAlign


static inline _Bool setHeapExpand( size_t value ) {     // true => bad value
    if ( value < pageSize ) return true;                // expansion must be at least one page
    heapExpand = value;
    return false;
} // setHeapExpand


static inline void checkHeader( _Bool check, const char * name, void * addr ) {
    if ( unlikely( check ) ) {                          // bad address ?
        abort( "Attempt to %s storage %p with address outside the heap.\n"
               "Possible cause is duplicate free on same block or overwriting of memory.",
               name, addr );
    } // if
} // checkHeader


static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & size, size_t & alignment ) {
    if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
        size_t offset = header->kind.fake.offset;
        alignment = header->kind.fake.alignment & -2;   // remove flag from value
        #ifdef __CFA_DEBUG__
        checkAlign( alignment );                        // check alignment
        #endif // __CFA_DEBUG__
        header = (HeapManager.Storage.Header *)((char *)header - offset);
    } // if
} // fakeHeader
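
// Worked example (editor's note, hypothetical numbers): for memalign( 64, ... ) the user address
// is pushed up to a 64-byte boundary, so the header immediately before it is a fake header with
// kind.fake.alignment == 64 | 1 == 65 (odd => fake) and kind.fake.offset holding the distance back
// to the real header created by doMalloc. fakeHeader() detects the odd alignment, strips the flag
// (65 & -2 == 64), and rewinds header by offset, so callers always end up at the real header.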

#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))

static inline _Bool headers( const char * name, void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
    header = headerAddr( addr );

    if ( unlikely( heapEnd < addr ) ) {                 // mmapped ?
        fakeHeader( header, size, alignment );
        size = header->kind.real.blockSize & -3;        // mmap size
        return true;
    } // if

    #ifdef __CFA_DEBUG__
    checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
    #endif // __CFA_DEBUG__
    // header may be safe to dereference
    fakeHeader( header, size, alignment );
    #ifdef __CFA_DEBUG__
    checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
    #endif // __CFA_DEBUG__

    freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
    #ifdef __CFA_DEBUG__
    if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
        abort( "Attempt to %s storage %p with corrupted header.\n"
               "Possible cause is duplicate free on same block or overwriting of header information.",
               name, addr );
    } // if
    #endif // __CFA_DEBUG__
    size = freeElem->blockSize;
    return false;
} // headers


static inline void * extend( size_t size ) with ( heapManager ) {
    lock( extlock __cfaabi_dbg_ctx2 );
    ptrdiff_t rem = heapRemaining - size;
    if ( rem < 0 ) {
        // If the size requested is bigger than the current remaining storage, increase the size of the heap.

        size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
        if ( sbrk( increase ) == (void *)-1 ) {
            unlock( extlock );
            errno = ENOMEM;
            return 0;
        } // if
        #ifdef __STATISTICS__
        sbrk_calls += 1;
        sbrk_storage += increase;
        #endif // __STATISTICS__
        #ifdef __CFA_DEBUG__
        // Set new memory to garbage so subsequent uninitialized usages might fail.
        memset( (char *)heapEnd + heapRemaining, '\377', increase );
        #endif // __CFA_DEBUG__
        rem = heapRemaining + increase - size;
    } // if

    HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
    heapRemaining = rem;
    heapEnd = (char *)heapEnd + size;
    unlock( extlock );
    return block;
} // extend


static inline void * doMalloc( size_t size ) with ( heapManager ) {
    HeapManager.Storage * block;

    // Look up size in the size list. Make sure the user request includes space for the header that must be allocated
    // along with the block and is a multiple of the alignment size.

    size_t tsize = size + sizeof(HeapManager.Storage);
    if ( likely( tsize < mmapStart ) ) {                // small size => sbrk
        HeapManager.FreeHeader * freeElem =
            #ifdef FASTLOOKUP
            tsize < LookupSizes ? &freeLists[lookup[tsize]] :
            #endif // FASTLOOKUP
            bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search
        assert( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ?
        assert( tsize <= freeElem->blockSize );         // search failure ?
        tsize = freeElem->blockSize;                    // total space needed for request

        // Spin until the lock is acquired for this particular size of block.

        #if BUCKETLOCK == SPINLOCK
        lock( freeElem->lock __cfaabi_dbg_ctx2 );
        block = freeElem->freeList;                     // remove node from stack
        #else
        block = freeElem->freeList.pop();
        #endif // SPINLOCK
        if ( unlikely( block == 0 ) ) {                 // no free block ?
            #if BUCKETLOCK == SPINLOCK
            unlock( freeElem->lock );
            #endif // SPINLOCK
            // Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more
            // and then carve it off.

            block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call
            if ( unlikely( block == 0 ) ) return 0;
        #if BUCKETLOCK == SPINLOCK
        } else {
            freeElem->freeList = block->header.kind.real.next;
            unlock( freeElem->lock );
        #endif // SPINLOCK
        } // if

        block->header.kind.real.home = freeElem;        // pointer back to free list of appropriate size
    } else {                                            // large size => mmap
        tsize = libCeiling( tsize, pageSize );          // must be multiple of page size
        #ifdef __STATISTICS__
        __atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &mmap_storage, tsize, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__
        block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
        if ( block == (HeapManager.Storage *)MAP_FAILED ) {
            // Do not call strerror( errno ) as it may call malloc.
            abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu error:%d.", &heapManager, tsize, errno );
        } // if
        #ifdef __CFA_DEBUG__
        // Set new memory to garbage so subsequent uninitialized usages might fail.
        memset( block, '\377', tsize );
        #endif // __CFA_DEBUG__
        block->header.kind.real.blockSize = tsize;      // storage size for munmap
    } // if

    void * area = &(block->data);                       // adjust off header to user bytes

    #ifdef __CFA_DEBUG__
    assert( ((uintptr_t)area & (libAlign() - 1)) == 0 ); // minimum alignment ?
    __atomic_add_fetch( &allocfree, tsize, __ATOMIC_SEQ_CST );
    if ( traceHeap() ) {
        enum { BufferSize = 64 };
        char helpText[BufferSize];
        int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", area, size, tsize );
        // int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", area, size );
        __cfaabi_dbg_bits_write( helpText, len );
    } // if
    #endif // __CFA_DEBUG__

    return area;
} // doMalloc
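
// Allocation-path summary (editor's note): requests whose total size (user size + header) is below
// mmapStart are served from per-size free lists backed by sbrk; larger requests go directly to
// mmap, with the page-rounded length recorded in the header so doFree can munmap exactly that
// much. For example, with the default crossover of 512 * 1024 + 1 bytes, malloc( 1_000_000 ) maps
// libCeiling( 1_000_016, pageSize ) bytes, and free() returns them to the OS immediately.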


static inline void doFree( void * addr ) with ( heapManager ) {
    #ifdef __CFA_DEBUG__
    if ( unlikely( heapManager.heapBegin == 0 ) ) {
        abort( "doFree( %p ) : internal error, called before heap is initialized.", addr );
    } // if
    #endif // __CFA_DEBUG__

    HeapManager.Storage.Header * header;
    HeapManager.FreeHeader * freeElem;
    size_t size, alignment;                             // not used (see realloc)

    if ( headers( "free", addr, header, freeElem, size, alignment ) ) { // mmapped ?
        #ifdef __STATISTICS__
        __atomic_add_fetch( &munmap_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &munmap_storage, size, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__
        if ( munmap( header, size ) == -1 ) {
            #ifdef __CFA_DEBUG__
            abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
                   "Possible cause is invalid pointer.",
                   addr );
            #endif // __CFA_DEBUG__
        } // if
    } else {
        #ifdef __CFA_DEBUG__
        // Set free memory to garbage so subsequent usages might fail.
        memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) );
        #endif // __CFA_DEBUG__

        #ifdef __STATISTICS__
        __atomic_add_fetch( &free_storage, size, __ATOMIC_SEQ_CST ); // atomic, consistent with the other counters
        #endif // __STATISTICS__
        #if BUCKETLOCK == SPINLOCK
        lock( freeElem->lock __cfaabi_dbg_ctx2 );       // acquire spin lock
        header->kind.real.next = freeElem->freeList;    // push on stack
        freeElem->freeList = (HeapManager.Storage *)header;
        unlock( freeElem->lock );                       // release spin lock
        #else
        freeElem->freeList.push( *(HeapManager.Storage *)header );
        #endif // SPINLOCK
    } // if

    #ifdef __CFA_DEBUG__
    __atomic_add_fetch( &allocfree, -size, __ATOMIC_SEQ_CST );
    if ( traceHeap() ) {
        enum { BufferSize = 64 };
        char helpText[BufferSize];
        int len = snprintf( helpText, BufferSize, "Free( %p ) size:%zu\n", addr, size );
        __cfaabi_dbg_bits_write( helpText, len );
    } // if
    #endif // __CFA_DEBUG__
} // doFree


size_t checkFree( HeapManager & manager ) with ( manager ) {
    size_t total = 0;
    #ifdef __STATISTICS__
    __cfaabi_dbg_bits_acquire();
    __cfaabi_dbg_bits_print_nolock( "\nBin lists (bin size : free blocks on list)\n" );
    #endif // __STATISTICS__
    for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {
        size_t size = freeLists[i].blockSize;
        #ifdef __STATISTICS__
        unsigned int N = 0;
        #endif // __STATISTICS__
        #if BUCKETLOCK == SPINLOCK
        for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0; p = p->header.kind.real.next ) {
        #else
        for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0; p = p->header.kind.real.next.top ) {
        #endif // SPINLOCK
            total += size;
            #ifdef __STATISTICS__
            N += 1;
            #endif // __STATISTICS__
        } // for
        #ifdef __STATISTICS__
        __cfaabi_dbg_bits_print_nolock( "%7zu, %-7u  ", size, N );
        if ( (i + 1) % 8 == 0 ) __cfaabi_dbg_bits_print_nolock( "\n" );
        #endif // __STATISTICS__
    } // for
    #ifdef __STATISTICS__
    __cfaabi_dbg_bits_print_nolock( "\ntotal free blocks:%zu\n", total );
    __cfaabi_dbg_bits_release();
    #endif // __STATISTICS__
    return (char *)heapEnd - (char *)heapBegin - total;
} // checkFree


static inline void * malloc2( size_t size ) {           // necessary for malloc statistics
    assert( heapManager.heapBegin != 0 );
    void * area = doMalloc( size );
    if ( unlikely( area == 0 ) ) errno = ENOMEM;        // POSIX
    return area;
} // malloc2


static inline void * memalign2( size_t alignment, size_t size ) { // necessary for malloc statistics
    #ifdef __CFA_DEBUG__
    checkAlign( alignment );                            // check alignment
    #endif // __CFA_DEBUG__

    // if alignment <= default alignment, do normal malloc as two headers are unnecessary
    if ( unlikely( alignment <= libAlign() ) ) return malloc2( size );

    // Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
    // administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
    //      .-------------v-----------------v----------------v----------,
    //      | Real Header | ... padding ... |   Fake Header  | data ... |
    //      `-------------^-----------------^-+--------------^----------'
    //      |<--------------------------------' offset/align |<-- alignment boundary

    // subtract libAlign() because it is already the minimum alignment
    // add sizeof(Storage) for fake header
    char * area = (char *)doMalloc( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
    if ( unlikely( area == 0 ) ) return area;

    // address in the block of the "next" alignment address
    char * user = (char *)libCeiling( (uintptr_t)(area + sizeof(HeapManager.Storage)), alignment );

    // address of header from malloc
    HeapManager.Storage.Header * realHeader = headerAddr( area );
    // address of fake header *before* the alignment location
    HeapManager.Storage.Header * fakeHeader = headerAddr( user );
    // SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
    fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
    // SKULLDUGGERY: odd alignment implies fake header
    fakeHeader->kind.fake.alignment = alignment | 1;

    return user;
} // memalign2
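
// Worked example (editor's note, hypothetical numbers on a 64-bit build with libAlign() == 16 and
// sizeof(HeapManager.Storage) == 16): memalign( 64, 100 ) calls doMalloc( 100 + 64 - 16 + 16 ),
// i.e. 164 user bytes, guaranteeing a 64-byte boundary exists past the first fake-header slot.
// user = libCeiling( area + 16, 64 ) is the next 64-byte boundary, the fake header sits 16 bytes
// before user, its offset records the distance back to headerAddr( area ), and its alignment
// field is 64 | 1 so later calls through headers()/fakeHeader() recognize the fake header.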


extern "C" {
    void * malloc( size_t size ) {
        #ifdef __STATISTICS__
        __atomic_add_fetch( &malloc_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &malloc_storage, size, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        return malloc2( size );
    } // malloc


    void * calloc( size_t noOfElems, size_t elemSize ) {
        size_t size = noOfElems * elemSize;
        #ifdef __STATISTICS__
        __atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &calloc_storage, size, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        char * area = (char *)malloc2( size );
        if ( unlikely( area == 0 ) ) return 0;
        HeapManager.Storage.Header * header;
        HeapManager.FreeHeader * freeElem;
        size_t asize, alignment;
        _Bool mapped __attribute__(( unused )) = headers( "calloc", area, header, freeElem, asize, alignment );
        #ifndef __CFA_DEBUG__
        // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
        if ( ! mapped )
        #endif // __CFA_DEBUG__
            memset( area, '\0', asize - sizeof(HeapManager.Storage) ); // set to zeros
        header->kind.real.blockSize |= 2;               // mark as zero filled
        return area;
    } // calloc


    void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {
        size_t size = noOfElems * elemSize;
        #ifdef __STATISTICS__
        __atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &cmemalign_storage, size, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        char * area = (char *)memalign2( alignment, size );
        if ( unlikely( area == 0 ) ) return 0;
        HeapManager.Storage.Header * header;
        HeapManager.FreeHeader * freeElem;
        size_t asize;
        _Bool mapped __attribute__(( unused )) = headers( "cmemalign", area, header, freeElem, asize, alignment );
        #ifndef __CFA_DEBUG__
        // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
        if ( ! mapped )
        #endif // __CFA_DEBUG__
            memset( area, '\0', asize - ( (char *)area - (char *)header ) ); // set to zeros
        header->kind.real.blockSize |= 2;               // mark as zero filled

        return area;
    } // cmemalign


    void * realloc( void * addr, size_t size ) {
        #ifdef __STATISTICS__
        __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        if ( unlikely( addr == 0 ) ) return malloc2( size ); // special cases
        if ( unlikely( size == 0 ) ) { free( addr ); return 0; }

        HeapManager.Storage.Header * header;
        HeapManager.FreeHeader * freeElem;
        size_t asize, alignment = 0;
        headers( "realloc", addr, header, freeElem, asize, alignment );

        size_t usize = asize - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block
        if ( usize >= size ) {                          // already sufficient storage
            // This case does not result in a new profiler entry because the previous one still exists and it must match with
            // the free for this memory. Hence, this realloc does not appear in the profiler output.
            return addr;
        } // if

        #ifdef __STATISTICS__
        __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        void * area;
        if ( unlikely( alignment != 0 ) ) {             // previous request memalign ?
            area = memalign( alignment, size );         // create new area
        } else {
            area = malloc2( size );                     // create new area
        } // if
        if ( unlikely( area == 0 ) ) return 0;
        if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill (calloc/cmemalign) ?
            assert( (header->kind.real.blockSize & 1) == 0 );
            _Bool mapped __attribute__(( unused )) = headers( "realloc", area, header, freeElem, asize, alignment );
            #ifndef __CFA_DEBUG__
            // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
            if ( ! mapped )
            #endif // __CFA_DEBUG__
                memset( (char *)area + usize, '\0', asize - ( (char *)area - (char *)header ) - usize ); // zero-fill back part
            header->kind.real.blockSize |= 2;           // mark new request as zero fill
        } // if
        memcpy( area, addr, usize );                    // copy bytes
        free( addr );
        return area;
    } // realloc


    void * memalign( size_t alignment, size_t size ) {
        #ifdef __STATISTICS__
        __atomic_add_fetch( &memalign_calls, 1, __ATOMIC_SEQ_CST );
        __atomic_add_fetch( &memalign_storage, size, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        void * area = memalign2( alignment, size );

        return area;
    } // memalign


    void * aligned_alloc( size_t alignment, size_t size ) {
        return memalign( alignment, size );
    } // aligned_alloc


    int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
        if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
        *memptr = memalign( alignment, size );
        if ( unlikely( *memptr == 0 ) ) return ENOMEM;
        return 0;
    } // posix_memalign


    void * valloc( size_t size ) {
        return memalign( pageSize, size );
    } // valloc


    void free( void * addr ) {
        #ifdef __STATISTICS__
        __atomic_add_fetch( &free_calls, 1, __ATOMIC_SEQ_CST );
        #endif // __STATISTICS__

        if ( unlikely( addr == 0 ) ) {                  // special case
            #ifdef __CFA_DEBUG__
            if ( traceHeap() ) {
                #define nullmsg "Free( 0x0 ) size:0\n"
                // Do not debug print free( 0 ), as it can cause recursive entry from sprintf.
                __cfaabi_dbg_bits_write( nullmsg, sizeof(nullmsg) - 1 );
            } // if
            #endif // __CFA_DEBUG__
            return;
        } // if

        doFree( addr );
    } // free

    int mallopt( int option, int value ) {
        choose( option ) {
          case M_TOP_PAD:
            if ( setHeapExpand( value ) ) return 0;     // error, bad value
          case M_MMAP_THRESHOLD:
            if ( setMmapStart( value ) ) return 0;      // error, bad value
        } // choose
        return 1;                                       // success, or unsupported option
    } // mallopt
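
    // Example (editor's note): mallopt( M_MMAP_THRESHOLD, 1024 * 1024 ) moves the sbrk/mmap
    // crossover so only requests of roughly 1MB and above are individually mmapped, while
    // mallopt( M_TOP_PAD, 4 * 1024 * 1024 ) makes each sbrk extension grab at least 4MB.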


    int malloc_trim( size_t ) {
        return 0;                                       // => impossible to release memory
    } // malloc_trim

    size_t malloc_usable_size( void * addr ) {
        if ( unlikely( addr == 0 ) ) return 0;          // null allocation has 0 size
        HeapManager.Storage.Header * header;
        HeapManager.FreeHeader * freeElem;
        size_t size, alignment;

        headers( "malloc_usable_size", addr, header, freeElem, size, alignment );
        size_t usize = size - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block
        return usize;
    } // malloc_usable_size


    size_t malloc_alignment( void * addr ) {
        if ( unlikely( addr == 0 ) ) return libAlign(); // minimum alignment
        HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );
        if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
            return header->kind.fake.alignment & -2;    // remove flag from value
        } else {
            return libAlign();                          // minimum alignment
        } // if
    } // malloc_alignment


    _Bool malloc_zero_fill( void * addr ) {
        if ( unlikely( addr == 0 ) ) return false;      // null allocation is not zero fill
        HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );
        if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
            header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset);
        } // if
        return (header->kind.real.blockSize & 2) != 0;  // zero filled (calloc/cmemalign) ?
    } // malloc_zero_fill


    void malloc_stats( void ) {
        #ifdef __STATISTICS__
        printStats();
        if ( checkFree() ) checkFree( heapManager );
        #endif // __STATISTICS__
    } // malloc_stats


    int malloc_stats_fd( int fd ) {
        #ifdef __STATISTICS__
        int temp = statfd;
        statfd = fd;
        return temp;
        #else
        return -1;
        #endif // __STATISTICS__
    } // malloc_stats_fd


    int malloc_info( int options, FILE * stream ) {
        return printStatsXML( stream );                 // options parameter is ignored
    } // malloc_info


    void * malloc_get_state( void ) {
        return 0;                                       // unsupported
    } // malloc_get_state


    int malloc_set_state( void * ptr ) {
        return 0;                                       // unsupported
    } // malloc_set_state
} // extern "C"
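

// Example usage (editor's note, a minimal sketch, not part of the original file): the extern "C"
// routines above replace the C library allocator, so ordinary C code exercises this heap directly.
//
//   #include <assert.h>
//   int main() {
//       int * p = (int *)calloc( 10, sizeof(int) );    // zero filled, bit 1 set in blockSize
//       assert( malloc_zero_fill( p ) );
//       double * q = (double *)memalign( 64, 100 );    // fake header created by memalign2
//       assert( malloc_alignment( q ) == 64 );
//       assert( malloc_usable_size( p ) >= 10 * sizeof(int) );
//       free( q ); free( p );
//   }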

// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa -nodebug -O2 heap.c" //
// End: //