//
// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// heap.c --
//
// Author           : Peter A. Buhr
// Created On       : Tue Dec 19 21:58:35 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Fri Mar 6 10:14:52 2020
// Update Count     : 650
//

#include <unistd.h> // sbrk, sysconf
#include <stdbool.h> // true, false
#include <stdio.h> // snprintf, fileno
#include <errno.h> // errno
#include <string.h> // memset, memcpy
extern "C" {
#include <sys/mman.h> // mmap, munmap
} // extern "C"

// #comment TD : Many of these should be merged into math I believe
#include "bits/align.hfa" // libPow2
#include "bits/defs.hfa" // likely, unlikely
#include "bits/locks.hfa" // __spinlock_t
#include "startup.hfa" // STARTUP_PRIORITY_MEMORY
//#include "stdlib.hfa" // bsearchl
#include "malloc.h"
#define MIN(x, y) ((x) < (y) ? (x) : (y))

static bool traceHeap = false;

inline bool traceHeap() { return traceHeap; }

bool traceHeapOn() {
	bool temp = traceHeap;
	traceHeap = true;
	return temp;
} // traceHeapOn

bool traceHeapOff() {
	bool temp = traceHeap;
	traceHeap = false;
	return temp;
} // traceHeapOff

bool traceHeapTerm() { return false; }


static bool prtFree = false;

inline bool prtFree() {
	return prtFree;
} // prtFree

bool prtFreeOn() {
	bool temp = prtFree;
	prtFree = true;
	return temp;
} // prtFreeOn

bool prtFreeOff() {
	bool temp = prtFree;
	prtFree = false;
	return temp;
} // prtFreeOff

enum {
	// Define the default extension heap amount in units of bytes. When the uC++ supplied heap reaches the brk address,
	// the brk address is extended by the extension amount.
	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),

	// Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets;
	// values greater than or equal to this value are mmapped from the operating system.
	__CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1),
};

size_t default_mmap_start() __attribute__(( weak )) {
	return __CFA_DEFAULT_MMAP_START__;
} // default_mmap_start

size_t default_heap_expansion() __attribute__(( weak )) {
	return __CFA_DEFAULT_HEAP_EXPANSION__;
} // default_heap_expansion


#ifdef __CFA_DEBUG__
static unsigned int allocFree; // running total of allocations minus frees

static void prtUnfreed() {
	if ( allocFree != 0 ) {
		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
		char helpText[512];
		int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"
							"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
							(long int)getpid(), allocFree, allocFree ); // always print the UNIX pid
		__cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
	} // if
} // prtUnfreed

extern "C" {
	void heapAppStart() { // called by __cfaabi_appready_startup
		allocFree = 0;
	} // heapAppStart

	void heapAppStop() { // called by __cfaabi_appready_startdown
		fclose( stdin ); fclose( stdout );
		prtUnfreed();
	} // heapAppStop
} // extern "C"
#endif // __CFA_DEBUG__


// statically allocated variables => zero filled.
static size_t pageSize; // architecture pagesize
static size_t heapExpand; // sbrk advance
static size_t mmapStart; // cross over point for mmap
static unsigned int maxBucketsUsed; // maximum number of buckets in use

#define SPINLOCK 0
#define LOCKFREE 1
#define BUCKETLOCK SPINLOCK
#if BUCKETLOCK == LOCKFREE
#include <uStackLF.h>
#endif // LOCKFREE

// Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
// Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
enum { NoBucketSizes = 91 }; // number of bucket sizes

struct HeapManager {
	// struct FreeHeader; // forward declaration

	struct Storage {
		struct Header { // header
			union Kind {
				struct RealHeader {
					union {
						struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header
							#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4
							uint32_t padding; // unused, force home/blocksize to overlay alignment in fake header
							#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4

							union {
								// FreeHeader * home; // allocated block points back to home locations (must overlay alignment)
								void * home; // allocated block points back to home locations (must overlay alignment)
								size_t blockSize; // size for munmap (must overlay alignment)
								#if BUCKETLOCK == SPINLOCK
								Storage * next; // freed block points next freed block of same size
								#endif // SPINLOCK
							};

							#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
							uint32_t padding; // unused, force home/blocksize to overlay alignment in fake header
							#endif // __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
						};
						// future code
						#if BUCKETLOCK == LOCKFREE
						Stack<Storage>::Link next; // freed block points next freed block of same size (double-wide)
						#endif // LOCKFREE
					};
				} real; // RealHeader
				struct FakeHeader {
					#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
					uint32_t alignment; // low-order bits of home/blockSize used for tricks
					#endif // __ORDER_LITTLE_ENDIAN__

					uint32_t offset;

					#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
					uint32_t alignment; // low-order bits of home/blockSize used for tricks
					#endif // __ORDER_BIG_ENDIAN__
				} fake; // FakeHeader
			} kind; // Kind
		} header; // Header
		char pad[libAlign() - sizeof( Header )];
		char data[0]; // storage
	}; // Storage

	static_assert( libAlign() >= sizeof( Storage ), "libAlign() < sizeof( Storage )" );

	struct FreeHeader {
		#if BUCKETLOCK == SPINLOCK
		__spinlock_t lock; // must be first field for alignment
		Storage * freeList;
		#elif BUCKETLOCK == LOCKFREE
		// future code
		StackLF<Storage> freeList;
		#else
		#error undefined lock type for bucket lock
		#endif // SPINLOCK
		size_t blockSize; // size of allocations on this list
	}; // FreeHeader

	// must be first fields for alignment
	__spinlock_t extlock; // protects allocation-buffer extension
	FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes

	void * heapBegin; // start of heap
	void * heapEnd; // logical end of heap
	size_t heapRemaining; // amount of storage not allocated in the current chunk
}; // HeapManager

static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return freeheader.blockSize; }


#define FASTLOOKUP
#define __STATISTICS__

// Bucket size must be multiple of 16.
// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
static const unsigned int bucketSizes[] @= { // different bucket sizes
	16, 32, 48, 64 + sizeof(HeapManager.Storage), // 4
	96, 112, 128 + sizeof(HeapManager.Storage), // 3
	160, 192, 224, 256 + sizeof(HeapManager.Storage), // 4
	320, 384, 448, 512 + sizeof(HeapManager.Storage), // 4
	640, 768, 896, 1_024 + sizeof(HeapManager.Storage), // 4
	1_536, 2_048 + sizeof(HeapManager.Storage), // 2
	2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), // 4
	6_144, 8_192 + sizeof(HeapManager.Storage), // 2
	9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(HeapManager.Storage), // 8
	18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(HeapManager.Storage), // 8
	36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(HeapManager.Storage), // 8
	73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(HeapManager.Storage), // 8
	147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(HeapManager.Storage), // 8
	294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(HeapManager.Storage), // 8
	655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), // 4
	1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), // 8
	2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(HeapManager.Storage), // 4
};

static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0]), "size of bucket array wrong" );

#ifdef FASTLOOKUP
enum { LookupSizes = 65_536 + sizeof(HeapManager.Storage) }; // number of fast lookup sizes
static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes
#endif // FASTLOOKUP

static int mmapFd = -1; // fake or actual fd for anonymous file
#ifdef __CFA_DEBUG__
static bool heapBoot = 0; // detect recursion during boot
#endif // __CFA_DEBUG__
static HeapManager heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing


#ifdef __STATISTICS__
// Heap statistics counters.
static unsigned long long int mmap_storage;
static unsigned int mmap_calls;
static unsigned long long int munmap_storage;
static unsigned int munmap_calls;
static unsigned long long int sbrk_storage;
static unsigned int sbrk_calls;
static unsigned long long int malloc_storage;
static unsigned int malloc_calls;
static unsigned long long int free_storage;
static unsigned int free_calls;
static unsigned long long int calloc_storage;
static unsigned int calloc_calls;
static unsigned long long int memalign_storage;
static unsigned int memalign_calls;
static unsigned long long int cmemalign_storage;
static unsigned int cmemalign_calls;
static unsigned long long int realloc_storage;
static unsigned int realloc_calls;
// Statistics file descriptor (changed by malloc_stats_fd).
static int statfd = STDERR_FILENO; // default stderr

// Use "write" because streams may be shutdown when calls are made.
static void printStats() {
	char helpText[512];
	__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
		"\nHeap statistics:\n"
		" malloc: calls %u / storage %llu\n"
		" calloc: calls %u / storage %llu\n"
		" memalign: calls %u / storage %llu\n"
		" cmemalign: calls %u / storage %llu\n"
		" realloc: calls %u / storage %llu\n"
		" free: calls %u / storage %llu\n"
		" mmap: calls %u / storage %llu\n"
		" munmap: calls %u / storage %llu\n"
		" sbrk: calls %u / storage %llu\n",
		malloc_calls, malloc_storage,
		calloc_calls, calloc_storage,
		memalign_calls, memalign_storage,
		cmemalign_calls, cmemalign_storage,
		realloc_calls, realloc_storage,
		free_calls, free_storage,
		mmap_calls, mmap_storage,
		munmap_calls, munmap_storage,
		sbrk_calls, sbrk_storage
	);
} // printStats

static int printStatsXML( FILE * stream ) { // see malloc_info
	char helpText[512];
	int len = snprintf( helpText, sizeof(helpText),
		"<malloc version=\"1\">\n"
		"<heap nr=\"0\">\n"
		"<sizes>\n"
		"</sizes>\n"
		"<total type=\"malloc\" count=\"%u\" size=\"%llu\"/>\n"
		"<total type=\"calloc\" count=\"%u\" size=\"%llu\"/>\n"
		"<total type=\"memalign\" count=\"%u\" size=\"%llu\"/>\n"
		"<total type=\"cmemalign\" count=\"%u\" size=\"%llu\"/>\n"
		"<total type=\"realloc\" count=\"%u\" size=\"%llu\"/>\n"
		"<total type=\"free\" count=\"%u\" size=\"%llu\"/>\n"
		"<total type=\"mmap\" count=\"%u\" size=\"%llu\"/>\n"
		"<total type=\"munmap\" count=\"%u\" size=\"%llu\"/>\n"
		"<total type=\"sbrk\" count=\"%u\" size=\"%llu\"/>\n"
		"</malloc>",
		malloc_calls, malloc_storage,
		calloc_calls, calloc_storage,
		memalign_calls, memalign_storage,
		cmemalign_calls, cmemalign_storage,
		realloc_calls, realloc_storage,
		free_calls, free_storage,
		mmap_calls, mmap_storage,
		munmap_calls, munmap_storage,
		sbrk_calls, sbrk_storage
	);
	__cfaabi_bits_write( fileno( stream ), helpText, len ); // ensures all bytes written or exit
	return len;
} // printStatsXML
#endif // __STATISTICS__


// static inline void noMemory() {
// 	abort( "Heap memory exhausted at %zu bytes.\n"
// 		"Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
// 		((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
// } // noMemory


static inline void checkAlign( size_t alignment ) {
	if ( alignment < libAlign() || ! libPow2( alignment ) ) {
		abort( "Alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
	} // if
} // checkAlign


static inline bool setHeapExpand( size_t value ) {
	if ( heapExpand < pageSize ) return true;
	heapExpand = value;
	return false;
} // setHeapExpand


// thunk problem
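// Lower-bound binary search: return the index of the first element of vals[0..dim) that is >= key, or dim if all elements are smaller.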
size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) {
	size_t l = 0, m, h = dim;
	while ( l < h ) {
		m = (l + h) / 2;
		if ( (unsigned int &)(vals[m]) < key ) { // cast away const
			l = m + 1;
		} else {
			h = m;
		} // if
	} // while
	return l;
} // Bsearchl


static inline bool setMmapStart( size_t value ) { // true => mmapped, false => sbrk
	if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
	mmapStart = value; // set global

	// find the closest bucket size less than or equal to the mmapStart size
	maxBucketsUsed = Bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
	assert( maxBucketsUsed < NoBucketSizes ); // subscript failure ?
	assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
	return false;
} // setMmapStart


static inline void checkHeader( bool check, const char name[], void * addr ) {
	if ( unlikely( check ) ) { // bad address ?
		abort( "Attempt to %s storage %p with address outside the heap.\n"
			   "Possible cause is duplicate free on same block or overwriting of memory.",
			   name, addr );
	} // if
} // checkHeader

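// If the header's low-order alignment bit is set, it is a fake header: recover the real alignment and step back by the stored offset to the real header.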
static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) {
	if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
		size_t offset = header->kind.fake.offset;
		alignment = header->kind.fake.alignment & -2; // remove flag from value
		#ifdef __CFA_DEBUG__
		checkAlign( alignment ); // check alignment
		#endif // __CFA_DEBUG__
		header = (HeapManager.Storage.Header *)((char *)header - offset);
	} // if
} // fakeHeader


// <-------+----------------------------------------------------> bsize (bucket size)
// |header |addr
//==================================================================================
// | alignment
// <-----------------<------------+-----------------------------> bsize (bucket size)
// |fake-header | addr
#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))

// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
// |header |addr
//==================================================================================
// | alignment
// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
// |fake-header |addr
#define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))

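// Given an allocation address, recover its real header, block size, and alignment, and, for non-mmapped storage, its owning free-list bucket; returns true if the storage is mmapped.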
static inline bool headers( const char name[] __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
	header = headerAddr( addr );

	if ( unlikely( heapEnd < addr ) ) { // mmapped ?
		fakeHeader( header, alignment );
		size = header->kind.real.blockSize & -3; // mmap size
		return true;
	} // if

	#ifdef __CFA_DEBUG__
	checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
	#endif // __CFA_DEBUG__

	// header may be safe to dereference
	fakeHeader( header, alignment );
	#ifdef __CFA_DEBUG__
	checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
	#endif // __CFA_DEBUG__

	freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
	#ifdef __CFA_DEBUG__
	if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
		abort( "Attempt to %s storage %p with corrupted header.\n"
			   "Possible cause is duplicate free on same block or overwriting of header information.",
			   name, addr );
	} // if
	#endif // __CFA_DEBUG__
	size = freeElem->blockSize;
	return false;
} // headers

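// Carve size bytes off the end of the sbrk area, extending the area via sbrk when the remaining storage is too small; returns 0p and sets errno to ENOMEM if sbrk fails.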
static inline void * extend( size_t size ) with ( heapManager ) {
	lock( extlock __cfaabi_dbg_ctx2 );
	ptrdiff_t rem = heapRemaining - size;
	if ( rem < 0 ) {
		// If the size requested is bigger than the current remaining storage, increase the size of the heap.

		size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
		if ( sbrk( increase ) == (void *)-1 ) {
			unlock( extlock );
			errno = ENOMEM;
			return 0p;
		} // if
		#ifdef __STATISTICS__
		sbrk_calls += 1;
		sbrk_storage += increase;
		#endif // __STATISTICS__
		#ifdef __CFA_DEBUG__
		// Set new memory to garbage so subsequent uninitialized usages might fail.
		memset( (char *)heapEnd + heapRemaining, '\377', increase );
		#endif // __CFA_DEBUG__
		rem = heapRemaining + increase - size;
	} // if

	HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
	heapRemaining = rem;
	heapEnd = (char *)heapEnd + size;
	unlock( extlock );
	return block;
} // extend

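// Allocate storage: requests whose total size (user size plus header) is below mmapStart are served from a bucket free list, refilled from the sbrk area as needed; larger requests are mmapped directly.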
static inline void * doMalloc( size_t size ) with ( heapManager ) {
	HeapManager.Storage * block; // pointer to new block of storage

	// Look up size in the size list. Make sure the user request includes space for the header that must be allocated
	// along with the block and is a multiple of the alignment size.

	if ( unlikely( size > ~0ul - sizeof(HeapManager.Storage) ) ) return 0p;
	size_t tsize = size + sizeof(HeapManager.Storage);
	if ( likely( tsize < mmapStart ) ) { // small size => sbrk
		size_t posn;
		#ifdef FASTLOOKUP
		if ( tsize < LookupSizes ) posn = lookup[tsize];
		else
		#endif // FASTLOOKUP
			posn = Bsearchl( (unsigned int)tsize, bucketSizes, (size_t)maxBucketsUsed );
		HeapManager.FreeHeader * freeElem = &freeLists[posn];
		// #ifdef FASTLOOKUP
		// if ( tsize < LookupSizes )
		// 	freeElem = &freeLists[lookup[tsize]];
		// else
		// #endif // FASTLOOKUP
		// 	freeElem = bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search
		// HeapManager.FreeHeader * freeElem =
		// #ifdef FASTLOOKUP
		// 	tsize < LookupSizes ? &freeLists[lookup[tsize]] :
		// #endif // FASTLOOKUP
		// 	bsearchl( tsize, freeLists, (size_t)maxBucketsUsed ); // binary search
		assert( freeElem <= &freeLists[maxBucketsUsed] ); // subscripting error ?
		assert( tsize <= freeElem->blockSize ); // search failure ?
		tsize = freeElem->blockSize; // total space needed for request

		// Spin until the lock is acquired for this particular size of block.

		#if defined( SPINLOCK )
		lock( freeElem->lock __cfaabi_dbg_ctx2 );
		block = freeElem->freeList; // remove node from stack
		#else
		block = freeElem->freeList.pop();
		#endif // SPINLOCK
		if ( unlikely( block == 0p ) ) { // no free block ?
			#if defined( SPINLOCK )
			unlock( freeElem->lock );
			#endif // SPINLOCK

			// Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more
			// and then carve it off.

			block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call
			if ( unlikely( block == 0p ) ) return 0p;
		#if defined( SPINLOCK )
		} else {
			freeElem->freeList = block->header.kind.real.next;
			unlock( freeElem->lock );
		#endif // SPINLOCK
		} // if

		block->header.kind.real.home = freeElem; // pointer back to free list of appropriate size
	} else { // large size => mmap
		if ( unlikely( size > ~0ul - pageSize ) ) return 0p;
		tsize = libCeiling( tsize, pageSize ); // must be multiple of page size
		#ifdef __STATISTICS__
		__atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &mmap_storage, tsize, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
		if ( block == (HeapManager.Storage *)MAP_FAILED ) {
			// Do not call strerror( errno ) as it may call malloc.
			abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu error:%d.", &heapManager, tsize, errno );
		} // if
		#ifdef __CFA_DEBUG__
		// Set new memory to garbage so subsequent uninitialized usages might fail.
		memset( block, '\377', tsize );
		#endif // __CFA_DEBUG__
		block->header.kind.real.blockSize = tsize; // storage size for munmap
	} // if

	void * addr = &(block->data); // adjust off header to user bytes

	#ifdef __CFA_DEBUG__
	assert( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ?
	__atomic_add_fetch( &allocFree, tsize, __ATOMIC_SEQ_CST );
	if ( traceHeap() ) {
		enum { BufferSize = 64 };
		char helpText[BufferSize];
		int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize );
		// int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", addr, size );
		__cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
	} // if
	#endif // __CFA_DEBUG__

	return addr;
} // doMalloc

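// Return storage to the heap: mmapped blocks are munmapped, bucket blocks are pushed back onto their owning free list.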
static inline void doFree( void * addr ) with ( heapManager ) {
	#ifdef __CFA_DEBUG__
	if ( unlikely( heapManager.heapBegin == 0p ) ) {
		abort( "doFree( %p ) : internal error, called before heap is initialized.", addr );
	} // if
	#endif // __CFA_DEBUG__

	HeapManager.Storage.Header * header;
	HeapManager.FreeHeader * freeElem;
	size_t size, alignment; // not used (see realloc)

	if ( headers( "free", addr, header, freeElem, size, alignment ) ) { // mmapped ?
		#ifdef __STATISTICS__
		__atomic_add_fetch( &munmap_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &munmap_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__
		if ( munmap( header, size ) == -1 ) {
			#ifdef __CFA_DEBUG__
			abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
				   "Possible cause is invalid pointer.",
				   addr );
			#endif // __CFA_DEBUG__
		} // if
	} else {
		#ifdef __CFA_DEBUG__
		// Set free memory to garbage so subsequent usages might fail.
		memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) );
		#endif // __CFA_DEBUG__

		#ifdef __STATISTICS__
		free_storage += size;
		#endif // __STATISTICS__
		#if defined( SPINLOCK )
		lock( freeElem->lock __cfaabi_dbg_ctx2 ); // acquire spin lock
		header->kind.real.next = freeElem->freeList; // push on stack
		freeElem->freeList = (HeapManager.Storage *)header;
		unlock( freeElem->lock ); // release spin lock
		#else
		freeElem->freeList.push( *(HeapManager.Storage *)header );
		#endif // SPINLOCK
	} // if

	#ifdef __CFA_DEBUG__
	__atomic_add_fetch( &allocFree, -size, __ATOMIC_SEQ_CST );
	if ( traceHeap() ) {
		enum { BufferSize = 64 };
		char helpText[BufferSize];
		int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );
		__cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
	} // if
	#endif // __CFA_DEBUG__
} // doFree

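// Walk every bucket free list, printing (under __STATISTICS__) the free-block count per bucket, and return the number of sbrk-heap bytes not currently on a free list.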
size_t prtFree( HeapManager & manager ) with ( manager ) {
	size_t total = 0;
	#ifdef __STATISTICS__
	__cfaabi_bits_acquire();
	__cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" );
	#endif // __STATISTICS__
	for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {
		size_t size = freeLists[i].blockSize;
		#ifdef __STATISTICS__
		unsigned int N = 0;
		#endif // __STATISTICS__

		#if defined( SPINLOCK )
		for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
		#else
		for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0p; p = p->header.kind.real.next.top ) {
		#endif // SPINLOCK
			total += size;
			#ifdef __STATISTICS__
			N += 1;
			#endif // __STATISTICS__
		} // for

		#ifdef __STATISTICS__
		__cfaabi_bits_print_nolock( STDERR_FILENO, "%7zu, %-7u ", size, N );
		if ( (i + 1) % 8 == 0 ) __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" );
		#endif // __STATISTICS__
	} // for
	#ifdef __STATISTICS__
	__cfaabi_bits_print_nolock( STDERR_FILENO, "\ntotal free blocks:%zu\n", total );
	__cfaabi_bits_release();
	#endif // __STATISTICS__
	return (char *)heapEnd - (char *)heapBegin - total;
} // prtFree

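// HeapManager constructor: record the page size, give each bucket its block size, build the FASTLOOKUP table, set the mmap crossover and heap expansion amounts, and align the start of the sbrk heap.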
static void ?{}( HeapManager & manager ) with ( manager ) {
	pageSize = sysconf( _SC_PAGESIZE );

	for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
		freeLists[i].blockSize = bucketSizes[i];
	} // for

	#ifdef FASTLOOKUP
	unsigned int idx = 0;
	for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
		if ( i > bucketSizes[idx] ) idx += 1;
		lookup[i] = idx;
	} // for
	#endif // FASTLOOKUP

	if ( setMmapStart( default_mmap_start() ) ) {
		abort( "HeapManager : internal error, mmap start initialization failure." );
	} // if
	heapExpand = default_heap_expansion();

	char * end = (char *)sbrk( 0 );
	sbrk( (char *)libCeiling( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment
	heapBegin = heapEnd = sbrk( 0 ); // get new start point
} // HeapManager


static void ^?{}( HeapManager & ) {
	#ifdef __STATISTICS__
	if ( traceHeapTerm() ) {
		printStats();
		// if ( prtfree() ) prtFree( heapManager, true );
	} // if
	#endif // __STATISTICS__
} // ~HeapManager


static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_startup( void ) {
	#ifdef __CFA_DEBUG__
	if ( unlikely( heapBoot ) ) { // check for recursion during system boot
		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
		abort( "boot() : internal error, recursively invoked during system boot." );
	} // if
	heapBoot = true;
	#endif // __CFA_DEBUG__

	//assert( heapManager.heapBegin != 0 );
	//heapManager{};
	if ( heapManager.heapBegin == 0p ) heapManager{};
} // memory_startup

static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_shutdown( void ) {
	^heapManager{};
} // memory_shutdown


static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics
	//assert( heapManager.heapBegin != 0 );
	if ( unlikely( heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ?
	void * addr = doMalloc( size );
	if ( unlikely( addr == 0p ) ) errno = ENOMEM; // POSIX
	return addr;
} // mallocNoStats


static inline void * callocNoStats( size_t noOfElems, size_t elemSize ) {
	size_t size = noOfElems * elemSize;
	char * addr = (char *)mallocNoStats( size );
	if ( unlikely( addr == 0p ) ) return 0p;

	HeapManager.Storage.Header * header;
	HeapManager.FreeHeader * freeElem;
	size_t bsize, alignment;
	bool mapped __attribute__(( unused )) = headers( "calloc", addr, header, freeElem, bsize, alignment );
	#ifndef __CFA_DEBUG__
	// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
	if ( ! mapped )
	#endif // __CFA_DEBUG__
		// Zero entire data space even when > than size => realloc without a new allocation and zero fill works.
		// <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size)
		// `-header`-addr `-size
		memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros

	header->kind.real.blockSize |= 2; // mark as zero filled
	return addr;
} // callocNoStats


static inline void * memalignNoStats( size_t alignment, size_t size ) { // necessary for malloc statistics
	#ifdef __CFA_DEBUG__
	checkAlign( alignment ); // check alignment
	#endif // __CFA_DEBUG__

	// if alignment <= default alignment, do normal malloc as two headers are unnecessary
	if ( unlikely( alignment <= libAlign() ) ) return mallocNoStats( size );

	// Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
	// administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
	//      .-------------v-----------------v----------------v----------,
	//      | Real Header | ... padding ... | Fake Header | data ... |
	//      `-------------^-----------------^-+--------------^----------'
	//      |<--------------------------------' offset/align |<-- alignment boundary

	// subtract libAlign() because it is already the minimum alignment
	// add sizeof(Storage) for fake header
	char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
	if ( unlikely( addr == 0p ) ) return addr;

	// address in the block of the "next" alignment address
	char * user = (char *)libCeiling( (uintptr_t)(addr + sizeof(HeapManager.Storage)), alignment );

	// address of header from malloc
	HeapManager.Storage.Header * realHeader = headerAddr( addr );
	// address of fake header *before* the alignment location
	HeapManager.Storage.Header * fakeHeader = headerAddr( user );
	// SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
	fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
	// SKULLDUGGERY: odd alignment implies fake header
	fakeHeader->kind.fake.alignment = alignment | 1;

	return user;
} // memalignNoStats


static inline void * cmemalignNoStats( size_t alignment, size_t noOfElems, size_t elemSize ) {
	size_t size = noOfElems * elemSize;
	char * addr = (char *)memalignNoStats( alignment, size );
	if ( unlikely( addr == 0p ) ) return 0p;
	HeapManager.Storage.Header * header;
	HeapManager.FreeHeader * freeElem;
	size_t bsize;
	bool mapped __attribute__(( unused )) = headers( "cmemalign", addr, header, freeElem, bsize, alignment );
	#ifndef __CFA_DEBUG__
	// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
	if ( ! mapped )
	#endif // __CFA_DEBUG__
		memset( addr, '\0', dataStorage( bsize, addr, header ) ); // set to zeros
	header->kind.real.blockSize |= 2; // mark as zero filled

	return addr;
} // cmemalignNoStats


// supported mallopt options
#ifndef M_MMAP_THRESHOLD
#define M_MMAP_THRESHOLD (-1)
#endif // M_MMAP_THRESHOLD
#ifndef M_TOP_PAD
#define M_TOP_PAD (-2)
#endif // M_TOP_PAD


extern "C" {
	// Allocates size bytes and returns a pointer to the allocated memory. The memory is not initialized. If size is 0,
	// then malloc() returns either 0p, or a unique pointer value that can later be successfully passed to free().
	void * malloc( size_t size ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &malloc_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &malloc_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		return mallocNoStats( size );
	} // malloc

	// Allocate memory for an array of nmemb elements of size bytes each and returns a pointer to the allocated
	// memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either 0p, or a unique pointer
	// value that can later be successfully passed to free().
	void * calloc( size_t noOfElems, size_t elemSize ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &calloc_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		return callocNoStats( noOfElems, elemSize );
	} // calloc

	// Change the size of the memory block pointed to by ptr to size bytes. The contents shall be unchanged in the range
	// from the start of the region up to the minimum of the old and new sizes. If the new size is larger than the old
	// size, the added memory shall not be initialized. If ptr is 0p, then the call is equivalent to malloc(size), for
	// all values of size; if size is equal to zero, and ptr is not 0p, then the call is equivalent to free(ptr). Unless
	// ptr is 0p, it must have been returned by an earlier call to malloc(), calloc() or realloc(). If the area pointed
	// to was moved, a free(ptr) is done.
	void * realloc( void * oaddr, size_t size ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
		if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
		if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );

		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		size_t bsize, oalign = 0;
		headers( "realloc", oaddr, header, freeElem, bsize, oalign );

		size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
		if ( size <= odsize && odsize <= size * 2 ) { // allow up to 50% wasted storage in smaller size
			// Do not know size of original allocation => cannot do 0 fill for any additional space because do not know
			// where to start filling, i.e., do not overwrite existing values in space.
			//
			// This case does not result in a new profiler entry because the previous one still exists and it must match with
			// the free for this memory. Hence, this realloc does not appear in the profiler output.
			return oaddr;
		} // if

		#ifdef __STATISTICS__
		__atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		// change size and copy old content to new storage

		void * naddr;
		if ( unlikely( oalign != 0 ) ) { // previous request memalign?
			if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
				naddr = cmemalignNoStats( oalign, 1, size ); // create new aligned area
			} else {
				naddr = memalignNoStats( oalign, size ); // create new aligned area
			} // if
		} else {
			if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
				naddr = callocNoStats( 1, size ); // create new area
			} else {
				naddr = mallocNoStats( size ); // create new area
			} // if
		} // if
		if ( unlikely( naddr == 0p ) ) return 0p;

		headers( "realloc", naddr, header, freeElem, bsize, oalign );
		size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
		// To preserve prior fill, the entire bucket must be copied versus the size.
		memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes
		free( oaddr );
		return naddr;
	} // realloc

	// Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of
	// alignment, which must be a power of two. (obsolete)
	void * memalign( size_t alignment, size_t size ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &memalign_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &memalign_storage, size, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		return memalignNoStats( alignment, size );
	} // memalign


	// Same as calloc() with memory alignment.
	void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &cmemalign_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		return cmemalignNoStats( alignment, noOfElems, elemSize );
	} // cmemalign

	// Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
	// of alignment. This requirement is universally ignored.
	void * aligned_alloc( size_t alignment, size_t size ) {
		return memalign( alignment, size );
	} // aligned_alloc


	// Allocates size bytes and places the address of the allocated memory in *memptr. The address of the allocated
	// memory shall be a multiple of alignment, which must be a power of two and a multiple of sizeof(void *). If size
	// is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to
	// free(3).
	int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
		if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
		* memptr = memalign( alignment, size );
		if ( unlikely( * memptr == 0p ) ) return ENOMEM;
		return 0;
	} // posix_memalign

	// Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
	// page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
	void * valloc( size_t size ) {
		return memalign( pageSize, size );
	} // valloc


	// Same as valloc but rounds size to multiple of page size.
	void * pvalloc( size_t size ) {
		return memalign( pageSize, libCeiling( size, pageSize ) );
	} // pvalloc


	// Frees the memory space pointed to by ptr, which must have been returned by a previous call to malloc(), calloc()
	// or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behavior occurs. If ptr is
	// 0p, no operation is performed.
	void free( void * addr ) {
		#ifdef __STATISTICS__
		__atomic_add_fetch( &free_calls, 1, __ATOMIC_SEQ_CST );
		#endif // __STATISTICS__

		if ( unlikely( addr == 0p ) ) { // special case
			// #ifdef __CFA_DEBUG__
			// if ( traceHeap() ) {
			// 	#define nullmsg "Free( 0x0 ) size:0\n"
			// 	// Do not debug print free( 0p ), as it can cause recursive entry from sprintf.
			// 	__cfaabi_dbg_write( nullmsg, sizeof(nullmsg) - 1 );
			// } // if
			// #endif // __CFA_DEBUG__
			return;
		} // exit

		doFree( addr );
	} // free


	// Returns the alignment of the allocation.
	size_t malloc_alignment( void * addr ) {
		if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment
		HeapManager.Storage.Header * header = headerAddr( addr );
		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
			return header->kind.fake.alignment & -2; // remove flag from value
		} else {
			return libAlign(); // minimum alignment
		} // if
	} // malloc_alignment


	// Returns true if the allocation is zero filled, i.e., initially allocated by calloc().
	bool malloc_zero_fill( void * addr ) {
		if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill
		HeapManager.Storage.Header * header = headerAddr( addr );
		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
			header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset);
		} // if
		return (header->kind.real.blockSize & 2) != 0; // zero filled (calloc/cmemalign) ?
	} // malloc_zero_fill


	// Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by
	// malloc or a related function.
	size_t malloc_usable_size( void * addr ) {
		if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size
		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		size_t bsize, alignment;

		headers( "malloc_usable_size", addr, header, freeElem, bsize, alignment );
		return dataStorage( bsize, addr, header ); // data storage in bucket
	} // malloc_usable_size


	// Prints (on default standard error) statistics about memory allocated by malloc and related functions.
	void malloc_stats( void ) {
		#ifdef __STATISTICS__
		printStats();
		if ( prtFree() ) prtFree( heapManager );
		#endif // __STATISTICS__
	} // malloc_stats

	// Changes the file descriptor where malloc_stats() writes statistics.
	int malloc_stats_fd( int fd __attribute__(( unused )) ) {
		#ifdef __STATISTICS__
		int temp = statfd;
		statfd = fd;
		return temp;
		#else
		return -1;
		#endif // __STATISTICS__
	} // malloc_stats_fd


	// Adjusts parameters that control the behavior of the memory-allocation functions (see malloc). The param argument
	// specifies the parameter to be modified, and value specifies the new value for that parameter.
	int mallopt( int option, int value ) {
		choose( option ) {
		  case M_TOP_PAD:
			if ( setHeapExpand( value ) ) return 1;
		  case M_MMAP_THRESHOLD:
			if ( setMmapStart( value ) ) return 1;
		} // switch
		return 0; // error, unsupported
	} // mallopt

	// Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument).
	int malloc_trim( size_t ) {
		return 0; // => impossible to release memory
	} // malloc_trim


	// Exports an XML string that describes the current state of the memory-allocation implementation in the caller.
	// The string is printed on the file stream stream. The exported string includes information about all arenas (see
	// malloc).
	int malloc_info( int options, FILE * stream ) {
		if ( options != 0 ) { errno = EINVAL; return -1; }
		return printStatsXML( stream );
	} // malloc_info


	// Records the current state of all malloc internal bookkeeping variables (but not the actual contents of the heap
	// or the state of malloc_hook functions pointers). The state is recorded in a system-dependent opaque data
	// structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function
	// result. (The caller must free this memory.)
	void * malloc_get_state( void ) {
		return 0p; // unsupported
	} // malloc_get_state


	// Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data
	// structure pointed to by state.
	int malloc_set_state( void * ptr ) {
		return 0; // unsupported
	} // malloc_set_state
} // extern "C"


// Must have CFA linkage to overload with C linkage realloc.
void * realloc( void * oaddr, size_t nalign, size_t size ) {
	#ifdef __STATISTICS__
	__atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
	#endif // __STATISTICS__

	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
	if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
	if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );

	if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum
	#ifdef __CFA_DEBUG__
	else
		checkAlign( nalign ); // check alignment
	#endif // __CFA_DEBUG__

	HeapManager.Storage.Header * header;
	HeapManager.FreeHeader * freeElem;
	size_t bsize, oalign = 0;
	headers( "realloc", oaddr, header, freeElem, bsize, oalign );
	size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket

	if ( oalign != 0 && (uintptr_t)oaddr % nalign == 0 ) { // has alignment and just happens to work out
		headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
		return realloc( oaddr, size );
	} // if

	#ifdef __STATISTICS__
	__atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
	#endif // __STATISTICS__

	// change size and copy old content to new storage

	void * naddr;
	if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
		naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area
	} else {
		naddr = memalignNoStats( nalign, size ); // create new aligned area
	} // if

	headers( "realloc", naddr, header, freeElem, bsize, oalign );
	size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
	// To preserve prior fill, the entire bucket must be copied versus the size.
	memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes
	free( oaddr );
	return naddr;
} // realloc


// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa -nodebug -O2 heap.cfa" //
// End: //