//
// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// heap.c --
//
// Author : Peter A. Buhr
// Created On : Tue Dec 19 21:58:35 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Sat Aug 11 08:22:16 2018
// Update Count : 495
//

#include <unistd.h>	// sbrk, sysconf
#include <stdbool.h>	// true, false
#include <stdio.h>	// snprintf, fileno
#include <errno.h>	// errno
extern "C" {
#include <sys/mman.h>	// mmap, munmap
} // extern "C"

#include "bits/align.hfa"	// libPow2
#include "bits/defs.hfa"	// likely, unlikely
#include "bits/locks.hfa"	// __spinlock_t
#include "startup.hfa"	// STARTUP_PRIORITY_MEMORY
#include "stdlib.hfa"	// bsearchl
#include "malloc.h"


enum {
	__CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1),
	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
};

size_t default_mmap_start() __attribute__(( weak )) {
	return __CFA_DEFAULT_MMAP_START__;
} // default_mmap_start

size_t default_heap_expansion() __attribute__(( weak )) {
	return __CFA_DEFAULT_HEAP_EXPANSION__;
} // default_heap_expansion


// supported mallopt options
#ifndef M_MMAP_THRESHOLD
#define M_MMAP_THRESHOLD (-1)
#endif // M_MMAP_THRESHOLD
#ifndef M_TOP_PAD
#define M_TOP_PAD (-2)
#endif // M_TOP_PAD

#define FASTLOOKUP
#define __STATISTICS__

#define SPINLOCK 0
#define LOCKFREE 1
#define BUCKETLOCK SPINLOCK
#if BUCKETLOCK == LOCKFREE
#include <uStackLF.h>
#endif // LOCKFREE

#define ALIGN 16

// enum { NoBucketSizes = 93,	// number of bucket sizes
// #ifdef FASTLOOKUP
// LookupSizes = 65536,	// number of fast lookup sizes
// #endif // FASTLOOKUP
// };
#define NoBucketSizes 93	// number of bucket sizes
#ifdef FASTLOOKUP
#define LookupSizes 65536	// number of fast lookup sizes
#endif // FASTLOOKUP


static bool traceHeap = false;

inline bool traceHeap() {
	return traceHeap;
} // traceHeap

bool traceHeapOn() {
	bool temp = traceHeap;
	traceHeap = true;
	return temp;
} // traceHeapOn

bool traceHeapOff() {
	bool temp = traceHeap;
	traceHeap = false;
	return temp;
} // traceHeapOff


static bool checkFree = false;

inline bool checkFree() {
	return checkFree;
} // checkFree

bool checkFreeOn() {
	bool temp = checkFree;
	checkFree = true;
	return temp;
} // checkFreeOn

bool checkFreeOff() {
	bool temp = checkFree;
	checkFree = false;
	return temp;
} // checkFreeOff


// static bool traceHeapTerm = false;

// inline bool traceHeapTerm() {
// 	return traceHeapTerm;
// } // traceHeapTerm

// bool traceHeapTermOn() {
// 	bool temp = traceHeapTerm;
// 	traceHeapTerm = true;
// 	return temp;
// } // traceHeapTermOn

// bool traceHeapTermOff() {
// 	bool temp = traceHeapTerm;
// 	traceHeapTerm = false;
// 	return temp;
// } // traceHeapTermOff


#ifdef __CFA_DEBUG__
static unsigned int allocFree;	// running total of allocations minus frees

static void checkUnfreed() {
	if ( allocFree != 0 ) {
		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
		// char helpText[512];
		// int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"
		// 					"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
		// 					(long int)getpid(), allocFree, allocFree ); // always print the UNIX pid
		// __cfaabi_dbg_bits_write( helpText, len );
	} // if
} // checkUnfreed

extern "C" {
	void heapAppStart() {	// called by __cfaabi_appready_startup
		allocFree = 0;
	} // heapAppStart

	void heapAppStop() {	// called by __cfaabi_appready_startdown
		fclose( stdin ); fclose( stdout );
		checkUnfreed();
	} // heapAppStop
} // extern "C"
#endif // __CFA_DEBUG__

160 | |
---|
161 | struct HeapManager { |
---|
162 | // struct FreeHeader; // forward declaration |
---|
163 | |
---|
164 | struct Storage { |
---|
165 | struct Header { // header |
---|
166 | union Kind { |
---|
167 | struct RealHeader { |
---|
168 | union { |
---|
169 | struct { // 32-bit word => 64-bit header, 64-bit word => 128-bit header |
---|
170 | #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4 |
---|
171 | uint32_t padding; // unused, force home/blocksize to overlay alignment in fake header |
---|
172 | #endif // __ORDER_BIG_ENDIAN__ && __U_WORDSIZE__ == 32 |
---|
173 | |
---|
174 | union { |
---|
175 | // FreeHeader * home; // allocated block points back to home locations (must overlay alignment) |
---|
176 | void * home; // allocated block points back to home locations (must overlay alignment) |
---|
177 | size_t blockSize; // size for munmap (must overlay alignment) |
---|
178 | #if BUCKLOCK == SPINLOCK |
---|
179 | Storage * next; // freed block points next freed block of same size |
---|
180 | #endif // SPINLOCK |
---|
181 | }; |
---|
182 | |
---|
183 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4 |
---|
184 | uint32_t padding; // unused, force home/blocksize to overlay alignment in fake header |
---|
185 | #endif // __ORDER_LITTLE_ENDIAN__ && __U_WORDSIZE__ == 32 |
---|
186 | |
---|
187 | }; |
---|
188 | #if BUCKLOCK == LOCKFREE |
---|
189 | Stack<Storage>::Link next; // freed block points next freed block of same size (double-wide) |
---|
190 | #endif // LOCKFREE |
---|
191 | }; |
---|
192 | } real; // RealHeader |
---|
193 | struct FakeHeader { |
---|
194 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
---|
195 | uint32_t alignment; // low-order bits of home/blockSize used for tricks |
---|
196 | #endif // __ORDER_LITTLE_ENDIAN__ |
---|
197 | |
---|
198 | uint32_t offset; |
---|
199 | |
---|
200 | #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ |
---|
201 | uint32_t alignment; // low-order bits of home/blockSize used for tricks |
---|
202 | #endif // __ORDER_BIG_ENDIAN__ |
---|
203 | } fake; // FakeHeader |
---|
204 | } kind; // Kind |
---|
205 | } header; // Header |
---|
206 | char pad[ALIGN - sizeof( Header )]; |
---|
207 | char data[0]; // storage |
---|
208 | }; // Storage |
---|
209 | |
---|
210 | static_assert( ALIGN >= sizeof( Storage ), "ALIGN < sizeof( Storage )" ); |
---|
211 | |
---|
212 | struct FreeHeader { |
---|
213 | #if BUCKLOCK == SPINLOCK |
---|
214 | __spinlock_t lock; // must be first field for alignment |
---|
215 | Storage * freeList; |
---|
216 | #elif BUCKLOCK == LOCKFREE |
---|
217 | StackLF<Storage> freeList; |
---|
218 | #else |
---|
219 | #error undefined lock type for bucket lock |
---|
220 | #endif // SPINLOCK |
---|
221 | size_t blockSize; // size of allocations on this list |
---|
222 | }; // FreeHeader |
---|
223 | |
---|
224 | // must be first fields for alignment |
---|
225 | __spinlock_t extlock; // protects allocation-buffer extension |
---|
226 | FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes |
---|
227 | |
---|
228 | void * heapBegin; // start of heap |
---|
229 | void * heapEnd; // logical end of heap |
---|
230 | size_t heapRemaining; // amount of storage not allocated in the current chunk |
---|
231 | }; // HeapManager |
---|

static inline size_t getKey( const HeapManager.FreeHeader & freeheader ) { return freeheader.blockSize; }
// statically allocated variables => zero filled.


static size_t pageSize;	// architecture pagesize
static size_t heapExpand;	// sbrk advance
static size_t mmapStart;	// cross over point for mmap
static unsigned int maxBucketsUsed;	// maximum number of buckets in use

// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
static unsigned int bucketSizes[NoBucketSizes] @= {	// different bucket sizes
	16, 32, 48, 64,
	64 + sizeof(HeapManager.Storage), 96, 112, 128, 128 + sizeof(HeapManager.Storage), 160, 192, 224,
	256 + sizeof(HeapManager.Storage), 320, 384, 448, 512 + sizeof(HeapManager.Storage), 640, 768, 896,
	1_024 + sizeof(HeapManager.Storage), 1_536, 2_048 + sizeof(HeapManager.Storage), 2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), 6_144,
	8_192 + sizeof(HeapManager.Storage), 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360,
	16_384 + sizeof(HeapManager.Storage), 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720,
	32_768 + sizeof(HeapManager.Storage), 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440,
	65_536 + sizeof(HeapManager.Storage), 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880,
	131_072 + sizeof(HeapManager.Storage), 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760,
	262_144 + sizeof(HeapManager.Storage), 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520,
	524_288 + sizeof(HeapManager.Storage), 655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), 1_179_648, 1_310_720, 1_441_792,
	1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), 2_621_440, 3_145_728, 3_670_016,
	4_194_304 + sizeof(HeapManager.Storage)
};
#ifdef FASTLOOKUP
static unsigned char lookup[LookupSizes];	// O(1) lookup for small sizes
#endif // FASTLOOKUP
static int mmapFd = -1;	// fake or actual fd for anonymous file


#ifdef __CFA_DEBUG__
static bool heapBoot = 0;	// detect recursion during boot
#endif // __CFA_DEBUG__
static HeapManager heapManager __attribute__(( aligned (128) )) @= {};	// size of cache line to prevent false sharing


static inline bool setMmapStart( size_t value ) {
	if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
	mmapStart = value;	// set global

	// find the closest bucket size less than or equal to the mmapStart size
	maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes );	// binary search
	assert( maxBucketsUsed < NoBucketSizes );	// subscript failure ?
	assert( mmapStart <= bucketSizes[maxBucketsUsed] );	// search failure ?
	return false;
} // setMmapStart


static void ?{}( HeapManager & manager ) with ( manager ) {
	pageSize = sysconf( _SC_PAGESIZE );

	for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) {	// initialize the free lists
		freeLists[i].blockSize = bucketSizes[i];
	} // for

#ifdef FASTLOOKUP
	unsigned int idx = 0;
	for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
		if ( i > bucketSizes[idx] ) idx += 1;
		lookup[i] = idx;
	} // for
#endif // FASTLOOKUP
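	// Illustrative sketch (comment only, not executed): with bucketSizes[0] == 16 and bucketSizes[1] == 32, the
	// loop above leaves lookup[0..16] == 0 and lookup[17..32] == 1, so a total request size of 24 bytes maps
	// directly to the 32-byte bucket in O(1), instead of the binary search used for sizes at or above LookupSizes.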

	if ( setMmapStart( default_mmap_start() ) ) {
		abort( "HeapManager : internal error, mmap start initialization failure." );
	} // if
	heapExpand = default_heap_expansion();

	char * End = (char *)sbrk( 0 );
	sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End );	// move start of heap to multiple of alignment
	heapBegin = heapEnd = sbrk( 0 );	// get new start point
} // HeapManager


static void ^?{}( HeapManager & ) {
#ifdef __STATISTICS__
	// if ( traceHeapTerm() ) {
	// 	printStats();
	// 	if ( checkfree() ) checkFree( heapManager, true );
	// } // if
#endif // __STATISTICS__
} // ~HeapManager


static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_startup( void ) {
#ifdef __CFA_DEBUG__
	if ( unlikely( heapBoot ) ) {	// check for recursion during system boot
		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
		abort( "boot() : internal error, recursively invoked during system boot." );
	} // if
	heapBoot = true;
#endif // __CFA_DEBUG__

	assert( heapManager.heapBegin == 0 );
	heapManager{};
} // memory_startup

static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
void memory_shutdown( void ) {
	^heapManager{};
} // memory_shutdown


#ifdef __STATISTICS__
static unsigned long long int mmap_storage;	// heap statistics counters
static unsigned int mmap_calls;
static unsigned long long int munmap_storage;
static unsigned int munmap_calls;
static unsigned long long int sbrk_storage;
static unsigned int sbrk_calls;
static unsigned long long int malloc_storage;
static unsigned int malloc_calls;
static unsigned long long int free_storage;
static unsigned int free_calls;
static unsigned long long int calloc_storage;
static unsigned int calloc_calls;
static unsigned long long int memalign_storage;
static unsigned int memalign_calls;
static unsigned long long int cmemalign_storage;
static unsigned int cmemalign_calls;
static unsigned long long int realloc_storage;
static unsigned int realloc_calls;

static int statfd;	// statistics file descriptor (changed by malloc_stats_fd)


// Use "write" because streams may be shutdown when calls are made.
static void printStats() {
	char helpText[512];
	__cfaabi_dbg_bits_print_buffer( helpText, sizeof(helpText),
			"\nHeap statistics:\n"
			" malloc: calls %u / storage %llu\n"
			" calloc: calls %u / storage %llu\n"
			" memalign: calls %u / storage %llu\n"
			" cmemalign: calls %u / storage %llu\n"
			" realloc: calls %u / storage %llu\n"
			" free: calls %u / storage %llu\n"
			" mmap: calls %u / storage %llu\n"
			" munmap: calls %u / storage %llu\n"
			" sbrk: calls %u / storage %llu\n",
			malloc_calls, malloc_storage,
			calloc_calls, calloc_storage,
			memalign_calls, memalign_storage,
			cmemalign_calls, cmemalign_storage,
			realloc_calls, realloc_storage,
			free_calls, free_storage,
			mmap_calls, mmap_storage,
			munmap_calls, munmap_storage,
			sbrk_calls, sbrk_storage
		);
} // printStats


static int printStatsXML( FILE * stream ) {
	char helpText[512];
	int len = snprintf( helpText, sizeof(helpText),
			"<malloc version=\"1\">\n"
			"<heap nr=\"0\">\n"
			"<sizes>\n"
			"</sizes>\n"
			"<total type=\"malloc\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"calloc\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"memalign\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"cmemalign\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"realloc\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"free\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"mmap\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"munmap\" count=\"%u\" size=\"%llu\"/>\n"
			"<total type=\"sbrk\" count=\"%u\" size=\"%llu\"/>\n"
			"</malloc>",
			malloc_calls, malloc_storage,
			calloc_calls, calloc_storage,
			memalign_calls, memalign_storage,
			cmemalign_calls, cmemalign_storage,
			realloc_calls, realloc_storage,
			free_calls, free_storage,
			mmap_calls, mmap_storage,
			munmap_calls, munmap_storage,
			sbrk_calls, sbrk_storage
		);
	return write( fileno( stream ), helpText, len );	// -1 => error
} // printStatsXML
#endif // __STATISTICS__


static inline void noMemory() {
	abort( "Heap memory exhausted at %zu bytes.\n"
		   "Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
		   ((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
} // noMemory


static inline void checkAlign( size_t alignment ) {
	if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) {
		abort( "Alignment %zu for memory allocation is less than sizeof(void *) and/or not a power of 2.", alignment );
	} // if
} // checkAlign


static inline bool setHeapExpand( size_t value ) {
	if ( value < pageSize ) return true;
	heapExpand = value;
	return false;
} // setHeapExpand


static inline void checkHeader( bool check, const char * name, void * addr ) {
	if ( unlikely( check ) ) {	// bad address ?
		abort( "Attempt to %s storage %p with address outside the heap.\n"
			   "Possible cause is duplicate free on same block or overwriting of memory.",
			   name, addr );
	} // if
} // checkHeader


static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & size, size_t & alignment ) {
	if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) {	// fake header ?
		size_t offset = header->kind.fake.offset;
		alignment = header->kind.fake.alignment & -2;	// remove flag from value
#ifdef __CFA_DEBUG__
		checkAlign( alignment );	// check alignment
#endif // __CFA_DEBUG__
		header = (HeapManager.Storage.Header *)((char *)header - offset);
	} // if
} // fakeHeader


#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))

static inline bool headers( const char * name, void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
	header = headerAddr( addr );

	if ( unlikely( heapEnd < addr ) ) {	// mmapped ?
		fakeHeader( header, size, alignment );
		size = header->kind.real.blockSize & -3;	// mmap size
		return true;
	} // if

#ifdef __CFA_DEBUG__
	checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr );	// bad low address ?
#endif // __CFA_DEBUG__
	// header may be safe to dereference
	fakeHeader( header, size, alignment );
#ifdef __CFA_DEBUG__
	checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr );	// bad address ? (offset could be + or -)
#endif // __CFA_DEBUG__

	freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
#ifdef __CFA_DEBUG__
	if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
		abort( "Attempt to %s storage %p with corrupted header.\n"
			   "Possible cause is duplicate free on same block or overwriting of header information.",
			   name, addr );
	} // if
#endif // __CFA_DEBUG__
	size = freeElem->blockSize;
	return false;
} // headers
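
// Illustrative sketch (comment only): the low-order bits of the shared home/blockSize word can carry flags
// because bucket sizes are multiples of ALIGN, mmap sizes are multiples of pageSize, and home points into the
// aligned freeLists array. calloc/cmemalign set bit 1 (value 2) on that word to record zero fill, e.g. an
// mmapped calloc block of 8_192 bytes stores blockSize 8_194, and the "& -3" masks in headers() clear the flag
// to recover the true size for munmap or the home free-list pointer.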


static inline void * extend( size_t size ) with ( heapManager ) {
	lock( extlock __cfaabi_dbg_ctx2 );
	ptrdiff_t rem = heapRemaining - size;
	if ( rem < 0 ) {
		// If the size requested is bigger than the current remaining storage, increase the size of the heap.

		size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
		if ( sbrk( increase ) == (void *)-1 ) {
			unlock( extlock );
			errno = ENOMEM;
			return 0;
		} // if
#ifdef __STATISTICS__
		sbrk_calls += 1;
		sbrk_storage += increase;
#endif // __STATISTICS__
#ifdef __CFA_DEBUG__
		// Set new memory to garbage so subsequent uninitialized usages might fail.
		memset( (char *)heapEnd + heapRemaining, '\377', increase );
#endif // __CFA_DEBUG__
		rem = heapRemaining + increase - size;
	} // if

	HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
	heapRemaining = rem;
	heapEnd = (char *)heapEnd + size;
	unlock( extlock );
	return block;
} // extend
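
// Illustrative sketch (comment only, hypothetical numbers): with heapRemaining == 100, heapExpand == 1_048_576
// and a request of size == 300, rem is negative, so increase = libCeiling( 1_048_576, libAlign() ) == 1_048_576;
// after the sbrk succeeds the block is carved from the old heapEnd, heapEnd advances by 300, and heapRemaining
// becomes 100 + 1_048_576 - 300.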


static inline void * doMalloc( size_t size ) with ( heapManager ) {
	HeapManager.Storage * block;

	// Look up size in the size list. Make sure the user request includes space for the header that must be allocated
	// along with the block and is a multiple of the alignment size.

	size_t tsize = size + sizeof(HeapManager.Storage);
	if ( likely( tsize < mmapStart ) ) {	// small size => sbrk
		HeapManager.FreeHeader * freeElem =
#ifdef FASTLOOKUP
			tsize < LookupSizes ? &freeLists[lookup[tsize]] :
#endif // FASTLOOKUP
			bsearchl( tsize, freeLists, (size_t)maxBucketsUsed );	// binary search
		assert( freeElem <= &freeLists[maxBucketsUsed] );	// subscripting error ?
		assert( tsize <= freeElem->blockSize );	// search failure ?
		tsize = freeElem->blockSize;	// total space needed for request

		// Spin until the lock is acquired for this particular size of block.

#if BUCKETLOCK == SPINLOCK
		lock( freeElem->lock __cfaabi_dbg_ctx2 );
		block = freeElem->freeList;	// remove node from stack
#else
		block = freeElem->freeList.pop();
#endif // SPINLOCK
		if ( unlikely( block == 0 ) ) {	// no free block ?
#if BUCKETLOCK == SPINLOCK
			unlock( freeElem->lock );
#endif // SPINLOCK
			// Freelist for that size was empty, so carve it out of the heap if there's enough left, or get some more
			// and then carve it off.

			block = (HeapManager.Storage *)extend( tsize );	// mutual exclusion on call
			if ( unlikely( block == 0 ) ) return 0;
#if BUCKETLOCK == SPINLOCK
		} else {
			freeElem->freeList = block->header.kind.real.next;
			unlock( freeElem->lock );
#endif // SPINLOCK
		} // if

		block->header.kind.real.home = freeElem;	// pointer back to free list of appropriate size
	} else {	// large size => mmap
		tsize = libCeiling( tsize, pageSize );	// must be multiple of page size
#ifdef __STATISTICS__
		__atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &mmap_storage, tsize, __ATOMIC_SEQ_CST );
#endif // __STATISTICS__
		block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
		if ( block == (HeapManager.Storage *)MAP_FAILED ) {
			// Do not call strerror( errno ) as it may call malloc.
			abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu error:%d.", &heapManager, tsize, errno );
		} // if
#ifdef __CFA_DEBUG__
		// Set new memory to garbage so subsequent uninitialized usages might fail.
		memset( block, '\377', tsize );
#endif // __CFA_DEBUG__
		block->header.kind.real.blockSize = tsize;	// storage size for munmap
	} // if

	void * area = &(block->data);	// adjust off header to user bytes

#ifdef __CFA_DEBUG__
	assert( ((uintptr_t)area & (libAlign() - 1)) == 0 );	// minimum alignment ?
	__atomic_add_fetch( &allocFree, tsize, __ATOMIC_SEQ_CST );
	if ( traceHeap() ) {
		enum { BufferSize = 64 };
		char helpText[BufferSize];
		int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", area, size, tsize );
		// int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", area, size );
		__cfaabi_dbg_bits_write( helpText, len );
	} // if
#endif // __CFA_DEBUG__

	return area;
} // doMalloc


static inline void doFree( void * addr ) with ( heapManager ) {
#ifdef __CFA_DEBUG__
	if ( unlikely( heapManager.heapBegin == 0 ) ) {
		abort( "doFree( %p ) : internal error, called before heap is initialized.", addr );
	} // if
#endif // __CFA_DEBUG__

	HeapManager.Storage.Header * header;
	HeapManager.FreeHeader * freeElem;
	size_t size, alignment;	// not used (see realloc)

	if ( headers( "free", addr, header, freeElem, size, alignment ) ) {	// mmapped ?
#ifdef __STATISTICS__
		__atomic_add_fetch( &munmap_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &munmap_storage, size, __ATOMIC_SEQ_CST );
#endif // __STATISTICS__
		if ( munmap( header, size ) == -1 ) {
#ifdef __CFA_DEBUG__
			abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
				   "Possible cause is invalid pointer.",
				   addr );
#endif // __CFA_DEBUG__
		} // if
	} else {
#ifdef __CFA_DEBUG__
		// Set free memory to garbage so subsequent usages might fail.
		memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) );
#endif // __CFA_DEBUG__

#ifdef __STATISTICS__
		free_storage += size;
#endif // __STATISTICS__
#if BUCKETLOCK == SPINLOCK
		lock( freeElem->lock __cfaabi_dbg_ctx2 );	// acquire spin lock
		header->kind.real.next = freeElem->freeList;	// push on stack
		freeElem->freeList = (HeapManager.Storage *)header;
		unlock( freeElem->lock );	// release spin lock
#else
		freeElem->freeList.push( *(HeapManager.Storage *)header );
#endif // SPINLOCK
	} // if

#ifdef __CFA_DEBUG__
	__atomic_add_fetch( &allocFree, -size, __ATOMIC_SEQ_CST );
	if ( traceHeap() ) {
		char helpText[64];
		int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );
		__cfaabi_dbg_bits_write( helpText, len );
	} // if
#endif // __CFA_DEBUG__
} // doFree


size_t checkFree( HeapManager & manager ) with ( manager ) {
	size_t total = 0;
#ifdef __STATISTICS__
	__cfaabi_dbg_bits_acquire();
	__cfaabi_dbg_bits_print_nolock( "\nBin lists (bin size : free blocks on list)\n" );
#endif // __STATISTICS__
	for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) {
		size_t size = freeLists[i].blockSize;
#ifdef __STATISTICS__
		unsigned int N = 0;
#endif // __STATISTICS__
#if BUCKETLOCK == SPINLOCK
		for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0; p = p->header.kind.real.next ) {
#else
		for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0; p = p->header.kind.real.next.top ) {
#endif // SPINLOCK
			total += size;
#ifdef __STATISTICS__
			N += 1;
#endif // __STATISTICS__
		} // for
#ifdef __STATISTICS__
		__cfaabi_dbg_bits_print_nolock( "%7zu, %-7u ", size, N );
		if ( (i + 1) % 8 == 0 ) __cfaabi_dbg_bits_print_nolock( "\n" );
#endif // __STATISTICS__
	} // for
#ifdef __STATISTICS__
	__cfaabi_dbg_bits_print_nolock( "\ntotal free blocks:%zu\n", total );
	__cfaabi_dbg_bits_release();
#endif // __STATISTICS__
	return (char *)heapEnd - (char *)heapBegin - total;
} // checkFree


static inline void * malloc2( size_t size ) {	// necessary for malloc statistics
	assert( heapManager.heapBegin != 0 );
	void * area = doMalloc( size );
	if ( unlikely( area == 0 ) ) errno = ENOMEM;	// POSIX
	return area;
} // malloc2


static inline void * memalign2( size_t alignment, size_t size ) {	// necessary for malloc statistics
#ifdef __CFA_DEBUG__
	checkAlign( alignment );	// check alignment
#endif // __CFA_DEBUG__

	// if alignment <= default alignment, do normal malloc as two headers are unnecessary
	if ( unlikely( alignment <= libAlign() ) ) return malloc2( size );

	// Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
	// administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
	//      .-------------v-----------------v----------------v----------,
	//      | Real Header | ... padding ... |  Fake Header   | data ... |
	//      `-------------^-----------------^-+--------------^----------'
	//      |<--------------------------------' offset/align |<-- alignment boundary

	// subtract libAlign() because it is already the minimum alignment
	// add sizeof(Storage) for fake header
	char * area = (char *)doMalloc( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
	if ( unlikely( area == 0 ) ) return area;

	// address in the block of the "next" alignment address
	char * user = (char *)libCeiling( (uintptr_t)(area + sizeof(HeapManager.Storage)), alignment );

	// address of header from malloc
	HeapManager.Storage.Header * realHeader = headerAddr( area );
	// address of fake header *before* the alignment location
	HeapManager.Storage.Header * fakeHeader = headerAddr( user );
	// SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
	fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
	// SKULLDUGGERY: odd alignment implies fake header
	fakeHeader->kind.fake.alignment = alignment | 1;

	return user;
} // memalign2
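
// Illustrative sketch (comment only, hypothetical addresses, assuming a 64-bit build where
// sizeof(HeapManager.Storage) == 16 and libAlign() == 16): for memalign2( 64, ... ), if doMalloc returns
// area == 0x1010, then user = libCeiling( 0x1020, 64 ) == 0x1040, the fake header sits at 0x1030 and the real
// header at 0x1000, so offset == 0x30 and the stored alignment is 64 | 1 == 65; fakeHeader() later detects the
// odd value, recovers 64 with "& -2", and steps back 0x30 bytes to reach the real header.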


extern "C" {
	void * malloc( size_t size ) {
#ifdef __STATISTICS__
		__atomic_add_fetch( &malloc_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &malloc_storage, size, __ATOMIC_SEQ_CST );
#endif // __STATISTICS__

		return malloc2( size );
	} // malloc


	void * calloc( size_t noOfElems, size_t elemSize ) {
		size_t size = noOfElems * elemSize;
#ifdef __STATISTICS__
		__atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &calloc_storage, size, __ATOMIC_SEQ_CST );
#endif // __STATISTICS__

		char * area = (char *)malloc2( size );
		if ( unlikely( area == 0 ) ) return 0;
		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		size_t asize, alignment;
		bool mapped __attribute__(( unused )) = headers( "calloc", area, header, freeElem, asize, alignment );
#ifndef __CFA_DEBUG__
		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		if ( ! mapped )
#endif // __CFA_DEBUG__
			memset( area, '\0', asize - sizeof(HeapManager.Storage) );	// set to zeros
		header->kind.real.blockSize |= 2;	// mark as zero filled
		return area;
	} // calloc


	void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {
		size_t size = noOfElems * elemSize;
#ifdef __STATISTICS__
		__atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &cmemalign_storage, size, __ATOMIC_SEQ_CST );
#endif // __STATISTICS__

		char * area = (char *)memalign2( alignment, size );
		if ( unlikely( area == 0 ) ) return 0;
		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		size_t asize;
		bool mapped __attribute__(( unused )) = headers( "cmemalign", area, header, freeElem, asize, alignment );
#ifndef __CFA_DEBUG__
		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		if ( ! mapped )
#endif // __CFA_DEBUG__
			memset( area, '\0', asize - ( (char *)area - (char *)header ) );	// set to zeros
		header->kind.real.blockSize |= 2;	// mark as zero filled

		return area;
	} // cmemalign


	void * realloc( void * addr, size_t size ) {
#ifdef __STATISTICS__
		__atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
#endif // __STATISTICS__

		if ( unlikely( addr == 0 ) ) return malloc2( size );	// special cases
		if ( unlikely( size == 0 ) ) { free( addr ); return 0; }

		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		size_t asize, alignment = 0;
		headers( "realloc", addr, header, freeElem, asize, alignment );

		size_t usize = asize - ( (char *)addr - (char *)header );	// compute the amount of user storage in the block
		if ( usize >= size ) {	// already sufficient storage
			// This case does not result in a new profiler entry because the previous one still exists and it must match with
			// the free for this memory. Hence, this realloc does not appear in the profiler output.
			return addr;
		} // if

#ifdef __STATISTICS__
		__atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
#endif // __STATISTICS__

		void * area;
		if ( unlikely( alignment != 0 ) ) {	// previous request memalign?
			area = memalign( alignment, size );	// create new area
		} else {
			area = malloc2( size );	// create new area
		} // if
		if ( unlikely( area == 0 ) ) return 0;
		if ( unlikely( header->kind.real.blockSize & 2 ) ) {	// previous request zero fill (calloc/cmemalign) ?
			assert( (header->kind.real.blockSize & 1) == 0 );
			bool mapped __attribute__(( unused )) = headers( "realloc", area, header, freeElem, asize, alignment );
#ifndef __CFA_DEBUG__
			// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
			if ( ! mapped )
#endif // __CFA_DEBUG__
				memset( (char *)area + usize, '\0', asize - ( (char *)area - (char *)header ) - usize );	// zero-fill back part
			header->kind.real.blockSize |= 2;	// mark new request as zero fill
		} // if
		memcpy( area, addr, usize );	// copy bytes
		free( addr );
		return area;
	} // realloc


	void * memalign( size_t alignment, size_t size ) {
#ifdef __STATISTICS__
		__atomic_add_fetch( &memalign_calls, 1, __ATOMIC_SEQ_CST );
		__atomic_add_fetch( &memalign_storage, size, __ATOMIC_SEQ_CST );
#endif // __STATISTICS__

		void * area = memalign2( alignment, size );

		return area;
	} // memalign


	void * aligned_alloc( size_t alignment, size_t size ) {
		return memalign( alignment, size );
	} // aligned_alloc


	int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
		if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL;	// check alignment
		* memptr = memalign( alignment, size );
		if ( unlikely( * memptr == 0 ) ) return ENOMEM;
		return 0;
	} // posix_memalign


	void * valloc( size_t size ) {
		return memalign( pageSize, size );
	} // valloc


	void free( void * addr ) {
#ifdef __STATISTICS__
		__atomic_add_fetch( &free_calls, 1, __ATOMIC_SEQ_CST );
#endif // __STATISTICS__

		if ( unlikely( addr == 0 ) ) {	// special case
#ifdef __CFA_DEBUG__
			if ( traceHeap() ) {
#define nullmsg "Free( 0x0 ) size:0\n"
				// Do not debug print free( 0 ), as it can cause recursive entry from sprintf.
				__cfaabi_dbg_bits_write( nullmsg, sizeof(nullmsg) - 1 );
			} // if
#endif // __CFA_DEBUG__
			return;
		} // if

		doFree( addr );
	} // free


	int mallopt( int option, int value ) {
		choose( option ) {
		  case M_TOP_PAD:
			if ( setHeapExpand( value ) ) fallthru default;
		  case M_MMAP_THRESHOLD:
			if ( setMmapStart( value ) ) fallthru default;
		  default:
			return 1;	// success, or unsupported
		} // switch
		return 0;	// error
	} // mallopt
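
	// Usage sketch (illustrative only): callers can tune the allocator through the two options handled above, e.g.
	//     mallopt( M_TOP_PAD, 2 * 1024 * 1024 );	// request larger sbrk extensions (validated by setHeapExpand)
	//     mallopt( M_MMAP_THRESHOLD, 512 * 1024 );	// adjust the sbrk/mmap crossover point (validated by setMmapStart)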


	int malloc_trim( size_t ) {
		return 0;	// => impossible to release memory
	} // malloc_trim

	size_t malloc_usable_size( void * addr ) {
		if ( unlikely( addr == 0 ) ) return 0;	// null allocation has 0 size
		HeapManager.Storage.Header * header;
		HeapManager.FreeHeader * freeElem;
		size_t size, alignment;

		headers( "malloc_usable_size", addr, header, freeElem, size, alignment );
		size_t usize = size - ( (char *)addr - (char *)header );	// compute the amount of user storage in the block
		return usize;
	} // malloc_usable_size


	size_t malloc_alignment( void * addr ) {
		if ( unlikely( addr == 0 ) ) return libAlign();	// minimum alignment
		HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );
		if ( (header->kind.fake.alignment & 1) == 1 ) {	// fake header ?
			return header->kind.fake.alignment & -2;	// remove flag from value
		} else {
			return libAlign();	// minimum alignment
		} // if
	} // malloc_alignment


	bool malloc_zero_fill( void * addr ) {
		if ( unlikely( addr == 0 ) ) return false;	// null allocation is not zero fill
		HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );
		if ( (header->kind.fake.alignment & 1) == 1 ) {	// fake header ?
			header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset);
		} // if
		return (header->kind.real.blockSize & 2) != 0;	// zero filled (calloc/cmemalign) ?
	} // malloc_zero_fill


	void malloc_stats( void ) {
#ifdef __STATISTICS__
		printStats();
		if ( checkFree() ) checkFree( heapManager );
#endif // __STATISTICS__
	} // malloc_stats


	int malloc_stats_fd( int fd ) {
#ifdef __STATISTICS__
		int temp = statfd;
		statfd = fd;
		return temp;
#else
		return -1;
#endif // __STATISTICS__
	} // malloc_stats_fd


	int malloc_info( int options, FILE * stream ) {
		return printStatsXML( stream );
	} // malloc_info


	void * malloc_get_state( void ) {
		return 0;
	} // malloc_get_state


	int malloc_set_state( void * ptr ) {
		return 0;
	} // malloc_set_state
} // extern "C"


// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa -nodebug -O2 heap.c" //
// End: //