//
// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// heap.cfa --
//
// Author           : Peter A. Buhr
// Created On       : Tue Dec 19 21:58:35 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Thu Oct 13 22:21:52 2022
// Update Count     : 1557
//

#include <stdio.h>
#include <string.h>					// memset, memcpy
#include <limits.h>					// ULONG_MAX
#include <stdlib.h>					// EXIT_FAILURE
#include <errno.h>					// errno, ENOMEM, EINVAL
#include <unistd.h>					// STDERR_FILENO, sbrk, sysconf
#include <malloc.h>					// memalign, malloc_usable_size
#include <sys/mman.h>					// mmap, munmap
extern "C" {
#include <sys/sysinfo.h>				// get_nprocs
} // extern "C"

#include "bits/align.hfa"				// libAlign
#include "bits/defs.hfa"				// likely, unlikely
#include "concurrency/kernel/fwd.hfa"			// __POLL_PREEMPTION
#include "startup.hfa"					// STARTUP_PRIORITY_MEMORY
#include "math.hfa"					// ceiling, min
#include "bitmanip.hfa"					// is_pow2, ceiling2

// supported mallopt options
#ifndef M_MMAP_THRESHOLD
#define M_MMAP_THRESHOLD (-1)
#endif // M_MMAP_THRESHOLD

#ifndef M_TOP_PAD
#define M_TOP_PAD (-2)
#endif // M_TOP_PAD

#define FASTLOOKUP					// use O(1) table lookup from allocation size to bucket size
#define RETURNSPIN					// toggle spinlock / lockfree stack
#define OWNERSHIP					// return freed memory to owner thread

#define CACHE_ALIGN 64
#define CALIGN __attribute__(( aligned(CACHE_ALIGN) ))

#define TLSMODEL __attribute__(( tls_model("initial-exec") ))

//#define __STATISTICS__

enum {
	// The default extension heap amount in units of bytes. When the current heap reaches the brk address, the brk
	// address is extended by the extension amount.
	__CFA_DEFAULT_HEAP_EXPANSION__ = 10 * 1024 * 1024,

	// The mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; values
	// greater than or equal to this value are mmap from the operating system.
	__CFA_DEFAULT_MMAP_START__ = 512 * 1024 + 1,

	// The default unfreed storage amount in units of bytes. When the CFA program ends it subtracts this amount from
	// the malloc/free counter to adjust for storage the program does not free.
	__CFA_DEFAULT_HEAP_UNFREED__ = 0
}; // enum


//####################### Heap Trace/Print ####################


static bool traceHeap = false;

inline bool traceHeap() libcfa_public { return traceHeap; }

bool traceHeapOn() libcfa_public {
	bool temp = traceHeap;
	traceHeap = true;
	return temp;
} // traceHeapOn

bool traceHeapOff() libcfa_public {
	bool temp = traceHeap;
	traceHeap = false;
	return temp;
} // traceHeapOff

bool traceHeapTerm() libcfa_public { return false; }


static bool prtFree = false;

bool prtFree() {
	return prtFree;
} // prtFree

bool prtFreeOn() {
	bool temp = prtFree;
	prtFree = true;
	return temp;
} // prtFreeOn

bool prtFreeOff() {
	bool temp = prtFree;
	prtFree = false;
	return temp;
} // prtFreeOff


//######################### Spin Lock #########################


// pause to prevent excess processor bus usage
#if defined( __i386 ) || defined( __x86_64 )
	#define Pause() __asm__ __volatile__ ( "pause" : : : )
#elif defined(__ARM_ARCH)
	#define Pause() __asm__ __volatile__ ( "YIELD" : : : )
#else
	#error unsupported architecture
#endif

typedef volatile uintptr_t SpinLock_t CALIGN;		// aligned addressable word-size

static inline __attribute__((always_inline)) void lock( volatile SpinLock_t & slock ) {
	enum { SPIN_START = 4, SPIN_END = 64 * 1024, };
	unsigned int spin = SPIN_START;

	for ( unsigned int i = 1;; i += 1 ) {
		if ( slock == 0 && __atomic_test_and_set( &slock, __ATOMIC_SEQ_CST ) == 0 ) break; // Fence
		for ( volatile unsigned int s = 0; s < spin; s += 1 ) Pause(); // exponential spin
		spin += spin;					// powers of 2
		//if ( i % 64 == 0 ) spin += spin;		// slowly increase by powers of 2
		if ( spin > SPIN_END ) spin = SPIN_END;		// cap spinning
	} // for
} // lock

static inline __attribute__((always_inline)) void unlock( volatile SpinLock_t & slock ) {
	__atomic_clear( &slock, __ATOMIC_SEQ_CST );	// Fence
} // unlock
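
// A minimal usage sketch (illustrative only; "counterLock" and "counter" are hypothetical names):
// the lock is a test-and-test-and-set spin lock with bounded exponential backoff, so acquire and
// release bracket a short critical section:
//
//   static SpinLock_t counterLock;
//   lock( counterLock );
//   counter += 1;					// critical section
//   unlock( counterLock );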


//####################### Heap Statistics ####################


#ifdef __STATISTICS__
enum { CntTriples = 12 };				// number of counter triples
enum { MALLOC, AALLOC, CALLOC, MEMALIGN, AMEMALIGN, CMEMALIGN, RESIZE, REALLOC, FREE };

struct StatsOverlay {					// overlay for iteration
	unsigned int calls, calls_0;
	unsigned long long int request, alloc;
};

// Heap statistics counters.
union HeapStatistics {
	struct {					// minimum qualification
		unsigned int malloc_calls, malloc_0_calls;
		unsigned long long int malloc_storage_request, malloc_storage_alloc;
		unsigned int aalloc_calls, aalloc_0_calls;
		unsigned long long int aalloc_storage_request, aalloc_storage_alloc;
		unsigned int calloc_calls, calloc_0_calls;
		unsigned long long int calloc_storage_request, calloc_storage_alloc;
		unsigned int memalign_calls, memalign_0_calls;
		unsigned long long int memalign_storage_request, memalign_storage_alloc;
		unsigned int amemalign_calls, amemalign_0_calls;
		unsigned long long int amemalign_storage_request, amemalign_storage_alloc;
		unsigned int cmemalign_calls, cmemalign_0_calls;
		unsigned long long int cmemalign_storage_request, cmemalign_storage_alloc;
		unsigned int resize_calls, resize_0_calls;
		unsigned long long int resize_storage_request, resize_storage_alloc;
		unsigned int realloc_calls, realloc_0_calls;
		unsigned long long int realloc_storage_request, realloc_storage_alloc;
		unsigned int free_calls, free_null_calls;
		unsigned long long int free_storage_request, free_storage_alloc;
		unsigned int return_pulls, return_pushes;
		unsigned long long int return_storage_request, return_storage_alloc;
		unsigned int mmap_calls, mmap_0_calls;		// no zero calls
		unsigned long long int mmap_storage_request, mmap_storage_alloc;
		unsigned int munmap_calls, munmap_0_calls;	// no zero calls
		unsigned long long int munmap_storage_request, munmap_storage_alloc;
	};
	struct StatsOverlay counters[CntTriples];	// overlay for iteration
}; // HeapStatistics

static_assert( sizeof(HeapStatistics) == CntTriples * sizeof(StatsOverlay),
			   "Heap statistics counter-triples do not match array size" );
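
// The anonymous-struct fields and the counters[] array overlay the same storage, so each triple can
// be addressed by name or by index. For example (illustrative):
//
//   stats.counters[MALLOC].calls		// aliases stats.malloc_calls
//   stats.counters[FREE].request		// aliases stats.free_storage_request
//
// HeapStatisticsCtor and ?+=? below rely on this overlay to iterate the counters in bulk.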

static void HeapStatisticsCtor( HeapStatistics & stats ) {
	memset( &stats, '\0', sizeof(stats) );		// very fast
	// for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
	// 	stats.counters[i].calls = stats.counters[i].calls_0 = stats.counters[i].request = stats.counters[i].alloc = 0;
	// } // for
} // HeapStatisticsCtor

static HeapStatistics & ?+=?( HeapStatistics & lhs, const HeapStatistics & rhs ) {
	for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
		lhs.counters[i].calls += rhs.counters[i].calls;
		lhs.counters[i].calls_0 += rhs.counters[i].calls_0;
		lhs.counters[i].request += rhs.counters[i].request;
		lhs.counters[i].alloc += rhs.counters[i].alloc;
	} // for
	return lhs;
} // ?+=?
#endif // __STATISTICS__


#define SPINLOCK 0
#define LOCKFREE 1
#define BUCKETLOCK SPINLOCK
#if BUCKETLOCK == SPINLOCK
#elif BUCKETLOCK == LOCKFREE
	#include <containers/lockfree.hfa>
#else
	#error undefined lock type for bucket lock
#endif // LOCKFREE

// Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
// Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
enum { NoBucketSizes = 91 };				// number of bucket sizes

struct Heap {
	struct Storage {
		struct Header {				// header
			union Kind {
				struct RealHeader {
					union {
						struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header
							union {
								// 2nd low-order bit => zero filled, 3rd low-order bit => mmapped
								// FreeHeader * home; // allocated block points back to home locations (must overlay alignment)
								void * home;	// allocated block points back to home locations (must overlay alignment)
								size_t blockSize; // size for munmap (must overlay alignment)
								#if BUCKETLOCK == SPINLOCK
								Storage * next;	// freed block points to next freed block of same size
								#endif // SPINLOCK
							};
							size_t size;	// allocation size in bytes
						};
						#if BUCKETLOCK == LOCKFREE
						Link(Storage) next;	// freed block points next freed block of same size (double-wide)
						#endif // LOCKFREE
					};
				} real;				// RealHeader

				struct FakeHeader {
					uintptr_t alignment;	// 1st low-order bit => fake header & alignment
					uintptr_t offset;
				} fake;				// FakeHeader
			} kind;					// Kind
		} header;					// Header

		char pad[libAlign() - sizeof( Header )];
		char data[0];				// storage
	}; // Storage

	static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" );

	struct __attribute__(( aligned (8) )) FreeHeader {
		size_t blockSize __attribute__(( aligned(8) )); // size of allocations on this list
		#if BUCKETLOCK == SPINLOCK
		#ifdef OWNERSHIP
		#ifdef RETURNSPIN
		SpinLock_t returnLock;
		#endif // RETURNSPIN
		Storage * returnList;			// other thread return list
		#endif // OWNERSHIP
		Storage * freeList;			// thread free list
		#else
		StackLF(Storage) freeList;
		#endif // BUCKETLOCK
		Heap * homeManager;			// heap owner (free storage to bucket, from bucket to heap)
	}; // FreeHeader

	FreeHeader freeLists[NoBucketSizes];		// buckets for different allocation sizes
	void * heapBuffer;				// start of free storage in buffer
	size_t heapReserve;				// amount of remaining free storage in buffer

	#if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
	Heap * nextHeapManager;				// intrusive link of existing heaps; traversed to collect statistics or check unfreed storage
	#endif // __STATISTICS__ || __CFA_DEBUG__
	Heap * nextFreeHeapManager;			// intrusive link of free heaps from terminated threads; reused by new threads

	#ifdef __CFA_DEBUG__
	int64_t allocUnfreed;				// running total of allocations minus frees; can be negative
	#endif // __CFA_DEBUG__

	#ifdef __STATISTICS__
	HeapStatistics stats;				// local statistic table for this heap
	#endif // __STATISTICS__
}; // Heap
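
// Size sketch (illustrative): per the superblock comment in getHeap below, a cache-aligned FreeHeader
// is about 128 bytes, so freeLists is roughly 91 * 128 ~= 12K bytes and dominates sizeof(Heap).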

#if BUCKETLOCK == LOCKFREE
inline __attribute__((always_inline))
static {
	Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; }
	void ?{}( Heap.FreeHeader & ) {}
	void ^?{}( Heap.FreeHeader & ) {}
} // distribution
#endif // LOCKFREE


struct HeapMaster {
	SpinLock_t extLock;				// protects allocation-buffer extension
	SpinLock_t mgrLock;				// protects freeHeapManagersList, heapManagersList, heapManagersStorage, heapManagersStorageEnd

	void * heapBegin;				// start of heap
	void * heapEnd;					// logical end of heap
	size_t heapRemaining;				// amount of storage not allocated in the current chunk
	size_t pageSize;				// architecture pagesize
	size_t heapExpand;				// sbrk advance
	size_t mmapStart;				// cross over point for mmap
	unsigned int maxBucketsUsed;			// maximum number of buckets in use

	Heap * heapManagersList;			// heap-list head
	Heap * freeHeapManagersList;			// free-list head

	// Heap superblocks are not linked; heaps in superblocks are linked via intrusive links.
	Heap * heapManagersStorage;			// next heap to use in heap superblock
	Heap * heapManagersStorageEnd;			// logical heap outside of superblock's end

	#ifdef __STATISTICS__
	HeapStatistics stats;				// global stats for thread-local heaps to add their counters when exiting
	unsigned long int threads_started, threads_exited; // counts threads that have started and exited
	unsigned long int reused_heap, new_heap;	// counts reusability of heaps
	unsigned int sbrk_calls;
	unsigned long long int sbrk_storage;
	int stats_fd;
	#endif // __STATISTICS__
}; // HeapMaster


#ifdef FASTLOOKUP
enum { LookupSizes = 65_536 + sizeof(Heap.Storage) };	// number of fast lookup sizes
static unsigned char lookup[LookupSizes];		// O(1) lookup for small sizes
#endif // FASTLOOKUP

static volatile bool heapMasterBootFlag = false;	// trigger for first heap
static HeapMaster heapMaster @= {};			// program global

static void heapMasterCtor();
static void heapMasterDtor();
static Heap * getHeap();


// Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16.
// Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size.
// malloc(0) returns 0p, so no bucket is necessary for a 0-byte allocation returning an address that can be freed.
static const unsigned int bucketSizes[] @= {		// different bucket sizes
	16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4
	96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3
	160, 192, 224, 256 + sizeof(Heap.Storage), // 4
	320, 384, 448, 512 + sizeof(Heap.Storage), // 4
	640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4
	1_536, 2_048 + sizeof(Heap.Storage), // 2
	2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4
	6_144, 8_192 + sizeof(Heap.Storage), // 2
	9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8
	18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8
	36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8
	73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8
	147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8
	294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8
	655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4
	1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8
	2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4
};

static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0]), "size of bucket array wrong" );
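
// A worked example (illustrative, assuming a 64-bit build where sizeof(Heap.Storage) == libAlign() == 16):
// malloc(100) needs 100 + 16 header bytes = 116, and the smallest bucket >= 116 is 112 + 16 == 128,
// so the request is served from the 128-byte bucket with 128 - 16 == 112 usable bytes.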


// extern visibility, used by runtime kernel
libcfa_public size_t __page_size;			// architecture pagesize
libcfa_public int __map_prot;				// common mmap/mprotect protection


// Thread-local storage is allocated lazily when the storage is accessed.
static __thread size_t PAD1 CALIGN TLSMODEL __attribute__(( unused )); // protect false sharing
static __thread Heap * volatile heapManager CALIGN TLSMODEL;
static __thread size_t PAD2 CALIGN TLSMODEL __attribute__(( unused )); // protect further false sharing


// declare helper functions for HeapMaster
void noMemory();					// forward, called by "builtin_new" when malloc returns 0


// generic Bsearchl does not inline, so substitute with hand-coded binary-search.
inline __attribute__((always_inline))
static size_t Bsearchl( unsigned int key, const unsigned int vals[], size_t dim ) {
	size_t l = 0, m, h = dim;
	while ( l < h ) {
		m = (l + h) / 2;
		if ( (unsigned int &)(vals[m]) < key ) {	// cast away const
			l = m + 1;
		} else {
			h = m;
		} // if
	} // while
	return l;
} // Bsearchl
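
// Bsearchl is a lower-bound search: it returns the index of the first element >= key, or dim if every
// element is smaller. For example (illustrative), Bsearchl( 116, bucketSizes, NoBucketSizes ) returns
// the index of the first bucket size >= 116, which is how allocation sizes are mapped to buckets.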


void heapMasterCtor() with( heapMaster ) {
	// Singleton pattern to initialize heap master

	verify( bucketSizes[0] == (16 + sizeof(Heap.Storage)) );

	__page_size = sysconf( _SC_PAGESIZE );
	__map_prot = PROT_READ | PROT_WRITE | PROT_EXEC;

	?{}( extLock );
	?{}( mgrLock );

	char * end = (char *)sbrk( 0 );
	heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment
	heapRemaining = 0;
	heapExpand = malloc_expansion();
	mmapStart = malloc_mmap_start();

	// find the closest bucket size less than or equal to the mmapStart size
	maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search

	verify( (mmapStart >= __page_size) && (bucketSizes[NoBucketSizes - 1] >= mmapStart) );
	verify( maxBucketsUsed < NoBucketSizes );	// subscript failure ?
	verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?

	heapManagersList = 0p;
	freeHeapManagersList = 0p;

	heapManagersStorage = 0p;
	heapManagersStorageEnd = 0p;

	#ifdef __STATISTICS__
	HeapStatisticsCtor( stats );			// clear statistic counters
	threads_started = threads_exited = 0;
	reused_heap = new_heap = 0;
	sbrk_calls = sbrk_storage = 0;
	stats_fd = STDERR_FILENO;
	#endif // __STATISTICS__

	#ifdef FASTLOOKUP
	for ( unsigned int i = 0, idx = 0; i < LookupSizes; i += 1 ) {
		if ( i > bucketSizes[idx] ) idx += 1;
		lookup[i] = idx;
		verify( i <= bucketSizes[idx] );
		verify( (i <= 32 && idx == 0) || (i > bucketSizes[idx - 1]) );
	} // for
	#endif // FASTLOOKUP

	heapMasterBootFlag = true;
} // heapMasterCtor


#define NO_MEMORY_MSG "**** Error **** insufficient heap memory available to allocate %zd new bytes."

Heap * getHeap() with( heapMaster ) {
	Heap * heap;
	if ( freeHeapManagersList ) {			// free heap for reuse ?
		heap = freeHeapManagersList;
		freeHeapManagersList = heap->nextFreeHeapManager;

		#ifdef __STATISTICS__
		reused_heap += 1;
		#endif // __STATISTICS__
	} else {					// free heap not found, create new
		// Heap size is about 12K: FreeHeader (128 bytes because of cache alignment) * NoBucketSizes (91) ~= 12K.
		// So a 128-heap superblock is 128 * 12K ~= 1.5M bytes, where a 128-heap superblock handles a medium
		// sized multi-processor server.
		size_t remaining = heapManagersStorageEnd - heapManagersStorage; // remaining free heaps in superblock
		if ( ! heapManagersStorage || remaining == 0 ) {
			// Each block of heaps is a multiple of the number of cores on the computer.
			int HeapDim = get_nprocs();		// get_nprocs_conf does not work
			size_t size = HeapDim * sizeof( Heap );

			heapManagersStorage = (Heap *)mmap( 0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
			if ( unlikely( heapManagersStorage == (Heap *)MAP_FAILED ) ) { // failed ?
				if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, size ); // no memory
				// Do not call strerror( errno ) as it may call malloc.
				abort( "**** Error **** attempt to allocate block of heaps of size %zu bytes and mmap failed with errno %d.", size, errno );
			} // if
			heapManagersStorageEnd = &heapManagersStorage[HeapDim]; // outside array
		} // if

		heap = heapManagersStorage;
		heapManagersStorage = heapManagersStorage + 1;	// bump next heap

		#if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
		heap->nextHeapManager = heapManagersList;
		#endif // __STATISTICS__ || __CFA_DEBUG__
		heapManagersList = heap;

		#ifdef __STATISTICS__
		new_heap += 1;
		#endif // __STATISTICS__

		with( *heap ) {
			for ( unsigned int j = 0; j < NoBucketSizes; j += 1 ) { // initialize free lists
				#ifdef OWNERSHIP
				#ifdef RETURNSPIN
				?{}( freeLists[j].returnLock );
				#endif // RETURNSPIN
				freeLists[j].returnList = 0p;
				#endif // OWNERSHIP
				freeLists[j].freeList = 0p;
				freeLists[j].homeManager = heap;
				freeLists[j].blockSize = bucketSizes[j];
			} // for

			heapBuffer = 0p;
			heapReserve = 0;
			nextFreeHeapManager = 0p;
			#ifdef __CFA_DEBUG__
			allocUnfreed = 0;
			#endif // __CFA_DEBUG__
		} // with
	} // if

	return heap;
} // getHeap
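
// Superblock sizing sketch (illustrative): on a machine where get_nprocs() == 8 and sizeof(Heap) is
// about 12K, the first miss mmaps an 8 * 12K ~= 96K superblock and hands out its heaps one at a time;
// heaps from terminated threads are recycled through freeHeapManagersList before a new one is carved.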


void heapManagerCtor() libcfa_public {
	if ( unlikely( ! heapMasterBootFlag ) ) heapMasterCtor();

	lock( heapMaster.mgrLock );			// protect heapMaster counters

	// get storage for heap manager

	heapManager = getHeap();

	#ifdef __STATISTICS__
	HeapStatisticsCtor( heapManager->stats );	// heap local
	heapMaster.threads_started += 1;
	#endif // __STATISTICS__

	unlock( heapMaster.mgrLock );
} // heapManagerCtor


void heapManagerDtor() libcfa_public {
	lock( heapMaster.mgrLock );

	// place heap on list of free heaps for reusability
	heapManager->nextFreeHeapManager = heapMaster.freeHeapManagersList;
	heapMaster.freeHeapManagersList = heapManager;

	#ifdef __STATISTICS__
	heapMaster.threads_exited += 1;
	#endif // __STATISTICS__

	// Do not set heapManager to NULL because it is used after Cforall is shutdown but before the program shuts down.

	unlock( heapMaster.mgrLock );
} // heapManagerDtor


//####################### Memory Allocation Routines Helpers ####################


extern int cfa_main_returned;				// from interpose.cfa
extern "C" {
	void memory_startup( void ) {
		if ( ! heapMasterBootFlag ) heapManagerCtor(); // sanity check
	} // memory_startup

	void memory_shutdown( void ) {
		heapManagerDtor();
	} // memory_shutdown

	void heapAppStart() {				// called by __cfaabi_appready_startup
		verify( heapManager );
		#ifdef __CFA_DEBUG__
		heapManager->allocUnfreed = 0;		// clear prior allocation counts
		#endif // __CFA_DEBUG__

		#ifdef __STATISTICS__
		HeapStatisticsCtor( heapManager->stats ); // clear prior statistic counters
		#endif // __STATISTICS__
	} // heapAppStart

	void heapAppStop() {				// called by __cfaabi_appready_startdown
		fclose( stdin ); fclose( stdout );	// free buffer storage
		if ( ! cfa_main_returned ) return;	// do not check unfreed storage if exit called

		#ifdef __CFA_DEBUG__
		// allocUnfreed is set to 0 when a heap is created and it accumulates any unfreed storage during its multiple thread
		// usages. At the end, add up each heap allocUnfreed value across all heaps to get the total unfreed storage.
		int64_t allocUnfreed = 0;
		for ( Heap * heap = heapMaster.heapManagersList; heap; heap = heap->nextHeapManager ) {
			allocUnfreed += heap->allocUnfreed;
		} // for

		allocUnfreed -= malloc_unfreed();	// subtract any user specified unfreed storage
		if ( allocUnfreed > 0 ) {
			// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
			char helpText[512];
			__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
				"CFA warning (UNIX pid:%ld) : program terminating with %ju(0x%jx) bytes of storage allocated but not freed.\n"
				"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
				(long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid
		} // if
		#endif // __CFA_DEBUG__
	} // heapAppStop
} // extern "C"


#ifdef __STATISTICS__
static HeapStatistics stats;				// zero filled

#define prtFmt \
	"\nHeap statistics: (storage request / allocation)\n" \
	"  malloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  aalloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  calloc    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  memalign  >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  amemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  cmemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  resize    >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  realloc   >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	"  free      !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n" \
	"  return    pulls %'u; pushes %'u; storage %'llu / %'llu bytes\n" \
	"  sbrk      calls %'u; storage %'llu bytes\n" \
	"  mmap      calls %'u; storage %'llu / %'llu bytes\n" \
	"  munmap    calls %'u; storage %'llu / %'llu bytes\n" \
	"  threads   started %'lu; exited %'lu\n" \
	"  heaps     new %'lu; reused %'lu\n"

// Use "write" because streams may be shutdown when calls are made.
static int printStats( HeapStatistics & stats ) with( heapMaster, stats ) { // see malloc_stats
	char helpText[sizeof(prtFmt) + 1024];		// space for message and values
	return __cfaabi_bits_print_buffer( stats_fd, helpText, sizeof(helpText), prtFmt,
		malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc,
		aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc,
		calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc,
		memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc,
		amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc,
		cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc,
		resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc,
		realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc,
		free_calls, free_null_calls, free_storage_request, free_storage_alloc,
		return_pulls, return_pushes, return_storage_request, return_storage_alloc,
		sbrk_calls, sbrk_storage,
		mmap_calls, mmap_storage_request, mmap_storage_alloc,
		munmap_calls, munmap_storage_request, munmap_storage_alloc,
		threads_started, threads_exited,
		new_heap, reused_heap
	);
} // printStats

#define prtFmtXML \
	"<malloc version=\"1\">\n" \
	"<heap nr=\"0\">\n" \
	"<sizes>\n" \
	"</sizes>\n" \
	"<total type=\"malloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"aalloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"calloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"memalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"amemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"cmemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"resize\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"free\" !null=\"%'u;\" 0 null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"return\" pulls=\"%'u;\" 0 pushes=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n" \
	"<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"threads\" started=\"%'lu;\" exited=\"%'lu\"/>\n" \
	"<total type=\"heaps\" new=\"%'lu;\" reused=\"%'lu\"/>\n" \
	"</malloc>"

static int printStatsXML( HeapStatistics & stats, FILE * stream ) with( heapMaster, stats ) { // see malloc_info
	char helpText[sizeof(prtFmtXML) + 1024];	// space for message and values
	return __cfaabi_bits_print_buffer( fileno( stream ), helpText, sizeof(helpText), prtFmtXML,
		malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc,
		aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc,
		calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc,
		memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc,
		amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc,
		cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc,
		resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc,
		realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc,
		free_calls, free_null_calls, free_storage_request, free_storage_alloc,
		return_pulls, return_pushes, return_storage_request, return_storage_alloc,
		sbrk_calls, sbrk_storage,
		mmap_calls, mmap_storage_request, mmap_storage_alloc,
		munmap_calls, munmap_storage_request, munmap_storage_alloc,
		threads_started, threads_exited,
		new_heap, reused_heap
	);
} // printStatsXML

static HeapStatistics & collectStats( HeapStatistics & stats ) with( heapMaster ) {
	lock( mgrLock );

	stats += heapMaster.stats;
	for ( Heap * heap = heapManagersList; heap; heap = heap->nextHeapManager ) {
		stats += heap->stats;
	} // for

	unlock( mgrLock );
	return stats;
} // collectStats
#endif // __STATISTICS__


static bool setMmapStart( size_t value ) with( heapMaster ) { // true => mmapped, false => sbrk
	if ( value < __page_size || bucketSizes[NoBucketSizes - 1] < value ) return false;
	mmapStart = value;				// set global

	// find the closest bucket size less than or equal to the mmapStart size
	maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search
	verify( maxBucketsUsed < NoBucketSizes );	// subscript failure ?
	verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
	return true;
} // setMmapStart
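
// Presumably the backend for tuning the sbrk/mmap crossover, e.g. via mallopt( M_MMAP_THRESHOLD, ... )
// whose option constant is defined above; the call site is outside this section, so that pairing is an
// assumption. For example, setMmapStart( 256 * 1024 + 1 ) routes allocations of 256K+1 bytes and
// larger to mmap while smaller ones continue to use the sbrk buckets.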


// <-------+----------------------------------------------------> bsize (bucket size)
// |header |addr
//==================================================================================
// align/offset |
// <-----------------<------------+-----------------------------> bsize (bucket size)
// |fake-header | addr
#define HeaderAddr( addr ) ((Heap.Storage.Header *)( (char *)addr - sizeof(Heap.Storage) ))
#define RealHeader( header ) ((Heap.Storage.Header *)((char *)header - header->kind.fake.offset))

// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
// |header |addr
//==================================================================================
// align/offset |
// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
// |fake-header |addr
#define DataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
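
// A worked example of the fake-header arithmetic (illustrative, assuming sizeof(Heap.Storage) == 16):
// given a user address addr from an aligned allocation, HeaderAddr( addr ) backs up 16 bytes to the
// fake header, and RealHeader backs up kind.fake.offset further bytes to the block's real header;
// DataStorage then yields the usable bytes after the headers, e.g. a 128-byte bucket whose addr sits
// 32 bytes past the real header leaves 128 - 32 == 96 usable bytes.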


inline __attribute__((always_inline))
static void checkAlign( size_t alignment ) {
	if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) {
		abort( "**** Error **** alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
	} // if
} // checkAlign


inline __attribute__((always_inline))
static void checkHeader( bool check, const char name[], void * addr ) {
	if ( unlikely( check ) ) {			// bad address ?
		abort( "**** Error **** attempt to %s storage %p with address outside the heap.\n"
			   "Possible cause is duplicate free on same block or overwriting of memory.",
			   name, addr );
	} // if
} // checkHeader


// Manipulate sticky bits stored in unused 3 low-order bits of an address.
//   bit0 => alignment => fake header
//   bit1 => zero filled (calloc)
//   bit2 => mapped allocation versus sbrk
#define StickyBits( header ) (((header)->kind.real.blockSize & 0x7))
#define ClearStickyBits( addr ) (typeof(addr))((uintptr_t)(addr) & ~7)
#define MarkAlignmentBit( align ) ((align) | 1)
#define AlignmentBit( header ) ((((header)->kind.fake.alignment) & 1))
#define ClearAlignmentBit( header ) (((header)->kind.fake.alignment) & ~1)
#define ZeroFillBit( header ) ((((header)->kind.real.blockSize) & 2))
#define ClearZeroFillBit( header ) ((((header)->kind.real.blockSize) &= ~2))
#define MarkZeroFilledBit( header ) ((header)->kind.real.blockSize |= 2)
#define MmappedBit( header ) ((((header)->kind.real.blockSize) & 4))
#define MarkMmappedBit( size ) ((size) | 4)
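
// A worked bit-pattern example (illustrative): an mmapped, zero-filled block of size 0x1000 stores
// blockSize == 0x1000 | 4 | 2 == 0x1006; MmappedBit and ZeroFillBit test bits 2 and 1, and
// ClearStickyBits( 0x1006 ) recovers 0x1000. This encoding works because block sizes and addresses
// are multiples of libAlign(), so their three low-order bits are otherwise always zero.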


inline __attribute__((always_inline))
static void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) {
	if ( unlikely( AlignmentBit( header ) ) ) {	// fake header ?
		alignment = ClearAlignmentBit( header ); // clear flag from value
		#ifdef __CFA_DEBUG__
		checkAlign( alignment );		// check alignment
		#endif // __CFA_DEBUG__
		header = RealHeader( header );		// backup from fake to real header
	} else {
		alignment = libAlign();			// => no fake header
	} // if
} // fakeHeader


inline __attribute__((always_inline))
static bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header,
					 Heap.FreeHeader *& freeHead, size_t & size, size_t & alignment ) with( heapMaster, *heapManager ) {
	header = HeaderAddr( addr );

	#ifdef __CFA_DEBUG__
	checkHeader( header < (Heap.Storage.Header *)heapBegin, name, addr ); // bad low address ?
	#endif // __CFA_DEBUG__

	if ( likely( ! StickyBits( header ) ) ) {	// no sticky bits ?
		freeHead = (Heap.FreeHeader *)(header->kind.real.home);
		alignment = libAlign();
	} else {
		fakeHeader( header, alignment );
		if ( unlikely( MmappedBit( header ) ) ) { // mmapped ?
			verify( addr < heapBegin || heapEnd < addr );
			size = ClearStickyBits( header->kind.real.blockSize ); // mmap size
			return true;
		} // if

		freeHead = (Heap.FreeHeader *)(ClearStickyBits( header->kind.real.home ));
	} // if
	size = freeHead->blockSize;

	#ifdef __CFA_DEBUG__
	checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)

	Heap * homeManager;
	if ( unlikely( freeHead == 0p ||		// freed and only free-list node => null link
				   // freed and link points at another free block not to a bucket in the bucket array.
				   (homeManager = freeHead->homeManager, freeHead < &homeManager->freeLists[0] ||
					&homeManager->freeLists[NoBucketSizes] <= freeHead ) ) ) {
		abort( "**** Error **** attempt to %s storage %p with corrupted header.\n"
			   "Possible cause is duplicate free on same block or overwriting of header information.",
			   name, addr );
	} // if
	#endif // __CFA_DEBUG__

	return false;
} // headers


static void * master_extend( size_t size ) with( heapMaster ) {
	lock( extLock );

	ptrdiff_t rem = heapRemaining - size;
	if ( unlikely( rem < 0 ) ) {
		// If the size requested is bigger than the current remaining storage, increase the size of the heap.

		size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() );
		// Do not call abort or strerror( errno ) as they may call malloc.
		if ( unlikely( sbrk( increase ) == (void *)-1 ) ) { // failed, no memory ?
			unlock( extLock );
			abort( NO_MEMORY_MSG, size );		// no memory
		} // if

		// Make storage executable for thunks.
		if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
			unlock( extLock );
			abort( "**** Error **** attempt to make heap storage executable for thunks and mprotect failed with errno %d.", errno );
		} // if

		rem = heapRemaining + increase - size;

		#ifdef __STATISTICS__
		sbrk_calls += 1;
		sbrk_storage += increase;
		#endif // __STATISTICS__
	} // if

	Heap.Storage * block = (Heap.Storage *)heapEnd;
	heapRemaining = rem;
	heapEnd = (char *)heapEnd + size;

	unlock( extLock );
	return block;
} // master_extend


__attribute__(( noinline ))
static void * manager_extend( size_t size ) with( *heapManager ) {
	ptrdiff_t rem = heapReserve - size;

	if ( unlikely( rem < 0 ) ) {			// negative ?
		// If the size requested is bigger than the current remaining reserve, use the current reserve to populate
		// smaller freeLists, and increase the reserve.

		rem = heapReserve;			// positive

		if ( rem >= bucketSizes[0] ) {		// minimal size ? otherwise ignore
			size_t bucket;
			#ifdef FASTLOOKUP
			if ( likely( rem < LookupSizes ) ) bucket = lookup[rem];
			else
			#endif // FASTLOOKUP
				bucket = Bsearchl( rem, bucketSizes, heapMaster.maxBucketsUsed );
			verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed );
			Heap.FreeHeader * freeHead = &(freeLists[bucket]);

			// The remaining storage may not be bucket size, whereas all other allocations are. Round down to previous
			// bucket size in this case.
			if ( unlikely( freeHead->blockSize > (size_t)rem ) ) freeHead -= 1;
			Heap.Storage * block = (Heap.Storage *)heapBuffer;

			block->header.kind.real.next = freeHead->freeList; // push on stack
			freeHead->freeList = block;
		} // if

		size_t increase = ceiling( size > ( heapMaster.heapExpand / 10 ) ? size : ( heapMaster.heapExpand / 10 ), libAlign() );
		heapBuffer = master_extend( increase );
		rem = increase - size;
	} // if

	Heap.Storage * block = (Heap.Storage *)heapBuffer;
	heapReserve = rem;
	heapBuffer = (char *)heapBuffer + size;

	return block;
} // manager_extend


#define BOOT_HEAP_MANAGER \
	if ( unlikely( ! heapMasterBootFlag ) ) { \
		heapManagerCtor(); /* trigger for first heap */ \
	} /* if */

#ifdef __STATISTICS__
#define STAT_NAME __counter
#define STAT_PARM , unsigned int STAT_NAME
#define STAT_ARG( name ) , name
#define STAT_0_CNT( counter ) stats.counters[counter].calls_0 += 1
#else
#define STAT_NAME
#define STAT_PARM
#define STAT_ARG( name )
#define STAT_0_CNT( counter )
#endif // __STATISTICS__

#define PROLOG( counter, ... ) \
	BOOT_HEAP_MANAGER; \
	if ( unlikely( size == 0 ) ||			/* 0 BYTE ALLOCATION RETURNS NULL POINTER */ \
		unlikely( size > ULONG_MAX - sizeof(Heap.Storage) ) ) { /* error check */ \
		STAT_0_CNT( counter ); \
		__VA_ARGS__; \
		return 0p; \
	} /* if */
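
// How these macros compose (illustrative): with __STATISTICS__ defined, doMalloc below has signature
// doMalloc( size_t size, unsigned int __counter ), and a public allocator presumably invokes it as
// doMalloc( size STAT_ARG( MALLOC ) ); the call sites are outside this section, so that exact form is
// an assumption. PROLOG bootstraps the heap on first use and filters size == 0 and overflowing
// requests, counting them in the caller's "0 calls" statistic.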


#define SCRUB_SIZE 1024lu
// Do not use '\xfe' for scrubbing because dereferencing an address composed of it causes a SIGSEGV *without* a valid IP
// pointer in the interrupt frame.
#define SCRUB '\xff'

static void * doMalloc( size_t size STAT_PARM ) libcfa_nopreempt with( *heapManager ) {
	PROLOG( STAT_NAME );

	verify( heapManager );
	Heap.Storage * block;				// pointer to new block of storage

	// Look up size in the size list. Make sure the user request includes space for the header that must be allocated
	// along with the block and is a multiple of the alignment size.
	size_t tsize = size + sizeof(Heap.Storage);

	#ifdef __STATISTICS__
	stats.counters[STAT_NAME].calls += 1;
	stats.counters[STAT_NAME].request += size;
	#endif // __STATISTICS__

	#ifdef __CFA_DEBUG__
	allocUnfreed += size;
	#endif // __CFA_DEBUG__

	if ( likely( tsize < heapMaster.mmapStart ) ) {	// small size => sbrk
		size_t bucket;
		#ifdef FASTLOOKUP
		if ( likely( tsize < LookupSizes ) ) bucket = lookup[tsize];
		else
		#endif // FASTLOOKUP
			bucket = Bsearchl( tsize, bucketSizes, heapMaster.maxBucketsUsed );
		verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed );
		Heap.FreeHeader * freeHead = &freeLists[bucket];

		verify( freeHead <= &freeLists[heapMaster.maxBucketsUsed] ); // subscripting error ?
		verify( tsize <= freeHead->blockSize );	// search failure ?

		tsize = freeHead->blockSize;		// total space needed for request
		#ifdef __STATISTICS__
		stats.counters[STAT_NAME].alloc += tsize;
		#endif // __STATISTICS__

		// Spin until the lock is acquired for this particular size of block.

		#if BUCKETLOCK == SPINLOCK
		block = freeHead->freeList;		// remove node from stack
		#else
		block = pop( freeHead->freeList );
		#endif // BUCKETLOCK
		if ( unlikely( block == 0p ) ) {	// no free block ?
			#ifdef OWNERSHIP
			// Freelist for that size is empty, so carve it out of the heap, if there is enough left, or get some more
			// and then carve it off.
			#ifdef RETURNSPIN
			#if BUCKETLOCK == SPINLOCK
			lock( freeHead->returnLock );
			block = freeHead->returnList;
			freeHead->returnList = 0p;
			unlock( freeHead->returnLock );
			#else
			block = __atomic_exchange_n( &freeHead->returnList, 0p, __ATOMIC_SEQ_CST );
			#endif // RETURNSPIN

			if ( likely( block == 0p ) ) {	// return list also empty?
			#endif // OWNERSHIP
				// Do not leave kernel thread as manager_extend accesses heapManager.
				disable_interrupts();
				block = (Heap.Storage *)manager_extend( tsize ); // mutual exclusion on call
				enable_interrupts( false );

				// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

				#ifdef __CFA_DEBUG__
				// Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first 1024 bytes.
				memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) );
				#endif // __CFA_DEBUG__
			#endif // BUCKETLOCK
			#ifdef OWNERSHIP
			} else {			// merge returnList into freeHead
				#ifdef __STATISTICS__
				stats.return_pulls += 1;
				#endif // __STATISTICS__

				// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

				freeHead->freeList = block->header.kind.real.next;
			} // if
			#endif // OWNERSHIP
		} else {
			// Memory is scrubbed in doFree.
			freeHead->freeList = block->header.kind.real.next;
		} // if

		block->header.kind.real.home = freeHead; // pointer back to free list of appropriate size
	} else {					// large size => mmap
		if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p;
		tsize = ceiling2( tsize, __page_size );	// must be multiple of page size
		#ifdef __STATISTICS__
		stats.counters[STAT_NAME].alloc += tsize;
		stats.mmap_calls += 1;
		stats.mmap_storage_request += size;
		stats.mmap_storage_alloc += tsize;
		#endif // __STATISTICS__

		disable_interrupts();
		block = (Heap.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
		enable_interrupts( false );

		// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

		if ( unlikely( block == (Heap.Storage *)MAP_FAILED ) ) { // failed ?
			if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
			// Do not call strerror( errno ) as it may call malloc.
			abort( "**** Error **** attempt to allocate large object (> %zu) of size %zu bytes and mmap failed with errno %d.", heapMaster.mmapStart, size, errno );
		} // if
		block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap

		#ifdef __CFA_DEBUG__
		// Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first 1024 bytes. The rest of
		// the storage is set to 0 by mmap.
		memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) );
		#endif // __CFA_DEBUG__
	} // if

	block->header.kind.real.size = size;		// store allocation size
	void * addr = &(block->data);			// adjust off header to user bytes
	verify( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ?

	#ifdef __CFA_DEBUG__
	if ( traceHeap() ) {
		char helpText[64];
		__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
			"%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); // print debug/nodebug
	} // if
	#endif // __CFA_DEBUG__

	// poll_interrupts();				// call rollforward

	return addr;
} // doMalloc
1071 |
|
---|
1072 |
|
---|
static void doFree( void * addr ) libcfa_nopreempt with( *heapManager ) {
	verify( addr );

	// detect free after thread-local storage destruction and use global stats in that case

	Heap.Storage.Header * header;
	Heap.FreeHeader * freeHead;
	size_t size, alignment;

	bool mapped = headers( "free", addr, header, freeHead, size, alignment );
	#if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
	size_t rsize = header->kind.real.size;			// optimization
	#endif // __STATISTICS__ || __CFA_DEBUG__

	#ifdef __STATISTICS__
	stats.free_storage_request += rsize;
	stats.free_storage_alloc += size;
	#endif // __STATISTICS__

	#ifdef __CFA_DEBUG__
	allocUnfreed -= rsize;
	#endif // __CFA_DEBUG__

	if ( unlikely( mapped ) ) {						// mmapped ?
		#ifdef __STATISTICS__
		stats.munmap_calls += 1;
		stats.munmap_storage_request += rsize;
		stats.munmap_storage_alloc += size;
		#endif // __STATISTICS__

		// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

		// Does not matter where this storage is freed.
		if ( unlikely( munmap( header, size ) == -1 ) ) {
			// Do not call strerror( errno ) as it may call malloc.
			abort( "**** Error **** attempt to deallocate large object %p and munmap failed with errno %d.\n"
				   "Possible cause is invalid delete pointer: either not allocated or with corrupt header.",
				   addr, errno );
		} // if
	} else {
		#ifdef __CFA_DEBUG__
		// memset is NOT always inlined!
		disable_interrupts();
		// Scrub old memory so subsequent usages might fail. Only scrub the first/last SCRUB_SIZE bytes.
		char * data = ((Heap.Storage *)header)->data;	// data address
		size_t dsize = size - sizeof(Heap.Storage);	// data size
		if ( dsize <= SCRUB_SIZE * 2 ) {
			memset( data, SCRUB, dsize );			// scrub all
		} else {
			memset( data, SCRUB, SCRUB_SIZE );		// scrub front
			memset( data + dsize - SCRUB_SIZE, SCRUB, SCRUB_SIZE ); // scrub back
		} // if
		enable_interrupts( false );
		#endif // __CFA_DEBUG__

		if ( likely( heapManager == freeHead->homeManager ) ) { // belongs to this thread
			header->kind.real.next = freeHead->freeList; // push on stack
			freeHead->freeList = (Heap.Storage *)header;
		} else {									// return to thread owner
			verify( heapManager );

			#ifdef OWNERSHIP
			#ifdef RETURNSPIN
			lock( freeHead->returnLock );
			header->kind.real.next = freeHead->returnList; // push to bucket return list
			freeHead->returnList = (Heap.Storage *)header;
			unlock( freeHead->returnLock );
			#else									// lock free
			header->kind.real.next = freeHead->returnList; // link new node to top node
			// CAS resets header->kind.real.next = freeHead->returnList on failure
			while ( ! __atomic_compare_exchange_n( &freeHead->returnList, &header->kind.real.next, header,
												   false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) );
			#endif // RETURNSPIN

			#else									// no OWNERSHIP

			// compute the equivalent bucket in this heap from the home heap's bucket index
			freeHead = &heapManager->freeLists[ClearStickyBits( header->kind.real.home ) - &freeHead->homeManager->freeLists[0]];
			header->kind.real.next = freeHead->freeList; // push on stack
			freeHead->freeList = (Heap.Storage *)header;
			#endif // ! OWNERSHIP

			#ifdef __STATISTICS__
			stats.return_pushes += 1;
			stats.return_storage_request += rsize;
			stats.return_storage_alloc += size;
			#endif // __STATISTICS__

			// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.
		} // if
	} // if

	#ifdef __CFA_DEBUG__
	if ( traceHeap() ) {
		char helpText[64];
		__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
				"Free( %p ) size:%zu\n", addr, size );	// print debug/nodebug
	} // if
	#endif // __CFA_DEBUG__

	// poll_interrupts();							// call rollforward
} // doFree


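// The lock-free OWNERSHIP return path above is a Treiber-stack push. A minimal
// free-standing sketch of the same idiom in plain C11 <stdatomic.h>, rather than
// this file's types and gcc builtins (illustrative only, not part of the heap):
//
//   #include <stdatomic.h>
//   typedef struct Node { struct Node * next; } Node;
//
//   void push( Node * _Atomic * top, Node * n ) {
//       n->next = atomic_load( top );              // link new node to current top
//       // on failure, the compare-exchange reloads n->next from *top, then retry
//       while ( ! atomic_compare_exchange_weak( top, &n->next, n ) );
//   }
//
// A push is ABA-safe because the CAS only publishes a new node; only a pop that
// dereferences a possibly recycled top node needs further care.
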
size_t prtFree( Heap & manager ) with( manager ) {
	size_t total = 0;
	#ifdef __STATISTICS__
	__cfaabi_bits_acquire();
	__cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" );
	#endif // __STATISTICS__
	for ( unsigned int i = 0; i < heapMaster.maxBucketsUsed; i += 1 ) {
		size_t size = freeLists[i].blockSize;
		#ifdef __STATISTICS__
		unsigned int N = 0;
		#endif // __STATISTICS__

		#if BUCKETLOCK == SPINLOCK
		for ( Heap.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
		#else
		for(;;) {
//			for ( Heap.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {
//			for ( Heap.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */) {
//			Heap.Storage * temp = p->header.kind.real.next.top; // FIX ME: direct assignment fails, initialization works
//			typeof(p) temp = (( p )`next)->top;		// FIX ME: direct assignment fails, initialization works
//			p = temp;
		#endif // BUCKETLOCK
			total += size;
			#ifdef __STATISTICS__
			N += 1;
			#endif // __STATISTICS__
		} // for

		#ifdef __STATISTICS__
		__cfaabi_bits_print_nolock( STDERR_FILENO, "%7zu, %-7u  ", size, N );
		if ( (i + 1) % 8 == 0 ) __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" );
		#endif // __STATISTICS__
	} // for
	#ifdef __STATISTICS__
	__cfaabi_bits_print_nolock( STDERR_FILENO, "\ntotal free blocks:%zu\n", total );
	__cfaabi_bits_release();
	#endif // __STATISTICS__
	return (char *)heapMaster.heapEnd - (char *)heapMaster.heapBegin - total;
} // prtFree


#ifdef __STATISTICS__
static void incCalls( intptr_t statName ) libcfa_nopreempt {
	heapManager->stats.counters[statName].calls += 1;
} // incCalls

static void incZeroCalls( intptr_t statName ) libcfa_nopreempt {
	heapManager->stats.counters[statName].calls_0 += 1;
} // incZeroCalls
#endif // __STATISTICS__

#ifdef __CFA_DEBUG__
static void incUnfreed( intptr_t offset ) libcfa_nopreempt {
	heapManager->allocUnfreed += offset;
} // incUnfreed
#endif // __CFA_DEBUG__


static void * memalignNoStats( size_t alignment, size_t size STAT_PARM ) {
	checkAlign( alignment );						// check alignment

	// if alignment <= default alignment or size == 0, do normal malloc as two headers are unnecessary
	if ( unlikely( alignment <= libAlign() || size == 0 ) ) return doMalloc( size STAT_ARG( STAT_NAME ) );

	// Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
	// administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
	//      .-------------v-----------------v----------------v----------,
	//      | Real Header | ... padding ... |  Fake Header   | data ... |
	//      `-------------^-----------------^-+--------------^----------'
	//      |<--------------------------------' offset/align |<-- alignment boundary

	// subtract libAlign() because it is already the minimum alignment
	// add sizeof(Storage) for fake header
	size_t offset = alignment - libAlign() + sizeof(Heap.Storage);
	char * addr = (char *)doMalloc( size + offset STAT_ARG( STAT_NAME ) );

	// address in the block of the "next" alignment address
	char * user = (char *)ceiling2( (uintptr_t)(addr + sizeof(Heap.Storage)), alignment );

	// address of header from malloc
	Heap.Storage.Header * realHeader = HeaderAddr( addr );
	realHeader->kind.real.size = size;				// correct size to eliminate above alignment offset
	#ifdef __CFA_DEBUG__
	incUnfreed( -offset );							// adjustment off the offset from call to doMalloc
	#endif // __CFA_DEBUG__

	// address of fake header *before* the alignment location
	Heap.Storage.Header * fakeHeader = HeaderAddr( user );

	// SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
	fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
	// SKULLDUGGERY: odd alignment implies fake header
	fakeHeader->kind.fake.alignment = MarkAlignmentBit( alignment );

	return user;
} // memalignNoStats
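
// A worked instance of the layout above, with illustrative numbers only (assuming
// libAlign() == 16 and sizeof(Heap.Storage) == 16; neither value is guaranteed):
//
//   memalignNoStats( 64, 100 ):
//     offset = 64 - 16 + 16 = 64, so doMalloc( 100 + 64 ) is requested;
//     user   = ceiling2( addr + 16, 64 ); since addr is already 16-byte aligned,
//              rounding adds at most 48, so user <= addr + 64 and the 100 data
//              bytes still fit inside the 164-byte block.
//
// The fake header's offset field lets free( user ) recover the real header, and
// the marked alignment bit distinguishes a fake header from a real one.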


//####################### Memory Allocation Routines ####################


extern "C" {
	// Allocates size bytes and returns a pointer to the allocated memory. The contents are undefined. If size is 0,
	// then malloc() returns a unique pointer value that can later be successfully passed to free().
	void * malloc( size_t size ) libcfa_public {
		return doMalloc( size STAT_ARG( MALLOC ) );
	} // malloc


	// Same as malloc() except size bytes is an array of dim elements each of elemSize bytes.
	void * aalloc( size_t dim, size_t elemSize ) libcfa_public {
		return doMalloc( dim * elemSize STAT_ARG( AALLOC ) );
	} // aalloc


	// Same as aalloc() with memory set to zero.
	void * calloc( size_t dim, size_t elemSize ) libcfa_public {
		size_t size = dim * elemSize;
		char * addr = (char *)doMalloc( size STAT_ARG( CALLOC ) );

		if ( unlikely( addr == 0p ) ) return 0p;	// stop further processing if 0p is returned

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, alignment;

		#ifndef __CFA_DEBUG__
		bool mapped =
		#endif // __CFA_DEBUG__
			headers( "calloc", addr, header, freeHead, bsize, alignment );

		#ifndef __CFA_DEBUG__
		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		if ( likely( ! mapped ) )
		#endif // __CFA_DEBUG__
			// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
			// `-header`-addr                      `-size
			memset( addr, '\0', size );				// set to zeros

		MarkZeroFilledBit( header );				// mark as zero fill
		return addr;
	} // calloc

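	// Sticky zero-fill sketch (hypothetical client code): the bit marked above
	// survives a growing realloc, which then zero fills only the added tail:
	//
	//   int * a = (int *)calloc( 10, sizeof(int) );	// zero filled, bit marked
	//   a = (int *)realloc( a, 20 * sizeof(int) );		// first 10 kept, new tail zeroed
	//   free( a );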

	// Change the size of the memory block pointed to by oaddr to size bytes. The contents are undefined. If oaddr is
	// 0p, then the call is equivalent to malloc(size), for all values of size; if size is equal to zero, and oaddr is
	// not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier
	// call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done.
	void * resize( void * oaddr, size_t size ) libcfa_public {
		if ( unlikely( oaddr == 0p ) ) {			// => malloc( size )
			return doMalloc( size STAT_ARG( RESIZE ) );
		} // if

		PROLOG( RESIZE, doFree( oaddr ) );			// => free( oaddr )

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, oalign;
		headers( "resize", oaddr, header, freeHead, bsize, oalign );

		size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
		// same size, DO NOT preserve STICKY PROPERTIES.
		if ( oalign == libAlign() && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
			ClearZeroFillBit( header );				// no alignment and turn off 0 fill
			#ifdef __CFA_DEBUG__
			incUnfreed( size - header->kind.real.size ); // adjustment off the size difference
			#endif // __CFA_DEBUG__
			header->kind.real.size = size;			// reset allocation size
			#ifdef __STATISTICS__
			incCalls( RESIZE );
			#endif // __STATISTICS__
			return oaddr;
		} // if

		// change size, DO NOT preserve STICKY PROPERTIES.
		doFree( oaddr );							// free previous storage

		return doMalloc( size STAT_ARG( RESIZE ) );	// create new area
	} // resize

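	// In contrast to realloc() below, resize() neither copies data nor preserves
	// the zero-fill/alignment properties, so it is the cheap choice when the old
	// contents are dead (hypothetical client code):
	//
	//   buf = resize( buf, 4096 );					// contents now undefined, may move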

	// Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of
	// the old and new sizes.
	void * realloc( void * oaddr, size_t size ) libcfa_public {
		if ( unlikely( oaddr == 0p ) ) {			// => malloc( size )
			return doMalloc( size STAT_ARG( REALLOC ) );
		} // if

		PROLOG( REALLOC, doFree( oaddr ) );			// => free( oaddr )

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, oalign;
		headers( "realloc", oaddr, header, freeHead, bsize, oalign );

		size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
		size_t osize = header->kind.real.size;		// old allocation size
		bool ozfill = ZeroFillBit( header );		// old allocation zero filled
		if ( unlikely( size <= odsize ) && odsize <= size * 2 ) { // allow up to 50% wasted storage
			#ifdef __CFA_DEBUG__
			incUnfreed( size - header->kind.real.size ); // adjustment off the size difference
			#endif // __CFA_DEBUG__
			header->kind.real.size = size;			// reset allocation size
			if ( unlikely( ozfill ) && size > osize ) {	// previous request zero fill and larger ?
				memset( (char *)oaddr + osize, '\0', size - osize ); // initialize added storage
			} // if
			#ifdef __STATISTICS__
			incCalls( REALLOC );
			#endif // __STATISTICS__
			return oaddr;
		} // if

		// change size and copy old content to new storage

		void * naddr;
		if ( likely( oalign <= libAlign() ) ) {		// previous request not aligned ?
			naddr = doMalloc( size STAT_ARG( REALLOC ) ); // create new area
		} else {
			naddr = memalignNoStats( oalign, size STAT_ARG( REALLOC ) ); // create new aligned area
		} // if

		headers( "realloc", naddr, header, freeHead, bsize, oalign );
		// To preserve prior fill, the entire bucket must be copied versus the size.
		memcpy( naddr, oaddr, min( osize, size ) );	// copy bytes
		doFree( oaddr );							// free previous storage

		if ( unlikely( ozfill ) ) {					// previous request zero fill ?
			MarkZeroFilledBit( header );			// mark new request as zero filled
			if ( size > osize ) {					// previous request larger ?
				memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
			} // if
		} // if
		return naddr;
	} // realloc


	// Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize.
	void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) libcfa_public {
		return realloc( oaddr, dim * elemSize );
	} // reallocarray


	// Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete)
	void * memalign( size_t alignment, size_t size ) libcfa_public {
		return memalignNoStats( alignment, size STAT_ARG( MEMALIGN ) );
	} // memalign


	// Same as aalloc() with memory alignment.
	void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
		return memalignNoStats( alignment, dim * elemSize STAT_ARG( AMEMALIGN ) );
	} // amemalign


	// Same as calloc() with memory alignment.
	void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
		size_t size = dim * elemSize;
		char * addr = (char *)memalignNoStats( alignment, size STAT_ARG( CMEMALIGN ) );

		if ( unlikely( addr == 0p ) ) return 0p;	// stop further processing if 0p is returned

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize;

		#ifndef __CFA_DEBUG__
		bool mapped =
		#endif // __CFA_DEBUG__
			headers( "cmemalign", addr, header, freeHead, bsize, alignment );

		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		#ifndef __CFA_DEBUG__
		if ( ! mapped )
		#endif // __CFA_DEBUG__
			// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
			// `-header`-addr                      `-size
			memset( addr, '\0', size );				// set to zeros

		MarkZeroFilledBit( header );				// mark as zero filled
		return addr;
	} // cmemalign


	// Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
	// of alignment. This requirement is universally ignored.
	void * aligned_alloc( size_t alignment, size_t size ) libcfa_public {
		return memalign( alignment, size );
	} // aligned_alloc


	// Allocates size bytes and places the address of the allocated memory in *memptr. The address of the allocated
	// memory shall be a multiple of alignment, which must be a power of two and a multiple of sizeof(void *). If size
	// is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to
	// free(3).
	int posix_memalign( void ** memptr, size_t alignment, size_t size ) libcfa_public {
		if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment
		*memptr = memalign( alignment, size );
		return 0;
	} // posix_memalign

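	// Usage sketch (hypothetical client code): an invalid alignment is reported
	// as EINVAL through the return code rather than errno:
	//
	//   void * vp;
	//   if ( posix_memalign( &vp, 64, 256 ) == 0 ) {	// 64 is a power of 2 >= sizeof(void *)
	//       // ... use vp ...
	//       free( vp );
	//   }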

	// Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
	// page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
	void * valloc( size_t size ) libcfa_public {
		return memalign( __page_size, size );
	} // valloc


	// Same as valloc but rounds size to multiple of page size.
	void * pvalloc( size_t size ) libcfa_public {
		return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size
	} // pvalloc

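	// For example (hypothetical client code, assuming a 4K page):
	//   pvalloc( 100 ) behaves like memalign( 4096, 4096 ), whereas
	//   valloc( 100 ) behaves like memalign( 4096, 100 ).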

	// Frees the memory space pointed to by ptr, which must have been returned by a previous call to malloc(), calloc()
	// or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is
	// 0p, no operation is performed.
	void free( void * addr ) libcfa_public {
		// verify( heapManager );

		if ( unlikely( addr == 0p ) ) {				// special case
			#ifdef __STATISTICS__
			if ( heapManager )
				incZeroCalls( FREE );
			#endif // __STATISTICS__
			return;
		} // if

		#ifdef __STATISTICS__
		incCalls( FREE );
		#endif // __STATISTICS__

		doFree( addr );								// handles heapManager == 0p
	} // free


	// Returns the alignment of an allocation.
	size_t malloc_alignment( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {	// fake header ?
			return ClearAlignmentBit( header );		// clear flag from value
		} else {
			return libAlign();						// minimum alignment
		} // if
	} // malloc_alignment


	// Returns true if the allocation is zero filled, e.g., allocated by calloc().
	bool malloc_zero_fill( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return false;	// null allocation is not zero fill
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {	// fake header ?
			header = RealHeader( header );			// backup from fake to real header
		} // if
		return ZeroFillBit( header );				// zero filled ?
	} // malloc_zero_fill


	// Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T).
	size_t malloc_size( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return 0;		// null allocation has zero size
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {	// fake header ?
			header = RealHeader( header );			// backup from fake to real header
		} // if
		return header->kind.real.size;
	} // malloc_size


	// Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by
	// malloc or a related function.
	size_t malloc_usable_size( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return 0;		// null allocation has 0 size
		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, alignment;

		headers( "malloc_usable_size", addr, header, freeHead, bsize, alignment );
		return DataStorage( bsize, addr, header );	// data storage in bucket
	} // malloc_usable_size

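	// Introspection sketch tying the four queries above together (hypothetical
	// client code):
	//
	//   char * p = (char *)cmemalign( 64, 10, sizeof(char) );
	//   malloc_alignment( p );						// 64, from the fake header
	//   malloc_zero_fill( p );						// true, cmemalign zero fills
	//   malloc_size( p );							// 10, the requested size
	//   malloc_usable_size( p );					// >= 10, the bucket's data capacity
	//   free( p );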

	// Prints (on default standard error) statistics about memory allocated by malloc and related functions.
	void malloc_stats( void ) libcfa_public {
		#ifdef __STATISTICS__
		HeapStatistics stats;
		HeapStatisticsCtor( stats );
		if ( printStats( collectStats( stats ) ) == -1 ) {
		#else
		#define MALLOC_STATS_MSG "malloc_stats statistics disabled.\n"
		if ( write( STDERR_FILENO, MALLOC_STATS_MSG, sizeof( MALLOC_STATS_MSG ) - 1 /* size includes '\0' */ ) == -1 ) {
		#endif // __STATISTICS__
			abort( "**** Error **** write failed in malloc_stats" );
		} // if
	} // malloc_stats


	// Changes the file descriptor where malloc_stats() writes statistics.
	int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public {
		#ifdef __STATISTICS__
		int temp = heapMaster.stats_fd;
		heapMaster.stats_fd = fd;
		return temp;
		#else
		return -1;									// unsupported
		#endif // __STATISTICS__
	} // malloc_stats_fd


	// Prints an XML string that describes the current state of the memory-allocation implementation in the caller.
	// The string is printed on the file stream stream. The exported string includes information about all arenas (see
	// malloc).
	int malloc_info( int options, FILE * stream __attribute__(( unused )) ) libcfa_public {
		if ( options != 0 ) { errno = EINVAL; return -1; }
		#ifdef __STATISTICS__
		HeapStatistics stats;
		HeapStatisticsCtor( stats );
		return printStatsXML( collectStats( stats ), stream ); // returns bytes written or -1
		#else
		return 0;									// unsupported
		#endif // __STATISTICS__
	} // malloc_info


	// Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The option
	// argument specifies the parameter to be modified, and value specifies the new value for that parameter.
	int mallopt( int option, int value ) libcfa_public {
		if ( value < 0 ) return 0;
		choose( option ) {
		  case M_TOP_PAD:
			heapMaster.heapExpand = ceiling2( value, __page_size );
			return 1;
		  case M_MMAP_THRESHOLD:
			if ( setMmapStart( value ) ) return 1;
		} // choose
		return 0;									// error, unsupported
	} // mallopt

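	// For example (hypothetical client code):
	//   mallopt( M_MMAP_THRESHOLD, 64 * 1024 );	// mmap requests >= 64K
	//   mallopt( M_TOP_PAD, 2 * 1024 * 1024 );		// extend sbrk area 2M at a time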

	// Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument).
	int malloc_trim( size_t ) libcfa_public {
		return 0;									// => impossible to release memory
	} // malloc_trim


	// Records the current state of all malloc internal bookkeeping variables (but not the actual contents of the heap
	// or the state of malloc_hook function pointers). The state is recorded in a system-dependent opaque data
	// structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function
	// result. (The caller must free this memory.)
	void * malloc_get_state( void ) libcfa_public {
		return 0p;									// unsupported
	} // malloc_get_state


	// Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data
	// structure pointed to by state.
	int malloc_set_state( void * ) libcfa_public {
		return 0;									// unsupported
	} // malloc_set_state


	// Sets the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation.
	__attribute__((weak)) size_t malloc_expansion() libcfa_public { return __CFA_DEFAULT_HEAP_EXPANSION__; }

	// Sets the crossover point between allocations occurring in the sbrk area or separately mmapped.
	__attribute__((weak)) size_t malloc_mmap_start() libcfa_public { return __CFA_DEFAULT_MMAP_START__; }

	// Amount subtracted to adjust for unfreed program storage (debug only).
	__attribute__((weak)) size_t malloc_unfreed() libcfa_public { return __CFA_DEFAULT_HEAP_UNFREED__; }
} // extern "C"


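// Because the three knobs above are weak symbols, an application can replace a
// default at link time by supplying a strong definition, e.g. (hypothetical user
// code, not part of this file):
//
//   extern "C" size_t malloc_expansion() { return 100 * 1024 * 1024; } // extend heap 100M at a time
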
// Must have CFA linkage to overload with C linkage realloc.
void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public {
	if ( unlikely( oaddr == 0p ) ) {				// => malloc( size )
		return memalignNoStats( nalign, size STAT_ARG( RESIZE ) );
	} // if

	PROLOG( RESIZE, doFree( oaddr ) );				// => free( oaddr )

	// Attempt to reuse existing alignment.
	Heap.Storage.Header * header = HeaderAddr( oaddr );
	bool isFakeHeader = AlignmentBit( header );		// old fake header ?
	size_t oalign;

	if ( unlikely( isFakeHeader ) ) {
		checkAlign( nalign );						// check alignment
		oalign = ClearAlignmentBit( header );		// old alignment
		if ( unlikely( (uintptr_t)oaddr % nalign == 0 // lucky match ?
			 && ( oalign <= nalign					// going down
				  || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
			) ) {
			HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
			Heap.FreeHeader * freeHead;
			size_t bsize, oalign;
			headers( "resize", oaddr, header, freeHead, bsize, oalign );
			size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket

			if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted data storage
				HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
				ClearZeroFillBit( header );			// turn off 0 fill
				#ifdef __CFA_DEBUG__
				incUnfreed( size - header->kind.real.size ); // adjustment off the size difference
				#endif // __CFA_DEBUG__
				header->kind.real.size = size;		// reset allocation size
				#ifdef __STATISTICS__
				incCalls( RESIZE );
				#endif // __STATISTICS__
				return oaddr;
			} // if
		} // if
	} else if ( ! isFakeHeader						// old real header (aligned on libAlign) ?
				&& nalign == libAlign() ) {			// new alignment also on libAlign => no fake header needed
		return resize( oaddr, size );				// duplicate special case checks
	} // if

	// change size, DO NOT preserve STICKY PROPERTIES.
	doFree( oaddr );								// free previous storage
	return memalignNoStats( nalign, size STAT_ARG( RESIZE ) ); // create new aligned area
} // resize

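// Alignment-aware usage sketch (hypothetical client code): the extra argument
// lets an allocation change alignment without a manual allocate/copy/free cycle:
//
//   p = resize( p, 4096, n );						// p now n bytes on a 4096-byte boundary, contents undefined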

void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public {
	if ( unlikely( oaddr == 0p ) ) {				// => malloc( size )
		return memalignNoStats( nalign, size STAT_ARG( REALLOC ) );
	} // if

	PROLOG( REALLOC, doFree( oaddr ) );				// => free( oaddr )

	// Attempt to reuse existing alignment.
	Heap.Storage.Header * header = HeaderAddr( oaddr );
	bool isFakeHeader = AlignmentBit( header );		// old fake header ?
	size_t oalign;
	if ( unlikely( isFakeHeader ) ) {
		checkAlign( nalign );						// check alignment
		oalign = ClearAlignmentBit( header );		// old alignment
		if ( unlikely( (uintptr_t)oaddr % nalign == 0 // lucky match ?
			 && ( oalign <= nalign					// going down
				  || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
			) ) {
			HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
			return realloc( oaddr, size );			// duplicate special case checks
		} // if
	} else if ( ! isFakeHeader						// old real header (aligned on libAlign) ?
				&& nalign == libAlign() ) {			// new alignment also on libAlign => no fake header needed
		return realloc( oaddr, size );				// duplicate special case checks
	} // if

	Heap.FreeHeader * freeHead;
	size_t bsize;
	headers( "realloc", oaddr, header, freeHead, bsize, oalign );

	// change size and copy old content to new storage

	size_t osize = header->kind.real.size;			// old allocation size
	bool ozfill = ZeroFillBit( header );			// old allocation zero filled

	void * naddr = memalignNoStats( nalign, size STAT_ARG( REALLOC ) ); // create new aligned area

	headers( "realloc", naddr, header, freeHead, bsize, oalign );
	memcpy( naddr, oaddr, min( osize, size ) );		// copy bytes
	doFree( oaddr );								// free previous storage

	if ( unlikely( ozfill ) ) {						// previous request zero fill ?
		MarkZeroFilledBit( header );				// mark new request as zero filled
		if ( size > osize ) {						// previous request larger ?
			memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
		} // if
	} // if
	return naddr;
} // realloc



void * reallocarray( void * oaddr, size_t nalign, size_t dim, size_t elemSize ) __THROW {
	return realloc( oaddr, nalign, dim * elemSize );
} // reallocarray


// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa -nodebug -O2 heap.cfa" //
// End: //