//
// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// heap.cfa --
//
// Author : Peter A. Buhr
// Created On : Tue Dec 19 21:58:35 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Fri Dec 30 08:37:37 2022
// Update Count : 1605
//

#include <stdio.h>
#include <string.h> // memset, memcpy
#include <limits.h> // ULONG_MAX
#include <errno.h> // errno, ENOMEM, EINVAL
#include <unistd.h> // STDERR_FILENO, sbrk, sysconf, write
#include <sys/mman.h> // mmap, munmap
extern "C" {
#include <sys/sysinfo.h> // get_nprocs
} // extern "C"

#include "heap.hfa"
#include "bits/align.hfa" // libAlign
#include "bits/defs.hfa" // likely, unlikely
#include "concurrency/kernel/fwd.hfa" // __POLL_PREEMPTION
#include "startup.hfa" // STARTUP_PRIORITY_MEMORY
#include "math.hfa" // ceiling, min
#include "bitmanip.hfa" // is_pow2, ceiling2

// supported mallopt options
#ifndef M_MMAP_THRESHOLD
#define M_MMAP_THRESHOLD (-1)
#endif // M_MMAP_THRESHOLD

#ifndef M_TOP_PAD
#define M_TOP_PAD (-2)
#endif // M_TOP_PAD

#define FASTLOOKUP // use O(1) table lookup from allocation size to bucket size
#define OWNERSHIP // return freed memory to owner thread
#define RETURNSPIN // toggle spinlock / lockfree queue
#if ! defined( OWNERSHIP ) && defined( RETURNSPIN )
#warning "RETURNSPIN is ignored without OWNERSHIP; suggest commenting out RETURNSPIN"
#endif // ! OWNERSHIP && RETURNSPIN

#define CACHE_ALIGN 64
#define CALIGN __attribute__(( aligned(CACHE_ALIGN) ))

#define TLSMODEL __attribute__(( tls_model("initial-exec") ))

//#define __STATISTICS__

enum {
	// The default heap expansion amount in units of bytes. When the current heap reaches the brk address, the brk
	// address is extended by the expansion amount.
	__CFA_DEFAULT_HEAP_EXPANSION__ = 10 * 1024 * 1024,

	// The mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; values
	// greater than or equal to this value are mmapped from the operating system.
	__CFA_DEFAULT_MMAP_START__ = 512 * 1024 + 1,

	// The default unfreed storage amount in units of bytes. When the C∀ program ends, it subtracts this amount from
	// the malloc/free counter to adjust for storage the program does not free.
	__CFA_DEFAULT_HEAP_UNFREED__ = 0
}; // enum


//####################### Heap Trace/Print ####################


static bool traceHeap = false;

inline bool traceHeap() libcfa_public { return traceHeap; }

bool traceHeapOn() libcfa_public {
	bool temp = traceHeap;
	traceHeap = true;
	return temp;
} // traceHeapOn

bool traceHeapOff() libcfa_public {
	bool temp = traceHeap;
	traceHeap = false;
	return temp;
} // traceHeapOff

bool traceHeapTerm() libcfa_public { return false; }


static bool prtFree = false;

bool prtFree() {
	return prtFree;
} // prtFree

bool prtFreeOn() {
	bool temp = prtFree;
	prtFree = true;
	return temp;
} // prtFreeOn

bool prtFreeOff() {
	bool temp = prtFree;
	prtFree = false;
	return temp;
} // prtFreeOff


//######################### Helpers #########################


// generic Bsearchl does not inline, so substitute with hand-coded binary-search.
inline __attribute__((always_inline))
static size_t Bsearchl( unsigned int key, const unsigned int vals[], size_t dim ) {
	size_t l = 0, m, h = dim;
	while ( l < h ) {
		m = (l + h) / 2;
		if ( (unsigned int &)(vals[m]) < key ) { // cast away const
			l = m + 1;
		} else {
			h = m;
		} // if
	} // while
	return l;
} // Bsearchl
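
// For example (illustrative only): with vals = { 16, 32, 48 } and dim = 3, Bsearchl( 20, vals, 3 ) returns index 1,
// the first element >= the key (a lower-bound search), and Bsearchl( 50, vals, 3 ) returns 3 (== dim) when no
// element is large enough.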


// pause to prevent excess processor bus usage
#if defined( __i386 ) || defined( __x86_64 )
	#define Pause() __asm__ __volatile__ ( "pause" : : : )
#elif defined(__ARM_ARCH)
	#define Pause() __asm__ __volatile__ ( "YIELD" : : : )
#else
	#error unsupported architecture
#endif

typedef volatile uintptr_t SpinLock_t;

static inline __attribute__((always_inline)) void lock( volatile SpinLock_t & slock ) {
	enum { SPIN_START = 4, SPIN_END = 64 * 1024, };
	unsigned int spin = SPIN_START;

	for ( unsigned int i = 1;; i += 1 ) {
		if ( slock == 0 && __atomic_test_and_set( &slock, __ATOMIC_ACQUIRE ) == 0 ) break; // Fence
		for ( volatile unsigned int s = 0; s < spin; s += 1 ) Pause(); // exponential spin
		spin += spin; // powers of 2
		//if ( i % 64 == 0 ) spin += spin; // slowly increase by powers of 2
		if ( spin > SPIN_END ) spin = SPIN_END; // cap spinning
	} // for
} // spin_lock

static inline __attribute__((always_inline)) void unlock( volatile SpinLock_t & slock ) {
	__atomic_clear( &slock, __ATOMIC_RELEASE ); // Fence
} // spin_unlock
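
// Illustrative use (not part of the allocator API):
//   static SpinLock_t exampleLock;   // zero-initialized => unlocked
//   lock( exampleLock );             // spin with exponential backoff until acquired
//   ... critical section ...
//   unlock( exampleLock );           // release fence makes writes visible to the next owner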


//####################### Heap Statistics ####################


#ifdef __STATISTICS__
enum { CntTriples = 12 }; // number of counter triples
enum { MALLOC, AALLOC, CALLOC, MEMALIGN, AMEMALIGN, CMEMALIGN, RESIZE, REALLOC, FREE };

struct StatsOverlay { // overlay for iteration
	unsigned int calls, calls_0;
	unsigned long long int request, alloc;
};

// Heap statistics counters.
union HeapStatistics {
	struct { // minimum qualification
		unsigned int malloc_calls, malloc_0_calls;
		unsigned long long int malloc_storage_request, malloc_storage_alloc;
		unsigned int aalloc_calls, aalloc_0_calls;
		unsigned long long int aalloc_storage_request, aalloc_storage_alloc;
		unsigned int calloc_calls, calloc_0_calls;
		unsigned long long int calloc_storage_request, calloc_storage_alloc;
		unsigned int memalign_calls, memalign_0_calls;
		unsigned long long int memalign_storage_request, memalign_storage_alloc;
		unsigned int amemalign_calls, amemalign_0_calls;
		unsigned long long int amemalign_storage_request, amemalign_storage_alloc;
		unsigned int cmemalign_calls, cmemalign_0_calls;
		unsigned long long int cmemalign_storage_request, cmemalign_storage_alloc;
		unsigned int resize_calls, resize_0_calls;
		unsigned long long int resize_storage_request, resize_storage_alloc;
		unsigned int realloc_calls, realloc_0_calls;
		unsigned long long int realloc_storage_request, realloc_storage_alloc;
		unsigned int free_calls, free_null_calls;
		unsigned long long int free_storage_request, free_storage_alloc;
		unsigned int return_pulls, return_pushes;
		unsigned long long int return_storage_request, return_storage_alloc;
		unsigned int mmap_calls, mmap_0_calls; // no zero calls
		unsigned long long int mmap_storage_request, mmap_storage_alloc;
		unsigned int munmap_calls, munmap_0_calls; // no zero calls
		unsigned long long int munmap_storage_request, munmap_storage_alloc;
	};
	struct StatsOverlay counters[CntTriples]; // overlay for iteration
}; // HeapStatistics
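
// The anonymous struct and the counters array overlay the same storage, so each counter can be accessed by name or
// iterated generically, e.g. (illustrative mapping): stats.malloc_calls is stats.counters[MALLOC].calls and
// stats.malloc_storage_request is stats.counters[MALLOC].request.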

static_assert( sizeof(HeapStatistics) == CntTriples * sizeof(StatsOverlay),
	"Heap statistics counter-triplets do not match the array size" );

static void HeapStatisticsCtor( HeapStatistics & stats ) {
	memset( &stats, '\0', sizeof(stats) ); // very fast
	// for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
	// 	stats.counters[i].calls = stats.counters[i].calls_0 = stats.counters[i].request = stats.counters[i].alloc = 0;
	// } // for
} // HeapStatisticsCtor

static HeapStatistics & ?+=?( HeapStatistics & lhs, const HeapStatistics & rhs ) {
	for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
		lhs.counters[i].calls += rhs.counters[i].calls;
		lhs.counters[i].calls_0 += rhs.counters[i].calls_0;
		lhs.counters[i].request += rhs.counters[i].request;
		lhs.counters[i].alloc += rhs.counters[i].alloc;
	} // for
	return lhs;
} // ?+=?
#endif // __STATISTICS__


// Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
// Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
enum { NoBucketSizes = 91 }; // number of bucket sizes

struct Heap {
	struct Storage {
		struct Header { // header
			union Kind {
				struct RealHeader {
					union {
						struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header
							union {
								// 2nd low-order bit => zero filled, 3rd low-order bit => mmapped
								// FreeHeader * home; // allocated block points back to home locations (must overlay alignment)
								void * home; // allocated block points back to home locations (must overlay alignment)
								size_t blockSize; // size for munmap (must overlay alignment)
								Storage * next; // freed block points to next freed block of same size
							};
							size_t size; // allocation size in bytes
						};
					};
				} real; // RealHeader

				struct FakeHeader {
					uintptr_t alignment; // 1st low-order bit => fake header & alignment
					uintptr_t offset;
				} fake; // FakeHeader
			} kind; // Kind
		} header; // Header

		char pad[libAlign() - sizeof( Header )];
		char data[0]; // storage
	}; // Storage

	static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" );

	struct CALIGN FreeHeader {
		size_t blockSize CALIGN; // size of allocations on this list
		#ifdef OWNERSHIP
		#ifdef RETURNSPIN
		SpinLock_t returnLock;
		#endif // RETURNSPIN
		Storage * returnList; // other thread return list
		#endif // OWNERSHIP

		Storage * freeList; // thread free list
		Heap * homeManager; // heap owner (free storage to bucket, from bucket to heap)
	}; // FreeHeader

	FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes
	void * heapBuffer; // start of free storage in buffer
	size_t heapReserve; // amount of remaining free storage in buffer

	#if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
	Heap * nextHeapManager; // intrusive link of existing heaps; traversed to collect statistics or check unfreed storage
	#endif // __STATISTICS__ || __CFA_DEBUG__
	Heap * nextFreeHeapManager; // intrusive link of free heaps from terminated threads; reused by new threads

	#ifdef __CFA_DEBUG__
	ptrdiff_t allocUnfreed; // running total of allocations minus frees; can be negative
	#endif // __CFA_DEBUG__

	#ifdef __STATISTICS__
	HeapStatistics stats; // local statistic table for this heap
	#endif // __STATISTICS__
}; // Heap
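
// Illustrative layout (assuming libAlign() == 16 and an 8-byte word): sizeof(Heap.Storage) == 16, so an allocated
// block is [ 16-byte header | user data ... ] with data beginning at a 16-byte-aligned address; for a block on a
// free list, header.kind.real.next links to the next free block of the same bucket size.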


struct HeapMaster {
	SpinLock_t extLock; // protects allocation-buffer extension
	SpinLock_t mgrLock; // protects freeHeapManagersList, heapManagersList, heapManagersStorage, heapManagersStorageEnd

	void * heapBegin; // start of heap
	void * heapEnd; // logical end of heap
	size_t heapRemaining; // amount of storage not allocated in the current chunk
	size_t pageSize; // architecture pagesize
	size_t heapExpand; // sbrk advance
	size_t mmapStart; // cross over point for mmap
	unsigned int maxBucketsUsed; // maximum number of buckets in use

	Heap * heapManagersList; // heap-list head
	Heap * freeHeapManagersList; // free-list head

	// Heap superblocks are not linked; heaps in superblocks are linked via intrusive links.
	Heap * heapManagersStorage; // next heap to use in heap superblock
	Heap * heapManagersStorageEnd; // logical heap outside of superblock's end

	#ifdef __STATISTICS__
	HeapStatistics stats; // global stats for thread-local heaps to add their counters when exiting
	unsigned long int threads_started, threads_exited; // counts threads that have started and exited
	unsigned long int reused_heap, new_heap; // counts reusability of heaps
	unsigned int sbrk_calls;
	unsigned long long int sbrk_storage;
	int stats_fd;
	#endif // __STATISTICS__
}; // HeapMaster


#ifdef FASTLOOKUP
enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes
static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes
#endif // FASTLOOKUP

static volatile bool heapMasterBootFlag = false; // trigger for first heap
static HeapMaster heapMaster @= {}; // program global

static void heapMasterCtor();
static void heapMasterDtor();
static Heap * getHeap();


// Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16.
// Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size.
// malloc(0) returns 0p, so no bucket is necessary for a 0-byte allocation to return an address that can be freed.
static const unsigned int bucketSizes[] @= { // different bucket sizes
	16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4
	96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3
	160, 192, 224, 256 + sizeof(Heap.Storage), // 4
	320, 384, 448, 512 + sizeof(Heap.Storage), // 4
	640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4
	1_536, 2_048 + sizeof(Heap.Storage), // 2
	2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4
	6_144, 8_192 + sizeof(Heap.Storage), // 2
	9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8
	18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8
	36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8
	73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8
	147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8
	294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8
	655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4
	1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8
	2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4
};

static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" );
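
// For example (illustrative, assuming sizeof(Heap.Storage) == 16): malloc( 100 ) needs 100 + 16 == 116 total bytes,
// and the smallest bucket >= 116 is 112 + sizeof(Heap.Storage) == 128, so 12 bytes are internal fragmentation;
// a request of exactly 96 bytes fits the 96 + sizeof(Heap.Storage) bucket with no waste.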


// extern visibility, used by runtime kernel
libcfa_public size_t __page_size; // architecture pagesize
libcfa_public int __map_prot; // common mmap/mprotect protection


// Thread-local storage is allocated lazily when the storage is accessed.
static __thread size_t PAD1 CALIGN TLSMODEL __attribute__(( unused )); // protect false sharing
static __thread Heap * heapManager CALIGN TLSMODEL;
static __thread size_t PAD2 CALIGN TLSMODEL __attribute__(( unused )); // protect further false sharing


// declare helper functions for HeapMaster
void noMemory(); // forward, called by "builtin_new" when malloc returns 0


void heapMasterCtor() with( heapMaster ) {
	// Singleton pattern to initialize heap master

	verify( bucketSizes[0] == (16 + sizeof(Heap.Storage)) );

	__page_size = sysconf( _SC_PAGESIZE );
	__map_prot = PROT_READ | PROT_WRITE | PROT_EXEC;

	extLock = 0;
	mgrLock = 0;

	char * end = (char *)sbrk( 0 );
	heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment
	heapRemaining = 0;
	heapExpand = malloc_expansion();
	mmapStart = malloc_mmap_start();

	// find the closest bucket size less than or equal to the mmapStart size
	maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search

	verify( (mmapStart >= pageSize) && (bucketSizes[NoBucketSizes - 1] >= mmapStart) );
	verify( maxBucketsUsed < NoBucketSizes ); // subscript failure ?
	verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?

	heapManagersList = 0p;
	freeHeapManagersList = 0p;

	heapManagersStorage = 0p;
	heapManagersStorageEnd = 0p;

	#ifdef __STATISTICS__
	HeapStatisticsCtor( stats ); // clear statistic counters
	threads_started = threads_exited = 0;
	reused_heap = new_heap = 0;
	sbrk_calls = sbrk_storage = 0;
	stats_fd = STDERR_FILENO;
	#endif // __STATISTICS__

	#ifdef FASTLOOKUP
	for ( unsigned int i = 0, idx = 0; i < LookupSizes; i += 1 ) {
		if ( i > bucketSizes[idx] ) idx += 1;
		lookup[i] = idx;
		verify( i <= bucketSizes[idx] );
		verify( (i <= 32 && idx == 0) || (i > bucketSizes[idx - 1]) );
	} // for
	#endif // FASTLOOKUP

	heapMasterBootFlag = true;
} // heapMasterCtor
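
// After the FASTLOOKUP loop above, lookup[i] is the index of the smallest bucket whose size is >= i, e.g.
// (assuming sizeof(Heap.Storage) == 16) lookup[32] == 0 and lookup[33] == 1, mirroring what Bsearchl computes
// but in O(1) for sizes below LookupSizes.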


#define NO_MEMORY_MSG "**** Error **** insufficient heap memory available to allocate %zd new bytes."

Heap * getHeap() with( heapMaster ) {
	Heap * heap;
	if ( freeHeapManagersList ) { // free heap for reuse ?
		heap = freeHeapManagersList;
		freeHeapManagersList = heap->nextFreeHeapManager;

		#ifdef __STATISTICS__
		reused_heap += 1;
		#endif // __STATISTICS__
	} else { // free heap not found, create new
		// Heap size is about 12K: FreeHeader (128 bytes because of cache alignment) * NoBucketSizes (91) ~= 12K, so 128
		// heaps * 12K ~= 1.5M byte superblock, where a 128-heap superblock handles a medium sized multi-processor server.
		size_t remaining = heapManagersStorageEnd - heapManagersStorage; // remaining free heaps in superblock
		if ( ! heapManagersStorage || remaining == 0 ) {
			// Each block of heaps is a multiple of the number of cores on the computer.
			int HeapDim = get_nprocs(); // get_nprocs_conf does not work
			size_t size = HeapDim * sizeof( Heap );

			heapManagersStorage = (Heap *)mmap( 0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
			if ( unlikely( heapManagersStorage == (Heap *)MAP_FAILED ) ) { // failed ?
				if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, size ); // no memory
				// Do not call strerror( errno ) as it may call malloc.
				abort( "**** Error **** attempt to allocate block of heaps of size %zu bytes and mmap failed with errno %d.", size, errno );
			} // if
			heapManagersStorageEnd = &heapManagersStorage[HeapDim]; // outside array
		} // if

		heap = heapManagersStorage;
		heapManagersStorage = heapManagersStorage + 1; // bump next heap

		#if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
		heap->nextHeapManager = heapManagersList;
		#endif // __STATISTICS__ || __CFA_DEBUG__
		heapManagersList = heap;

		#ifdef __STATISTICS__
		new_heap += 1;
		#endif // __STATISTICS__

		with( *heap ) {
			for ( unsigned int j = 0; j < NoBucketSizes; j += 1 ) { // initialize free lists
				#ifdef OWNERSHIP
				#ifdef RETURNSPIN
				freeLists[j].returnLock = 0;
				freeLists[j].returnList = 0p;
				#endif // RETURNSPIN
				#endif // OWNERSHIP

				freeLists[j].freeList = 0p;
				freeLists[j].homeManager = heap;
				freeLists[j].blockSize = bucketSizes[j];
			} // for

			heapBuffer = 0p;
			heapReserve = 0;
			nextFreeHeapManager = 0p;
			#ifdef __CFA_DEBUG__
			allocUnfreed = 0;
			#endif // __CFA_DEBUG__
		} // with
	} // if

	return heap;
} // getHeap


void heapManagerCtor() libcfa_public {
	if ( unlikely( ! heapMasterBootFlag ) ) heapMasterCtor();

	lock( heapMaster.mgrLock ); // protect heapMaster counters

	// get storage for heap manager

	heapManager = getHeap();

	#ifdef __STATISTICS__
	HeapStatisticsCtor( heapManager->stats ); // heap local
	heapMaster.threads_started += 1;
	#endif // __STATISTICS__

	unlock( heapMaster.mgrLock );
} // heapManagerCtor


void heapManagerDtor() libcfa_public {
	lock( heapMaster.mgrLock );

	// place heap on list of free heaps for reusability
	heapManager->nextFreeHeapManager = heapMaster.freeHeapManagersList;
	heapMaster.freeHeapManagersList = heapManager;

	#ifdef __STATISTICS__
	heapMaster.threads_exited += 1;
	#endif // __STATISTICS__

	// Do not set heapManager to NULL because it is used after Cforall is shutdown but before the program shuts down.

	unlock( heapMaster.mgrLock );
} // heapManagerDtor


//####################### Memory Allocation Routines Helpers ####################


extern int cfa_main_returned; // from interpose.cfa
extern "C" {
	void memory_startup( void ) {
		if ( ! heapMasterBootFlag ) heapManagerCtor(); // sanity check
	} // memory_startup

	void memory_shutdown( void ) {
		heapManagerDtor();
	} // memory_shutdown

	void heapAppStart() { // called by __cfaabi_appready_startup
		verify( heapManager );
		#ifdef __CFA_DEBUG__
		heapManager->allocUnfreed = 0; // clear prior allocation counts
		#endif // __CFA_DEBUG__

		#ifdef __STATISTICS__
		HeapStatisticsCtor( heapManager->stats ); // clear prior statistic counters
		#endif // __STATISTICS__
	} // heapAppStart

	void heapAppStop() { // called by __cfaabi_appready_startdown
		fclose( stdin ); fclose( stdout ); // free buffer storage
		if ( ! cfa_main_returned ) return; // do not check unfreed storage if exit called

		#ifdef __CFA_DEBUG__
		// allocUnfreed is set to 0 when a heap is created and it accumulates any unfreed storage during its multiple thread
		// usages. At the end, add up each heap allocUnfreed value across all heaps to get the total unfreed storage.
		ptrdiff_t allocUnfreed = 0;
		for ( Heap * heap = heapMaster.heapManagersList; heap; heap = heap->nextHeapManager ) {
			allocUnfreed += heap->allocUnfreed;
		} // for

		allocUnfreed -= malloc_unfreed(); // subtract any user specified unfreed storage
		if ( allocUnfreed > 0 ) {
			// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
			char helpText[512];
			__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
				"CFA warning (UNIX pid:%ld) : program terminating with %td(%#tx) bytes of storage allocated but not freed.\n"
				"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
				(long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid
		} // if
		#endif // __CFA_DEBUG__
	} // heapAppStop
} // extern "C"


#ifdef __STATISTICS__
static HeapStatistics stats; // zero filled

#define prtFmt \
	"\nHeap statistics: (storage request / allocation)\n" \
	" malloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	" aalloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	" calloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	" memalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	" amemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	" cmemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	" resize >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	" realloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n" \
	" free !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n" \
	" return pulls %'u; pushes %'u; storage %'llu / %'llu bytes\n" \
	" sbrk calls %'u; storage %'llu bytes\n" \
	" mmap calls %'u; storage %'llu / %'llu bytes\n" \
	" munmap calls %'u; storage %'llu / %'llu bytes\n" \
	" threads started %'lu; exited %'lu\n" \
	" heaps new %'lu; reused %'lu\n"

// Use "write" because streams may be shutdown when calls are made.
static int printStats( HeapStatistics & stats ) with( heapMaster, stats ) { // see malloc_stats
	char helpText[sizeof(prtFmt) + 1024]; // space for message and values
	return __cfaabi_bits_print_buffer( stats_fd, helpText, sizeof(helpText), prtFmt,
		malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc,
		aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc,
		calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc,
		memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc,
		amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc,
		cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc,
		resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc,
		realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc,
		free_calls, free_null_calls, free_storage_request, free_storage_alloc,
		return_pulls, return_pushes, return_storage_request, return_storage_alloc,
		sbrk_calls, sbrk_storage,
		mmap_calls, mmap_storage_request, mmap_storage_alloc,
		munmap_calls, munmap_storage_request, munmap_storage_alloc,
		threads_started, threads_exited,
		new_heap, reused_heap
	);
} // printStats

#define prtFmtXML \
	"<malloc version=\"1\">\n" \
	"<heap nr=\"0\">\n" \
	"<sizes>\n" \
	"</sizes>\n" \
	"<total type=\"malloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"aalloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"calloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"memalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"amemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"cmemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"resize\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"free\" !null=\"%'u;\" 0 null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"return\" pulls=\"%'u;\" 0 pushes=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n" \
	"<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\" / > bytes\n" \
	"<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n" \
	"<total type=\"threads\" started=\"%'lu;\" exited=\"%'lu\"/>\n" \
	"<total type=\"heaps\" new=\"%'lu;\" reused=\"%'lu\"/>\n" \
	"</malloc>"

static int printStatsXML( HeapStatistics & stats, FILE * stream ) with( heapMaster, stats ) { // see malloc_info
	char helpText[sizeof(prtFmtXML) + 1024]; // space for message and values
	return __cfaabi_bits_print_buffer( fileno( stream ), helpText, sizeof(helpText), prtFmtXML,
		malloc_calls, malloc_0_calls, malloc_storage_request, malloc_storage_alloc,
		aalloc_calls, aalloc_0_calls, aalloc_storage_request, aalloc_storage_alloc,
		calloc_calls, calloc_0_calls, calloc_storage_request, calloc_storage_alloc,
		memalign_calls, memalign_0_calls, memalign_storage_request, memalign_storage_alloc,
		amemalign_calls, amemalign_0_calls, amemalign_storage_request, amemalign_storage_alloc,
		cmemalign_calls, cmemalign_0_calls, cmemalign_storage_request, cmemalign_storage_alloc,
		resize_calls, resize_0_calls, resize_storage_request, resize_storage_alloc,
		realloc_calls, realloc_0_calls, realloc_storage_request, realloc_storage_alloc,
		free_calls, free_null_calls, free_storage_request, free_storage_alloc,
		return_pulls, return_pushes, return_storage_request, return_storage_alloc,
		sbrk_calls, sbrk_storage,
		mmap_calls, mmap_storage_request, mmap_storage_alloc,
		munmap_calls, munmap_storage_request, munmap_storage_alloc,
		threads_started, threads_exited,
		new_heap, reused_heap
	);
} // printStatsXML

static HeapStatistics & collectStats( HeapStatistics & stats ) with( heapMaster ) {
	lock( mgrLock );

	stats += heapMaster.stats;
	for ( Heap * heap = heapManagersList; heap; heap = heap->nextHeapManager ) {
		stats += heap->stats;
	} // for

	unlock( mgrLock );
	return stats;
} // collectStats
#endif // __STATISTICS__


static bool setMmapStart( size_t value ) with( heapMaster ) { // true => mmapped, false => sbrk
	if ( value < __page_size || bucketSizes[NoBucketSizes - 1] < value ) return false;
	mmapStart = value; // set global

	// find the closest bucket size less than or equal to the mmapStart size
	maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search

	verify( maxBucketsUsed < NoBucketSizes ); // subscript failure ?
	verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
	return true;
} // setMmapStart


// <-------+----------------------------------------------------> bsize (bucket size)
// |header |addr
//==================================================================================
// align/offset |
// <-----------------<------------+-----------------------------> bsize (bucket size)
// |fake-header | addr
#define HeaderAddr( addr ) ((Heap.Storage.Header *)( (char *)addr - sizeof(Heap.Storage) ))
#define RealHeader( header ) ((Heap.Storage.Header *)((char *)header - header->kind.fake.offset))

// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
// |header |addr
//==================================================================================
// align/offset |
// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
// |fake-header |addr
#define DataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
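
// For example (illustrative, assuming sizeof(Heap.Storage) == 16): for a 128-byte bucket block with no fake header,
// addr - header == sizeof(Heap.Storage), so DataStorage( 128, addr, header ) == 128 - 16 == 112 usable data bytes;
// with an aligned (fake-header) allocation the larger offset shrinks dsize accordingly.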


inline __attribute__((always_inline))
static void checkAlign( size_t alignment ) {
	if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) {
		abort( "**** Error **** alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
	} // if
} // checkAlign


inline __attribute__((always_inline))
static void checkHeader( bool check, const char name[], void * addr ) {
	if ( unlikely( check ) ) { // bad address ?
		abort( "**** Error **** attempt to %s storage %p with address outside the heap.\n"
			"Possible cause is duplicate free on same block or overwriting of memory.",
			name, addr );
	} // if
} // checkHeader


// Manipulate sticky bits stored in unused 3 low-order bits of an address.
// bit0 => alignment => fake header
// bit1 => zero filled (calloc)
// bit2 => mapped allocation versus sbrk
#define StickyBits( header ) (((header)->kind.real.blockSize & 0x7))
#define ClearStickyBits( addr ) (typeof(addr))((uintptr_t)(addr) & ~7)
#define MarkAlignmentBit( align ) ((align) | 1)
#define AlignmentBit( header ) ((((header)->kind.fake.alignment) & 1))
#define ClearAlignmentBit( header ) (((header)->kind.fake.alignment) & ~1)
#define ZeroFillBit( header ) ((((header)->kind.real.blockSize) & 2))
#define ClearZeroFillBit( header ) ((((header)->kind.real.blockSize) &= ~2))
#define MarkZeroFilledBit( header ) ((header)->kind.real.blockSize |= 2)
#define MmappedBit( header ) ((((header)->kind.real.blockSize) & 4))
#define MarkMmappedBit( size ) ((size) | 4)
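
// For example (illustrative): an mmapped, zero-filled 8192-byte block stores blockSize == (8192 | 4 | 2) == 8198;
// ClearStickyBits recovers 8192, while MmappedBit and ZeroFillBit test bits 2 and 1. The encoding is safe because
// block sizes and home addresses are always multiples of libAlign() (>= 8), leaving the 3 low-order bits unused.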


inline __attribute__((always_inline))
static void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) {
	if ( unlikely( AlignmentBit( header ) ) ) { // fake header ?
		alignment = ClearAlignmentBit( header ); // clear flag from value
		#ifdef __CFA_DEBUG__
		checkAlign( alignment ); // check alignment
		#endif // __CFA_DEBUG__
		header = RealHeader( header ); // backup from fake to real header
	} else {
		alignment = libAlign(); // => no fake header
	} // if
} // fakeHeader


inline __attribute__((always_inline))
static bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header,
		Heap.FreeHeader *& freeHead, size_t & size, size_t & alignment ) with( heapMaster, *heapManager ) {
	header = HeaderAddr( addr );

	#ifdef __CFA_DEBUG__
	checkHeader( header < (Heap.Storage.Header *)heapBegin, name, addr ); // bad low address ?
	#endif // __CFA_DEBUG__

	if ( likely( ! StickyBits( header ) ) ) { // no sticky bits ?
		freeHead = (Heap.FreeHeader *)(header->kind.real.home);
		alignment = libAlign();
	} else {
		fakeHeader( header, alignment );
		if ( unlikely( MmappedBit( header ) ) ) { // mmapped ?
			verify( addr < heapBegin || heapEnd < addr );
			size = ClearStickyBits( header->kind.real.blockSize ); // mmap size
			return true;
		} // if

		freeHead = (Heap.FreeHeader *)(ClearStickyBits( header->kind.real.home ));
	} // if
	size = freeHead->blockSize;

	#ifdef __CFA_DEBUG__
	checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)

	Heap * homeManager;
	if ( unlikely( freeHead == 0p || // freed and only free-list node => null link
		// freed and link points at another free block not to a bucket in the bucket array.
		(homeManager = freeHead->homeManager, freeHead < &homeManager->freeLists[0] ||
		&homeManager->freeLists[NoBucketSizes] <= freeHead ) ) ) {
		abort( "**** Error **** attempt to %s storage %p with corrupted header.\n"
			"Possible cause is duplicate free on same block or overwriting of header information.",
			name, addr );
	} // if
	#endif // __CFA_DEBUG__

	return false;
} // headers


static void * master_extend( size_t size ) with( heapMaster ) {
	lock( extLock );

	ptrdiff_t rem = heapRemaining - size;
	if ( unlikely( rem < 0 ) ) { // negative ?
		// If the size requested is bigger than the current remaining storage, increase the size of the heap.

		size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() );
		if ( unlikely( sbrk( increase ) == (void *)-1 ) ) { // failed, no memory ?
			unlock( extLock );
			abort( NO_MEMORY_MSG, size ); // give up
		} // if

		// Make storage executable for thunks.
		if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
			unlock( extLock );
			abort( "**** Error **** attempt to make heap storage executable for thunks and mprotect failed with errno %d.", errno );
		} // if

		rem = heapRemaining + increase - size;

		#ifdef __STATISTICS__
		sbrk_calls += 1;
		sbrk_storage += increase;
		#endif // __STATISTICS__
	} // if

	Heap.Storage * block = (Heap.Storage *)heapEnd;
	heapRemaining = rem;
	heapEnd = (char *)heapEnd + size;

	unlock( extLock );
	return block;
} // master_extend


__attribute__(( noinline ))
static void * manager_extend( size_t size ) with( *heapManager ) {
	ptrdiff_t rem = heapReserve - size;

	if ( unlikely( rem < 0 ) ) { // negative ?
		// If the size requested is bigger than the current remaining reserve, use the current reserve to populate
		// smaller freeLists, and increase the reserve.

		rem = heapReserve; // positive

		if ( (unsigned int)rem >= bucketSizes[0] ) { // minimal size ? otherwise ignore
			size_t bucket;
			#ifdef FASTLOOKUP
			if ( likely( rem < LookupSizes ) ) bucket = lookup[rem];
			else
			#endif // FASTLOOKUP
				bucket = Bsearchl( rem, bucketSizes, heapMaster.maxBucketsUsed );
			verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed );
			Heap.FreeHeader * freeHead = &(freeLists[bucket]);

			// The remaining storage may not be bucket size, whereas all other allocations are. Round down to previous
			// bucket size in this case.
			if ( unlikely( freeHead->blockSize > (size_t)rem ) ) freeHead -= 1;
			Heap.Storage * block = (Heap.Storage *)heapBuffer;

			block->header.kind.real.next = freeHead->freeList; // push on stack
			freeHead->freeList = block;
		} // if

		size_t increase = ceiling( size > ( heapMaster.heapExpand / 10 ) ? size : ( heapMaster.heapExpand / 10 ), libAlign() );
		heapBuffer = master_extend( increase );
		rem = increase - size;
	} // if

	Heap.Storage * block = (Heap.Storage *)heapBuffer;
	heapReserve = rem;
	heapBuffer = (char *)heapBuffer + size;

	return block;
} // manager_extend
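
// For example (illustrative, assuming sizeof(Heap.Storage) == 16): if the reserve holds 1000 bytes and 2048 are
// requested, the 1000-byte leftover is pushed onto a bucket free list (the search finds the 1_024 + 16 == 1_040
// bucket, which is rounded down to the 896 bucket because 1_040 > 1000), then the reserve is regrown via
// master_extend and the 2048 bytes are carved off its front.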


#define BOOT_HEAP_MANAGER \
	if ( unlikely( ! heapMasterBootFlag ) ) { \
		heapManagerCtor(); /* trigger for first heap */ \
	} /* if */

#ifdef __STATISTICS__
#define STAT_NAME __counter
#define STAT_PARM , unsigned int STAT_NAME
#define STAT_ARG( name ) , name
#define STAT_0_CNT( counter ) stats.counters[counter].calls_0 += 1
#else
#define STAT_NAME
#define STAT_PARM
#define STAT_ARG( name )
#define STAT_0_CNT( counter )
#endif // __STATISTICS__

#define PROLOG( counter, ... ) \
	BOOT_HEAP_MANAGER; \
	if ( unlikely( size == 0 ) || /* 0 BYTE ALLOCATION RETURNS NULL POINTER */ \
		unlikely( size > ULONG_MAX - sizeof(Heap.Storage) ) ) { /* error check */ \
		STAT_0_CNT( counter ); \
		__VA_ARGS__; \
		return 0p; \
	} /* if */


#define SCRUB_SIZE 1024lu
// Do not use '\xfe' for scrubbing because dereferencing an address composed of it causes a SIGSEGV *without* a valid IP
// pointer in the interrupt frame.
#define SCRUB '\xff'

static void * doMalloc( size_t size STAT_PARM ) libcfa_nopreempt with( *heapManager ) {
	PROLOG( STAT_NAME );

	verify( heapManager );
	Heap.Storage * block; // pointer to new block of storage

	// Look up size in the size list. Make sure the user request includes space for the header that must be allocated
	// along with the block and is a multiple of the alignment size.
	size_t tsize = size + sizeof(Heap.Storage);

	#ifdef __STATISTICS__
	stats.counters[STAT_NAME].calls += 1;
	stats.counters[STAT_NAME].request += size;
	#endif // __STATISTICS__

	#ifdef __CFA_DEBUG__
	allocUnfreed += size;
	#endif // __CFA_DEBUG__

	if ( likely( tsize < heapMaster.mmapStart ) ) { // small size => sbrk
		size_t bucket;
		#ifdef FASTLOOKUP
		if ( likely( tsize < LookupSizes ) ) bucket = lookup[tsize];
		else
		#endif // FASTLOOKUP
			bucket = Bsearchl( tsize, bucketSizes, heapMaster.maxBucketsUsed );
		verify( 0 <= bucket && bucket <= heapMaster.maxBucketsUsed );
		Heap.FreeHeader * freeHead = &freeLists[bucket];

		verify( freeHead <= &freeLists[heapMaster.maxBucketsUsed] ); // subscripting error ?
		verify( tsize <= freeHead->blockSize ); // search failure ?

		tsize = freeHead->blockSize; // total space needed for request
		#ifdef __STATISTICS__
		stats.counters[STAT_NAME].alloc += tsize;
		#endif // __STATISTICS__

		block = freeHead->freeList; // remove node from stack
		if ( unlikely( block == 0p ) ) { // no free block ?
			// Freelist for this size is empty, so check return list (OWNERSHIP), or carve it out of the heap if there
			// is enough left, or get some more heap storage and carve it off.
			#ifdef OWNERSHIP
			if ( unlikely( freeHead->returnList ) ) { // race, get next time if lose race
				#ifdef RETURNSPIN
				lock( freeHead->returnLock );
				block = freeHead->returnList;
				freeHead->returnList = 0p;
				unlock( freeHead->returnLock );
				#else
				block = __atomic_exchange_n( &freeHead->returnList, 0p, __ATOMIC_SEQ_CST );
				#endif // RETURNSPIN

				verify( block );
				#ifdef __STATISTICS__
				stats.return_pulls += 1;
				#endif // __STATISTICS__

				// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

				freeHead->freeList = block->header.kind.real.next; // merge returnList into freeHead
			} else {
			#endif // OWNERSHIP
				// Do not leave kernel thread as manager_extend accesses heapManager.
				disable_interrupts();
				block = (Heap.Storage *)manager_extend( tsize ); // mutual exclusion on call
				enable_interrupts( false );

				// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

				#ifdef __CFA_DEBUG__
				// Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first SCRUB_SIZE bytes.
				memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) );
				#endif // __CFA_DEBUG__
			#ifdef OWNERSHIP
			} // if
			#endif // OWNERSHIP
		} else {
			// Memory is scrubbed in doFree.
			freeHead->freeList = block->header.kind.real.next;
		} // if

		block->header.kind.real.home = freeHead; // pointer back to free list of appropriate size
	} else { // large size => mmap
		if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p;
		tsize = ceiling2( tsize, __page_size ); // must be multiple of page size

		#ifdef __STATISTICS__
		stats.counters[STAT_NAME].alloc += tsize;
		stats.mmap_calls += 1;
		stats.mmap_storage_request += size;
		stats.mmap_storage_alloc += tsize;
		#endif // __STATISTICS__

		disable_interrupts();
		block = (Heap.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
		enable_interrupts( false );

		// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

		if ( unlikely( block == (Heap.Storage *)MAP_FAILED ) ) { // failed ?
			if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
			// Do not call strerror( errno ) as it may call malloc.
			abort( "**** Error **** attempt to allocate large object (> %zu) of size %zu bytes and mmap failed with errno %d.",
				heapMaster.mmapStart, size, errno );
		} // if
		block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap

		#ifdef __CFA_DEBUG__
		// Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first SCRUB_SIZE bytes. The
		// rest of the storage set to 0 by mmap.
		memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) );
		#endif // __CFA_DEBUG__
	} // if

	block->header.kind.real.size = size; // store allocation size
	void * addr = &(block->data); // adjust off header to user bytes
	verify( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ?

	#ifdef __CFA_DEBUG__
	if ( traceHeap() ) {
		char helpText[64];
		__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
			"%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); // print debug/nodebug
	} // if
	#endif // __CFA_DEBUG__

	// poll_interrupts(); // call rollforward

	return addr;
} // doMalloc


static void doFree( void * addr ) libcfa_nopreempt with( *heapManager ) {
	verify( addr );

	// detect free after thread-local storage destruction and use global stats in that case

	Heap.Storage.Header * header;
	Heap.FreeHeader * freeHead;
	size_t size, alignment;

	bool mapped = headers( "free", addr, header, freeHead, size, alignment );
	#if defined( __STATISTICS__ ) || defined( __CFA_DEBUG__ )
	size_t rsize = header->kind.real.size; // optimization
	#endif // __STATISTICS__ || __CFA_DEBUG__

	#ifdef __STATISTICS__
	stats.free_storage_request += rsize;
	stats.free_storage_alloc += size;
	#endif // __STATISTICS__

	#ifdef __CFA_DEBUG__
	allocUnfreed -= rsize;
	#endif // __CFA_DEBUG__

	if ( unlikely( mapped ) ) { // mmapped ?
		#ifdef __STATISTICS__
		stats.munmap_calls += 1;
		stats.munmap_storage_request += rsize;
		stats.munmap_storage_alloc += size;
		#endif // __STATISTICS__

		// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.

		// Does not matter where this storage is freed.
		if ( unlikely( munmap( header, size ) == -1 ) ) {
			// Do not call strerror( errno ) as it may call malloc.
			abort( "**** Error **** attempt to deallocate large object %p and munmap failed with errno %d.\n"
				"Possible cause is invalid delete pointer: either not allocated or with corrupt header.",
				addr, errno );
		} // if
	} else {
		#ifdef __CFA_DEBUG__
		// memset is NOT always inlined!
		disable_interrupts();
		// Scrub old memory so subsequent usages might fail. Only scrub the first/last SCRUB_SIZE bytes.
		char * data = ((Heap.Storage *)header)->data; // data address
		size_t dsize = size - sizeof(Heap.Storage); // data size
		if ( dsize <= SCRUB_SIZE * 2 ) {
			memset( data, SCRUB, dsize ); // scrub all
		} else {
			memset( data, SCRUB, SCRUB_SIZE ); // scrub front
			memset( data + dsize - SCRUB_SIZE, SCRUB, SCRUB_SIZE ); // scrub back
		} // if
		enable_interrupts( false );
		#endif // __CFA_DEBUG__

		#ifdef OWNERSHIP
		if ( likely( heapManager == freeHead->homeManager ) ) { // belongs to this thread
			header->kind.real.next = freeHead->freeList; // push on stack
			freeHead->freeList = (Heap.Storage *)header;
		} else { // return to thread owner
			verify( heapManager );

			#ifdef RETURNSPIN
			lock( freeHead->returnLock );
			header->kind.real.next = freeHead->returnList; // push to bucket return list
			freeHead->returnList = (Heap.Storage *)header;
			unlock( freeHead->returnLock );
			#else // lock free
			header->kind.real.next = freeHead->returnList; // link new node to top node
			// CAS resets header->kind.real.next = freeHead->returnList on failure
			while ( ! __atomic_compare_exchange_n( &freeHead->returnList, &header->kind.real.next, (Heap.Storage *)header,
				false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) );

			#ifdef __STATISTICS__
			stats.return_pushes += 1;
			stats.return_storage_request += rsize;
			stats.return_storage_alloc += size;
			#endif // __STATISTICS__
			#endif // RETURNSPIN
		} // if

		#else // no OWNERSHIP

		// kind.real.home is address in owner thread's freeLists, so compute the equivalent position in this thread's freeList.
		freeHead = &freeLists[ClearStickyBits( (Heap.FreeHeader *)(header->kind.real.home) ) - &freeHead->homeManager->freeLists[0]];
		header->kind.real.next = freeHead->freeList; // push on stack
		freeHead->freeList = (Heap.Storage *)header;
		#endif // ! OWNERSHIP

		// OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.
	} // if

	#ifdef __CFA_DEBUG__
	if ( traceHeap() ) {
		char helpText[64];
		__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
			"Free( %p ) size:%zu\n", addr, size ); // print debug/nodebug
	} // if
	#endif // __CFA_DEBUG__

	// poll_interrupts(); // call rollforward
} // doFree
1146 |
|
---|
1147 |
|
---|
1148 | size_t prtFree( Heap & manager ) with( manager ) {
|
---|
1149 | size_t total = 0;
|
---|
1150 | #ifdef __STATISTICS__
|
---|
1151 | __cfaabi_bits_acquire();
|
---|
1152 | __cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" );
|
---|
1153 | #endif // __STATISTICS__
|
---|
1154 | for ( unsigned int i = 0; i < heapMaster.maxBucketsUsed; i += 1 ) {
|
---|
1155 | size_t size = freeLists[i].blockSize;
|
---|
1156 | #ifdef __STATISTICS__
|
---|
1157 | unsigned int N = 0;
|
---|
1158 | #endif // __STATISTICS__
|
---|
1159 |
|
---|
1160 | for ( Heap.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
|
---|
1161 | total += size;
|
---|
1162 | #ifdef __STATISTICS__
|
---|
1163 | N += 1;
|
---|
1164 | #endif // __STATISTICS__
|
---|
1165 | } // for
|
---|
1166 |
|
---|
1167 | #ifdef __STATISTICS__
|
---|
1168 | __cfaabi_bits_print_nolock( STDERR_FILENO, "%7zu, %-7u ", size, N );
|
---|
1169 | if ( (i + 1) % 8 == 0 ) __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" );
|
---|
1170 | #endif // __STATISTICS__
|
---|
1171 | } // for
|
---|
1172 | #ifdef __STATISTICS__
|
---|
1173 | __cfaabi_bits_print_nolock( STDERR_FILENO, "\ntotal free blocks:%zu\n", total );
|
---|
1174 | __cfaabi_bits_release();
|
---|
1175 | #endif // __STATISTICS__
|
---|
1176 | return (char *)heapMaster.heapEnd - (char *)heapMaster.heapBegin - total;
|
---|
1177 | } // prtFree
|
---|
1178 |
|
---|
1179 |
|
---|
1180 | #ifdef __STATISTICS__
|
---|
1181 | static void incCalls( size_t statName ) libcfa_nopreempt {
|
---|
1182 | heapManager->stats.counters[statName].calls += 1;
|
---|
1183 | } // incCalls
|
---|
1184 |
|
---|
1185 | static void incZeroCalls( size_t statName ) libcfa_nopreempt {
|
---|
1186 | heapManager->stats.counters[statName].calls_0 += 1;
|
---|
1187 | } // incZeroCalls
|
---|
1188 | #endif // __STATISTICS__
|
---|
1189 |
|
---|
1190 | #ifdef __CFA_DEBUG__
|
---|
1191 | static void incUnfreed( intptr_t offset ) libcfa_nopreempt {
|
---|
1192 | heapManager->allocUnfreed += offset;
|
---|
1193 | } // incUnfreed
|
---|
1194 | #endif // __CFA_DEBUG__
|
---|
1195 |
|
---|
1196 |
|
---|
1197 | static void * memalignNoStats( size_t alignment, size_t size STAT_PARM ) {
|
---|
1198 | checkAlign( alignment ); // check alignment
|
---|
1199 |
|
---|
1200 | // if alignment <= default alignment or size == 0, do normal malloc as two headers are unnecessary
|
---|
1201 | if ( unlikely( alignment <= libAlign() || size == 0 ) ) return doMalloc( size STAT_ARG( STAT_NAME ) );
|
---|
1202 |
|
---|
1203 | // Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
|
---|
1204 | // administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
|
---|
1205 | // .-------------v-----------------v----------------v----------,
|
---|
1206 | // | Real Header | ... padding ... | Fake Header | data ... |
|
---|
1207 | // `-------------^-----------------^-+--------------^----------'
|
---|
1208 | // |<--------------------------------' offset/align |<-- alignment boundary
|
---|
1209 |
|
---|
1210 | // subtract libAlign() because it is already the minimum alignment
|
---|
1211 | // add sizeof(Storage) for fake header
|
---|
1212 | size_t offset = alignment - libAlign() + sizeof(Heap.Storage);
|
---|
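	// For example (illustrative numbers only, assuming libAlign() == 16 and sizeof(Heap.Storage) == 16): a requested
	// alignment of 4096 gives offset = 4096 - 16 + 16 = 4096 extra bytes, enough to slide forward to a 4096-byte
	// boundary from any 16-byte-aligned address returned by doMalloc and still leave room for the fake header.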
	char * addr = (char *)doMalloc( size + offset STAT_ARG( STAT_NAME ) );

	// address in the block of the "next" alignment address
	char * user = (char *)ceiling2( (uintptr_t)(addr + sizeof(Heap.Storage)), alignment );

	// address of header from malloc
	Heap.Storage.Header * realHeader = HeaderAddr( addr );
	realHeader->kind.real.size = size;					// correct size to eliminate above alignment offset
	#ifdef __CFA_DEBUG__
	incUnfreed( -offset );								// subtract the offset added to the doMalloc request
	#endif // __CFA_DEBUG__

	// address of fake header *before* the alignment location
	Heap.Storage.Header * fakeHeader = HeaderAddr( user );

	// SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
	fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
	// SKULLDUGGERY: odd alignment implies fake header
	fakeHeader->kind.fake.alignment = MarkAlignmentBit( alignment );

	return user;
} // memalignNoStats


//####################### Memory Allocation Routines ####################


extern "C" {
	// Allocates size bytes and returns a pointer to the allocated memory. The contents are undefined. If size is 0,
	// then malloc() returns a unique pointer value that can later be successfully passed to free().
	void * malloc( size_t size ) libcfa_public {
		return doMalloc( size STAT_ARG( MALLOC ) );
	} // malloc


	// Same as malloc() except size bytes is an array of dim elements each of elemSize bytes.
	void * aalloc( size_t dim, size_t elemSize ) libcfa_public {
		return doMalloc( dim * elemSize STAT_ARG( AALLOC ) );
	} // aalloc


	// Same as aalloc() with memory set to zero.
	void * calloc( size_t dim, size_t elemSize ) libcfa_public {
		size_t size = dim * elemSize;
		char * addr = (char *)doMalloc( size STAT_ARG( CALLOC ) );

		if ( unlikely( addr == NULL ) ) return NULL;	// stop further processing if 0p is returned

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, alignment;

		#ifndef __CFA_DEBUG__
		bool mapped =
		#endif // __CFA_DEBUG__
			headers( "calloc", addr, header, freeHead, bsize, alignment );

		#ifndef __CFA_DEBUG__
		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		if ( likely( ! mapped ) )
		#endif // __CFA_DEBUG__
			// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
			// `-header`-addr                      `-size
			memset( addr, '\0', size );					// set to zeros

		MarkZeroFilledBit( header );					// mark as zero fill
		return addr;
	} // calloc
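
	// Illustrative use (not part of the allocator): a zero-filled array of 10 doubles; the zero-fill property is
	// remembered in the header, so a later realloc can extend the zero fill.
	//   double * v = calloc( 10, sizeof(double) );
	//   assert( malloc_zero_fill( v ) );
	//   free( v );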


	// Change the size of the memory block pointed to by oaddr to size bytes. The contents are undefined. If oaddr is
	// 0p, then the call is equivalent to malloc(size), for all values of size; if size is equal to zero, and oaddr is
	// not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier
	// call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done.
	void * resize( void * oaddr, size_t size ) libcfa_public {
		if ( unlikely( oaddr == 0p ) ) {				// => malloc( size )
			return doMalloc( size STAT_ARG( RESIZE ) );
		} // if

		PROLOG( RESIZE, doFree( oaddr ) );				// => free( oaddr )

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, oalign;
		headers( "resize", oaddr, header, freeHead, bsize, oalign );

		size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
		// same size, DO NOT preserve STICKY PROPERTIES.
		if ( oalign == libAlign() && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
			ClearZeroFillBit( header );					// no alignment and turn off 0 fill
			#ifdef __CFA_DEBUG__
			incUnfreed( size - header->kind.real.size ); // adjustment off the size difference
			#endif // __CFA_DEBUG__
			header->kind.real.size = size;				// reset allocation size
			#ifdef __STATISTICS__
			incCalls( RESIZE );
			#endif // __STATISTICS__
			return oaddr;
		} // if

		// change size, DO NOT preserve STICKY PROPERTIES.
		doFree( oaddr );								// free previous storage

		return doMalloc( size STAT_ARG( RESIZE ) );		// create new area
	} // resize
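
	// Illustrative use (not part of the allocator): resize is a cheaper realloc when the old contents are not
	// needed, because no data is copied and sticky properties (zero fill, alignment) are dropped.
	//   char * buf = malloc( 100 );
	//   buf = resize( buf, 1000 );                     // contents now undefined
	//   free( buf );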


	// Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of
	// the old and new sizes.
	void * realloc( void * oaddr, size_t size ) libcfa_public {
		if ( unlikely( oaddr == 0p ) ) {				// => malloc( size )
			return doMalloc( size STAT_ARG( REALLOC ) );
		} // if

		PROLOG( REALLOC, doFree( oaddr ) );				// => free( oaddr )

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, oalign;
		headers( "realloc", oaddr, header, freeHead, bsize, oalign );

		size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
		size_t osize = header->kind.real.size;			// old allocation size
		bool ozfill = ZeroFillBit( header );			// old allocation zero filled
		if ( unlikely( size <= odsize ) && odsize <= size * 2 ) { // allow up to 50% wasted storage
			#ifdef __CFA_DEBUG__
			incUnfreed( size - header->kind.real.size ); // adjustment off the size difference
			#endif // __CFA_DEBUG__
			header->kind.real.size = size;				// reset allocation size
			if ( unlikely( ozfill ) && size > osize ) {	// previous request zero fill and larger ?
				memset( (char *)oaddr + osize, '\0', size - osize ); // initialize added storage
			} // if
			#ifdef __STATISTICS__
			incCalls( REALLOC );
			#endif // __STATISTICS__
			return oaddr;
		} // if

		// change size and copy old content to new storage

		void * naddr;
		if ( likely( oalign <= libAlign() ) ) {			// previous request not aligned ?
			naddr = doMalloc( size STAT_ARG( REALLOC ) ); // create new area
		} else {
			naddr = memalignNoStats( oalign, size STAT_ARG( REALLOC ) ); // create new aligned area
		} // if

		headers( "realloc", naddr, header, freeHead, bsize, oalign );
		// To preserve prior fill, the entire bucket must be copied versus the size.
		memcpy( naddr, oaddr, min( osize, size ) );		// copy bytes
		doFree( oaddr );								// free previous storage

		if ( unlikely( ozfill ) ) {						// previous request zero fill ?
			MarkZeroFilledBit( header );				// mark new request as zero filled
			if ( size > osize ) {						// previous request larger ?
				memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
			} // if
		} // if
		return naddr;
	} // realloc
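
	// Illustrative use (not part of the allocator): growing a zero-filled allocation; the bytes beyond the old size
	// arrive zeroed because the zero-fill sticky property is preserved on both the in-place and copying paths.
	//   int * a = calloc( 4, sizeof(int) );
	//   a = realloc( a, 8 * sizeof(int) );             // a[4..7] are zero
	//   free( a );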


	// Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize.
	void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) libcfa_public {
		return realloc( oaddr, dim * elemSize );
	} // reallocarray


	// Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete)
	void * memalign( size_t alignment, size_t size ) libcfa_public {
		return memalignNoStats( alignment, size STAT_ARG( MEMALIGN ) );
	} // memalign


	// Same as aalloc() with memory alignment.
	void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
		return memalignNoStats( alignment, dim * elemSize STAT_ARG( AMEMALIGN ) );
	} // amemalign


	// Same as calloc() with memory alignment.
	void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
		size_t size = dim * elemSize;
		char * addr = (char *)memalignNoStats( alignment, size STAT_ARG( CMEMALIGN ) );

		if ( unlikely( addr == NULL ) ) return NULL;	// stop further processing if 0p is returned

		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize;

		#ifndef __CFA_DEBUG__
		bool mapped =
		#endif // __CFA_DEBUG__
			headers( "cmemalign", addr, header, freeHead, bsize, alignment );

		// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
		#ifndef __CFA_DEBUG__
		if ( ! mapped )
		#endif // __CFA_DEBUG__
			// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
			// `-header`-addr                      `-size
			memset( addr, '\0', size );					// set to zeros

		MarkZeroFilledBit( header );					// mark as zero filled
		return addr;
	} // cmemalign
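
	// Illustrative use (not part of the allocator): a zeroed, cache-line-aligned array of counters.
	//   size_t * counters = cmemalign( 64, 16, sizeof(size_t) );
	//   assert( malloc_alignment( counters ) == 64 );
	//   free( counters );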


	// Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
	// of alignment. This requirement is universally ignored.
	void * aligned_alloc( size_t alignment, size_t size ) libcfa_public {
		return memalign( alignment, size );
	} // aligned_alloc


	// Allocates size bytes and places the address of the allocated memory in *memptr. The address of the allocated
	// memory shall be a multiple of alignment, which must be a power of two and a multiple of sizeof(void *). If size
	// is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to
	// free(3).
	int posix_memalign( void ** memptr, size_t alignment, size_t size ) libcfa_public {
		if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment
		*memptr = memalign( alignment, size );
		return 0;
	} // posix_memalign
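
	// Illustrative use (not part of the allocator): posix_memalign reports errors through the return code, not errno.
	//   void * page;
	//   if ( posix_memalign( &page, 4096, 1024 ) != 0 ) abort( "bad alignment" );
	//   free( page );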


	// Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
	// page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
	void * valloc( size_t size ) libcfa_public {
		return memalign( __page_size, size );
	} // valloc


	// Same as valloc but rounds size to multiple of page size.
	void * pvalloc( size_t size ) libcfa_public {
		return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size
	} // pvalloc


	// Frees the memory space pointed to by ptr, which must have been returned by a previous call to malloc(), calloc()
	// or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is
	// 0p, no operation is performed.
	void free( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) {					// special case
			#ifdef __STATISTICS__
			if ( heapManager )
				incZeroCalls( FREE );
			#endif // __STATISTICS__
			return;
		} // if

		#ifdef __STATISTICS__
		incCalls( FREE );
		#endif // __STATISTICS__

		doFree( addr );									// handles heapManager == nullptr
	} // free


	// Returns the alignment of an allocation.
	size_t malloc_alignment( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
			return ClearAlignmentBit( header );			// clear flag from value
		} else {
			return libAlign();							// minimum alignment
		} // if
	} // malloc_alignment


	// Returns true if the allocation is zero filled, e.g., allocated by calloc().
	bool malloc_zero_fill( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return false;		// null allocation is not zero fill
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
			header = RealHeader( header );				// backup from fake to real header
		} // if
		return ZeroFillBit( header );					// zero filled ?
	} // malloc_zero_fill


	// Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T).
	size_t malloc_size( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return 0;			// null allocation has zero size
		Heap.Storage.Header * header = HeaderAddr( addr );
		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
			header = RealHeader( header );				// backup from fake to real header
		} // if
		return header->kind.real.size;
	} // malloc_size
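
	// Illustrative use (not part of the allocator): the three introspection routines read the (fake or real) header.
	//   char * s = memalign( 32, 13 );
	//   malloc_alignment( s );                         // 32
	//   malloc_zero_fill( s );                         // false
	//   malloc_size( s );                              // 13, the requested size
	//   free( s );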


	// Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by
	// malloc or a related function.
	size_t malloc_usable_size( void * addr ) libcfa_public {
		if ( unlikely( addr == 0p ) ) return 0;			// null allocation has 0 size
		Heap.Storage.Header * header;
		Heap.FreeHeader * freeHead;
		size_t bsize, alignment;

		headers( "malloc_usable_size", addr, header, freeHead, bsize, alignment );
		return DataStorage( bsize, addr, header );		// data storage in bucket
	} // malloc_usable_size
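
	// Illustrative note (not part of the allocator): usable size is the bucket's data storage, so it is always >=
	// the requested size reported by malloc_size.
	//   void * p = malloc( 100 );
	//   assert( malloc_usable_size( p ) >= malloc_size( p ) );
	//   free( p );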


	// Prints (on default standard error) statistics about memory allocated by malloc and related functions.
	void malloc_stats( void ) libcfa_public {
		#ifdef __STATISTICS__
		HeapStatistics stats;
		HeapStatisticsCtor( stats );
		if ( printStats( collectStats( stats ) ) == -1 ) {
		#else
		#define MALLOC_STATS_MSG "malloc_stats statistics disabled.\n"
		if ( write( STDERR_FILENO, MALLOC_STATS_MSG, sizeof( MALLOC_STATS_MSG ) - 1 /* size includes '\0' */ ) == -1 ) {
		#endif // __STATISTICS__
			abort( "**** Error **** write failed in malloc_stats" );
		} // if
	} // malloc_stats


	// Changes the file descriptor where malloc_stats() writes statistics.
	int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public {
		#ifdef __STATISTICS__
		int temp = heapMaster.stats_fd;
		heapMaster.stats_fd = fd;
		return temp;
		#else
		return -1;										// unsupported
		#endif // __STATISTICS__
	} // malloc_stats_fd


	// Prints an XML string that describes the current state of the memory-allocation implementation in the caller.
	// The string is printed on the file stream stream. The exported string includes information about all arenas (see
	// malloc).
	int malloc_info( int options, FILE * stream __attribute__(( unused )) ) libcfa_public {
		if ( options != 0 ) { errno = EINVAL; return -1; }
		#ifdef __STATISTICS__
		HeapStatistics stats;
		HeapStatisticsCtor( stats );
		return printStatsXML( collectStats( stats ), stream ); // returns bytes written or -1
		#else
		return 0;										// unsupported
		#endif // __STATISTICS__
	} // malloc_info


	// Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The param argument
	// specifies the parameter to be modified, and value specifies the new value for that parameter.
	int mallopt( int option, int value ) libcfa_public {
		if ( value < 0 ) return 0;
		choose( option ) {
		  case M_TOP_PAD:
			heapMaster.heapExpand = ceiling2( value, __page_size );
			return 1;
		  case M_MMAP_THRESHOLD:
			if ( setMmapStart( value ) ) return 1;
		} // choose
		return 0;										// error, unsupported
	} // mallopt
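
	// Illustrative use (not part of the allocator): raise the mmap crossover so 1MB allocations come from the sbrk
	// heap buckets instead of individual mmap calls.
	//   mallopt( M_MMAP_THRESHOLD, 2 * 1024 * 1024 );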


	// Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument).
	int malloc_trim( size_t ) libcfa_public {
		return 0;										// => impossible to release memory
	} // malloc_trim


	// Records the current state of all malloc internal bookkeeping variables (but not the actual contents of the heap
	// or the state of malloc_hook function pointers). The state is recorded in a system-dependent opaque data
	// structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function
	// result. (The caller must free this memory.)
	void * malloc_get_state( void ) libcfa_public {
		return 0p;										// unsupported
	} // malloc_get_state


	// Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data
	// structure pointed to by state.
	int malloc_set_state( void * ) libcfa_public {
		return 0;										// unsupported
	} // malloc_set_state


	// Sets the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation.
	__attribute__((weak)) size_t malloc_expansion() libcfa_public { return __CFA_DEFAULT_HEAP_EXPANSION__; }

	// Sets the crossover point between allocations occurring in the sbrk area or separately mmapped.
	__attribute__((weak)) size_t malloc_mmap_start() libcfa_public { return __CFA_DEFAULT_MMAP_START__; }

	// Amount subtracted to adjust for unfreed program storage (debug only).
	__attribute__((weak)) size_t malloc_unfreed() libcfa_public { return __CFA_DEFAULT_HEAP_UNFREED__; }
} // extern "C"


// Must have CFA linkage to overload with C linkage realloc.
void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public {
	if ( unlikely( oaddr == 0p ) ) {					// => malloc( size )
		return memalignNoStats( nalign, size STAT_ARG( RESIZE ) );
	} // if

	PROLOG( RESIZE, doFree( oaddr ) );					// => free( oaddr )

	// Attempt to reuse existing alignment.
	Heap.Storage.Header * header = HeaderAddr( oaddr );
	bool isFakeHeader = AlignmentBit( header );			// old fake header ?
	size_t oalign;

	if ( unlikely( isFakeHeader ) ) {
		checkAlign( nalign );							// check alignment
		oalign = ClearAlignmentBit( header );			// old alignment
		if ( unlikely( (uintptr_t)oaddr % nalign == 0	// lucky match ?
			 && ( oalign <= nalign						// going down
				  || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
			) ) {
			HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
			Heap.FreeHeader * freeHead;
			size_t bsize, oalign;
			headers( "resize", oaddr, header, freeHead, bsize, oalign );
			size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket

			if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted data storage
				HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
				ClearZeroFillBit( header );				// turn off 0 fill
				#ifdef __CFA_DEBUG__
				incUnfreed( size - header->kind.real.size ); // adjustment off the size difference
				#endif // __CFA_DEBUG__
				header->kind.real.size = size;			// reset allocation size
				#ifdef __STATISTICS__
				incCalls( RESIZE );
				#endif // __STATISTICS__
				return oaddr;
			} // if
		} // if
	} else if ( ! isFakeHeader							// old real header (aligned on libAlign) ?
				&& nalign == libAlign() ) {				// new alignment also on libAlign => no fake header needed
		return resize( oaddr, size );					// duplicate special case checks
	} // if

	// change size, DO NOT preserve STICKY PROPERTIES.
	doFree( oaddr );									// free previous storage
	return memalignNoStats( nalign, size STAT_ARG( RESIZE ) ); // create new aligned area
} // resize
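
// Illustrative use (not part of the allocator): increase both size and alignment in one call; the data is not
// preserved, matching the 2-argument resize.
//   void * p = malloc( 64 );
//   p = resize( p, 4096, 8192 );                      // now 4096-byte aligned, contents undefined
//   free( p );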


void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public {
	if ( unlikely( oaddr == 0p ) ) {					// => malloc( size )
		return memalignNoStats( nalign, size STAT_ARG( REALLOC ) );
	} // if

	PROLOG( REALLOC, doFree( oaddr ) );					// => free( oaddr )

	// Attempt to reuse existing alignment.
	Heap.Storage.Header * header = HeaderAddr( oaddr );
	bool isFakeHeader = AlignmentBit( header );			// old fake header ?
	size_t oalign;
	if ( unlikely( isFakeHeader ) ) {
		checkAlign( nalign );							// check alignment
		oalign = ClearAlignmentBit( header );			// old alignment
		if ( unlikely( (uintptr_t)oaddr % nalign == 0	// lucky match ?
			 && ( oalign <= nalign						// going down
				  || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
			) ) {
			HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
			return realloc( oaddr, size );				// duplicate special case checks
		} // if
	} else if ( ! isFakeHeader							// old real header (aligned on libAlign) ?
				&& nalign == libAlign() ) {				// new alignment also on libAlign => no fake header needed
		return realloc( oaddr, size );					// duplicate special case checks
	} // if

	Heap.FreeHeader * freeHead;
	size_t bsize;
	headers( "realloc", oaddr, header, freeHead, bsize, oalign );

	// change size and copy old content to new storage

	size_t osize = header->kind.real.size;				// old allocation size
	bool ozfill = ZeroFillBit( header );				// old allocation zero filled

	void * naddr = memalignNoStats( nalign, size STAT_ARG( REALLOC ) ); // create new aligned area

	headers( "realloc", naddr, header, freeHead, bsize, oalign );
	memcpy( naddr, oaddr, min( osize, size ) );			// copy bytes
	doFree( oaddr );									// free previous storage

	if ( unlikely( ozfill ) ) {							// previous request zero fill ?
		MarkZeroFilledBit( header );					// mark new request as zero filled
		if ( size > osize ) {							// previous request larger ?
			memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
		} // if
	} // if
	return naddr;
} // realloc


void * reallocarray( void * oaddr, size_t nalign, size_t dim, size_t elemSize ) __THROW {
	return realloc( oaddr, nalign, dim * elemSize );
} // reallocarray


// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa -nodebug -O2 heap.cfa" //
// End: //