#include "heap.h"

#include <algorithm>                                    // lower_bound, min
#include <cstring>                                      // strlen, memset, memcpy
#include <climits>                                      // ULONG_MAX
#include <cstdarg>                                      // va_start, va_end
#include <cstdio>                                       // snprintf, vfprintf, fileno
#include <cerrno>                                       // errno, ENOMEM, EINVAL
#include <cassert>
#include <unistd.h>                                     // STDERR_FILENO, sbrk, sysconf, write
#include <sys/mman.h>                                   // mmap, munmap
#include <cstdint>                                      // uintptr_t, uint64_t, uint32_t
#include <new>                                          // set_new_handler

#define TLS
#define AWAYSPIN                                        // toggle spinlock / lockfree stack
#define FASTLOOKUP                                      // use O(1) table lookup from allocation size to bucket size
#define CFA_THREADSAFE_HEAP

#ifdef __CFA_DEBUG__
#define __DEBUG__
#endif // __CFA_DEBUG__

#define LIKELY(x)   __builtin_expect(!!(x), 1)
#define UNLIKELY(x) __builtin_expect(!!(x), 0)

#define str(s) #s
#define xstr(s) str(s)
#define WARNING( s ) xstr( GCC diagnostic ignored str( -W ## s ) )
#define NOWARNING( statement, warning ) \
    _Pragma( "GCC diagnostic push" ) \
    _Pragma( WARNING( warning ) ) \
    statement ; \
    _Pragma ( "GCC diagnostic pop" )
|
---|
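// Example use (as elsewhere in this file): suppress -Wunused-result around a raw write,
// because streams may be unavailable when diagnostics are printed, e.g.
//   NOWARNING( write( STDERR_FILENO, helpText, len ), unused-result );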

enum { __ALIGN__ = 16,                                  // minimum allocation alignment, bytes
       __DEFAULT_HEAP_EXPANSION__ = 2 * 1024 * 1024,    // sbrk extension amount when full
       __DEFAULT_MMAP_START__ = 512 * 1024 + 1,         // crossover allocation size from sbrk to mmap
       __DEFAULT_HEAP_UNFREED__ = 0,                    // amount subtracted to adjust for unfreed program storage
}; // enum

#ifdef TLS
#define TLSMODEL __attribute__(( tls_model("initial-exec") ))
#else
#define TLSMODEL
#endif // TLS

#ifdef CFA_THREADSAFE_HEAP
extern "C" {
    void enable_interrupts();
    void disable_interrupts();
}
#define ENABLE_INTERRUPTS (void)enable_interrupts()
#define DISABLE_INTERRUPTS (void)disable_interrupts()
#else
#define ENABLE_INTERRUPTS
#define DISABLE_INTERRUPTS
#endif // CFA_THREADSAFE_HEAP

//######################### Helpers #########################


// Called by macro assert in assert.h. Replace to prevent recursive call to malloc.
/*
void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) {
    extern const char * __progname;                     // global name of running executable (argv[0])
    char helpText[1024];
    int len = snprintf( helpText, sizeof(helpText), "Internal assertion error \"%s\" from program \"%s\" in \"%s\" at line %d in file \"%s\".\n",
                        assertion, __progname, function, line, file );
    NOWARNING( write( STDERR_FILENO, helpText, len ), unused-result );
    abort();
    // CONTROL NEVER REACHES HERE!
} // __assert_fail
FIXME */

void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ ));
void abort( const char fmt[], ... ) {                   // overload real abort
    va_list args;
    va_start( args, fmt );
    vfprintf( stderr, fmt, args );
    if ( fmt[strlen( fmt ) - 1] != '\n' ) {             // add optional newline if missing at the end of the format text
        vfprintf( stderr, "\n", args );                 // g++-10 does not allow nullptr for va_list
    } // if
    va_end( args );
    abort();                                            // call the real abort
    // CONTROL NEVER REACHES HERE!
} // abort

static inline bool Pow2( unsigned long int value ) {
    // clearing the lowest set bit leaves zero iff value is 0 or a power of 2
    return (value & (value - 1)) == 0;
} // Pow2

static inline unsigned long int Floor( unsigned long int value, unsigned long int align ) {
    assert( Pow2( align ) );
    // clears all bits below align, rounding value down to the next lower multiple of align
    return value & -align;
} // Floor

static inline unsigned long int Ceiling( unsigned long int value, unsigned long int align ) {
    assert( Pow2( align ) );
    // "negate, round down, negate" is the same as round up
    return -Floor( -value, align );
} // Ceiling
|
---|
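// A minimal sketch (illustrative, not part of the original interface) of how the rounding
// helpers compose: Floor( 23, 16 ) == 16 and Ceiling( 23, 16 ) == 32. alignUp is a
// hypothetical helper rounding a raw break pointer up to the minimum alignment, the same
// pattern heapMasterCtor uses on the initial sbrk(0) result.
static inline void * alignUp( void * addr ) {
    return (void *)Ceiling( (uintptr_t)addr, __ALIGN__ ); // round address up to 16-byte boundary
} // alignUp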

template< typename T > static inline T AtomicFetchAdd( volatile T & counter, int increment ) {
    return __atomic_fetch_add( &counter, increment, __ATOMIC_SEQ_CST );
} // AtomicFetchAdd
|
---|
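// Usage example (from doFree below): atomically bump a shared counter once the
// thread-local heap is gone, e.g.
//   AtomicFetchAdd( heapMaster.stats.free_storage_request, header->kind.real.size );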


//######################### Spin Lock #########################


#define CACHE_ALIGN 128                                 // Intel recommendation
#define CALIGN __attribute__(( aligned(CACHE_ALIGN) ))

// pause to prevent excess processor bus usage
#if defined( __i386 ) || defined( __x86_64 )
#define Pause() __asm__ __volatile__ ( "pause" : : : )
#elif defined(__ARM_ARCH)
#define Pause() __asm__ __volatile__ ( "YIELD" : : : )
#else
#error unsupported architecture
#endif

typedef volatile uintptr_t SpinLock_t CALIGN;           // aligned addressable word-size

void spin_acquire( volatile SpinLock_t * lock ) {
    enum { SPIN_START = 4, SPIN_END = 64 * 1024, };
    unsigned int spin = SPIN_START;

    for ( unsigned int i = 1;; i += 1 ) {
        if ( *lock == 0 && __atomic_test_and_set( lock, __ATOMIC_SEQ_CST ) == 0 ) break; // Fence
        for ( volatile unsigned int s = 0; s < spin; s += 1 ) Pause(); // exponential spin
        spin += spin;                                   // powers of 2
        //if ( i % 64 == 0 ) spin += spin;              // slowly increase by powers of 2
        if ( spin > SPIN_END ) spin = SPIN_END;         // cap spinning
    } // for
} // spin_acquire

void spin_release( volatile SpinLock_t * lock ) {
    __atomic_clear( lock, __ATOMIC_SEQ_CST );           // Fence
} // spin_release
|
---|
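// Usage sketch (illustrative): a SpinLock_t must start zero (unlocked); acquire/release
// bracket short critical sections, as with masterMgrLock below.
//   static SpinLock_t testLock = 0;                    // hypothetical lock
//   spin_acquire( &testLock );
//   /* ... critical section ... */
//   spin_release( &testLock );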


//####################### Heap Statistics ####################


#ifdef __STATISTICS__
enum { CntTriples = 12 };                               // number of counter triples
struct HeapStatistics {
    enum { MALLOC, AALLOC, CALLOC, MEMALIGN, AMEMALIGN, CMEMALIGN, RESIZE, REALLOC };
    union {
        struct {
            unsigned int malloc_calls, malloc_0_calls;
            unsigned long long int malloc_storage_request, malloc_storage_alloc;
            unsigned int aalloc_calls, aalloc_0_calls;
            unsigned long long int aalloc_storage_request, aalloc_storage_alloc;
            unsigned int calloc_calls, calloc_0_calls;
            unsigned long long int calloc_storage_request, calloc_storage_alloc;
            unsigned int memalign_calls, memalign_0_calls;
            unsigned long long int memalign_storage_request, memalign_storage_alloc;
            unsigned int amemalign_calls, amemalign_0_calls;
            unsigned long long int amemalign_storage_request, amemalign_storage_alloc;
            unsigned int cmemalign_calls, cmemalign_0_calls;
            unsigned long long int cmemalign_storage_request, cmemalign_storage_alloc;
            unsigned int resize_calls, resize_0_calls;
            unsigned long long int resize_storage_request, resize_storage_alloc;
            unsigned int realloc_calls, realloc_0_calls;
            unsigned long long int realloc_storage_request, realloc_storage_alloc;
            unsigned int free_calls, free_null_calls;
            unsigned long long int free_storage_request, free_storage_alloc;
            unsigned int away_pulls, away_pushes;
            unsigned long long int away_storage_request, away_storage_alloc;
            unsigned int mmap_calls, mmap_0_calls;      // no zero calls
            unsigned long long int mmap_storage_request, mmap_storage_alloc;
            unsigned int munmap_calls, munmap_0_calls;  // no zero calls
            unsigned long long int munmap_storage_request, munmap_storage_alloc;
        };
        struct {                                        // overlay for iteration
            unsigned int cnt1, cnt2;
            unsigned long long int cnt3, cnt4;
        } counters[CntTriples];
    };

    HeapStatistics() {
        for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
            counters[i].cnt1 = counters[i].cnt2 = counters[i].cnt3 = counters[i].cnt4 = 0;
        } // for
    } // HeapStatistics::HeapStatistics

    friend HeapStatistics & operator+=( HeapStatistics & lhs, const HeapStatistics & rhs ) {
        for ( unsigned int i = 0; i < CntTriples; i += 1 ) {
            lhs.counters[i].cnt1 += rhs.counters[i].cnt1;
            lhs.counters[i].cnt2 += rhs.counters[i].cnt2;
            lhs.counters[i].cnt3 += rhs.counters[i].cnt3;
            lhs.counters[i].cnt4 += rhs.counters[i].cnt4;
        } // for
        return lhs;
    } // HeapStatistics::operator+=
}; // HeapStatistics

static_assert( sizeof(HeapStatistics) == CntTriples * sizeof(HeapStatistics::counters[0] ),
               "Heap statistics counter-triples do not match the array size" );
#endif // __STATISTICS__


//####################### Heap Structure ####################


struct Heap {
    struct FreeHeader;                                  // forward declaration

    struct Storage {
        struct Header {                                 // header
            union Kind {
                struct RealHeader {
                    union {
                        struct {                        // 4-byte word => 8-byte header, 8-byte word => 16-byte header
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4
                            uint64_t padding;           // unused, force home/blocksize to overlay alignment in fake header
#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && __SIZEOF_POINTER__ == 4

                            union {
                                // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped
                                FreeHeader * home;      // allocated block points back to home location (must overlay alignment)
                                size_t blockSize;       // size for munmap (must overlay alignment)
                                Storage * next;         // freed block points to next freed block of same size
                            };
                            size_t size;                // allocation size in bytes

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
                            uint64_t padding;           // unused, force home/blocksize to overlay alignment in fake header
#endif // __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ && __SIZEOF_POINTER__ == 4
                        };
                    };
                } real;                                 // RealHeader
                struct FakeHeader {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
                    uint32_t alignment;                 // 1st low-order bit => fake header & alignment
#endif // __ORDER_LITTLE_ENDIAN__

                    uint32_t offset;

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                    uint32_t alignment;                 // 1st low-order bit => fake header & alignment
#endif // __ORDER_BIG_ENDIAN__
                } fake;                                 // FakeHeader
            } kind;                                     // Kind
        } header;                                       // Header

        char pad[__ALIGN__ - sizeof( Header )];
        char data[0];                                   // storage
    }; // Storage

    static_assert( __ALIGN__ >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" );

    struct FreeHeader {
#ifdef AWAYSPIN
        SpinLock_t awayLock;                            // LOCK(S) MUST BE FIRST FIELD(S) FOR ALIGNMENT
#endif // AWAYSPIN

        Storage * freeList;                             // thread free list
        Storage * awayList;                             // other thread return list

        Heap * homeManager;                             // heap owner (free storage to bucket, from bucket to heap)
        size_t blockSize;                               // size of allocations on this list

        bool operator<( const size_t bsize ) const { return blockSize < bsize; }
    }; // FreeHeader

    // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
    // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
    enum {
#ifdef FASTLOOKUP
        LookupSizes = 65'536 + sizeof(Storage),         // number of fast lookup sizes
#endif // FASTLOOKUP
        NoBucketSizes = 91,                             // number of bucket sizes
    }; // enum

    FreeHeader freeLists[NoBucketSizes];                // buckets for different allocation sizes
    void * heapBuffer;
    size_t heapReserve;

    Heap * nextHeapManager;                             // intrusive link of existing heaps; traversed to collect statistics
    Heap * nextFreeHeapManager;                         // intrusive link of free heaps from terminated threads; reused by new threads

#ifdef __DEBUG__
    long long int allocUnfreed;                         // running total of allocations minus frees; can be negative
#endif // __DEBUG__

#ifdef __STATISTICS__
    HeapStatistics stats;                               // local statistic table for this heap
#endif // __STATISTICS__

    static void heapManagerCtor(
#ifdef __DEBUG__
        size_t size
#endif // __DEBUG__
    );
    static void heapManagerDtor();
}; // Heap


struct ThreadManager {
    int dummy;                                          // used to trigger allocation of storage
    ~ThreadManager() { Heap::heapManagerDtor(); }       // called automagically when thread terminates
}; // ThreadManager


struct HeapMaster {
    SpinLock_t masterExtLock;                           // protects allocation-buffer extension
    SpinLock_t masterMgrLock;                           // protects freeHeapManagersList, heapManagersList, heapManagersStorage, heapManagersStorageEnd

#ifdef FASTLOOKUP
    unsigned char lookup[Heap::LookupSizes];            // O(1) lookup for small sizes
#endif // FASTLOOKUP

    static const unsigned int bucketSizes[];            // initialized statically, outside constructor
    void * heapBegin;                                   // start of heap
    void * heapEnd;                                     // logical end of heap
    size_t heapRemaining;                               // amount of storage not allocated in the current chunk
    size_t pageSize;                                    // architecture pagesize
    size_t heapExpand;                                  // sbrk advance
    size_t mmapStart;                                   // crossover point for mmap
    unsigned int maxBucketsUsed;                        // maximum number of buckets in use
    static const off_t mmapFd;                          // fake or actual fd for anonymous file, initialized statically, outside constructor

    Heap * heapManagersList;                            // heap-list head
    Heap * freeHeapManagersList;                        // free-list head

    // Heap superblocks are not linked; heaps in superblocks are linked via intrusive links.
    Heap * heapManagersStorage;                         // next heap to use in heap superblock
    Heap * heapManagersStorageEnd;                      // logical heap outside of superblock's end

#ifdef __STATISTICS__
    unsigned long int threads_started, threads_exited;  // counts threads that have started and exited
    unsigned long int reused_heap, new_heap;            // counts reusability of heaps
    unsigned int sbrk_calls;
    unsigned long long int sbrk_storage;
    int stats_fd;
    HeapStatistics stats;                               // global stats for thread-local heaps to add their counters when exiting
#endif // __STATISTICS__

    // Prevents two threads from constructing heapMaster.
    static volatile bool heapMasterBootFlag;            // trigger for first heap

#ifdef __DEBUG__
    long long int allocUnfreed;
#endif // __DEBUG__

    static void heapMasterCtor();
    static void heapMasterDtor();
}; // HeapMaster

extern "C" {
    int __map_prot = PROT_READ | PROT_WRITE | PROT_EXEC; // common mmap/mprotect protection
    size_t __cfa_page_size;                             // architecture pagesize
}

volatile bool HeapMaster::heapMasterBootFlag = false;
static HeapMaster heapMaster;                           // program global

// Thread-local storage is allocated lazily when the storage is accessed.
static thread_local size_t PAD1 CALIGN TLSMODEL __attribute__(( unused )); // protect false sharing
static thread_local ThreadManager threadManager CALIGN TLSMODEL;
// Do not put heapManager in ThreadManager because thread-local destructor results in extra access code.
static thread_local Heap * heapManager CALIGN TLSMODEL;
static thread_local bool heapManagerBootFlag CALIGN TLSMODEL = false;
static thread_local size_t PAD2 CALIGN TLSMODEL __attribute__(( unused )); // protect further false sharing


#ifdef __DEBUG__
extern "C" {
    void heapAppStart( void ) {
        assert( heapManager );
        heapManager->allocUnfreed = 0;
    } // heapAppStart

    void heapAppStop( void ) {
        long long int allocUnfreed = heapMaster.allocUnfreed;
        for ( Heap * heap = heapMaster.heapManagersList; heap; heap = heap->nextHeapManager ) {
            allocUnfreed += heap->allocUnfreed;
        } // for

        allocUnfreed -= malloc_unfreed();
        if ( allocUnfreed > 0 ) {
            // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
            char helpText[512];
            int len = snprintf( helpText, sizeof(helpText), "Runtime warning (UNIX pid:%ld) : program terminating with %lld(0x%llx) bytes of storage allocated but not freed.\n"
                                "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
                                (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid
            NOWARNING( write( STDERR_FILENO, helpText, len ), unused-result );
        } // if
    } // heapAppStop
} // extern "C"
#endif // __DEBUG__


// declare helper functions for HeapMaster
void noMemory();                                        // forward, called by "builtin_new" when malloc returns 0

void HeapMaster::heapMasterCtor() {
    // Singleton pattern to initialize heap master
    __cfa_page_size = sysconf( _SC_PAGESIZE );

    assert( heapMaster.mmapFd == -1 );
    assert( heapMaster.bucketSizes[0] == (16 + sizeof(Heap::Storage)) );

    heapMaster.masterExtLock = 0;
    heapMaster.masterMgrLock = 0;

    char * end = (char *)sbrk( 0 );
    heapMaster.heapBegin = heapMaster.heapEnd = sbrk( (char *)Ceiling( (long unsigned int)end, __ALIGN__ ) - end ); // move start of heap to multiple of alignment
    heapMaster.heapRemaining = 0;
    heapMaster.heapExpand = malloc_expansion();
    heapMaster.mmapStart = malloc_mmap_start();

    // find the closest bucket size greater than or equal to the mmapStart size
    heapMaster.maxBucketsUsed = std::lower_bound( heapMaster.bucketSizes, heapMaster.bucketSizes + (Heap::NoBucketSizes - 1), heapMaster.mmapStart ) - heapMaster.bucketSizes; // binary search

    assert( (heapMaster.mmapStart >= __cfa_page_size) && (heapMaster.bucketSizes[Heap::NoBucketSizes - 1] >= heapMaster.mmapStart) );
    assert( heapMaster.maxBucketsUsed < Heap::NoBucketSizes ); // subscript failure ?
    assert( heapMaster.mmapStart <= heapMaster.bucketSizes[heapMaster.maxBucketsUsed] ); // search failure ?

    heapMaster.heapManagersList = nullptr;
    heapMaster.freeHeapManagersList = nullptr;

    heapMaster.heapManagersStorage = nullptr;
    heapMaster.heapManagersStorageEnd = nullptr;

#ifdef __STATISTICS__
    heapMaster.threads_started = heapMaster.threads_exited = 0;
    heapMaster.reused_heap = heapMaster.new_heap = 0;
    heapMaster.sbrk_calls = heapMaster.sbrk_storage = 0;
    heapMaster.stats_fd = STDERR_FILENO;
#endif // __STATISTICS__

#ifdef __DEBUG__
    heapMaster.allocUnfreed = 0;
#endif // __DEBUG__

#ifdef FASTLOOKUP
    for ( unsigned int i = 0, idx = 0; i < Heap::LookupSizes; i += 1 ) {
        if ( i > heapMaster.bucketSizes[idx] ) idx += 1;
        heapMaster.lookup[i] = idx;
        assert( i <= heapMaster.bucketSizes[idx] );
        assert( (i <= 32 && idx == 0) || (i > heapMaster.bucketSizes[idx - 1]) );
    } // for
#endif // FASTLOOKUP

    std::set_new_handler( noMemory );                   // do not throw exception as the default

    HeapMaster::heapMasterBootFlag = true;
} // HeapMaster::heapMasterCtor


#define NO_MEMORY_MSG "insufficient heap memory available for allocating %zd new bytes."

void Heap::heapManagerCtor(
#ifdef __DEBUG__
    size_t size
#endif // __DEBUG__
) {
    if ( UNLIKELY( ! HeapMaster::heapMasterBootFlag ) ) HeapMaster::heapMasterCtor();

    // Trigger thread_local storage implicit allocation (causes recursive call)
    volatile int dummy __attribute__(( unused )) = threadManager.dummy;

    spin_acquire( &heapMaster.masterMgrLock );          // protect heapMaster counters
    // The atomic test-and-set instruction is a fence so heapManagerBootFlag is read after the magic recursive call to
    // initialize thread-local storage. Hence, heapManagerBootFlag is NOT declared as volatile.
    if ( heapManagerBootFlag ) {                        // singleton
        spin_release( &heapMaster.masterMgrLock );
        return;                                         // always return on recursive initiation
    } // if

    assert( ! heapManagerBootFlag );

    // get storage for heap manager

    if ( heapMaster.freeHeapManagersList ) {            // free heap for reuse ?
        heapManager = heapMaster.freeHeapManagersList;
        heapMaster.freeHeapManagersList = heapManager->nextFreeHeapManager;

#ifdef __STATISTICS__
        heapMaster.reused_heap += 1;
#endif // __STATISTICS__
    } else {                                            // free heap not found, create new
        // Heap size is about 12K, FreeHeader (128 bytes because of cache alignment) * NoBucketSizes (91) => 128 heaps *
        // 12K ~= 1.5M byte superblock. A 128-heap superblock handles a medium sized multi-processor server.
        enum { HeapDim = 128 };                         // number of heaps in superblock
        size_t remaining = heapMaster.heapManagersStorageEnd - heapMaster.heapManagersStorage; // remaining free heaps in superblock
        if ( ! heapMaster.heapManagersStorage || remaining == 0 ) { // superblock missing or exhausted ?
            size_t size = HeapDim * sizeof( Heap );
            heapMaster.heapManagersStorage = (Heap *)mmap( 0, size, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, heapMaster.mmapFd, 0 );
            if ( UNLIKELY( heapMaster.heapManagersStorage == MAP_FAILED ) ) { // failed ?
                if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, size ); // no memory
                // Do not call strerror( errno ) as it may call malloc.
                abort( "heapManagerCtor() : internal error, mmap failure, size:%zu error %d.",
                       size, errno );
            } // if
            heapMaster.heapManagersStorageEnd = &heapMaster.heapManagersStorage[HeapDim]; // outside array
        } // if

        heapManager = heapMaster.heapManagersStorage;
        heapMaster.heapManagersStorage = heapMaster.heapManagersStorage + 1; // bump next heap

        heapManager->nextHeapManager = heapMaster.heapManagersList;
        heapMaster.heapManagersList = heapManager;

#ifdef __STATISTICS__
        heapMaster.new_heap += 1;
#endif // __STATISTICS__
    } // if

#ifdef __STATISTICS__
    heapMaster.threads_started += 1;
#endif // __STATISTICS__

#ifdef __DEBUG__
    heapManager->allocUnfreed -= size;
#endif // __DEBUG__

    spin_release( &heapMaster.masterMgrLock );

    for ( unsigned int j = 0; j < Heap::NoBucketSizes; j += 1 ) { // initialize free lists
        heapManager->freeLists[j] = (Heap::FreeHeader){
#ifdef AWAYSPIN
            .awayLock = 0,
#endif // AWAYSPIN
            .freeList = nullptr,
            .awayList = nullptr,
            .homeManager = heapManager,
            .blockSize = heapMaster.bucketSizes[j],
        };
    } // for

    heapManager->heapBuffer = nullptr;
    heapManager->heapReserve = 0;
    heapManager->nextFreeHeapManager = nullptr;
    heapManagerBootFlag = true;
} // Heap::heapManagerCtor


void Heap::heapManagerDtor() {
    if ( UNLIKELY( ! heapManagerBootFlag ) ) return;

    DISABLE_INTERRUPTS;

    spin_acquire( &heapMaster.masterMgrLock );

    // place heap on list of free heaps for reusability
    heapManager->nextFreeHeapManager = heapMaster.freeHeapManagersList;
    heapMaster.freeHeapManagersList = heapManager;

    // SKULLDUGGERY: The thread heap ends BEFORE the last free(s) occur from the thread-local storage allocations for
    // the thread. This final allocation must be handled in doFree for this thread and its terminated heap. However,
    // this heap has just been put on the heap freelist, and hence there is a race between returning the thread-local
    // storage and a new thread using this heap. The current thread detects it is executing its last free in doFree via
    // heapManager being null. The trick is for this thread to place the last free onto the current heap's away-list, as
    // the free-storage header points at this heap. Now, even if other threads are pushing to the away list, it is safe
    // because of the locking.
    heapManager = nullptr;

#ifdef __STATISTICS__
    heapMaster.threads_exited += 1;
#endif // __STATISTICS__

    spin_release( &heapMaster.masterMgrLock );

    ENABLE_INTERRUPTS;
} // Heap::heapManagerDtor


const off_t HeapMaster::mmapFd = -1;
// Bucket size must be multiple of 16.
// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
const unsigned int HeapMaster::bucketSizes[] = {        // different bucket sizes
    16 + sizeof(Heap::Storage), 32 + sizeof(Heap::Storage), 48 + sizeof(Heap::Storage), 64 + sizeof(Heap::Storage), // 4
    96 + sizeof(Heap::Storage), 112 + sizeof(Heap::Storage), 128 + sizeof(Heap::Storage), // 3
    160, 192, 224, 256 + sizeof(Heap::Storage),         // 4
    320, 384, 448, 512 + sizeof(Heap::Storage),         // 4
    640, 768, 896, 1'024 + sizeof(Heap::Storage),       // 4
    1'536, 2'048 + sizeof(Heap::Storage),               // 2
    2'560, 3'072, 3'584, 4'096 + sizeof(Heap::Storage), // 4
    6'144, 8'192 + sizeof(Heap::Storage),               // 2
    9'216, 10'240, 11'264, 12'288, 13'312, 14'336, 15'360, 16'384 + sizeof(Heap::Storage), // 8
    18'432, 20'480, 22'528, 24'576, 26'624, 28'672, 30'720, 32'768 + sizeof(Heap::Storage), // 8
    36'864, 40'960, 45'056, 49'152, 53'248, 57'344, 61'440, 65'536 + sizeof(Heap::Storage), // 8
    73'728, 81'920, 90'112, 98'304, 106'496, 114'688, 122'880, 131'072 + sizeof(Heap::Storage), // 8
    147'456, 163'840, 180'224, 196'608, 212'992, 229'376, 245'760, 262'144 + sizeof(Heap::Storage), // 8
    294'912, 327'680, 360'448, 393'216, 425'984, 458'752, 491'520, 524'288 + sizeof(Heap::Storage), // 8
    655'360, 786'432, 917'504, 1'048'576 + sizeof(Heap::Storage), // 4
    1'179'648, 1'310'720, 1'441'792, 1'572'864, 1'703'936, 1'835'008, 1'966'080, 2'097'152 + sizeof(Heap::Storage), // 8
    2'621'440, 3'145'728, 3'670'016, 4'194'304 + sizeof(Heap::Storage), // 4
};

static_assert( Heap::NoBucketSizes == sizeof(HeapMaster::bucketSizes) / sizeof(HeapMaster::bucketSizes[0]), "size of bucket array wrong" );
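// A minimal sketch (hypothetical helper, mirroring the lookup logic doMalloc uses below):
// map a total request size to its bucket index, via the O(1) table for small sizes or a
// binary search over bucketSizes otherwise.
static inline size_t bucketIndex( size_t tsize ) {
#ifdef FASTLOOKUP
    if ( tsize < Heap::LookupSizes ) return heapMaster.lookup[tsize]; // O(1) table lookup
#endif // FASTLOOKUP
    return std::lower_bound( heapMaster.bucketSizes, heapMaster.bucketSizes + heapMaster.maxBucketsUsed, tsize )
        - heapMaster.bucketSizes;                       // first bucket size >= tsize
} // bucketIndex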


//####################### Memory Allocation Routines' Helpers ####################


#ifdef __STATISTICS__
static inline HeapStatistics & collectStats( HeapStatistics & stats ) {
    spin_acquire( &heapMaster.masterMgrLock );

    stats += heapMaster.stats;
    for ( Heap * node = heapMaster.heapManagersList; node; node = node->nextHeapManager ) {
        stats += node->stats;
    } // for

    spin_release( &heapMaster.masterMgrLock );
    return stats;
} // collectStats

// Use "write" because streams may be shutdown when calls are made.
static void printStats( HeapStatistics & stats ) {
    char helpText[1024];
    int len = snprintf( helpText, sizeof(helpText),
        "\nHeap statistics: (storage request / allocation)\n"
        " malloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n"
        " aalloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n"
        " calloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n"
        " memalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n"
        " amemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n"
        " cmemalign >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n"
        " resize >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n"
        " realloc >0 calls %'u; 0 calls %'u; storage %'llu / %'llu bytes\n"
        " free !null calls %'u; null calls %'u; storage %'llu / %'llu bytes\n"
        " away pulls %'u; pushes %'u; storage %'llu / %'llu bytes\n"
        " sbrk calls %'u; storage %'llu bytes\n"
        " mmap calls %'u; storage %'llu / %'llu bytes\n"
        " munmap calls %'u; storage %'llu / %'llu bytes\n"
        " threads started %'lu; exited %'lu\n"
        " heaps new %'lu; reused %'lu\n",
        stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc,
        stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc,
        stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc,
        stats.memalign_calls, stats.memalign_0_calls, stats.memalign_storage_request, stats.memalign_storage_alloc,
        stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc,
        stats.cmemalign_calls, stats.cmemalign_0_calls, stats.cmemalign_storage_request, stats.cmemalign_storage_alloc,
        stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc,
        stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc,
        stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc,
        stats.away_pulls, stats.away_pushes, stats.away_storage_request, stats.away_storage_alloc,
        heapMaster.sbrk_calls, heapMaster.sbrk_storage,
        stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc,
        stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc,
        heapMaster.threads_started, heapMaster.threads_exited,
        heapMaster.new_heap, heapMaster.reused_heap
    );
    NOWARNING( write( heapMaster.stats_fd, helpText, len ), unused-result );
} // printStats


static int printStatsXML( HeapStatistics & stats, FILE * stream ) {
    char helpText[1024];
    int len = snprintf( helpText, sizeof(helpText),
        "<malloc version=\"1\">\n"
        "<heap nr=\"0\">\n"
        "<sizes>\n"
        "</sizes>\n"
        "<total type=\"malloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n"
        "<total type=\"aalloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n"
        "<total type=\"calloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n"
        "<total type=\"memalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n"
        "<total type=\"amemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n"
        "<total type=\"cmemalign\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n"
        "<total type=\"resize\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n"
        "<total type=\"realloc\" >0 count=\"%'u;\" 0 count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n"
        "<total type=\"free\" !null=\"%'u;\" 0 null=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n"
        "<total type=\"away\" pulls=\"%'u;\" 0 pushes=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n"
        "<total type=\"sbrk\" count=\"%'u;\" size=\"%'llu\"/> bytes\n"
        "<total type=\"mmap\" count=\"%'u;\" size=\"%'llu / %'llu\" / > bytes\n"
        "<total type=\"munmap\" count=\"%'u;\" size=\"%'llu / %'llu\"/> bytes\n"
        "<total type=\"threads\" started=\"%'lu;\" exited=\"%'lu\"/>\n"
        "</malloc>",
        stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc,
        stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc,
        stats.calloc_calls, stats.calloc_0_calls, stats.calloc_storage_request, stats.calloc_storage_alloc,
        stats.memalign_calls, stats.memalign_0_calls, stats.memalign_storage_request, stats.memalign_storage_alloc,
        stats.amemalign_calls, stats.amemalign_0_calls, stats.amemalign_storage_request, stats.amemalign_storage_alloc,
        stats.cmemalign_calls, stats.cmemalign_0_calls, stats.cmemalign_storage_request, stats.cmemalign_storage_alloc,
        stats.resize_calls, stats.resize_0_calls, stats.resize_storage_request, stats.resize_storage_alloc,
        stats.realloc_calls, stats.realloc_0_calls, stats.realloc_storage_request, stats.realloc_storage_alloc,
        stats.free_calls, stats.free_null_calls, stats.free_storage_request, stats.free_storage_alloc,
        stats.away_pulls, stats.away_pushes, stats.away_storage_request, stats.away_storage_alloc,
        heapMaster.sbrk_calls, heapMaster.sbrk_storage,
        stats.mmap_calls, stats.mmap_storage_request, stats.mmap_storage_alloc,
        stats.munmap_calls, stats.munmap_storage_request, stats.munmap_storage_alloc,
        heapMaster.threads_started, heapMaster.threads_exited
    );
    NOWARNING( write( fileno(stream), helpText, len ), unused-result );
    return len;
} // printStatsXML
#endif // __STATISTICS__


inline void noMemory() {
    abort( "Heap memory exhausted at %zu bytes.\n"
           "Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
           ((char *)(sbrk( 0 )) - (char *)(heapMaster.heapBegin)) );
} // noMemory


static bool setMmapStart( size_t value ) {
    if ( value < __cfa_page_size || heapMaster.bucketSizes[Heap::NoBucketSizes - 1] < value ) return false;
    heapMaster.mmapStart = value;                       // set global

    // find the closest bucket size greater than or equal to the mmapStart size
    heapMaster.maxBucketsUsed = std::lower_bound( heapMaster.bucketSizes, heapMaster.bucketSizes + (Heap::NoBucketSizes - 1), heapMaster.mmapStart ) - heapMaster.bucketSizes; // binary search
    assert( heapMaster.maxBucketsUsed < Heap::NoBucketSizes ); // subscript failure ?
    assert( heapMaster.mmapStart <= heapMaster.bucketSizes[heapMaster.maxBucketsUsed] ); // search failure ?
    return true;
} // setMmapStart

// <-------+----------------------------------------------------> bsize (bucket size)
// |header |addr
//==================================================================================
// align/offset |
// <-----------------<------------+-----------------------------> bsize (bucket size)
//                   |fake-header | addr
#define headerAddr( addr ) ((Heap::Storage::Header *)( (char *)addr - sizeof(Heap::Storage) ))
#define realHeader( header ) ((Heap::Storage::Header *)((char *)header - header->kind.fake.offset))

// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
// |header |addr
//==================================================================================
// align/offset |
// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
//                                 |fake-header |addr
#define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))


static inline void checkAlign( size_t alignment ) {
    if ( UNLIKELY( alignment < __ALIGN__ || ! Pow2( alignment ) ) ) {
        abort( "Alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, __ALIGN__ );
    } // if
} // checkAlign


static inline void checkHeader( bool check, const char name[], void * addr ) {
    if ( UNLIKELY( check ) ) {                          // bad address ?
        abort( "Attempt to %s storage %p with address outside the heap.\n"
               "Possible cause is duplicate free on same block or overwriting of memory.",
               name, addr );
    } // if
} // checkHeader


static inline void fakeHeader( Heap::Storage::Header *& header, size_t & alignment ) {
    if ( UNLIKELY( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
        alignment = header->kind.fake.alignment & -2;   // remove flag from value
#ifdef __DEBUG__
        checkAlign( alignment );                        // check alignment
#endif // __DEBUG__
        header = realHeader( header );                  // backup from fake to real header
    } else {
        alignment = __ALIGN__;                          // => no fake header
    } // if
} // fakeHeader


static inline bool headers( const char name[] __attribute__(( unused )), void * addr, Heap::Storage::Header *& header, Heap::FreeHeader *& freeHead, size_t & size, size_t & alignment ) {
    header = headerAddr( addr );

    if ( UNLIKELY( addr < heapMaster.heapBegin || heapMaster.heapEnd < addr ) ) { // mmapped ?
        fakeHeader( header, alignment );
        size = header->kind.real.blockSize & -3;        // mmap size
        return true;
    } // if

#ifdef __DEBUG__
    checkHeader( header < heapMaster.heapBegin, name, addr ); // bad low address ?
#endif // __DEBUG__

    // header may be safe to dereference
    fakeHeader( header, alignment );
#ifdef __DEBUG__
    checkHeader( header < heapMaster.heapBegin || heapMaster.heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
#endif // __DEBUG__

    freeHead = (Heap::FreeHeader *)((size_t)header->kind.real.home & -3);
#ifdef __DEBUG__
    Heap * homeManager = freeHead->homeManager;
    if ( UNLIKELY( freeHead < &homeManager->freeLists[0] || &homeManager->freeLists[Heap::NoBucketSizes - 1] < freeHead ) ) {
        abort( "Attempt to %s storage %p with corrupted header.\n"
               "Possible cause is duplicate free on same block or overwriting of header information.",
               name, addr );
    } // if
#endif // __DEBUG__
    size = freeHead->blockSize;
    return false;
} // headers
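// A short sketch (hypothetical, not part of the allocator's API) showing how headers()
// recovers allocation metadata from a user pointer, e.g. for a malloc_usable_size-style query:
// back up to the (possibly fake) header, then subtract the header/padding overhead from the
// bucket or mmap size.
static inline size_t usableSize( void * addr ) {
    Heap::Storage::Header * header;
    Heap::FreeHeader * freeHead;
    size_t bsize, alignment;
    headers( "usableSize", addr, header, freeHead, bsize, alignment );
    return dataStorage( bsize, addr, header );          // bucket size minus header/padding overhead
} // usableSize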


static inline void * master_extend( size_t size ) {
    spin_acquire( &heapMaster.masterExtLock );

    ptrdiff_t rem = heapMaster.heapRemaining - size;
    if ( UNLIKELY( rem < 0 ) ) {
        // If the size requested is bigger than the current remaining storage, increase the size of the heap.

        size_t increase = Ceiling( size > heapMaster.heapExpand ? size : heapMaster.heapExpand, __ALIGN__ );
        if ( UNLIKELY( sbrk( increase ) == (void *)-1 ) ) { // failed, no memory ?
            spin_release( &heapMaster.masterExtLock );
            abort( NO_MEMORY_MSG, size );               // give up
        } // if
#ifdef __STATISTICS__
        heapMaster.sbrk_calls += 1;
        heapMaster.sbrk_storage += increase;
#endif // __STATISTICS__
        rem = heapMaster.heapRemaining + increase - size;
    } // if

    Heap::Storage * block = (Heap::Storage *)heapMaster.heapEnd;
    heapMaster.heapRemaining = rem;
    heapMaster.heapEnd = (char *)heapMaster.heapEnd + size;

    spin_release( &heapMaster.masterExtLock );
    return block;
} // master_extend


static inline void * manager_extend( size_t size ) {
    ptrdiff_t rem = heapManager->heapReserve - size;

    if ( UNLIKELY( rem < 0 ) ) {                        // negative ?
        // If the size requested is bigger than the current remaining reserve, use the current reserve to populate
        // smaller freeLists, and increase the reserve.

        rem = heapManager->heapReserve;                 // positive

        if ( rem >= heapMaster.bucketSizes[0] ) {
            Heap::FreeHeader * freeHead =
#ifdef FASTLOOKUP
                rem < Heap::LookupSizes ? &(heapManager->freeLists[heapMaster.lookup[rem]]) :
#endif // FASTLOOKUP
                std::lower_bound( heapManager->freeLists, heapManager->freeLists + heapMaster.maxBucketsUsed, rem ); // binary search

            if ( UNLIKELY( freeHead->blockSize > (size_t)rem ) ) freeHead -= 1;
            Heap::Storage * block = (Heap::Storage *)heapManager->heapBuffer;

            block->header.kind.real.next = freeHead->freeList; // push on stack
            freeHead->freeList = block;
        } // if

        size_t increase = Ceiling( size > ( heapMaster.heapExpand / 10 ) ? size : ( heapMaster.heapExpand / 10 ), __ALIGN__ );
        heapManager->heapBuffer = master_extend( increase );
        rem = increase - size;
    } // if

    Heap::Storage * block = (Heap::Storage *)heapManager->heapBuffer;
    heapManager->heapReserve = rem;
    heapManager->heapBuffer = (char *)heapManager->heapBuffer + size;

    return block;
} // manager_extend


static inline void * doMalloc( size_t size
#ifdef __STATISTICS__
    , unsigned int counter
#endif // __STATISTICS__
) {
    Heap::Storage * block;

    // Look up size in the size list. Make sure the user request includes space for the header that must be allocated
    // along with the block and is a multiple of the alignment size.
    size_t tsize = size + sizeof(Heap::Storage);

#ifdef __STATISTICS__
    heapManager->stats.counters[counter].cnt1 += 1;
    heapManager->stats.counters[counter].cnt3 += size;
#endif // __STATISTICS__

    if ( LIKELY( tsize < heapMaster.mmapStart ) ) {     // small size => sbrk
        Heap::FreeHeader * freeHead =
#ifdef FASTLOOKUP
            LIKELY( tsize < Heap::LookupSizes ) ? &(heapManager->freeLists[heapMaster.lookup[tsize]]) :
#endif // FASTLOOKUP
            std::lower_bound( heapManager->freeLists, heapManager->freeLists + heapMaster.maxBucketsUsed, tsize ); // binary search

        assert( freeHead <= &heapManager->freeLists[heapMaster.maxBucketsUsed] ); // subscripting error ?
        assert( tsize <= freeHead->blockSize );         // search failure ?
        tsize = freeHead->blockSize;                    // total space needed for request
#ifdef __STATISTICS__
        heapManager->stats.counters[counter].cnt4 += tsize;
#endif // __STATISTICS__

        block = freeHead->freeList;                     // remove node from stack
        if ( UNLIKELY( block == nullptr ) ) {           // no free block ?
            // Freelist for that size is empty, so carve it out of the heap, if there is enough left, or get some more
            // and then carve it off.

#ifdef AWAYSPIN
            spin_acquire( &freeHead->awayLock );
            block = freeHead->awayList;
            freeHead->awayList = nullptr;
            spin_release( &freeHead->awayLock );
#else
            block = __atomic_exchange_n( &freeHead->awayList, nullptr, __ATOMIC_SEQ_CST );
#endif // AWAYSPIN
            if ( LIKELY( block == nullptr ) ) {         // away list also empty ?
                block = (Heap::Storage *)manager_extend( tsize ); // mutual exclusion on call
            } else {                                    // merge awayList into freeHead
#ifdef __STATISTICS__
                heapManager->stats.away_pulls += 1;
#endif // __STATISTICS__
                freeHead->freeList = block->header.kind.real.next;
            } // if
        } else {
            freeHead->freeList = block->header.kind.real.next;
        } // if

        block->header.kind.real.home = freeHead;        // pointer back to free list of appropriate size
    } else {                                            // large size => mmap
        if ( UNLIKELY( size > ULONG_MAX - __cfa_page_size ) ) return nullptr; // error check
        tsize = Ceiling( tsize, __cfa_page_size );      // must be multiple of page size
#ifdef __STATISTICS__
        heapManager->stats.counters[counter].cnt4 += tsize;
        heapManager->stats.mmap_calls += 1;
        heapManager->stats.mmap_storage_request += size;
        heapManager->stats.mmap_storage_alloc += tsize;
#endif // __STATISTICS__

        block = (Heap::Storage *)::mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, heapMaster.mmapFd, 0 );
        if ( UNLIKELY( block == MAP_FAILED ) ) {        // failed ?
            if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
            // Do not call strerror( errno ) as it may call malloc.
            abort( "(Heap &)0x%p.doMalloc() : internal error, mmap failure, size:%zu %zu %zu error %d.",
                   &heapManager, tsize, size, heapMaster.mmapStart, errno );
        } // if
        block->header.kind.real.blockSize = tsize;      // storage size for munmap
    } // if

    block->header.kind.real.size = size;                // store allocation size
    void * addr = &(block->data);                       // adjust off header to user bytes
    assert( ((uintptr_t)addr & (__ALIGN__ - 1)) == 0 ); // minimum alignment ?

#ifdef __DEBUG__
    heapManager->allocUnfreed += size;
#endif // __DEBUG__

    return addr;
} // doMalloc

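// Example flow (illustrative; assumes LP64, where sizeof(Heap::Storage) == 16):
// malloc(100) => tsize = 100 + 16 = 116, which the FASTLOOKUP table maps to the
// 112 + sizeof(Heap::Storage) = 128-byte bucket; the returned address is &block->data,
// 16 bytes past the block's real header.
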
949 | static inline void doFree( void * addr ) {
|
---|
950 | DISABLE_INTERRUPTS;
|
---|
951 |
|
---|
952 | if ( UNLIKELY( ! heapManagerBootFlag ) ) Heap::heapManagerCtor( // trigger for first heap
|
---|
953 | #ifdef __DEBUG__
|
---|
954 | 0
|
---|
955 | #endif // __DEBUG__
|
---|
956 | );
|
---|
957 |
|
---|
958 | Heap::Storage::Header * header;
|
---|
959 | Heap::FreeHeader * freeHead;
|
---|
960 | size_t size, alignment; // not used (see realloc)
|
---|
961 |
|
---|
962 | bool mapped = headers( "free", addr, header, freeHead, size, alignment );
|
---|
963 |
|
---|
964 | if ( UNLIKELY( mapped ) ) { // mmapped ?
|
---|
965 | #ifdef __STATISTICS__
|
---|
966 | heapManager->stats.munmap_calls += 1;
|
---|
967 | heapManager->stats.munmap_storage_request += header->kind.real.size;
|
---|
968 | heapManager->stats.munmap_storage_alloc += size;
|
---|
969 | #endif // __STATISTICS__
|
---|
970 | if ( UNLIKELY( munmap( header, size ) == -1 ) ) {
|
---|
971 | abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
|
---|
972 | "Possible cause is invalid pointer.",
|
---|
973 | addr );
|
---|
974 | } // if
|
---|
975 | } else {
|
---|
976 | if ( LIKELY( heapManager == freeHead->homeManager ) ) { // belongs to this thread
|
---|
977 | header->kind.real.next = freeHead->freeList; // push on stack
|
---|
978 | freeHead->freeList = (Heap::Storage *)header;
|
---|
979 | } else { // return to thread owner
|
---|
980 | #ifdef AWAYSPIN
|
---|
981 | spin_acquire( &freeHead->awayLock );
|
---|
982 | header->kind.real.next = freeHead->awayList; // push to bucket away list
|
---|
983 | freeHead->awayList = (Heap::Storage *)header;
|
---|
984 | spin_release( &freeHead->awayLock );
|
---|
985 | #else // lock free
|
---|
986 | header->kind.real.next = freeHead->awayList; // link new node to top node
|
---|
987 | // CAS resets header->kind.real.next = freeHead->awayList on failure
|
---|
988 | while ( ! __atomic_compare_exchange_n( &freeHead->awayList, &header->kind.real.next, header,
|
---|
989 | false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) );
|
---|
990 | #endif // AWAYSPIN
|
---|
991 |
|
---|
992 | // detect free after thread-local storage destruction and use global stats in that case
|
---|
993 | if ( UNLIKELY( heapManager == nullptr ) ) {
|
---|
994 | #ifdef __STATISTICS__
|
---|
995 | AtomicFetchAdd( heapMaster.stats.free_storage_request, header->kind.real.size );
|
---|
996 | AtomicFetchAdd( heapMaster.stats.free_storage_alloc, size );
|
---|
997 | #endif // __STATISTICS__
|
---|
998 | // away push counters are not incremented because this is a self-away push, and there is no
|
---|
999 | // corresponding pull counter that needs to match.
|
---|
1000 | ENABLE_INTERRUPTS;
|
---|
1001 | return;
|
---|
1002 | } // if
|
---|
1003 |
|
---|
1004 | #ifdef __STATISTICS__
|
---|
1005 | heapManager->stats.away_pushes += 1;
|
---|
1006 | heapManager->stats.away_storage_request += header->kind.real.size;
|
---|
1007 | heapManager->stats.away_storage_alloc += size;
|
---|
1008 | #endif // __STATISTICS__
|
---|
1009 | } // if
|
---|
1010 | } // if
|
---|
1011 |
|
---|
1012 | #ifdef __STATISTICS__
|
---|
1013 | heapManager->stats.free_storage_request += header->kind.real.size;
|
---|
1014 | heapManager->stats.free_storage_alloc += size;
|
---|
1015 | #endif // __STATISTICS__
|
---|
1016 |
|
---|
1017 | #ifdef __DEBUG__
|
---|
1018 | heapManager->allocUnfreed -= header->kind.real.size;
|
---|
1019 | #endif // __DEBUG__
|
---|
1020 |
|
---|
1021 | ENABLE_INTERRUPTS;
|
---|
1022 | } // doFree
|
---|
1023 |
|
---|
1024 |
|
---|
1025 | static inline void * mallocNoStats( size_t size
|
---|
1026 | #ifdef __STATISTICS__
|
---|
1027 | , unsigned int counter
|
---|
1028 | #endif // __STATISTICS__
|
---|
1029 | ) {
|
---|
1030 | DISABLE_INTERRUPTS;
|
---|
1031 |
|
---|
1032 | if ( UNLIKELY( ! heapManagerBootFlag ) ) Heap::heapManagerCtor( // trigger for first heap
|
---|
1033 | #ifdef __DEBUG__
|
---|
1034 | size
|
---|
1035 | #endif // __DEBUG__
|
---|
1036 | );
|
---|
1037 |
|
---|
1038 | if ( UNLIKELY( size ) == 0 || // 0 BYTE ALLOCATION RETURNS NULL POINTER
|
---|
1039 | UNLIKELY( size > ULONG_MAX - sizeof(Heap::Storage) ) ) { // error check
|
---|
1040 | #ifdef __STATISTICS__
|
---|
1041 | heapManager->stats.counters[counter].cnt2 += 1;
|
---|
1042 | #endif // __STATISTICS__
|
---|
1043 | ENABLE_INTERRUPTS;
|
---|
1044 | return nullptr;
|
---|
1045 | } // if
|
---|
1046 |
|
---|
1047 | void * addr = doMalloc( size
|
---|
1048 | #ifdef __STATISTICS__
|
---|
1049 | , counter
|
---|
1050 | #endif // __STATISTICS__
|
---|
1051 | );
|
---|
1052 |
|
---|
1053 | ENABLE_INTERRUPTS;
|
---|
1054 | return addr;
|
---|
1055 | } // mallocNoStats
|
---|


static inline void * memalignNoStats( size_t alignment, size_t size
#ifdef __STATISTICS__
, unsigned int counter
#endif // __STATISTICS__
) {
DISABLE_INTERRUPTS;

if ( UNLIKELY( ! heapManagerBootFlag ) ) Heap::heapManagerCtor( // trigger for first heap
#ifdef __DEBUG__
size
#endif // __DEBUG__
);

if ( UNLIKELY( size == 0 ) || // 0 BYTE ALLOCATION RETURNS NULL POINTER
UNLIKELY( size > ULONG_MAX - sizeof(Heap::Storage) ) ) { // error check
#ifdef __STATISTICS__
heapManager->stats.counters[counter].cnt2 += 1;
#endif // __STATISTICS__

ENABLE_INTERRUPTS;
return nullptr;
} // if

#ifdef __DEBUG__
checkAlign( alignment ); // check alignment
#endif // __DEBUG__

// if alignment <= default alignment, do normal malloc as two headers are unnecessary
if ( UNLIKELY( alignment <= __ALIGN__ ) ) {
void * addr = doMalloc( size
#ifdef __STATISTICS__
, counter
#endif // __STATISTICS__
);

ENABLE_INTERRUPTS;
return addr;
}


// Allocate enough storage to guarantee an address on the alignment boundary, and sufficient space before it for
// administrative storage. NOTE, WHILE THERE ARE 2 HEADERS, THE FIRST ONE IS IMPLICITLY CREATED BY DOMALLOC.
// .-------------v-----------------v----------------v----------,
// | Real Header | ... padding ... | Fake Header | data ... |
// `-------------^-----------------^-+--------------^----------'
// |<--------------------------------' offset/align |<-- alignment boundary
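// Illustrative example (values assumed, not from a specific run): with __ALIGN__ == 16,
// alignment == 64, and size == 100, doMalloc below is asked for
// 100 + 64 - 16 + sizeof(Heap::Storage) bytes. Because doMalloc returns storage aligned
// to __ALIGN__, rounding addr + sizeof(Heap::Storage) up to the next 64-byte boundary
// always lands inside the block, with at least sizeof(Heap::Storage) bytes before the
// boundary for the fake header.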

// subtract __ALIGN__ because it is already the minimum alignment
// add sizeof(Heap::Storage) for fake header
char * addr = (char *)doMalloc( size + alignment - __ALIGN__ + sizeof(Heap::Storage)
#ifdef __STATISTICS__
, counter
#endif // __STATISTICS__
);

// address in the block of the "next" alignment address
char * user = (char *)Ceiling( (uintptr_t)(addr + sizeof(Heap::Storage)), alignment );

// address of header from malloc
Heap::Storage::Header * realHeader = headerAddr( addr );
realHeader->kind.real.size = size; // correct size to eliminate above alignment offset
// address of fake header *before* the alignment location
Heap::Storage::Header * fakeHeader = headerAddr( user );
// SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
// SKULLDUGGERY: odd alignment implies fake header
fakeHeader->kind.fake.alignment = alignment | 1;

ENABLE_INTERRUPTS;
return user;
} // memalignNoStats

// Operators new and new [] call malloc; delete calls free


//####################### Memory Allocation Routines ####################


extern "C" {
// Allocates size bytes and returns a pointer to the allocated memory. The contents are undefined. If size is 0,
// then malloc() returns the null pointer (see the 0-byte check in mallocNoStats).
void * malloc( size_t size ) {
return mallocNoStats( size
#ifdef __STATISTICS__
, HeapStatistics::MALLOC
#endif // __STATISTICS__
);
} // malloc


// Same as malloc() except the allocation is an array of dim elements, each of elemSize bytes.
void * aalloc( size_t dim, size_t elemSize ) {
return mallocNoStats( dim * elemSize
#ifdef __STATISTICS__
, HeapStatistics::AALLOC
#endif // __STATISTICS__
);
} // aalloc
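
// NOTE: as written, dim * elemSize is not checked for overflow here or in calloc below;
// a product that wraps past ULONG_MAX silently allocates the wrapped (smaller) size.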


// Same as aalloc() with memory set to zero.
void * calloc( size_t dim, size_t elemSize ) {
size_t size = dim * elemSize;
char * addr = (char *)mallocNoStats( size
#ifdef __STATISTICS__
, HeapStatistics::CALLOC
#endif // __STATISTICS__
);

if ( UNLIKELY( addr == NULL ) ) return NULL; // stop further processing if 0p is returned

Heap::Storage::Header * header;
Heap::FreeHeader * freeHead;
size_t bsize, alignment;

#ifndef __DEBUG__
bool mapped =
#endif // __DEBUG__
headers( "calloc", addr, header, freeHead, bsize, alignment );

#ifndef __DEBUG__
// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
if ( LIKELY( ! mapped ) )
#endif // __DEBUG__
// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
// `-header`-addr `-size
memset( addr, '\0', size ); // set to zeros

header->kind.real.blockSize |= 2; // mark as zero filled
return addr;
} // calloc
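
// Illustrative usage (sketch): the zero-fill mark set above is queryable later:
// char * p = (char *)calloc( 10, 10 ); // 100 zeroed bytes
// assert( malloc_zero_fill( p ) ); // sticky bit 2 set in blockSize
// free( p );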


// Change the size of the memory block pointed to by oaddr to size bytes. The contents are undefined. If oaddr is
// nullptr, then the call is equivalent to malloc(size), for all values of size; if size is equal to zero, and oaddr is
// not nullptr, then the call is equivalent to free(oaddr). Unless oaddr is nullptr, it must have been returned by an earlier
// call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done.
void * resize( void * oaddr, size_t size ) {
if ( UNLIKELY( oaddr == nullptr ) ) { // special cases
return mallocNoStats( size
#ifdef __STATISTICS__
, HeapStatistics::RESIZE
#endif // __STATISTICS__
);
} // if

// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
if ( UNLIKELY( size == 0 ) ) { // special cases
#ifdef __STATISTICS__
heapManager->stats.resize_0_calls += 1;
#endif // __STATISTICS__
doFree( oaddr ); // free previous storage
return nullptr;
} // if

Heap::Storage::Header * header;
Heap::FreeHeader * freeHead;
size_t bsize, oalign;
headers( "resize", oaddr, header, freeHead, bsize, oalign );

size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
// same size, DO NOT preserve STICKY PROPERTIES.
if ( oalign == __ALIGN__ && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
#ifdef __STATISTICS__
heapManager->stats.resize_calls += 1;
#endif // __STATISTICS__
header->kind.real.blockSize &= -2; // no alignment and turn off 0 fill
header->kind.real.size = size; // reset allocation size
return oaddr;
} // if

// change size, DO NOT preserve STICKY PROPERTIES.
doFree( oaddr ); // free previous storage
return mallocNoStats( size // create new area
#ifdef __STATISTICS__
, HeapStatistics::RESIZE
#endif // __STATISTICS__
);
} // resize
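
// Illustrative usage (sketch, addresses assumed): resize reuses the bucket when the new
// size fits with at most 50% waste, otherwise it frees and reallocates:
// void * p = malloc( 100 ); // bucket with >= 100 usable bytes
// p = resize( p, 80 ); // fits existing bucket: typically the same address, contents undefined
// p = resize( p, 100000 ); // too big for bucket: old storage freed, new storage returned
// free( p );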


// Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of
// the old and new sizes.
void * realloc( void * oaddr, size_t size ) {
if ( UNLIKELY( oaddr == nullptr ) ) { // special cases
return mallocNoStats( size
#ifdef __STATISTICS__
, HeapStatistics::REALLOC
#endif // __STATISTICS__
);
} // if

// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
if ( UNLIKELY( size == 0 ) ) { // special cases
#ifdef __STATISTICS__
heapManager->stats.realloc_0_calls += 1;
#endif // __STATISTICS__
doFree( oaddr ); // free previous storage
return nullptr;
} // if

Heap::Storage::Header * header;
Heap::FreeHeader * freeHead;
size_t bsize, oalign;
headers( "realloc", oaddr, header, freeHead, bsize, oalign );

size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
size_t osize = header->kind.real.size; // old allocation size
bool ozfill = (header->kind.real.blockSize & 2); // old allocation zero filled
if ( UNLIKELY( size <= odsize && odsize <= size * 2 ) ) { // allow up to 50% wasted storage
#ifdef __STATISTICS__
heapManager->stats.realloc_calls += 1;
heapManager->stats.realloc_storage_request += size;
#endif // __STATISTICS__

header->kind.real.size = size; // reset allocation size
if ( UNLIKELY( ozfill ) && size > osize ) { // previous request zero fill and larger ?
memset( (char *)oaddr + osize, '\0', size - osize ); // initialize added storage
} // if
return oaddr;
} // if

// change size and copy old content to new storage

void * naddr;
if ( UNLIKELY( oalign <= __ALIGN__ ) ) { // previous request not aligned ?
naddr = mallocNoStats( size // create new area
#ifdef __STATISTICS__
, HeapStatistics::REALLOC
#endif // __STATISTICS__
);
} else {
naddr = memalignNoStats( oalign, size // create new aligned area
#ifdef __STATISTICS__
, HeapStatistics::REALLOC
#endif // __STATISTICS__
);
} // if

headers( "realloc", naddr, header, freeHead, bsize, oalign );
// To preserve prior fill, the entire bucket must be copied versus the size.
memcpy( naddr, oaddr, std::min( osize, size ) ); // copy bytes
doFree( oaddr ); // free previous storage

if ( UNLIKELY( ozfill ) ) { // previous request zero fill ?
header->kind.real.blockSize |= 2; // mark new request as zero filled
if ( size > osize ) { // previous request larger ?
memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
} // if
} // if
return naddr;
} // realloc
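
// Illustrative usage (sketch): zero fill is a sticky property across realloc:
// char * p = (char *)calloc( 1, 100 ); // zero filled
// p = (char *)realloc( p, 200 ); // bytes 100..199 are also zero filled
// assert( p[150] == '\0' && malloc_zero_fill( p ) );
// free( p );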


// Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete)
void * memalign( size_t alignment, size_t size ) {
return memalignNoStats( alignment, size
#ifdef __STATISTICS__
, HeapStatistics::MEMALIGN
#endif // __STATISTICS__
);
} // memalign


// Same as aalloc() with memory alignment.
void * amemalign( size_t alignment, size_t dim, size_t elemSize ) {
return memalignNoStats( alignment, dim * elemSize
#ifdef __STATISTICS__
, HeapStatistics::AMEMALIGN
#endif // __STATISTICS__
);
} // amemalign


// Same as calloc() with memory alignment.
void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) {
size_t size = dim * elemSize;
char * addr = (char *)memalignNoStats( alignment, size
#ifdef __STATISTICS__
, HeapStatistics::CMEMALIGN
#endif // __STATISTICS__
);

if ( UNLIKELY( addr == NULL ) ) return NULL; // stop further processing if 0p is returned

Heap::Storage::Header * header;
Heap::FreeHeader * freeHead;
size_t bsize;

#ifndef __DEBUG__
bool mapped =
#endif // __DEBUG__
headers( "cmemalign", addr, header, freeHead, bsize, alignment );

// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
#ifndef __DEBUG__
if ( LIKELY( ! mapped ) )
#endif // __DEBUG__
// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
// `-header`-addr `-size
memset( addr, '\0', size ); // set to zeros

header->kind.real.blockSize |= 2; // mark as zero filled
return addr;
} // cmemalign


// Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
// of alignment. This requirement is universally ignored.
void * aligned_alloc( size_t alignment, size_t size ) {
return memalign( alignment, size );
} // aligned_alloc


// Allocates size bytes and places the address of the allocated memory in *memptr. The address of the allocated
// memory shall be a multiple of alignment, which must be a power of two and a multiple of sizeof(void *). If size
// is 0, then posix_memalign() returns either nullptr, or a unique pointer value that can later be successfully passed to
// free(3).
int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
if ( UNLIKELY( alignment < __ALIGN__ || ! Pow2( alignment ) ) ) return EINVAL; // check alignment
*memptr = memalign( alignment, size );
return 0;
} // posix_memalign
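
// Illustrative usage (sketch): posix_memalign reports bad alignments via its return value:
// void * p;
// int rc = posix_memalign( &p, 48, 100 ); // 48 is not a power of two => rc == EINVAL
// rc = posix_memalign( &p, 64, 100 ); // rc == 0, p is 64-byte aligned
// free( p );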


// Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
// page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
void * valloc( size_t size ) {
return memalign( __cfa_page_size, size );
} // valloc


// Same as valloc() but rounds size to a multiple of the page size.
void * pvalloc( size_t size ) { // round size to multiple of page size
return memalign( __cfa_page_size, Ceiling( size, __cfa_page_size ) );
} // pvalloc
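
// Illustrative difference (sketch, assuming a 4096-byte page): valloc( 10 ) requests 10
// bytes on a page boundary, whereas pvalloc( 10 ) rounds the request up to 4096 bytes.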


// Frees the memory space pointed to by ptr, which must have been returned by a previous call to malloc(), calloc()
// or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is
// nullptr, no operation is performed.
void free( void * addr ) {
#ifdef __STATISTICS__
DISABLE_INTERRUPTS;
if ( UNLIKELY( ! heapManagerBootFlag ) ) Heap::heapManagerCtor( // trigger for first heap
#ifdef __DEBUG__
0
#endif // __DEBUG__
);
#endif // __STATISTICS__

// detect free after thread-local storage destruction and use global stats in that case
if ( UNLIKELY( addr == nullptr ) ) { // special case
#ifdef __STATISTICS__
if ( LIKELY( heapManager ) ) heapManager->stats.free_null_calls += 1;
else AtomicFetchAdd( heapMaster.stats.free_null_calls, 1 );
ENABLE_INTERRUPTS;
#endif // __STATISTICS__
return;
} // if

#ifdef __STATISTICS__
if ( LIKELY( heapManager ) ) heapManager->stats.free_calls += 1;
else AtomicFetchAdd( heapMaster.stats.free_calls, 1 );
ENABLE_INTERRUPTS;
#endif // __STATISTICS__

doFree( addr );
} // free


// Returns the alignment of an allocation.
size_t malloc_alignment( void * addr ) {
if ( UNLIKELY( addr == nullptr ) ) return __ALIGN__; // minimum alignment
Heap::Storage::Header * header = headerAddr( addr );
if ( UNLIKELY( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
return header->kind.fake.alignment & -2; // remove flag from value
} else {
return __ALIGN__; // minimum alignment
} // if
} // malloc_alignment
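
// Illustrative usage (sketch): the odd-alignment trick makes this query cheap:
// void * p = memalign( 256, 100 );
// assert( malloc_alignment( p ) == 256 ); // fake header stores 256 | 1
// void * q = malloc( 100 );
// assert( malloc_alignment( q ) == __ALIGN__ ); // no fake header => minimum alignment
// free( p ); free( q );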


// Returns true if the allocation is zero filled, e.g., allocated by calloc().
bool malloc_zero_fill( void * addr ) {
if ( UNLIKELY( addr == nullptr ) ) return false; // null allocation is not zero fill
Heap::Storage::Header * header = headerAddr( addr );
if ( UNLIKELY( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
header = realHeader( header ); // backup from fake to real header
} // if
return (header->kind.real.blockSize & 2) != 0; // zero filled ?
} // malloc_zero_fill


// Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T).
size_t malloc_size( void * addr ) {
if ( UNLIKELY( addr == nullptr ) ) return 0; // null allocation has 0 size
Heap::Storage::Header * header = headerAddr( addr );
if ( UNLIKELY( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
header = realHeader( header ); // backup from fake to real header
} // if
return header->kind.real.size;
} // malloc_size


// Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by
// malloc or a related function.
size_t malloc_usable_size( void * addr ) {
if ( UNLIKELY( addr == nullptr ) ) return 0; // null allocation has 0 size
Heap::Storage::Header * header;
Heap::FreeHeader * freeHead;
size_t bsize, alignment;

headers( "malloc_usable_size", addr, header, freeHead, bsize, alignment );
return dataStorage( bsize, addr, header ); // data storage in bucket
} // malloc_usable_size
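
// Illustrative relation (sketch): malloc_size is the requested size and malloc_usable_size
// the bucket's data storage, so malloc_size( p ) <= malloc_usable_size( p ) always holds:
// void * p = malloc( 100 );
// assert( malloc_size( p ) == 100 && malloc_usable_size( p ) >= 100 );
// free( p );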


// Prints (on default standard error) statistics about memory allocated by malloc and related functions.
void malloc_stats() {
#ifdef __STATISTICS__
HeapStatistics stats;
printStats( collectStats( stats ) );
#else
#define MALLOC_STATS_MSG "malloc_stats statistics disabled.\n"
NOWARNING( write( STDERR_FILENO, MALLOC_STATS_MSG, sizeof( MALLOC_STATS_MSG ) - 1 /* size includes '\0' */ ), unused-result );
#endif // __STATISTICS__
} // malloc_stats


// Changes the file descriptor where malloc_stats() writes statistics.
int malloc_stats_fd( int fd __attribute__(( unused )) ) {
#ifdef __STATISTICS__
int temp = heapMaster.stats_fd;
heapMaster.stats_fd = fd;
return temp;
#else
return -1; // unsupported
#endif // __STATISTICS__
} // malloc_stats_fd


// Prints an XML string that describes the current state of the memory-allocation implementation in the caller.
// The string is written to the file stream stream. The exported string includes information about all arenas (see
// malloc).
int malloc_info( int options, FILE * stream __attribute__(( unused )) ) {
if ( options != 0 ) { errno = EINVAL; return -1; }
#ifdef __STATISTICS__
HeapStatistics stats;
return printStatsXML( collectStats( stats ), stream );
#else
return 0; // unsupported
#endif // __STATISTICS__
} // malloc_info


// Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The option argument
// specifies the parameter to be modified, and value specifies the new value for that parameter.
int mallopt( int option, int value ) {
if ( value < 0 ) return 0;
switch( option ) {
case M_TOP_PAD:
heapMaster.heapExpand = Ceiling( value, __cfa_page_size );
return 1;
case M_MMAP_THRESHOLD:
if ( setMmapStart( value ) ) return 1;
break;
} // switch
return 0; // error, unsupported
} // mallopt
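
// Illustrative usage (sketch): raise the sbrk/mmap crossover to 1 MiB and the heap
// extension amount to 4 MiB; mallopt returns 1 on success and 0 otherwise:
// if ( mallopt( M_MMAP_THRESHOLD, 1024 * 1024 ) == 0 ) { /* unsupported value */ }
// if ( mallopt( M_TOP_PAD, 4 * 1024 * 1024 ) == 0 ) { /* unsupported value */ }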


// Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument).
int malloc_trim( size_t ) {
return 0; // => impossible to release memory
} // malloc_trim


// Records the current state of all malloc internal bookkeeping variables (but not the actual contents of the heap
// or the state of malloc_hook function pointers). The state is recorded in a system-dependent opaque data
// structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function
// result. (The caller must free this memory.)
void * malloc_get_state( void ) {
return nullptr; // unsupported
} // malloc_get_state


// Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data
// structure pointed to by state.
int malloc_set_state( void * ) {
return 0; // unsupported
} // malloc_set_state

// Set the amount (bytes) to extend the heap size once all the current storage in the heap is allocated.
size_t malloc_expansion() { return __DEFAULT_HEAP_EXPANSION__; }

// Set the crossover point between allocations occurring in the sbrk area or separately mmapped.
size_t malloc_mmap_start() { return __DEFAULT_MMAP_START__; }

// Amount subtracted to adjust for unfreed program storage (debug only).
size_t malloc_unfreed() { return __DEFAULT_HEAP_UNFREED__; }
} // extern "C"


#ifdef __cforall
void * resize( void * oaddr, size_t nalign, size_t size )
#else
extern "C" {
void * _X6resizeFPv_Pvmm__1( void * oaddr, size_t nalign, size_t size )
#endif
{
if ( UNLIKELY( oaddr == nullptr ) ) {
return memalignNoStats( nalign, size
#ifdef __STATISTICS__
, HeapStatistics::RESIZE
#endif // __STATISTICS__
);
} // if

// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
if ( UNLIKELY( size == 0 ) ) { // special cases
#ifdef __STATISTICS__
heapManager->stats.resize_0_calls += 1;
#endif // __STATISTICS__
doFree( oaddr ); // free previous storage
return nullptr;
} // if

#ifdef __DEBUG__
checkAlign( nalign ); // check alignment
#endif // __DEBUG__

// Attempt to reuse existing alignment.
Heap::Storage::Header * header = headerAddr( oaddr );
bool isFakeHeader = header->kind.fake.alignment & 1; // old fake header ?
size_t oalign;
if ( UNLIKELY( isFakeHeader ) ) {
oalign = header->kind.fake.alignment & -2; // old alignment
if ( UNLIKELY( (uintptr_t)oaddr % nalign == 0 // lucky match ?
&& ( oalign <= nalign // going down
|| (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
) ) {
headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
Heap::FreeHeader * freeHead;
size_t bsize, oalign;
headers( "resize", oaddr, header, freeHead, bsize, oalign );
size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket

if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted data storage
header->kind.real.blockSize &= -2; // turn off 0 fill
header->kind.real.size = size; // reset allocation size
return oaddr;
} // if
} // if
} else if ( ! isFakeHeader // old real header (aligned on libAlign) ?
&& nalign == __ALIGN__ ) { // new alignment also on libAlign => no fake header needed
return resize( oaddr, size ); // duplicate special case checks
} // if

// change size, DO NOT preserve STICKY PROPERTIES.
doFree( oaddr ); // free previous storage
return memalignNoStats( nalign, size // create new aligned area
#ifdef __STATISTICS__
, HeapStatistics::RESIZE
#endif // __STATISTICS__
);
} // resize
#ifndef __cforall
}
#endif
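
// Illustrative semantics (sketch): the aligned resize keeps the storage in place when the
// existing address already satisfies the new alignment, otherwise it reallocates:
// void * p = memalign( 64, 100 );
// p = resize( p, 64, 80 ); // same alignment, fits bucket: address typically reused
// p = resize( p, 4096, 80 ); // stricter alignment rarely satisfied: new storage likely
// free( p );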


#ifdef __cforall
void * realloc( void * oaddr, size_t nalign, size_t size )
#else
extern "C" {
void * _X7reallocFPv_Pvmm__1( void * oaddr, size_t nalign, size_t size )
#endif
{
if ( UNLIKELY( oaddr == nullptr ) ) {
return memalignNoStats( nalign, size
#ifdef __STATISTICS__
, HeapStatistics::REALLOC
#endif // __STATISTICS__
);
} // if

// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
if ( UNLIKELY( size == 0 ) ) { // special cases
#ifdef __STATISTICS__
heapManager->stats.realloc_0_calls += 1;
#endif // __STATISTICS__
doFree( oaddr ); // free previous storage
return nullptr;
} // if

#ifdef __DEBUG__
checkAlign( nalign ); // check alignment
#endif // __DEBUG__

// Attempt to reuse existing alignment.
Heap::Storage::Header * header = headerAddr( oaddr );
bool isFakeHeader = header->kind.fake.alignment & 1; // old fake header ?
size_t oalign;
if ( UNLIKELY( isFakeHeader ) ) {
oalign = header->kind.fake.alignment & -2; // old alignment
if ( UNLIKELY( (uintptr_t)oaddr % nalign == 0 // lucky match ?
&& ( oalign <= nalign // going down
|| (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
) ) {
headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
return realloc( oaddr, size ); // duplicate special case checks
} // if
} else if ( ! isFakeHeader // old real header (aligned on libAlign) ?
&& nalign == __ALIGN__ ) { // new alignment also on libAlign => no fake header needed
return realloc( oaddr, size ); // duplicate special case checks
} // if

Heap::FreeHeader * freeHead;
size_t bsize;
headers( "realloc", oaddr, header, freeHead, bsize, oalign );

// change size and copy old content to new storage

size_t osize = header->kind.real.size; // old allocation size
bool ozfill = (header->kind.real.blockSize & 2); // old allocation zero filled

void * naddr = memalignNoStats( nalign, size // create new aligned area
#ifdef __STATISTICS__
, HeapStatistics::REALLOC
#endif // __STATISTICS__
);

headers( "realloc", naddr, header, freeHead, bsize, oalign );
memcpy( naddr, oaddr, std::min( osize, size ) ); // copy bytes
doFree( oaddr ); // free previous storage

if ( UNLIKELY( ozfill ) ) { // previous request zero fill ?
header->kind.real.blockSize |= 2; // mark new request as zero filled
if ( size > osize ) { // previous request larger ?
memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
} // if
} // if
return naddr;
} // realloc
#ifndef __cforall
}
#endif

// zip -r HeapPerThread.zip heap/HeapPerThread.h heap/HeapPerThread.cc heap/Makefile heap/affinity.h heap/test.cc heap/away.cc

// g++-10 -Wall -Wextra -g -O3 -DNDEBUG -D__STATISTICS__ -DTLS HeapPerThread.cc -fPIC -shared -o HeapPerThread.so

// Local Variables: //
// tab-width: 4 //
// compile-command: "g++-10 -Wall -Wextra -g -O3 -DNDEBUG -D__STATISTICS__ HeapPerThread.cc -c" //
// End: //