Index: libcfa/src/heap.cfa
===================================================================
--- libcfa/src/heap.cfa	(revision d74369beaab60052b0f6f1c8ae57d8e72c9e2250)
+++ libcfa/src/heap.cfa	(revision 58e280f46565e06007c2e7fc6db8434759dbaef4)
@@ -10,6 +10,6 @@
 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Fri Nov 22 14:16:30 2019
-// Update Count     : 626
+// Last Modified On : Sun Nov 24 17:56:15 2019
+// Update Count     : 638
 //
 
@@ -18,4 +18,5 @@
 #include <stdio.h>										// snprintf, fileno
 #include <errno.h>										// errno
+#include <string.h>										// memset, memcpy
 extern "C" {
 #include <sys/mman.h>									// mmap, munmap
@@ -27,5 +28,5 @@
 #include "bits/locks.hfa"								// __spinlock_t
 #include "startup.hfa"									// STARTUP_PRIORITY_MEMORY
-#include "stdlib.hfa"									// bsearchl
+//#include "stdlib.hfa"									// bsearchl
 #include "malloc.h"
 
@@ -90,6 +91,11 @@
 
 enum {
+	// Define the default extension heap amount in units of bytes. When the CFA supplied heap reaches the brk address,
+	// the brk address is extended by the extension amount.
+	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
+
+	// Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets;
+	// values greater than or equal to this value are mmap from the operating system.
 	__CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1),
-	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
 };
 
@@ -128,4 +134,5 @@
 } // extern "C"
 #endif // __CFA_DEBUG__
+
 
 // statically allocated variables => zero filled.
@@ -226,4 +233,5 @@
 #define __STATISTICS__
 
+// Bucket size must be multiple of 16.
 // Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
 static const unsigned int bucketSizes[] @= {			// different bucket sizes
@@ -365,117 +373,5 @@
 
 
-static inline bool setMmapStart( size_t value ) {		// true => mmapped, false => sbrk
-  if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
-	mmapStart = value;									// set global
-
-	// find the closest bucket size less than or equal to the mmapStart size
-	maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
-	assert( maxBucketsUsed < NoBucketSizes );			// subscript failure ?
-	assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
-	return false;
-} // setMmapStart
-
-
-static inline void checkHeader( bool check, const char * name, void * addr ) {
-	if ( unlikely( check ) ) {							// bad address ?
-		abort( "Attempt to %s storage %p with address outside the heap.\n"
-			   "Possible cause is duplicate free on same block or overwriting of memory.",
-			   name, addr );
-	} // if
-} // checkHeader
-
-
-static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) {
-	if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
-		size_t offset = header->kind.fake.offset;
-		alignment = header->kind.fake.alignment & -2;	// remove flag from value
-		#ifdef __CFA_DEBUG__
-		checkAlign( alignment );						// check alignment
-		#endif // __CFA_DEBUG__
-		header = (HeapManager.Storage.Header *)((char *)header - offset);
-	} // if
-} // fakeHeader
-
-
-// <-------+----------------------------------------------------> bsize (bucket size)
-// |header |addr
-//==================================================================================
-//                                | alignment
-// <-----------------<------------+-----------------------------> bsize (bucket size)
-//                   |fake-header | addr
-#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))
-
-// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
-// |header |addr
-//==================================================================================
-//                                | alignment
-// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
-//                   |fake-header |addr
-#define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
-
-
-static inline bool headers( const char * name __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
-	header = headerAddr( addr );
-
-	if ( unlikely( heapEnd < addr ) ) {					// mmapped ?
-		fakeHeader( header, alignment );
-		size = header->kind.real.blockSize & -3;		// mmap size
-		return true;
-	} // if
-
-	#ifdef __CFA_DEBUG__
-	checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
-	#endif // __CFA_DEBUG__
-
-	// header may be safe to dereference
-	fakeHeader( header, alignment );
-	#ifdef __CFA_DEBUG__
-	checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
-	#endif // __CFA_DEBUG__
-
-	freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
-	#ifdef __CFA_DEBUG__
-	if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
-		abort( "Attempt to %s storage %p with corrupted header.\n"
-			   "Possible cause is duplicate free on same block or overwriting of header information.",
-			   name, addr );
-	} // if
-	#endif // __CFA_DEBUG__
-	size = freeElem->blockSize;
-	return false;
-} // headers
-
-
-static inline void * extend( size_t size ) with ( heapManager ) {
-	lock( extlock __cfaabi_dbg_ctx2 );
-	ptrdiff_t rem = heapRemaining - size;
-	if ( rem < 0 ) {
-		// If the size requested is bigger than the current remaining storage, increase the size of the heap.
-
-		size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
-		if ( sbrk( increase ) == (void *)-1 ) {
-			unlock( extlock );
-			errno = ENOMEM;
-			return 0p;
-		} // if
-		#ifdef __STATISTICS__
-		sbrk_calls += 1;
-		sbrk_storage += increase;
-		#endif // __STATISTICS__
-		#ifdef __CFA_DEBUG__
-		// Set new memory to garbage so subsequent uninitialized usages might fail.
-		memset( (char *)heapEnd + heapRemaining, '\377', increase );
-		#endif // __CFA_DEBUG__
-		rem = heapRemaining + increase - size;
-	} // if
-
-	HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
-	heapRemaining = rem;
-	heapEnd = (char *)heapEnd + size;
-	unlock( extlock );
-	return block;
-} // extend
-
-
+// thunk problem
 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) {
 	size_t l = 0, m, h = dim;
@@ -490,4 +386,117 @@
 	return l;
 } // Bsearchl
+
+
+static inline bool setMmapStart( size_t value ) {		// true => mmapped, false => sbrk
+  if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
+	mmapStart = value;									// set global
+
+	// find the closest bucket size less than or equal to the mmapStart size
+	maxBucketsUsed = Bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search
+	assert( maxBucketsUsed < NoBucketSizes );			// subscript failure ?
+	assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
+	return false;
+} // setMmapStart
+
+
+static inline void checkHeader( bool check, const char * name, void * addr ) {
+	if ( unlikely( check ) ) {							// bad address ?
+		abort( "Attempt to %s storage %p with address outside the heap.\n"
+			   "Possible cause is duplicate free on same block or overwriting of memory.",
+			   name, addr );
+	} // if
+} // checkHeader
+
+
+static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) {
+	if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
+		size_t offset = header->kind.fake.offset;
+		alignment = header->kind.fake.alignment & -2;	// remove flag from value
+		#ifdef __CFA_DEBUG__
+		checkAlign( alignment );						// check alignment
+		#endif // __CFA_DEBUG__
+		header = (HeapManager.Storage.Header *)((char *)header - offset);
+	} // if
+} // fakeHeader
+
+
+// <-------+----------------------------------------------------> bsize (bucket size)
+// |header |addr
+//==================================================================================
+//                                | alignment
+// <-----------------<------------+-----------------------------> bsize (bucket size)
+//                   |fake-header | addr
+#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))
+
+// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
+// |header |addr
+//==================================================================================
+//                                | alignment
+// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
+//                   |fake-header |addr
+#define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
+
+
+static inline bool headers( const char * name __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
+	header = headerAddr( addr );
+
+	if ( unlikely( heapEnd < addr ) ) {					// mmapped ?
+		fakeHeader( header, alignment );
+		size = header->kind.real.blockSize & -3;		// mmap size
+		return true;
+	} // if
+
+	#ifdef __CFA_DEBUG__
+	checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
+	#endif // __CFA_DEBUG__
+
+	// header may be safe to dereference
+	fakeHeader( header, alignment );
+	#ifdef __CFA_DEBUG__
+	checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
+	#endif // __CFA_DEBUG__
+
+	freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
+	#ifdef __CFA_DEBUG__
+	if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
+		abort( "Attempt to %s storage %p with corrupted header.\n"
+			   "Possible cause is duplicate free on same block or overwriting of header information.",
+			   name, addr );
+	} // if
+	#endif // __CFA_DEBUG__
+	size = freeElem->blockSize;
+	return false;
+} // headers
+
+
+static inline void * extend( size_t size ) with ( heapManager ) {
+	lock( extlock __cfaabi_dbg_ctx2 );
+	ptrdiff_t rem = heapRemaining - size;
+	if ( rem < 0 ) {
+		// If the size requested is bigger than the current remaining storage, increase the size of the heap.
+
+		size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
+		if ( sbrk( increase ) == (void *)-1 ) {
+			unlock( extlock );
+			errno = ENOMEM;
+			return 0p;
+		} // if
+		#ifdef __STATISTICS__
+		sbrk_calls += 1;
+		sbrk_storage += increase;
+		#endif // __STATISTICS__
+		#ifdef __CFA_DEBUG__
+		// Set new memory to garbage so subsequent uninitialized usages might fail.
+		memset( (char *)heapEnd + heapRemaining, '\377', increase );
+		#endif // __CFA_DEBUG__
+		rem = heapRemaining + increase - size;
+	} // if
+
+	HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
+	heapRemaining = rem;
+	heapEnd = (char *)heapEnd + size;
+	unlock( extlock );
+	return block;
+} // extend
 
 
@@ -541,9 +550,9 @@
 			block = (HeapManager.Storage *)extend( tsize );	// mutual exclusion on call
   if ( unlikely( block == 0p ) ) return 0p;
-			#if defined( SPINLOCK )
+		#if defined( SPINLOCK )
 		} else {
 			freeElem->freeList = block->header.kind.real.next;
 			unlock( freeElem->lock );
-			#endif // SPINLOCK
+		#endif // SPINLOCK
 		} // if
 
@@ -696,6 +705,6 @@
 	heapExpand = default_heap_expansion();
 
-	char * End = (char *)sbrk( 0 );
-	sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End ); // move start of heap to multiple of alignment
+	char * end = (char *)sbrk( 0 );
+	sbrk( (char *)libCeiling( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment
 	heapBegin = heapEnd = sbrk( 0 );					// get new start point
 } // HeapManager
@@ -755,7 +764,7 @@
 	if ( ! mapped )
 	#endif // __CFA_DEBUG__
-	    // Zero entire data space even when > than size => realloc without a new allocation and zero fill works.
-	    // <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size)
-	    // `-header`-addr                      `-size
+		// Zero entire data space even when > than size => realloc without a new allocation and zero fill works.
+		// <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size)
+		// `-header`-addr                      `-size
 		memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros
 
@@ -904,4 +913,5 @@
 		} // if
 	  if ( unlikely( naddr == 0p ) ) return 0p;
+
 		headers( "realloc", naddr, header, freeElem, bsize, oalign );
 		size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket
@@ -971,5 +981,5 @@
 			// if ( traceHeap() ) {
 			// 	#define nullmsg "Free( 0x0 ) size:0\n"
-			// 	// Do not debug print free( 0 ), as it can cause recursive entry from sprintf.
+			// 	// Do not debug print free( 0p ), as it can cause recursive entry from sprintf.
 			// 	__cfaabi_dbg_write( nullmsg, sizeof(nullmsg) - 1 );
 			// } // if
@@ -982,5 +992,5 @@
 
 
-    // The malloc_alignment() function returns the alignment of the allocation.
+	// The malloc_alignment() function returns the alignment of the allocation.
 	size_t malloc_alignment( void * addr ) {
 	  if ( unlikely( addr == 0p ) ) return libAlign();	// minimum alignment
@@ -994,5 +1004,5 @@
 
 
-    // The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc().
+	// The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc().
 	bool malloc_zero_fill( void * addr ) {
 	  if ( unlikely( addr == 0p ) ) return false;		// null allocation is not zero fill
@@ -1018,6 +1028,6 @@
 
 
-    // The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and
-    // related functions.
+	// The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and
+	// related functions.
 	void malloc_stats( void ) {
 		#ifdef __STATISTICS__
@@ -1087,14 +1097,14 @@
 // Must have CFA linkage to overload with C linkage realloc.
 void * realloc( void * oaddr, size_t nalign, size_t size ) {
-    #ifdef __STATISTICS__
+	#ifdef __STATISTICS__
 	__atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
-    #endif // __STATISTICS__
+	#endif // __STATISTICS__
 
   if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
   if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
 
-    if ( unlikely( nalign == 0 ) ) nalign = libAlign();	// reset alignment to minimum
+	if ( unlikely( nalign == 0 ) ) nalign = libAlign();	// reset alignment to minimum
 	#ifdef __CFA_DEBUG__
-    else
+	else
 		checkAlign( nalign );							// check alignment
 	#endif // __CFA_DEBUG__
@@ -1104,29 +1114,30 @@
 	size_t bsize, oalign = 0;
 	headers( "realloc", oaddr, header, freeElem, bsize, oalign );
-
-    size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
+	size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
 
   if ( oalign != 0 && (uintptr_t)oaddr % nalign == 0 ) { // has alignment and just happens to work out
 		headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
 		return realloc( oaddr, size );
-    } // if
-
-    #ifdef __STATISTICS__
+	} // if
+
+	#ifdef __STATISTICS__
 	__atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
-    #endif // __STATISTICS__
-
-    // change size and copy old content to new storage
-
-    void * naddr;
-    if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-        naddr = cmemalignNoStats( nalign, 1, size );	// create new aligned area
-    } else {
-        naddr = memalignNoStats( nalign, size );		// create new aligned area
-    } // if
-    size_t ndsize = dataStorage( bsize, naddr, header );	// data storage avilable in bucket
+	#endif // __STATISTICS__
+
+	// change size and copy old content to new storage
+
+	void * naddr;
+	if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
+		naddr = cmemalignNoStats( nalign, 1, size );	// create new aligned area
+	} else {
+		naddr = memalignNoStats( nalign, size );		// create new aligned area
+	} // if
+
+	headers( "realloc", naddr, header, freeElem, bsize, oalign );
+	size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
 	// To preserve prior fill, the entire bucket must be copied versus the size.
-    memcpy( naddr, oaddr, MIN( odsize, ndsize ) );		// copy bytes
-    free( oaddr );
-    return naddr;
+	memcpy( naddr, oaddr, MIN( odsize, ndsize ) );		// copy bytes
+	free( oaddr );
+	return naddr;
 } // realloc
 
