Index: libcfa/src/heap.cfa
===================================================================
--- libcfa/src/heap.cfa	(revision 58e97d9ab5836a22e8150d3307e96e09857157ca)
+++ libcfa/src/heap.cfa	(revision 19e5d65d29e8005d1f1fbc0f906bd9bb0a80e867)
@@ -10,6 +10,6 @@
 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Fri Apr 22 18:25:23 2022
-// Update Count     : 1121
+// Last Modified On : Sun Apr 24 09:58:01 2022
+// Update Count     : 1146
 //
 
@@ -240,7 +240,9 @@
 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" );
 
-
 // The constructor for heapManager is called explicitly in memory_startup.
 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing
+
+
+//####################### Memory Allocation Routines Helpers ####################
 
 
@@ -327,7 +329,7 @@
 
 // Use "write" because streams may be shutdown when calls are made.
-static void printStats() {								// see malloc_stats
+static int printStats() {								// see malloc_stats
 	char helpText[sizeof(prtFmt) + 1024];				// space for message and values
-	__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), prtFmt,
+	return __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), prtFmt,
 			stats.malloc_calls, stats.malloc_0_calls, stats.malloc_storage_request, stats.malloc_storage_alloc,
 			stats.aalloc_calls, stats.aalloc_0_calls, stats.aalloc_storage_request, stats.aalloc_storage_alloc,
@@ -417,6 +419,6 @@
 // <-----------------<------------+-----------------------------> bsize (bucket size)
 //                   |fake-header | addr
-#define headerAddr( addr ) ((Heap.Storage.Header *)( (char *)addr - sizeof(Heap.Storage) ))
-#define realHeader( header ) ((Heap.Storage.Header *)((char *)header - header->kind.fake.offset))
+#define HeaderAddr( addr ) ((Heap.Storage.Header *)( (char *)addr - sizeof(Heap.Storage) ))
+#define RealHeader( header ) ((Heap.Storage.Header *)((char *)header - header->kind.fake.offset))
 
 // <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
@@ -426,10 +428,10 @@
 // <------------------------------<<---------- dsize --------->>> bsize (bucket size)
 //                   |fake-header |addr
-#define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
+#define DataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
 
 
 static inline void checkAlign( size_t alignment ) {
-	if ( alignment < libAlign() || ! is_pow2( alignment ) ) {
-		abort( "Alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
+	if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) {
+		abort( "**** Error **** alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
 	} // if
 } // checkAlign
@@ -438,5 +440,5 @@
 static inline void checkHeader( bool check, const char name[], void * addr ) {
 	if ( unlikely( check ) ) {							// bad address ?
-		abort( "Attempt to %s storage %p with address outside the heap.\n"
+		abort( "**** Error **** attempt to %s storage %p with address outside the heap.\n"
 			   "Possible cause is duplicate free on same block or overwriting of memory.",
 			   name, addr );
@@ -445,11 +447,27 @@
 
 
+// Manipulate sticky bits stored in unused 3 low-order bits of an address.
+//   bit0 => alignment => fake header
+//   bit1 => zero filled (calloc)
+//   bit2 => mapped allocation versus sbrk
+#define StickyBits( header ) (((header)->kind.real.blockSize & 0x7))
+#define ClearStickyBits( addr ) (typeof(addr))((uintptr_t)(addr) & ~7)
+#define MarkAlignmentBit( align ) ((align) | 1)
+#define AlignmentBit( header ) ((((header)->kind.fake.alignment) & 1))
+#define ClearAlignmentBit( header ) (((header)->kind.fake.alignment) & ~1)
+#define ZeroFillBit( header ) ((((header)->kind.real.blockSize) & 2))
+#define ClearZeroFillBit( header ) ((((header)->kind.real.blockSize) &= ~2))
+#define MarkZeroFilledBit( header ) ((header)->kind.real.blockSize |= 2)
+#define MmappedBit( header ) ((((header)->kind.real.blockSize) & 4))
+#define MarkMmappedBit( size ) ((size) | 4)
+
+
 static inline void fakeHeader( Heap.Storage.Header *& header, size_t & alignment ) {
-	if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ?
-		alignment = header->kind.fake.alignment & -2;	// remove flag from value
+	if ( unlikely( AlignmentBit( header ) ) ) {			// fake header ?
+		alignment = ClearAlignmentBit( header );		// clear flag from value
 		#ifdef __CFA_DEBUG__
 		checkAlign( alignment );						// check alignment
 		#endif // __CFA_DEBUG__
-		header = realHeader( header );					// backup from fake to real header
+		header = RealHeader( header );					// backup from fake to real header
 	} else {
 		alignment = libAlign();							// => no fake header
@@ -458,13 +476,7 @@
 
 
-static inline bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header, Heap.FreeHeader *& freeElem,
-							size_t & size, size_t & alignment ) with( heapManager ) {
-	header = headerAddr( addr );
-
-  if ( unlikely( addr < heapBegin || heapEnd < addr ) ) { // mmapped ?
-		fakeHeader( header, alignment );
-		size = header->kind.real.blockSize & -3;		// mmap size
-		return true;
-	} // if
+static inline bool headers( const char name[] __attribute__(( unused )), void * addr, Heap.Storage.Header *& header,
+							Heap.FreeHeader *& freeHead, size_t & size, size_t & alignment ) with( heapManager ) {
+	header = HeaderAddr( addr );
 
 	#ifdef __CFA_DEBUG__
@@ -472,19 +484,29 @@
 	#endif // __CFA_DEBUG__
 
-	// header may be safe to dereference
-	fakeHeader( header, alignment );
+	if ( likely( ! StickyBits( header ) ) ) {			// no sticky bits ?
+		freeHead = (Heap.FreeHeader *)(header->kind.real.home);
+		alignment = libAlign();
+	} else {
+		fakeHeader( header, alignment );
+		if ( unlikely( MmappedBit( header ) ) ) {
+			assert( addr < heapBegin || heapEnd < addr );
+			size = ClearStickyBits( header->kind.real.blockSize ); // mmap size
+			return true;
+		} // if
+
+		freeHead = (Heap.FreeHeader *)(ClearStickyBits( header->kind.real.home ));
+	} // if
+	size = freeHead->blockSize;
+
 	#ifdef __CFA_DEBUG__
 	checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
+
+	if ( freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) {
+		abort( "**** Error **** attempt to %s storage %p with corrupted header.\n"
+			   "Possible cause is duplicate free on same block or overwriting of header information.",
+			   name, addr );
+	} // if
 	#endif // __CFA_DEBUG__
 
-	freeElem = (Heap.FreeHeader *)((size_t)header->kind.real.home & -3);
-	#ifdef __CFA_DEBUG__
-	if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
-		abort( "Attempt to %s storage %p with corrupted header.\n"
-			   "Possible cause is duplicate free on same block or overwriting of header information.",
-			   name, addr );
-	} // if
-	#endif // __CFA_DEBUG__
-	size = freeElem->blockSize;
 	return false;
 } // headers
@@ -513,9 +535,10 @@
 static inline void * extend( size_t size ) with( heapManager ) {
 	lock( extlock __cfaabi_dbg_ctx2 );
+
 	ptrdiff_t rem = heapRemaining - size;
-	if ( rem < 0 ) {
+	if ( unlikely( rem < 0 ) ) {
 		// If the size requested is bigger than the current remaining storage, increase the size of the heap.
 
-		size_t increase = ceiling2( size > heapExpand ? size : heapExpand, __page_size );
+		size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() );
 		// Do not call abort or strerror( errno ) as they may call malloc.
 		if ( sbrk( increase ) == (void *)-1 ) {			// failed, no memory ?
@@ -524,10 +547,12 @@
 			_exit( EXIT_FAILURE );						// give up
 		} // if
+
 		// Make storage executable for thunks.
-		if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
-			unlock( extlock );
-			__cfaabi_bits_print_nolock( STDERR_FILENO, "extend() : internal error, mprotect failure, heapEnd:%p size:%zd, errno:%d.\n", heapEnd, increase, errno );
-			_exit( EXIT_FAILURE );
-		} // if
+		// if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
+		// 	unlock( extlock );
+		// 	__cfaabi_bits_print_nolock( STDERR_FILENO, "extend() : internal error, mprotect failure, heapEnd:%p size:%zd, errno:%d.\n", heapEnd, increase, errno );
+		// 	_exit( EXIT_FAILURE );
+		// } // if
+
 		#ifdef __STATISTICS__
 		sbrk_calls += 1;
@@ -555,7 +580,7 @@
 	// Look up size in the size list.  Make sure the user request includes space for the header that must be allocated
 	// along with the block and is a multiple of the alignment size.
-
   if ( unlikely( size > ULONG_MAX - sizeof(Heap.Storage) ) ) return 0p;
 	size_t tsize = size + sizeof(Heap.Storage);
+
 	if ( likely( tsize < mmapStart ) ) {				// small size => sbrk
 		size_t posn;
@@ -615,5 +639,5 @@
 		//Memset( block, tsize );
 		#endif // __CFA_DEBUG__
-		block->header.kind.real.blockSize = tsize;		// storage size for munmap
+		block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap
 	} // if
 
@@ -825,15 +849,18 @@
 
 	// address of header from malloc
-	Heap.Storage.Header * realHeader = headerAddr( addr );
-	realHeader->kind.real.size = size;					// correct size to eliminate above alignment offset
+	Heap.Storage.Header * RealHeader = HeaderAddr( addr );
+	RealHeader->kind.real.size = size;					// correct size to eliminate above alignment offset
 	// address of fake header * before* the alignment location
-	Heap.Storage.Header * fakeHeader = headerAddr( user );
+	Heap.Storage.Header * fakeHeader = HeaderAddr( user );
 	// SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
-	fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)realHeader;
+	fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)RealHeader;
 	// SKULLDUGGERY: odd alignment imples fake header
-	fakeHeader->kind.fake.alignment = alignment | 1;
+	fakeHeader->kind.fake.alignment = MarkAlignmentBit( alignment );
 
 	return user;
 } // memalignNoStats
+
+
+//####################### Memory Allocation Routines ####################
 
 
@@ -904,5 +931,5 @@
 			memset( addr, '\0', size );					// set to zeros
 
-		header->kind.real.blockSize |= 2;				// mark as zero filled
+		MarkZeroFilledBit( header );					// mark as zero fill
 		return addr;
 	} // calloc
@@ -938,8 +965,8 @@
 		headers( "resize", oaddr, header, freeElem, bsize, oalign );
 
-		size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
+		size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
 		// same size, DO NOT preserve STICKY PROPERTIES.
 		if ( oalign == libAlign() && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
-			header->kind.real.blockSize &= -2;			// no alignment and turn off 0 fill
+			ClearZeroFillBit( header );					// no alignment and turn off 0 fill
 			header->kind.real.size = size;				// reset allocation size
 			return oaddr;
@@ -983,7 +1010,7 @@
 		headers( "realloc", oaddr, header, freeElem, bsize, oalign );
 
-		size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
+		size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
 		size_t osize = header->kind.real.size;			// old allocation size
-		bool ozfill = (header->kind.real.blockSize & 2); // old allocation zero filled
+		bool ozfill = ZeroFillBit( header );			// old allocation zero filled
 	  if ( unlikely( size <= odsize ) && odsize <= size * 2 ) { // allow up to 50% wasted storage
 	  		header->kind.real.size = size;				// reset allocation size
@@ -1012,5 +1039,5 @@
 
 		if ( unlikely( ozfill ) ) {						// previous request zero fill ?
-			header->kind.real.blockSize |= 2;			// mark new request as zero filled
+			MarkZeroFilledBit( header );				// mark new request as zero filled
 			if ( size > osize ) {						// previous request larger ?
 				memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
@@ -1019,4 +1046,11 @@
 		return naddr;
 	} // realloc
+
+
+	// Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize.
+	void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) {
+	  if ( unlikely( elemSize != 0 && dim > ULONG_MAX / elemSize ) ) { errno = ENOMEM; return 0p; } // prevent size_t overflow in dim * elemSize
+		return realloc( oaddr, dim * elemSize );
+	} // reallocarray
 
 
@@ -1085,5 +1118,5 @@
 			memset( addr, '\0', size );					// set to zeros
 
-		header->kind.real.blockSize |= 2;				// mark as zero filled
+		MarkZeroFilledBit( header );					// mark as zero filled
 		return addr;
 	} // cmemalign
@@ -1091,5 +1124,5 @@
 
 	// Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
-    // of alignment. This requirement is universally ignored.
+	// of alignment. This requirement is universally ignored.
 	void * aligned_alloc( size_t alignment, size_t size ) {
 		return memalign( alignment, size );
@@ -1102,6 +1135,6 @@
 	// free(3).
 	int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
-	  if ( alignment < libAlign() || ! is_pow2( alignment ) ) return EINVAL; // check alignment
-		* memptr = memalign( alignment, size );
+		if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment
+		*memptr = memalign( alignment, size );
 		return 0;
 	} // posix_memalign
@@ -1117,5 +1150,5 @@
 	// Same as valloc but rounds size to multiple of page size.
 	void * pvalloc( size_t size ) {
-		return memalign( __page_size, ceiling2( size, __page_size ) );
+		return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size
 	} // pvalloc
 
@@ -1147,7 +1180,7 @@
 	size_t malloc_alignment( void * addr ) {
 	  if ( unlikely( addr == 0p ) ) return libAlign();	// minimum alignment
-		Heap.Storage.Header * header = headerAddr( addr );
-		if ( (header->kind.fake.alignment & 1) == 1 ) {	// fake header ?
-			return header->kind.fake.alignment & -2;	// remove flag from value
+		Heap.Storage.Header * header = HeaderAddr( addr );
+		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
+			return ClearAlignmentBit( header );			// clear flag from value
 		} else {
 			return libAlign();							// minimum alignment
@@ -1157,16 +1190,16 @@
 
 	// Set the alignment for an the allocation and return previous alignment or 0 if no alignment.
-	size_t malloc_alignment_set$( void * addr, size_t alignment ) {
-	  if ( unlikely( addr == 0p ) ) return libAlign();	// minimum alignment
-		size_t ret;
-		Heap.Storage.Header * header = headerAddr( addr );
-		if ( (header->kind.fake.alignment & 1) == 1 ) {	// fake header ?
-			ret = header->kind.fake.alignment & -2;		// remove flag from old value
-			header->kind.fake.alignment = alignment | 1; // add flag to new value
-		} else {
-			ret = 0;									// => no alignment to change
-		} // if
-		return ret;
-	} // malloc_alignment_set$
+	// size_t malloc_alignment_set$( void * addr, size_t alignment ) {
+	//   if ( unlikely( addr == 0p ) ) return libAlign();	// minimum alignment
+	// 	size_t ret;
+	// 	Heap.Storage.Header * header = HeaderAddr( addr );
+	// 	if ( (header->kind.fake.alignment & 1) == 1 ) {	// fake header ?
+	// 		ret = header->kind.fake.alignment & -2;		// remove flag from old value
+	// 		header->kind.fake.alignment = alignment | 1; // add flag to new value
+	// 	} else {
+	// 		ret = 0;									// => no alignment to change
+	// 	} // if
+	// 	return ret;
+	// } // malloc_alignment_set$
 
 
@@ -1174,30 +1207,30 @@
 	bool malloc_zero_fill( void * addr ) {
 	  if ( unlikely( addr == 0p ) ) return false;		// null allocation is not zero fill
-		Heap.Storage.Header * header = headerAddr( addr );
-		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
-			header = realHeader( header );				// backup from fake to real header
-		} // if
-		return (header->kind.real.blockSize & 2) != 0;	// zero filled ?
+		Heap.Storage.Header * header = HeaderAddr( addr );
+		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
+			header = RealHeader( header );				// backup from fake to real header
+		} // if
+		return ZeroFillBit( header );					// zero filled ?
 	} // malloc_zero_fill
 
 	// Set allocation is zero filled and return previous zero filled.
-	bool malloc_zero_fill_set$( void * addr ) {
-	  if ( unlikely( addr == 0p ) ) return false;		// null allocation is not zero fill
-		Heap.Storage.Header * header = headerAddr( addr );
-		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
-			header = realHeader( header );				// backup from fake to real header
-		} // if
-		bool ret = (header->kind.real.blockSize & 2) != 0; // zero filled ?
-		header->kind.real.blockSize |= 2;				// mark as zero filled
-		return ret;
-	} // malloc_zero_fill_set$
-
-
-	// Returns original total allocation size (not bucket size) => array size is dimension * sizeif(T).
+	// bool malloc_zero_fill_set$( void * addr ) {
+	//   if ( unlikely( addr == 0p ) ) return false;		// null allocation is not zero fill
+	// 	Heap.Storage.Header * header = HeaderAddr( addr );
+	// 	if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
+	// 		header = RealHeader( header );				// backup from fake to real header
+	// 	} // if
+	// 	bool ret = (header->kind.real.blockSize & 2) != 0; // zero filled ?
+	// 	header->kind.real.blockSize |= 2;				// mark as zero filled
+	// 	return ret;
+	// } // malloc_zero_fill_set$
+
+
+	// Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T).
 	size_t malloc_size( void * addr ) {
 	  if ( unlikely( addr == 0p ) ) return 0;			// null allocation has zero size
-		Heap.Storage.Header * header = headerAddr( addr );
-		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
-			header = realHeader( header );				// backup from fake to real header
+		Heap.Storage.Header * header = HeaderAddr( addr );
+		if ( unlikely( AlignmentBit( header ) ) ) {		// fake header ?
+			header = RealHeader( header );				// backup from fake to real header
 		} // if
 		return header->kind.real.size;
@@ -1205,14 +1238,14 @@
 
 	// Set allocation size and return previous size.
-	size_t malloc_size_set$( void * addr, size_t size ) {
-	  if ( unlikely( addr == 0p ) ) return 0;			// null allocation has 0 size
-		Heap.Storage.Header * header = headerAddr( addr );
-		if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
-			header = realHeader( header );				// backup from fake to real header
-		} // if
-		size_t ret = header->kind.real.size;
-		header->kind.real.size = size;
-		return ret;
-	} // malloc_size_set$
+	// size_t malloc_size_set$( void * addr, size_t size ) {
+	//   if ( unlikely( addr == 0p ) ) return 0;			// null allocation has 0 size
+	// 	Heap.Storage.Header * header = HeaderAddr( addr );
+	// 	if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
+	// 		header = RealHeader( header );				// backup from fake to real header
+	// 	} // if
+	// 	size_t ret = header->kind.real.size;
+	// 	header->kind.real.size = size;
+	// 	return ret;
+	// } // malloc_size_set$
 
 
@@ -1226,5 +1259,5 @@
 
 		headers( "malloc_usable_size", addr, header, freeElem, bsize, alignment );
-		return dataStorage( bsize, addr, header );		// data storage in bucket
+		return DataStorage( bsize, addr, header );		// data storage in bucket
 	} // malloc_usable_size
 
@@ -1239,5 +1272,5 @@
 
 
-	// Changes the file descripter where malloc_stats() writes statistics.
+	// Changes the file descriptor where malloc_stats() writes statistics.
 	int malloc_stats_fd( int fd __attribute__(( unused )) ) {
 		#ifdef __STATISTICS__
@@ -1246,7 +1279,20 @@
 		return temp;
 		#else
-		return -1;
+		return -1;										// unsupported
 		#endif // __STATISTICS__
 	} // malloc_stats_fd
+
+
+	// Prints an XML string that describes the current state of the memory-allocation implementation in the caller.
+	// The string is printed on the file stream stream.  The exported string includes information about all arenas (see
+	// malloc).
+	int malloc_info( int options, FILE * stream __attribute__(( unused )) ) {
+	  if ( options != 0 ) { errno = EINVAL; return -1; }
+		#ifdef __STATISTICS__
+		return printStatsXML( stream );
+		#else
+		return 0;										// unsupported
+		#endif // __STATISTICS__
+	} // malloc_info
 
 
@@ -1254,11 +1300,12 @@
 	// specifies the parameter to be modified, and value specifies the new value for that parameter.
 	int mallopt( int option, int value ) {
+	  if ( value < 0 ) return 0;
 		choose( option ) {
 		  case M_TOP_PAD:
-			heapExpand = ceiling2( value, __page_size ); return 1;
+			heapExpand = ceiling2( value, __page_size );
+			return 1;
 		  case M_MMAP_THRESHOLD:
 			if ( setMmapStart( value ) ) return 1;
-			break;
-		} // switch
+		} // choose
 		return 0;										// error, unsupported
 	} // mallopt
@@ -1269,17 +1316,4 @@
 		return 0;										// => impossible to release memory
 	} // malloc_trim
-
-
-	// Exports an XML string that describes the current state of the memory-allocation implementation in the caller.
-	// The string is printed on the file stream stream.  The exported string includes information about all arenas (see
-	// malloc).
-	int malloc_info( int options, FILE * stream __attribute__(( unused )) ) {
-	  if ( options != 0 ) { errno = EINVAL; return -1; }
-		#ifdef __STATISTICS__
-		return printStatsXML( stream );
-		#else
-		return 0;										// unsupported
-		#endif // __STATISTICS__
-	} // malloc_info
 
 
@@ -1299,4 +1333,5 @@
 	} // malloc_set_state
 
+
 	// Sets the amount (bytes) to extend the heap when there is insufficent free storage to service an allocation.
 	__attribute__((weak)) size_t malloc_expansion() { return __CFA_DEFAULT_HEAP_EXPANSION__; }
@@ -1335,23 +1370,23 @@
 
 	// Attempt to reuse existing alignment.
-	Heap.Storage.Header * header = headerAddr( oaddr );
-	bool isFakeHeader = header->kind.fake.alignment & 1; // old fake header ?
+	Heap.Storage.Header * header = HeaderAddr( oaddr );
+	bool isFakeHeader = AlignmentBit( header );			// old fake header ?
 	size_t oalign;
-	if ( isFakeHeader ) {
-		oalign = header->kind.fake.alignment & -2;		// old alignment
-		if ( (uintptr_t)oaddr % nalign == 0				// lucky match ?
+
+	if ( unlikely( isFakeHeader ) ) {
+		oalign = ClearAlignmentBit( header );			// old alignment
+		if ( unlikely( (uintptr_t)oaddr % nalign == 0	// lucky match ?
 			 && ( oalign <= nalign						// going down
 				  || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
-			) {
-			headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
+			) ) {
+			HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
 			Heap.FreeHeader * freeElem;
 			size_t bsize, oalign;
 			headers( "resize", oaddr, header, freeElem, bsize, oalign );
-			size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
+			size_t odsize = DataStorage( bsize, oaddr, header ); // data storage available in bucket
 
 			if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted data storage
-				headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
-
-				header->kind.real.blockSize &= -2;		// turn off 0 fill
+				HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
+				ClearZeroFillBit( header );				// turn off 0 fill
 				header->kind.real.size = size;			// reset allocation size
 				return oaddr;
@@ -1397,19 +1432,20 @@
 
 	// Attempt to reuse existing alignment.
-	Heap.Storage.Header * header = headerAddr( oaddr );
-	bool isFakeHeader = header->kind.fake.alignment & 1; // old fake header ?
+	Heap.Storage.Header * header = HeaderAddr( oaddr );
+	bool isFakeHeader = AlignmentBit( header );			// old fake header ?
 	size_t oalign;
-	if ( isFakeHeader ) {
-		oalign = header->kind.fake.alignment & -2;		// old alignment
-		if ( (uintptr_t)oaddr % nalign == 0				// lucky match ?
+	if ( unlikely( isFakeHeader ) ) {
+		oalign = ClearAlignmentBit( header );			// old alignment
+		if ( unlikely( (uintptr_t)oaddr % nalign == 0	// lucky match ?
 			 && ( oalign <= nalign						// going down
 				  || (oalign >= nalign && oalign <= 256) ) // little alignment storage wasted ?
-			) {
-			headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
-			return realloc( oaddr, size );				// duplicate alignment and special case checks
+			) ) {
+			HeaderAddr( oaddr )->kind.fake.alignment = MarkAlignmentBit( nalign ); // update alignment (could be the same)
+			return realloc( oaddr, size );				// duplicate special case checks
 		} // if
 	} else if ( ! isFakeHeader							// old real header (aligned on libAlign) ?
-				&& nalign == libAlign() )				// new alignment also on libAlign => no fake header needed
-		return realloc( oaddr, size );					// duplicate alignment and special case checks
+				&& nalign == libAlign() ) {				// new alignment also on libAlign => no fake header needed
+		return realloc( oaddr, size );					// duplicate special case checks
+	} // if
 
 	#ifdef __STATISTICS__
@@ -1425,5 +1461,5 @@
 
 	size_t osize = header->kind.real.size;				// old allocation size
-	bool ozfill = (header->kind.real.blockSize & 2);	// old allocation zero filled
+	bool ozfill = ZeroFillBit( header );				// old allocation zero filled
 
 	void * naddr = memalignNoStats( nalign, size );		// create new aligned area
@@ -1434,5 +1470,5 @@
 
 	if ( unlikely( ozfill ) ) {							// previous request zero fill ?
-		header->kind.real.blockSize |= 2;				// mark new request as zero filled
+		MarkZeroFilledBit( header );					// mark new request as zero filled
 		if ( size > osize ) {							// previous request larger ?
 			memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
