Index: libcfa/src/heap.cfa
===================================================================
--- libcfa/src/heap.cfa	(revision 1a39a5a0bcbeb171fe0e5b89de440ca60f18cc21)
+++ libcfa/src/heap.cfa	(revision d5d3a90b22aaaa0979ed8f696f3909a2c8633243)
@@ -10,6 +10,6 @@
 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Mon Jul 27 23:16:18 2020
-// Update Count     : 815
+// Last Modified On : Mon Aug  3 19:01:22 2020
+// Update Count     : 828
 //
 
@@ -222,8 +222,9 @@
 
 // Bucket size must be multiple of 16.
-// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
+// Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size.
+// malloc(0) returns 0p, so no bucket is necessary for a 0-byte request (no freeable address is returned).
 static const unsigned int bucketSizes[] @= {			// different bucket sizes
-	16, 32, 48, 64 + sizeof(HeapManager.Storage), // 4
-	96, 112, 128 + sizeof(HeapManager.Storage), // 3
+	16 + sizeof(HeapManager.Storage), 32 + sizeof(HeapManager.Storage), 48 + sizeof(HeapManager.Storage), 64 + sizeof(HeapManager.Storage), // 4
+	96 + sizeof(HeapManager.Storage), 112 + sizeof(HeapManager.Storage), 128 + sizeof(HeapManager.Storage), // 3
 	160, 192, 224, 256 + sizeof(HeapManager.Storage), // 4
 	320, 384, 448, 512 + sizeof(HeapManager.Storage), // 4
@@ -434,4 +435,6 @@
 		#endif // __CFA_DEBUG__
 		header = realHeader( header );					// backup from fake to real header
+	} else {
+		alignment = 0;
 	} // if
 } // fakeHeader
@@ -481,5 +484,6 @@
 			unlock( extlock );
 			errno = ENOMEM;
-			return 0p;
+//			return 0p;
+			abort( "no memory" );
 		} // if
 		#ifdef __STATISTICS__
@@ -550,5 +554,5 @@
 
 			block = (HeapManager.Storage *)extend( tsize );	// mutual exclusion on call
-	if ( unlikely( block == 0p ) ) return 0p;
+//	if ( unlikely( block == 0p ) ) return 0p;
 		#if BUCKETLOCK == SPINLOCK
 		} else {
@@ -746,12 +750,11 @@
 
 static inline void * mallocNoStats( size_t size ) {		// necessary for malloc statistics
-	//assert( heapManager.heapBegin != 0 );
-	if ( unlikely( heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ?
+	verify( heapManager.heapBegin != 0 );				// called before memory_startup ?
+  if ( size == 0 ) return 0p;							// 0 BYTE ALLOCATION RETURNS NULL POINTER
+
 #if __SIZEOF_POINTER__ == 8
 	verify( size < ((typeof(size_t))1 << 48) );
 #endif // __SIZEOF_POINTER__ == 8
-	void * addr = doMalloc( size );
-	if ( unlikely( addr == 0p ) ) errno = ENOMEM;		// POSIX
-	return addr;
+	return doMalloc( size );
 } // mallocNoStats
 
@@ -759,19 +762,21 @@
 static inline void * callocNoStats( size_t dim, size_t elemSize ) {
 	size_t size = dim * elemSize;
+  if ( size == 0 ) return 0p;							// 0 BYTE ALLOCATION RETURNS NULL POINTER
 	char * addr = (char *)mallocNoStats( size );
-  if ( unlikely( addr == 0p ) ) return 0p;
 
 	HeapManager.Storage.Header * header;
 	HeapManager.FreeHeader * freeElem;
 	size_t bsize, alignment;
-	bool mapped __attribute__(( unused )) = headers( "calloc", addr, header, freeElem, bsize, alignment );
+	#ifndef __CFA_DEBUG__
+	bool mapped =
+	#endif // __CFA_DEBUG__
+		headers( "calloc", addr, header, freeElem, bsize, alignment );
 	#ifndef __CFA_DEBUG__
 	// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
 	if ( ! mapped )
 	#endif // __CFA_DEBUG__
-		// Zero entire data space even when > than size => realloc without a new allocation and zero fill works.
-		// <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size)
+		// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
 		// `-header`-addr                      `-size
-		memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros
+		memset( addr, '\0', size );						// set to zeros
 
 	header->kind.real.blockSize |= 2;					// mark as zero filled
@@ -781,4 +786,6 @@
 
 static inline void * memalignNoStats( size_t alignment, size_t size ) { // necessary for malloc statistics
+  if ( size == 0 ) return 0p;							// 0 BYTE ALLOCATION RETURNS NULL POINTER
+
 	#ifdef __CFA_DEBUG__
 	checkAlign( alignment );							// check alignment
@@ -798,5 +805,4 @@
 	// add sizeof(Storage) for fake header
 	char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
-  if ( unlikely( addr == 0p ) ) return addr;
 
 	// address in the block of the "next" alignment address
@@ -819,6 +825,7 @@
 static inline void * cmemalignNoStats( size_t alignment, size_t dim, size_t elemSize ) {
 	size_t size = dim * elemSize;
+  if ( size == 0 ) return 0p;							// 0 BYTE ALLOCATION RETURNS NULL POINTER
 	char * addr = (char *)memalignNoStats( alignment, size );
-  if ( unlikely( addr == 0p ) ) return 0p;
+
 	HeapManager.Storage.Header * header;
 	HeapManager.FreeHeader * freeElem;
@@ -890,5 +897,5 @@
 
 		// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-	  if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
+	  if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
 	  if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
 
@@ -902,5 +909,5 @@
 	  if ( oalign == 0 && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
 			header->kind.real.blockSize &= -2;			// no alignment and turn off 0 fill
-			if ( size != odsize ) header->kind.real.size = size; // reset allocation size
+			header->kind.real.size = size;				// reset allocation size
 			return oaddr;
 		} // if
@@ -908,6 +915,5 @@
 		// change size, DO NOT preserve STICKY PROPERTIES.
 		free( oaddr );
-		void * naddr = mallocNoStats( size );			// create new area
-		return naddr;
+		return mallocNoStats( size );					// create new area
 	} // resize
 
@@ -922,5 +928,5 @@
 
 		// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-	  if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
+	  if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
 	  if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
 
@@ -931,6 +937,11 @@
 
 		size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
-	  if ( size <= odsize && odsize <= size * 2 ) {		// allow up to 50% wasted storage in smaller size
-			if ( size != odsize ) header->kind.real.size = size; // reset allocation size
+		size_t osize = header->kind.real.size;			// old allocation size
+		bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled
+	  if ( unlikely( size <= odsize ) && size > odsize / 2 ) { // allow up to 50% wasted storage
+	  		header->kind.real.size = size;				// reset allocation size
+	  		if ( unlikely( ozfill ) && size > osize ) {	// previous request zero fill and larger ?
+	  			memset( (char *)oaddr + osize, (int)'\0', size - osize ); // initialize added storage
+	  		} // if
 			return oaddr;
 		} // if
@@ -939,24 +950,20 @@
 
 		void * naddr;
-		if ( unlikely( oalign != 0 ) ) {				// previous request memalign?
-			if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-				naddr = cmemalignNoStats( oalign, 1, size ); // create new aligned area
-			} else {
-				naddr = memalignNoStats( oalign, size ); // create new aligned area
+		if ( likely( oalign == 0 ) ) {					// previous request memalign?
+			naddr = mallocNoStats( size );				// create new area
+		} else {
+			naddr = memalignNoStats( oalign, size );	// create new aligned area
+		} // if
+
+		headers( "realloc", naddr, header, freeElem, bsize, oalign );
+		memcpy( naddr, oaddr, MIN( osize, size ) );		// copy bytes
+		free( oaddr );
+
+		if ( unlikely( ozfill ) ) {						// previous request zero fill ?
+			header->kind.real.blockSize |= 2;			// mark new request as zero filled
+			if ( size > osize ) {						// previous request larger ?
+				memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
 			} // if
-		} else {
-			if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-				naddr = callocNoStats( 1, size );		// create new area
-			} else {
-				naddr = mallocNoStats( size );			// create new area
-			} // if
-		} // if
-	  if ( unlikely( naddr == 0p ) ) return 0p;
-
-		headers( "realloc", naddr, header, freeElem, bsize, oalign );
-		size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket
-		// To preserve prior fill, the entire bucket must be copied versus the size.
-		memcpy( naddr, oaddr, MIN( odsize, ndsize ) );	// copy bytes
-		free( oaddr );
+		} // if
 		return naddr;
 	} // realloc
@@ -1008,5 +1015,4 @@
 	  if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
 		* memptr = memalign( alignment, size );
-	  if ( unlikely( * memptr == 0p ) ) return ENOMEM;
 		return 0;
 	} // posix_memalign
@@ -1206,7 +1212,6 @@
 
 	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-  if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases
+  if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
   if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size );
-
 
 	if ( unlikely( nalign == 0 ) ) nalign = libAlign();	// reset alignment to minimum
@@ -1235,11 +1240,5 @@
 	// change size
 
-	void * naddr;
-	if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-		naddr = cmemalignNoStats( nalign, 1, size );	// create new aligned area
-	} else {
-		naddr = memalignNoStats( nalign, size );		// create new aligned area
-	} // if
-
+	void * naddr = memalignNoStats( nalign, size );		// create new aligned area
 	free( oaddr );
 	return naddr;
@@ -1258,5 +1257,4 @@
 	size_t bsize, oalign = 0;
 	headers( "realloc", oaddr, header, freeElem, bsize, oalign );
-	size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
 
 	if ( oalign <= nalign && (uintptr_t)oaddr % nalign == 0 ) { // <= alignment and new alignment happens to match
@@ -1274,6 +1272,9 @@
 	#endif // __STATISTICS__
 
+	size_t osize = header->kind.real.size;			// old allocation size
+	bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled
+
 	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-  if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases
+  if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
   if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size );
 
@@ -1286,8 +1287,13 @@
 
 	headers( "realloc", naddr, header, freeElem, bsize, oalign );
-	size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
-	// To preserve prior fill, the entire bucket must be copied versus the size.
-	memcpy( naddr, oaddr, MIN( odsize, ndsize ) );		// copy bytes
+	memcpy( naddr, oaddr, MIN( osize, size ) );			// copy bytes
 	free( oaddr );
+
+	if ( unlikely( ozfill ) ) {							// previous request zero fill ?
+		header->kind.real.blockSize |= 2;				// mark new request as zero filled
+		if ( size > osize ) {							// previous request larger ?
+			memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
+		} // if
+	} // if
 	return naddr;
 } // realloc
