Index: libcfa/src/heap.cfa
===================================================================
--- libcfa/src/heap.cfa	(revision 8395152544d83b26e086f1a01fd58d180a6ffb5d)
+++ libcfa/src/heap.cfa	(revision 71dfe494bb0af3fed5d12136b9fea9273c16c541)
@@ -10,6 +10,6 @@
 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Mon Jul 27 23:16:18 2020
-// Update Count     : 815
+// Last Modified On : Mon Aug  3 19:01:22 2020
+// Update Count     : 828
 //
 
@@ -222,8 +222,9 @@
 
 // Bucket size must be multiple of 16.
-// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
+// Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size.
+// malloc(0) returns 0p, so no bucket is necessary: a 0-byte request never returns an address that must be freed.
 static const unsigned int bucketSizes[] @= {			// different bucket sizes
-	16, 32, 48, 64 + sizeof(HeapManager.Storage), // 4
-	96, 112, 128 + sizeof(HeapManager.Storage), // 3
+	16 + sizeof(HeapManager.Storage), 32 + sizeof(HeapManager.Storage), 48 + sizeof(HeapManager.Storage), 64 + sizeof(HeapManager.Storage), // 4
+	96 + sizeof(HeapManager.Storage), 112 + sizeof(HeapManager.Storage), 128 + sizeof(HeapManager.Storage), // 3
 	160, 192, 224, 256 + sizeof(HeapManager.Storage), // 4
 	320, 384, 448, 512 + sizeof(HeapManager.Storage), // 4
@@ -434,4 +435,6 @@
 		#endif // __CFA_DEBUG__
 		header = realHeader( header );					// backup from fake to real header
+	} else {
+		alignment = 0;
 	} // if
 } // fakeHeader
@@ -481,5 +484,6 @@
 			unlock( extlock );
 			errno = ENOMEM;
-			return 0p;
+//			return 0p;
+			abort( "no memory" );
 		} // if
 		#ifdef __STATISTICS__
@@ -550,5 +554,5 @@
 
 			block = (HeapManager.Storage *)extend( tsize );	// mutual exclusion on call
-	if ( unlikely( block == 0p ) ) return 0p;
+//	if ( unlikely( block == 0p ) ) return 0p;
 		#if BUCKETLOCK == SPINLOCK
 		} else {
@@ -746,12 +750,11 @@
 
 static inline void * mallocNoStats( size_t size ) {		// necessary for malloc statistics
-	//assert( heapManager.heapBegin != 0 );
-	if ( unlikely( heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ?
+	verify( heapManager.heapBegin != 0 );				// called before memory_startup ?
+  if ( size == 0 ) return 0p;							// 0 BYTE ALLOCATION RETURNS NULL POINTER
+
 #if __SIZEOF_POINTER__ == 8
 	verify( size < ((typeof(size_t))1 << 48) );
 #endif // __SIZEOF_POINTER__ == 8
-	void * addr = doMalloc( size );
-	if ( unlikely( addr == 0p ) ) errno = ENOMEM;		// POSIX
-	return addr;
+	return doMalloc( size );
 } // mallocNoStats
 
@@ -759,19 +762,21 @@
 static inline void * callocNoStats( size_t dim, size_t elemSize ) {
 	size_t size = dim * elemSize;
+  if ( size == 0 ) return 0p;							// 0 BYTE ALLOCATION RETURNS NULL POINTER
 	char * addr = (char *)mallocNoStats( size );
-  if ( unlikely( addr == 0p ) ) return 0p;
 
 	HeapManager.Storage.Header * header;
 	HeapManager.FreeHeader * freeElem;
 	size_t bsize, alignment;
-	bool mapped __attribute__(( unused )) = headers( "calloc", addr, header, freeElem, bsize, alignment );
+	#ifndef __CFA_DEBUG__
+	bool mapped =
+	#endif // __CFA_DEBUG__
+		headers( "calloc", addr, header, freeElem, bsize, alignment );
 	#ifndef __CFA_DEBUG__
 	// Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
 	if ( ! mapped )
 	#endif // __CFA_DEBUG__
-		// Zero entire data space even when > than size => realloc without a new allocation and zero fill works.
-		// <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size)
+		// <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
 		// `-header`-addr                      `-size
-		memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros
+		memset( addr, '\0', size );						// set to zeros
 
 	header->kind.real.blockSize |= 2;					// mark as zero filled
@@ -781,4 +786,6 @@
 
 static inline void * memalignNoStats( size_t alignment, size_t size ) { // necessary for malloc statistics
+  if ( size == 0 ) return 0p;							// 0 BYTE ALLOCATION RETURNS NULL POINTER
+
 	#ifdef __CFA_DEBUG__
 	checkAlign( alignment );							// check alignment
@@ -798,5 +805,4 @@
 	// add sizeof(Storage) for fake header
 	char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
-  if ( unlikely( addr == 0p ) ) return addr;
 
 	// address in the block of the "next" alignment address
@@ -819,6 +825,7 @@
 static inline void * cmemalignNoStats( size_t alignment, size_t dim, size_t elemSize ) {
 	size_t size = dim * elemSize;
+  if ( size == 0 ) return 0p;							// 0 BYTE ALLOCATION RETURNS NULL POINTER
 	char * addr = (char *)memalignNoStats( alignment, size );
-  if ( unlikely( addr == 0p ) ) return 0p;
+
 	HeapManager.Storage.Header * header;
 	HeapManager.FreeHeader * freeElem;
@@ -890,5 +897,5 @@
 
 		// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-	  if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
+	  if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
 	  if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
 
@@ -902,5 +909,5 @@
 	  if ( oalign == 0 && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
 			header->kind.real.blockSize &= -2;			// no alignment and turn off 0 fill
-			if ( size != odsize ) header->kind.real.size = size; // reset allocation size
+			header->kind.real.size = size;				// reset allocation size
 			return oaddr;
 		} // if
@@ -908,6 +915,5 @@
 		// change size, DO NOT preserve STICKY PROPERTIES.
 		free( oaddr );
-		void * naddr = mallocNoStats( size );			// create new area
-		return naddr;
+		return mallocNoStats( size );					// create new area
 	} // resize
 
@@ -922,5 +928,5 @@
 
 		// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-	  if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
+	  if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
 	  if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
 
@@ -931,6 +937,11 @@
 
 		size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
-	  if ( size <= odsize && odsize <= size * 2 ) {		// allow up to 50% wasted storage in smaller size
-			if ( size != odsize ) header->kind.real.size = size; // reset allocation size
+		size_t osize = header->kind.real.size;			// old allocation size
+		bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled
+	  if ( unlikely( size <= odsize ) && size > odsize / 2 ) { // allow up to 50% wasted storage
+	  		header->kind.real.size = size;				// reset allocation size
+	  		if ( unlikely( ozfill ) && size > osize ) {	// previous request zero fill and larger ?
+	  			memset( (char *)oaddr + osize, (int)'\0', size - osize ); // initialize added storage
+	  		} // if
 			return oaddr;
 		} // if
@@ -939,24 +950,20 @@
 
 		void * naddr;
-		if ( unlikely( oalign != 0 ) ) {				// previous request memalign?
-			if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-				naddr = cmemalignNoStats( oalign, 1, size ); // create new aligned area
-			} else {
-				naddr = memalignNoStats( oalign, size ); // create new aligned area
+		if ( likely( oalign == 0 ) ) {					// previous request memalign?
+			naddr = mallocNoStats( size );				// create new area
+		} else {
+			naddr = memalignNoStats( oalign, size );	// create new aligned area
+		} // if
+
+		headers( "realloc", naddr, header, freeElem, bsize, oalign );
+		memcpy( naddr, oaddr, MIN( osize, size ) );		// copy bytes
+		free( oaddr );
+
+		if ( unlikely( ozfill ) ) {						// previous request zero fill ?
+			header->kind.real.blockSize |= 2;			// mark new request as zero filled
+			if ( size > osize ) {						// previous request larger ?
+				memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
 			} // if
-		} else {
-			if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-				naddr = callocNoStats( 1, size );		// create new area
-			} else {
-				naddr = mallocNoStats( size );			// create new area
-			} // if
-		} // if
-	  if ( unlikely( naddr == 0p ) ) return 0p;
-
-		headers( "realloc", naddr, header, freeElem, bsize, oalign );
-		size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket
-		// To preserve prior fill, the entire bucket must be copied versus the size.
-		memcpy( naddr, oaddr, MIN( odsize, ndsize ) );	// copy bytes
-		free( oaddr );
+		} // if
 		return naddr;
 	} // realloc
@@ -1008,5 +1015,4 @@
 	  if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
 		* memptr = memalign( alignment, size );
-	  if ( unlikely( * memptr == 0p ) ) return ENOMEM;
 		return 0;
 	} // posix_memalign
@@ -1206,7 +1212,6 @@
 
 	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-  if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases
+  if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
   if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size );
-
 
 	if ( unlikely( nalign == 0 ) ) nalign = libAlign();	// reset alignment to minimum
@@ -1235,11 +1240,5 @@
 	// change size
 
-	void * naddr;
-	if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-		naddr = cmemalignNoStats( nalign, 1, size );	// create new aligned area
-	} else {
-		naddr = memalignNoStats( nalign, size );		// create new aligned area
-	} // if
-
+	void * naddr = memalignNoStats( nalign, size );		// create new aligned area
 	free( oaddr );
 	return naddr;
@@ -1258,5 +1257,4 @@
 	size_t bsize, oalign = 0;
 	headers( "realloc", oaddr, header, freeElem, bsize, oalign );
-	size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
 
 	if ( oalign <= nalign && (uintptr_t)oaddr % nalign == 0 ) { // <= alignment and new alignment happens to match
@@ -1274,6 +1272,9 @@
 	#endif // __STATISTICS__
 
+	size_t osize = header->kind.real.size;			// old allocation size
+	bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled
+
 	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-  if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases
+  if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
   if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size );
 
@@ -1286,8 +1287,13 @@
 
 	headers( "realloc", naddr, header, freeElem, bsize, oalign );
-	size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
-	// To preserve prior fill, the entire bucket must be copied versus the size.
-	memcpy( naddr, oaddr, MIN( odsize, ndsize ) );		// copy bytes
+	memcpy( naddr, oaddr, MIN( osize, size ) );			// copy bytes
 	free( oaddr );
+
+	if ( unlikely( ozfill ) ) {							// previous request zero fill ?
+		header->kind.real.blockSize |= 2;				// mark new request as zero filled
+		if ( size > osize ) {							// previous request larger ?
+			memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
+		} // if
+	} // if
 	return naddr;
 } // realloc
Index: libcfa/src/stdlib.hfa
===================================================================
--- libcfa/src/stdlib.hfa	(revision 8395152544d83b26e086f1a01fd58d180a6ffb5d)
+++ libcfa/src/stdlib.hfa	(revision 71dfe494bb0af3fed5d12136b9fea9273c16c541)
@@ -10,6 +10,6 @@
 // Created On       : Thu Jan 28 17:12:35 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Tue Jul 21 07:58:05 2020
-// Update Count     : 475
+// Last Modified On : Thu Jul 30 16:14:58 2020
+// Update Count     : 490
 //
 
@@ -71,10 +71,12 @@
 	T * resize( T * ptr, size_t size ) {				// CFA resize, eliminate return-type cast
 		$RE_SPECIALS( ptr, size, malloc, memalign );
-		return (T *)(void *)resize( (void *)ptr, size ); // CFA resize
+		if ( _Alignof(T) <= libAlign() ) return (T *)(void *)resize( (void *)ptr, size ); // CFA resize
+		else return (T *)(void *)resize( (void *)ptr, _Alignof(T), size ); // CFA resize
 	} // resize
 
 	T * realloc( T * ptr, size_t size ) {				// CFA realloc, eliminate return-type cast
 		$RE_SPECIALS( ptr, size, malloc, memalign );
-		return (T *)(void *)realloc( (void *)ptr, size ); // C realloc
+		if ( _Alignof(T) <= libAlign() ) return (T *)(void *)realloc( (void *)ptr, size ); // C realloc
+		else return (T *)(void *)realloc( (void *)ptr, _Alignof(T), size ); // CFA realloc
 	} // realloc
 
@@ -121,15 +123,8 @@
 	forall( dtype S | sized(S) )
 	T * alloc( S ptr[], size_t dim = 1 ) {				// singleton/array resize
-		size_t len = malloc_usable_size( ptr );			// current bucket size
-		if ( sizeof(T) * dim > len ) {					// not enough space ?
-			T * temp = alloc( dim );					// new storage
-			free( ptr );								// free old storage
-			return temp;
-		} else {
-			return (T *)ptr;
-		} // if
-	} // alloc
-
-	T * alloc( T ptr[], size_t dim, bool copy = true ) {
+		return resize( (T *)ptr, dim * sizeof(T) );		// CFA resize
+	} // alloc
+
+	T * alloc( T ptr[], size_t dim = 1, bool copy = true ) {
 		if ( copy ) {
 			return realloc( ptr, dim * sizeof(T) );		// CFA realloc
@@ -168,5 +163,5 @@
 			memset( (char *)nptr + osize, (int)fill, nsize - osize ); // initialize added storage
 		} // if
-		return (T *)nptr;
+		return nptr;
 	} // alloc_set
 
@@ -181,5 +176,5 @@
 			} // for
 		} // if
-		return (T *)nptr;
+		return nptr;
 	} // alloc_align_set
 } // distribution
@@ -195,5 +190,5 @@
 
 	T * alloc_align( T * ptr, size_t align ) {			// aligned realloc array
-		return (T *)(void *)realloc( (void *)ptr, align, sizeof(T) ); // CFA realloc
+		return (T *)(void *)realloc( (void *)ptr, align, sizeof(T) ); // CFA C realloc
 	} // alloc_align
 
@@ -232,9 +227,9 @@
 		size_t osize = malloc_size( ptr );				// current allocation
 		size_t nsize = dim * sizeof(T);					// new allocation
-		T * nptr = alloc_align( ptr, align, nsize );	// CFA alloc_align
+		T * nptr = alloc_align( ptr, align, nsize );
 		if ( nsize > osize ) {							// larger ?
 			memset( (char *)nptr + osize, (int)fill, nsize - osize ); // initialize added storage
 		} // if
-		return (T *)nptr;
+		return nptr;
 	} // alloc_align_set
 
@@ -243,5 +238,5 @@
 		size_t nsize = dim * sizeof(T);					// new allocation
 		size_t ndim = nsize / sizeof(T);				// new dimension
-		T * nptr = alloc_align( ptr, align, nsize );		// CFA alloc_align
+		T * nptr = alloc_align( ptr, align, nsize );
 		if ( ndim > odim ) {							// larger ?
 			for ( i; odim ~ ndim ) {
@@ -249,5 +244,5 @@
 			} // for
 		} // if
-		return (T *)nptr;
+		return nptr;
 	} // alloc_align_set
 } // distribution
Index: tests/heap.cfa
===================================================================
--- tests/heap.cfa	(revision 8395152544d83b26e086f1a01fd58d180a6ffb5d)
+++ tests/heap.cfa	(revision 71dfe494bb0af3fed5d12136b9fea9273c16c541)
@@ -10,6 +10,6 @@
 // Created On       : Tue Nov  6 17:54:56 2018
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Sun Nov 24 12:34:51 2019
-// Update Count     : 28
+// Last Modified On : Tue Aug  4 06:36:17 2020
+// Update Count     : 56
 // 
 
@@ -75,5 +75,4 @@
 		size_t s = (i + 1) * 20;
 		char * area = (char *)malloc( s );
-		if ( area == 0p ) abort( "malloc/free out of memory" );
 		area[0] = '\345'; area[s - 1] = '\345';			// fill first/last
 		area[malloc_usable_size( area ) - 1] = '\345';	// fill ultimate byte
@@ -84,5 +83,4 @@
 		size_t s = i + 1;								// +1 to make initialization simpler
 		locns[i] = (char *)malloc( s );
-		if ( locns[i] == 0p ) abort( "malloc/free out of memory" );
 		locns[i][0] = '\345'; locns[i][s - 1] = '\345';	// fill first/last
 		locns[i][malloc_usable_size( locns[i] ) - 1] = '\345'; // fill ultimate byte
@@ -100,5 +98,4 @@
 		size_t s = i + default_mmap_start();			// cross over point
 		char * area = (char *)malloc( s );
-		if ( area == 0p ) abort( "malloc/free out of memory" );
 		area[0] = '\345'; area[s - 1] = '\345';			// fill first/last
 		area[malloc_usable_size( area ) - 1] = '\345';	// fill ultimate byte
@@ -109,5 +106,4 @@
 		size_t s = i + default_mmap_start();			// cross over point
 		locns[i] = (char *)malloc( s );
-		if ( locns[i] == 0p ) abort( "malloc/free out of memory" );
 		locns[i][0] = '\345'; locns[i][s - 1] = '\345';	// fill first/last
 		locns[i][malloc_usable_size( locns[i] ) - 1] = '\345'; // fill ultimate byte
@@ -125,7 +121,6 @@
 		size_t s = (i + 1) * 20;
 		char * area = (char *)calloc( 5, s );
-		if ( area == 0p ) abort( "calloc/free out of memory" );
 		if ( area[0] != '\0' || area[s - 1] != '\0' ||
-			 area[malloc_usable_size( area ) - 1] != '\0' ||
+			 area[malloc_size( area ) - 1] != '\0' ||
 			 ! malloc_zero_fill( area ) ) abort( "calloc/free corrupt storage1" );
 		area[0] = '\345'; area[s - 1] = '\345';			// fill first/last
@@ -137,7 +132,6 @@
 		size_t s = i + 1;
 		locns[i] = (char *)calloc( 5, s );
-		if ( locns[i] == 0p ) abort( "calloc/free out of memory" );
 		if ( locns[i][0] != '\0' || locns[i][s - 1] != '\0' ||
-			 locns[i][malloc_usable_size( locns[i] ) - 1] != '\0' ||
+			 locns[i][malloc_size( locns[i] ) - 1] != '\0' ||
 			 ! malloc_zero_fill( locns[i] ) ) abort( "calloc/free corrupt storage2" );
 		locns[i][0] = '\345'; locns[i][s - 1] = '\345';	// fill first/last
@@ -156,7 +150,6 @@
 		size_t s = i + default_mmap_start();			// cross over point
 		char * area = (char *)calloc( 1, s );
-		if ( area == 0p ) abort( "calloc/free out of memory" );
 		if ( area[0] != '\0' || area[s - 1] != '\0' ) abort( "calloc/free corrupt storage4.1" );
-		if ( area[malloc_usable_size( area ) - 1] != '\0' ) abort( "calloc/free corrupt storage4.2" );
+		if ( area[malloc_size( area ) - 1] != '\0' ) abort( "calloc/free corrupt storage4.2" );
 		if ( ! malloc_zero_fill( area ) ) abort( "calloc/free corrupt storage4.3" );
 		area[0] = '\345'; area[s - 1] = '\345';			// fill first/last
@@ -168,7 +161,6 @@
 		size_t s = i + default_mmap_start();			// cross over point
 		locns[i] = (char *)calloc( 1, s );
-		if ( locns[i] == 0p ) abort( "calloc/free out of memory" );
 		if ( locns[i][0] != '\0' || locns[i][s - 1] != '\0' ||
-			 locns[i][malloc_usable_size( locns[i] ) - 1] != '\0' ||
+			 locns[i][malloc_size( locns[i] ) - 1] != '\0' ||
 			 ! malloc_zero_fill( locns[i] ) ) abort( "calloc/free corrupt storage5" );
 		locns[i][0] = '\345'; locns[i][s - 1] = '\345';	// fill first/last
@@ -188,5 +180,4 @@
 		for ( s; 1 ~ NoOfAllocs ) {						// allocation of size 0 can return null
 			char * area = (char *)memalign( a, s );
-			if ( area == 0p ) abort( "memalign/free out of memory" );
 			//sout | i | area;
 			if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment
@@ -206,5 +197,4 @@
 			size_t s = i + default_mmap_start();		// cross over point
 			char * area = (char *)memalign( a, s );
-			if ( area == 0p ) abort( "memalign/free out of memory" );
 			//sout | i | area;
 			if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment
@@ -222,7 +212,6 @@
 		// initial N byte allocation
 		char * area = (char *)calloc( 5, i );
-		if ( area == 0p ) abort( "calloc/realloc/free out of memory" );
 		if ( area[0] != '\0' || area[i - 1] != '\0' ||
-			 area[malloc_usable_size( area ) - 1] != '\0' ||
+			 area[malloc_size( area ) - 1] != '\0' ||
 			 ! malloc_zero_fill( area ) ) abort( "calloc/realloc/free corrupt storage1" );
 
@@ -230,7 +219,6 @@
 		for ( s; i ~ 256 * 1024 ~ 26 ) {				// start at initial memory request
 			area = (char *)realloc( area, s );			// attempt to reuse storage
-			if ( area == 0p ) abort( "calloc/realloc/free out of memory" );
 			if ( area[0] != '\0' || area[s - 1] != '\0' ||
-				 area[malloc_usable_size( area ) - 1] != '\0' ||
+				 area[malloc_size( area ) - 1] != '\0' ||
 				 ! malloc_zero_fill( area ) ) abort( "calloc/realloc/free corrupt storage2" );
 		} // for
@@ -244,16 +232,17 @@
 		size_t s = i + default_mmap_start();			// cross over point
 		char * area = (char *)calloc( 1, s );
-		if ( area == 0p ) abort( "calloc/realloc/free out of memory" );
+// 		if ( area == 0p ) abort( "calloc/realloc/free out of memory" );
 		if ( area[0] != '\0' || area[s - 1] != '\0' ||
-			 area[malloc_usable_size( area ) - 1] != '\0' ||
-			 ! malloc_zero_fill( area ) ) abort( "calloc/realloc/free corrupt storage1" );
+			 area[malloc_size( area ) - 1] != '\0' ||
+			 ! malloc_zero_fill( area ) ) //abort( "calloc/realloc/free corrupt storage3" );
+			printf( "C %zd %d %d %d %d\n", s, area[0] != '\0', area[s - 1] != '\0', area[malloc_size( area ) - 1] != '\0', ! malloc_zero_fill( area ) );
 
 		// Do not start this loop index at 0 because realloc of 0 bytes frees the storage.
 		for ( r; i ~ 256 * 1024 ~ 26 ) {				// start at initial memory request
 			area = (char *)realloc( area, r );			// attempt to reuse storage
-			if ( area == 0p ) abort( "calloc/realloc/free out of memory" );
+// 			if ( area == 0p ) abort( "calloc/realloc/free out of memory" );
 			if ( area[0] != '\0' || area[r - 1] != '\0' ||
-				 area[malloc_usable_size( area ) - 1] != '\0' ||
-				 ! malloc_zero_fill( area ) ) abort( "calloc/realloc/free corrupt storage2" );
+				 area[malloc_size( area ) - 1] != '\0' ||
+				 ! malloc_zero_fill( area ) ) abort( "calloc/realloc/free corrupt storage4" );
 		} // for
 		free( area );
@@ -266,5 +255,5 @@
 		// initial N byte allocation
 		char * area = (char *)memalign( a, amount );	// aligned N-byte allocation
-		if ( area == 0p ) abort( "memalign/realloc/free out of memory" ); // no storage ?
+// 		if ( area == 0p ) abort( "memalign/realloc/free out of memory" ); // no storage ?
 		//sout | alignments[a] | area;
 		if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment
@@ -277,5 +266,4 @@
 			if ( area[0] != '\345' || area[s - 2] != '\345' ) abort( "memalign/realloc/free corrupt storage" );
 			area = (char *)realloc( area, s );			// attempt to reuse storage
-			if ( area == 0p ) abort( "memalign/realloc/free out of memory" ); // no storage ?
 			//sout | i | area;
 			if ( (size_t)area % a != 0 ) {				// check for initial alignment
@@ -293,5 +281,4 @@
 		for ( s; 1 ~ limit ) {							// allocation of size 0 can return null
 			char * area = (char *)cmemalign( a, 1, s );
-			if ( area == 0p ) abort( "cmemalign/free out of memory" );
 			//sout | i | area;
 			if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment
@@ -299,5 +286,5 @@
 			} // if
 			if ( area[0] != '\0' || area[s - 1] != '\0' ||
-				 area[malloc_usable_size( area ) - 1] != '\0' ||
+				 area[malloc_size( area ) - 1] != '\0' ||
 				 ! malloc_zero_fill( area ) ) abort( "cmemalign/free corrupt storage" );
 			area[0] = '\345'; area[s - 1] = '\345';		// fill first/last byte
@@ -312,5 +299,4 @@
 		// initial N byte allocation
 		char * area = (char *)cmemalign( a, 1, amount ); // aligned N-byte allocation
-		if ( area == 0p ) abort( "cmemalign/realloc/free out of memory" ); // no storage ?
 		//sout | alignments[a] | area;
 		if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment
@@ -318,5 +304,5 @@
 		} // if
 		if ( area[0] != '\0' || area[amount - 1] != '\0' ||
-			 area[malloc_usable_size( area ) - 1] != '\0' ||
+			 area[malloc_size( area ) - 1] != '\0' ||
 			 ! malloc_zero_fill( area ) ) abort( "cmemalign/realloc/free corrupt storage1" );
 		area[0] = '\345'; area[amount - 2] = '\345';	// fill first/penultimate byte
@@ -326,11 +312,10 @@
 			if ( area[0] != '\345' || area[s - 2] != '\345' ) abort( "cmemalign/realloc/free corrupt storage2" );
 			area = (char *)realloc( area, s );			// attempt to reuse storage
-			if ( area == 0p ) abort( "cmemalign/realloc/free out of memory" ); // no storage ?
 			//sout | i | area;
 			if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment
 				abort( "cmemalign/realloc/free bad alignment %p", area );
 			} // if
-			if ( area[s - 1] != '\0' || area[s - 1] != '\0' ||
-				 area[malloc_usable_size( area ) - 1] != '\0' ||
+			if ( area[0] != '\345' || area[s - 1] != '\0' ||
+				 area[malloc_size( area ) - 1] != '\0' ||
 				 ! malloc_zero_fill( area ) ) abort( "cmemalign/realloc/free corrupt storage3" );
 			area[s - 1] = '\345';						// fill last byte
@@ -345,5 +330,4 @@
 		// initial N byte allocation
 		char * area = (char *)memalign( a, amount );	// aligned N-byte allocation
-		if ( area == 0p ) abort( "memalign/realloc with align/free out of memory" ); // no storage ?
 		//sout | alignments[a] | area | endl;
 		if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment
@@ -356,5 +340,4 @@
 			if ( area[0] != '\345' || area[s - 2] != '\345' ) abort( "memalign/realloc/free corrupt storage" );
 			area = (char *)realloc( area, a * 2, s );	// attempt to reuse storage
-			if ( area == 0p ) abort( "memalign/realloc with align/free out of memory" ); // no storage ?
 			//sout | i | area | endl;
 			if ( (size_t)area % a * 2 != 0 ) {			// check for initial alignment
@@ -371,6 +354,5 @@
 	for ( size_t a = libAlign() + libAlign(); a <= limit; a += a ) { // generate powers of 2
 		// initial N byte allocation
-		char *area = (char *)cmemalign( a, 1, amount );	// aligned N-byte allocation
-		if ( area == 0p ) abort( "cmemalign/realloc with align/free out of memory" ); // no storage ?
+		char * area = (char *)cmemalign( a, 1, amount ); // aligned N-byte allocation
 		//sout | alignments[a] | area | endl;
 		if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment
@@ -378,5 +360,5 @@
 		} // if
 		if ( area[0] != '\0' || area[amount - 1] != '\0' ||
-			 area[malloc_usable_size( area ) - 1] != '\0' ||
+			 area[malloc_size( area ) - 1] != '\0' ||
 			 ! malloc_zero_fill( area ) ) abort( "cmemalign/realloc with align/free corrupt storage1" );
 		area[0] = '\345'; area[amount - 2] = '\345';	// fill first/penultimate byte
@@ -386,11 +368,10 @@
 			if ( area[0] != '\345' || area[s - 2] != '\345' ) abort( "cmemalign/realloc with align/free corrupt storage2" );
 			area = (char *)realloc( area, a * 2, s );	// attempt to reuse storage
-			if ( area == 0p ) abort( "cmemalign/realloc with align/free out of memory" ); // no storage ?
 			//sout | i | area | endl;
 			if ( (size_t)area % a * 2 != 0 || malloc_alignment( area ) != a * 2 ) { // check for initial alignment
-				abort( "cmemalign/realloc with align/free bad alignment %p %jd %jd", area, malloc_alignment( area ), a * 2 );
+				abort( "cmemalign/realloc with align/free bad alignment %p %zd %zd", area, malloc_alignment( area ), a * 2 );
 			} // if
 			if ( area[s - 1] != '\0' || area[s - 1] != '\0' ||
-				 area[malloc_usable_size( area ) - 1] != '\0' ||
+				 area[malloc_size( area ) - 1] != '\0' ||
 				 ! malloc_zero_fill( area ) ) abort( "cmemalign/realloc/free corrupt storage3" );
 			area[s - 1] = '\345';						// fill last byte
