Index: libcfa/src/concurrency/coroutine.cfa
===================================================================
--- libcfa/src/concurrency/coroutine.cfa	(revision accc5dbbe1d1e2468fc5a0056e612ce9da496ce2)
+++ libcfa/src/concurrency/coroutine.cfa	(revision c8025a2106ce52966312a91c95b8d09da7b91b8b)
@@ -10,6 +10,6 @@
 // Created On       : Mon Nov 28 12:27:26 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Fri Oct 23 23:05:24 2020
-// Update Count     : 22
+// Last Modified On : Tue Dec 15 12:06:04 2020
+// Update Count     : 23
 //
 
@@ -28,4 +28,7 @@
 #include "kernel_private.hfa"
 #include "exception.hfa"
+#include "math.hfa"
+
+#define CFA_COROUTINE_USE_MMAP 0
 
 #define __CFA_INVOKE_PRIVATE__
@@ -85,6 +88,8 @@
 static const size_t MinStackSize = 1000;
 extern size_t __page_size;				// architecture pagesize HACK, should go in proper runtime singleton
+extern int __map_prot;
 
 void __stack_prepare( __stack_info_t * this, size_t create_size );
+void __stack_clean  ( __stack_info_t * this );
 
 //-----------------------------------------------------------------------------
@@ -107,16 +112,5 @@
 	bool userStack = ((intptr_t)this.storage & 0x1) != 0;
 	if ( ! userStack && this.storage ) {
-		__attribute__((may_alias)) intptr_t * istorage = (intptr_t *)&this.storage;
-		*istorage &= (intptr_t)-1;
-
-		void * storage = this.storage->limit;
-		__cfaabi_dbg_debug_do(
-			storage = (char*)(storage) - __page_size;
-			if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) {
-				abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
-			}
-		);
-		__cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
-		free( storage );
+		__stack_clean( &this );
 	}
 }
@@ -167,24 +161,58 @@
 	assert(__page_size != 0l);
 	size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
+	size = ceiling(size, __page_size);
 
 	// If we are running debug, we also need to allocate a guardpage to catch stack overflows.
 	void * storage;
-	__cfaabi_dbg_debug_do(
-		storage = memalign( __page_size, size + __page_size );
-	);
-	__cfaabi_dbg_no_debug_do(
-		storage = (void*)malloc(size);
-	);
-
+	#if CFA_COROUTINE_USE_MMAP
+		storage = mmap(0p, size + __page_size, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+		if(storage == ((void*)-1)) {
+			abort( "coroutine stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
+		}
+		if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
+			abort( "coroutine stack creation : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
+		} // if
+		storage = (void *)(((intptr_t)storage) + __page_size);
+	#else
+		__cfaabi_dbg_debug_do(
+			storage = memalign( __page_size, size + __page_size );
+		);
+		__cfaabi_dbg_no_debug_do(
+			storage = (void*)malloc(size);
+		);
+
+		__cfaabi_dbg_debug_do(
+			if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
+				abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+			}
+			storage = (void *)(((intptr_t)storage) + __page_size);
+		);
+	#endif
 	__cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size);
-	__cfaabi_dbg_debug_do(
-		if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
-			abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
-		}
-		storage = (void *)(((intptr_t)storage) + __page_size);
-	);
 
 	verify( ((intptr_t)storage & (libAlign() - 1)) == 0ul );
 	return [storage, size];
+}
+
+void __stack_clean  ( __stack_info_t * this ) {
+	size_t size = ((intptr_t)this->storage->base) - ((intptr_t)this->storage->limit) + sizeof(__stack_t);
+	void * storage = this->storage->limit;
+
+	#if CFA_COROUTINE_USE_MMAP
+		storage = (void *)(((intptr_t)storage) - __page_size);
+		if(munmap(storage, size + __page_size) == -1) {
+			abort( "coroutine stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) );
+		}
+	#else
+		__cfaabi_dbg_debug_do(
+			storage = (char*)(storage) - __page_size;
+			if ( mprotect( storage, __page_size, __map_prot ) == -1 ) {
+				abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", this, errno, strerror( errno ) );
+			}
+		);
+
+		free( storage );
+	#endif
+	__cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
 }
 
@@ -210,7 +238,7 @@
 	assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %zd bytes for a stack.", size, MinStackSize );
 
-	this->storage = (__stack_t *)((intptr_t)storage + size);
+	this->storage = (__stack_t *)((intptr_t)storage + size - sizeof(__stack_t));
 	this->storage->limit = storage;
-	this->storage->base  = (void*)((intptr_t)storage + size);
+	this->storage->base  = (void*)((intptr_t)storage + size - sizeof(__stack_t));
 	this->storage->exception_context.top_resume = 0p;
 	this->storage->exception_context.current_exception = 0p;
Index: libcfa/src/concurrency/coroutine.hfa
===================================================================
--- libcfa/src/concurrency/coroutine.hfa	(revision accc5dbbe1d1e2468fc5a0056e612ce9da496ce2)
+++ libcfa/src/concurrency/coroutine.hfa	(revision c8025a2106ce52966312a91c95b8d09da7b91b8b)
@@ -102,5 +102,7 @@
 }
 
-extern void __stack_prepare   ( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
+extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
+extern void __stack_clean  ( __stack_info_t * this );
+
 
 // Suspend implementation inlined for performance
@@ -142,8 +144,6 @@
 
 	if( unlikely(dst->context.SP == 0p) ) {
-		active_thread()->curr_cor = dst;
 		__stack_prepare(&dst->stack, 65000);
 		__cfactx_start(main, dst, cor, __cfactx_invoke_coroutine);
-		active_thread()->curr_cor = src;
 	}
 
Index: libcfa/src/concurrency/io/setup.cfa
===================================================================
--- libcfa/src/concurrency/io/setup.cfa	(revision accc5dbbe1d1e2468fc5a0056e612ce9da496ce2)
+++ libcfa/src/concurrency/io/setup.cfa	(revision c8025a2106ce52966312a91c95b8d09da7b91b8b)
@@ -17,4 +17,9 @@
 #define _GNU_SOURCE         /* See feature_test_macros(7) */
 
+#if defined(__CFA_DEBUG__)
+	// #define __CFA_DEBUG_PRINT_IO__
+	// #define __CFA_DEBUG_PRINT_IO_CORE__
+#endif
+
 #include "io/types.hfa"
 #include "kernel.hfa"
@@ -111,5 +116,5 @@
 
 	void __kernel_io_startup(void) {
-		__cfaabi_dbg_print_safe( "Kernel : Creating EPOLL instance\n" );
+		__cfadbg_print_safe(io_core, "Kernel : Creating EPOLL instance\n" );
 
 		iopoll.epollfd = epoll_create1(0);
@@ -118,5 +123,5 @@
 		}
 
-		__cfaabi_dbg_print_safe( "Kernel : Starting io poller thread\n" );
+		__cfadbg_print_safe(io_core, "Kernel : Starting io poller thread\n" );
 
 		iopoll.run = true;
@@ -132,6 +137,5 @@
 		// Wait for the io poller thread to finish
 
-		pthread_join( iopoll.thrd, 0p );
-		free( iopoll.stack );
+		__destroy_pthread( iopoll.thrd, iopoll.stack, 0p );
 
 		int ret = close(iopoll.epollfd);
@@ -142,5 +146,5 @@
 		// Io polling is now fully stopped
 
-		__cfaabi_dbg_print_safe( "Kernel : IO poller stopped\n" );
+		__cfadbg_print_safe(io_core, "Kernel : IO poller stopped\n" );
 	}
 
@@ -150,5 +154,5 @@
 		id.id = doregister(&id);
 		__cfaabi_tls.this_proc_id = &id;
-		__cfaabi_dbg_print_safe( "Kernel : IO poller thread starting\n" );
+		__cfadbg_print_safe(io_core, "Kernel : IO poller thread starting\n" );
 
 		// Block signals to control when they arrive
@@ -185,5 +189,5 @@
 		}
 
-		__cfaabi_dbg_print_safe( "Kernel : IO poller thread stopping\n" );
+		__cfadbg_print_safe(io_core, "Kernel : IO poller thread stopping\n" );
 		unregister(&id);
 		return 0p;
Index: libcfa/src/concurrency/kernel/startup.cfa
===================================================================
--- libcfa/src/concurrency/kernel/startup.cfa	(revision accc5dbbe1d1e2468fc5a0056e612ce9da496ce2)
+++ libcfa/src/concurrency/kernel/startup.cfa	(revision c8025a2106ce52966312a91c95b8d09da7b91b8b)
@@ -29,4 +29,7 @@
 #include "kernel_private.hfa"
 #include "startup.hfa"          // STARTUP_PRIORITY_XXX
+#include "math.hfa"
+
+#define CFA_PROCESSOR_USE_MMAP 0
 
 //-----------------------------------------------------------------------------
@@ -114,5 +117,5 @@
 }
 
-size_t __page_size = 0;
+extern size_t __page_size;
 
 //-----------------------------------------------------------------------------
@@ -158,6 +161,4 @@
 	/* paranoid */ verify( ! __preemption_enabled() );
 	__cfadbg_print_safe(runtime_core, "Kernel : Starting\n");
-
-	__page_size = sysconf( _SC_PAGESIZE );
 
 	__cfa_dbg_global_clusters.list{ __get };
@@ -539,4 +540,5 @@
 }
 
+extern size_t __page_size;
 void ^?{}(processor & this) with( this ){
 	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
@@ -550,8 +552,5 @@
 	}
 
-	int err = pthread_join( kernel_thread, 0p );
-	if( err != 0 ) abort("KERNEL ERROR: joining processor %p caused error %s\n", &this, strerror(err));
-
-	free( this.stack );
+	__destroy_pthread( kernel_thread, this.stack, 0p );
 
 	disable_interrupts();
@@ -678,14 +677,26 @@
 
 	void * stack;
-	__cfaabi_dbg_debug_do(
-		stack = memalign( __page_size, stacksize + __page_size );
-		// pthread has no mechanism to create the guard page in user supplied stack.
+	#if CFA_PROCESSOR_USE_MMAP
+		stacksize = ceiling( stacksize, __page_size ) + __page_size;
+		stack = mmap(0p, stacksize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+		if(stack == ((void*)-1)) {
+			abort( "pthread stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
+		}
 		if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
-			abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
+			abort( "pthread stack creation : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
 		} // if
-	);
-	__cfaabi_dbg_no_debug_do(
-		stack = malloc( stacksize );
-	);
+	#else
+		__cfaabi_dbg_debug_do(
+			stack = memalign( __page_size, stacksize + __page_size );
+			// pthread has no mechanism to create the guard page in user supplied stack.
+			if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
+				abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
+			} // if
+		);
+		__cfaabi_dbg_no_debug_do(
+			stack = malloc( stacksize );
+		);
+	#endif
+
 
 	check( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );
@@ -694,4 +705,28 @@
 	return stack;
 }
+
+void __destroy_pthread( pthread_t pthread, void * stack, void ** retval ) {
+	int err = pthread_join( pthread, retval );
+	if( err != 0 ) abort("KERNEL ERROR: joining pthread %p caused error %s\n", (void*)pthread, strerror(err));
+
+	#if CFA_PROCESSOR_USE_MMAP
+		pthread_attr_t attr;
+
+		check( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
+
+		size_t stacksize;
+		// default stack size, normally defined by shell limit
+		check( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
+		assert( stacksize >= PTHREAD_STACK_MIN );
+		stacksize += __page_size;
+
+		if(munmap(stack, stacksize) == -1) {
+			abort( "pthread stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) );
+		}
+	#else
+		free( stack );
+	#endif
+}
+
 
 #if defined(__CFA_WITH_VERIFY__)
Index: libcfa/src/concurrency/kernel_private.hfa
===================================================================
--- libcfa/src/concurrency/kernel_private.hfa	(revision accc5dbbe1d1e2468fc5a0056e612ce9da496ce2)
+++ libcfa/src/concurrency/kernel_private.hfa	(revision c8025a2106ce52966312a91c95b8d09da7b91b8b)
@@ -49,4 +49,5 @@
 
 void * __create_pthread( pthread_t *, void * (*)(void *), void * );
+void __destroy_pthread( pthread_t pthread, void * stack, void ** retval );
 
 
Index: libcfa/src/concurrency/preemption.cfa
===================================================================
--- libcfa/src/concurrency/preemption.cfa	(revision accc5dbbe1d1e2468fc5a0056e612ce9da496ce2)
+++ libcfa/src/concurrency/preemption.cfa	(revision c8025a2106ce52966312a91c95b8d09da7b91b8b)
@@ -575,6 +575,5 @@
 	// Wait for the preemption thread to finish
 
-	pthread_join( alarm_thread, 0p );
-	free( alarm_stack );
+	__destroy_pthread( alarm_thread, alarm_stack, 0p );
 
 	// Preemption is now fully stopped
Index: libcfa/src/heap.cfa
===================================================================
--- libcfa/src/heap.cfa	(revision accc5dbbe1d1e2468fc5a0056e612ce9da496ce2)
+++ libcfa/src/heap.cfa	(revision c8025a2106ce52966312a91c95b8d09da7b91b8b)
@@ -10,9 +10,10 @@
 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Mon Sep  7 22:17:46 2020
-// Update Count     : 957
+// Last Modified On : Wed Dec 16 12:28:25 2020
+// Update Count     : 1023
 //
 
 #include <unistd.h>										// sbrk, sysconf
+#include <stdlib.h>										// EXIT_FAILURE
 #include <stdbool.h>									// true, false
 #include <stdio.h>										// snprintf, fileno
@@ -71,5 +72,5 @@
 	// Define the default extension heap amount in units of bytes. When the uC++ supplied heap reaches the brk address,
 	// the brk address is extended by the extension amount.
-	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
+	__CFA_DEFAULT_HEAP_EXPANSION__ = (10 * 1024 * 1024),
 
 	// Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets;
@@ -115,5 +116,6 @@
 
 // statically allocated variables => zero filled.
-static size_t pageSize;									// architecture pagesize
+size_t __page_size;										// architecture pagesize
+int __map_prot;											// common mmap/mprotect protection
 static size_t heapExpand;								// sbrk advance
 static size_t mmapStart;								// cross over point for mmap
@@ -249,5 +251,5 @@
 #endif // FASTLOOKUP
 
-static int mmapFd = -1;									// fake or actual fd for anonymous file
+static const off_t mmapFd = -1;							// fake or actual fd for anonymous file
 #ifdef __CFA_DEBUG__
 static bool heapBoot = 0;								// detect recursion during boot
@@ -374,5 +376,5 @@
 
 static inline bool setMmapStart( size_t value ) {		// true => mmapped, false => sbrk
-  if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return false;
+  if ( value < __page_size || bucketSizes[NoBucketSizes - 1] < value ) return false;
 	mmapStart = value;									// set global
 
@@ -436,5 +438,5 @@
 	header = headerAddr( addr );
 
-  if ( unlikely( heapEnd < addr ) ) {					// mmapped ?
+  if ( unlikely( addr < heapBegin || heapEnd < addr ) ) { // mmapped ?
 		fakeHeader( header, alignment );
 		size = header->kind.real.blockSize & -3;		// mmap size
@@ -443,5 +445,5 @@
 
 	#ifdef __CFA_DEBUG__
-	checkHeader( addr < heapBegin, name, addr );		// bad low address ?
+	checkHeader( header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
 	#endif // __CFA_DEBUG__
 
@@ -464,4 +466,23 @@
 } // headers
 
+#ifdef __CFA_DEBUG__
+#if __SIZEOF_POINTER__ == 4
+#define MASK 0xdeadbeef
+#else
+#define MASK 0xdeadbeefdeadbeef
+#endif
+#define STRIDE size_t
+
+static void * Memset( void * addr, STRIDE size ) {		// debug only
+	if ( size % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, size %zd not multiple of %zd.", size, sizeof(STRIDE) );
+	if ( (STRIDE)addr % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, addr %p not multiple of %zd.", addr, sizeof(STRIDE) );
+
+	STRIDE * end = (STRIDE *)addr + size / sizeof(STRIDE);
+	for ( STRIDE * p = (STRIDE *)addr; p < end; p += 1 ) *p = MASK;
+	return addr;
+} // Memset
+#endif // __CFA_DEBUG__
+
+
 #define NO_MEMORY_MSG "insufficient heap memory available for allocating %zd new bytes."
 
@@ -472,8 +493,15 @@
 		// If the size requested is bigger than the current remaining storage, increase the size of the heap.
 
-		size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() );
+		size_t increase = ceiling2( size > heapExpand ? size : heapExpand, __page_size );
+		// Do not call abort or strerror( errno ) as they may call malloc.
 		if ( sbrk( increase ) == (void *)-1 ) {			// failed, no memory ?
 			unlock( extlock );
-			abort( NO_MEMORY_MSG, size );				// give up
+			__cfaabi_bits_print_nolock( STDERR_FILENO, NO_MEMORY_MSG, size );
+			_exit( EXIT_FAILURE );
+		} // if
+		if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
+			unlock( extlock );
+			__cfaabi_bits_print_nolock( STDERR_FILENO, "extend() : internal error, mprotect failure, heapEnd:%p size:%zd, errno:%d.\n", heapEnd, increase, errno );
+			_exit( EXIT_FAILURE );
 		} // if
 		#ifdef __STATISTICS__
@@ -483,5 +511,6 @@
 		#ifdef __CFA_DEBUG__
 		// Set new memory to garbage so subsequent uninitialized usages might fail.
-		memset( (char *)heapEnd + heapRemaining, '\377', increase );
+		memset( (char *)heapEnd + heapRemaining, '\xde', increase );
+		//Memset( (char *)heapEnd + heapRemaining, increase );
 		#endif // __CFA_DEBUG__
 		rem = heapRemaining + increase - size;
@@ -542,6 +571,6 @@
 		block->header.kind.real.home = freeElem;		// pointer back to free list of apropriate size
 	} else {											// large size => mmap
-  if ( unlikely( size > ULONG_MAX - pageSize ) ) return 0p;
-		tsize = ceiling2( tsize, pageSize );			// must be multiple of page size
+  if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p;
+		tsize = ceiling2( tsize, __page_size );			// must be multiple of page size
 		#ifdef __STATISTICS__
 		__atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
@@ -549,13 +578,14 @@
 		#endif // __STATISTICS__
 
-		block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
+		block = (HeapManager.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
 		if ( block == (HeapManager.Storage *)MAP_FAILED ) { // failed ?
 			if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
 			// Do not call strerror( errno ) as it may call malloc.
-			abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu error:%d.", &heapManager, tsize, errno );
+			abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu errno:%d.", &heapManager, tsize, errno );
 		} //if
 		#ifdef __CFA_DEBUG__
 		// Set new memory to garbage so subsequent uninitialized usages might fail.
-		memset( block, '\377', tsize );
+		memset( block, '\xde', tsize );
+		//Memset( block, tsize );
 		#endif // __CFA_DEBUG__
 		block->header.kind.real.blockSize = tsize;		// storage size for munmap
@@ -597,14 +627,13 @@
 		#endif // __STATISTICS__
 		if ( munmap( header, size ) == -1 ) {
-			#ifdef __CFA_DEBUG__
 			abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
 				   "Possible cause is invalid pointer.",
 				   addr );
-			#endif // __CFA_DEBUG__
 		} // if
 	} else {
 		#ifdef __CFA_DEBUG__
 		// Set free memory to garbage so subsequent usages might fail.
-		memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) );
+		memset( ((HeapManager.Storage *)header)->data, '\xde', freeElem->blockSize - sizeof( HeapManager.Storage ) );
+		//Memset( ((HeapManager.Storage *)header)->data, freeElem->blockSize - sizeof( HeapManager.Storage ) );
 		#endif // __CFA_DEBUG__
 
@@ -648,8 +677,8 @@
 		for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
 		#else
-		// for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {
+			for(;;) {
+//		for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {
 //		for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */) {
-		for ( HeapManager.Storage * p ;; /* p = getNext( p )->top */) {
-			HeapManager.Storage * temp = p->header.kind.real.next.top; // FIX ME: direct assignent fails, initialization works`
+//			HeapManager.Storage * temp = p->header.kind.real.next.top; // FIX ME: direct assignent fails, initialization works`
 //			typeof(p) temp = (( p )`next)->top;			// FIX ME: direct assignent fails, initialization works`
 //			p = temp;
@@ -675,5 +704,6 @@
 
 static void ?{}( HeapManager & manager ) with( manager ) {
-	pageSize = sysconf( _SC_PAGESIZE );
+	__page_size = sysconf( _SC_PAGESIZE );
+	__map_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
 
 	for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
@@ -695,5 +725,5 @@
 
 	char * end = (char *)sbrk( 0 );
-	heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment
+	heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, __page_size ) - end ); // move start of heap to multiple of alignment
 } // HeapManager
 
@@ -713,5 +743,4 @@
 	#ifdef __CFA_DEBUG__
 	if ( heapBoot ) {									// check for recursion during system boot
-		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
 		abort( "boot() : internal error, recursively invoked during system boot." );
 	} // if
@@ -935,5 +964,5 @@
 	  		header->kind.real.size = size;				// reset allocation size
 	  		if ( unlikely( ozfill ) && size > osize ) {	// previous request zero fill and larger ?
-	  			memset( (char *)oaddr + osize, (int)'\0', size - osize ); // initialize added storage
+	  			memset( (char *)oaddr + osize, '\0', size - osize ); // initialize added storage
 	  		} // if
 			return oaddr;
@@ -960,5 +989,5 @@
 			header->kind.real.blockSize |= 2;			// mark new request as zero filled
 			if ( size > osize ) {						// previous request larger ?
-				memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
+				memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
 			} // if
 		} // if
@@ -999,4 +1028,5 @@
 		return cmemalignNoStats( alignment, dim, elemSize );
 	} // cmemalign
+
 
 	// Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
@@ -1017,8 +1047,9 @@
 	} // posix_memalign
 
+
 	// Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
 	// page size.  It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
 	void * valloc( size_t size ) {
-		return memalign( pageSize, size );
+		return memalign( __page_size, size );
 	} // valloc
 
@@ -1026,5 +1057,5 @@
 	// Same as valloc but rounds size to multiple of page size.
 	void * pvalloc( size_t size ) {
-		return memalign( pageSize, ceiling2( size, pageSize ) );
+		return memalign( __page_size, ceiling2( size, __page_size ) );
 	} // pvalloc
 
@@ -1165,5 +1196,5 @@
 		choose( option ) {
 		  case M_TOP_PAD:
-			heapExpand = ceiling2( value, pageSize ); return 1;
+			heapExpand = ceiling2( value, __page_size ); return 1;
 		  case M_MMAP_THRESHOLD:
 			if ( setMmapStart( value ) ) return 1;
@@ -1327,5 +1358,5 @@
 		header->kind.real.blockSize |= 2;				// mark new request as zero filled
 		if ( size > osize ) {							// previous request larger ?
-			memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
+			memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
 		} // if
 	} // if
Index: libcfa/src/stdlib.hfa
===================================================================
--- libcfa/src/stdlib.hfa	(revision accc5dbbe1d1e2468fc5a0056e612ce9da496ce2)
+++ libcfa/src/stdlib.hfa	(revision c8025a2106ce52966312a91c95b8d09da7b91b8b)
@@ -10,6 +10,6 @@
 // Created On       : Thu Jan 28 17:12:35 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Tue Dec  8 18:27:22 2020
-// Update Count     : 524
+// Last Modified On : Sat Dec 12 13:52:34 2020
+// Update Count     : 536
 //
 
@@ -49,5 +49,5 @@
 
 static inline forall( dtype T | sized(T) ) {
-	// Cforall safe equivalents, i.e., implicit size specification
+	// CFA safe equivalents, i.e., implicit size specification
 
 	T * malloc( void ) {
@@ -234,5 +234,5 @@
 
 static inline forall( dtype T | sized(T) ) {
-	// Cforall safe initialization/copy, i.e., implicit size specification, non-array types
+	// CFA safe initialization/copy, i.e., implicit size specification, non-array types
 	T * memset( T * dest, char fill ) {
 		return (T *)memset( dest, fill, sizeof(T) );
@@ -243,5 +243,5 @@
 	} // memcpy
 
-	// Cforall safe initialization/copy, i.e., implicit size specification, array types
+	// CFA safe initialization/copy, i.e., implicit size specification, array types
 	T * amemset( T dest[], char fill, size_t dim ) {
 		return (T *)(void *)memset( dest, fill, dim * sizeof(T) ); // C memset
@@ -253,12 +253,16 @@
 } // distribution
 
-// Cforall deallocation for multiple objects
+// CFA deallocation for multiple objects
+static inline forall( dtype T )							// FIX ME, problems with 0p in list
+void free( T * ptr ) {
+	free( (void *)ptr );								// C free
+} // free
 static inline forall( dtype T, ttype TT | { void free( TT ); } )
-void free( T * addr, TT rest ) {
-	free( ( void *)addr );								// use C free
+void free( T * ptr, TT rest ) {
+	free( ptr );
 	free( rest );
 } // free
 
-// Cforall allocation/deallocation and constructor/destructor, non-array types
+// CFA allocation/deallocation and constructor/destructor, non-array types
 static inline forall( dtype T | sized(T), ttype TT | { void ?{}( T &, TT ); } )
 T * new( TT p ) {
@@ -272,7 +276,6 @@
 		^(*ptr){};										// run destructor
 	} // if
-	free( ptr );
+	free( ptr );										// always call free
 } // delete
-
 static inline forall( dtype T, ttype TT | { void ^?{}( T & ); void delete( TT ); } )
 void delete( T * ptr, TT rest ) {
@@ -281,5 +284,5 @@
 } // delete
 
-// Cforall allocation/deallocation and constructor/destructor, array types
+// CFA allocation/deallocation and constructor/destructor, array types
 forall( dtype T | sized(T), ttype TT | { void ?{}( T &, TT ); } ) T * anew( size_t dim, TT p );
 forall( dtype T | sized(T) | { void ^?{}( T & ); } ) void adelete( T arr[] );
