Index: libcfa/src/bits/collection.hfa
===================================================================
--- libcfa/src/bits/collection.hfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/bits/collection.hfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -1,10 +1,10 @@
 #pragma once
 
 struct Colable {
-	Colable * next;										// next node in the list
+	struct Colable * next;										// next node in the list
 	// invariant: (next != 0) <=> listed()
 };
-
-inline {
+#ifdef __cforall
+static inline {
 	// PUBLIC
 
@@ -28,14 +30,14 @@
 	}
 
-	// wrappers to make Collection have T
-	forall( dtype T ) {
-		T *& Next( T * n ) {
-			return (T *)Next( (Colable *)n );
-		}
+	// // wrappers to make Collection have T
+	// forall( dtype T ) {
+	// 	T *& Next( T * n ) {
+	// 		return (T *)Next( (Colable *)n );
+	// 	}
 
-		bool listed( T * n ) {
-			return Next( (Colable *)n ) != 0p;
-		}
-	} // distribution
+	// 	bool listed( T * n ) {
+	// 		return Next( (Colable *)n ) != 0p;
+	// 	}
+	// } // distribution
 } // distribution
 
@@ -45,5 +47,5 @@
 };
 
-inline {
+static inline {
 	// class invariant: root == 0 & empty() | *root in *this
 	void ?{}( Collection &, const Collection & ) = void; // no copy
@@ -68,5 +70,5 @@
 };
 
-inline {
+static inline {
 	void ?{}( ColIter & colIter ) with( colIter ) {
 		curr = 0p;
@@ -79,2 +81,3 @@
 	} // distribution
 } // distribution
+#endif
Index: libcfa/src/bits/containers.hfa
===================================================================
--- libcfa/src/bits/containers.hfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/bits/containers.hfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -36,5 +36,5 @@
 	#define __small_array_t(T) __small_array(T)
 #else
-	#define __small_array_t(T) struct __small_array
+	#define __small_array_t(T) __small_array
 #endif
 
Index: libcfa/src/bits/defs.hfa
===================================================================
--- libcfa/src/bits/defs.hfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/bits/defs.hfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -29,5 +29,5 @@
 #define __cfa_anonymous_object(x) inline struct x
 #else
-#define __cfa_anonymous_object(x) x __cfa_anonymous_object
+#define __cfa_anonymous_object(x) struct x __cfa_anonymous_object
 #endif
 
Index: libcfa/src/bits/queue.hfa
===================================================================
--- libcfa/src/bits/queue.hfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/bits/queue.hfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -3,5 +3,5 @@
 #include "bits/collection.hfa"
 
-forall( dtype T ) {
+forall( dtype T | { T *& Next ( T * ); bool listed ( T * ); } ) {
 	struct Queue {
 		inline Collection;								// Plan 9 inheritance
@@ -64,5 +64,5 @@
 			T & t = head( q );
 			if ( root ) {
-				root = Next( root );
+				root = Next( (T *)root );
 				if ( &head( q ) == &t ) {
 					root = last = 0p;					// only one element
@@ -142,5 +142,5 @@
 } // distribution
 
-forall( dtype T ) {
+forall( dtype T | { T *& Next ( T * ); bool listed ( T * ); } ) {
 	struct QueueIter {
 		inline ColIter;									// Plan 9 inheritance
Index: libcfa/src/bits/sequence.hfa
===================================================================
--- libcfa/src/bits/sequence.hfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/bits/sequence.hfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -2,11 +2,13 @@
 
 #include "bits/collection.hfa"
+#include "bits/defs.hfa"
 
 struct Seqable {
-	inline Colable;
-	Seqable * back;										// pointer to previous node in the list
+	__cfa_anonymous_object(Colable);
+	struct Seqable * back;										// pointer to previous node in the list
 };
 
-inline {
+#ifdef __cforall
+static inline {
 	// PUBLIC
 
@@ -26,18 +28,18 @@
 	}
 
-	// wrappers to make Collection have T
-	forall( dtype T ) {
-		T *& Back( T * n ) {
-			return (T *)Back( (Seqable *)n );
-		}
-	} // distribution
+	// // wrappers to make Collection have T
+	// forall( dtype T ) {
+	// 	T *& Back( T * n ) {
+	// 		return (T *)Back( (Seqable *)n );
+	// 	}
+	// } // distribution
 } // distribution
 
-forall( dtype T ) {
+forall( dtype T | { T *& Back ( T * ); T *& Next ( T * ); bool listed ( T * ); } ) {
 	struct Sequence {
 		inline Collection;								// Plan 9 inheritance
 	};
 
-	inline {
+	static inline {
 		// wrappers to make Collection have T
 		T & head( Sequence(T) & s ) with( s ) {
@@ -184,7 +186,7 @@
 				T * toEnd = Back( &head( s ) );
 				T * fromEnd = Back( &head( from ) );
-				Back( root ) = fromEnd;
+				Back( (T *)root ) = fromEnd;
 				Next( fromEnd ) = &head( s );
-				Back( from.root ) = toEnd;
+				Back( (T *)from.root ) = toEnd;
 				Next( toEnd ) = &head( from );
 			} // if
@@ -214,5 +216,5 @@
 } // distribution
 
-forall( dtype T ) {
+forall( dtype T | { T *& Back ( T * ); T *& Next ( T * ); bool listed ( T * ); } ) {
 	// SeqIter(T) is used to iterate over a Sequence(T) in head-to-tail order.
 	struct SeqIter {
@@ -224,5 +226,5 @@
 	};
 
-	inline {
+	static inline {
 		void ?{}( SeqIter(T) & si ) with( si ) {
 			((ColIter &)si){};
@@ -265,5 +267,5 @@
 	};
 
-	inline {
+	static inline {
 		void ?{}( SeqIterRev(T) & si ) with( si ) {	
 			((ColIter &)si){};
@@ -298,2 +300,4 @@
 	} // distribution
 } // distribution
+
+#endif
Index: libcfa/src/bits/stack.hfa
===================================================================
--- libcfa/src/bits/stack.hfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/bits/stack.hfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -3,5 +3,5 @@
 #include "bits/collection.hfa"
 
-forall( dtype T ) {
+forall( dtype T | { T *& Next ( T * ); bool listed ( T * ); } ) {
 	struct Stack {
 		inline Collection;								// Plan 9 inheritance
@@ -44,5 +44,5 @@
 			T & t = head( s );
 			if ( root ) {
-				root = ( T *)Next( root );
+				root = ( T *)Next( (T *)root );
 				if ( &head( s ) == &t ) root = 0p;		// only one element ?
 				Next( &t ) = 0p;
@@ -58,5 +58,5 @@
 
 
-forall( dtype T ) {
+forall( dtype T | { T *& Next ( T * ); bool listed ( T * ); } ) {
 	struct StackIter {
 		inline ColIter;									// Plan 9 inheritance
Index: libcfa/src/concurrency/coroutine.cfa
===================================================================
--- libcfa/src/concurrency/coroutine.cfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/concurrency/coroutine.cfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -10,6 +10,6 @@
 // Created On       : Mon Nov 28 12:27:26 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Fri Oct 23 23:05:24 2020
-// Update Count     : 22
+// Last Modified On : Tue Dec 15 12:06:04 2020
+// Update Count     : 23
 //
 
@@ -88,4 +88,5 @@
 static const size_t MinStackSize = 1000;
 extern size_t __page_size;				// architecture pagesize HACK, should go in proper runtime singleton
+extern int __map_prot;
 
 void __stack_prepare( __stack_info_t * this, size_t create_size );
@@ -206,5 +207,5 @@
 		__cfaabi_dbg_debug_do(
 			storage = (char*)(storage) - __page_size;
-			if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) {
+			if ( mprotect( storage, __page_size, __map_prot ) == -1 ) {
 				abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
 			}
Index: libcfa/src/concurrency/invoke.h
===================================================================
--- libcfa/src/concurrency/invoke.h	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/concurrency/invoke.h	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -189,4 +189,10 @@
 		struct __monitor_group_t monitors;
 
+		// used to put threads on user data structures
+		struct {
+			struct $thread * next;
+			struct $thread * back;
+		} seqable;
+
 		struct {
 			struct $thread * next;
@@ -218,4 +224,16 @@
 		}
 
+		static inline $thread *& Back( $thread * this ) __attribute__((const)) {
+			return this->seqable.back;
+		}
+
+		static inline $thread *& Next( $thread * this ) __attribute__((const)) {
+			return this->seqable.next;
+		}
+
+		static inline bool listed( $thread * this ) {
+			return this->seqable.next != 0p;
+		}
+
 		static inline void ?{}(__monitor_group_t & this) {
 			(this.data){0p};
Index: libcfa/src/concurrency/kernel/startup.cfa
===================================================================
--- libcfa/src/concurrency/kernel/startup.cfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/concurrency/kernel/startup.cfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -117,5 +117,5 @@
 }
 
-size_t __page_size = 0;
+extern size_t __page_size;
 
 //-----------------------------------------------------------------------------
@@ -161,6 +161,4 @@
 	/* paranoid */ verify( ! __preemption_enabled() );
 	__cfadbg_print_safe(runtime_core, "Kernel : Starting\n");
-
-	__page_size = sysconf( _SC_PAGESIZE );
 
 	__cfa_dbg_global_clusters.list{ __get };
@@ -681,5 +679,5 @@
 	#if CFA_PROCESSOR_USE_MMAP
 		stacksize = ceiling( stacksize, __page_size ) + __page_size;
-		stack = mmap(0p, stacksize, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+		stack = mmap(0p, stacksize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
 		if(stack == ((void*)-1)) {
 			abort( "pthread stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
Index: libcfa/src/concurrency/locks.cfa
===================================================================
--- libcfa/src/concurrency/locks.cfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/concurrency/locks.cfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -29,4 +29,16 @@
 
 	void ^?{}( info_thread(L) & this ){ }
+
+	info_thread(L) *& Back( info_thread(L) * this ) {
+		return (info_thread(L) *)Back( (Seqable *)this );
+	}
+
+	info_thread(L) *& Next( info_thread(L) * this ) {
+		return (info_thread(L) *)Next( (Colable *)this );
+	}
+
+	bool listed( info_thread(L) * this ) {
+		return Next( (Colable *)this ) != 0p;
+	}
 }
 
@@ -58,5 +70,5 @@
 		abort("A single acquisition lock holder attempted to reacquire the lock resulting in a deadlock.");
 	} else if ( owner != 0p && owner != active_thread() ) {
-		append( blocked_threads, active_thread() );
+		addTail( blocked_threads, *active_thread() );
 		wait_count++;
 		unlock( lock );
@@ -96,5 +108,5 @@
 
 void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
-	$thread * t = pop_head( blocked_threads );
+	$thread * t = &dropHead( blocked_threads );
 	owner = t;
 	recursion_count = ( t ? 1 : 0 );
@@ -128,5 +140,5 @@
     lock( lock __cfaabi_dbg_ctx2 );
 	if ( owner != 0p ) {
-		append( blocked_threads, t );
+		addTail( blocked_threads, *t );
 		wait_count++;
 		unlock( lock );
@@ -257,5 +269,4 @@
 		size_t recursion_count = 0;
 		if (i->lock) {
-			i->t->link.next = 1p;
 			recursion_count = get_recursion_count(*i->lock);
 			remove_( *i->lock );
Index: libcfa/src/concurrency/locks.hfa
===================================================================
--- libcfa/src/concurrency/locks.hfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/concurrency/locks.hfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -43,4 +43,8 @@
 	void ?{}( info_thread(L) & this, $thread * t, uintptr_t info );
 	void ^?{}( info_thread(L) & this );
+
+	info_thread(L) *& Back( info_thread(L) * this );
+	info_thread(L) *& Next( info_thread(L) * this );
+	bool listed( info_thread(L) * this );
 }
 
@@ -64,5 +68,5 @@
 
 	// List of blocked threads
-	__queue_t( $thread ) blocked_threads;
+	Sequence( $thread ) blocked_threads;
 
 	// Count of current blocked threads
Index: libcfa/src/concurrency/thread.cfa
===================================================================
--- libcfa/src/concurrency/thread.cfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/concurrency/thread.cfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -43,4 +43,7 @@
 		canary = 0x0D15EA5E0D15EA5Ep;
 	#endif
+
+	seqable.next = 0p;
+	seqable.back = 0p;
 
 	node.next = 0p;
Index: libcfa/src/heap.cfa
===================================================================
--- libcfa/src/heap.cfa	(revision 72a3aff1a5539f7902f2fb2ec6468b2cd5fec852)
+++ libcfa/src/heap.cfa	(revision 7a70fb209bce29c380785c7ae281c74ad784cca6)
@@ -10,9 +10,10 @@
 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Sun Dec 13 22:04:10 2020
-// Update Count     : 984
+// Last Modified On : Wed Dec 16 12:28:25 2020
+// Update Count     : 1023
 //
 
 #include <unistd.h>										// sbrk, sysconf
+#include <stdlib.h>										// EXIT_FAILURE
 #include <stdbool.h>									// true, false
 #include <stdio.h>										// snprintf, fileno
@@ -71,5 +72,5 @@
 	// Define the default extension heap amount in units of bytes. When the uC++ supplied heap reaches the brk address,
 	// the brk address is extended by the extension amount.
-	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
+	__CFA_DEFAULT_HEAP_EXPANSION__ = (10 * 1024 * 1024),
 
 	// Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets;
@@ -115,5 +116,6 @@
 
 // statically allocated variables => zero filled.
-static size_t pageSize;									// architecture pagesize
+size_t __page_size;										// architecture pagesize
+int __map_prot;											// common mmap/mprotect protection
 static size_t heapExpand;								// sbrk advance
 static size_t mmapStart;								// cross over point for mmap
@@ -249,5 +251,5 @@
 #endif // FASTLOOKUP
 
-static int mmapFd = -1;									// fake or actual fd for anonymous file
+static const off_t mmapFd = -1;							// fake or actual fd for anonymous file
 #ifdef __CFA_DEBUG__
 static bool heapBoot = 0;								// detect recursion during boot
@@ -374,5 +376,5 @@
 
 static inline bool setMmapStart( size_t value ) {		// true => mmapped, false => sbrk
-  if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return false;
+  if ( value < __page_size || bucketSizes[NoBucketSizes - 1] < value ) return false;
 	mmapStart = value;									// set global
 
@@ -436,5 +438,5 @@
 	header = headerAddr( addr );
 
-  if ( unlikely( heapEnd < addr ) ) {					// mmapped ?
+  if ( unlikely( addr < heapBegin || heapEnd < addr ) ) { // mmapped ?
 		fakeHeader( header, alignment );
 		size = header->kind.real.blockSize & -3;		// mmap size
@@ -443,5 +445,5 @@
 
 	#ifdef __CFA_DEBUG__
-	checkHeader( addr < heapBegin, name, addr );		// bad low address ?
+	checkHeader( header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
 	#endif // __CFA_DEBUG__
 
@@ -482,4 +484,5 @@
 #endif // __CFA_DEBUG__
 
+
 #define NO_MEMORY_MSG "insufficient heap memory available for allocating %zd new bytes."
 
@@ -490,15 +493,15 @@
 		// If the size requested is bigger than the current remaining storage, increase the size of the heap.
 
-		size_t increase = ceiling2( size > heapExpand ? size : heapExpand, pageSize );
+		size_t increase = ceiling2( size > heapExpand ? size : heapExpand, __page_size );
+		// Do not call abort or strerror( errno ) as they may call malloc.
 		if ( sbrk( increase ) == (void *)-1 ) {			// failed, no memory ?
 			unlock( extlock );
-			abort( NO_MEMORY_MSG, size );				// give up
-		} // if
-		if ( mprotect( (char *)heapEnd + heapRemaining, increase, PROT_READ | PROT_WRITE | PROT_EXEC ) ) {
-			enum { BufferSize = 128 };
-			char helpText[BufferSize];
-			// Do not call strerror( errno ) as it may call malloc.
-			int len = snprintf( helpText, BufferSize, "internal error, extend(), mprotect failure, heapEnd:%p size:%zd, errno:%d.", heapEnd, increase, errno );
-			__cfaabi_bits_write( STDERR_FILENO, helpText, len );
+			__cfaabi_bits_print_nolock( STDERR_FILENO, NO_MEMORY_MSG, size );
+			_exit( EXIT_FAILURE );
+		} // if
+		if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
+			unlock( extlock );
+			__cfaabi_bits_print_nolock( STDERR_FILENO, "extend() : internal error, mprotect failure, heapEnd:%p size:%zd, errno:%d.\n", heapEnd, increase, errno );
+			_exit( EXIT_FAILURE );
 		} // if
 		#ifdef __STATISTICS__
@@ -508,6 +511,6 @@
 		#ifdef __CFA_DEBUG__
 		// Set new memory to garbage so subsequent uninitialized usages might fail.
-		//memset( (char *)heapEnd + heapRemaining, '\377', increase );
-		Memset( (char *)heapEnd + heapRemaining, increase );
+		memset( (char *)heapEnd + heapRemaining, '\xde', increase );
+		//Memset( (char *)heapEnd + heapRemaining, increase );
 		#endif // __CFA_DEBUG__
 		rem = heapRemaining + increase - size;
@@ -568,6 +571,6 @@
 		block->header.kind.real.home = freeElem;		// pointer back to free list of apropriate size
 	} else {											// large size => mmap
-  if ( unlikely( size > ULONG_MAX - pageSize ) ) return 0p;
-		tsize = ceiling2( tsize, pageSize );			// must be multiple of page size
+  if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p;
+		tsize = ceiling2( tsize, __page_size );			// must be multiple of page size
 		#ifdef __STATISTICS__
 		__atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
@@ -575,5 +578,5 @@
 		#endif // __STATISTICS__
 
-		block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
+		block = (HeapManager.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
 		if ( block == (HeapManager.Storage *)MAP_FAILED ) { // failed ?
 			if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
@@ -583,6 +586,6 @@
 		#ifdef __CFA_DEBUG__
 		// Set new memory to garbage so subsequent uninitialized usages might fail.
-		//memset( block, '\377', tsize );
-		Memset( block, tsize );
+		memset( block, '\xde', tsize );
+		//Memset( block, tsize );
 		#endif // __CFA_DEBUG__
 		block->header.kind.real.blockSize = tsize;		// storage size for munmap
@@ -624,15 +627,13 @@
 		#endif // __STATISTICS__
 		if ( munmap( header, size ) == -1 ) {
-			#ifdef __CFA_DEBUG__
 			abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
 				   "Possible cause is invalid pointer.",
 				   addr );
-			#endif // __CFA_DEBUG__
 		} // if
 	} else {
 		#ifdef __CFA_DEBUG__
 		// Set free memory to garbage so subsequent usages might fail.
-		//memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) );
-		Memset( ((HeapManager.Storage *)header)->data, freeElem->blockSize - sizeof( HeapManager.Storage ) );
+		memset( ((HeapManager.Storage *)header)->data, '\xde', freeElem->blockSize - sizeof( HeapManager.Storage ) );
+		//Memset( ((HeapManager.Storage *)header)->data, freeElem->blockSize - sizeof( HeapManager.Storage ) );
 		#endif // __CFA_DEBUG__
 
@@ -703,5 +704,6 @@
 
 static void ?{}( HeapManager & manager ) with( manager ) {
-	pageSize = sysconf( _SC_PAGESIZE );
+	__page_size = sysconf( _SC_PAGESIZE );
+	__map_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
 
 	for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
@@ -723,5 +725,5 @@
 
 	char * end = (char *)sbrk( 0 );
-	heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, pageSize ) - end ); // move start of heap to multiple of alignment
+	heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, __page_size ) - end ); // move start of heap to multiple of alignment
 } // HeapManager
 
@@ -741,5 +743,4 @@
 	#ifdef __CFA_DEBUG__
 	if ( heapBoot ) {									// check for recursion during system boot
-		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
 		abort( "boot() : internal error, recursively invoked during system boot." );
 	} // if
@@ -1028,4 +1029,5 @@
 	} // cmemalign
 
+
 	// Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
     // of alignment. This requirement is universally ignored.
@@ -1045,8 +1047,9 @@
 	} // posix_memalign
 
+
 	// Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
 	// page size.  It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
 	void * valloc( size_t size ) {
-		return memalign( pageSize, size );
+		return memalign( __page_size, size );
 	} // valloc
 
@@ -1054,5 +1057,5 @@
 	// Same as valloc but rounds size to multiple of page size.
 	void * pvalloc( size_t size ) {
-		return memalign( pageSize, ceiling2( size, pageSize ) );
+		return memalign( __page_size, ceiling2( size, __page_size ) );
 	} // pvalloc
 
@@ -1193,5 +1196,5 @@
 		choose( option ) {
 		  case M_TOP_PAD:
-			heapExpand = ceiling2( value, pageSize ); return 1;
+			heapExpand = ceiling2( value, __page_size ); return 1;
 		  case M_MMAP_THRESHOLD:
 			if ( setMmapStart( value ) ) return 1;
