Index: src/libcfa/concurrency/invoke.h
===================================================================
--- src/libcfa/concurrency/invoke.h	(revision 8fc45b762b4e8374629230668423504c051c14e7)
+++ src/libcfa/concurrency/invoke.h	(revision 025278e0ea194f101c7166809d5a4ee702acadaa)
@@ -25,119 +25,174 @@
 #define _INVOKE_H_
 
-      #define unlikely(x)    __builtin_expect(!!(x), 0)
-      #define thread_local _Thread_local
-
-      typedef void (*fptr_t)();
-
-      struct spinlock {
-            volatile int lock;
-            #ifdef __CFA_DEBUG__
-                  const char * prev_name;
-                  void* prev_thrd;
-            #endif
-      };
-
-      struct __thread_queue_t {
-            struct thread_desc * head;
-            struct thread_desc ** tail;
-      };
-
-      struct __condition_stack_t {
-            struct __condition_criterion_t * top;
-      };
-
-      #ifdef __CFORALL__
-      extern "Cforall" {
-            void ?{}( struct __thread_queue_t & );
-            void append( struct __thread_queue_t &, struct thread_desc * );
-            struct thread_desc * pop_head( struct __thread_queue_t & );
-            struct thread_desc * remove( struct __thread_queue_t &, struct thread_desc ** );
-
-            void ?{}( struct __condition_stack_t & );
-            void push( struct __condition_stack_t &, struct __condition_criterion_t * );
-            struct __condition_criterion_t * pop( struct __condition_stack_t & );
-
-            void  ?{}(spinlock & this);
-            void ^?{}(spinlock & this);
-      }
-      #endif
-
-      struct coStack_t {
-            unsigned int size;                        // size of stack
-            void *storage;                            // pointer to stack
-            void *limit;                              // stack grows towards stack limit
-            void *base;                               // base of stack
-            void *context;                            // address of cfa_context_t
-            void *top;                                // address of top of storage
-            bool userStack;                           // whether or not the user allocated the stack
-      };
-
-      enum coroutine_state { Halted, Start, Inactive, Active, Primed };
-
-      struct coroutine_desc {
-            struct coStack_t stack;                   // stack information of the coroutine
-            const char *name;                         // textual name for coroutine/task, initialized by uC++ generated code
-            int errno_;                               // copy of global UNIX variable errno
-            enum coroutine_state state;               // current execution status for coroutine
-            struct coroutine_desc * starter;          // first coroutine to resume this one
-            struct coroutine_desc * last;             // last coroutine to resume this one
-      };
-
-      struct __waitfor_mask_t {
-            short * accepted;                         // the index of the accepted function, -1 if none
-            struct __acceptable_t * clauses;          // list of acceptable functions, null if any
-            short size;                               // number of acceptable functions
-      };
-
-      struct monitor_desc {
-            struct spinlock lock;                     // spinlock to protect internal data
-            struct thread_desc * owner;               // current owner of the monitor
-            struct __thread_queue_t entry_queue;      // queue of threads that are blocked waiting for the monitor
-            struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
-            unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
-            struct __waitfor_mask_t mask;             // mask used to know if some thread is waiting for something while holding the monitor
-            struct __condition_node_t * dtor_node;    // node used to signal the dtor in a waitfor dtor
-      };
-
-      struct __monitor_group_t {
-            struct monitor_desc ** list;              // currently held monitors
-            short                  size;              // number of currently held monitors
-            fptr_t                 func;              // last function that acquired monitors
-      };
-
-      struct thread_desc {
-            // Core threading fields
-            struct coroutine_desc  self_cor;          // coroutine body used to store context
-            struct monitor_desc    self_mon;          // monitor body used for mutual exclusion
-            struct monitor_desc *  self_mon_p;        // pointer to monitor with sufficient lifetime for current monitors
-            struct __monitor_group_t monitors;        // monitors currently held by this thread
-
-            // Link lists fields
-            struct thread_desc * next;                // instrusive link field for threads
-
-
+	#define unlikely(x)    __builtin_expect(!!(x), 0)
+	#define thread_local _Thread_local
+
+	typedef void (*fptr_t)();
+
+	struct spinlock {
+		volatile int lock;
+		#ifdef __CFA_DEBUG__
+			const char * prev_name;
+			void* prev_thrd;
+		#endif
+	};
+
+	struct __thread_queue_t {
+		struct thread_desc * head;
+		struct thread_desc ** tail;
+	};
+
+	struct __condition_stack_t {
+		struct __condition_criterion_t * top;
+	};
+
+	#ifdef __CFORALL__
+	extern "Cforall" {
+		void ?{}( struct __thread_queue_t & );
+		void append( struct __thread_queue_t &, struct thread_desc * );
+		struct thread_desc * pop_head( struct __thread_queue_t & );
+		struct thread_desc * remove( struct __thread_queue_t &, struct thread_desc ** );
+
+		void ?{}( struct __condition_stack_t & );
+		void push( struct __condition_stack_t &, struct __condition_criterion_t * );
+		struct __condition_criterion_t * pop( struct __condition_stack_t & );
+
+		void  ?{}(spinlock & this);
+		void ^?{}(spinlock & this);
+	}
+	#endif
+
+	struct coStack_t {
+		// size of stack
+		unsigned int size;
+
+		// pointer to stack
+		void *storage;
+
+		// stack grows towards stack limit
+		void *limit;
+
+		// base of stack
+		void *base;
+
+		// address of cfa_context_t
+		void *context;
+
+		// address of top of storage
+		void *top;
+
+		// whether or not the user allocated the stack
+		bool userStack;
+
+	};
+
+	enum coroutine_state { Halted, Start, Inactive, Active, Primed };
+
+	struct coroutine_desc {
+		// stack information of the coroutine
+		struct coStack_t stack;
+
+		// textual name for coroutine/task, initialized by uC++ generated code
+		const char *name;
+
+		// copy of global UNIX variable errno
+		int errno_;
+
+		// current execution status for coroutine
+		enum coroutine_state state;
+
+		// first coroutine to resume this one
+		struct coroutine_desc * starter;
+
+		// last coroutine to resume this one
+		struct coroutine_desc * last;
+	};
+
+	struct __waitfor_mask_t {
+		// the index of the accepted function, -1 if none
+		short * accepted;
+
+		// list of acceptable functions, null if any function is acceptable
+		struct __acceptable_t * clauses;
+
+		// number of acceptable functions
+		short size;
+	};
+
+	struct monitor_desc {
+		// spinlock to protect internal data
+		struct spinlock lock;
+
+		// current owner of the monitor
+		struct thread_desc * owner;
+
+		// queue of threads that are blocked waiting for the monitor
+		struct __thread_queue_t entry_queue;
+
+		// stack of conditions to run next once we exit the monitor
+		struct __condition_stack_t signal_stack;
+
+		// monitor routines can be called recursively; we need to keep track of the recursion depth
+		unsigned int recursion;
+
+		// mask used to determine whether some thread is waiting for something while holding the monitor
+		struct __waitfor_mask_t mask;
+
+		// node used to signal the dtor in a waitfor dtor
+		struct __condition_node_t * dtor_node;
+	};
+
+	struct __monitor_group_t {
+		// currently held monitors
+		struct monitor_desc ** list;
+
+		// number of currently held monitors
+		short                  size;
+
+		// last function that acquired monitors
+		fptr_t                 func;
+	};
+
+	struct thread_desc {
+		// Core threading fields
+		// coroutine body used to store context
+		struct coroutine_desc  self_cor;
+
+		// monitor body used for mutual exclusion
+		struct monitor_desc    self_mon;
+
+		// pointer to monitor with sufficient lifetime for current monitors
+		struct monitor_desc *  self_mon_p;
+
+		// monitors currently held by this thread
+		struct __monitor_group_t monitors;
+
+
+		// Linked-list fields
+		// intrusive link field for threads
+		struct thread_desc * next;
      };
 
      #ifdef __CFORALL__
      extern "Cforall" {
-            static inline monitor_desc * ?[?]( const __monitor_group_t & this, ptrdiff_t index ) {
-                  return this.list[index];
-            }
-
-            static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
-                  if( (lhs.list != 0) != (rhs.list != 0) ) return false;
-                  if( lhs.size != rhs.size ) return false;
-                  if( lhs.func != rhs.func ) return false;
-
-                  // Check that all the monitors match
-                  for( int i = 0; i < lhs.size; i++ ) {
-                        // If not a match, check next function
-                        if( lhs[i] != rhs[i] ) return false;
-                  }
-
-                  return true;
-            }
-      }
-      #endif
+		static inline monitor_desc * ?[?]( const __monitor_group_t & this, ptrdiff_t index ) {
+			return this.list[index];
+		}
+
+		static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
+			if( (lhs.list != 0) != (rhs.list != 0) ) return false;
+			if( lhs.size != rhs.size ) return false;
+			if( lhs.func != rhs.func ) return false;
+
+			// Check that all the monitors match
+			for( int i = 0; i < lhs.size; i++ ) {
+				// If any monitor does not match, the groups are not equal
+				if( lhs[i] != rhs[i] ) return false;
+			}
+
+			return true;
+		}
+	}
+	#endif
 
 #endif //_INVOKE_H_
@@ -146,25 +201,25 @@
 #define _INVOKE_PRIVATE_H_
 
-      struct machine_context_t {
-            void *SP;
-            void *FP;
-            void *PC;
-      };
-
-      // assembler routines that performs the context switch
-      extern void CtxInvokeStub( void );
-      void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
-
-      #if   defined( __x86_64__ )
-      #define CtxGet( ctx ) __asm__ ( \
-                  "movq %%rsp,%0\n"   \
-                  "movq %%rbp,%1\n"   \
-            : "=rm" (ctx.SP), "=rm" (ctx.FP) )
-      #elif defined( __i386__ )
-      #define CtxGet( ctx ) __asm__ ( \
-                  "movl %%esp,%0\n"   \
-                  "movl %%ebp,%1\n"   \
-            : "=rm" (ctx.SP), "=rm" (ctx.FP) )
-      #endif
+	struct machine_context_t {
+		void *SP;
+		void *FP;
+		void *PC;
+	};
+
+	// assembler routines that perform the context switch
+	extern void CtxInvokeStub( void );
+	void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
+
+	#if   defined( __x86_64__ )
+	#define CtxGet( ctx ) __asm__ ( \
+			"movq %%rsp,%0\n"   \
+			"movq %%rbp,%1\n"   \
+		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
+	#elif defined( __i386__ )
+	#define CtxGet( ctx ) __asm__ ( \
+			"movl %%esp,%0\n"   \
+			"movl %%ebp,%1\n"   \
+		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
+	#endif
 
 #endif //_INVOKE_PRIVATE_H_
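
Note on the structures above: __thread_queue_t pairs a head pointer with a pointer-to-pointer tail, the usual intrusive singly-linked queue in which tail addresses the next field of the last element (or head itself when the queue is empty). Below is a minimal C sketch of the semantics the Cforall operators ?{}, append, and pop_head are expected to provide; the thread_desc stub and the queue_* helper names are illustrative, not the real layout or API.

	#include <stddef.h>

	struct thread_desc { struct thread_desc * next; };   // illustrative stub

	struct __thread_queue_t {
		struct thread_desc *  head;
		struct thread_desc ** tail;
	};

	// constructor: empty queue, tail points back at head
	static void queue_init( struct __thread_queue_t * q ) {
		q->head = NULL;
		q->tail = &q->head;
	}

	// O(1) append: link through the next field of the last element
	static void queue_append( struct __thread_queue_t * q, struct thread_desc * t ) {
		t->next = NULL;
		*q->tail = t;
		q->tail  = &t->next;
	}

	// O(1) pop from the front; reset tail when the queue drains
	static struct thread_desc * queue_pop_head( struct __thread_queue_t * q ) {
		struct thread_desc * t = q->head;
		if ( t ) {
			q->head = t->next;
			if ( q->head == NULL ) q->tail = &q->head;
		}
		return t;
	}

On the private half, note that CtxGet captures only the stack and frame pointers; the PC field of machine_context_t is not written by the macro:

	struct machine_context_t ctx;
	CtxGet( ctx );   // ctx.SP = stack pointer, ctx.FP = frame pointer; ctx.PC stays uninitialized
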
Index: src/libcfa/concurrency/kernel
===================================================================
--- src/libcfa/concurrency/kernel	(revision 8fc45b762b4e8374629230668423504c051c14e7)
+++ src/libcfa/concurrency/kernel	(revision 025278e0ea194f101c7166809d5a4ee702acadaa)
@@ -26,8 +26,15 @@
 //-----------------------------------------------------------------------------
 // Locks
-void lock      ( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, spin if already acquired
-void lock_yield( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, yield repeatedly if already acquired
-bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, return false if already acquired
-void unlock    ( spinlock * );                        // Unlock the spinlock
+// Lock the spinlock, spin if already acquired
+void lock      ( spinlock * DEBUG_CTX_PARAM2 );
+
+// Lock the spinlock, yield repeatedly if already acquired
+void lock_yield( spinlock * DEBUG_CTX_PARAM2 );
+
+// Lock the spinlock, return false if already acquired
+bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );
+
+// Unlock the spinlock
+void unlock    ( spinlock * );
 
 struct semaphore {
@@ -46,7 +53,12 @@
 // Cluster
 struct cluster {
-	spinlock ready_queue_lock;                      // Ready queue locks
-	__thread_queue_t ready_queue;                   // Ready queue for threads
-	unsigned long long int preemption;              // Preemption rate on this cluster
+	// Ready queue locks
+	spinlock ready_queue_lock;
+
+	// Ready queue for threads
+	__thread_queue_t ready_queue;
+
+	// Preemption rate on this cluster
+	unsigned long long int preemption;
 };
 
@@ -79,21 +91,34 @@
 struct processor {
 	// Main state
-	struct processorCtx_t * runner;                 // Coroutine ctx who does keeps the state of the processor
-	cluster * cltr;                                 // Cluster from which to get threads
-	pthread_t kernel_thread;                        // Handle to pthreads
+	// Coroutine ctx that keeps the state of the processor
+	struct processorCtx_t * runner;
+
+	// Cluster from which to get threads
+	cluster * cltr;
+
+	// Handle to pthreads
+	pthread_t kernel_thread;
 
 	// Termination
-	volatile bool do_terminate;                     // Set to true to notify the processor should terminate
-	semaphore terminated;                           // Termination synchronisation
+	// Set to true to signal that the processor should terminate
+	volatile bool do_terminate;
+
+	// Termination synchronisation
+	semaphore terminated;
 
 	// RunThread data
-	struct FinishAction finish;                     // Action to do after a thread is ran
+	// Action to take after a thread is run
+	struct FinishAction finish;
 
 	// Preemption data
-	struct alarm_node_t * preemption_alarm;         // Node which is added in the discrete event simulaiton
-	bool pending_preemption;                        // If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible
+	// Node added to the discrete-event simulation
+	struct alarm_node_t * preemption_alarm;
+
+	// If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
+	bool pending_preemption;
 
 #ifdef __CFA_DEBUG__
-	char * last_enable;                             // Last function to enable preemption on this processor
+	// Last function to enable preemption on this processor
+	char * last_enable;
 #endif
 };
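
The kernel header only declares the spinlock API. For orientation, here is one plausible shape of lock, try_lock, and unlock over the volatile int lock field of struct spinlock, sketched with GCC's __sync builtins. This is a sketch under that assumption, not the library's actual implementation; the spinlock_* names are hypothetical, and the __CFA_DEBUG__ bookkeeping behind DEBUG_CTX_PARAM2 is ignored.

	#include <stdbool.h>

	// acquire if free; returns true on success (atomic test-and-set on the lock word)
	static inline bool spinlock_try_lock( struct spinlock * this ) {
		return __sync_lock_test_and_set( &this->lock, 1 ) == 0;
	}

	// spin until acquired, reading the lock word while contended
	static inline void spinlock_lock( struct spinlock * this ) {
		while ( ! spinlock_try_lock( this ) ) {
			while ( this->lock ) {}   // spin on a plain read to reduce bus traffic
		}
	}

	// release: store 0 with release semantics
	static inline void spinlock_unlock( struct spinlock * this ) {
		__sync_lock_release( &this->lock );
	}
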
Index: src/libcfa/concurrency/monitor
===================================================================
--- src/libcfa/concurrency/monitor	(revision 8fc45b762b4e8374629230668423504c051c14e7)
+++ src/libcfa/concurrency/monitor	(revision 025278e0ea194f101c7166809d5a4ee702acadaa)
@@ -39,8 +39,4 @@
 }
 
-// static inline int ?<?(monitor_desc* lhs, monitor_desc* rhs) {
-// 	return ((intptr_t)lhs) < ((intptr_t)rhs);
-// }
-
 struct monitor_guard_t {
 	monitor_desc ** m;
@@ -74,16 +70,34 @@
 
 struct __condition_criterion_t {
-	bool ready;						//Whether or not the criterion is met (True if met)
-	monitor_desc * target;				//The monitor this criterion concerns
-	struct __condition_node_t * owner;		//The parent node to which this criterion belongs
-	__condition_criterion_t * next;		//Intrusive linked list Next field
+	// Whether or not the criterion is met (True if met)
+	bool ready;
+
+	// The monitor this criterion concerns
+	monitor_desc * target;
+
+	// The parent node to which this criterion belongs
+	struct __condition_node_t * owner;
+
+	// Intrusive linked-list next field
+	__condition_criterion_t * next;
+
 };
 
 struct __condition_node_t {
-	thread_desc * waiting_thread;			//Thread that needs to be woken when all criteria are met
-	__condition_criterion_t * criteria; 	//Array of criteria (Criterions are contiguous in memory)
-	unsigned short count;				//Number of criterions in the criteria
-	__condition_node_t * next;			//Intrusive linked list Next field
-	uintptr_t user_info;				//Custom user info accessible before signalling
+	// Thread that needs to be woken when all criteria are met
+	thread_desc * waiting_thread;
+
+	// Array of criteria (criteria are contiguous in memory)
+	__condition_criterion_t * criteria;
+
+	// Number of criteria in the array
+	unsigned short count;
+
+	// Intrusive linked-list next field
+	__condition_node_t * next;
+
+	// Custom user info accessible before signalling
+	uintptr_t user_info;
+
 };
 
@@ -102,7 +116,13 @@
 
 struct condition {
-	__condition_blocked_queue_t blocked;	//Link list which contains the blocked threads as-well as the information needed to unblock them
-	monitor_desc ** monitors;			//Array of monitor pointers (Monitors are NOT contiguous in memory)
-	unsigned short monitor_count;			//Number of monitors in the array
+	// Linked list containing the blocked threads as well as the information needed to unblock them
+	__condition_blocked_queue_t blocked;
+
+	// Array of monitor pointers (Monitors are NOT contiguous in memory)
+	monitor_desc ** monitors;
+
+	// Number of monitors in the array
+	unsigned short monitor_count;
+
 };
 

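A __condition_node_t is signalled one criterion at a time, and per the comments above the waiting_thread may only be woken once every __condition_criterion_t in the contiguous criteria array has its ready flag set. A minimal sketch of that check, using the structs as declared above; the helper name is hypothetical:

	#include <stdbool.h>

	// true when all criteria of the node are met, i.e. waiting_thread may be scheduled
	static bool all_criteria_ready( struct __condition_node_t * node ) {
		for ( unsigned short i = 0; i < node->count; i++ ) {
			if ( ! node->criteria[i].ready ) return false;
		}
		return true;
	}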