Index: src/Concurrency/Keywords.cc
===================================================================
--- src/Concurrency/Keywords.cc	(revision f710acad826bf96e612f2bf110b787521a584489)
+++ src/Concurrency/Keywords.cc	(revision 97e32964dc1e5125b35bd5db0234e6b6f272fe14)
@@ -200,5 +200,5 @@
 		std::list<DeclarationWithType*> findMutexArgs( FunctionDecl* );
 		void validate( DeclarationWithType * );
-		void addStatments( CompoundStmt *, const std::list<DeclarationWithType * > &);
+		void addStatments( FunctionDecl* func, CompoundStmt *, const std::list<DeclarationWithType * > &);
 
 		static void implement( std::list< Declaration * > & translationUnit ) {
@@ -210,5 +210,14 @@
 	  	StructDecl* monitor_decl = nullptr;
 		StructDecl* guard_decl = nullptr;
+
+		static std::unique_ptr< Type > generic_func;
 	};
+
+	std::unique_ptr< Type > MutexKeyword::generic_func = std::unique_ptr< Type >(
+		new FunctionType(
+			noQualifiers,
+			true
+		)
+	);
 
 	//-----------------------------------------------------------------------------
@@ -394,4 +403,5 @@
 	// Mutex keyword implementation
 	//=============================================================================================
+
 	void MutexKeyword::visit(FunctionDecl* decl) {
 		Visitor::visit(decl);
@@ -410,5 +420,5 @@
 		if( !guard_decl ) throw SemanticError( "mutex keyword requires monitors to be in scope, add #include <monitor>", decl );
 
-		addStatments( body, mutexArgs );
+		addStatments( decl, body, mutexArgs );
 	}
 
@@ -456,5 +466,5 @@
 	}
 
-	void MutexKeyword::addStatments( CompoundStmt * body, const std::list<DeclarationWithType * > & args ) {
+	void MutexKeyword::addStatments( FunctionDecl* func, CompoundStmt * body, const std::list<DeclarationWithType * > & args ) {
 		ObjectDecl * monitors = new ObjectDecl(
 			"__monitors",
@@ -487,6 +497,8 @@
 		);
 
+		assert(generic_func);
+
 		//in reverse order :
-		// monitor_guard_t __guard = { __monitors, # };
+		// monitor_guard_t __guard = { __monitors, #, func };
 		body->push_front(
 			new DeclStmt( noLabels, new ObjectDecl(
@@ -502,5 +514,6 @@
 					{
 						new SingleInit( new VariableExpr( monitors ) ),
-						new SingleInit( new ConstantExpr( Constant::from_ulong( args.size() ) ) )
+						new SingleInit( new ConstantExpr( Constant::from_ulong( args.size() ) ) ),
+						new SingleInit( new CastExpr( new VariableExpr( func ), generic_func->clone() ) )
 					},
 					noDesignators,
Index: src/libcfa/concurrency/invoke.h
===================================================================
--- src/libcfa/concurrency/invoke.h	(revision f710acad826bf96e612f2bf110b787521a584489)
+++ src/libcfa/concurrency/invoke.h	(revision 97e32964dc1e5125b35bd5db0234e6b6f272fe14)
@@ -87,5 +87,10 @@
             struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
             unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
-      };
+
+            struct __acceptable_t * acceptables;      // list of acceptable functions, null if any
+            unsigned short acceptable_count;          // number of acceptable functions
+            short accepted_index;                     // the index of the accepted function, -1 if none
+            void (*pre_accept)(void);                 // function to run before an accept
+       };
 
       struct thread_desc {
@@ -95,5 +100,5 @@
             struct monitor_desc ** current_monitors;  // currently held monitors
             unsigned short current_monitor_count;     // number of currently held monitors
-      };
+     };
 
 #endif //_INVOKE_H_
Index: src/libcfa/concurrency/kernel.c
===================================================================
--- src/libcfa/concurrency/kernel.c	(revision f710acad826bf96e612f2bf110b787521a584489)
+++ src/libcfa/concurrency/kernel.c	(revision 97e32964dc1e5125b35bd5db0234e6b6f272fe14)
@@ -366,4 +366,5 @@
 
 void BlockInternal( thread_desc * thrd ) {
+	assert(thrd);
 	disable_interrupts();
 	assert( thrd->cor.state != Halted );
@@ -379,4 +380,5 @@
 
 void BlockInternal( spinlock * lock, thread_desc * thrd ) {
+	assert(thrd);
 	disable_interrupts();
 	this_processor->finish.action_code = Release_Schedule;
Index: src/libcfa/concurrency/monitor
===================================================================
--- src/libcfa/concurrency/monitor	(revision f710acad826bf96e612f2bf110b787521a584489)
+++ src/libcfa/concurrency/monitor	(revision 97e32964dc1e5125b35bd5db0234e6b6f272fe14)
@@ -23,6 +23,13 @@
 
 static inline void ?{}(monitor_desc * this) {
+	(&this->lock){};
 	this->owner = NULL;
+	(&this->entry_queue){};
+	(&this->signal_stack){};
 	this->recursion = 0;
+	this->acceptables = NULL;
+	this->acceptable_count = 0;
+	this->accepted_index = -1;
+	this->pre_accept = 0;
 }
 
@@ -38,5 +45,5 @@
 }
 
-void ?{}( monitor_guard_t * this, monitor_desc ** m, int count );
+void ?{}( monitor_guard_t * this, monitor_desc ** m, int count, void (*func)() );
 void ^?{}( monitor_guard_t * this );
 
@@ -89,11 +96,17 @@
 uintptr_t front( condition * this );
 
+//-----------------------------------------------------------------------------
+// External scheduling
+
+typedef void (*void_fptr_t)(void);
+
 struct __acceptable_t {
-	void (*func)(void);
+	void_fptr_t func;
 	unsigned short count;
-	monitor_desc * monitors[1];
+	monitor_desc ** monitors;
+	bool run_preaccept;
 };
 
-void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) );
+int __accept_internal( unsigned short count, __acceptable_t * acceptables );
 
 // Local Variables: //
Index: src/libcfa/concurrency/monitor.c
===================================================================
--- src/libcfa/concurrency/monitor.c	(revision f710acad826bf96e612f2bf110b787521a584489)
+++ src/libcfa/concurrency/monitor.c	(revision 97e32964dc1e5125b35bd5db0234e6b6f272fe14)
@@ -25,4 +25,5 @@
 static inline void set_owner( monitor_desc * this, thread_desc * owner );
 static inline thread_desc * next_thread( monitor_desc * this );
+static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() );
 
 static inline void lock_all( spinlock ** locks, unsigned short count );
@@ -34,8 +35,31 @@
 static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count );
 
+static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
+static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
+
 static inline thread_desc * check_condition( __condition_criterion_t * );
 static inline void brand_condition( condition * );
 static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val );
 
+static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count );
+
+//-----------------------------------------------------------------------------
+// Useful defines
+#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack       */ \
+	__condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
+	__condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up          */ \
+	init( count, monitors, &waiter, criteria );               /* Link everything together                                          */ \
+
+#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack       */ \
+	__condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
+	__condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up          */ \
+	init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack              */ \
+
+#define monitor_ctx( mons, cnt )              /* Define that builds the necessary struct for internal/external scheduling operations */ \
+	monitor_desc ** monitors = mons;        /* Save the targeted monitors                                                          */ \
+	unsigned short count = cnt;             /* Save the count to a local variable                                                  */ \
+	unsigned int recursions[ count ];       /* Save the current recursion levels to restore them later                             */ \
+	spinlock *   locks     [ count ];       /* We need to pass-in an array of locks to BlockInternal                               */ \
+
 //-----------------------------------------------------------------------------
 // Enter/Leave routines
@@ -43,46 +67,50 @@
 
 extern "C" {
-	void __enter_monitor_desc( monitor_desc * this ) {
+	// Enter single monitor
+	static void __enter_monitor_desc( monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
+		// Lock the monitor spinlock, lock_yield to reduce contention
 		lock_yield( &this->lock DEBUG_CTX2 );
 		thread_desc * thrd = this_thread;
 
-		// LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
-
+		this->accepted_index = -1;
 		if( !this->owner ) {
-			//No one has the monitor, just take it
+			// No one has the monitor, just take it
 			set_owner( this, thrd );
 		}
 		else if( this->owner == thrd) {
-			//We already have the monitor, just not how many times we took it
+			// We already have the monitor, just note how many times we took it
 			verify( this->recursion > 0 );
 			this->recursion += 1;
 		}
+		else if( (this->accepted_index = is_accepted( thrd, this, group, group_cnt, func)) >= 0 ) {
+			// Some one was waiting for us, enter
+			set_owner( this, thrd );
+		}
 		else {
-			//Some one else has the monitor, wait in line for it
+			// Some one else has the monitor, wait in line for it
 			append( &this->entry_queue, thrd );
-			// LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
 			BlockInternal( &this->lock );
 
-			//BlockInternal will unlock spinlock, no need to unlock ourselves
+			// BlockInternal will unlock spinlock, no need to unlock ourselves
 			return;
 		}
 
+		// Release the lock and leave
 		unlock( &this->lock );
 		return;
 	}
 
-	// leave pseudo code :
-	//	TODO
+	// Leave single monitor
 	void __leave_monitor_desc( monitor_desc * this ) {
+		// Lock the monitor spinlock, lock_yield to reduce contention
 		lock_yield( &this->lock DEBUG_CTX2 );
 
-		// LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion);
 		verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );
 
-		//Leaving a recursion level, decrement the counter
+		// Leaving a recursion level, decrement the counter
 		this->recursion -= 1;
 
-		//If we haven't left the last level of recursion
-		//it means we don't need to do anything
+		// If we haven't left the last level of recursion
+		// it means we don't need to do anything
 		if( this->recursion != 0) {
 			unlock( &this->lock );
@@ -90,10 +118,9 @@
 		}
 
+		// Get the next thread, will be null on low contention monitor
 		thread_desc * new_owner = next_thread( this );
 
-		//We can now let other threads in safely
+		// We can now let other threads in safely
 		unlock( &this->lock );
-
-		// LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
 
 		//We need to wake-up the thread
@@ -101,6 +128,11 @@
 	}
 
+	// Leave the thread monitor
+	// last routine called by a thread.
+	// Should never return
 	void __leave_thread_monitor( thread_desc * thrd ) {
 		monitor_desc * this = &thrd->mon;
+
+		// Lock the monitor now
 		lock_yield( &this->lock DEBUG_CTX2 );
 
@@ -111,26 +143,39 @@
 		verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
 
-		//Leaving a recursion level, decrement the counter
+		// Leaving a recursion level, decrement the counter
 		this->recursion -= 1;
 
-		//If we haven't left the last level of recursion
-		//it means we don't need to do anything
-		if( this->recursion != 0) {
-			unlock( &this->lock );
-			return;
-		}
-
+		// If we haven't left the last level of recursion
+		// it must mean there is an error
+		if( this->recursion != 0) { abortf("Thread internal monitor has unbalanced recursion"); }
+
+		// Fetch the next thread, can be null
 		thread_desc * new_owner = next_thread( this );
 
+		// Leave the thread, this will unlock the spinlock
+		// Use leave thread instead of BlockInternal which is
+		// specialized for this case and supports null new_owner
 		LeaveThread( &this->lock, new_owner );
-	}
-}
-
-static inline void enter(monitor_desc ** monitors, int count) {
+
+		// Control flow should never reach here!
+	}
+}
+
+// Enter multiple monitor
+// relies on the monitor array being sorted
+static inline void enter(monitor_desc ** monitors, int count, void (*func)() ) {
 	for(int i = 0; i < count; i++) {
-		__enter_monitor_desc( monitors[i] );
-	}
-}
-
+		__enter_monitor_desc( monitors[i], monitors, count, func );
+	}
+
+	int acc_idx = monitors[0]->accepted_index;
+	if( acc_idx >= 0 && monitors[0]->acceptables[ acc_idx ].run_preaccept ) {
+		assert( monitors[0]->pre_accept );
+		monitors[0]->pre_accept();
+	}
+}
+
+// Leave multiple monitor
+// relies on the monitor array being sorted
 static inline void leave(monitor_desc ** monitors, int count) {
 	for(int i = count - 1; i >= 0; i--) {
@@ -139,23 +184,38 @@
 }
 
-void ?{}( monitor_guard_t * this, monitor_desc ** m, int count ) {
+// Ctor for monitor guard
+// Sorts monitors before entering
+void ?{}( monitor_guard_t * this, monitor_desc ** m, int count, void (*func)() ) {
+	// Store current array
 	this->m = m;
 	this->count = count;
+
+	// Sort monitors based on address -> TODO use a sort specialized for small numbers
 	qsort(this->m, count);
-	enter( this->m, this->count );
-
+
+	// Enter the monitors in order
+	enter( this->m, this->count, func );
+
+	// Save previous thread context
 	this->prev_mntrs = this_thread->current_monitors;
 	this->prev_count = this_thread->current_monitor_count;
 
+	// Update thread context (needed for conditions)
 	this_thread->current_monitors      = m;
 	this_thread->current_monitor_count = count;
 }
 
+// Dtor for monitor guard
 void ^?{}( monitor_guard_t * this ) {
+	// Leave the monitors in order
 	leave( this->m, this->count );
 
+	// Restore thread context
 	this_thread->current_monitors      = this->prev_mntrs;
 	this_thread->current_monitor_count = this->prev_count;
 }
+
+//-----------------------------------------------------------------------------
+// Internal scheduling types
 
 void ?{}(__condition_node_t * this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
@@ -183,35 +243,30 @@
 // Internal scheduling
 void wait( condition * this, uintptr_t user_info = 0 ) {
-	// LIB_DEBUG_PRINT_SAFE("Waiting\n");
-
 	brand_condition( this );
 
-	//Check that everything is as expected
+	// Check that everything is as expected
 	assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
 	verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
 	verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );
 
-	unsigned short count = this->monitor_count;
-	unsigned int recursions[ count ];		//Save the current recursion levels to restore them later
-	spinlock *   locks     [ count ];		//We need to pass-in an array of locks to BlockInternal
-
-	// LIB_DEBUG_PRINT_SAFE("count %i\n", count);
-
-	__condition_node_t waiter = { (thread_desc*)this_thread, count, user_info };
-
-	__condition_criterion_t criteria[count];
-	for(int i = 0; i < count; i++) {
-		(&criteria[i]){ this->monitors[i], &waiter };
-		// LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
-	}
-
-	waiter.criteria = criteria;
+	// Create storage for monitor context
+	monitor_ctx( this->monitors, this->monitor_count );
+
+	// Create the node specific to this wait operation
+	wait_ctx( this_thread, user_info );
+
+	// Append the current wait operation to the ones already queued on the condition
+	// We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
 	append( &this->blocked, &waiter );
 
-	lock_all( this->monitors, locks, count );
-	save_recursion( this->monitors, recursions, count );
-	//DON'T unlock, ask the kernel to do it
-
-	//Find the next thread(s) to run
+	// Lock all monitors (and aggregate the locks as well)
+	lock_all( monitors, locks, count );
+
+	// DON'T unlock, ask the kernel to do it
+
+	// Save monitor state
+	save_recursion( monitors, recursions, count );
+
+	// Find the next thread(s) to run
 	unsigned short thread_count = 0;
 	thread_desc * threads[ count ];
@@ -220,14 +275,9 @@
 	}
 
+	// Remove any duplicate threads
 	for( int i = 0; i < count; i++) {
-		thread_desc * new_owner = next_thread( this->monitors[i] );
+		thread_desc * new_owner = next_thread( monitors[i] );
 		thread_count = insert_unique( threads, thread_count, new_owner );
 	}
-
-	// LIB_DEBUG_PRINT_SAFE("Will unblock: ");
-	for(int i = 0; i < thread_count; i++) {
-		// LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);
-	}
-	// LIB_DEBUG_PRINT_SAFE("\n");
 
 	// Everything is ready to go to sleep
@@ -235,24 +285,19 @@
 
 
-	//WE WOKE UP
-
-
-	//We are back, restore the owners and recursions
+	// WE WOKE UP
+
+
+	// We are back, restore the owners and recursions
 	lock_all( locks, count );
-	restore_recursion( this->monitors, recursions, count );
+	restore_recursion( monitors, recursions, count );
 	unlock_all( locks, count );
 }
 
 bool signal( condition * this ) {
-	if( is_empty( this ) ) {
-		// LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
-		return false;
-	}
+	if( is_empty( this ) ) { return false; }
 
 	//Check that everything is as expected
 	verify( this->monitors );
 	verify( this->monitor_count != 0 );
-
-	unsigned short count = this->monitor_count;
 
 	//Some more checking in debug
@@ -261,16 +306,17 @@
 		if ( this->monitor_count != this_thrd->current_monitor_count ) {
 			abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
-		} // if
+		}
 
 		for(int i = 0; i < this->monitor_count; i++) {
 			if ( this->monitors[i] != this_thrd->current_monitors[i] ) {
 				abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->current_monitors[i] );
-			} // if
+			}
 		}
 	);
 
-	//Lock all the monitors
+	unsigned short count = this->monitor_count;
+
+	// Lock all monitors
 	lock_all( this->monitors, NULL, count );
-	// LIB_DEBUG_PRINT_SAFE("Signalling");
 
 	//Pop the head of the waiting queue
@@ -280,11 +326,8 @@
 	for(int i = 0; i < count; i++) {
 		__condition_criterion_t * crit = &node->criteria[i];
-		// LIB_DEBUG_PRINT_SAFE(" %p", crit->target);
 		assert( !crit->ready );
 		push( &crit->target->signal_stack, crit );
 	}
 
-	// LIB_DEBUG_PRINT_SAFE("\n");
-
 	//Release
 	unlock_all( this->monitors, count );
@@ -294,8 +337,5 @@
 
 bool signal_block( condition * this ) {
-	if( !this->blocked.head ) {
-		LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
-		return false;
-	}
+	if( !this->blocked.head ) { return false; }
 
 	//Check that everything is as expected
@@ -303,32 +343,21 @@
 	verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
 
-	unsigned short count = this->monitor_count;
-	unsigned int recursions[ count ];		//Save the current recursion levels to restore them later
-	spinlock *   locks     [ count ];		//We need to pass-in an array of locks to BlockInternal
-
-	lock_all( this->monitors, locks, count );
-
-	//create creteria
-	__condition_node_t waiter = { (thread_desc*)this_thread, count, 0 };
-
-	__condition_criterion_t criteria[count];
-	for(int i = 0; i < count; i++) {
-		(&criteria[i]){ this->monitors[i], &waiter };
-		// LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
-		push( &criteria[i].target->signal_stack, &criteria[i] );
-	}
-
-	waiter.criteria = criteria;
+	// Create storage for monitor context
+	monitor_ctx( this->monitors, this->monitor_count );
+
+	// Lock all monitors (and aggregate the locks as well)
+	lock_all( monitors, locks, count );
+
+	// Create the node specific to this wait operation
+	wait_ctx_primed( this_thread, 0 )
 
 	//save contexts
-	save_recursion( this->monitors, recursions, count );
+	save_recursion( monitors, recursions, count );
 
 	//Find the thread to run
 	thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
 	for(int i = 0; i < count; i++) {
-		set_owner( this->monitors[i], signallee );
-	}
-
-	LIB_DEBUG_PRINT_SAFE( "Waiting on signal block\n" );
+		set_owner( monitors[i], signallee );
+	}
 
 	//Everything is ready to go to sleep
@@ -336,11 +365,10 @@
 
 
-
-
-	LIB_DEBUG_PRINT_SAFE( "Back from signal block\n" );
+	// WE WOKE UP
+
 
 	//We are back, restore the owners and recursions
 	lock_all( locks, count );
-	restore_recursion( this->monitors, recursions, count );
+	restore_recursion( monitors, recursions, count );
 	unlock_all( locks, count );
 
@@ -348,4 +376,5 @@
 }
 
+// Access the user_info of the thread waiting at the front of the queue
 uintptr_t front( condition * this ) {
 	verifyf( !is_empty(this),
@@ -358,28 +387,43 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling
-void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
-	// thread_desc * this = this_thread;
-
-	// unsigned short count = this->current_monitor_count;
-	// unsigned int recursions[ count ];		//Save the current recursion levels to restore them later
-	// spinlock *   locks     [ count ];		//We need to pass-in an array of locks to BlockInternal
-
-	// lock_all( this->current_monitors, locks, count );
-
-
-
-
-
-	// // // Everything is ready to go to sleep
-	// // BlockInternal( locks, count, threads, thread_count );
-
-
-	// //WE WOKE UP
-
-
-	// //We are back, restore the owners and recursions
-	// lock_all( locks, count );
-	// restore_recursion( this->monitors, recursions, count );
-	// unlock_all( locks, count );
+int __accept_internal( unsigned short acc_count, __acceptable_t * acceptables ) {
+	thread_desc * thrd = this_thread;
+
+	// Create storage for monitor context
+	monitor_ctx( acceptables->monitors, acceptables->count );
+
+	// Lock all monitors (and aggregate the locks as well)
+	lock_all( monitors, locks, count );
+
+	// Create the node specific to this wait operation
+	wait_ctx_primed( thrd, 0 );
+
+	// Check if the entry queue already contains a thread waiting on an acceptable function
+	thread_desc * next = search_entry_queue( acceptables, acc_count, monitors, count );
+
+	if( !next ) {
+		// Update acceptables on the current monitors
+		for(int i = 0; i < count; i++) {
+			monitors[i]->acceptables = acceptables;
+			monitors[i]->acceptable_count = acc_count;
+		}
+	}
+
+	save_recursion( monitors, recursions, count );
+
+	// Everything is ready to go to sleep
+	BlockInternal( locks, count, &next, next ? 1 : 0 );
+
+
+	//WE WOKE UP
+
+
+	//We are back, restore the owners and recursions
+	lock_all( locks, count );
+	restore_recursion( monitors, recursions, count );
+	int acc_idx = monitors[0]->accepted_index;
+	unlock_all( locks, count );
+
+	return acc_idx;
 }
 
@@ -415,4 +459,57 @@
 }
 
+static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
+	__acceptable_t* accs = this->acceptables; // Optim
+	int acc_cnt = this->acceptable_count;
+
+	// Check if there are any acceptable functions
+	if( !accs ) return -1;
+
+	// If this isn't the first monitor to test this, there is no reason to repeat the test.
+	if( this != group[0] ) return group[0]->accepted_index;
+
+	// For all acceptable functions check if this is the current function.
+	OUT_LOOP:
+	for( int i = 0; i < acc_cnt; i++ ) {
+		__acceptable_t * acc = &accs[i];
+
+		// if function matches, check the monitors
+		if( acc->func == func ) {
+
+			// If the group count is different then it can't be a match
+			if( acc->count != group_cnt ) return -1;
+
+			// Check that all the monitors match
+			for( int j = 0; j < group_cnt; j++ ) {
+				// If not a match, check next function
+				if( acc->monitors[j] != group[j] ) continue OUT_LOOP;
+			}
+
+			// It's a complete match, accept the call
+			return i;
+		}
+	}
+
+	// No function matched
+	return -1;
+}
+
+static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
+	for(int i = 0; i < count; i++) {
+		(&criteria[i]){ monitors[i], waiter };
+	}
+
+	waiter->criteria = criteria;
+}
+
+static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
+	for(int i = 0; i < count; i++) {
+		(&criteria[i]){ monitors[i], waiter };
+		push( &criteria[i].target->signal_stack, &criteria[i] );
+	}
+
+	waiter->criteria = criteria;
+}
+
 static inline void lock_all( spinlock ** locks, unsigned short count ) {
 	for( int i = 0; i < count; i++ ) {
@@ -505,4 +602,8 @@
 }
 
+static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count ) {
+	return NULL;
+}
+
 void ?{}( __condition_blocked_queue_t * this ) {
 	this->head = NULL;
Index: src/tests/sched-ext.c
===================================================================
--- src/tests/sched-ext.c	(revision 97e32964dc1e5125b35bd5db0234e6b6f272fe14)
+++ src/tests/sched-ext.c	(revision 97e32964dc1e5125b35bd5db0234e6b6f272fe14)
@@ -0,0 +1,72 @@
+#include <fstream>
+#include <kernel>
+#include <monitor>
+#include <stdlib>
+#include <thread>
+
+#include <time.h>
+
+static const unsigned long N = 2_500ul;
+
+#ifndef PREEMPTION_RATE
+#define PREEMPTION_RATE 10_000ul
+#endif
+
+unsigned int default_preemption() {
+	return PREEMPTION_RATE;
+}
+
+monitor global_t {};
+
+global_t globalA;
+
+thread Acceptor {};
+thread Acceptee {};
+
+//----------------------------------------------------------------------------------------------------
+// Acceptor
+void do_notify( global_t * mutex a );
+
+void do_wait( global_t * mutex a ) {
+	sout | "Preparing to wait" | endl;
+
+	__acceptable_t acceptable;
+	acceptable.func          = (void_fptr_t)do_notify;
+	acceptable.count         = 1;
+	acceptable.monitors      = &a;
+	acceptable.run_preaccept = false;
+
+	sout | "Waiting for notify" | endl;
+
+	int ret = __accept_internal( 1, &acceptable );
+	sout | "Back from wating, accepted" | ret | endl;
+}
+
+void main( Acceptor* this ) {
+	do_wait( &globalA );
+}
+
+//----------------------------------------------------------------------------------------------------
+// Acceptee
+void do_notify( global_t * mutex a ) {
+	sout | "Notifying" | endl;
+}
+
+void main( Acceptee* this ) {
+	for( volatile int i = 0; i < N; i++ );
+
+	sout | "Call Notify" | endl;
+	do_notify( &globalA );
+}
+
+//----------------------------------------------------------------------------------------------------
+// Main
+int main(int argc, char* argv[]) {
+	processor p;
+	sout | "Starting" | endl;
+	{
+		Acceptor r;
+		Acceptee e;
+	}
+	sout | "Done" | endl;
+}
