Index: libcfa/src/concurrency/invoke.h
===================================================================
--- libcfa/src/concurrency/invoke.h	(revision 50b88854c78d94229d63df8d800995aebc5797b4)
+++ libcfa/src/concurrency/invoke.h	(revision b0c7419eaea3be8c569d9296836b9fdf20f008ab)
@@ -92,6 +92,6 @@
 	};
 
-	enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun, Reschedule };
-	enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION };
+	enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun };
+	enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };
 
 	struct coroutine_desc {
Index: libcfa/src/concurrency/kernel.cfa
===================================================================
--- libcfa/src/concurrency/kernel.cfa	(revision 50b88854c78d94229d63df8d800995aebc5797b4)
+++ libcfa/src/concurrency/kernel.cfa	(revision b0c7419eaea3be8c569d9296836b9fdf20f008ab)
@@ -213,4 +213,5 @@
 	this.cltr = &cltr;
 	terminated{ 0 };
+	destroyer = 0p;
 	do_terminate = false;
 	preemption_alarm = 0p;
@@ -320,7 +321,4 @@
 	coroutine_desc * proc_cor = get_coroutine(this->runner);
 
-	// Reset the terminating actions here
-	this->finish.action_code = No_Action;
-
 	// Update global state
 	kernelTLS.this_thread = thrd_dst;
@@ -334,5 +332,5 @@
 		if(unlikely(thrd_dst->preempted)) {
 			thrd_dst->preempted = __NO_PREEMPTION;
-			verify(thrd_dst->state == Active || thrd_dst->state == Rerun || thrd_dst->state == Reschedule);
+			verify(thrd_dst->state == Active || thrd_dst->state == Rerun);
 		} else {
 			verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive);
@@ -372,4 +370,8 @@
 				// The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on
 				thrd_dst->state = Halted;
+
+				// We may need to wake someone up here since the destroying thread may be parked waiting for this thread to halt
+				unpark( this->destroyer );
+				this->destroyer = 0p;
 				break RUNNING;
 			case Active:
@@ -380,10 +382,4 @@
 				// In this case, just run it again.
 				continue RUNNING;
-			case Reschedule:
-				// This is case 3, someone tried to run this before it finished blocking
-				// but it must go through the ready-queue
-				thrd_dst->state = Inactive;  /*restore invariant */
-				ScheduleThread( thrd_dst );
-				break RUNNING;
 			default:
 				// This makes no sense, something is wrong abort
@@ -397,5 +393,5 @@
 
 // KERNEL_ONLY
-static void returnToKernel() {
+void returnToKernel() {
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
@@ -553,5 +549,5 @@
 	/* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
 	                  "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
-	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule,
+	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
 	                  "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
 	/* paranoid */ #endif
@@ -589,12 +585,10 @@
 }
 
-void unpark( thread_desc * thrd, bool must_yield ) {
+void unpark( thread_desc * thrd ) {
 	if( !thrd ) return;
-
-	enum coroutine_state new_state = must_yield ? Reschedule : Rerun;
 
 	disable_interrupts();
 	static_assert(sizeof(thrd->state) == sizeof(int));
-	enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, new_state, __ATOMIC_SEQ_CST);
+	enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST);
 	switch(old_state) {
 		case Active:
@@ -609,5 +603,4 @@
 			break;
 		case Rerun:
-		case Reschedule:
 			abort("More than one thread attempted to schedule thread %p\n", thrd);
 			break;
@@ -637,7 +630,8 @@
 
 // KERNEL ONLY
-void LeaveThread() {
+void __leave_thread() {
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	returnToKernel();
+	abort();
 }
 
@@ -649,5 +643,5 @@
 
 	thread_desc * thrd = kernelTLS.this_thread;
-	/* paranoid */ verify(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule);
+	/* paranoid */ verify(thrd->state == Active || thrd->state == Rerun);
 
 	// SKULLDUGGERY: It is possible that we are preempting this thread just before
Index: libcfa/src/concurrency/kernel.hfa
===================================================================
--- libcfa/src/concurrency/kernel.hfa	(revision 50b88854c78d94229d63df8d800995aebc5797b4)
+++ libcfa/src/concurrency/kernel.hfa	(revision b0c7419eaea3be8c569d9296836b9fdf20f008ab)
@@ -45,53 +45,4 @@
 extern struct cluster * mainCluster;
 
-enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule, Callback };
-
-typedef void (*__finish_callback_fptr_t)(void);
-
-//TODO use union, many of these fields are mutually exclusive (i.e. MULTI vs NOMULTI)
-struct FinishAction {
-	FinishOpCode action_code;
-	/*
-	// Union of possible actions
-	union {
-		// Option 1 : locks and threads
-		struct {
-			// 1 thread or N thread
-			union {
-				thread_desc * thrd;
-				struct {
-					thread_desc ** thrds;
-					unsigned short thrd_count;
-				};
-			};
-			// 1 lock or N lock
-			union {
-				__spinlock_t * lock;
-				struct {
-					__spinlock_t ** locks;
-					unsigned short lock_count;
-				};
-			};
-		};
-		// Option 2 : action pointer
-		__finish_callback_fptr_t callback;
-	};
-	/*/
-	thread_desc * thrd;
-	thread_desc ** thrds;
-	unsigned short thrd_count;
-	__spinlock_t * lock;
-	__spinlock_t ** locks;
-	unsigned short lock_count;
-	__finish_callback_fptr_t callback;
-	//*/
-};
-static inline void ?{}(FinishAction & this) {
-	this.action_code = No_Action;
-	this.thrd = 0p;
-	this.lock = 0p;
-}
-static inline void ^?{}(FinishAction &) {}
-
 // Processor
 coroutine processorCtx_t {
@@ -116,5 +67,5 @@
 	// RunThread data
 	// Action to do after a thread is ran
-	struct FinishAction finish;
+	thread_desc * destroyer;
 
 	// Preemption data
Index: libcfa/src/concurrency/kernel_private.hfa
===================================================================
--- libcfa/src/concurrency/kernel_private.hfa	(revision 50b88854c78d94229d63df8d800995aebc5797b4)
+++ libcfa/src/concurrency/kernel_private.hfa	(revision b0c7419eaea3be8c569d9296836b9fdf20f008ab)
@@ -34,7 +34,5 @@
 
 //Block current thread and release/wake-up the following resources
-void LeaveThread();
-
-bool force_yield( enum __Preemption_Reason );
+void __leave_thread() __attribute__((noreturn));
 
 //-----------------------------------------------------------------------------
Index: libcfa/src/concurrency/monitor.cfa
===================================================================
--- libcfa/src/concurrency/monitor.cfa	(revision 50b88854c78d94229d63df8d800995aebc5797b4)
+++ libcfa/src/concurrency/monitor.cfa	(revision b0c7419eaea3be8c569d9296836b9fdf20f008ab)
@@ -277,5 +277,5 @@
 		disable_interrupts();
 
-		thrd->self_cor.state = Halted;
+		thrd->state = Halted;
 
 		/* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
@@ -296,10 +296,12 @@
 		// Unpark the next owner if needed
 		/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
-		unpark( new_owner );
-
-		// Leave the thread, this will unlock the spinlock
-		// Use leave thread instead of park which is
-		// specialized for this case
-		LeaveThread();
+		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+		/* paranoid */ verify( ! kernelTLS.this_processor->destroyer );
+		/* paranoid */ verify( thrd->state == Halted );
+
+		kernelTLS.this_processor->destroyer = new_owner;
+
+		// Leave the thread
+		__leave_thread();
 
 		// Control flow should never reach here!
Index: libcfa/src/concurrency/thread.hfa
===================================================================
--- libcfa/src/concurrency/thread.hfa	(revision 50b88854c78d94229d63df8d800995aebc5797b4)
+++ libcfa/src/concurrency/thread.hfa	(revision b0c7419eaea3be8c569d9296836b9fdf20f008ab)
@@ -101,20 +101,16 @@
 //----------
 // Unpark a thread, if the thread is already blocked, schedule it
-//                  if the thread is not yet block, signal that it should rerun immediately or reschedule itself
-void unpark( thread_desc * this, bool must_yield );
-
-static inline void unpark( thread_desc * this ) { unpark( this, false ); }
+//                  if the thread is not yet blocked, signal that it should rerun immediately
+void unpark( thread_desc * this );
 
 forall( dtype T | is_thread(T) )
-static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ), false );}
-
-forall( dtype T | is_thread(T) )
-static inline void unpark( T & this, bool must_yield ) { if(!&this) return; unpark( get_thread( this ), must_yield );}
+static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ) );}
 
 //----------
 // Yield: force thread to block and be rescheduled
+bool force_yield( enum __Preemption_Reason );
+
 static inline void yield() {
-	unpark( active_thread(), true );
-	park();
+	force_yield(__MANUAL_PREEMPTION);
 }
 
