Index: src/libcfa/concurrency/invoke.h
===================================================================
--- src/libcfa/concurrency/invoke.h	(revision 206de5ab2b76ee44636062f62bc9a1ff10a14888)
+++ src/libcfa/concurrency/invoke.h	(revision 549c006f5f7e2f45d09672b9abae0dfe3809c271)
@@ -96,5 +96,6 @@
             struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
             unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
-            struct __waitfor_mask_t mask;               // mask used to know if some thread is waiting for something while holding the monitor
+            struct __waitfor_mask_t mask;             // mask used to know if some thread is waiting for something while holding the monitor
+            struct __condition_node_t * dtor_node;    // node used to signal the dtor in a waitfor dtor
       };
 
Index: src/libcfa/concurrency/monitor
===================================================================
--- src/libcfa/concurrency/monitor	(revision 206de5ab2b76ee44636062f62bc9a1ff10a14888)
+++ src/libcfa/concurrency/monitor	(revision 549c006f5f7e2f45d09672b9abae0dfe3809c271)
@@ -29,12 +29,17 @@
 static inline void ?{}(monitor_desc & this) {
 	(this.lock){};
-	this.owner = NULL;
 	(this.entry_queue){};
 	(this.signal_stack){};
-	this.recursion = 0;
+	this.owner         = NULL;
+	this.recursion     = 0;
 	this.mask.accepted = NULL;
 	this.mask.clauses  = NULL;
 	this.mask.size     = 0;
+	this.dtor_node     = NULL;
 }
+
+// static inline int ?<?(monitor_desc* lhs, monitor_desc* rhs) {
+// 	return ((intptr_t)lhs) < ((intptr_t)rhs);
+// }
 
 struct monitor_guard_t {
@@ -46,10 +51,17 @@
 };
 
-static inline int ?<?(monitor_desc* lhs, monitor_desc* rhs) {
-	return ((intptr_t)lhs) < ((intptr_t)rhs);
-}
-
 void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, void (*func)() );
 void ^?{}( monitor_guard_t & this );
+
+
+struct monitor_dtor_guard_t {
+	monitor_desc * m;              // the single monitor being destroyed
+	monitor_desc ** prev_mntrs;    // saved thread monitor-group list, restored by the dtor
+	unsigned short  prev_count;    // saved thread monitor-group size, restored by the dtor
+	fptr_t          prev_func;     // saved thread monitor-group function, restored by the dtor
+};
+
+void ?{}( monitor_dtor_guard_t & this, monitor_desc ** m, void (*func)() );
+void ^?{}( monitor_dtor_guard_t & this );
 
 //-----------------------------------------------------------------------------
Index: src/libcfa/concurrency/monitor.c
===================================================================
--- src/libcfa/concurrency/monitor.c	(revision 206de5ab2b76ee44636062f62bc9a1ff10a14888)
+++ src/libcfa/concurrency/monitor.c	(revision 549c006f5f7e2f45d09672b9abae0dfe3809c271)
@@ -94,5 +94,5 @@
 		}
 		else if( this->owner == thrd) {
-			// We already have the monitor, just not how many times we took it
+			// We already have the monitor, just note how many times we took it
 			verify( this->recursion > 0 );
 			this->recursion += 1;
@@ -127,4 +127,63 @@
 		unlock( &this->lock );
 		return;
+	}
+
+	static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
+		// Lock the monitor spinlock, lock_yield to reduce contention
+		lock_yield( &this->lock DEBUG_CTX2 );
+		thread_desc * thrd = this_thread;
+
+		LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
+
+
+		if( !this->owner ) {
+			LIB_DEBUG_PRINT_SAFE("Kernel : Destroying free mon %p\n", this);
+
+			// No one has the monitor, just take it
+			set_owner( this, thrd );
+
+			unlock( &this->lock );
+			return;
+		}
+		else if( this->owner == thrd) {
+			// We already have the monitor... but we're about to destroy it, so re-entering would deadlock
+			// Abort!
+			abortf("Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd);
+		}
+
+		int count = 1;
+		monitor_desc ** monitors = &this;
+		__monitor_group_t group = { &this, 1, func };
+		if( is_accepted( this, group) ) {
+			LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts dtor, block and signal it \n");
+
+			// Reset mask
+			reset_mask( this );
+
+			// Create the node specific to this wait operation
+			wait_ctx_primed( this_thread, 0 )
+
+			// Some one else has the monitor, wait for him to finish and then run
+			BlockInternal( &this->lock );
+
+			// Some one was waiting for us, enter
+			set_owner( this, thrd );
+		}
+		else {
+			LIB_DEBUG_PRINT_SAFE("Kernel :  blocking \n");
+
+			wait_ctx( this_thread, 0 )
+			this->dtor_node = &waiter;
+
+			// Some one else has the monitor, wait in line for it
+			append( &this->entry_queue, thrd );
+			BlockInternal( &this->lock );
+
+			// BlockInternal will unlock spinlock, no need to unlock ourselves
+			return;
+		}
+
+		LIB_DEBUG_PRINT_SAFE("Kernel : Destroying %p\n", this);
+
+	}
 
@@ -158,4 +217,16 @@
 	}
 
+	// Leave single monitor for the last time (debug-build sanity checks on the dtor path)
+	void __leave_dtor_monitor_desc( monitor_desc * this ) {
+		LIB_DEBUG_DO(
+			if( this_thread != this->owner ) {
+				abortf("Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, this_thread, this->owner);
+			}
+			if( this->recursion != 1 ) {
+				abortf("Destroyed monitor %p has %u outstanding nested calls.\n", this, this->recursion - 1);
+			}
+		)
+	}
+
 	// Leave the thread monitor
 	// last routine called by a thread.
@@ -210,5 +281,5 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, void (*func)() ) {
+void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, fptr_t func ) {
 	// Store current array
 	this.m = m;
@@ -246,4 +317,36 @@
 
 	// LIB_DEBUG_PRINT_SAFE("MGUARD : left\n");
+
+	// Restore thread context
+	this_thread->monitors.list = this.prev_mntrs;
+	this_thread->monitors.size = this.prev_count;
+	this_thread->monitors.func = this.prev_func;
+}
+
+
+// Ctor for monitor dtor guard
+// Single monitor only, so no sorting needed
+void ?{}( monitor_dtor_guard_t & this, monitor_desc ** m, fptr_t func ) {
+	// Store current array
+	this.m = *m;
+
+	// Save previous thread context
+	this.prev_mntrs = this_thread->monitors.list;
+	this.prev_count = this_thread->monitors.size;
+	this.prev_func  = this_thread->monitors.func;
+
+	// Update thread context (needed for conditions)
+	this_thread->monitors.list = m;
+	this_thread->monitors.size = 1;
+	this_thread->monitors.func = func;
+
+	__enter_monitor_dtor( this.m, func );
+}
+
+
+// Dtor for monitor guard
+void ^?{}( monitor_dtor_guard_t & this ) {
+	// Leave the monitors in order
+	__leave_dtor_monitor_desc( this.m );
 
 	// Restore thread context
@@ -448,5 +551,13 @@
 			*mask.accepted = index;
 			if( mask.clauses[index].is_dtor ) {
-				#warning case not implemented
+				verifyf( mask.clauses[index].size == 1        , "ERROR: Accepted dtor has more than 1 mutex parameter." );
+
+				monitor_desc * mon2dtor = mask.clauses[index].list[0];
+				verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
+
+				__condition_criterion_t * dtor_crit = mon2dtor->dtor_node->criteria;
+				push( &mon2dtor->signal_stack, dtor_crit );
+
+				unlock_all( locks, count );
 			}
 			else {
