Index: libcfa/src/concurrency/actor.hfa
===================================================================
--- libcfa/src/concurrency/actor.hfa	(revision d964c39341a44901d924f02a9e01d5cf08df25cc)
+++ libcfa/src/concurrency/actor.hfa	(revision 1e38178d2850a9cb987631ad78f994d928456ae6)
@@ -3,7 +3,8 @@
 #include <locks.hfa>
 #include <limits.hfa>
-#include <list.hfa>
 #include <kernel.hfa>
-#include <vector2.hfa>
+#include <time_t.hfa>
+#include <time.hfa>
+#include <iofwd.hfa>
 
 #ifdef __CFA_DEBUG__
@@ -21,24 +22,24 @@
 // Define the default number of executor request-queues (mailboxes) written to by actors and serviced by the
 // actor-executor threads. Must be greater than 0.
-#define __DEFAULT_EXECUTOR_RQUEUES__ 2
+#define __DEFAULT_EXECUTOR_RQUEUES__ 4
 
 // Define if executor is created in a separate cluster
 #define __DEFAULT_EXECUTOR_SEPCLUS__ false
 
-#define __STEAL 1 // workstealing toggle. Disjoint from toggles above
-
-// whether to steal work or to steal a queue Only applicable if __STEAL == 1
-#define __STEAL_WORK 0
-
-// heuristic selection (only set one to be 1)
-#define __RAND_QUEUE 1
-#define __RAND_WORKER 0
-
-// show stealing stats
-// #define __STEAL_STATS
+#define __DEFAULT_EXECUTOR_BUFSIZE__ 10
+
+// #define __STEAL // workstealing toggle ( tested with #ifdef, so any definition enables stealing ). Disjoint from toggles above
+
+// workstealing heuristic selection ( tested with #if, so define exactly one of these to 1 )
+// #define RAND 1
+// #define SEARCH 1
+
+// show stats
+// #define STATS
 
 // forward decls
 struct actor;
 struct message;
+struct executor;
 
 enum Allocation { Nodelete, Delete, Destroy, Finished }; // allocation status
@@ -66,5 +67,6 @@
 }
 
-// hybrid data structure. Copies until buffer is full and then allocates for intrusive list
+// Vector-like data structure that supports O(1) queue operations with no bound on size
+// assumes gulping behaviour (once a remove occurs, removes happen until the queue is empty before the next insert)
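+//
+// Usage sketch ( illustrative only; `req` is some request value in scope ):
+//     copy_queue q{ 10 };                  // buffer initially sized for 10 requests
+//     insert( q, req );                    // copies the request into the buffer
+//     while ( ! isEmpty( q ) ) {           // gulping: drain fully before inserting again
+//         request & r = remove( q );
+//     }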
 struct copy_queue {
     request * buffer;
@@ -89,11 +91,10 @@
         /* paranoid */ verify( buffer );
     }
-    buffer[count]{ elem }; // C_TODO: change to memcpy
-    // memcpy( &buffer[count], &elem, sizeof(request) );
+    memcpy( &buffer[count], &elem, sizeof(request) );
     count++;
 }
 
 // once you start removing you need to remove all elements
-// it is not supported to call insert() before the list is fully empty
+// it is not supported to call insert() before the array is fully empty
 static inline request & remove( copy_queue & this ) with(this) {
     if ( count > 0 ) {
@@ -107,5 +108,5 @@
 }
 
-// try to reclaim some memory
+// try to reclaim some memory if less than half of buffer is utilized
 static inline void reclaim( copy_queue & this ) with(this) {
     if ( utilized >= last_size || buffer_size <= 4 ) { utilized = 0; return; }
@@ -117,16 +118,27 @@
 static inline bool isEmpty( copy_queue & this ) with(this) { return count == 0; }
 
-static size_t __buffer_size = 10; // C_TODO: rework this to be passed from executor through ctors (no need for global)
 struct work_queue {
     __spinlock_t mutex_lock;
-    copy_queue owned_queue;
-    copy_queue * c_queue;
-    volatile bool being_processed;
+    copy_queue * owned_queue;       // copy queue allocated and cleaned up by this work_queue
+    copy_queue * c_queue;           // current queue
+    volatile bool being_processed;  // flag to prevent concurrent processing
+    #ifdef STATS
+    unsigned int id;
+    size_t missed;                  // transfers skipped because the being_processed flag was set
+    #endif
 }; // work_queue
-static inline void ?{}( work_queue & this ) with(this) { 
-    owned_queue{ __buffer_size };
-    c_queue = &owned_queue;
+static inline void ?{}( work_queue & this, size_t buf_size, unsigned int i ) with(this) { 
+    owned_queue = alloc();      // allocated separately to avoid false sharing
+    (*owned_queue){ buf_size };
+    c_queue = owned_queue;
     being_processed = false;
-}
+    #ifdef STATS
+    id = i;
+    missed = 0;
+    #endif
+}
+
+// clean up copy_queue owned by this work_queue
+static inline void ^?{}( work_queue & this ) with(this) { delete( owned_queue ); }
 
 static inline void insert( work_queue & this, request & elem ) with(this) {
@@ -136,16 +148,13 @@
 } // insert
 
-static inline void transfer( work_queue & this, copy_queue ** transfer_to, work_queue ** queue_arr, unsigned int idx ) with(this) {
+static inline void transfer( work_queue & this, copy_queue ** transfer_to ) with(this) {
     lock( mutex_lock __cfaabi_dbg_ctx2 );
-    #if __STEAL
-
-    #if __STEAL_WORK
-    if (  unlikely( being_processed ) )
-    #else
-    // check if queue has been stolen out from under us between
-    // transfer() call and lock acquire C_TODO: maybe just use new queue!
-    if ( unlikely( being_processed || queue_arr[idx] != &this ) )
-    #endif // __STEAL_WORK
-    {
+    #ifdef __STEAL
+
+    // check if queue is being processed elsewhere
+    if ( unlikely( being_processed ) ) {
+        #ifdef STATS
+        missed++;
+        #endif
         unlock( mutex_lock );
         return;
@@ -164,46 +173,60 @@
 } // transfer
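+
+// Gulping: transfer() swaps the work queue's c_queue with the worker's (empty) current_queue,
+// so the worker drains a private snapshot while producers keep inserting into the fresh queue.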
 
+// holds per-worker info that must persist past the worker's lifetime
+struct worker_info {
+    volatile unsigned long long stamp;
+    #ifdef STATS
+    size_t stolen_from;
+    #endif
+};
+static inline void ?{}( worker_info & this ) {
+    #ifdef STATS
+    this.stolen_from = 0;
+    #endif
+    this.stamp = rdtscl();
+}
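+
+// Note: the stamp feeds the SEARCH work-stealing heuristic below; a worker refreshes its own
+// stamp with rdtscl() whenever it runs out of work, so the victim with the smallest stamp has
+// gone longest without going idle and is the most promising owner to steal from.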
+
+#ifdef STATS
+unsigned int * stolen_arr;
+unsigned int * replaced_queue;
+#endif
 thread worker {
     work_queue ** request_queues;
     copy_queue * current_queue;
-    worker ** worker_arr; // C_TODO: change n_workers, n_queues,worker_arr to just be pulled from ptr to executor
-	request & req;
-    unsigned int start, range, empty_count, n_workers, n_queues, id;
-    #ifdef __STEAL_STATS
-    unsigned int try_steal, stolen;
+    executor * executor_;
+    unsigned int start, range;
+    int id;
+    #ifdef STATS
+    size_t try_steal, stolen, failed_swaps, msgs_stolen;
+    unsigned long long processed;
+    size_t gulps;
     #endif
 };
 
-#ifdef __STEAL_STATS
-unsigned int total_tries = 0, total_stolen = 0, total_workers;
+#ifdef STATS
+// aggregate counters for statistics
+size_t total_tries = 0, total_stolen = 0, total_workers, all_gulps = 0,
+    total_failed_swaps = 0, all_processed = 0, __num_actors_stats = 0, all_msgs_stolen = 0;
 #endif
-static inline void ?{}( worker & this, cluster & clu, work_queue ** request_queues, copy_queue * current_queue, unsigned int start,
-        unsigned int range, worker ** worker_arr, unsigned int n_workers, unsigned int n_queues, unsigned int id ) {
+static inline void ?{}( worker & this, cluster & clu, work_queue ** request_queues, copy_queue * current_queue, executor * executor_,
+    unsigned int start, unsigned int range, int id ) {
     ((thread &)this){ clu };
-    this.request_queues = request_queues;
-    this.current_queue = current_queue;
-    this.start = start;
-    this.range = range;
-    this.empty_count = 0;
-    this.n_workers = n_workers;
-    this.worker_arr = worker_arr;
-    this.n_queues = n_queues;
-    this.id = id;
-    #ifdef __STEAL_STATS
-    this.try_steal = 0;
-    this.stolen = 0;
-    total_workers = n_workers;
-    #endif
-}
-static inline void ^?{}( worker & mutex this ) with(this) { 
-    // delete( current_queue );
-    #ifdef __STEAL_STATS
-    __atomic_add_fetch(&total_tries, try_steal, __ATOMIC_SEQ_CST);
-    __atomic_add_fetch(&total_stolen, stolen, __ATOMIC_SEQ_CST);
-    if (__atomic_sub_fetch(&total_workers, 1, __ATOMIC_SEQ_CST) == 0)
-        printf("steal attempts: %u, steals: %u\n", total_tries, total_stolen);
-    #endif
-}
-
+    this.request_queues = request_queues;           // array of all queues
+    this.current_queue = current_queue;             // currently gulped queue (start with empty queue to use in swap later)
+    this.executor_ = executor_;                     // pointer to current executor
+    this.start = start;                             // start of worker's subrange of request_queues
+    this.range = range;                             // size of worker's subrange of request_queues
+    this.id = id;                                   // worker's id and index in array of workers
+    #ifdef STATS
+    this.try_steal = 0;                             // attempts to steal
+    this.stolen = 0;                                // successful steals
+    this.processed = 0;                             // requests processed
+    this.gulps = 0;                                 // number of gulps
+    this.failed_swaps = 0;                          // steal swap failures
+    this.msgs_stolen = 0;                           // number of messages stolen
+    #endif
+}
+
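+// set in the executor ctor when nrqueues == nworkers, since then no worker has surplus queues worth stealing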
+static bool no_steal = false;
 struct executor {
     cluster * cluster;							    // if workers execute on separate cluster
@@ -213,11 +236,42 @@
 	work_queue ** worker_req_queues;                // secondary array of work queues to allow for swapping
     worker ** workers;								// array of workers executing work requests
+    worker_info * w_infos;                          // array of info about each worker 
 	unsigned int nprocessors, nworkers, nrqueues;	// number of processors/threads/request queues
 	bool seperate_clus;								// use same or separate cluster for executor
 }; // executor
 
+// #ifdef STATS
+// __spinlock_t out_lock;
+// #endif
+static inline void ^?{}( worker & mutex this ) with(this) { 
+    #ifdef STATS
+    __atomic_add_fetch(&all_gulps, gulps,__ATOMIC_SEQ_CST);
+    __atomic_add_fetch(&all_processed, processed,__ATOMIC_SEQ_CST);
+    __atomic_add_fetch(&all_msgs_stolen, msgs_stolen,__ATOMIC_SEQ_CST);
+    __atomic_add_fetch(&total_tries, try_steal, __ATOMIC_SEQ_CST);
+    __atomic_add_fetch(&total_stolen, stolen, __ATOMIC_SEQ_CST);
+    __atomic_add_fetch(&total_failed_swaps, failed_swaps, __ATOMIC_SEQ_CST);
+
+    // per worker steal stats (uncomment alongside the lock above this routine to print)
+    // lock( out_lock __cfaabi_dbg_ctx2 );
+    // printf("Worker id: %d, processed: %llu messages, attempted %lu, stole: %lu, stolen from: %lu\n", id, processed, try_steal, stolen, __atomic_add_fetch(&executor_->w_infos[id].stolen_from, 0, __ATOMIC_SEQ_CST) );
+    // int count = 0;
+    // int count2 = 0;
+    // for ( i; range ) {
+    //     if ( replaced_queue[start + i] > 0 ){
+    //         count++;
+    //         // printf("%d: %u, ",i, replaced_queue[i]);
+    //     }
+    //     if (__atomic_add_fetch(&stolen_arr[start + i],0,__ATOMIC_SEQ_CST) > 0)
+    //         count2++;
+    // }
+    // printf("swapped with: %d of %u indices\n", count, executor_->nrqueues / executor_->nworkers );
+    // printf("%d of %u indices were stolen\n", count2, executor_->nrqueues / executor_->nworkers );
+    // unlock( out_lock );
+    #endif
+}
+
 static inline void ?{}( executor & this, unsigned int nprocessors, unsigned int nworkers, unsigned int nrqueues, bool seperate_clus, size_t buf_size ) with(this) {
     if ( nrqueues < nworkers ) abort( "nrqueues needs to be >= nworkers\n" );
-    __buffer_size = buf_size;
     this.nprocessors = nprocessors;
     this.nworkers = nworkers;
@@ -225,4 +279,13 @@
     this.seperate_clus = seperate_clus;
 
+    if ( nworkers == nrqueues )
+        no_steal = true;
+    
+    #ifdef STATS
+    stolen_arr = aalloc( nrqueues );
+    replaced_queue = aalloc( nrqueues );
+    total_workers = nworkers;
+    #endif
+
     if ( seperate_clus ) {
         cluster = alloc();
@@ -233,5 +296,5 @@
     worker_req_queues = aalloc( nrqueues );
     for ( i; nrqueues ) {
-        request_queues[i]{};
+        request_queues[i]{ buf_size, i };
         worker_req_queues[i] = &request_queues[i];
     }
@@ -242,13 +305,19 @@
 
     local_queues = aalloc( nworkers );
-    workers = alloc( nworkers );
+    workers = aalloc( nworkers );
+    w_infos = aalloc( nworkers );
     unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
+
+    for ( i; nworkers ) {
+        w_infos[i]{};
+        local_queues[i]{ buf_size };
+    }
+
     for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) {
-        local_queues[i]{ buf_size };
         range = reqPerWorker + ( i < extras ? 1 : 0 );
-        (*(workers[i] = alloc())){ *cluster, worker_req_queues, &local_queues[i], start, range, workers, nworkers, nrqueues, i };
+        (*(workers[i] = alloc())){ *cluster, worker_req_queues, &local_queues[i], &this, start, range, i };
     } // for
 }
-static inline void ?{}( executor & this, unsigned int nprocessors, unsigned int nworkers, unsigned int nrqueues, bool seperate_clus ) { this{ nprocessors, nworkers, nrqueues, seperate_clus, __buffer_size }; }
+static inline void ?{}( executor & this, unsigned int nprocessors, unsigned int nworkers, unsigned int nrqueues, bool seperate_clus ) { this{ nprocessors, nworkers, nrqueues, seperate_clus, __DEFAULT_EXECUTOR_BUFSIZE__ }; }
 static inline void ?{}( executor & this, unsigned int nprocessors, unsigned int nworkers, unsigned int nrqueues ) { this{ nprocessors, nworkers, nrqueues, __DEFAULT_EXECUTOR_SEPCLUS__ }; }
 static inline void ?{}( executor & this, unsigned int nprocessors, unsigned int nworkers ) { this{ nprocessors, nworkers, __DEFAULT_EXECUTOR_RQUEUES__ }; }
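+// Construction sketch ( illustrative; omitted arguments fall back to the defaults above ):
+//     executor full{ 4, 4, 16, false, 10 };    // processors, workers, request queues, separate cluster?, buffer size
+//     executor dflt{ 2, 2 };                   // nrqueues, cluster flag, and buffer size defaulted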
@@ -256,7 +325,6 @@
 static inline void ?{}( executor & this ) { this{ __DEFAULT_EXECUTOR_PROCESSORS__ }; }
 
-// C_TODO: once stealing is implemented make sure shutdown still works
 static inline void ^?{}( executor & this ) with(this) {
-    #if __STEAL
+    #ifdef __STEAL
     request sentinels[nrqueues];
     for ( unsigned int i = 0; i < nrqueues; i++ ) {
@@ -265,6 +333,7 @@
     #else
     request sentinels[nworkers];
-    unsigned int reqPerWorker = nrqueues / nworkers;
-    for ( unsigned int i = 0, step = 0; i < nworkers; i += 1, step += reqPerWorker ) {
+    unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
+    for ( unsigned int i = 0, step = 0, range; i < nworkers; i += 1, step += range ) {
+        range = reqPerWorker + ( i < extras ? 1 : 0 );
         insert( request_queues[step], sentinels[i] );		// force eventual termination
     } // for
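+    // each sentinel is a request whose stop flag ends a worker's main loop ( see "if ( req.stop ) break Exit" below ),
+    // guaranteeing every worker eventually gulps one and terminates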
@@ -278,5 +347,15 @@
     } // for
 
+    #ifdef STATS
+    size_t misses = 0;
+    for ( i; nrqueues ) {
+        misses += worker_req_queues[i]->missed;
+    }
+    adelete( stolen_arr );
+    adelete( replaced_queue );
+    #endif
+
     adelete( workers );
+    adelete( w_infos );
     adelete( local_queues );
     adelete( request_queues );
@@ -284,21 +363,35 @@
     adelete( processors );
     if ( seperate_clus ) delete( cluster );
+
+    #ifdef STATS
+    printf("    Actor System Stats:\n");
+    printf("\tActors Created:\t\t\t\t%lu\n\tMessages Sent:\t\t\t\t%lu\n", __num_actors_stats, all_processed);
+    size_t avg_gulp_size = all_gulps == 0 ? 0 : all_processed / all_gulps;             // guard against division by zero when no gulps occurred
+    printf("\tGulps:\t\t\t\t\t%lu\n\tAverage Gulp Size:\t\t\t%lu\n\tMissed gulps:\t\t\t\t%lu\n", all_gulps, avg_gulp_size, misses);
+    printf("\tSteal attempts:\t\t\t\t%lu\n\tSteals:\t\t\t\t\t%lu\n\tSteal failures (no candidates):\t\t%lu\n\tSteal failures (failed swaps):\t\t%lu\n", 
+        total_tries, total_stolen, total_tries - total_stolen - total_failed_swaps, total_failed_swaps);
+    size_t avg_steal_size = total_stolen == 0 ? 0 : all_msgs_stolen / total_stolen;     // guard against division by zero when no steals occurred
+    printf("\tMessages stolen:\t\t\t%lu\n\tAverage steal size:\t\t\t%lu\n", all_msgs_stolen, avg_steal_size);
+    #endif
 }
 
-// this is a static field of executor but have to forward decl for get_next_ticket
+// this is conceptually a static field of executor but has to be forward declared for __get_next_ticket
-static unsigned int __next_ticket = 0; 
-
-static inline unsigned int get_next_ticket( executor & this ) with(this) {
-    return __atomic_fetch_add( &__next_ticket, 1, __ATOMIC_SEQ_CST) % nrqueues;
+static unsigned long int __next_ticket = 0; 
+
+static inline unsigned long int __get_next_ticket( executor & this ) with(this) {
+    unsigned long int temp = __atomic_fetch_add( &__next_ticket, 1, __ATOMIC_SEQ_CST) % nrqueues;
+
+    // reserve MAX for dead actors
+    if ( temp == MAX ) temp = __atomic_fetch_add( &__next_ticket, 1, __ATOMIC_SEQ_CST) % nrqueues;
+    return temp;
 } // tickets
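+
+// Note: tickets are assigned round-robin modulo nrqueues so successive actors land in different
+// mailboxes; MAX is skipped above because it is reserved to mark dead actors ( verified in send() ).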
 
-// C_TODO: update globals in this file to be static fields once the project is done
+// TODO: update globals in this file to be static fields once the static fields project is done
 static executor * __actor_executor_ = 0p;
-static bool __actor_executor_passed = false;        // was an executor passed to start_actor_system
-static unsigned long int __num_actors_;				// number of actor objects in system
+static bool __actor_executor_passed = false;            // was an executor passed to start_actor_system
+static unsigned long int __num_actors_ = 0;				// number of actor objects in system
 static struct thread$ * __actor_executor_thd = 0p;		// used to wake executor after actors finish
 struct actor {
-    unsigned long int ticket;	        // executor-queue handle to provide FIFO message execution
-    Allocation allocation_;			// allocation action
+    unsigned long int ticket;	                        // executor-queue handle
+    Allocation allocation_;			                    // allocation action
 };
 
@@ -306,8 +399,11 @@
     // Once an actor is allocated it must be sent a message or the actor system cannot stop. Hence, its receive
     // member must be called to end it
-    verifyf( __actor_executor_, "Creating actor before calling start_actor_system()." ); 
+    verifyf( __actor_executor_, "Creating actor before calling start_actor_system() can cause undefined behaviour.\n" ); 
     this.allocation_ = Nodelete;
-    this.ticket = get_next_ticket( *__actor_executor_ );
+    this.ticket = __get_next_ticket( *__actor_executor_ );
     __atomic_fetch_add( &__num_actors_, 1, __ATOMIC_SEQ_CST );
+    #ifdef STATS
+    __atomic_fetch_add( &__num_actors_stats, 1, __ATOMIC_SEQ_CST );
+    #endif
 }
 static inline void ^?{}( actor & this ) {}
@@ -338,10 +434,15 @@
 
 static inline void ?{}( message & this ) { this.allocation_ = Nodelete; }
-static inline void ?{}( message & this, Allocation allocation ) { this.allocation_ = allocation; }
-static inline void ^?{}( message & this ) {}
+static inline void ?{}( message & this, Allocation allocation ) {
+    this.allocation_ = allocation;
+    verifyf( this.allocation_ != Finished, "The Finished Allocation status is not supported for message types.\n");
+}
+static inline void ^?{}( message & this ) {
+    CFA_DEBUG( if ( this.allocation_ == Nodelete ) printf("A message at location %p was allocated but never sent.\n", &this); )
+}
 
 static inline void check_message( message & this ) {
     switch ( this.allocation_ ) {						// analyze message status
-        case Nodelete: break;
+        case Nodelete: CFA_DEBUG( this.allocation_ = Finished; ) break;
         case Delete: delete( &this ); break;
         case Destroy: ^?{}(this); break;
@@ -349,4 +450,5 @@
     } // switch
 }
+static inline void set_allocation( message & this, Allocation state ) { this.allocation_ = state; }
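+
+// Hypothetical usage inside a receive routine ( my_actor and my_msg are illustrative names only ):
+//     Allocation receive( my_actor & a, my_msg & m ) {
+//         set_allocation( m, Delete );     // this message is deleted after delivery
+//         return Nodelete;                 // the actor itself lives on
+//     }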
 
 static inline void deliver_request( request & this ) {
@@ -357,40 +459,16 @@
 }
 
-// Couple of ways to approach work stealing
-// 1: completely worker agnostic, just find a big queue and steal it
-// 2: track some heuristic of worker's load and focus on that and then pick a queue from that worker
-//   worker heuristics:
-//     - how many queues have work?
-//     - size of largest queue
-//     - total # of messages
-//     - messages currently servicing
-//     - pick randomly
-//     - pick from closer threads/workers (this can be combined with others)
-
-// lock free or global lock for queue stealing
-#define __LOCK_SWP 0
-
-__spinlock_t swp_lock;
-
-// tries to atomically swap two queues and returns a bool indicating if the swap failed
-static inline bool try_swap_queues( worker & this, unsigned int victim_idx, unsigned int my_idx ) with(this) {
-    #if __LOCK_SWP
-
-    lock( swp_lock __cfaabi_dbg_ctx2 );
-    work_queue * temp = request_queues[my_idx];
-    request_queues[my_idx] = request_queues[victim_idx];
-    request_queues[victim_idx] = temp;
-    unlock( swp_lock );
-    
-    return true;
-
-    #else // __LOCK_SWP else
+// tries to atomically swap two queues and returns 0p if the swap failed
+// returns ptr to newly owned queue if swap succeeds
+static inline work_queue * try_swap_queues( worker & this, unsigned int victim_idx, unsigned int my_idx ) with(this) {
     work_queue * my_queue = request_queues[my_idx];
     work_queue * other_queue = request_queues[victim_idx];
-    if ( other_queue == 0p || my_queue == 0p ) return false;
+
+    // if the other queue is 0p then it is in the process of being stolen by another worker
+    if ( other_queue == 0p ) return 0p;
 
-    // try to set our queue ptr to be 0p. If it fails someone moved our queue so return false
+    // try to set our queue ptr to be 0p. If it fails someone moved our queue so return 0p
     if ( !__atomic_compare_exchange_n( &request_queues[my_idx], &my_queue, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
-        return false;
+        return 0p;
 
-    // try to set other queue ptr to be our queue ptr. If it fails someone moved the other queue so fix up then return false
+    // try to set other queue ptr to be our queue ptr. If it fails someone moved the other queue so fix up then return 0p
@@ -398,68 +476,17 @@
         /* paranoid */ verify( request_queues[my_idx] == 0p );
         request_queues[my_idx] = my_queue; // reset my queue ptr back to appropriate val
-        return false;
+        return 0p;
     }
 
     // we have successfully swapped and since our queue is 0p no one will touch it so write back new queue ptr non atomically
     request_queues[my_idx] = other_queue; // last write does not need to be atomic
-    return true;
-
-    #endif // __LOCK_SWP
+    return other_queue;
 }
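+
+// Illustrative interleaving for a thief swapping slots my_idx and victim_idx:
+//     CAS( request_queues[my_idx]: my_queue -> 0p )                // claim own slot; failure => our queue was moved
+//     CAS( request_queues[victim_idx]: other_queue -> my_queue )   // publish our queue; failure => victim moved, restore slot
+//     request_queues[my_idx] = other_queue;                        // our slot still holds 0p, so a plain write suffices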
 
 // once a worker to steal from has been chosen, choose queue to steal from
-static inline bool choose_queue( worker & this, unsigned int victim_id, unsigned int & last_idx ) with(this) {
-    #if __RAND_QUEUE
-    unsigned int tries = 0;
-    const unsigned int start_idx = prng( n_queues );
-    work_queue * curr_steal_queue;
-
-    for ( unsigned int i = start_idx; tries < n_queues; i = (i + 1) % n_queues ) {
-        tries++;
-        curr_steal_queue = request_queues[i];
-        #if __STEAL_WORK
-
-        // avoid empty queues and queues that are being operated on
-        if ( curr_steal_queue->being_processed || isEmpty( *curr_steal_queue->c_queue ) )
-            continue;
-        
-        // in this case we just return from transfer if this doesn't work
-        transfer( *curr_steal_queue, &current_queue, request_queues, i );
-        if ( isEmpty( *current_queue ) ) continue;
-        last_idx = i;
-
-        #ifdef __STEAL_STATS
-        stolen++;
-        #endif // __STEAL_STATS
-
-        #else // __STEAL_WORK else
-
-        // avoid empty queues and queues that are being operated on
-        if ( curr_steal_queue == 0p || curr_steal_queue->being_processed || isEmpty( *curr_steal_queue->c_queue ) )
-            continue;
-
-        #ifdef __STEAL_STATS
-        bool success = try_swap_queues( this, i, last_idx );
-        if ( success ) stolen++;
-        #else
-        try_swap_queues( this, i, last_idx );
-        #endif // __STEAL_STATS
-
-        // C_TODO: try transfer immediately
-        // transfer( *request_queues[last_idx], &current_queue, request_queues, last_idx );
-        // if ( isEmpty( *current_queue ) ) return false;
-        return false;
-
-        #endif // __STEAL_WORK
-
-        return true;
-    } // for
-    return false;
-
-    #elif __RAND_WORKER
-
+static inline void choose_queue( worker & this, unsigned int victim_id, unsigned int swap_idx ) with(this) {
     // have to calculate victim start and range since victim may be deleted before us in shutdown
-    const unsigned int queues_per_worker = n_queues / n_workers;
-    const unsigned int extras = n_queues % n_workers;
+    const unsigned int queues_per_worker = executor_->nrqueues / executor_->nworkers;
+    const unsigned int extras = executor_->nrqueues % executor_->nworkers;
     unsigned int vic_start, vic_range;
     if ( extras > victim_id  ) {
@@ -471,4 +498,5 @@
     }
     unsigned int start_idx = prng( vic_range );
+
     unsigned int tries = 0;
     work_queue * curr_steal_queue;
@@ -481,83 +509,115 @@
             continue;
 
-        try_swap_queues( this, i, last_idx );
-
-        #ifdef __STEAL_STATS
-        bool success = try_swap_queues( this, i, last_idx );
-        if ( success ) stolen++;
+        #ifdef STATS
+        curr_steal_queue = try_swap_queues( this, i + vic_start, swap_idx );
+        if ( curr_steal_queue ) {
+            msgs_stolen += curr_steal_queue->c_queue->count;
+            stolen++;
+            __atomic_add_fetch(&executor_->w_infos[victim_id].stolen_from, 1, __ATOMIC_RELAXED);
+            replaced_queue[swap_idx]++;
+            __atomic_add_fetch(&stolen_arr[ i + vic_start ], 1, __ATOMIC_RELAXED);
+        } else {
+            failed_swaps++;
+        }
         #else
-        try_swap_queues( this, i, last_idx );
-        #endif // __STEAL_STATS
-
-        // C_TODO: try transfer immediately
-        // transfer( *request_queues[last_idx], &current_queue, request_queues, last_idx );
-        // if ( isEmpty( *current_queue ) ) return false;
-        return false;
-    }
-    #endif 
+        curr_steal_queue = try_swap_queues( this, i + vic_start, swap_idx );
+        #endif // STATS
+
+        return;
+    }
+
+    return;
 }
 
 // choose a worker to steal from
-static inline bool choose_victim( worker & this, unsigned int & last_idx ) with(this) {
-    #if __RAND_WORKER
-    unsigned int victim = prng( n_workers );
-    if ( victim == id ) victim = ( victim + 1 ) % n_workers;
-    return choose_queue( this, victim, last_idx );
-    #else
-    return choose_queue( this, 0, last_idx );
-    #endif
-}
-
-// look for work to steal
-// returns a bool: true => a queue was stolen, false => no work was stolen
-static inline bool steal_work( worker & this, unsigned int & last_idx ) with(this) { // C_TODO: add debug tracking of how many steals occur
-    // to steal queue acquire both queue's locks in address ordering (maybe can do atomic swap)
-    // maybe a flag to hint which queue is being processed
-    // look at count to see if queue is worth stealing (dont steal empty queues)
-    // if steal and then flag is up then dont process and just continue looking at own queues
-    // (best effort approach) its ok if stealing isn't fruitful
-    //          -> more important to not delay busy threads
-
-    return choose_victim( this, last_idx );
+static inline void steal_work( worker & this, unsigned int swap_idx ) with(this) {
+    #if RAND
+    unsigned int victim = prng( executor_->nworkers );
+    if ( victim == id ) victim = ( victim + 1 ) % executor_->nworkers;
+    choose_queue( this, victim, swap_idx );
+    #elif SEARCH
+    unsigned long long min = MAX; // smaller timestamp means longer since service
+    int min_id = 0; // use ints not uints to avoid integer underflow without hacky math
+    int n_workers = executor_->nworkers;
+    unsigned long long curr_stamp;
+    int scount = 1;
+    for ( int i = (id + 1) % n_workers; scount < n_workers; i = (i + 1) % n_workers, scount++ ) {
+        curr_stamp = executor_->w_infos[i].stamp;
+        if ( curr_stamp < min ) {
+            min = curr_stamp;
+            min_id = i;
+        }
+    } 
+    choose_queue( this, min_id, swap_idx );
+    #endif
 }
 
 void main( worker & this ) with(this) {
+    #ifdef STATS
+    for ( i; executor_->nrqueues ) {
+        replaced_queue[i] = 0;
+        __atomic_store_n( &stolen_arr[i], 0, __ATOMIC_SEQ_CST );
+    }
+    #endif
+
     // threshold of empty queues we see before we go stealing
-    const unsigned int steal_threshold = 2 * n_queues;
+    const unsigned int steal_threshold = 2 * range;
+
+    // store frequently modified loop state locally instead of in the worker struct to avoid any potential false sharing
+    unsigned int empty_count = 0;
+    request & req;
     unsigned int curr_idx;
     work_queue * curr_work_queue;
+
     Exit:
     for ( unsigned int i = 0;; i = (i + 1) % range ) { // cycle through set of request buffers
-        // C_TODO: potentially check queue count instead of immediately trying to transfer
         curr_idx = i + start;
         curr_work_queue = request_queues[curr_idx];
-        transfer( *curr_work_queue, &current_queue, request_queues, curr_idx );
-        if ( isEmpty( *current_queue ) ) {
-            #if __STEAL
+        
+        // check if queue is empty before trying to gulp it
+        if ( isEmpty( *curr_work_queue->c_queue ) ) {
+            #ifdef __STEAL
             empty_count++;
             if ( empty_count < steal_threshold ) continue;
-            empty_count = 0; // C_TODO: look into stealing backoff schemes
-            #ifdef __STEAL_STATS
+            #else
+            continue;
+            #endif
+        }
+        transfer( *curr_work_queue, &current_queue );
+        #ifdef STATS
+        gulps++;
+        #endif // STATS
+        #ifdef __STEAL
+        if ( isEmpty( *current_queue ) ) {
+            if ( unlikely( no_steal ) ) continue;
+            empty_count++;
+            if ( empty_count < steal_threshold ) continue;
+            empty_count = 0;
+
+            __atomic_store_n( &executor_->w_infos[id].stamp, rdtscl(), __ATOMIC_RELAXED );
+            
+            #ifdef STATS
             try_steal++;
-            #endif // __STEAL_STATS
-
-            if ( ! steal_work( this, curr_idx ) ) continue;
-
-            #else // __STEAL else
-
+            #endif // STATS
+            
+            steal_work( this, start + prng( range ) );
             continue;
-            
-            #endif // __STEAL
         }
+        #endif // __STEAL
         while ( ! isEmpty( *current_queue ) ) {
+            #ifdef STATS
+            processed++;
+            #endif
             &req = &remove( *current_queue );
-            if ( !&req ) continue; // possibly add some work stealing/idle sleep here
+            if ( !&req ) continue;
             if ( req.stop ) break Exit;
             deliver_request( req );
         }
-        #if __STEAL
+        #ifdef __STEAL
         curr_work_queue->being_processed = false; // set done processing
+        empty_count = 0; // we found work so reset empty counter
         #endif
-        empty_count = 0; // we found work so reset empty counter
+        
+        // potentially reclaim some of the current queue's vector space if it is unused
         reclaim( *current_queue );
     } // for
@@ -569,4 +629,5 @@
 
 static inline void send( actor & this, request & req ) {
+    verifyf( this.ticket != (unsigned long int)MAX, "Attempted to send message to deleted/dead actor\n" );
     send( *__actor_executor_, req, this.ticket );
 }
@@ -578,5 +639,9 @@
 }
 
-static inline void start_actor_system() { start_actor_system( active_cluster()->procs.total ); }
+// TODO: potentially revisit the number of processors used by default
+//  ( currently the value stored in active_cluster()->procs.total is often stale 
+//  and doesn't reflect how many procs are allocated )
+// static inline void start_actor_system() { start_actor_system( active_cluster()->procs.total ); }
+static inline void start_actor_system() { start_actor_system( 1 ); }
 
 static inline void start_actor_system( executor & this ) {
@@ -595,2 +660,14 @@
     __actor_executor_passed = false;
 }
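+
+// Typical lifecycle sketch ( the stop routine's name is assumed from the surrounding file, not shown in this hunk ):
+//     start_actor_system();                // or start_actor_system( nprocs ) / start_actor_system( e )
+//     ... create actors and send them messages ...
+//     stop_actor_system();                 // returns once all actors have finished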
+
+// Default messages to send to any actor to change status
+// struct __DeleteMsg { inline message; } DeleteMsg;
+// void ?{}( __DeleteMsg & this ) { ((message &) this){ Finished }; }
+// struct __DestroyMsg { inline message; } DestroyMsg;
+// void ?{}( __DestroyMsg & this ) { ((message &) this){ Finished }; }
+// struct __FinishedMsg { inline message; } FinishedMsg;
+// void ?{}( __FinishedMsg & this ) { ((message &) this){ Finished }; }
+
+// Allocation receive( actor & this, __DeleteMsg & msg ) { return Delete; }
+// Allocation receive( actor & this, __DestroyMsg & msg ) { return Destroy; }
+// Allocation receive( actor & this, __FinishedMsg & msg ) { return Finished; }
