// The mutex Buffer is embedded in the nomutex Executor to allow the executor to delete the workers without causing a
// deadlock.  If the Executor were a monitor and the Buffer a member class, the thread calling the Executor's
// destructor (which would hold the monitor's mutual exclusion) would block when deleting the workers, preventing
// outstanding workers from calling remove to drain the buffer.
| 5 |  | 
|---|
| 6 | #include <thread.hfa> | 
|---|
| 7 | #include <containers/list.hfa> | 
|---|
| 8 |  | 
|---|
// Generic unbounded buffer of intrusively-linked elements, protected by monitor mutual exclusion.
forall( T &, TLink& = dlink(T) | embedded(T, TLink, dlink(T)) ) {
	monitor Buffer {										// unbounded buffer
		dlist( T, TLink ) queue;							// unbounded list of work requests
		condition delay;									// for blocking remove; unused while the wait below is commented out
	}; // Buffer

	// Append elem to the buffer; elem must remain valid until removed (intrusive link, no copy).
	// signal is a no-op while remove never waits.
	void insert( Buffer(T, TLink) & mutex buf, T * elem ) with(buf) {
		dlist( T, TLink ) * qptr = &queue;					// workaround https://cforall.uwaterloo.ca/trac/ticket/166
		insert_last( *qptr, *elem );						// insert element into buffer
		signal( delay );									// restart
	} // insert

	// Non-blocking removal: return the oldest element, or 0p when the buffer is empty,
	// so callers can poll (see Worker main) instead of blocking inside the monitor.
	T * remove( Buffer(T, TLink) & mutex buf ) with(buf) {
		dlist( T, TLink ) * qptr = &queue;					// workaround https://cforall.uwaterloo.ca/trac/ticket/166
		// if ( (*qptr)`isEmpty ) wait( delay );			// blocking alternative: no request to process ? => wait
		if ( (*qptr)`isEmpty ) return 0p;					// no request to process ? => poll
		return &try_pop_front( *qptr );
	} // remove
} // forall
|---|
| 28 |  | 
|---|
// Client work request: an action to execute plus an intrusive link for queueing in a Buffer.
// A request whose action is 0p is a "stop" sentinel (see stop / Worker main).
struct WRequest {											// client request, no return
	void (* action)( void );								// work to perform; 0p means stop
	inline dlink(WRequest);									// intrusive list link used by dlist
}; // WRequest
P9_EMBEDDED(WRequest, dlink(WRequest))
|---|
| 34 |  | 
|---|
// Default constructor: a stop sentinel (0p action); use 0p, the CFA null-pointer constant,
// for consistency with the rest of the file (cf. Worker constructor, remove).
void ?{}( WRequest & req ) with(req) { action = 0p; }
|---|
// Construct a request that performs the given action; the with clause is unnecessary
// because the field is qualified explicitly (parameter action shadows the field).
void ?{}( WRequest & req, void (* action)( void ) ) { req.action = action; }
|---|
// A request with a 0p action is the stop sentinel; compare against 0p (null-pointer
// constant) rather than integer 0 for consistency with the rest of the file.
bool stop( WRequest & req ) { return req.action == 0p; }
|---|
// Execute the request's action; caller must ensure it is not a stop sentinel (0p action).
void doit( WRequest & req ) with(req) { action(); }
|---|
| 39 |  | 
|---|
// Each worker has its own set (when request buffers > workers) of work buffers to reduce contention between client
// and server, where work requests arrive and are distributed into buffers in a roughly round-robin order.
|---|
// Worker thread: repeatedly polls its slice [start, start+range) of the executor's request buffers.
thread Worker {
	Buffer(WRequest) * requests;							// shared array of request buffers (owned by Executor)
	WRequest * request;										// request currently being processed; 0p when idle
	unsigned int start, range;								// this worker's slice of the requests array
}; // Worker
|---|
| 48 |  | 
|---|
// Worker body: cycle round-robin over the buffers in [start, start+range), executing
// requests until a stop (sentinel) request is received.
void main( Worker & w ) with(w) {
	for ( int i = 0;; i = (i + 1) % range ) {				// round-robin over this worker's buffers
		request = remove( requests[i + start] );			// non-blocking: 0p when buffer empty
		if ( ! request ) { yield(); continue; }				// nothing to do: yield and try the next buffer
		if ( stop( *request ) ) break;						// sentinel => terminate; sentinel is stack-allocated by ^?{}, must NOT be deleted
		doit( *request );									// execute client action
		delete( request );									// normal requests are heap-allocated by send
	} // for
} // Worker::main
|---|
| 58 |  | 
|---|
// Construct a worker on cluster wc, polling buffers [start, start+range) of the requests array.
void ?{}( Worker & worker, cluster * wc, Buffer(WRequest) * requests, unsigned int start, unsigned int range ) {
	((thread &)worker){ *wc };								// attach the underlying thread to the given cluster
	worker.requests = requests;
	worker.request = 0p;									// no request in progress yet
	worker.start = start;
	worker.range = range;
} // ?{}
|---|
| 63 |  | 
|---|
// Observe the request the worker is currently processing (0p when idle).
WRequest * current_request( Worker & worker ) with(worker) { return request; }
|---|
| 65 |  | 
|---|
// Executor: a fixed pool of worker threads servicing an array of request buffers,
// optionally on a private cluster with dedicated virtual processors.
// Intentionally a plain (nomutex) struct — see comment at top of file.
struct Executor {
	cluster * cluster;										// if workers execute on separate cluster
	processor ** processors;								// array of virtual processors adding parallelism for workers
	Buffer(WRequest) * requests;							// list of work requests
	Worker ** workers;										// array of workers executing work requests
	unsigned int nprocessors, nworkers, nrqueues;			// number of processors/threads/request queues
	bool sepClus;											// use same or separate cluster for executor
	unsigned int next;										// demultiplexed across worker buffers
}; // Executor
|---|
| 75 |  | 
|---|
// Choose the next request buffer for a client insert, roughly round-robin across nrqueues.
// The unsynchronized increment is deliberate: occasional lost updates merely perturb the
// distribution, they do not affect correctness.
unsigned int tickets( Executor & ex ) with(ex) {
	//return uFetchAdd( next, 1 ) % nrqueues;				// atomic alternative
	return next++ % nrqueues;								// no locking, interference randomizes
} // tickets
|---|
| 80 |  | 
|---|
// Construct an executor with np processors, nw workers, and nr request buffers (nr >= nw);
// sc selects a private cluster for the executor's threads.
void ?{}( Executor & ex, unsigned int np, unsigned int nw, unsigned int nr, bool sc = false ) with(ex) {
	[nprocessors, nworkers, nrqueues, sepClus] = [np, nw, nr, sc];
	assert( nrqueues >= nworkers );							// every worker needs at least one buffer
	cluster = sepClus ? new( "Executor" ) : active_cluster();
	processors = aalloc( nprocessors );
	requests = anew( nrqueues );							// anew runs Buffer's constructor on each element
	workers = aalloc( nworkers );

	for ( i; nprocessors ) {
		processors[i] = new( *cluster );
	} // for

	// Divide the nrqueues buffers among the nworkers workers; the first "extras" workers
	// receive one extra buffer when the division is uneven.
	unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
//	for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) {
	for ( i; nworkers : start; 0u ~ @ ~ range : range; ) {	// CFA multi-index loop: start accumulates range each iteration (same as commented loop above)
		range = reqPerWorker + ( i < extras ? 1 : 0 );
		workers[i] = new( cluster, requests, start, range );
	} // for
} // ?{}
|---|
| 100 |  | 
|---|
// Convenience: one request buffer per worker.
void ?{}( Executor & ex, unsigned int nprocessors, unsigned int nworkers, bool sepClus = false ) {
	ex{ nprocessors, nworkers, nworkers, sepClus };
}
|---|
// Convenience: one worker and one request buffer per processor.
void ?{}( Executor & ex, unsigned int nprocessors, bool sepClus = false ) {
	ex{ nprocessors, nprocessors, nprocessors, sepClus };
}
|---|
// Convenience: run on the current cluster's existing processors, adding none.
void ?{}( Executor & ex ) {									// special for current cluster, no processors added
	ex{ 0, active_cluster()->nprocessors, false };
}
|---|
void ^?{}( Executor & ex ) with(ex) {
	// Add one sentinel per worker to stop them. Since in destructor, no new external work should be queued.  Cannot
	// combine next two loops and only have a single sentinel because workers arrive in arbitrary order, so worker1 may
	// take the single sentinel while waiting for worker 0 to end.

	WRequest sentinel[nworkers];							// stack-allocated: workers must NOT delete stop requests
	// BUG FIX: recompute each worker's buffer slice exactly as the constructor does, so every
	// sentinel lands in a buffer polled by its intended worker.  Stepping by reqPerWorker alone
	// drifts off a worker's slice when nrqueues % nworkers != 0 (the first "extras" workers own
	// one extra buffer), leaving some workers without a sentinel and deadlocking the deletes below.
	unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
	for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) {
		range = reqPerWorker + ( i < extras ? 1 : 0 );
		insert( requests[start], &sentinel[i] );			// force eventual termination
	} // for
	for ( i; nworkers ) {
		delete( workers[i] );								// joins each worker: blocks until it takes its sentinel
	} // for
	for ( i; nprocessors ) {
		delete( processors[i] );
	} // for

	free( workers );
//	adelete( nrqueues, requests );
	for ( i; nrqueues ) ^?{}( requests[i] );				// FIX ME: problem with resolver
	free( requests );
	free( processors );
	if ( sepClus ) { delete( cluster ); }
} // ^?{}
|---|
| 134 |  | 
|---|
// Asynchronous call, no return value: wrap action in a heap request (deleted by the worker
// after execution) and queue it in one of the executor's request buffers.
void send( Executor & ex, void (* action)( void ) ) {
	WRequest * node = new( action );
	unsigned int queue = tickets( ex );						// choose a buffer, roughly round-robin
	insert( ex.requests[queue], node );
} // send
|---|
| 139 |  | 
|---|
| 140 |  | 
|---|
int counter = 0;											// completed work actions, updated atomically by workers

// Trivial work action: atomically bump the global completion counter.
void work( void ) {
	__atomic_add_fetch( &counter, 1, __ATOMIC_SEQ_CST );
	// fprintf( stderr, "workie\n" );
}
|---|
| 147 |  | 
|---|
// Driver: queue "times" work requests into a default executor and print the completion count.
int main( int argc, char * argv[] ) {
	int times = 1_000_000;									// default number of requests
	if ( argc == 2 ) times = atoi( argv[1] );
	processor p[7];											// add parallelism on the current cluster
	{
		Executor executor;									// block scope: destructor drains buffers and joins workers
		for ( i; times ) {
			send( executor, work );
			if ( i % 100 == 0 ) yield();					// let workers run so buffers do not grow unboundedly
		} // for
	}														// executor destructor runs here
	printf( "%d\n", counter );
}
|---|
| 161 |  | 
|---|
// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa executor.cfa" //
// End: //
|---|