| 1 | // Mutex buffer is embedded in the nomutex executor to allow the executor to delete the workers without causing a
|
|---|
| 2 | // deadlock. If the executor is the monitor and the buffer is class, the thread calling the executor's destructor
|
|---|
| 3 | // (which is mutex) blocks when deleting the workers, preventing outstanding workers from calling remove to drain the
|
|---|
| 4 | // buffer.
|
|---|
| 5 |
|
|---|
| 6 | #include <containers/list.hfa>
|
|---|
| 7 | #include <thread.hfa>
|
|---|
| 8 | //#include <malloc.h> // trace
|
|---|
| 9 |
|
|---|
| 10 | forall( dtype T | $dlistable(T, T) ) {
|
|---|
| 11 | monitor Buffer { // unbounded buffer
|
|---|
| 12 | dlist( T, T ) queue; // unbounded list of work requests
|
|---|
| 13 | condition delay;
|
|---|
| 14 | }; // Buffer
|
|---|
| 15 |
|
|---|
| 16 | void insert( Buffer(T) & mutex buf, T * elem ) with(buf) {
|
|---|
| 17 | dlist( T, T ) * qptr = &queue; // workaround https://cforall.uwaterloo.ca/trac/ticket/166
|
|---|
| 18 | insert_last( *qptr, *elem ); // insert element into buffer
|
|---|
| 19 | signal( delay ); // restart
|
|---|
| 20 | } // insert
|
|---|
| 21 |
|
|---|
| 22 | T * remove( Buffer(T) & mutex buf ) with(buf) {
|
|---|
| 23 | dlist( T, T ) * qptr = &queue; // workaround https://cforall.uwaterloo.ca/trac/ticket/166
|
|---|
| 24 | if ( (*qptr)`is_empty ) wait( delay ); // no request to process ? => wait
|
|---|
| 25 | return &pop_first( *qptr );
|
|---|
| 26 | } // remove
|
|---|
| 27 |
|
|---|
| 28 | // void ?{}( Buffer(T) & ) {}
|
|---|
| 29 | // void ^?{}( Buffer(T) & mutex ) {}
|
|---|
| 30 | } // forall
|
|---|
| 31 |
|
|---|
| 32 | struct WRequest { // client request, no return
|
|---|
| 33 | void (* action)( void );
|
|---|
| 34 | DLISTED_MGD_IMPL_IN(WRequest)
|
|---|
| 35 | }; // WRequest
|
|---|
| 36 | DLISTED_MGD_IMPL_OUT(WRequest)
|
|---|
| 37 |
|
|---|
| 38 | void ?{}( WRequest & req ) with(req) { action = 0; }
|
|---|
| 39 | void ?{}( WRequest & req, void (* action)( void ) ) with(req) { req.action = action; }
|
|---|
| 40 | bool stop( WRequest & req ) { return req.action == 0; }
|
|---|
| 41 | void doit( WRequest & req ) { req.action(); }
|
|---|
| 42 |
|
|---|
| 43 | // Each worker has its own set (when requests buffers > workers) of work buffers to reduce contention between client
|
|---|
| 44 | // and server, where work requests arrive and are distributed into buffers in a roughly round-robin order.
|
|---|
| 45 |
|
|---|
| 46 | thread Worker {
|
|---|
| 47 | Buffer(WRequest) * requests;
|
|---|
| 48 | WRequest * request;
|
|---|
| 49 | unsigned int start, range;
|
|---|
| 50 | }; // Worker
|
|---|
| 51 |
|
|---|
| 52 | void main( Worker & w ) with(w) {
|
|---|
| 53 | for ( int i = 0;; i = (i + 1) % range ) {
|
|---|
| 54 | request = remove( requests[i + start] );
|
|---|
| 55 | if ( ! request ) { yield(); continue; }
|
|---|
| 56 | if ( stop( *request ) ) break;
|
|---|
| 57 | doit( *request );
|
|---|
| 58 | delete( request );
|
|---|
| 59 | } // for
|
|---|
| 60 | } // Worker::main
|
|---|
| 61 |
|
|---|
| 62 | void ?{}( Worker & worker, cluster * wc, Buffer(WRequest) * requests, unsigned int start, unsigned int range ) {
|
|---|
| 63 | ((thread &)worker){ *wc };
|
|---|
| 64 | worker.[requests, request, start, range] = [requests, 0p, start, range];
|
|---|
| 65 | } // ?{}
|
|---|
| 66 |
|
|---|
| 67 | WRequest * current_request( Worker & worker ) { return worker.request; }
|
|---|
| 68 |
|
|---|
| 69 | struct Executor {
|
|---|
| 70 | cluster * cluster; // if workers execute on separate cluster
|
|---|
| 71 | processor ** processors; // array of virtual processors adding parallelism for workers
|
|---|
| 72 | Buffer(WRequest) * requests; // list of work requests
|
|---|
| 73 | Worker ** workers; // array of workers executing work requests
|
|---|
| 74 | unsigned int nprocessors, nworkers, nrqueues; // number of processors/threads/request queues
|
|---|
| 75 | bool sepClus; // use same or separate cluster for executor
|
|---|
| 76 | }; // Executor
|
|---|
| 77 |
|
|---|
| 78 | static thread_local unsigned int next; // demultiplexed across worker buffers
|
|---|
| 79 |
|
|---|
| 80 | unsigned int tickets( Executor & ex ) with(ex) {
|
|---|
| 81 | //return uFetchAdd( next, 1 ) % nrqueues;
|
|---|
| 82 | return next++ % nrqueues; // no locking, interference randomizes
|
|---|
| 83 | } // tickets
|
|---|
| 84 |
|
|---|
| 85 | void ?{}( Executor & ex, unsigned int np, unsigned int nw, unsigned int nr, bool sc = false ) with(ex) {
|
|---|
| 86 | [nprocessors, nworkers, nrqueues, sepClus] = [np, nw, nr, sc];
|
|---|
| 87 | assert( nrqueues >= nworkers );
|
|---|
| 88 | cluster = sepClus ? new( "Executor" ) : active_cluster();
|
|---|
| 89 | processors = aalloc( nprocessors );
|
|---|
| 90 | requests = anew( nrqueues );
|
|---|
| 91 | workers = aalloc( nworkers );
|
|---|
| 92 |
|
|---|
| 93 | for ( i; nprocessors ) {
|
|---|
| 94 | processors[i] = new( *cluster );
|
|---|
| 95 | } // for
|
|---|
| 96 |
|
|---|
| 97 | unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
|
|---|
| 98 | // for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) {
|
|---|
| 99 | for ( i; nworkers : start; 0u ~ @ ~ range : range; ) {
|
|---|
| 100 | range = reqPerWorker + ( i < extras ? 1 : 0 );
|
|---|
| 101 | workers[i] = new( cluster, requests, start, range );
|
|---|
| 102 | } // for
|
|---|
| 103 | } // ?{}
|
|---|
| 104 |
|
|---|
| 105 | void ?{}( Executor & ex, unsigned int nprocessors, unsigned int nworkers, bool sepClus = false ) {
|
|---|
| 106 | ex{ nprocessors, nworkers, nworkers, sepClus };
|
|---|
| 107 | }
|
|---|
| 108 | void ?{}( Executor & ex, unsigned int nprocessors, bool sepClus = false ) {
|
|---|
| 109 | ex{ nprocessors, nprocessors, nprocessors, sepClus };
|
|---|
| 110 | }
|
|---|
| 111 | void ?{}( Executor & ex ) { // special for current cluster
|
|---|
| 112 | ex{ 0, active_cluster()->nprocessors, false };
|
|---|
| 113 | }
|
|---|
| 114 | void ^?{}( Executor & ex ) with(ex) {
|
|---|
| 115 | // Add one sentinel per worker to stop them. Since in destructor, no new external work should be queued. Cannot
|
|---|
| 116 | // combine next two loops and only have a single sentinel because workers arrive in arbitrary order, so worker1 may
|
|---|
| 117 | // take the single sentinel while waiting for worker 0 to end.
|
|---|
| 118 |
|
|---|
| 119 | WRequest sentinel[nworkers];
|
|---|
| 120 | unsigned int reqPerWorker = nrqueues / nworkers;
|
|---|
| 121 | for ( unsigned int i = 0, step = 0; i < nworkers; i += 1, step += reqPerWorker ) {
|
|---|
| 122 | insert( requests[step], &sentinel[i] ); // force eventually termination
|
|---|
| 123 | } // for
|
|---|
| 124 | for ( i; nworkers ) {
|
|---|
| 125 | delete( workers[i] );
|
|---|
| 126 | } // for
|
|---|
| 127 | for ( i; nprocessors ) {
|
|---|
| 128 | delete( processors[i] );
|
|---|
| 129 | } // for
|
|---|
| 130 |
|
|---|
| 131 | free( workers );
|
|---|
| 132 | // adelete( nrqueues, requests );
|
|---|
| 133 | for ( i; nrqueues ) ^?{}( requests[i] );
|
|---|
| 134 | free( requests );
|
|---|
| 135 | free( processors );
|
|---|
| 136 | if ( sepClus ) { delete( cluster ); }
|
|---|
| 137 | } // ^?{}
|
|---|
| 138 |
|
|---|
| 139 | void send( Executor & ex, void (* action)( void ) ) { // asynchronous call, no return value
|
|---|
| 140 | WRequest * node = new( action );
|
|---|
| 141 | insert( ex.requests[tickets( ex )], node );
|
|---|
| 142 | } // send
|
|---|
| 143 |
|
|---|
| 144 |
|
|---|
| 145 | int counter = 0;
|
|---|
| 146 |
|
|---|
| 147 | void workie( void ) {
|
|---|
| 148 | __atomic_add_fetch( &counter, 1, __ATOMIC_SEQ_CST );
|
|---|
| 149 | // fprintf( stderr, "workie\n" );
|
|---|
| 150 | }
|
|---|
| 151 |
|
|---|
| 152 | int main( int argc, char * argv[] ) {
|
|---|
| 153 | int times = 1_000_000;
|
|---|
| 154 | if ( argc == 2 ) times = atoi( argv[1] );
|
|---|
| 155 | processor p[7];
|
|---|
| 156 | {
|
|---|
| 157 | printf( "%d\n", active_cluster()->nprocessors );
|
|---|
| 158 | Executor exector;
|
|---|
| 159 | for ( i; times ) {
|
|---|
| 160 | send( exector, workie );
|
|---|
| 161 | if ( i % 100 == 0 ) {
|
|---|
| 162 | // fprintf( stderr, "%d\n", i );
|
|---|
| 163 | yield();
|
|---|
| 164 | }
|
|---|
| 165 | } // for
|
|---|
| 166 | }
|
|---|
| 167 | printf( "%d\n", counter );
|
|---|
| 168 | }
|
|---|
| 169 |
|
|---|
| 170 | // Local Variables: //
|
|---|
| 171 | // tab-width: 4" //
|
|---|
| 172 | // compile-command: "cfa executor.cfa" //
|
|---|
| 173 | // End: //
|
|---|