Changeset eef8dfb for libcfa/src/executor.cfa
- Timestamp: Jan 7, 2021, 2:55:57 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 58fe85a
- Parents: bdfc032, 44e37ef
Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to the differences from either parent.

File: 1 edited
libcfa/src/executor.cfa
This merge reworks the executor example (rbdfc032 → reef8dfb):

- The unbounded Buffer monitor now stores work requests in a dlist from containers/list.hfa instead of the intrusive __queue_t from bits/containers.hfa, and WRequest uses the DLISTED_MGD_IMPL_IN/DLISTED_MGD_IMPL_OUT linkage instead of a hand-written next field with get_next.
- remove no longer blocks on the delay condition: it returns 0p when the buffer is empty, and the worker's main loop yields and retries on a null request.
- The Executor field nmailboxes is renamed nrqueues, and the round-robin ticket counter next moves from a thread-local variable into the Executor.
- Each Worker records its current request (exposed via current_request); the processor and worker arrays are allocated with aalloc and released with free; the destructor destroys each request queue explicitly because adelete currently trips a resolver problem.
- The default Executor constructor adds no processors and creates one worker per processor of the active cluster.
- The test driver reads the request count from the command line (default 1,000,000), declares 7 extra processors, increments the shared counter atomically in work, and yields on every 100th send rather than on the other 99.

Post-merge content of the changed region of libcfa/src/executor.cfa:

#include <thread.hfa>
#include <containers/list.hfa>

forall( dtype T | $dlistable(T, T) ) {
    monitor Buffer {                                    // unbounded buffer
        dlist( T, T ) queue;                            // unbounded list of work requests
        condition delay;
    }; // Buffer

    void insert( Buffer(T) & mutex buf, T * elem ) with(buf) {
        dlist( T, T ) * qptr = &queue;                  // workaround https://cforall.uwaterloo.ca/trac/ticket/166
        insert_last( *qptr, *elem );                    // insert element into buffer
        signal( delay );                                // restart
    } // insert

    T * remove( Buffer(T) & mutex buf ) with(buf) {
        dlist( T, T ) * qptr = &queue;                  // workaround https://cforall.uwaterloo.ca/trac/ticket/166
        // if ( (*qptr)`is_empty ) wait( delay );       // no request to process ? => wait
        if ( (*qptr)`is_empty ) return 0p;              // no request to process ? => wait
        return &pop_first( *qptr );
    } // remove
} // forall

struct WRequest {                                       // client request, no return
    void (* action)( void );
    DLISTED_MGD_IMPL_IN(WRequest)
}; // WRequest
DLISTED_MGD_IMPL_OUT(WRequest)

void ?{}( WRequest & req ) with(req) { action = 0; }
void ?{}( WRequest & req, void (* action)( void ) ) with(req) { req.action = action; }
bool stop( WRequest & req ) { return req.action == 0; }
void doit( WRequest & req ) { req.action(); }

// Each worker has its own set (when request buffers > workers) of work buffers to reduce contention between client
// and server, where work requests arrive and are distributed into buffers in a roughly round-robin order.

thread Worker {
    Buffer(WRequest) * requests;
    WRequest * request;
    unsigned int start, range;
}; // Worker

void main( Worker & w ) with(w) {
    for ( int i = 0;; i = (i + 1) % range ) {
        request = remove( requests[i + start] );
        if ( ! request ) { yield(); continue; }
        if ( stop( *request ) ) break;
        doit( *request );
        delete( request );
    } // for
} // Worker::main

void ?{}( Worker & worker, cluster * wc, Buffer(WRequest) * requests, unsigned int start, unsigned int range ) {
    ((thread &)worker){ *wc };                          // create on given cluster
    worker.[requests, request, start, range] = [requests, 0p, start, range];
} // ?{}

WRequest * current_request( Worker & worker ) { return worker.request; }

struct Executor {
    cluster * cluster;                                  // if workers execute on separate cluster
    processor ** processors;                            // array of virtual processors adding parallelism for workers
    Buffer(WRequest) * requests;                        // list of work requests
    Worker ** workers;                                  // array of workers executing work requests
    unsigned int nprocessors, nworkers, nrqueues;       // number of processors/threads/request queues
    bool sepClus;                                       // use same or separate cluster for executor
    unsigned int next;                                  // demultiplexed across worker buffers
}; // Executor

unsigned int tickets( Executor & ex ) with(ex) {
    //return uFetchAdd( next, 1 ) % nrqueues;
    return next++ % nrqueues;                           // no locking, interference randomizes
} // tickets

void ?{}( Executor & ex, unsigned int np, unsigned int nw, unsigned int nr, bool sc = false ) with(ex) {
    [nprocessors, nworkers, nrqueues, sepClus] = [np, nw, nr, sc];
    assert( nrqueues >= nworkers );
    cluster = sepClus ? new( "Executor" ) : active_cluster();
    processors = aalloc( nprocessors );
    requests = anew( nrqueues );
    workers = aalloc( nworkers );

    for ( i; nprocessors ) {
        processors[i] = new( *cluster );
    } // for

    unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
    // for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) {
    for ( i; nworkers : start; 0u ~ @ ~ range : range; ) {
        range = reqPerWorker + ( i < extras ? 1 : 0 );
        workers[i] = new( cluster, requests, start, range );
    } // for
} // ?{}

void ?{}( Executor & ex, unsigned int nprocessors, unsigned int nworkers, bool sepClus = false ) {
    ex{ nprocessors, nworkers, nworkers, sepClus };
}
void ?{}( Executor & ex, unsigned int nprocessors, bool sepClus = false ) {
    ex{ nprocessors, nprocessors, nprocessors, sepClus };
}
void ?{}( Executor & ex ) {                             // special for current cluster, no processors added
    ex{ 0, active_cluster()->nprocessors, false };
}

void ^?{}( Executor & ex ) with(ex) {
    // Add one sentinel per worker to stop them. Since in destructor, no new external work should be queued. Cannot
    // combine next two loops and only have a single sentinel because workers arrive in arbitrary order, so worker1 may
    // take the single sentinel while waiting for worker 0 to end.

    WRequest sentinel[nworkers];
    unsigned int reqPerWorker = nrqueues / nworkers;
    for ( unsigned int i = 0, step = 0; i < nworkers; i += 1, step += reqPerWorker ) {
        insert( requests[step], &sentinel[i] );         // force eventual termination
    } // for
    for ( i; nworkers ) {
        delete( workers[i] );
    } // for
    for ( i; nprocessors ) {
        delete( processors[i] );
    } // for

    free( workers );
    // adelete( nrqueues, requests );
    for ( i; nrqueues ) ^?{}( requests[i] );            // FIX ME: problem with resolver
    free( requests );
    free( processors );
    if ( sepClus ) { delete( cluster ); }
} // ^?{}

void send( Executor & ex, void (* action)( void ) ) {  // asynchronous call, no return value
    WRequest * node = new( action );
    insert( ex.requests[tickets( ex )], node );
} // send

int counter = 0;

void work( void ) {
    __atomic_add_fetch( &counter, 1, __ATOMIC_SEQ_CST );
    // fprintf( stderr, "workie\n" );
}

int main( int argc, char * argv[] ) {
    int times = 1_000_000;
    if ( argc == 2 ) times = atoi( argv[1] );
    processor p[7];
    {
        Executor exector;
        for ( i; times ) {
            send( exector, work );
            if ( i % 100 == 0 ) yield();
        } // for
    }
    printf( "%d\n", counter );
}

// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa executor.cfa" //
// End: //
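The Executor constructor distributes the nrqueues request queues across the nworkers workers: each worker gets a contiguous [start, start+range) slice, and the first nrqueues % nworkers workers take one extra queue so the remainder is spread evenly (the commented-out C-style loop kept in the source spells out the same iteration as the CFA range loop). A minimal plain-C sketch of that arithmetic; the sizes of 10 queues and 4 workers are assumed purely for illustration:

#include <assert.h>
#include <stdio.h>

int main( void ) {
    unsigned int nrqueues = 10, nworkers = 4;           // assumed example sizes
    assert( nrqueues >= nworkers );                     // same invariant as the Executor constructor
    unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
    for ( unsigned int i = 0, start = 0; i < nworkers; i += 1 ) {
        unsigned int range = reqPerWorker + ( i < extras ? 1 : 0 );
        printf( "worker %u handles queues [%u, %u)\n", i, start, start + range );
        start += range;                                 // next worker starts after this slice
    }
    // prints: worker 0 -> [0, 3), worker 1 -> [3, 6), worker 2 -> [6, 8), worker 3 -> [8, 10)
}

With nrqueues == nworkers (the two- and one-argument constructors) every worker owns exactly one queue; tickets() then spreads send() calls across all nrqueues buffers in roughly round-robin order.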