source: libcfa/src/executor.cfa @ 9476549

Last change on this file since 9476549 was 55b060d, checked in by Peter A. Buhr <pabuhr@…>, 15 months ago

rename directories containers to collections

// Mutex buffer is embedded in the nomutex executor to allow the executor to delete the workers without causing a
// deadlock.  If the executor is a monitor and the buffer is a class, the thread calling the executor's destructor
// (which is mutex) blocks when deleting the workers, preventing outstanding workers from calling remove to drain the
// buffer.

#include <thread.hfa>
#include <collections/list.hfa>

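// Structure: a generic Buffer monitor holds queued work, Worker threads drain their assigned slice of buffers, and
// the plain (nomutex) Executor owns the buffers, workers, and optional processors/cluster.
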
forall( T &, TLink& = dlink(T) | embedded(T, TLink, dlink(T)) ) {
	monitor Buffer {									// unbounded buffer
		dlist( T, TLink ) queue;						// unbounded list of work requests
		condition delay;
	}; // Buffer

	void insert( Buffer(T, TLink) & mutex buf, T * elem ) with(buf) {
		dlist( T, TLink ) * qptr = &queue;				// workaround https://cforall.uwaterloo.ca/trac/ticket/166
		insert_last( *qptr, *elem );					// insert element into buffer
		signal( delay );								// restart
	} // insert

	T * remove( Buffer(T, TLink) & mutex buf ) with(buf) {
		dlist( T, TLink ) * qptr = &queue;				// workaround https://cforall.uwaterloo.ca/trac/ticket/166
		// if ( (*qptr)`isEmpty ) wait( delay );		// no request to process ? => wait
	  if ( (*qptr)`isEmpty ) return 0p;					// no request to process ? => return null for caller to poll
		return &try_pop_front( *qptr );
	} // remove
} // forall
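
// Note: remove is non-blocking: it returns 0p when the buffer is empty (the blocking wait( delay ) variant is
// commented out), so workers poll and yield rather than block.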

struct WRequest {										// client request, no return
	void (* action)( void );
	inline dlink(WRequest);
}; // WRequest
P9_EMBEDDED(WRequest, dlink(WRequest))

void ?{}( WRequest & req ) with(req) { action = 0; }
void ?{}( WRequest & req, void (* action)( void ) ) with(req) { req.action = action; }
bool stop( WRequest & req ) { return req.action == 0; }
void doit( WRequest & req ) { req.action(); }

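// A default-constructed request (action == 0) acts as a stop sentinel: stop() recognizes it and the Executor
// destructor inserts one per worker to shut the workers down.
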
// Each worker has its own set (when request buffers > workers) of work buffers to reduce contention between client
// and server, where work requests arrive and are distributed into buffers in a roughly round-robin order.

thread Worker {
	Buffer(WRequest) * requests;
	WRequest * request;
	unsigned int start, range;
}; // Worker

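// A worker cycles round-robin over its slice of buffers [start, start + range): an empty buffer causes a yield and
// the next buffer is tried, a sentinel stops the worker, and any other request is executed and then deleted.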
void main( Worker & w ) with(w) {
	for ( int i = 0;; i = (i + 1) % range ) {
		request = remove( requests[i + start] );
	  if ( ! request ) { yield(); continue; }
	  if ( stop( *request ) ) break;
		doit( *request );
		delete( request );
	} // for
} // Worker::main

void ?{}( Worker & worker, cluster * wc, Buffer(WRequest) * requests, unsigned int start, unsigned int range ) {
	((thread &)worker){ *wc };
	worker.[requests, request, start, range] = [requests, 0p, start, range];
} // ?{}

WRequest * current_request( Worker & worker ) { return worker.request; }

struct Executor {
	cluster * cluster;									// if workers execute on separate cluster
	processor ** processors;							// array of virtual processors adding parallelism for workers
	Buffer(WRequest) * requests;						// list of work requests
	Worker ** workers;									// array of workers executing work requests
	unsigned int nprocessors, nworkers, nrqueues;		// number of processors/threads/request queues
	bool sepClus;										// use same or separate cluster for executor
	unsigned int next;									// demultiplexed across worker buffers
}; // Executor

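// tickets spreads client requests across the nrqueues buffers. The unsynchronized increment is deliberate: the value
// only selects a buffer, so a lost update merely perturbs the round-robin order; the commented-out uFetchAdd shows
// the atomic alternative.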
unsigned int tickets( Executor & ex ) with(ex) {
	//return uFetchAdd( next, 1 ) % nrqueues;
	return next++ % nrqueues;							// no locking, interference randomizes
} // tickets

void ?{}( Executor & ex, unsigned int np, unsigned int nw, unsigned int nr, bool sc = false ) with(ex) {
	[nprocessors, nworkers, nrqueues, sepClus] = [np, nw, nr, sc];
	assert( nrqueues >= nworkers );
	cluster = sepClus ? new( "Executor" ) : active_cluster();
	processors = aalloc( nprocessors );
	requests = anew( nrqueues );
	workers = aalloc( nworkers );

	for ( i; nprocessors ) {
		processors[i] = new( *cluster );
	} // for

	unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
//	for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) {
	for ( i; nworkers : start; 0u ~ @ ~ range : range; ) {
		range = reqPerWorker + ( i < extras ? 1 : 0 );
		workers[i] = new( cluster, requests, start, range );
	} // for
} // ?{}

void ?{}( Executor & ex, unsigned int nprocessors, unsigned int nworkers, bool sepClus = false ) {
	ex{ nprocessors, nworkers, nworkers, sepClus };
}
void ?{}( Executor & ex, unsigned int nprocessors, bool sepClus = false ) {
	ex{ nprocessors, nprocessors, nprocessors, sepClus };
}
void ?{}( Executor & ex ) {								// special for current cluster, no processors added
	ex{ 0, active_cluster()->nprocessors, false };
}
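
// Construction sketches (hypothetical values):
//   Executor ex0;						// current cluster, no processors added, one worker per existing processor
//   Executor ex1{ 4 };					// 4 processors, 4 workers, 4 request buffers
//   Executor ex2{ 4, 8, 16, true };	// 4 processors, 8 workers, 16 request buffers, separate cluster
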
void ^?{}( Executor & ex ) with(ex) {
	// Add one sentinel per worker to stop them. Since this is the destructor, no new external work should be queued.
	// Cannot combine the next two loops and use only a single sentinel because workers arrive in arbitrary order, so
	// worker 1 may take the single sentinel while waiting for worker 0 to end.

	WRequest sentinel[nworkers];
	unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
	// Mirror the constructor's buffer distribution so each sentinel lands in a different worker's first buffer.
	for ( unsigned int i = 0, step = 0; i < nworkers; step += reqPerWorker + ( i < extras ? 1 : 0 ), i += 1 ) {
		insert( requests[step], &sentinel[i] );			// force eventual termination
	} // for
	for ( i; nworkers ) {
		delete( workers[i] );
	} // for
	for ( i; nprocessors ) {
		delete( processors[i] );
	} // for

	free( workers );
//	adelete( nrqueues, requests );
	for ( i; nrqueues ) ^?{}( requests[i] );			// FIX ME: problem with resolver
	free( requests );
	free( processors );
	if ( sepClus ) { delete( cluster ); }
} // ^?{}

void send( Executor & ex, void (* action)( void ) ) {	// asynchronous call, no return value
	WRequest * node = new( action );
	insert( ex.requests[tickets( ex )], node );
} // send

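// Usage: send( ex, f ) for any void (* f)( void ); the test driver in main below submits the work routine this way.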

int counter = 0;

void work( void ) {
	__atomic_add_fetch( &counter, 1, __ATOMIC_SEQ_CST );
	// fprintf( stderr, "workie\n" );
}

int main( int argc, char * argv[] ) {
	int times = 1_000_000;
	if ( argc == 2 ) times = atoi( argv[1] );
	processor p[7];
	{
		Executor executor;
		for ( i; times ) {
			send( executor, work );
			if ( i % 100 == 0 ) yield();
		} // for
	}
	printf( "%d\n", counter );
}
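
// The block around the executor forces its destructor to run, and hence all workers to finish, before counter is
// printed.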

// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa executor.cfa" //
// End: //