source: libcfa/src/executor.cfa @ 5a7789f

Last change on this file since 5a7789f was 69914cbc, checked in by Michael Brooks <mlbrooks@…>, 4 years ago

Replacing "Mike's old linked list" with "Mike's new linked list," including replatforming uses of the old one.

libcfa/src/containers/list.hfa ---[becomes]--> tests/zombies/linked-list-perf/mike-old.hfa
libcfa/src/containers/list2.hfa ---[becomes]--> libcfa/src/containers/list.hfa

There are no more multiple versions of "Mike's list" in libcfa, nor tests thereof.

The libcfa concurrency uses (alarm, kernel and ready_queue) are hereby ported to "Mike's new."

A best-effort port of executor is also attempted; it was found not compiling, with errors that seem unrelated to the linked list.

// Mutex buffer is embedded in the nomutex executor to allow the executor to delete the workers without causing a
// deadlock. If the executor is the monitor and the buffer is a class, the thread calling the executor's destructor
// (which is mutex) blocks when deleting the workers, preventing outstanding workers from calling remove to drain the
// buffer.

#include <thread.hfa>
#include <containers/list.hfa>
#include <stdio.h>                                   // printf
#include <stdlib.hfa>                                // new, anew, aalloc, delete, atoi

forall( T &, TLink & = dlink(T) | embedded(T, TLink, dlink(T)) ) {
    monitor Buffer {                                 // unbounded buffer
        dlist( T, TLink ) queue;                     // unbounded list of work requests
        condition delay;
    }; // Buffer

    void insert( Buffer(T, TLink) & mutex buf, T * elem ) with(buf) {
        dlist( T, TLink ) * qptr = &queue;           // workaround https://cforall.uwaterloo.ca/trac/ticket/166
        insert_last( *qptr, *elem );                 // insert element into buffer
        signal( delay );                             // restart a waiting worker
    } // insert

    T * remove( Buffer(T, TLink) & mutex buf ) with(buf) {
        dlist( T, TLink ) * qptr = &queue;           // workaround https://cforall.uwaterloo.ca/trac/ticket/166
        // if ( (*qptr)`isEmpty ) wait( delay );     // no request to process ? => wait
        if ( (*qptr)`isEmpty ) return 0p;            // no request to process ? => return null rather than wait
        return &try_pop_front( *qptr );
    } // remove
} // forall
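
// Usage sketch (hypothetical Job type, not part of this file): an element type opts into Buffer by
// embedding an intrusive link and declaring the embedding, exactly as WRequest does below:
//   struct Job { void (* work)( void ); inline dlink(Job); };
//   P9_EMBEDDED(Job, dlink(Job))
//   Buffer(Job) jobs;                               // monitor instance
//   Job j;                                          // element with embedded link
//   insert( jobs, &j );                             // client side
//   Job * next = remove( jobs );                    // worker side; 0p when buffer is empty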

struct WRequest {                                    // client request, no return
    void (* action)( void );
    inline dlink(WRequest);
}; // WRequest
P9_EMBEDDED(WRequest, dlink(WRequest))

void ?{}( WRequest & req ) with(req) { action = 0; }
void ?{}( WRequest & req, void (* action)( void ) ) with(req) { req.action = action; }
bool stop( WRequest & req ) { return req.action == 0; }
void doit( WRequest & req ) { req.action(); }

// Each worker has its own set (when request buffers > workers) of work buffers to reduce contention between client
// and server, where work requests arrive and are distributed into the buffers in a roughly round-robin order.
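
// For example (illustration only): with 8 request buffers and 2 workers, worker 0 polls buffers 0-3
// and worker 1 polls buffers 4-7, while tickets() below spreads client sends across all 8 buffers.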

thread Worker {
    Buffer(WRequest) * requests;
    WRequest * request;
    unsigned int start, range;
}; // Worker

void main( Worker & w ) with(w) {
    for ( int i = 0;; i = (i + 1) % range ) {        // cycle through this worker's buffers
        request = remove( requests[i + start] );
        if ( ! request ) { yield(); continue; }      // buffer empty ? => try next one
        if ( stop( *request ) ) break;               // sentinel ? => terminate
        doit( *request );
        delete( request );
    } // for
} // Worker::main

void ?{}( Worker & worker, cluster * wc, Buffer(WRequest) * requests, unsigned int start, unsigned int range ) {
    ((thread &)worker){ *wc };
    worker.[requests, request, start, range] = [requests, 0p, start, range];
} // ?{}

WRequest * current_request( Worker & worker ) { return worker.request; }

struct Executor {
    cluster * cluster;                               // if workers execute on separate cluster
    processor ** processors;                         // array of virtual processors adding parallelism for workers
    Buffer(WRequest) * requests;                     // list of work requests
    Worker ** workers;                               // array of workers executing work requests
    unsigned int nprocessors, nworkers, nrqueues;    // number of processors/threads/request queues
    bool sepClus;                                    // use same or separate cluster for executor
    unsigned int next;                               // demultiplexed across worker buffers
}; // Executor

unsigned int tickets( Executor & ex ) with(ex) {
    //return uFetchAdd( next, 1 ) % nrqueues;
    return next++ % nrqueues;                        // no locking, interference randomizes
} // tickets
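
// The unlocked increment above can lose updates under contention, which only perturbs the
// round-robin placement, not correctness. A race-free variant (sketch, using the gcc builtin
// rather than the commented-out uFetchAdd) would be:
//   return __atomic_fetch_add( &next, 1, __ATOMIC_RELAXED ) % nrqueues;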

void ?{}( Executor & ex, unsigned int np, unsigned int nw, unsigned int nr, bool sc = false ) with(ex) {
    [nprocessors, nworkers, nrqueues, sepClus] = [np, nw, nr, sc];
    assert( nrqueues >= nworkers );
    cluster = sepClus ? new( "Executor" ) : active_cluster();
    processors = aalloc( nprocessors );
    requests = anew( nrqueues );
    workers = aalloc( nworkers );

    for ( i; nprocessors ) {
        processors[i] = new( *cluster );
    } // for

    unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
//  for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) {
    for ( i; nworkers : start; 0u ~ @ ~ range : range; ) {
        range = reqPerWorker + ( i < extras ? 1 : 0 );
        workers[i] = new( cluster, requests, start, range );
    } // for
} // ?{}
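
// Distribution sketch (arithmetic illustration): with nrqueues = 10 and nworkers = 4,
// reqPerWorker = 2 and extras = 2, so the first two workers get one extra queue each:
//   worker 0: start 0, range 3 (queues 0-2)    worker 1: start 3, range 3 (queues 3-5)
//   worker 2: start 6, range 2 (queues 6-7)    worker 3: start 8, range 2 (queues 8-9)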

void ?{}( Executor & ex, unsigned int nprocessors, unsigned int nworkers, bool sepClus = false ) {
    ex{ nprocessors, nworkers, nworkers, sepClus };
}
void ?{}( Executor & ex, unsigned int nprocessors, bool sepClus = false ) {
    ex{ nprocessors, nprocessors, nprocessors, sepClus };
}
void ?{}( Executor & ex ) {                          // special for current cluster, no processors added
    ex{ 0, active_cluster()->nprocessors, false };
}
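
// Construction sketch: the delegating constructors above allow, for example,
//   Executor ex1{ 4, 4, 16 };                       // 4 processors, 4 workers, 16 request queues
//   Executor ex2{ 4 };                              // 4 processors, 4 workers, 4 queues
//   Executor ex3;                                   // current cluster, no extra processors
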
void ^?{}( Executor & ex ) with(ex) {
    // Add one sentinel per worker to stop them. Since we are in the destructor, no new external work should be
    // queued. Cannot combine the next two loops and only have a single sentinel because workers arrive in arbitrary
    // order, so worker 1 may take the single sentinel while waiting for worker 0 to end.

    WRequest sentinel[nworkers];
    unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
    for ( unsigned int i = 0, step = 0; i < nworkers; i += 1 ) {
        insert( requests[step], &sentinel[i] );      // force eventual termination
        step += reqPerWorker + ( i < extras ? 1 : 0 ); // step to the first queue of the next worker's
                                                     // range, matching the ranges computed in the constructor
    } // for
    for ( i; nworkers ) {
        delete( workers[i] );
    } // for
    for ( i; nprocessors ) {
        delete( processors[i] );
    } // for

    free( workers );
//  adelete( nrqueues, requests );
    for ( i; nrqueues ) ^?{}( requests[i] );         // FIX ME: problem with resolver
    free( requests );
    free( processors );
    if ( sepClus ) { delete( cluster ); }
} // ^?{}
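
// Sentinel placement sketch: with nrqueues = 5 and nworkers = 2 (reqPerWorker = 2, extras = 1),
// worker 0 owns queues 0-2 and worker 1 owns queues 3-4, so the sentinels must go to queues 0
// and 3; stepping by the per-worker range above guarantees each worker sees its own sentinel.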

void send( Executor & ex, void (* action)( void ) ) { // asynchronous call, no return value
    WRequest * node = new( action );
    insert( ex.requests[tickets( ex )], node );
} // send


int counter = 0;

void work( void ) {
    __atomic_add_fetch( &counter, 1, __ATOMIC_SEQ_CST );
    // fprintf( stderr, "workie\n" );
}

int main( int argc, char * argv[] ) {
    int times = 1_000_000;
    if ( argc == 2 ) times = atoi( argv[1] );
    processor p[7];                                  // add kernel threads to the current cluster
    {
        Executor executor;
        for ( i; times ) {
            send( executor, work );
            if ( i % 100 == 0 ) yield();
        } // for
    }                                                // executor destructor stops the workers
    printf( "%d\n", counter );
}

// Local Variables: //
// tab-width: 4 //
// compile-command: "cfa executor.cfa" //
// End: //