source: libcfa/src/executor.cfa@ 171ca0d

// Mutex buffer is embedded in the nomutex executor to allow the executor to delete the workers without causing a
// deadlock. If the executor is the monitor and the buffer is a class, the thread calling the executor's destructor
// (which is mutex) blocks when deleting the workers, preventing outstanding workers from calling remove to drain the
// buffer.

#include <bits/containers.hfa>
#include <thread.hfa>
#include <stdio.h>

forall( dtype T )
monitor Buffer {									// unbounded buffer
	__queue_t( T ) queue;							// unbounded list of work requests
	condition delay;
}; // Buffer

forall( dtype T | is_node(T) ) {
	void insert( Buffer( T ) & mutex buf, T * elem ) with(buf) {
		append( queue, elem );						// insert element into buffer
		signal( delay );							// restart
	} // insert

	T * remove( Buffer( T ) & mutex buf ) with(buf) {
		if ( queue.head != 0 ) return pop_head( queue ); // request to process ? => return it
		// if ( queue.head == 0 ) wait( delay );	// blocking alternative: wait for a request
		return 0;									// empty buffer => worker polls its other mailboxes
	} // remove
} // distribution
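
// Minimal usage sketch (illustrative, not part of the executor). Any type
// satisfying is_node, e.g., WRequest below, can be buffered:
//	Buffer( WRequest ) buf;
//	insert( buf, new( workie ) );					// producer: enqueue a request
//	WRequest * r = remove( buf );					// consumer: 0 if buf is empty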

struct WRequest {									// client request, no return
	void (* action)( void );
	WRequest * next;								// intrusive queue field
}; // WRequest

WRequest *& get_next( WRequest & this ) { return this.next; } // intrusive-link accessor
void ?{}( WRequest & req ) with(req) { action = 0; next = 0; } // default request is a stop sentinel
void ?{}( WRequest & req, void (* action)( void ) ) with(req) { req.action = action; next = 0; } // req. qualifier: parameter shadows member
bool stop( WRequest & req ) { return req.action == 0; } // null action marks a sentinel
void doit( WRequest & req ) { req.action(); }

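// The is_node trait used above is not defined in this file; it is assumed to
// require an intrusive-link accessor along the lines of:
//	trait is_node( dtype T ) {
//		T *& get_next( T & );
//	};
// get_next above satisfies it for WRequest, so WRequest can live in a Buffer.
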
// Each worker has its own work buffer to reduce contention between client and server. Hence, work requests arrive and
// are distributed into buffers in a roughly round-robin order.

thread Worker {
	Buffer( WRequest ) * requests;
	unsigned int start, range;
}; // Worker

void main( Worker & w ) with(w) {
	for ( int i = 0;; i = (i + 1) % range ) {		// cycle through this worker's mailboxes
		WRequest * request = remove( requests[i + start] );
		if ( ! request ) { yield(); continue; }		// empty mailbox ? => try the next one
		if ( stop( *request ) ) break;				// sentinel ? => terminate worker
		doit( *request );
		delete( request );							// worker frees the request storage
	} // for
} // Worker::main

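// Example (illustrative): a worker constructed with start == 4 and range == 3
// polls mailboxes 4, 5, 6, 4, 5, ... taking one request per visit, yielding on
// empty mailboxes, until it dequeues its sentinel.
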
void ?{}( Worker & worker, cluster * wc, Buffer( WRequest ) * requests, unsigned int start, unsigned int range ) {
	(*get_thread(worker)){ *wc };					// create thread on given cluster
	worker.[requests, start, range] = [requests, start, range]; // tuple assignment; worker. qualifier needed as parameters shadow members
} // ?{}

struct Executor {
	cluster * cluster;								// if workers execute on separate cluster
	processor ** processors;						// array of virtual processors adding parallelism for workers
	Buffer( WRequest ) * requests;					// list of work requests
	Worker ** workers;								// array of workers executing work requests
	unsigned int nprocessors, nworkers, nmailboxes;	// number of processors/workers/mailboxes
	bool sepClus;									// use same or separate cluster for executor
}; // Executor

static thread_local unsigned int next;				// demultiplexed across worker buffers
unsigned int tickets( Executor & ex ) with(ex) {
	//return uFetchAdd( next, 1 ) % nmailboxes;
	return next++ % nmailboxes;						// no locking, interference randomizes
} // tickets

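// Example (illustrative): with nmailboxes == 8, a client thread spreads its
// requests over mailboxes 0, 1, ..., 7, 0, 1, ... Unlocked concurrent updates
// to next may lose increments, but every ticket modulo nmailboxes is still a
// valid mailbox index, so correctness is unaffected.
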
void ?{}( Executor & ex, unsigned int np, unsigned int nw, unsigned int nm, bool sc = false ) with(ex) {
	[nprocessors, nworkers, nmailboxes, sepClus] = [np, nw, nm, sc];
	assert( nmailboxes >= nworkers );
	cluster = sepClus ? new( "Executor" ) : active_cluster();
	processors = (processor **)anew( nprocessors );
	requests = anew( nmailboxes );
	workers = (Worker **)anew( nworkers );

	for ( i; nprocessors ) {
		processors[ i ] = new( *cluster );
	} // for

	unsigned int reqPerWorker = nmailboxes / nworkers, extras = nmailboxes % nworkers;
	for ( unsigned int i = 0, step = 0; i < nworkers; i += 1 ) {
		unsigned int range = reqPerWorker + ( i < extras ? 1 : 0 ); // first extras workers take one extra mailbox
		workers[ i ] = new( cluster, requests, step, range );
		step += range;								// next worker starts after this worker's range
	} // for
} // ?{}
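
// Example (illustrative): nmailboxes == 10 and nworkers == 4 give reqPerWorker == 2
// and extras == 2, so the workers receive mailbox ranges [0,3), [3,6), [6,8), [8,10).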

void ?{}( Executor & ex, unsigned int nprocessors, unsigned int nworkers, bool sepClus = false ) {
	ex{ nprocessors, nworkers, nworkers, sepClus };
}
void ?{}( Executor & ex, unsigned int nprocessors, bool sepClus = false ) {
	ex{ nprocessors, nprocessors, nprocessors, sepClus };
}
void ?{}( Executor & ex ) {							// special for current cluster
	ex{ 0, active_cluster()->nprocessors, false };
}
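
// Example (illustrative): Executor ex{ 4 }; creates 4 processors, 4 workers, and
// 4 mailboxes on the current cluster, while Executor ex2{ 2, 8, 16, true }; drives
// 8 workers over 16 mailboxes with 2 processors on a separate cluster.
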
void ^?{}( Executor & ex ) with(ex) {
	// Add one sentinel per worker to stop them. Since this is the destructor, no new work should be queued. The next
	// two loops cannot be combined using a single sentinel because workers terminate in arbitrary order: worker 1
	// might consume the single sentinel while the destructor is still waiting for worker 0 to end.

	WRequest sentinel[nworkers];
	unsigned int reqPerWorker = nmailboxes / nworkers, extras = nmailboxes % nworkers;
	for ( unsigned int i = 0, step = 0; i < nworkers; i += 1 ) {
		insert( requests[step], &sentinel[i] );		// force eventual termination
		step += reqPerWorker + ( i < extras ? 1 : 0 ); // advance to next worker's first mailbox, matching the constructor's partition
	} // for
	for ( i; nworkers ) {
		delete( workers[ i ] );
	} // for
	for ( i; nprocessors ) {
		delete( processors[ i ] );
	} // for

	delete( workers );
	delete( requests );
	delete( processors );
	if ( sepClus ) { delete( cluster ); }
} // ^?{}
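
// Example (illustrative): with the [0,3), [3,6), [6,8), [8,10) partition above,
// the four sentinels land in mailboxes 0, 3, 6, and 8, one per worker, so each
// worker eventually dequeues exactly one sentinel and exits its main loop.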

void send( Executor & ex, void (* action)( void ) ) { // asynchronous call, no return value
	WRequest * node = new( action );				// heap request, deleted by the worker after doit
	insert( ex.requests[tickets( ex )], node );
} // send

int counter = 0;									// shared across workers, so increments must be atomic

void workie( void ) {
	__atomic_add_fetch( &counter, 1, __ATOMIC_SEQ_CST );
//	fprintf( stderr, "workie\n" );
}

int main() {
	{												// scope executor so its destructor runs before the print
		Executor executor;
		for ( i; 3000 ) {
			send( executor, workie );
			if ( i % 100 ) yield();
		} // for
	}												// all work drained and workers terminated here
	printf( "%d\n", counter );
}
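
// The driver submits 3000 workie requests; once every request has executed
// before the executor shuts down, the program prints 3000.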

// Local Variables: //
// compile-command: "cfa executor.cfa" //
// End: //