Changeset 089ee6b


Ignore:
Timestamp:
May 24, 2020, 5:05:13 PM (17 months ago)
Author:
Peter A. Buhr <pabuhr@…>
Branches:
arm-eh, jacob/cs343-translation, master, new-ast, new-ast-unique-expr
Children:
a51c0c0
Parents:
cbbd8fd7
Message:

update intrusive buffer, fix storage management problems

File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/executor.cfa

    rcbbd8fd7 r089ee6b  
    66#include <containers/list.hfa>
    77#include <thread.hfa>
    8 #include <stdio.h>
     8//#include <malloc.h>                                                                           // trace
    99
    10 struct WRequest {                                       // client request, no return
    11     void (* action)( void );
    12     DLISTED_MGD_IMPL_IN(WRequest)
     10forall( dtype T | $dlistable(T, T) ) {
     11        monitor Buffer {                                                                        // unbounded buffer
     12                dlist( T, T ) queue;                                                    // unbounded list of work requests
     13                condition delay;
     14        }; // Buffer
     15
     16        void insert( Buffer(T) & mutex buf, T * elem ) with(buf) {
     17                dlist( T, T ) * qptr = &queue;                                  // workaround https://cforall.uwaterloo.ca/trac/ticket/166
     18                insert_last( *qptr, *elem );                                    // insert element into buffer
     19                signal( delay );                                                                // restart
     20        } // insert
     21
     22        T * remove( Buffer(T) & mutex buf ) with(buf) {
     23                dlist( T, T ) * qptr = &queue;                                  // workaround https://cforall.uwaterloo.ca/trac/ticket/166
     24                if ( (*qptr)`is_empty ) wait( delay );                  // no request to process ? => wait
     25                return &pop_first( *qptr );
     26        } // remove
     27
     28//      void ?{}( Buffer(T) & ) {}
     29//      void ^?{}( Buffer(T) & mutex ) {}
     30} // forall
     31
     32struct WRequest {                                                                               // client request, no return
     33        void (* action)( void );
     34        DLISTED_MGD_IMPL_IN(WRequest)
    1335}; // WRequest
    1436DLISTED_MGD_IMPL_OUT(WRequest)
     
    1941void doit( WRequest & req ) { req.action(); }
    2042
    21 monitor WRBuffer {                                      // unbounded buffer
    22     dlist( WRequest, WRequest ) queue;                  // unbounded list of work requests
    23     condition delay;
    24 }; // WRBuffer
    25 
    26 void insert( WRBuffer & mutex buf, WRequest * elem ) with(buf) {
    27     insert_last( queue, *elem );                        // insert element into buffer
    28     signal( delay );                                    // restart
    29 } // insert
    30 
    31 WRequest * remove( WRBuffer & mutex buf ) with(buf) {
    32     if ( queue`is_empty ) wait( delay );                // no request to process ? => wait
    33     return & pop_first( queue );
    34 } // remove
    35 
    36 // Each worker has its own work buffer to reduce contention between client and server. Hence, work requests arrive and
    37 // are distributed into buffers in a roughly round-robin order.
     43// Each worker has its own set (when requests buffers > workers) of work buffers to reduce contention between client
     44// and server, where work requests arrive and are distributed into buffers in a roughly round-robin order.
    3845
    3946thread Worker {
    40     WRBuffer * requests;
    41     unsigned int start, range;
     47        Buffer(WRequest) * requests;
     48        WRequest * request;
     49        unsigned int start, range;
    4250}; // Worker
    4351
    4452void main( Worker & w ) with(w) {
    45     for ( int i = 0;; i = (i + 1) % range ) {
    46         WRequest * request = remove( requests[i + start] );
    47       if ( ! request ) { yield(); continue; }
    48       if ( stop( *request ) ) break;
    49         doit( *request );
    50         delete( request );
    51     } // for
     53        for ( int i = 0;; i = (i + 1) % range ) {
     54                request = remove( requests[i + start] );
     55          if ( ! request ) { yield(); continue; }
     56          if ( stop( *request ) ) break;
     57                doit( *request );
     58                delete( request );
     59        } // for
    5260} // Worker::main
    5361
    54 void ?{}( Worker & worker, cluster * wc, WRBuffer * requests, unsigned int start, unsigned int range ) {
    55     (*get_thread(worker)){ *wc };                       // create on given cluster
    56     worker.[requests, start, range] = [requests, start, range];
     62void ?{}( Worker & worker, cluster * wc, Buffer(WRequest) * requests, unsigned int start, unsigned int range ) {
     63        ((thread &)worker){ *wc };
     64        worker.[requests, request, start, range] = [requests, 0p, start, range];
    5765} // ?{}
    5866
     67WRequest * current_request( Worker & worker ) { return worker.request; }
     68
    5969struct Executor {
    60     cluster * cluster;                                  // if workers execute on separate cluster
    61     processor ** processors;                            // array of virtual processors adding parallelism for workers
    62     WRBuffer * requests;                                // list of work requests
    63     Worker ** workers;                                  // array of workers executing work requests
    64     unsigned int nprocessors, nworkers, nmailboxes;     // number of mailboxes/workers/processor tasks
    65     bool sepClus;                                       // use same or separate cluster for executor
     70        cluster * cluster;                                                                      // if workers execute on separate cluster
     71        processor ** processors;                                                        // array of virtual processors adding parallelism for workers
     72        Buffer(WRequest) * requests;                                            // list of work requests
     73        Worker ** workers;                                                                      // array of workers executing work requests
     74        unsigned int nprocessors, nworkers, nrqueues;           // number of processors/threads/request queues
     75        bool sepClus;                                                                           // use same or separate cluster for executor
    6676}; // Executor
    6777
    68 static thread_local unsigned int next;                  // demultiplexed across worker buffers
     78static thread_local unsigned int next;                                  // demultiplexed across worker buffers
     79
    6980unsigned int tickets( Executor & ex ) with(ex) {
    70     //return uFetchAdd( next, 1 ) % nmailboxes;
    71     return next++ % nmailboxes;                         // no locking, interference randomizes
     81        //return uFetchAdd( next, 1 ) % nrqueues;
     82        return next++ % nrqueues;                                                       // no locking, interference randomizes
    7283} // tickets
    7384
    74 void ?{}( Executor & ex, unsigned int np, unsigned int nw, unsigned int nm, bool sc = false ) with(ex) {
    75     [nprocessors, nworkers, nmailboxes, sepClus] = [np, nw, nm, sc];
    76     assert( nmailboxes >= nworkers );
    77     cluster = sepClus ? new( "Executor" ) : active_cluster();
    78     processors = (processor **)anew( nprocessors );
    79     requests = (WRBuffer *)anew( nmailboxes );
    80     workers = (Worker **)anew( nworkers );
     85void ?{}( Executor & ex, unsigned int np, unsigned int nw, unsigned int nr, bool sc = false ) with(ex) {
     86        [nprocessors, nworkers, nrqueues, sepClus] = [np, nw, nr, sc];
     87        assert( nrqueues >= nworkers );
     88        cluster = sepClus ? new( "Executor" ) : active_cluster();
     89        processors = aalloc( nprocessors );
     90        requests = anew( nrqueues );
     91        workers = aalloc( nworkers );
    8192
    82     for ( i; nprocessors ) {
    83         processors[ i ] = new( *cluster );
    84     } // for
     93        for ( i; nprocessors ) {
     94                processors[i] = new( *cluster );
     95        } // for
    8596
    86     unsigned int reqPerWorker = nmailboxes / nworkers, extras = nmailboxes % nworkers;
    87     for ( unsigned int i = 0, step = 0; i < nworkers; i += 1, step += reqPerWorker + ( i < extras ? 1 : 0 ) ) {
    88         workers[ i ] = new( cluster, requests, step, reqPerWorker + ( i < extras ? 1 : 0 ) );
    89     } // for
     97        unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
     98//      for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) {
     99    for ( i; nworkers : start; 0u ~ @ ~ range : range; ) {
     100            range = reqPerWorker + ( i < extras ? 1 : 0 );
     101                workers[i] = new( cluster, requests, start, range );
     102        } // for
    90103} // ?{}
    91104
    92105void ?{}( Executor & ex, unsigned int nprocessors, unsigned int nworkers, bool sepClus = false ) {
    93     ex{ nprocessors, nworkers, nworkers, sepClus };
     106        ex{ nprocessors, nworkers, nworkers, sepClus };
    94107}
    95108void ?{}( Executor & ex, unsigned int nprocessors, bool sepClus = false ) {
    96     ex{ nprocessors, nprocessors, nprocessors, sepClus };
     109        ex{ nprocessors, nprocessors, nprocessors, sepClus };
    97110}
    98 void ?{}( Executor & ex ) {                             // special for current cluster
    99     ex{ 0, active_cluster()->nprocessors, false };
     111void ?{}( Executor & ex ) {                                                             // special for current cluster
     112        ex{ 0, active_cluster()->nprocessors, false };
    100113}
    101114void ^?{}( Executor & ex ) with(ex) {
    102     // Add one sentinel per worker to stop them. Since in destructor, no new work should be queued.  Cannot combine next
    103     // two loops and only have a single sentinel because workers arrive in arbitrary order, so worker1 may take the
    104     // single sentinel while waiting for worker 0 to end.
     115        // Add one sentinel per worker to stop them. Since in destructor, no new external work should be queued.  Cannot
     116        // combine next two loops and only have a single sentinel because workers arrive in arbitrary order, so worker1 may
     117        // take the single sentinel while waiting for worker 0 to end.
    105118
    106     WRequest sentinel[nworkers];
    107     unsigned int reqPerWorker = nmailboxes / nworkers;
    108     for ( unsigned int i = 0, step = 0; i < nworkers; i += 1, step += reqPerWorker ) {
L109         insert( requests[step], &sentinel[i] );         // force eventual termination
    110     } // for
    111     for ( i; nworkers ) {
    112         delete( workers[ i ] );
    113     } // for
    114     for ( i; nprocessors ) {
    115         delete( processors[ i ] );
    116     } // for
     119        WRequest sentinel[nworkers];
     120        unsigned int reqPerWorker = nrqueues / nworkers;
     121        for ( unsigned int i = 0, step = 0; i < nworkers; i += 1, step += reqPerWorker ) {
 122                insert( requests[step], &sentinel[i] );                 // force eventual termination
     123        } // for
     124        for ( i; nworkers ) {
     125                delete( workers[i] );
     126        } // for
     127        for ( i; nprocessors ) {
     128                delete( processors[i] );
     129        } // for
    117130
    118     delete( workers );
    119     delete( requests );
    120     delete( processors );
    121     if ( sepClus ) { delete( cluster ); }
     131        free( workers );
     132//      adelete( nrqueues, requests );
     133        for ( i; nrqueues ) ^?{}( requests[i] );
     134        free( requests );
     135        free( processors );
     136        if ( sepClus ) { delete( cluster ); }
    122137} // ^?{}
    123138
    124139void send( Executor & ex, void (* action)( void ) ) {   // asynchronous call, no return value
    125     WRequest * node = new( action );
    126     insert( ex.requests[tickets( ex )], node );
     140        WRequest * node = new( action );
     141        insert( ex.requests[tickets( ex )], node );
    127142} // send
     143
    128144
    129145int counter = 0;
    130146
    131147void workie( void ) {
    132     __atomic_add_fetch( &counter, 1, __ATOMIC_SEQ_CST );
    133 //    fprintf( stderr, "workie\n" );
     148        __atomic_add_fetch( &counter, 1, __ATOMIC_SEQ_CST );
     149//      fprintf( stderr, "workie\n" );
    134150}
    135151
    136 int main() {
    137     {
    138         Executor exector;
    139         for ( i; 3000 ) {
    140             send( exector, workie );
    141             if ( i % 100 == 0 ) {
    142 //              fprintf( stderr, "%d\n", i );
    143                 yield();
    144             }
    145         } // for
    146     }
    147     printf( "%d\n", counter );
     152int main( int argc, char * argv[] ) {
     153        int times = 1_000_000;
     154        if ( argc == 2 ) times = atoi( argv[1] );
     155        processor p[7];
     156        {
     157                printf( "%d\n", active_cluster()->nprocessors );
     158                Executor exector;
     159                for ( i; times ) {
     160                        send( exector, workie );
     161                        if ( i % 100 == 0 ) {
     162//                        fprintf( stderr, "%d\n", i );
     163                                yield();
     164                        }
     165                } // for
     166        }
     167        printf( "%d\n", counter );
    148168}
    149169
    150170// Local Variables: //
 171// tab-width: 4 //
    151172// compile-command: "cfa executor.cfa" //
    152173// End: //
Note: See TracChangeset for help on using the changeset viewer.