# Changeset 04e6f93 for doc

Ignore:
Timestamp:
Feb 27, 2020, 4:04:25 PM (3 years ago)
Branches:
arm-eh, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
a037f85
Parents:
41efd33 (diff), 930b504 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
doc
Files:
20 edited
1 moved

Unmodified
Removed
• ## doc/bibliography/pl.bib

 r41efd33 %    Predefined journal names: %  acmcs: Computing Surveys             acta: Acta Infomatica @string{acta="Acta Infomatica"} %  cacm: Communications of the ACM %  ibmjrd: IBM J. Research & Development ibmsj: IBM Systems Journal %  tcs: Theoretical Computer Science @string{acta="Acta Infomatica"} string{ieeepds="IEEE Transactions on Parallel and Distributed Systems"} @string{ieeepds="IEEE Trans. Parallel Distrib. Syst."} series      = {ACM Distinguished Dissertations}, year        = 1983, } @article{Zhang19, keywords    = {Algebraic effects, dynamic scoping, exceptions, parametricity, type systems}, author      = {Zhang, Yizhou and Myers, Andrew C.}, title       = {Abstraction-safe Effect Handlers via Tunneling}, journal     = {Proc. ACM Program. Lang.}, issue_date  = {January 2019}, volume      = {3}, number      = {POPL}, month       = jan, year        = {2019}, issn        = {2475-1421}, pages       = {5:1--5:29}, articleno   = {5}, publisher   = {ACM}, address     = {New York, NY, USA}, } @inproceedings{Zhang16, keywords    = {Exception tunneling, Genus, exception handling}, author      = {Zhang, Yizhou and Salvaneschi, Guido and Beightol, Quinn and Liskov, Barbara and Myers, Andrew C.}, title       = {Accepting Blame for Safe Tunneled Exceptions}, booktitle   = {Proceedings of the 37th ACM SIGPLAN Conference on Programming Language Design and Implementation}, series      = {PLDI'16}, year        = {2016}, location    = {Santa Barbara, CA, USA}, pages       = {281--295}, publisher   = {ACM}, address     = {New York, NY, USA}, } journal     = sigplan, year        = 1981, month       = feb, volume = 16, number = 2, pages = {48-52}, month       = feb, volume      = 16, number      = 2, pages       = {48-52}, comment     = { A one-pass, top-down algorithm for overload resolution.  
Input is a title       = {An Alternative to Subclassing}, journal     = sigplan, volume      = {21},    number = {11}, volume      = {21}, number      = {11}, pages       = {424-428}, month       = nov, year = 1986, month       = nov, year        = 1986, comment     = { The Smalltalk class hierarchy has three uses: factoring out code; isbn        = {3-540-66538-2}, location    = {Toulouse, France}, doi         = {http://doi.acm.org/10.1145/318773.319251}, publisher   = {Springer}, address     = {London, UK}, year        = 2010, pages       = {39--50}, numpages    = {12}, publisher   = {IEEE Computer Society}, address     = {Washington, DC, USA}, } @manual{C99, keywords    = {ISO/IEC C 9899}, contributer = {pabuhr@plg}, key         = {C99}, title       = {C Programming Language {ISO/IEC} 9899:1999(E)}, edition     = {2nd}, publisher   = {International Standard Organization}, address     = {\href{https://webstore.ansi.org/Standards/INCITS/INCITSISOIEC98991999R2005}{https://webstore.ansi.org/\-Standards/\-INCITS/\-INCITSISOIEC98991999R2005}}, year        = 1999, } @manual{C11, keywords    = {ISO/IEC C 11}, location    = {London, United Kingdom}, pages       = {41--53}, numpages    = {13}, url         = {http://doi.acm.org/10.1145/360204.360207}, doi         = {10.1145/360204.360207}, acmid       = {360207}, publisher   = {ACM}, address     = {New York, NY, USA}, year        = 1993, pages       = {201--208}, url         = {http://doi.acm.org/10.1145/155360.155580}, publisher   = {ACM}, address     = {New York, NY, USA}, location    = {Boulder, Colorado, USA}, pages       = {91--97}, numpages    = {7}, publisher   = {ACM}, address     = {New York, NY, USA}, issn        = {0004-5411}, pages       = {215--225}, numpages    = {11}, url         = {http://doi.acm.org/10.1145/321879.321884}, doi         = {10.1145/321879.321884}, acmid       = {321884}, publisher   = {ACM}, address     = {New York, NY, USA}, } @misc{Drepper13, keywords    = {thread-local storage}, contributer 
= {pabuhr@plg}, author      = {Ulrich Drepper}, title       = {{ELF} Handling For Thread-Local Storage}, year        = 2013, month       = aug, note        = {WikipediA}, howpublished= {\href{http://www.akkadia.org/drepper/tls.pdf} {http://\-www.akkadia.org/\-drepper/\-tls.pdf}}, } @misc{Turley99, keywords    = {embedded system, micrprocessor}, howpublished= {\href{https://www.eetimes.com/author.asp?sectionid=36&doc_id=1287712} {https://\-www.eetimes.com/\-author.asp?sectionid=\-36&doc_id=1287712}}, } @article{Xiao19, keywords    = {bug classification, fault trigger, Linux operating system, regression bug}, contributer = {pabuhr@plg}, author      = {Guanping Xiao and Zheng Zheng and Beibei Yin and Kishor S. Trivedi and Xiaoting Du and Kai-Yuan Cai}, title       = {An Empirical Study of Fault Triggers in the Linux Operating System: An Evolutionary Perspective}, journal     = {IEEE Transactions on Reliability}, month       = dec, year        = 2019, volume      = 68, number      = 4, pages       = {1356-1383}, } } @inproceedings{Palix11, keywords    = {Linux, fault-finding tools}, contributer = {pabuhr@plg}, author      = {Nicolas Palix and Ga\"el Thomas and Suman Saha and Christophe Calv\es and Julia Lawall and Gilles Muller}, title       = {Faults in Linux: Ten Years Later}, booktitle   = {Proc. of the 16 International Conf. on Arch. Support for Prog. Lang. and Oper. 
Sys.}, series      = {ASPLOS'11}, month       = mar, year        = 2011, location    = {Newport Beach, California, USA}, pages       = {305-318}, publisher   = {ACM}, address     = {New York, NY, USA}, } @article{Lamport87, keywords    = {software solutions, mutual exclusion, fast}, issn        = {0001-0782}, pages       = {107--115}, numpages    = {9}, url         = {http://doi.acm.org/10.1145/1538788.1538814}, doi         = {10.1145/1538788.1538814}, acmid       = {1538814}, publisher   = {ACM}, address     = {New York, NY, USA}, } @mastersthesis{Radhakrishnan19, author      = {Srihari Radhakrishnan}, title       = {High Performance Web Servers: A Study In Concurrent Programming Models}, school      = {School of Computer Sc., University of Waterloo}, year        = 2019, optaddress  = {Waterloo, Ontario, Canada, N2L 3G1}, note        = {\href{https://uwspace.uwaterloo.ca/handle/10012/14706}{https://\-uwspace.uwaterloo.ca/\-handle/\-10012/\-14706}}, } @article{katzenelson83b, contributer = {gjditchfield@plg}, pages       = {115-138}, year        = 1971, } @inproceedings{Hagersten03, keywords    = {cache storage, parallel architectures, performance evaluation, shared memory systems}, author      = {Zoran Radovi\'{c} and Erik Hagersten}, title       = {Hierarchical backoff locks for nonuniform communication architectures}, booktitle   = {Proceedings of the Ninth International Symposium on High-Performance Computer Architecture}, year        = {2003}, location    = {Anaheim, CA, USA}, pages       = {241-252}, publisher   = {IEEE}, } } @misc{gccValueLabels, keywords    = {gcc extension, value labels}, contributer = {pabuhr@plg}, key         = {Labels as Values}, author      = {{gcc Extension}}, title       = {Labels as Values}, year        = {since gcc-3}, howpublished= {\href{https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html} {https:\-//gcc.gnu.org/\-onlinedocs/\-gcc/\-Labels-as-Values.html}}, } @mastersthesis{Clarke90, keywords    = {concurrency, postponing 
requests}, @article{Pierce00, keywords    = {Scala}, keywords    = {Scala, polymorphism, subtyping, type inference}, contributer = {a3moss@uwaterloo.ca}, author      = {Pierce, Benjamin C. and Turner, David N.}, issn        = {0164-0925}, pages       = {1--44}, numpages    = {44}, url         = {http://doi.acm.org/10.1145/345099.345100}, doi         = {10.1145/345099.345100}, acmid       = {345100}, publisher   = {ACM}, address     = {New York, NY, USA}, keywords    = {polymorphism, subtyping, type inference}, } @article{Dice15, keywords    = {Concurrency, NUMA, hierarchical locks, locks, multicore, mutex, mutual exclusion, spin locks}, author      = {Dice, David and Marathe, Virendra J. and Shavit, Nir}, title       = {Lock Cohorting: A General Technique for Designing NUMA Locks}, journal     = {ACM Trans. Parallel Comput.}, issue_date  = {January 2015}, volume      = 1, number      = 2, month       = feb, year        = 2015, pages       = {13:1--13:42}, publisher   = {ACM}, address     = {New York, NY, USA}, } @article{Sundell08, journal     = sigplan, year        = 1989, month       = jun, volume = 24, number = 6, pages = {37-48}, month       = jun, volume      = 24, number      = 6, pages       = {37-48}, abstract    = { This paper describes a scheme we have used to manage a large year        = 1986, pages       = {313--326}, numpages    = {14}, publisher   = {ACM}, address     = {New York, NY, USA}, year        = 1986, pages       = {327--348}, numpages    = {22}, publisher   = {ACM}, address     = {New York, NY, USA}, year        = 2005, pages       = {146-196}, numpages    = {51}, publisher   = {ACM}, address     = {New York, NY, USA}, year        = 2000, pages       = {29-46}, note        = {OOPSLA'00, Oct. 15--19, 2000, Minneapolis, Minnesota, U.S.A.}, note        = {OOPSLA'00, Oct. 
15--19, 2000, Minneapolis, Minn., U.S.A.}, } location    = {San Diego, California, USA}, pages       = {101--112}, numpages    = {12}, url         = {http://doi.acm.org/10.1145/2535838.2535878}, doi         = {10.1145/2535838.2535878}, acmid       = {2535878}, publisher   = {ACM}, address     = {New York, NY, USA}, issn        = {0362-1340}, pages       = {30--42}, numpages    = {13}, url         = {http://doi.acm.org/10.1145/947586.947589}, doi         = {10.1145/947586.947589}, publisher   = {ACM}, address     = {New York, NY, USA} month       = 9, year        = 2005, } @article{Bauer15, keywords    = {resumption exceptions, theory}, contributer = {pabuhr@plg}, author      = {Andrej Bauer and Matija Pretnar}, title       = {Programming with Algebraic Effects and Handlers}, journal     = {Journal of Logical and Algebraic Methods in Programming}, publisher   = {Elsevier BV}, volume      = 84, number      = 1, month       = jan, year        = 2015, pages       = {108-123}, } issn        = {0164-0925}, pages       = {429-475}, url         = {http://doi.acm.org/10.1145/1133651.1133653}, doi         = {10.1145/1133651.1133653}, acmid       = {1133653}, publisher   = {ACM}, address     = {New York, NY, USA}, issn        = {0001-0782}, pages       = {565--569}, numpages    = {5}, url         = {http://doi.acm.org/10.1145/359545.359566}, doi         = {10.1145/359545.359566}, acmid       = {359566}, publisher   = {ACM}, address     = {New York, NY, USA} issn        = {0362-1340}, pages       = {145--147}, numpages    = {3}, url         = {http://doi.acm.org/10.1145/122598.122614}, doi         = {10.1145/122598.122614}, acmid       = {122614}, publisher   = {ACM}, address     = {New York, NY, USA}, issn        = {0362-1340}, pages       = {82--87}, numpages    = {6}, url         = {http://doi.acm.org/10.1145/947680.947688}, doi         = {10.1145/947680.947688}, publisher   = {ACM}, address     = {New York, NY, USA}, } @article{Cascaval08, author      = {Cascaval, Calin 
and Blundell, Colin and Michael, Maged and Cain, Harold W. and Wu, Peng and Chiras, Stefanie and Chatterjee, Siddhartha}, title       = {Software Transactional Memory: Why Is It Only a Research Toy?}, journal     = {Queue}, volume      = {6}, number      = {5}, month       = sep, year        = {2008}, pages       = {40:46--40:58}, publisher   = {ACM}, address     = {New York, NY, USA}, } @article{Dijkstra65a, keywords    = {N-thread software-solution mutual exclusion}, year        = 1974, pages       = {261-301}, issn        = {0360-0300}, doi         = {http://doi.acm.org/10.1145/356635.356640}, publisher   = {ACM}, address     = {New York, NY, USA}, publisher   = {ACM Press}, address     = {New York, NY, USA}, doi         = {http://doi.acm.org/10.1145/356586.356588}, } howpublished= {\href{https://projects.eclipse.org/proposals/trace-compass}{https://\-projects.eclipse.org/\-proposals/\-trace-compass}}, } @inproceedings{Boehm09, author      = {Boehm, Hans-J.}, title       = {Transactional Memory Should Be an Implementation Technique, Not a Programming Interface}, booktitle   = {Proceedings of the First USENIX Conference on Hot Topics in Parallelism}, series      = {HotPar'09}, year        = {2009}, location    = {Berkeley, California}, publisher   = {USENIX Association}, address     = {Berkeley, CA, USA}, } @article{Leroy00, keywords    = {type-systems, exceptions}, number      = {2}, pages       = {204-214}, month       = apr, year = 1988, month       = apr, year        = 1988, comment     = { Extended record types add fields to their base record.  Assignment issn        = {0004-5411}, pages       = {245--281}, numpages    = {37}, url         = {http://doi.acm.org/10.1145/62.2160}, doi         = {10.1145/62.2160}, acmid       = {2160}, publisher   = {ACM}, address     = {New York, NY, USA}, contributer = {pabuhr@plg}, author      = {Boehm, Hans-J. 
and Adve, Sarita V.}, title       = {You Don'T Know Jack About Shared Variables or Memory Models}, title       = {You Don't Know Jack About Shared Variables or Memory Models}, journal     = cacm, volume      = 55,
• ## doc/papers/concurrency/Paper.tex

• ## doc/papers/concurrency/examples/Fib.py

 r41efd33 while True: fn = fn1 + fn2; fn2 = fn1; fn1 = fn; yield fn f1 = Fib() # Local Variables: # # tab-width: 4 # # compile-command: "python3.5 Fib.py" # # compile-command: "python3.7 Fib.py" # # End: #
• ## doc/papers/concurrency/examples/Fib2.c

 r41efd33 #include void mary() { printf( "MARY\n" ); } #define FIB_INIT { 0 } typedef struct { int next; int fn1, fn2; } Fib; typedef struct { int restart; int fn1, fn2; } Fib; int fib( Fib * f ) { static void * states[] = { &&s1, &&s2, &&s3 }; goto *states[f->next]; static void * states[] = { &&s0, &&s1, &&s2 }; goto *states[f->restart]; s0: f->fn1 = 0; f->restart = 1; return f->fn1; s1: mary(); f->fn1 = 0; f->next = 1; return f->fn1; s2: mary(); f->fn2 = f->fn1; f->fn1 = 1; f->next = 2; f->restart = 2; return f->fn1; s3:; mary(); s2:; int fn = f->fn1 + f->fn2; f->fn2 = f->fn1;
• ## doc/papers/concurrency/examples/Fib2.py

 r41efd33 def Fib(): fn1, fn = 0, 1 fn1, fn = 1, 0 while True: yield fn1 yield fn fn1, fn = fn, fn1 + fn # Local Variables: # # tab-width: 4 # # compile-command: "python3.5 Fib2.py" # # compile-command: "python3.7 Fib2.py" # # End: #
• ## doc/papers/concurrency/examples/Fib3.c

 r41efd33 typedef struct { int fn1, fn; void * next; int restart, fn1, fn; } Fib; #define FibCtor { 1, 0, NULL } #define FibCtor { 0, 1, 0 } Fib * comain( Fib * f ) { if ( __builtin_expect(f->next != 0, 1) ) goto *f->next; f->next = &&s1; static void * states[] = {&&s0, &&s1}; goto *states[f->restart]; s0: f->restart = 1; for ( ;; ) { return f;
• ## doc/papers/concurrency/examples/FibRefactor.py

 r41efd33 # Local Variables: # # tab-width: 4 # # compile-command: "python3.5 FibRefactor.py" # # compile-command: "python3.7 FibRefactor.py" # # End: #
• ## doc/papers/concurrency/examples/Format.c

 r41efd33 typedef struct { void * next; int restart, g, b; char ch; int g, b; } Fmt; void comain( Fmt * f ) { if ( __builtin_expect(f->next != 0, 1) ) goto *f->next; f->next = &&s1; static void * states[] = {&&s0, &&s1}; goto *states[f->restart]; s0: f->restart = 1; for ( ;; ) { for ( f->g = 0; f->g < 5; f->g += 1 ) {                 // groups for ( f->b = 0; f->b < 4; f->b += 1 ) {         // blocks return; s1:;  while ( f->ch == '\n' ) return;         // ignore do { return;  s1: ; } while ( f->ch == '\n' );                              // ignore printf( "%c", f->ch );                                  // print character } int main() { Fmt fmt = { NULL }; Fmt fmt = { 0 }; comain( &fmt );                                                                         // prime for ( ;; ) {
• ## doc/papers/concurrency/examples/Format.cc

 r41efd33 for ( g = 0; g < 5; g += 1 ) { // groups of 5 blocks for ( b = 0; b < 4; b += 1 ) { // blocks of 4 characters //                                      for ( ;; ) { // for newline characters for ( ;; ) { // for newline characters suspend(); //                                              if ( ch != '\n' ) break; // ignore newline //                                      } if ( ch != '\n' ) break; // ignore newline } //                                      cout << ch; // print character } // Local Variables: // // tab-width: 4 // // compile-command: "u++-work -O2 -nodebubg Format.cc" // // compile-command: "u++-work -O2 -nodebug Format.cc" // // End: //
• ## doc/papers/concurrency/examples/Format.cfa

 r41efd33 for ( g = 0; g < 5; g += 1 ) {          // groups of 5 blocks for ( b = 0; b < 4; b += 1 ) {  // blocks of 4 characters //                              do { do { suspend(); //                              } while ( ch == '\n' || ch == '\t' ); } while ( ch == '\n' || ch == '\t' ); sout | ch;                                      // print character }
• ## doc/papers/concurrency/examples/Format.data

 r41efd33 abcdefghijklmnopqrstuvwxyzxxxxxxxxxxxxxx abcdefghijklmnop qrstuvwxyzx xxxxxxxxxxxxx
• ## doc/papers/concurrency/examples/Format.py

 r41efd33 for g in range( 5 ):    # groups of 5 blocks for b in range( 4 ): # blocks of 4 characters print( (yield), end='' ) # receive from send while True: ch = (yield) # receive from send if '\n' not in ch: break print( ch, end='' ) # receive from send print( '  ', end='' ) # block separator print()                                 # group separator print() input = "abcdefghijklmnop\nqrstuvwx\nyzxxxxxxxxxxxxxx\n" fmt = Format() next( fmt )                                                     # prime generator for i in range( 41 ): fmt.send( 'a' );                                # send to yield for i in input: fmt.send( i );                          # send to yield # Local Variables: # # tab-width: 4 # # compile-command: "python3.5 Format.py" # # compile-command: "python3.7 Format.py" # # End: #
• ## doc/papers/concurrency/examples/Format1.c

 r41efd33 typedef struct { void * next; int restart, g, b; char ch; int g, b; } Fmt; void format( Fmt * f ) { if ( __builtin_expect(f->next != 0, 1) ) goto *f->next; f->next = &&s1; static void * states[] = {&&s0, &&s1}; goto *states[f->restart]; s0: f->restart = 1; for ( ;; ) { for ( f->g = 0; f->g < 5; f->g += 1 ) {                 // groups for ( f->b = 0; f->b < 4; f->b += 1 ) {         // blocks return; s1: ; if ( f->ch == '\0' ) goto fini;                 // EOF ? s1: if ( f->ch == '\0' ) goto fini;           // EOF ? while ( f->ch == '\n' ) return;                 // ignore printf( "%c", f->ch );                                  // print character //                              printf( "%c", f->ch );                                  // print character } printf( " " );                                                          // block separator //                      printf( " " );                                                          // block separator } printf( "\n" );                                                                 // group separator //              printf( "\n" );                                                                 // group separator } fini: if ( f->g != 0 || f->b != 0 ) printf( "\n" ); fini:; //      if ( f->g != 0 || f->b != 0 ) printf( "\n" ); } int main() { Fmt fmt = { NULL }; Fmt fmt = { 0 }; format( &fmt );                                                                         // prime for ( ;; ) { scanf( "%c", &fmt.ch );                                                 // direct read into communication variable if ( feof( stdin ) ) break; fmt.ch = 'a'; for ( long int i = 0; i < 1000000000; i += 1 ) { //              scanf( "%c", &fmt.ch );                                                 // direct read into communication variable //        if ( feof( stdin ) ) break; format( &fmt ); } fmt.ch = '\0'; fmt.ch = '\0';                                                                          // sentential (EOF) format( &fmt 
); }
• ## doc/papers/concurrency/examples/PingPong.c

 r41efd33 typedef struct PingPong { int restart;                                                                            // style 1 int N, i; const char * name; int N, i; struct PingPong * partner; void * next; void * next;                                                                            // style 2 } PingPong; #define PPCtor( name, N ) { name, N, 0, NULL, NULL } #define PPCtor( name, N ) { 0, N, 0, name, NULL, NULL } void comain( PingPong * pp ) __attribute__(( noinline )); void comain( PingPong * pp ) { #if 0 if ( __builtin_expect(pp->next != 0, 1) ) goto *pp->next; #if 0 pp->next = &&here; asm( "mov  %0,%%rdi" : "=m" (pp) ); asm( "mov  %rdi,%rax" ); #ifndef OPT #ifdef PRINT asm( "add  $16, %rsp" ); #endif // PRINT asm( "popq %rbp" ); #endif // ! OPT #ifdef OPT #ifdef PRINT asm( "popq %rbx" ); #endif // PRINT #endif // OPT asm( "jmp comain" ); here: ; #endif // 0 pp->next = &&cycle; for ( ; pp->i < pp->N; pp->i += 1 ) { cycle: ; } // for #endif // 0 #if 1 static void * states[] = {&&s0, &&s1}; goto *states[pp->restart]; s0: pp->restart = 1; for ( ; pp->i < pp->N; pp->i += 1 ) { #ifdef PRINT printf( "%s %d\n", pp->name, pp->i ); #endif // PRINT asm( "mov %0,%%rdi" : "=m" (pp->partner) ); asm( "mov %rdi,%rax" ); #ifndef OPT #ifdef PRINT asm( "add$16, %rsp" ); #endif // PRINT asm( "popq %rbp" ); #endif // ! OPT #ifdef OPT #ifdef PRINT asm( "popq %rbx" ); #endif // PRINT #endif // OPT asm( "jmp  comain" ); s1: ; } // for #endif // 0 } // Local Variables: // // tab-width: 4 // // compile-command: "gcc-8 -g -DPRINT PingPong.c" // // compile-command: "gcc-9 -g -DPRINT PingPong.c" // // End: //
• ## doc/papers/concurrency/examples/Pingpong.py

 r41efd33 def PingPong( name, N ): partner = (yield)           # get partner yield                       # resume scheduler partner = yield                         # get partner yield                                           # resume scheduler for i in range( N ): print( name ) yield partner           # execute next yield partner                   # execute next print( "end", name ) def Scheduler(): n = (yield)                 # starting coroutine while True: n = next( n )           # schedule coroutine n = yield                                       # starting coroutine try: while True: n = next( n )           # schedule coroutine except StopIteration: pass pi = PingPong( "ping", 5 ) po = PingPong( "pong", 5 ) next( pi )                      # prime pi.send( po )                   # send partner next( po )                      # prime po.send( pi )                   # send partner next( pi )                                              # prime pi.send( po )                                   # send partner next( po )                                              # prime po.send( pi )                                   # send partner s = Scheduler(); next( s )                       # prime next( s )                                               # prime try: s.send( pi )                            # start cycle except StopIteration: print( "scheduler stop" ) except StopIteration:                   # scheduler stopped pass print( "stop" ) # Local Variables: # # tab-width: 4 # # compile-command: "python3.5 Pingpong.py" # # compile-command: "python3.7 Pingpong.py" # # End: #
• ## doc/papers/concurrency/examples/ProdCons.py

 r41efd33 def Prod( N ): cons = (yield)              # get cons yield                       # resume scheduler cons = yield                            # get cons yield                                           # resume scheduler for i in range( N ): print( "prod" ) yield cons              # execute next yield cons                              # execute next print( "end", "prod" ) def Cons( N ): prod = (yield)              # get prod yield                       # resume scheduler prod = yield                            # get prod yield                                           # resume scheduler for i in range( N ): print( "cons" ) yield prod              # execute next yield prod                              # execute next print( "end", "cons" ) def Scheduler(): n = (yield)                 # starting coroutine while True: n = next( n )           # schedule coroutine n = yield                                       # starting coroutine try: while True: n = next( n )           # schedule coroutine except StopIteration: pass prod = Prod( 5 ) cons = Cons( 5 ) next( prod )                    # prime prod.send( cons )               # send cons next( cons )                    # prime cons.send( prod )               # send prod next( prod )                                    # prime prod.send( cons )                               # send cons next( cons )                                    # prime cons.send( prod )                               # send prod s = Scheduler(); next( s )                       # prime next( s )                                               # prime try: s.send( prod )                          # start cycle except StopIteration: print( "scheduler stop" ) except StopIteration:                   # scheduler stopped pass print( "stop" ) # Local Variables: # # tab-width: 4 # # compile-command: "python3.5 ProdCons.py" # # compile-command: "python3.7 ProdCons.py" # # End: #
• ## doc/papers/concurrency/examples/RWMonitorEXT.cfa

 r41efd33 int rcnt, wcnt;                                                                         // number of readers/writer using resource }; void ?{}( ReadersWriter & rw ) with(rw) { rcnt = wcnt = 0; } void EndRead( ReadersWriter & mutex rw ) with(rw) { rcnt -= 1; } void EndWrite( ReadersWriter & mutex rw ) with(rw) { wcnt = 0; } void StartRead( ReadersWriter & mutex rw ) with(rw) { if ( wcnt > 0 ) waitfor( EndWrite, rw ); if ( wcnt > 0 ) waitfor( EndWrite : rw ); rcnt += 1; } void StartWrite( ReadersWriter & mutex rw ) with(rw) { if ( wcnt > 0 ) waitfor( EndWrite, rw ); else while ( rcnt > 0 ) waitfor( EndRead, rw ); if ( wcnt > 0 ) waitfor( EndWrite : rw ); else while ( rcnt > 0 ) waitfor( EndRead : rw ); wcnt = 1; } void ?{}( ReadersWriter & rw ) with(rw) { rcnt = wcnt = 0; } int readers( ReadersWriter & rw ) { return rw.rcnt; } void Read( ReadersWriter & rw ) { StartRead( rw ); EndWrite( rw ); } thread Worker { ReadersWriter &rw; } // for } int main() { enum { MaxTask = 5 }; } // main // Local Variables: // // tab-width: 4 // // compile-command: "cfa -O2 RWMonitor.cfa" // // compile-command: "cfa -O2 RWMonitorEXT.cfa" // // End: //
• ## doc/papers/concurrency/examples/Refactor.py

 r41efd33 # Local Variables: # # tab-width: 4 # # compile-command: "python3.5 Refactor.py" # # compile-command: "python3.7 Refactor.py" # # End: #
• ## doc/papers/concurrency/figures/FullCoroutinePhases.fig

 r41efd33 -2 1200 2 5 1 0 1 0 7 100 0 -1 0.000 0 0 1 0 4575.000 2437.500 4275 1875 4575 1800 4875 1875 5 1 0 1 0 7 100 0 -1 0.000 0 0 1 0 5175.000 2437.500 4875 1875 5175 1800 5475 1875 1 1 1.00 45.00 90.00 5 1 0 1 0 7 100 0 -1 0.000 0 0 1 0 4575.000 1537.500 4875 2100 4575 2175 4275 2100 5 1 0 1 0 7 100 0 -1 0.000 0 0 1 0 5175.000 1537.500 5475 2100 5175 2175 4875 2100 1 1 1.00 45.00 90.00 5 1 0 1 0 7 50 -1 -1 0.000 0 1 1 0 4207.500 1642.500 4125 1425 3975 1650 4200 1875 5 1 0 1 0 7 50 -1 -1 0.000 0 1 1 0 4807.500 1642.500 4725 1425 4575 1650 4800 1875 1 1 1.00 45.00 90.00 6 1575 1575 2700 2025 2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2175 1575 2400 1800 4 1 0 100 0 4 10 0.0000 2 165 300 1725 1950 ping\001 4 1 0 100 0 4 10 0.0000 2 135 360 2475 1950 pong\001 -6 6 3075 1575 4200 2025 6 3075 1575 4200 2025 2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2 1 1 1.00 45.00 90.00 3300 1575 3300 1800 3525 1575 3300 1800 2 1 0 1 0 7 100 0 -1 0.000 0 0 -1 1 0 2 1 1 1.00 45.00 90.00 3300 2025 3300 2250 4 1 0 100 0 0 10 0.0000 2 105 555 2100 1200 creation\001 4 1 0 100 0 4 10 0.0000 2 165 300 1725 1950 ping\001 4 1 0 100 0 4 10 0.0000 2 135 360 2475 1950 pong\001 4 1 0 100 0 4 10 0.0000 2 165 300 3300 1950 ping\001 4 1 0 100 0 4 10 0.0000 2 135 360 3300 2400 pong\001 4 1 0 100 0 0 10 0.0000 2 105 675 4575 1200 execution\001 4 1 0 100 0 4 10 0.0000 2 165 300 4275 2025 ping\001 4 1 0 100 0 4 10 0.0000 2 135 360 4875 2025 pong\001 4 1 0 100 0 0 10 0.0000 2 90 420 3300 1200 starter\001 3675 1575 3900 1800 4 1 0 100 0 4 10 0.0000 2 165 300 3225 1950 ping\001 4 1 0 100 0 4 10 0.0000 2 135 360 3975 1950 pong\001 -6 -6 4 1 0 100 0 4 10 0.0000 2 165 705 2100 1500 pgm main\001 4 1 0 100 0 4 10 0.0000 2 165 705 3300 1500 pgm main\001 4 1 0 100 0 4 10 0.0000 2 165 705 4500 1500 pgm main\001 4 1 0 100 0 4 10 0.0000 2 165 705 3600 1500 pgm main\001 4 1 0 100 0 4 10 0.0000 2 165 300 4875 2025 ping\001 4 1 0 100 0 4 10 0.0000 2 135 360 5475 2025 pong\001 4 1 0 
100 0 4 10 0.0000 2 165 705 5100 1500 pgm main\001 4 1 0 100 0 2 10 0.0000 2 105 540 2100 1275 creator\001 4 1 0 100 0 2 10 0.0000 2 105 495 3600 1275 starter\001 4 1 0 100 0 2 10 0.0000 2 105 690 5175 1275 execution\001
• ## doc/papers/concurrency/figures/RunTimeStructure.fig

 r41efd33 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 4500 3600 15 15 4500 3600 4515 3615 -6 6 2175 4650 7050 4950 1 3 0 1 0 0 0 0 0 0.000 1 0.0000 2250 4830 30 30 2250 4830 2280 4860 1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4200 4800 150 75 4200 4800 4350 4875 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3275 4800 100 100 3275 4800 3375 4800 6 3225 4125 4650 4425 6 4350 4200 4650 4350 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 4425 4275 15 15 4425 4275 4440 4290 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 4500 4275 15 15 4500 4275 4515 4290 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 4575 4275 15 15 4575 4275 4590 4290 -6 1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3450 4275 225 150 3450 4275 3675 4425 1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4050 4275 225 150 4050 4275 4275 4425 -6 6 6675 4125 7500 4425 6 7200 4200 7500 4350 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 7275 4275 15 15 7275 4275 7290 4290 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 7350 4275 15 15 7350 4275 7365 4290 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 7425 4275 15 15 7425 4275 7440 4290 -6 1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6900 4275 225 150 6900 4275 7125 4425 -6 6 6675 3525 8025 3975 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2 1 1 1.00 45.00 90.00 6675 3750 6975 3750 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2 1 1 1.00 45.00 90.00 7125 3750 7350 3750 2 2 0 1 -1 -1 0 0 -1 0.000 0 0 0 0 0 5 5400 4950 5400 4725 5175 4725 5175 4950 5400 4950 2 2 1 1 -1 -1 0 0 -1 3.000 0 0 0 0 0 5 6525 4950 6300 4950 6300 4725 6525 4725 6525 4950 4 0 -1 0 0 0 10 0.0000 2 105 450 6600 4875 cluster\001 4 0 -1 0 0 0 10 0.0000 2 105 660 5475 4875 processor\001 4 0 -1 0 0 0 10 0.0000 2 105 555 4425 4875 monitor\001 4 0 -1 0 0 0 10 0.0000 2 120 270 3450 4875 task\001 4 0 -1 0 0 0 10 0.0000 2 105 660 2325 4875 coroutine\001 -6 6 3450 1275 3750 1425 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 3525 1350 15 15 3525 1350 3540 1365 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 3600 1350 15 15 3600 1350 3615 1365 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 3675 1350 15 15 3675 1350 3690 1365 -6 6 5550 1275 5850 1425 1 3 0 1 -1 -1 0 0 
20 0.000 1 0.0000 5625 1350 15 15 5625 1350 5640 1365 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 5700 1350 15 15 5700 1350 5715 1365 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 5775 1350 15 15 5775 1350 5790 1365 7800 3975 7800 3525 7350 3525 7350 3975 7800 3975 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2 1 1 1.00 45.00 90.00 7800 3750 8025 3750 -6 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5550 2625 150 150 5550 2625 5700 2625 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4425 2850 150 150 4425 2850 4575 2850 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4650 2475 150 150 4650 2475 4800 2475 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3525 3600 150 150 3525 3600 3675 3600 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3975 3600 150 150 3975 3600 4125 3600 1 3 0 1 0 0 0 0 0 0.000 1 0.0000 3525 3600 30 30 3525 3600 3555 3630 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3975 2850 150 150 3975 2850 4125 2850 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 7200 2775 150 150 7200 2775 7350 2775 1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4650 1350 225 150 4650 1350 4875 1500 1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5250 1350 225 150 5250 1350 5475 1500 1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4050 1350 225 150 4050 1350 4275 1500 1 3 0 1 0 0 0 0 0 0.000 1 0.0000 2250 4830 30 30 2250 4830 2280 4860 1 3 0 1 0 0 0 0 0 0.000 1 0.0000 7200 2775 30 30 7200 2775 7230 2805 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3525 3600 150 150 3525 3600 3675 3600 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3875 4800 100 100 3875 4800 3975 4800 1 1 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4650 4800 150 75 4650 4800 4800 4875 2 2 0 1 -1 -1 0 0 -1 0.000 0 0 0 0 0 5 2400 4200 2400 3750 1950 3750 1950 4200 2400 4200 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2 1 1 1.00 45.00 90.00 6675 3975 6975 3975 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2 1 1 1.00 45.00 90.00 7050 2775 6825 2775 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 6825 2775 6825 3975 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 2 1 1 1.00 45.00 90.00 7125 3975 7350 3975 2 2 0 1 -1 -1 0 0 -1 0.000 0 0 0 0 0 5 7800 4200 7800 3750 7350 3750 7350 4200 7800 4200 2 1 0 1 -1 -1 0 0 
-1 0.000 0 0 -1 1 0 2 1 1 1.00 45.00 90.00 7800 3975 8025 3975 6825 2775 6825 3750 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 1 0 4 1 1 1.00 45.00 90.00 7875 3975 7875 2325 7200 2325 7200 2550 7875 3750 7875 2325 7200 2325 7200 2550 2 2 0 1 -1 -1 0 0 -1 0.000 0 0 0 0 0 5 5850 4950 5850 4725 5625 4725 5625 4950 5850 4950 2 2 1 1 -1 -1 0 0 -1 3.000 0 0 0 0 0 5 6975 4950 6750 4950 6750 4725 6975 4725 6975 4950 4 1 -1 0 0 0 10 0.0000 2 105 720 5550 4425 Processors\001 4 1 -1 0 0 0 10 0.0000 2 120 1005 4200 3225 Blocked Tasks\001 4 1 -1 0 0 0 10 0.0000 2 105 990 2175 3525 Discrete-event\001 4 1 -1 0 0 0 10 0.0000 2 135 795 2175 4350 preemption\001 4 0 -1 0 0 0 10 0.0000 2 150 1290 2325 4875 genrator/coroutine\001 4 0 -1 0 0 0 10 0.0000 2 120 270 4050 4875 task\001 4 0 -1 0 0 0 10 0.0000 2 105 450 7050 4875 cluster\001 4 0 -1 0 0 0 10 0.0000 2 105 660 5925 4875 processor\001 4 0 -1 0 0 0 10 0.0000 2 105 555 4875 4875 monitor\001
• ## doc/papers/concurrency/mail2

 r41efd33 Software: Practice and Experience Editorial Office Date: Tue, 12 Nov 2019 22:25:17 +0000 From: Richard Jones Reply-To: R.E.Jones@kent.ac.uk To: tdelisle@uwaterloo.ca, pabuhr@uwaterloo.ca Subject: Software: Practice and Experience - Decision on Manuscript ID SPE-19-0219 12-Nov-2019 Dear Dr Buhr, Many thanks for submitting SPE-19-0219 entitled "Advanced Control-flow and Concurrency in Cforall" to Software: Practice and Experience. The paper has now been reviewed and the comments of the referees are included at the bottom of this letter. The decision on this paper is that substantial further work is required. The referees have a number of substantial concerns. All the reviewers found the submission very hard to read; two of the reviewers state that it needs very substantial restructuring. These concerns must be addressed before your submission can be considered further. A revised version of your manuscript that takes into account the comments of the referees will be reconsidered for publication. Please note that submitting a revision of your manuscript does not guarantee eventual acceptance, and that your revision will be subject to re-review by the referees before a decision is rendered. You have 90 days from the date of this email to submit your revision. If you are unable to complete the revision within this time, please contact me to request an extension. You can upload your revised manuscript and submit it through your Author Center. Log into https://mc.manuscriptcentral.com/spe  and enter your Author Center, where you will find your manuscript title listed under "Manuscripts with Decisions". When submitting your revised manuscript, you will be able to respond to the comments made by the referee(s) in the space provided.  You can use this space to document any changes you make to the original manuscript. 
If you feel that your paper could benefit from English language polishing, you may wish to consider having your paper professionally edited for English language by a service such as Wiley's at http://wileyeditingservices.com. Please note that while this service will greatly improve the readability of your paper, it does not guarantee acceptance of your paper by the journal. Once again, thank you for submitting your manuscript to Software: Practice and Experience and I look forward to receiving your revision. Sincerely, Prof. Richard Jones Software: Practice and Experience R.E.Jones@kent.ac.uk Referee(s)' Comments to Author: Reviewing: 1 Comments to the Author This article presents the design and rationale behind the various threading and synchronization mechanisms of C-forall, a new low-level programming language.  This paper is very similar to a companion paper which I have also received: as the papers are similar, so will these reviews be --- in particular any general comments from the other review apply to this paper also. As far as I can tell, the article contains three main ideas: an asynchronous execution / threading model; a model for monitors to provide mutual exclusion; and an implementation.  The first two ideas are drawn together in Table 1: unfortunately this is on page 25 of 30 pages of text. Implementation choices and descriptions are scattered throughout the paper - and the sectioning of the paper seems almost arbitrary. The article is about its contributions.  Simply adding feature X to language Y isn't by itself a contribution, (when feature X isn't already a contribution).  The contribution can be in the design: the motivation, the space of potential design options, the particular design chosen and the rationale for that choice, or the resulting performance.  For example: why support two kinds of generators as well as user-level threads?  Why support both low and high level synchronization constructs?  
Similarly I would have found the article easier to follow if it was written top down, presenting the design principles, present the space of language features, justify chosen language features (and rationale) and those excluded, and then present implementation, and performance. Then the writing of the article is often hard to follow, to say the least. Two examples: section 3 "stateful functions" - I've some idea what that is (a function with Algol's "own" or C's "static" variables?) but in fact the paper has a rather more specific idea than that. The top of page 3 throws a whole lot of definitions at the reader "generator" "coroutine" "stackful" "stackless" "symmetric" "asymmetric" without ever stopping to define each one --- but then in footnote "C" takes the time to explain what C's "main" function is?  I cannot imagine a reader of this paper who doesn't know what "main" is in C; especially if they understand the other concepts already presented in the paper.  The start of section 3 then does the same thing: putting up a whole lot of definitions, making distinctions and comparisons, even talking about some runtime details, but the critical definition of a monitor doesn't appear until three pages later, at the start of section 5 on p15, lines 29-34 are a good, clear, description of what a monitor actually is.  That needs to come first, rather than being buried again after two sections of comparisons, discussions, implementations, and options that are ungrounded because they haven't told the reader what they are actually talking about.  First tell the reader what something is, then how they might use it (as programmers: what are the rules and restrictions) and only then start comparison with other things, other approaches, other languages, or implementations. The description of the implementation is similarly lost in the trees without ever really seeing the wood. 
Figure 19 is crucial here, but it's pretty much at the end of the paper, and comments about implementations are threaded throughout the paper without the context (fig 19) to understand what's going on.   The protocol for performance testing may just about suffice for C (although is N constantly ten million, or does it vary for each benchmark) but such evaluation isn't appropriate for garbage-collected or JITTed languages like Java or Go. other comments working through the paper - these are mostly low level and are certainly not comprehensive. p1 only a subset of C-forall extensions? p1 "has features often associated with object-oriented programming languages, such as constructors, destructors, virtuals and simple inheritance."   There's no need to quibble about this. Once a language has inheritance, it's hard to claim it's not object-oriented. p2 barging? signals-as-hints? p3 start your discussion of generations with a simple example of a C-forall generator.  Fig 1(b) might do: but put it inline instead of the python example - and explain the key rules and restrictions on the construct.  Then don't even start to compare with coroutines until you've presented, described and explained your coroutines... p3 I'd probably leave out the various "C" versions unless there are key points to make you can't make in C-forall. All the alternatives are just confusing. p4 but what's that "with" in Fig 1(B) p5 start with the high level features of C-forall generators... p5 why is the paper explaining networking protocols? p7 lines 1-9 (transforming generator to coroutine - why would I do any of this? Why would I want one instead of the other (do not use "stack" in your answer!) p10 last para "A coroutine must retain its last resumer to suspend back because the resumer is on a different stack. These reverse pointers allow suspend to cycle backwards, "  I've no idea what is going on here?  why should I care?  Shouldn't I just be using threads instead?  why not? 
p16 for the same reasons - what reasons? p17 if the multiple-monitor entry procedure really is novel, write a paper about that, and only about that. p23 "Loose Object Definitions" - no idea what that means.  in that section: you can't leave out JS-style dynamic properties.  Even in OOLs that (one way or another) allow separate definitions of methods (like Objective-C, Swift, Ruby, C#) at any time a runtime class has a fixed definition.  Quite why the detail about bit mask implementation is here anyway, I've no idea. p25 this cluster isn't a CLU cluster then? * conclusion should conclude the paper, not the related. Reviewing: 2 Comments to the Author This paper describes the concurrency features of an extension of C (whose name I will write as "C\/" here, for convenience), including much design-level discussion of the coroutine- and monitor-based features and some microbenchmarks exploring the current implementation's performance. The key message of the latter is that the system's concurrency abstractions are much lighter-weight than the threading found in mainstream C or Java implementations. There is much description of the system and its details, but nothing about (non-artificial) uses of it. Although the microbenchmark data is encouraging, arguably not enough practical experience with the system has been reported here to say much about either its usability advantages or its performance. As such, the main contribution of the paper seems to be to document the existence of the described system and to provide a detailed design rationale and (partial) tutorial. I believe that could be of interest to some readers, so an acceptable manuscript is lurking in here somewhere. Unfortunately, at present the writing style is somewhere between unclear and infuriating. 
It omits to define terms; it uses needlessly many terms for what are apparently (but not clearly) the same things; it interrupts itself rather than deliver the natural consequent of whatever it has just said; and so on. Section 5 is particularly bad in these regards -- see my detailed comments below. Fairly major additional efforts will be needed to turn the present text into a digestible design-and-tutorial document. I suspect that a shorter paper could do this job better than the present manuscript, which is overwrought in parts. p2: lines 4--9 are a little sloppy. It is not the languages but their popular implementations which "adopt" the 1:1 kernel threading model. line 10: "medium work" -- "medium-sized work"? line 18: "is all sequential to the compiler" -- not true in modern compilers, and in 2004 H-J Boehm wrote a tech report describing exactly why ("Threads cannot be implemented as a library", HP Labs). line 20: "knows the optimization boundaries" -- I found this vague. What's an example? line 31: this paragraph has made a lot of claims. Perhaps forward-reference to the parts of the paper that discuss each one. line 33: "so the reader can judge if" -- this reads rather passive-aggressively. Perhaps better: "... to support our argument that..." line 41: "a dynamic partitioning mechanism" -- I couldn't tell what this meant p3. Presenting concept of a "stateful function" as a new language feature seems odd. In C, functions often have local state thanks to static local variables (or globals, indeed). Of course, that has several limitations. Can you perhaps present your contributions by enumerating these limitations? See also my suggestion below about a possible framing centred on a strawman. line 2: "an old idea that is new again" -- this is too oblique lines 2--15: I found this to be a word/concept soup. Stacks, closures, generators, stackless stackful, coroutine, symmetric, asymmetric, resume/suspend versus resume/resume... 
there needs to be a more gradual and structured way to introduce all this, and ideally one that minimises redundancy. Maybe present it as a series of "definitions" each with its own heading, e.g. "A closure is stackless if its local state has statically known fixed size"; "A generator simply means a stackless closure." And so on. Perhaps also strongly introduce the word "activate" as a direct contrast with resume and suspend. These are just a flavour of the sort of changes that might make this paragraph into something readable. Continuing the thought: I found it confusing that by these definitions, a stackful closure is not a stack, even though logically the stack *is* a kind of closure (it is a representation of the current thread's continuation). lines 24--27: without explaining what the boost functor types mean, I don't think the point here comes across. line 34: "semantically coupled" -- I wasn't sure what this meant p4: the point of Figure 1 (C) was not immediately clear. It seems to be showing how one might "compile down" Figure 1 (B). Or is that Figure 1 (A)? It's right that the incidental language features of the system are not front-and-centre, but I'd appreciate some brief glossing of non-C language features as they appear. Examples are the square bracket notation, the pipe notation and the constructor syntax. These explanations could go in the caption of the figure which first uses them, perhaps. Overall I found the figure captions to be terse, and a missed opportunity to explain clearly what was going on. p5 line 23: "This restriction is removed..." -- give us some up-front summary of your contributions and the elements of the language design that will be talked about, so that this isn't an aside. This will reduce the "twisty passages" feeling that characterises much of the paper. line 40: "a killer asymmetric generator" -- this is stylistically odd, and the sentence about failures doesn't convincingly argue that C\/ will help with them. 
Have you any experience writing device drivers using C\/? Or any argument that the kinds of failures can be traced to the "stack-ripping" style that one is forced to use without coroutines? Also, a typo on line 41: "device drives". And saying "Windows/Linux" is sloppy... what does the cited paper actually say? p6 lines 13--23: this paragraph is difficult to understand. It seems to be talking about a control-flow pattern roughly equivalent to tail recursion. What is the high-level point, other than that this is possible? line 34: "which they call coroutines" -- a better way to make this point is presumably that the C++20 proposal only provides a specialised kind of coroutine, namely generators, despite its use of the more general word. line 47: "... due to dynamic stack allocation, execution..." -- this sentence doesn't scan. I suggest adding "and for" in the relevant places where currently there are only commas. p8 / Figure 5 (B) -- the GNU C extension of unary "&&" needs to be explained. The whole figure needs a better explanation, in fact. p9, lines 1--10: I wasn't sure this stepping-through really added much value. What are the truly important points to note about this code? p10: similarly, lines 3--27 again are somewhere between tedious and confusing. I'm sure the motivation and details of "starter semantics" can both be stated much more pithily. line 32: "a self-resume does not overwrite the last resumer" -- is this a hack or a defensible principled decision? p11: "a common source of errors" -- among beginners or among production code? Presumably the former. line 23: "with builtin and library" -- not sure what this means lines 31--36: these can be much briefer. The only important point here seems to be that coroutines cannot be copied. p12: line 1: what is a "task"? Does it matter? line 7: calling it "heap stack" seems to be a recipe for confusion. "Stack-and-heap" might be better, and contrast with "stack-and-VLS" perhaps. 
When "VLS" is glossed, suggest actually expanding its initials: say "length" not "size". line 21: are you saying "cooperative threading" is the same as "non-preemptive scheduling", or that one is a special case (kind) of the other? Both are defensible, but be clear. line 27: "mutual exclusion and synchronization" -- the former is a kind of the latter, so I suggest "and other forms of synchronization". line 30: "can either be a stackless or stackful" -- stray "a", but also, this seems to be switching from generic/background terminology to C\/-specific terminology. An expositional idea occurs: start the paper with a strawman naive/limited realisation of coroutines -- say, Simon Tatham's popular "Coroutines in C" web page -- and identify point by point what the limitations are and how C\/ overcomes them. Currently the presentation is often flat (lacking motivating contrasts) and backwards (stating solutions before problems). The foregoing approach might fix both of these. page 13: line 23: it seems a distraction to mention the Python feature here. p14 line 5: it seems odd to describe these as "stateless" just because they lack shared mutable state. It means the code itself is even more stateful. Maybe the "stack ripping" argument could usefully be given here. line 16: "too restrictive" -- would be good to have a reference to justify this, or at least give a sense of what the state-of-the-art performance in transactional memory systems is (both software and hardware) line 22: "simulate monitors" -- what about just *implementing* monitors? isn't that what these systems do? or is the point more about refining them somehow into something more specialised? p15: sections 4.1 and 4.2 seem adrift and misplaced. Split them into basic parts (which go earlier) and more advanced parts (e.g. barging, which can be explained later). 
line 31: "acquire/release" -- misses an opportunity to contrast the monitor's "enter/exit" abstraction with the less structured acquire/release of locks. p16 line 12: the "implicit" versus "explicit" point is unclear. Is it perhaps about the contract between an opt-in *discipline* and a language-enforced *guarantee*? line 28: no need to spend ages dithering about which one is default and which one is the explicit qualifier. Tell us what you decided, briefly justify it, and move on. p17: Figure 11: since the main point seems to be to highlight bulk acquire, include a comment which identifies the line where this is happening. line 2: "impossible to statically..." -- or dynamically. Doing it dynamically would be perfectly acceptable (locking is a dynamic operation after all) "guarantees acquisition order is consistent" -- assuming it's done in a single bulk acquire. p18: section 5.3: the text here is a mess. The explanations of "internal" versus "external" scheduling are unclear, and "signals as hints" is not explained. "... can cause thread starvation" -- means including a while loop, or not doing so? "There are three signalling mechanisms.." but the text does not follow that by telling us what they are. My own scribbled attempt at unpicking the internal/external thing: "threads already in the monitor, albeit waiting, have priority over those trying to enter". p19: line 3: "empty condition" -- explain that condition variables don't store anything. So being "empty" means that the queue of waiting threads (threads waiting to be signalled that the condition has become true) is empty. line 6: "... can be transformed into external scheduling..." -- OK, but give some motivation. p20: line 6: "mechnaism" lines 16--20: this is dense and can probably only be made clear with an example p21 line 21: clarify that nested monitor deadlock was describe earlier (in 5.2). (Is the repetition necessary?) 
line 27: "locks, and by extension monitors" -- this is true but the "by extension" argument is faulty. It is perfectly possible to use locks as a primitive and build a compositional mechanism out of them, e.g. transactions. p22 line 2: should say "restructured" line 33: "Implementing a fast subset check..." -- make clear that the following section explains how to do this. Restructuring the sections themselves could do this, or noting in the text. p23: line 3: "dynamic member adding, eg, JavaScript" -- needs to say "as permitted in JavaScript", and "dynamically adding members" is stylistically better p23: line 18: "urgent stack" -- back-reference to where this was explained before p24 line 7: I did not understand what was more "direct" about "direct communication". Also, what is a "passive monitor" -- just a monitor, given that monitors are passive by design? line 14 / section 5.9: this table was useful and it (or something like it) could be used much earlier on to set the structure of the rest of the paper. The explanation at present is too brief, e.g. I did not really understand the point about cases 7 and 8. p25 line 2: instead of casually dropping in a terse explanation for the newly intrdouced term "virtual processor", introduce it properly. Presumably the point is to give a less ambiguous meaning to "thread" by reserving it only for C\/'s green threads. Table 1: what does "No / Yes" mean? p26 line 15: "transforms user threads into fibres" -- a reference is needed to explain what "fibres" means... guessing it's in the sense of Adya et al. line 20: "Microsoft runtime" -- means Windows? lines 21--26: don't say "interrupt" to mean "signal", especially not without clear introduction. You can use "POSIX signal" to disambiguate from condition variables' "signal". p27 line 3: "frequency is usually long" -- that's a "time period" or "interval", not a frequency line 5: the lengthy quotation is not really necessary; just paraphrase the first sentence and move on. 
line 20: "to verify the implementation" -- I don't think that means what is intended Tables in section 7 -- too many significant figures. How many overall runs are described? What is N in each case? p29 line 2: "to eliminate this cost" -- arguably confusing since nowadays on commodity CPUs most of the benefits of inlining are not to do with call overheads, but from later optimizations enabled as a consequence of the inlining line 41: "a hierarchy" -- are they a hierarchy? If so, this could be explained earlier. Also, to say these make up "an integrated set... of control-flow features" verges on the tautologous. p30 line 15: "a common case being web servers and XaaS" -- that's two cases Reviewing: 3 Comments to the Author # Cforall review Overall, I quite enjoyed reading the paper. Cforall has some very interesting ideas. I did have some suggestions that I think would be helpful before final publication. I also left notes on various parts of the paper that I find confusing when reading, in hopes that it may be useful to you. ## Summary * Expand on the motivations for including both generator and coroutines, vs trying to build one atop the other * Expand on the motivations for having both symmetric and asymmetric coroutines * Comparison to async-await model adopted by other languages * C#, JS * Rust and its async/await model * Consider performance comparisons against node.js and Rust frameworks * Discuss performance of monitors vs finer-grained memory models and atomic operations found in other languages * Why both internal/external scheduling for synchronization? ## Generator/coroutines In general, this section was clear, but I thought it would be useful to provide a somewhat deeper look into why Cforall opted for the particular combination of features that it offers. 
I see three main differences from other languages: * Generators are not exposed as a "function" that returns a generator object, but rather as a kind of struct, with communication happening via mutable state instead of "return values". That is, the generator must be manually resumed and (if I understood) it is expected to store values that can then later be read (perhaps via methods), instead of having a yield  statement that yields up a value explicitly. * Both "symmetric" and "asymmetric" generators are supported, instead of only asymmetric. * Coroutines (multi-frame generators) are an explicit mechanism. In most other languages, coroutines are rather built by layering single-frame generators atop one another (e.g., using a mechanism like async-await), and symmetric coroutines are basically not supported. I'd like to see a bit more justification for Cforall including all the above mechanisms -- it seemed like symmetric coroutines were a useful building block for some of the user-space threading and custom scheduler mechanisms that were briefly mentioned later in the paper. In the discussion of coroutines, I would have expected a bit more of a comparison to the async-await mechanism offered in other languages. Certainly the semantics of async-await in JavaScript implies significantly more overhead (because each async fn is a distinct heap object). [Rust's approach avoids this overhead][zc], however, and might be worthy of a comparison (see the Performance section). ## Locks and threading ### Comparison to atomics overlooks performance There are several sections in the paper that compare against atomics -- for example, on page 15, the paper shows a simple monitor that encapsulates an integer and compares that to C++ atomics. Later, the paper compares the simplicity of monitors against the volatile quantifier from Java. The conclusion in section 8 also revisits this point. 
While I agree that monitors are simpler, they are obviously also significantly different from a performance perspective -- the paper doesn't seem to address this at all. It's plausible that (e.g.) the Aint monitor type described in the paper can be compiled and mapped to the specialized instructions offered by hardware, but I didn't see any mention of how this would be done. There is also no mention of the more nuanced memory ordering relations offered by C++11 and how one might achieve similar performance characteristics in Cforall (perhaps the answer is that one simply doesn't need to; I think that's defensible, but worth stating explicitly). ### Justification for external scheduling feels lacking Cforall includes both internal and external scheduling; I found the explanation for the external scheduling mechanism to be lacking in justification. Why include both mechanisms when most languages seem to make do with only internal scheduling? It would be useful to show some scenarios where external scheduling is truly more powerful. I would have liked to see some more discussion of external scheduling and how it  interacts with software engineering best practices. It seems somewhat similar to AOP in certain regards. It seems to add a bit of "extra semantics" to monitor methods, in that any method may now also become a kind of synchronization point. The "open-ended" nature of this feels like it could easily lead to subtle bugs, particularly when code refactoring occurs (which may e.g. split an existing method into two). This seems particularly true if external scheduling can occur across compilation units -- the paper suggested that this is true, but I wasn't entirely clear. I would have also appreciated a few more details on how external scheduling is implemented. It seems to me that there must be some sort of "hooks" on mutex methods so that they can detect whether some other function is waiting on them and awaken those blocked threads. 
I'm not sure how such hooks are inserted, particularly across compilation units. The material in Section 5.6 didn't quite clarify the matter for me. For example, it left me somewhat confused about whether the f and g functions declared were meant to be local to a translation unit, or shared with other units. ### Presentation of monitors is somewhat confusing I found myself confused fairly often in the section on monitors. I'm just going to leave some notes here on places that I got confused, in hopes that it could be useful to you as feedback on writing that might want to be clarified. To start, I did not realize that the mutex_opt notation was a keyword, I thought it was a type annotation. I think this could be called out more explicitly. Later, in section 5.2, the paper discusses `nomutex` annotations, which initially threw me, as they had not been introduced (now I realize that this paragraph is there to justify why there is no such keyword). The paragraph might be rearranged to make that clearer, perhaps by leading with the choice that Cforall made. On page 17, the paper states that "acquiring multiple monitors is safe from deadlock", but this could be stated a bit more precisely: acquiring multiple monitors in a bulk-acquire is safe from deadlock (deadlock can still result from nested acquires). On page 18, the paper states that wait states do not have to be enclosed in loops, as there is no concern of barging. This seems true but there are also other reasons to use loops (e.g., if there are multiple reasons to notify on the same condition). Thus the statement initially surprised me, as barging is only one of many reasons that I typically employ loops around waits. I did not understand the diagram in Figure 12 for some time. Initially, I thought that it was generic to all monitors, and I could not understand the state space. It was only later that I realized it was specific to your example. 
Updating the caption from "Monitor scheduling" to "Monitor scheduling in the example from Fig 13" might have helped me quite a bit. I spent quite some time reading the boy/girl dating example (\*) and I admit I found it somewhat confusing. For example, I couldn't tell whether there were supposed to be many "girl" threads executing at once, or if there was only supposed to be one girl and one boy thread executing in a loop. Are the girl/boy threads supposed to invoke the girl/boy methods or vice versa? Surely there is some easier way to set this up? I believe that when reading the paper I convinced myself of how it was supposed to be working, but I'm writing this review some days later, and I find myself confused all over again and not able to easily figure it out. (\*) as an aside, I would consider modifying the example to some other form of matching, like customers and support personnel. ## Related work The paper offered a number of comparisons to Go, C#, Scala, and so forth, but seems to have overlooked another recent language, Rust. In many ways, Rust seems to be closest in philosophy to Cforall, so it seems like an odd omission. I already mentioned above that Rust is in the process of shipping [async-await syntax][aa], which is definitely an alternative to the generator/coroutine approach in Cforall (though one with clear pros/cons). ## Performance In the performance section in particular, you might consider comparing against some of the Rust web servers and threading systems. For example, actix is top of the [single query TechEmpower Framework benchmarks][sq], and tokio is near the top of the [plaintext benchmarks][pt] (hyper, the top, is more of an HTTP framework, though it is also written in Rust). It would seem worth trying to compare their "context switching" costs as well -- I believe both actix and tokio have a notion of threads that could be readily compared. 
Another addition that might be worth considering is to compare against node.js promises, although I think the comparison to process creation is not as clean. That said, I think that the performance comparison is not a big focus of the paper, so it may not be necessary to add anything to it. ## Authorship of this review I'm going to sign this review. This review was authored by Nicholas D. Matsakis. In the interest of full disclosure, I'm heavily involved in the Rust project, although I don't think that influenced this review in particular. Feel free to reach out to me for clarifying questions. ## Links [aa]: https://blog.rust-lang.org/2019/09/30/Async-await-hits-beta.html [zc]: https://aturon.github.io/blog/2016/08/11/futures/ [sq]: https://www.techempower.com/benchmarks/#section=data-r18&hw=ph&test=db [pt]: https://www.techempower.com/benchmarks/#section=data-r18&hw=ph&test=plaintext Subject: Re: manuscript SPE-19-0219 To: "Peter A. Buhr" From: Richard Jones Date: Tue, 12 Nov 2019 22:43:55 +0000 Dear Dr Buhr You should have received a decision letter on this today. I am sorry that this has taken so long. Unfortunately SP&E receives a lot of submissions and getting reviewers is a perennial problem. Regards Richard Peter A. Buhr wrote on 11/11/2019 13:10: >     26-Jun-2019 >     Your manuscript entitled "Advanced Control-flow and Concurrency in Cforall" >     has been received by Software: Practice and Experience. It will be given >     full consideration for publication in the journal. > > Hi, it has been over 4 months since submission of our manuscript SPE-19-0219 > with no response. > > Currently, I am refereeing a paper for IEEE that already cites our prior SP&E > paper and the Master's thesis forming the basis of the SP&E paper under > review. Hence our work is apropos and we want to get it disseminated as soon as > possible. > > [3] A. Moss, R. Schluntz, and P. A. 
Buhr, "Cforall: Adding modern programming >      language features to C," Software - Practice and Experience, vol. 48, >      no. 12, pp. 2111-2146, 2018. > > [4] T. Delisle, "Concurrency in C for all," Master's thesis, University of >      Waterloo, 2018.  [Online].  Available: >      https://uwspace.uwaterloo.ca/bitstream/handle/10012/12888 Date: Mon, 13 Jan 2020 05:33:15 +0000 From: Richard Jones Reply-To: R.E.Jones@kent.ac.uk To: pabuhr@uwaterloo.ca Subject: Revision reminder - SPE-19-0219 13-Jan-2020 Dear Dr Buhr SPE-19-0219 This is a reminder that your opportunity to revise and re-submit your manuscript will expire 28 days from now. If you require more time please contact me directly and I may grant an extension to this deadline, otherwise the option to submit a revision online, will not be available. I look forward to receiving your revision. Sincerely, Prof. Richard Jones Editor, Software: Practice and Experience https://mc.manuscriptcentral.com/spe Date: Wed, 5 Feb 2020 04:22:18 +0000 From: Aaron Thomas Reply-To: speoffice@wiley.com To: tdelisle@uwaterloo.ca, pabuhr@uwaterloo.ca Subject: SPE-19-0219.R1 successfully submitted 04-Feb-2020 Dear Dr Buhr, Your manuscript entitled "Advanced Control-flow and Concurrency in Cforall" has been successfully submitted online and is presently being given full consideration for publication in Software: Practice and Experience. Your manuscript number is SPE-19-0219.R1.  Please mention this number in all future correspondence regarding this submission. You can view the status of your manuscript at any time by checking your Author Center after logging into https://mc.manuscriptcentral.com/spe.  If you have difficulty using this site, please click the 'Get Help Now' link at the top right corner of the site. Thank you for submitting your manuscript to Software: Practice and Experience. Sincerely, Software: Practice and Experience Editorial Office
Note: See TracChangeset for help on using the changeset viewer.