Changes in / [3cd5fdd:c6a1e8a]

Files: 14 added, 11 edited
doc/LaTeXmacros/common.tex
r3cd5fdd → rc6a1e8a

   11   11    %% Created On : Sat Apr 9 10:06:17 2016
   12   12    %% Last Modified By : Peter A. Buhr
   13       -  %% Last Modified On : Mon Jul 9 08:28:05 2018
   14       -  %% Update Count : 380
        13 +  %% Last Modified On : Fri May 24 07:59:54 2019
        14 +  %% Update Count : 382
   15   15    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
   16   16
   …    …
   61   61    \newlength{\gcolumnposn} % temporary hack because lstlisting does not handle tabs correctly
   62   62    \newlength{\columnposn}
   63       -  \setlength{\gcolumnposn}{2.5in}
        63 +  \setlength{\gcolumnposn}{2.75in}
   64   64    \setlength{\columnposn}{\gcolumnposn}
   65   65    \newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}}
doc/bibliography/pl.bib
r3cd5fdd rc6a1e8a 2585 2585 2586 2586 @article{Tarjan75, 2587 keywords= {union-find},2588 contributer= {a3moss@uwaterloo.ca},2589 author= {Tarjan, Robert Endre},2590 title= {Efficiency of a Good But Not Linear Set Union Algorithm},2591 journal= {J. ACM},2592 issue_date= {April 1975},2593 volume= {22},2594 number= {2},2595 month= apr,2596 year= {1975},2597 issn= {0004-5411},2598 pages= {215--225},2599 numpages= {11},2600 url= {http://doi.acm.org/10.1145/321879.321884},2601 doi= {10.1145/321879.321884},2602 acmid= {321884},2603 publisher= {ACM},2604 address= {New York, NY, USA},2587 keywords = {union-find}, 2588 contributer = {a3moss@uwaterloo.ca}, 2589 author = {Tarjan, Robert Endre}, 2590 title = {Efficiency of a Good But Not Linear Set Union Algorithm}, 2591 journal = {J. ACM}, 2592 issue_date = {April 1975}, 2593 volume = {22}, 2594 number = {2}, 2595 month = apr, 2596 year = {1975}, 2597 issn = {0004-5411}, 2598 pages = {215--225}, 2599 numpages = {11}, 2600 url = {http://doi.acm.org/10.1145/321879.321884}, 2601 doi = {10.1145/321879.321884}, 2602 acmid = {321884}, 2603 publisher = {ACM}, 2604 address = {New York, NY, USA}, 2605 2605 } 2606 2606 … … 2623 2623 journal = ipl, 2624 2624 year = 1980, 2625 month = apr, volume = 10, number = 3, pages = {120-123}, 2625 month = apr, 2626 volume = 10, 2627 number = 3, 2628 pages = {120-123}, 2626 2629 comment = { 2627 2630 The ``two-pass'' algorithm. An upward pass over a parse tree … … 2657 2660 } 2658 2661 2659 @ InProceedings{chambers89a,2662 @inproceedings{chambers89a, 2660 2663 keywords = {maps, delegation}, 2661 2664 author = "Craig Chambers and David Ungar and Elgin Lee", 2662 title = "An Efficient Implementation of {SELF}, a Dynamically-Typed 2663 Object-Oriented Language Based on Prototypes", 2665 title = "An Efficient Implementation of {SELF}, a Dynamically-Typed Object-Oriented Language Based on Prototypes", 2664 2666 crossref = "OOPSLA89", 2665 2667 pages = {49-70} 2666 2668 } 2667 2669 2670 @misc{Turley99, 2671 keywords = {embedded system, micrprocessor}, 2672 contributer = {pabuhr@plg}, 2673 author = {Jim Turley}, 2674 title = {Embedded Processors by the Numbers}, 2675 year = 1999, 2676 month = may, 2677 note = {Electronic Engineering Times}, 2678 howpublished= {\href{https://www.eetimes.com/author.asp?sectionid=36&doc_id=1287712} 2679 {https://\-www.eetimes.com/\-author.asp?sectionid=\-36&doc_id=1287712}}, 2680 } 2681 2668 2682 @article{oop:encapsulation, 2669 2683 keywords = {Encapsulation, Inheritance, Subclasses, Multiple Inheritance}, 2670 2684 contributer = {gjditchfield@plg}, 2671 2685 author = {Alan Snyder}, 2672 title = {Encapsulation and Inheritance in Object-Oriented Programming 2673 Languages}, 2686 title = {Encapsulation and Inheritance in Object-Oriented Programming Languages}, 2674 2687 journal = sigplan, 2675 2688 volume = {21}, number = {11}, … … 2706 2719 title = {Encapsulators: A New Software Paradigm in Smalltalk-80}, 2707 2720 journal = sigplan, 2708 volume = {21}, number = {11}, 2721 volume = {21}, 2722 number = {11}, 2709 2723 pages = {341-346}, 2710 month = nov, year = 1986, 2724 month = nov, 2725 year = 1986, 2711 2726 comment = { 2712 2727 Encapsulators are objects that surround other objects. -
doc/papers/concurrency/Makefile
r3cd5fdd → rc6a1e8a

   24   24
   25   25    PICTURES = ${addsuffix .pstex, \
        26 +  FullProdConsStack \
        27 +  FullCoroutinePhases \
        28 +  corlayout \
   26   29    monitor \
   27   30    ext_monitor \
doc/papers/concurrency/Paper.tex
r3cd5fdd rc6a1e8a 153 153 auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, __const, __const__, 154 154 coroutine, disable, dtype, enable, exception, __extension__, fallthrough, fallthru, finally, 155 __float80, float80, __float128, float128, forall, ftype, _Generic, _Imaginary, __imag, __imag__,155 __float80, float80, __float128, float128, forall, ftype, generator, _Generic, _Imaginary, __imag, __imag__, 156 156 inline, __inline, __inline__, __int128, int128, __label__, monitor, mutex, _Noreturn, one_t, or, 157 otype, restrict, __restrict, __restrict__, __signed, __signed__, _Static_assert, thread,157 otype, restrict, __restrict, __restrict__, __signed, __signed__, _Static_assert, suspend, thread, 158 158 _Thread_local, throw, throwResume, timeout, trait, try, ttype, typeof, __typeof, __typeof__, 159 159 virtual, __volatile, __volatile__, waitfor, when, with, zero_t}, … … 231 231 } 232 232 233 \newbox\myboxA 234 \newbox\myboxB 235 \newbox\myboxC 236 \newbox\myboxD 237 233 238 \title{\texorpdfstring{Advanced Control-flow and Concurrency in \protect\CFA}{Advanced Control-flow in Cforall}} 234 239 … … 247 252 This paper discusses the design philosophy and implementation of its advanced control-flow and concurrent/parallel features, along with the supporting runtime. 248 253 These features are created from scratch as ISO C has only low-level and/or unimplemented concurrency, so C programmers continue to rely on library features like C pthreads. 249 \CFA introduces modern language-level control-flow mechanisms, like coroutines, user-level threading, and monitors for mutual exclusion and synchronization.254 \CFA introduces modern language-level control-flow mechanisms, like generators, coroutines, user-level threading, and monitors for mutual exclusion and synchronization. 250 255 Library extension for executors, futures, and actors are built on these basic mechanisms. 251 The runtime provides significant programmer simplification and safety by eliminating spurious wakeup and reducingmonitor barging.256 The runtime provides significant programmer simplification and safety by eliminating spurious wakeup and optional monitor barging. 252 257 The runtime also ensures multiple monitors can be safely acquired \emph{simultaneously} (deadlock free), and this feature is fully integrated with all monitor synchronization mechanisms. 253 All languagefeatures integrate with the \CFA polymorphic type-system and exception handling, while respecting the expectations and style of C programmers.258 All control-flow features integrate with the \CFA polymorphic type-system and exception handling, while respecting the expectations and style of C programmers. 254 259 Experimental results show comparable performance of the new features with similar mechanisms in other concurrent programming-languages. 255 260 }% 256 261 257 \keywords{ coroutines, concurrency, parallelism, threads, monitors, runtime, C, \CFA (Cforall)}262 \keywords{generator, coroutine, concurrency, parallelism, thread, monitor, runtime, C, \CFA (Cforall)} 258 263 259 264 … … 285 290 As a result, languages like Java, Scala~\cite{Scala}, Objective-C~\cite{obj-c-book}, \CCeleven~\cite{C11}, and C\#~\cite{Csharp} adopt the 1:1 kernel-threading model, with a variety of presentation mechanisms. 
286 291 From 2000 onwards, languages like Go~\cite{Go}, Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, D~\cite{D}, and \uC~\cite{uC++,uC++book} have championed the M:N user-threading model, and many user-threading libraries have appeared~\cite{Qthreads,MPC,BoostThreads}, including putting green threads back into Java~\cite{Quasar}. 287 The main argument for user-level threading is that they are lighter weight than kernel threads (locking and context switching do not cross the kernel boundary), so there is less restriction on programming styles that encourage large numbers of threads performing smallerwork-units to facilitate load balancing by the runtime~\cite{Verch12}.292 The main argument for user-level threading is that they are lighter weight than kernel threads (locking and context switching do not cross the kernel boundary), so there is less restriction on programming styles that encourage large numbers of threads performing medium work-units to facilitate load balancing by the runtime~\cite{Verch12}. 288 293 As well, user-threading facilitates a simpler concurrency approach using thread objects that leverage sequential patterns versus events with call-backs~\cite{vonBehren03}. 289 294 Finally, performant user-threading implementations (both time and space) are largely competitive with direct kernel-threading implementations, while achieving the programming advantages of high concurrency levels and safety. … … 300 305 301 306 Finally, it is important for a language to provide safety over performance \emph{as the default}, allowing careful reduction of safety for performance when necessary. 302 Two concurrency violations of this philosophy are \emph{spurious wakeup} and \emph{barging}, i.e., random wakeup~\cite[\S~8]{Buhr05a} and signalling-as-hints~\cite[\S~8]{Buhr05a}, where one begats the other. 303 If you believe spurious wakeup is a foundational concurrency property, than unblocking (signalling) a thread is always a hint. 304 If you \emph{do not} believe spurious wakeup is foundational, than signalling-as-hints is a performance decision. 305 Most importantly, removing spurious wakeup and signals-as-hints makes concurrent programming significantly safer because it removes local non-determinism. 306 Clawing back performance where the local non-determinism is unimportant, should be an option not the default. 307 Two concurrency violations of this philosophy are \emph{spurious wakeup} and \emph{barging}, i.e., random wakeup~\cite[\S~8]{Buhr05a} and signals-as-hints~\cite[\S~8]{Buhr05a}, where one is a consequence of the other, i.e., once there is spurious wakeup, signals-as-hints follows. 308 However, spurious wakeup is \emph{not} a foundational concurrency property~\cite[\S~8]{Buhr05a}, it is a performance design choice. 309 Similarly, signals-as-hints is also a performance decision. 310 We argue removing spurious wakeup and signals-as-hints makes concurrent programming significantly safer because it removes local non-determinism and matches with programmer expectation. 311 (Experience by the authors teaching concurrency is that students are highly confused by these semantics.) 312 Clawing back performance, when local non-determinism is unimportant, should be an option not the default. 307 313 308 314 \begin{comment} … … 327 333 328 334 \CFA embraces user-level threading, language extensions for advanced control-flow, and safety as the default. 
329 We present comparative examples so the reader can judge if the \CFA control-flow extensions are better and safer than those in or proposed for \Celeven, \CC and other concurrent, imperative programminglanguages, and perform experiments to show the \CFA runtime is competitive with other similar mechanisms.335 We present comparative examples so the reader can judge if the \CFA control-flow extensions are better and safer than those in or proposed for \Celeven, \CC, and other concurrent, imperative programming-languages, and perform experiments to show the \CFA runtime is competitive with other similar mechanisms. 330 336 The main contributions of this work are: 331 337 \begin{itemize} 332 338 \item 333 expressive language-levelcoroutines and user-level threading, which respect the expectations of C programmers.339 language-level generators, coroutines and user-level threading, which respect the expectations of C programmers. 334 340 \item 335 monitor synchronization without barging. 336 \item 337 safely acquiring multiple monitors \emph{simultaneously} (deadlock free), while seamlessly integrating this capability with all monitor synchronization mechanisms. 341 monitor synchronization without barging, and the ability to safely acquiring multiple monitors \emph{simultaneously} (deadlock free), while seamlessly integrating these capability with all monitor synchronization mechanisms. 338 342 \item 339 343 providing statically type-safe interfaces that integrate with the \CFA polymorphic type-system and other language features. … … 343 347 a runtime system with no spurious wakeup. 344 348 \item 345 experimental results showing comparable performance of the new features with similar mechanisms in other concurrent programming-languages. 349 a dynamic partitioning mechanism to segregate the execution environment for specialized requirements. 350 \item 351 a non-blocking I/O library 352 \item 353 experimental results showing comparable performance of the new features with similar mechanisms in other programming languages. 346 354 \end{itemize} 347 355 356 357 \section{Stateful Function} 358 359 The generator/coroutine provides a stateful function, which is an old idea~\cite{Conway63,Marlin80} that is new again~\cite{C++20Coroutine19}. 360 A stateful function allows execution to be temporarily suspended and later resumed, e.g., plugin, device driver, finite-state machine. 361 Hence, a stateful function may not end when it returns to its caller, allowing it to be restarted with the data and execution location present at the point of suspension. 362 This capability is accomplished by retaining a data/execution \emph{closure} between invocations. 363 If the closure is fixed size, we call it a \emph{generator} (or \emph{stackless}), and its control flow is restricted, e.g., suspending outside the generator is prohibited. 364 If the closure is variable sized, we call it a \emph{coroutine} (or \emph{stackful}), and as the names implies, often implemented with a separate stack with no programming restrictions. 365 Hence, refactoring a stackless coroutine may require changing it to stackful. 366 A foundational property of all \emph{stateful functions} is that resume/suspend \emph{do not} cause incremental stack growth, i.e., resume/suspend operations are remembered through the closure not the stack. 367 As well, activating a stateful function is \emph{asymmetric} or \emph{symmetric}, identified by resume/suspend (no cycles) and resume/resume (cycles). 
368 A fixed closure activated by modified call/return is faster than a variable closure activated by context switching. 369 Additionally, any storage management for the closure (especially in unmanaged languages, i.e., no garbage collection) must also be factored into design and performance. 370 Therefore, selecting between stackless and stackful semantics is a tradeoff between programming requirements and performance, where stackless is faster and stackful is more general. 371 Note, creation cost is amortized across usage, so activation cost is usually the dominant factor. 372 373 374 \begin{figure} 375 \centering 376 \begin{lrbox}{\myboxA} 377 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 378 typedef struct { 379 int fn1, fn; 380 } Fib; 381 #define FibCtor { 1, 0 } 382 int fib( Fib * f ) { 383 384 385 386 int fn = f->fn; f->fn = f->fn1; 387 f->fn1 = f->fn + fn; 388 return fn; 389 390 } 391 int main() { 392 Fib f1 = FibCtor, f2 = FibCtor; 393 for ( int i = 0; i < 10; i += 1 ) 394 printf( "%d %d\n", 395 fib( &f1 ), fib( &f2 ) ); 396 } 397 \end{cfa} 398 \end{lrbox} 399 400 \begin{lrbox}{\myboxB} 401 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 402 `generator` Fib { 403 int fn1, fn; 404 }; 405 406 void `main(Fib & fib)` with(fib) { 407 408 [fn1, fn] = [1, 0]; 409 for () { 410 `suspend;` 411 [fn1, fn] = [fn, fn + fn1]; 412 413 } 414 } 415 int main() { 416 Fib f1, f2; 417 for ( 10 ) 418 sout | `resume( f1 )`.fn 419 | `resume( f2 )`.fn; 420 } 421 \end{cfa} 422 \end{lrbox} 423 424 \begin{lrbox}{\myboxC} 425 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 426 typedef struct { 427 int fn1, fn; void * `next`; 428 } Fib; 429 #define FibCtor { 1, 0, NULL } 430 Fib * comain( Fib * f ) { 431 if ( f->next ) goto *f->next; 432 f->next = &&s1; 433 for ( ;; ) { 434 return f; 435 s1:; int fn = f->fn + f->fn1; 436 f->fn1 = f->fn; f->fn = fn; 437 } 438 } 439 int main() { 440 Fib f1 = FibCtor, f2 = FibCtor; 441 for ( int i = 0; i < 10; i += 1 ) 442 printf("%d %d\n",comain(&f1)->fn, 443 comain(&f2)->fn); 444 } 445 \end{cfa} 446 \end{lrbox} 447 448 \subfloat[C asymmetric generator]{\label{f:CFibonacci}\usebox\myboxA} 449 \hspace{3pt} 450 \vrule 451 \hspace{3pt} 452 \subfloat[\CFA asymmetric generator]{\label{f:CFAFibonacciGen}\usebox\myboxB} 453 \hspace{3pt} 454 \vrule 455 \hspace{3pt} 456 \subfloat[C generator implementation]{\label{f:CFibonacciSim}\usebox\myboxC} 457 \caption{Fibonacci (output) Asymmetric Generator} 458 \label{f:FibonacciAsymmetricGenerator} 459 460 \bigskip 461 462 \begin{lrbox}{\myboxA} 463 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 464 `generator Fmt` { 465 char ch; 466 int g, b; 467 }; 468 void ?{}( Fmt & fmt ) { `resume(fmt);` } // constructor 469 void ^?{}( Fmt & f ) with(f) { $\C[1.75in]{// destructor}$ 470 if ( g != 0 || b != 0 ) sout | nl; } 471 void `main( Fmt & f )` with(f) { 472 for () { $\C{// until destructor call}$ 473 for ( ; g < 5; g += 1 ) { $\C{// groups}$ 474 for ( ; b < 4; b += 1 ) { $\C{// blocks}$ 475 `suspend;` $\C{// wait for character}$ 476 while ( ch == '\n' ) `suspend;` // ignore 477 sout | ch; // newline 478 } sout | " "; // block spacer 479 } sout | nl; // group newline 480 } 481 } 482 int main() { 483 Fmt fmt; $\C{// fmt constructor called}$ 484 for () { 485 sin | fmt.ch; $\C{// read into generator}$ 486 if ( eof( sin ) ) break; 487 `resume( fmt );` 488 } 489 490 } $\C{// fmt destructor called}\CRT$ 491 \end{cfa} 492 \end{lrbox} 493 494 \begin{lrbox}{\myboxB} 495 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 496 typedef struct { 497 void * next; 498 char ch; 499 int g, b; 500 } Fmt; 
501 void comain( Fmt * f ) { 502 if ( f->next ) goto *f->next; 503 f->next = &&s1; 504 for ( ;; ) { 505 for ( f->g = 0; f->g < 5; f->g += 1 ) { 506 for ( f->b = 0; f->b < 4; f->b += 1 ) { 507 return; 508 s1:; while ( f->ch == '\n' ) return; 509 printf( "%c", f->ch ); 510 } printf( " " ); 511 } printf( "\n" ); 512 } 513 } 514 int main() { 515 Fmt fmt = { NULL }; comain( &fmt ); // prime 516 for ( ;; ) { 517 scanf( "%c", &fmt.ch ); 518 if ( feof( stdin ) ) break; 519 comain( &fmt ); 520 } 521 if ( fmt.g != 0 || fmt.b != 0 ) printf( "\n" ); 522 } 523 \end{cfa} 524 \end{lrbox} 525 526 \subfloat[\CFA asymmetric generator]{\label{f:CFAFormatGen}\usebox\myboxA} 527 \hspace{3pt} 528 \vrule 529 \hspace{3pt} 530 \subfloat[C generator simulation]{\label{f:CFormatSim}\usebox\myboxB} 531 \hspace{3pt} 532 \caption{Formatter (input) Asymmetric Generator} 533 \label{f:FormatterAsymmetricGenerator} 534 \end{figure} 535 536 537 \subsection{Generator} 538 539 Stackless generators have the potential to be very small and fast, \ie as small and fast as function call/return for both creation and execution. 540 The \CFA goal is to achieve this performance target, possibly at the cost of some semantic complexity. 541 How this goal is accomplished is done through a series of different kinds of generators and their implementation. 542 543 Figure~\ref{f:FibonacciAsymmetricGenerator} shows an unbounded asymmetric generator for an infinite sequence of Fibonacci numbers written in C and \CFA, with a simple C implementation for the \CFA version. 544 This kind of generator is an \emph{output generator}, producing a new result on each resumption. 545 To compute Fibonacci, the previous two values in the sequence are retained to generate the next value, \ie @fn1@ and @fn@, plus the execution location where control restarts when the generator is resumed, \ie top or middle. 546 An additional requirement is the ability to create an arbitrary number of generators (of any kind), \ie retaining state in global variables is insufficient; 547 hence, state is retained in a closure between calls. 548 Figure~\ref{f:CFibonacci} shows the C approach of manually creating the closure in structure @Fib@, and multiple instances of this closure provide multiple Fibonacci generators. 549 The C version only has the middle execution state because the top execution state becomes declaration initialization. 550 Figure~\ref{f:CFAFibonacciGen} shows the \CFA approach, which also has a manual closure, but replaces the structure with a special \CFA @generator@ type. 551 This generator type is then connected to a function named @main@ that takes as its only parameter a reference to the generator type, called a \emph{generator main}. 552 The generator main contains @suspend@ statements that suspend execution without ending the generator versus @return@. 553 For the Fibonacci generator-main,\footnote{ 554 The \CFA \lstinline|with| opens an aggregate scope making its fields directly accessible, like Pascal \lstinline|with|, but using parallel semantics. 555 Multiple aggregates may be opened.} 556 the top initialization state appears at the start and the middle execution state is denoted by statement @suspend@. 557 Any local variables in @main@ \emph{are not retained} between calls; 558 hence local variable are only for temporary computations \emph{between} suspends. 559 All retained state \emph{must} appear in the generator's type. 
560 As well, generator code containing a @suspend@ cannot be refactored into a helper function called by the generator, because @suspend@ is implemented via @return@, so a return from the helper function goes back to the current generator not the resumer. 561 The generator is started by calling function @resume@ with a generator instance, which begins execution at the top of the generator main, and subsequent @resume@ calls restart the generator at its point of last suspension. 562 Resuming an ended (returned) generator is undefined. 563 Function @resume@ returns its argument generator so it can be cascaded in an expression, in this case to print the next Fibonacci value @fn@ computed in the generator instance. 564 Figure~\ref{f:CFibonacciSim} shows the C implementation of the \CFA generator only needs one additional field, @next@, to handle retention of execution state. 565 The computed @goto@ at the start of the generator main, which branches after the previous suspend, adds very little cost to the resume call. 566 Finally, an explicit generator type provides both design and performance benefits, such as multiple type-safe interface functions taking and returning arbitrary types. 567 \begin{cfa} 568 int ?()( Fib & fib ) with( fib ) { return `resume( fib )`.fn; } // function-call interface 569 int ?()( Fib & fib, int N ) with( fib ) { for ( N - 1 ) `fib()`; return `fib()`; } // use simple interface 570 double ?()( Fib & fib ) with( fib ) { return (int)`fib()` / 3.14159; } // cast prevents recursive call 571 sout | (int)f1() | (double)f1() | f2( 2 ); // simple interface, cast selects call based on return type, step 2 values 572 \end{cfa} 573 Now, the generator can be a separately-compiled opaque-type only accessed through its interface functions. 574 For contrast, Figure~\ref{f:PythonFibonacci} shows the equivalent Python Fibonacci generator, which does not use a generator type, and hence only has a single interface, but an implicit closure. 575 576 Having to manually create the generator closure by moving local-state variables into the generator type is an additional programmer burden and possible point of error. 577 (This restriction is removed by the coroutine, see Section~\ref{s:Coroutine}.) 578 Our concern is that static analysis to discriminate local state from temporary variables is complex and beyond the current scope of the \CFA project. 579 As well, supporting variable-size local-state, like variable-length arrays, requires dynamic allocation of the local state, which significantly increases the cost of generator creation/destruction, and is a show-stopper for embedded programming. 580 But more importantly, the size of the generator type is tied to the local state in the generator main, which precludes separate compilation of the generator main, \ie a generator must be inlined or local state must be dynamically allocated. 581 Our current experience is that most generator problems have simple data state, including local state, but complex execution state. 582 As well, C programmers are not afraid with this kind of semantic programming requirement, if it results in very small, fast generators. 583 584 Figure~\ref{f:CFAFormatGen} shows an asymmetric \newterm{input generator}, @Fmt@, for restructuring text into groups of characters of fixed-size blocks, \ie the input on the left is reformatted into the output on the right, where newlines are ignored. 
585 \begin{center} 586 \tt 587 \begin{tabular}{@{}l|l@{}} 588 \multicolumn{1}{c|}{\textbf{\textrm{input}}} & \multicolumn{1}{c}{\textbf{\textrm{output}}} \\ 589 \begin{tabular}[t]{@{}ll@{}} 590 abcdefghijklmnopqrstuvwxyz \\ 591 abcdefghijklmnopqrstuvwxyz 592 \end{tabular} 593 & 594 \begin{tabular}[t]{@{}lllll@{}} 595 abcd & efgh & ijkl & mnop & qrst \\ 596 uvwx & yzab & cdef & ghij & klmn \\ 597 opqr & stuv & wxyz & & 598 \end{tabular} 599 \end{tabular} 600 \end{center} 601 The example takes advantage of resuming a generator in the constructor to prime the loops so the first character sent for formatting appears inside the nested loops. 602 The destructor provides a newline, if formatted text ends with a full line. 603 Figure~\ref{f:CFormatSim} shows the C implementation of the \CFA input generator with one additional field and the computed @goto@. 604 For contrast, Figure~\ref{f:PythonFormatter} shows the equivalent Python format generator with the same properties as the Fibonacci generator. 605 606 Figure~\ref{f:DeviceDriverGen} shows a \emph{killer} asymmetric generator, a device-driver, because device drivers caused 70\%-85\% of failures in Windows/Linux~\cite{Swift05}. 607 Device drives follow the pattern of simple data state but complex execution state, \ie finite state-machine (FSM) parsing a protocol. 608 For example, the following protocol: 609 \begin{center} 610 \ldots\, STX \ldots\, message \ldots\, ESC ETX \ldots\, message \ldots\, ETX 2-byte crc \ldots 611 \end{center} 612 is a network message beginning with the control character STX, ending with an ETX, and followed by a 2-byte cyclic-redundancy check. 613 Control characters may appear in a message if preceded by an ESC. 614 When a message byte arrives, it triggers an interrupt, and the operating system services the interrupt by calling the device driver with the byte read from a hardware register. 615 The device driver returns a status code of its current state, and when a complete message is obtained, the operating system knows the message is in the message buffer. 616 Hence, the device driver is an input/output generator. 617 618 Note, the cost of creating and resuming the device-driver generator, @Driver@, is virtually identical to call/return, so performance in an operating-system kernel is excellent. 619 As well, the data state is small, where variables @byte@ and @msg@ are communication variables for passing in message bytes and returning the message, and variables @lnth@, @crc@, and @sum@ are local variable that must be retained between calls and are hoisted into the generator type. 620 Manually, detecting and hoisting local-state variables is easy when the number is small. 621 Finally, the execution state is large, with one @resume@ and seven @suspend@s. 622 Hence, the key benefits of the generator are correctness, safety, and maintenance because the execution states of the FSM are transcribed directly into the programming language rather than using a table-driven approach. 623 Because FSMs can be complex and occur frequently in important domains, direct support of the generator is crucial in a systems programming-language. 
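For contrast, the table-driven approach mentioned above encodes the execution states as data rather than as control flow.
The following is a minimal sketch in plain C of a simplified version of the same STX/ETX protocol; the state and input-class names are invented for illustration, and error reporting and CRC handling are elided:
\begin{cfa}
#include <stdio.h>

enum State { WAIT_STX, IN_MSG, GOT_ESC, DONE, NSTATES };
enum Class { C_STX, C_ETX, C_ESC, C_OTHER, NCLASSES };
static enum Class classify( unsigned char b ) {
	switch ( b ) {
	  case '\002': return C_STX;
	  case '\003': return C_ETX;
	  case '\033': return C_ESC;
	  default: return C_OTHER;
	}
}
static const enum State delta[NSTATES][NCLASSES] = {	// FSM lives in a data table, not control flow
	/* WAIT_STX */	{ IN_MSG, WAIT_STX, WAIT_STX, WAIT_STX },
	/* IN_MSG */	{ IN_MSG, DONE,     GOT_ESC,  IN_MSG },
	/* GOT_ESC */	{ IN_MSG, IN_MSG,   IN_MSG,   IN_MSG },
	/* DONE */		{ DONE,   DONE,     DONE,     DONE },
};
int main() {
	const unsigned char input[] = { 'x', '\002', 'h', 'i', '\033', '\003', '!', '\003' };
	char msg[64];  unsigned int lnth = 0;
	enum State s = WAIT_STX;
	for ( unsigned int i = 0; i < sizeof(input); i += 1 ) {
		enum Class c = classify( input[i] );
		if ( s == GOT_ESC || (s == IN_MSG && c == C_OTHER) )
			msg[lnth++] = input[i];			// message byte (escaped control characters are data)
		s = delta[s][c];
	}
	msg[lnth] = '\0';
	printf( "%s %s\n", s == DONE ? "complete:" : "incomplete:", msg );
}
\end{cfa}
A protocol change here touches both the transition table and the action code, which is the maintenance cost avoided by transcribing the states directly into suspend points.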
624 625 \begin{figure} 626 \centering 627 \newbox\myboxA 628 \begin{lrbox}{\myboxA} 629 \begin{python}[aboveskip=0pt,belowskip=0pt] 630 def Fib(): 631 fn1, fn = 0, 1 632 while True: 633 `yield fn1` 634 fn1, fn = fn, fn1 + fn 635 f1 = Fib() 636 f2 = Fib() 637 for i in range( 10 ): 638 print( next( f1 ), next( f2 ) ) 639 640 641 642 643 644 645 \end{python} 646 \end{lrbox} 647 648 \newbox\myboxB 649 \begin{lrbox}{\myboxB} 650 \begin{python}[aboveskip=0pt,belowskip=0pt] 651 def Fmt(): 652 try: 653 while True: 654 for g in range( 5 ): 655 for b in range( 4 ): 656 print( `(yield)`, end='' ) 657 print( ' ', end='' ) 658 print() 659 except GeneratorExit: 660 if g != 0 | b != 0: 661 print() 662 fmt = Fmt() 663 `next( fmt )` # prime, next prewritten 664 for i in range( 41 ): 665 `fmt.send( 'a' );` # send to yield 666 \end{python} 667 \end{lrbox} 668 \subfloat[Fibonacci]{\label{f:PythonFibonacci}\usebox\myboxA} 669 \hspace{3pt} 670 \vrule 671 \hspace{3pt} 672 \subfloat[Formatter]{\label{f:PythonFormatter}\usebox\myboxB} 673 \caption{Python Generator} 674 \label{f:PythonGenerator} 675 676 \bigskip 677 678 \begin{tabular}{@{}l|l@{}} 679 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 680 enum Status { CONT, MSG, ESTX, 681 ELNTH, ECRC }; 682 `generator` Driver { 683 Status status; 684 unsigned char byte, * msg; // communication 685 unsigned int lnth, sum; // local state 686 unsigned short int crc; 687 }; 688 void ?{}( Driver & d, char * m ) { d.msg = m; } 689 Status next( Driver & d, char b ) with( d ) { 690 byte = b; `resume( d );` return status; 691 } 692 void main( Driver & d ) with( d ) { 693 enum { STX = '\002', ESC = '\033', 694 ETX = '\003', MaxMsg = 64 }; 695 msg: for () { // parse message 696 status = CONT; 697 lnth = 0; sum = 0; 698 while ( byte != STX ) `suspend;` 699 emsg: for () { 700 `suspend;` // process byte 701 \end{cfa} 702 & 703 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 704 choose ( byte ) { // switch with implicit break 705 case STX: 706 status = ESTX; `suspend;` continue msg; 707 case ETX: 708 break emsg; 709 case ESC: 710 `suspend;` 711 } 712 if ( lnth >= MaxMsg ) { // buffer full ? 713 status = ELNTH; `suspend;` continue msg; } 714 msg[lnth++] = byte; 715 sum += byte; 716 } 717 msg[lnth] = '\0'; // terminate string 718 `suspend;` 719 crc = byte << 8; 720 `suspend;` 721 status = (crc | byte) == sum ? MSG : ECRC; 722 `suspend;` 723 } 724 } 725 \end{cfa} 726 \end{tabular} 727 \caption{Device-driver generator for communication protocol} 728 \label{f:DeviceDriverGen} 729 \end{figure} 730 731 Figure~\ref{f:CFAPingPongGen} shows a symmetric generator, where the generator resumes another generator, forming a resume/resume cycle. 732 (The trivial cycle is a generator resuming itself.) 733 This control flow is similar to recursion for functions, but without stack growth. 734 The steps for symmetric control-flow are creating, executing, and terminating the cycle. 735 Constructing the cycle must deal with definition-before-use to close the cycle, \ie, the first generator must know about the last generator, which is not within scope. 736 (This issues occurs for any cyclic data-structure.) 737 % The example creates all the generators and then assigns the partners that form the cycle. 738 % Alternatively, the constructor can assign the partners as they are declared, except the first, and the first-generator partner is set after the last generator declaration to close the cycle. 
739 Once the cycle is formed, the program main resumes one of the generators, and the generators can then traverse an arbitrary cycle using @resume@ to activate partner generator(s). 740 Terminating the cycle is accomplished by @suspend@ or @return@, both of which go back to the stack frame that started the cycle (program main in the example). 741 The starting stack-frame is below the last active generator because the resume/resume cycle does not grow the stack. 742 Also, since local variables are not retained in the generator function, it does not contain an objects with destructors that must be called, so the cost is the same as a function return. 743 Destructor cost occurs when the generator instance is deallocated, which is easily controlled by the programmer. 744 745 Figure~\ref{f:CPingPongSim} shows the implementation of the symmetric generator, where the complexity is the @resume@, which needs an extension to the calling convention to perform a forward rather than backward jump. 746 This jump starts at the top of the next generator main to re-execute the normal calling convention to make space on the stack for its local variables. 747 However, before the jump, the caller must reset its stack (and any registers) equivalent to a @return@, but subsequently jump forward not backwards. 748 This semantics is basically a tail-call optimization, which compilers already perform. 749 Hence, assembly code is manually insert in the example to undo the generator's entry code before the direct jump. 750 This assembly code is fragile as it depends on what entry code is generated, specifically if there are local variables and the level of optimization. 751 To provide this new calling convention requires a mechanism built into the compiler, which is beyond the scope of \CFA at this time. 752 Nevertheless, it is possible to hand generate any symmetric generators for proof of concept and performance testing. 753 A compiler could also eliminate other artifacts in the generator simulation to further increase performance. 
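A portable, though slower, approximation of the symmetric resume-cycle avoids the assembly tail-jump by having each step return its partner to a small driver (trampoline) loop at the root of the cycle.
The following sketch reuses the computed-goto style and fields of the C simulation in Figure~\ref{f:CPingPongSim}; it illustrates the alternative only and is not the proposed calling convention:
\begin{cfa}
#include <stdio.h>

typedef struct PingPong {
	const char * name;
	struct PingPong * partner;
	int N, i;
	void * next;							// resume point
} PingPong;
#define PPCtor( name, N ) { name, NULL, N, 0, NULL }
PingPong * comain( PingPong * pp ) {		// one step, then tell the driver who runs next
	if ( pp->next ) goto *pp->next;
	for ( ; pp->i < pp->N; pp->i += 1 ) {
		printf( "%s %d\n", pp->name, pp->i );
		pp->next = &&cycle;
		return pp->partner;					// "resume( partner )" via the driver
	  cycle: ;
	}
	return NULL;							// cycle ends, control returns to the root
}
int main() {
	enum { N = 5 };
	PingPong ping = PPCtor( "ping", N ), pong = PPCtor( "pong", N );
	ping.partner = &pong;  pong.partner = &ping;
	for ( PingPong * g = &ping; g; g = comain( g ) ) {}	// driver (trampoline) loop
}
\end{cfa}
The cost is an extra return/call per resume and the requirement that the cycle be driven from a single root stack-frame, which is exactly what the direct forward jump eliminates.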
754 755 \begin{figure} 756 \centering 757 \begin{lrbox}{\myboxA} 758 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 759 `generator PingPong` { 760 const char * name; 761 PingPong & partner; // rebindable reference 762 int N, i; 763 }; 764 void ?{}(PingPong & pp, char * nm, int N) with(pp) { 765 name = nm; partner = 0p; pp.N = N; i = 0; } 766 void `main( PingPong & pp )` with(pp) { 767 for ( ; i < N; i += 1 ) { 768 sout | name | i; 769 `resume( partner );` 770 } 771 } 772 int main() { 773 enum { N = 5 }; 774 PingPong ping = {"ping", N}, pong = {"pong", N}; 775 &ping.partner = &pong; &pong.partner = &ping; 776 `resume( ping );` 777 } 778 \end{cfa} 779 \end{lrbox} 780 781 \begin{lrbox}{\myboxB} 782 \begin{cfa}[escapechar={},aboveskip=0pt,belowskip=0pt] 783 typedef struct PingPong { 784 const char * name; 785 struct PingPong * partner; 786 int N, i; 787 void * next; 788 } PingPong; 789 #define PPCtor(name, N) { name, NULL, N, 0, NULL } 790 void comain( PingPong * pp ) { 791 if ( pp->next ) goto *pp->next; 792 pp->next = &&cycle; 793 for ( ; pp->i < pp->N; pp->i += 1 ) { 794 printf( "%s %d\n", pp->name, pp->i ); 795 asm( "mov %0,%%rdi" : "=m" (pp->partner) ); 796 asm( "mov %rdi,%rax" ); 797 asm( "popq %rbx" ); 798 asm( "jmp comain" ); 799 cycle: ; 800 } 801 } 802 \end{cfa} 803 \end{lrbox} 804 805 \subfloat[\CFA symmetric generator]{\label{f:CFAPingPongGen}\usebox\myboxA} 806 \hspace{3pt} 807 \vrule 808 \hspace{3pt} 809 \subfloat[C generator simulation]{\label{f:CPingPongSim}\usebox\myboxB} 810 \hspace{3pt} 811 \caption{Ping-Pong Symmetric Generator} 812 \label{f:PingPongSymmetricGenerator} 813 \end{figure} 814 815 Finally, part of this generator work was inspired by the recent \CCtwenty generator proposal~\cite{C++20Coroutine19} (which they call coroutines). 816 Our work provides the same high-performance asymmetric-generators as \CCtwenty, and extends their work with symmetric generators. 817 An additional \CCtwenty generator feature allows @suspend@ and @resume@ to be followed by a restricted compound-statement that is executed after the current generator has reset its stack but before calling the next generator, specified with the following \CFA syntax. 818 \begin{cfa} 819 ... suspend`{ ... }`; 820 ... resume( C )`{ ... }` ... 821 \end{cfa} 822 Since the current generator's stack is released before calling the compound statement, the compound statement can only reference variables in the generator's type. 823 This feature is useful when a generator is used in a concurrent context to ensure it is stopped before releasing a lock in the compound statement, which might immediately allow another thread to resume the generator. 824 Hence, this mechanism provides a general and safe handoff of the generator among competing threads. 825 826 827 \subsection{Coroutine} 828 \label{s:Coroutine} 829 830 Stackful coroutines extend generator semantics, \ie there is an implicit closure and @suspend@ may appear in a helper function called from the coroutine main. 831 A coroutine is specified by replacing @generator@ with @coroutine@ for the type. 832 This generality results in higher cost for creation, due to dynamic stack allocation, execution, due to context switching among stacks, and terminating, due to possible stack unwinding and dynamic stack deallocation. 833 How coroutines differ from generators is done through a series of examples. 
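Before the conversions, the stackful mechanism itself can be sketched by hand with POSIX @ucontext@; this sketch is an illustration only, not the \CFA runtime.
The coroutine receives its own dynamically allocated stack, @resume@/@suspend@ are context switches between stacks, and, unlike a generator, the suspend may occur inside a helper function:
\begin{cfa}
#include <ucontext.h>
#include <stdio.h>
#include <stdlib.h>

static ucontext_t caller, coro;				// two execution contexts
static void suspend( void ) { swapcontext( &coro, &caller ); }	// back to last resumer
static void resume( void ) { swapcontext( &caller, &coro ); }	// into the coroutine stack

static void helper( int i ) {				// suspending in a helper needs a separate stack
	printf( "helper %d\n", i );
	suspend();
}
static void comain( void ) {				// coroutine main
	for ( int i = 0; i < 3; i += 1 ) helper( i );
}
int main() {
	getcontext( &coro );
	coro.uc_stack.ss_sp = malloc( 64 * 1024 );	// dynamic stack allocation
	coro.uc_stack.ss_size = 64 * 1024;
	coro.uc_link = &caller;					// where control goes when comain returns
	makecontext( &coro, comain, 0 );
	for ( int i = 0; i < 4; i += 1 ) resume();	// 3 suspends + termination
	free( coro.uc_stack.ss_sp );
}
\end{cfa}
The dynamically allocated stack and the context switch per @resume@/@suspend@ are the creation and execution costs noted above.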
834 835 First, the previous generator examples are converted to their coroutine counterparts, allowing local-state variables to be moved from the generator type into the coroutine main. 836 \begin{description} 837 \item[Fibonacci] 838 Move the declaration of @fn1@ to the start of coroutine main. 839 \begin{cfa}[xleftmargin=0pt] 840 void main( Fib & fib ) with(fib) { 841 `int fn1;` 842 \end{cfa} 843 \item[Formatter] 844 Move the declaration of @g@ and @b@ to the for loops in the coroutine main. 845 \begin{cfa}[xleftmargin=0pt] 846 for ( `g`; 5 ) { 847 for ( `b`; 4 ) { 848 \end{cfa} 849 \item[Device Driver] 850 Move the declaration of @lnth@ and @sum@ to their points of initialization. 851 \begin{cfa}[xleftmargin=0pt] 852 status = CONT; 853 `unsigned int lnth = 0, sum = 0;` 854 ... 855 `unsigned short int crc = byte << 8;` 856 \end{cfa} 857 \item[PingPong] 858 Move the declaration of @i@ to the for loop in the coroutine main. 859 \begin{cfa}[xleftmargin=0pt] 860 void main( PingPong & pp ) with(pp) { 861 for ( `i`; N ) { 862 \end{cfa} 863 \end{description} 864 It is also possible to refactor code containing local-state and @suspend@ statements into a helper routine, like the computation of the CRC for the device driver. 865 \begin{cfa} 866 unsigned int Crc() { 867 `suspend;` 868 unsigned short int crc = byte << 8; 869 `suspend;` 870 status = (crc | byte) == sum ? MSG : ECRC; 871 return crc; 872 } 873 \end{cfa} 874 A call to this function is placed at the end of the driver's coroutine-main. 875 For complex finite-state machines, refactoring is part of normal program abstraction, especially when code is used in multiple places. 876 Again, this complexity is usually associated with execution state rather than data state. 877 348 878 \begin{comment} 349 This paper provides a minimal concurrency \newterm{Application Program Interface} (API) that is simple, efficient and can be used to build other concurrency features. 350 While the simplest concurrency system is a thread and a lock, this low-level approach is hard to master. 351 An easier approach for programmers is to support higher-level constructs as the basis of concurrency. 352 Indeed, for highly-productive concurrent-programming, high-level approaches are much more popular~\cite{Hochstein05}. 353 Examples of high-level approaches are jobs (thread pool)~\cite{TBB}, implicit threading~\cite{OpenMP}, monitors~\cite{Java}, channels~\cite{CSP,Go}, and message passing~\cite{Erlang,MPI}. 354 355 The following terminology is used. 356 A \newterm{thread} is a fundamental unit of execution that runs a sequence of code and requires a stack to maintain state. 357 Multiple simultaneous threads give rise to \newterm{concurrency}, which requires locking to ensure access to shared data and safe communication. 358 \newterm{Locking}, and by extension \newterm{locks}, are defined as a mechanism to prevent progress of threads to provide safety. 359 \newterm{Parallelism} is running multiple threads simultaneously. 360 Parallelism implies \emph{actual} simultaneous execution, where concurrency only requires \emph{apparent} simultaneous execution. 361 As such, parallelism only affects performance, which is observed through differences in space and/or time at runtime. 362 Hence, there are two problems to be solved: concurrency and parallelism. 363 While these two concepts are often combined, they are distinct, requiring different tools~\cite[\S~2]{Buhr05a}. 
364 Concurrency tools handle mutual exclusion and synchronization, while parallelism tools handle performance, cost, and resource utilization. 365 366 The proposed concurrency API is implemented in a dialect of C, called \CFA (pronounced C-for-all). 367 The paper discusses how the language features are added to the \CFA translator with respect to parsing, semantics, and type checking, and the corresponding high-performance runtime-library to implement the concurrent features. 368 \end{comment} 369 370 371 \begin{comment} 372 \section{\CFA Overview} 373 374 The following is a quick introduction to the \CFA language, specifically tailored to the features needed to support concurrency. 375 Extended versions and explanation of the following code examples are available at the \CFA website~\cite{Cforall} or in Moss~\etal~\cite{Moss18}. 376 377 \CFA is a non-object-oriented extension of ISO-C, and hence, supports all C paradigms. 378 Like C, the building blocks of \CFA are structures and routines. 379 Virtually all of the code generated by the \CFA translator respects C memory layouts and calling conventions. 380 While \CFA is not object oriented, lacking the concept of a receiver (\eg @this@) and nominal inheritance-relationships, C has a notion of objects: ``region of data storage in the execution environment, the contents of which can represent values''~\cite[3.15]{C11}. 381 While some object-oriented features appear in \CFA, they are independent capabilities, allowing \CFA to adopt them while maintaining a procedural paradigm. 382 383 384 \subsection{References} 385 386 \CFA provides multi-level rebindable references, as an alternative to pointers, which significantly reduces syntactic noise. 387 \begin{cfa} 388 int x = 1, y = 2, z = 3; 389 int * p1 = &x, ** p2 = &p1, *** p3 = &p2, $\C{// pointers to x}$ 390 `&` r1 = x, `&&` r2 = r1, `&&&` r3 = r2; $\C{// references to x}$ 391 int * p4 = &z, `&` r4 = z; 392 393 *p1 = 3; **p2 = 3; ***p3 = 3; // change x 394 r1 = 3; r2 = 3; r3 = 3; // change x: implicit dereferences *r1, **r2, ***r3 395 **p3 = &y; *p3 = &p4; // change p1, p2 396 `&`r3 = &y; `&&`r3 = &`&`r4; // change r1, r2: cancel implicit dereferences (&*)**r3, (&(&*)*)*r3, &(&*)r4 397 \end{cfa} 398 A reference is a handle to an object, like a pointer, but is automatically dereferenced the specified number of levels. 399 Referencing (address-of @&@) a reference variable cancels one of the implicit dereferences, until there are no more implicit references, after which normal expression behaviour applies. 400 401 402 \subsection{\texorpdfstring{\protect\lstinline{with} Statement}{with Statement}} 403 \label{s:WithStatement} 404 405 Heterogeneous data is aggregated into a structure/union. 406 To reduce syntactic noise, \CFA provides a @with@ statement (see Pascal~\cite[\S~4.F]{Pascal}) to elide aggregate field-qualification by opening a scope containing the field identifiers. 407 \begin{cquote} 408 \vspace*{-\baselineskip}%??? 
409 \lstDeleteShortInline@% 410 \begin{cfa} 411 struct S { char c; int i; double d; }; 412 struct T { double m, n; }; 413 // multiple aggregate parameters 414 \end{cfa} 415 \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}} 416 \begin{cfa} 417 void f( S & s, T & t ) { 418 `s.`c; `s.`i; `s.`d; 419 `t.`m; `t.`n; 420 } 421 \end{cfa} 422 & 423 \begin{cfa} 424 void f( S & s, T & t ) `with ( s, t )` { 425 c; i; d; // no qualification 426 m; n; 427 } 428 \end{cfa} 429 \end{tabular} 430 \lstMakeShortInline@% 431 \end{cquote} 432 Object-oriented programming languages only provide implicit qualification for the receiver. 433 434 In detail, the @with@-statement syntax is: 435 \begin{cfa} 436 $\emph{with-statement}$: 437 'with' '(' $\emph{expression-list}$ ')' $\emph{compound-statement}$ 438 \end{cfa} 439 and may appear as the body of a routine or nested within a routine body. 440 Each expression in the expression-list provides a type and object. 441 The type must be an aggregate type. 442 (Enumerations are already opened.) 443 The object is the implicit qualifier for the open structure-fields. 444 All expressions in the expression list are opened in parallel within the compound statement, which is different from Pascal, which nests the openings from left to right. 445 446 447 \subsection{Overloading} 448 449 \CFA maximizes the ability to reuse names via overloading to aggressively address the naming problem. 450 Both variables and routines may be overloaded, where selection is based on number and types of returns and arguments (as in Ada~\cite{Ada}). 451 \newpage 452 \vspace*{-2\baselineskip}%??? 453 \begin{cquote} 454 \begin{cfa} 455 // selection based on type 456 \end{cfa} 457 \lstDeleteShortInline@% 458 \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}} 459 \begin{cfa} 460 const short int `MIN` = -32768; 461 const int `MIN` = -2147483648; 462 const long int `MIN` = -9223372036854775808L; 463 \end{cfa} 464 & 465 \begin{cfa} 466 short int si = `MIN`; 467 int i = `MIN`; 468 long int li = `MIN`; 469 \end{cfa} 470 \end{tabular} 471 \begin{cfa} 472 // selection based on type and number of parameters 473 \end{cfa} 474 \begin{tabular}{@{}l@{\hspace{2.7\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}} 475 \begin{cfa} 476 void `f`( void ); 477 void `f`( char ); 478 void `f`( int, double ); 479 \end{cfa} 480 & 481 \begin{cfa} 482 `f`(); 483 `f`( 'a' ); 484 `f`( 3, 5.2 ); 485 \end{cfa} 486 \end{tabular} 487 \begin{cfa} 488 // selection based on type and number of returns 489 \end{cfa} 490 \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}} 491 \begin{cfa} 492 char `f`( int ); 493 double `f`( int ); 494 [char, double] `f`( int ); 495 \end{cfa} 496 & 497 \begin{cfa} 498 char c = `f`( 3 ); 499 double d = `f`( 3 ); 500 [d, c] = `f`( 3 ); 501 \end{cfa} 502 \end{tabular} 503 \lstMakeShortInline@% 504 \end{cquote} 505 Overloading is important for \CFA concurrency since the runtime system relies on creating different types to represent concurrency objects. 506 Therefore, overloading eliminates long prefixes and other naming conventions to prevent name clashes. 507 As seen in Section~\ref{s:Concurrency}, routine @main@ is heavily overloaded. 
508 As another example, variable overloading is useful in the parallel semantics of the @with@ statement for fields with the same name: 509 \begin{cfa} 510 struct S { int `i`; int j; double m; } s; 511 struct T { int `i`; int k; int m; } t; 512 with ( s, t ) { 513 j + k; $\C{// unambiguous, s.j + t.k}$ 514 m = 5.0; $\C{// unambiguous, s.m = 5.0}$ 515 m = 1; $\C{// unambiguous, t.m = 1}$ 516 int a = m; $\C{// unambiguous, a = t.m }$ 517 double b = m; $\C{// unambiguous, b = s.m}$ 518 int c = `s.i` + `t.i`; $\C{// unambiguous, qualification}$ 519 (double)m; $\C{// unambiguous, cast s.m}$ 520 } 521 \end{cfa} 522 For parallel semantics, both @s.i@ and @t.i@ are visible with the same type, so only @i@ is ambiguous without qualification. 523 524 525 \subsection{Operators} 526 527 Overloading also extends to operators. 528 Operator-overloading syntax creates a routine name with an operator symbol and question marks for the operands: 529 \begin{cquote} 530 \lstDeleteShortInline@% 531 \begin{tabular}{@{}ll@{\hspace{\parindentlnth}}|@{\hspace{\parindentlnth}}l@{}} 532 \begin{cfa} 533 int ++?(int op); 534 int ?++(int op); 535 int `?+?`(int op1, int op2); 536 int ?<=?(int op1, int op2); 537 int ?=? (int & op1, int op2); 538 int ?+=?(int & op1, int op2); 539 \end{cfa} 540 & 541 \begin{cfa} 542 // unary prefix increment 543 // unary postfix increment 544 // binary plus 545 // binary less than 546 // binary assignment 547 // binary plus-assignment 548 \end{cfa} 549 & 550 \begin{cfa} 551 struct S { int i, j; }; 552 S `?+?`( S op1, S op2) { // add two structures 553 return (S){op1.i + op2.i, op1.j + op2.j}; 554 } 555 S s1 = {1, 2}, s2 = {2, 3}, s3; 556 s3 = s1 `+` s2; // compute sum: s3 == {2, 5} 557 \end{cfa} 558 \end{tabular} 559 \lstMakeShortInline@% 560 \end{cquote} 561 562 563 \subsection{Constructors / Destructors} 564 565 Object lifetime is a challenge in non-managed programming languages. 566 \CFA responds with \CC-like constructors and destructors, using a different operator-overloading syntax. 567 \begin{cfa} 568 struct VLA { int len, * data; }; $\C{// variable length array of integers}$ 569 void ?{}( VLA & vla ) with ( vla ) { len = 10; data = alloc( len ); } $\C{// default constructor}$ 570 void ?{}( VLA & vla, int size, char fill ) with ( vla ) { len = size; data = alloc( len, fill ); } // initialization 571 void ?{}( VLA & vla, VLA other ) { vla.len = other.len; vla.data = other.data; } $\C{// copy, shallow}$ 572 void ^?{}( VLA & vla ) with ( vla ) { free( data ); } $\C{// destructor}$ 573 { 574 VLA x, y = { 20, 0x01 }, z = y; $\C{// z points to y}$ 575 // $\LstCommentStyle{\color{red}\ \ \ x\{\};\ \ \ \ \ \ \ \ \ y\{ 20, 0x01 \};\ \ \ \ \ \ \ \ \ \ z\{ z, y \};\ \ \ \ \ \ \ implicit calls}$ 576 ^x{}; $\C{// deallocate x}$ 577 x{}; $\C{// reallocate x}$ 578 z{ 5, 0xff }; $\C{// reallocate z, not pointing to y}$ 579 ^y{}; $\C{// deallocate y}$ 580 y{ x }; $\C{// reallocate y, points to x}$ 581 x{}; $\C{// reallocate x, not pointing to y}$ 582 } // $\LstCommentStyle{\color{red}\^{}z\{\};\ \ \^{}y\{\};\ \ \^{}x\{\};\ \ \ implicit calls}$ 583 \end{cfa} 584 Like \CC, construction is implicit on allocation (stack/heap) and destruction is implicit on deallocation. 585 The object and all their fields are constructed/destructed. 586 \CFA also provides @new@ and @delete@ as library routines, which behave like @malloc@ and @free@, in addition to constructing and destructing objects: 587 \begin{cfa} 588 { 589 ... struct S s = {10}; ... 
$\C{// allocation, call constructor}$ 590 } $\C{// deallocation, call destructor}$ 591 struct S * s = new(); $\C{// allocation, call constructor}$ 592 ... 593 delete( s ); $\C{// deallocation, call destructor}$ 594 \end{cfa} 595 \CFA concurrency uses object lifetime as a means of mutual exclusion and/or synchronization. 596 597 598 \subsection{Parametric Polymorphism} 599 \label{s:ParametricPolymorphism} 600 601 The signature feature of \CFA is parametric-polymorphic routines~\cite{Cforall} with routines generalized using a @forall@ clause (giving the language its name), which allow separately compiled routines to support generic usage over multiple types. 602 For example, the following sum routine works for any type that supports construction from 0 and addition: 603 \begin{cfa} 604 forall( otype T | { void `?{}`( T *, zero_t ); T `?+?`( T, T ); } ) // constraint type, 0 and + 605 T sum( T a[$\,$], size_t size ) { 606 `T` total = { `0` }; $\C{// initialize by 0 constructor}$ 607 for ( size_t i = 0; i < size; i += 1 ) 608 total = total `+` a[i]; $\C{// select appropriate +}$ 609 return total; 610 } 611 S sa[5]; 612 int i = sum( sa, 5 ); $\C{// use S's 0 construction and +}$ 613 \end{cfa} 614 Type variables can be @otype@ or @dtype@. 615 @otype@ refers to a \emph{complete type}, \ie, a type with size, alignment, default constructor, copy constructor, destructor, and assignment operator. 616 @dtype@ refers to an \emph{incomplete type}, \eg, void or a forward-declared type. 617 The builtin types @zero_t@ and @one_t@ overload constant 0 and 1 for a new types, where both 0 and 1 have special meaning in C. 618 619 \CFA provides \newterm{traits} to name a group of type assertions, where the trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each routine declaration: 620 \begin{cfa} 621 trait `sumable`( otype T ) { 622 void `?{}`( T &, zero_t ); $\C{// 0 literal constructor}$ 623 T `?+?`( T, T ); $\C{// assortment of additions}$ 624 T ?+=?( T &, T ); 625 T ++?( T & ); 626 T ?++( T & ); 627 }; 628 forall( otype T `| sumable( T )` ) $\C{// use trait}$ 629 T sum( T a[$\,$], size_t size ); 630 \end{cfa} 631 632 Using the return type for overload discrimination, it is possible to write a type-safe @alloc@ based on the C @malloc@: 633 \begin{cfa} 634 forall( dtype T | sized(T) ) T * alloc( void ) { return (T *)malloc( sizeof(T) ); } 635 int * ip = alloc(); $\C{// select type and size from left-hand side}$ 636 double * dp = alloc(); 637 struct S {...} * sp = alloc(); 638 \end{cfa} 639 where the return type supplies the type/size of the allocation, which is impossible in most type systems. 640 \end{comment} 641 642 643 \section{Coroutines: Stepping Stone} 644 \label{coroutine} 645 646 Coroutines are generalized routines allowing execution to be temporarily suspended and later resumed. 647 Hence, unlike a normal routine, a coroutine may not terminate when it returns to its caller, allowing it to be restarted with the values and execution location present at the point of suspension. 648 This capability is accomplished via the coroutine's stack, where suspend/resume context switch among stacks. 649 Because threading design-challenges are present in coroutines, their design effort is relevant, and this effort can be easily exposed to programmers giving them a useful new programming paradigm because a coroutine handles the class of problems that need to retain state between calls, \eg plugins, device drivers, and finite-state machines. 
650 Therefore, the two fundamental features of the core \CFA coroutine-API are independent call-stacks and @suspend@/@resume@ operations. 651 652 For example, a problem made easier with coroutines is unbounded generators, \eg generating an infinite sequence of Fibonacci numbers 653 \begin{displaymath} 654 \mathsf{fib}(n) = \left \{ 655 \begin{array}{ll} 656 0 & n = 0 \\ 657 1 & n = 1 \\ 658 \mathsf{fib}(n-1) + \mathsf{fib}(n-2) & n \ge 2 \\ 659 \end{array} 660 \right. 661 \end{displaymath} 662 where Figure~\ref{f:C-fibonacci} shows conventional approaches for writing a Fibonacci generator in C. 663 Figure~\ref{f:GlobalVariables} illustrates the following problems: unique unencapsulated global variables necessary to retain state between calls, only one Fibonacci generator, and execution state must be explicitly retained via explicit state variables. 664 Figure~\ref{f:ExternalState} addresses these issues: unencapsulated program global variables become encapsulated structure variables, unique global variables are replaced by multiple Fibonacci objects, and explicit execution state is removed by precomputing the first two Fibonacci numbers and returning $\mathsf{fib}(n-2)$. 879 Figure~\ref{f:Coroutine3States} creates a @coroutine@ type, @`coroutine` Fib { int fn; }@, which provides communication, @fn@, for the \newterm{coroutine main}, @main@, which runs on the coroutine stack, and possibly multiple interface routines, \eg @next@. 880 Like the structure in Figure~\ref{f:ExternalState}, the coroutine type allows multiple instances, where instances of this type are passed to the (overloaded) coroutine main. 881 The coroutine main's stack holds the state for the next generation, @f1@ and @f2@, and the code represents the three states in the Fibonacci formula via the three suspend points, to context switch back to the caller's @resume@. 882 The interface routine @next@, takes a Fibonacci instance and context switches to it using @resume@; 883 on restart, the Fibonacci field, @fn@, contains the next value in the sequence, which is returned. 884 The first @resume@ is special because it allocates the coroutine stack and cocalls its coroutine main on that stack; 885 when the coroutine main returns, its stack is deallocated. 886 Hence, @Fib@ is an object at creation, transitions to a coroutine on its first resume, and transitions back to an object when the coroutine main finishes. 887 Figure~\ref{f:Coroutine1State} shows the coroutine version of the C version in Figure~\ref{f:ExternalState}. 888 Coroutine generators are called \newterm{output coroutines} because values are only returned. 
665 889 666 890 \begin{figure} … … 689 913 \begin{lrbox}{\myboxA} 690 914 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 691 #define F IB_INIT{ 0, 1 }915 #define FibCtor { 0, 1 } 692 916 typedef struct { int fn1, fn; } Fib; 693 917 int fib( Fib * f ) { … … 702 926 703 927 int main() { 704 Fib f1 = F IB_INIT, f2 = FIB_INIT;928 Fib f1 = FibCtor, f2 = FibCtor; 705 929 for ( int i = 0; i < 10; i += 1 ) { 706 930 printf( "%d %d\n", … … 719 943 [fn1, fn] = [0, 1]; 720 944 for () { 721 `suspend ();`945 `suspend;` 722 946 [fn1, fn] = [fn, fn1 + fn]; 723 947 } 724 948 } 725 949 int ?()( Fib & fib ) with( fib ) { 726 `resume( fib );` returnfn1;950 return `resume( fib )`.fn1; 727 951 } 728 952 int main() { … … 772 996 \caption{Fibonacci Generator} 773 997 \label{f:C-fibonacci} 774 775 % \bigskip776 %777 % \newbox\myboxA778 % \begin{lrbox}{\myboxA}779 % \begin{cfa}[aboveskip=0pt,belowskip=0pt]780 % `coroutine` Fib { int fn; };781 % void main( Fib & fib ) with( fib ) {782 % fn = 0; int fn1 = fn; `suspend()`;783 % fn = 1; int fn2 = fn1; fn1 = fn; `suspend()`;784 % for () {785 % fn = fn1 + fn2; fn2 = fn1; fn1 = fn; `suspend()`; }786 % }787 % int next( Fib & fib ) with( fib ) { `resume( fib );` return fn; }788 % int main() {789 % Fib f1, f2;790 % for ( 10 )791 % sout | next( f1 ) | next( f2 );792 % }793 % \end{cfa}794 % \end{lrbox}795 % \newbox\myboxB796 % \begin{lrbox}{\myboxB}797 % \begin{python}[aboveskip=0pt,belowskip=0pt]798 %799 % def Fibonacci():800 % fn = 0; fn1 = fn; `yield fn` # suspend801 % fn = 1; fn2 = fn1; fn1 = fn; `yield fn`802 % while True:803 % fn = fn1 + fn2; fn2 = fn1; fn1 = fn; `yield fn`804 %805 %806 % f1 = Fibonacci()807 % f2 = Fibonacci()808 % for i in range( 10 ):809 % print( `next( f1 )`, `next( f2 )` ) # resume810 %811 % \end{python}812 % \end{lrbox}813 % \subfloat[\CFA]{\label{f:Coroutine3States}\usebox\myboxA}814 % \qquad815 % \subfloat[Python]{\label{f:Coroutine1State}\usebox\myboxB}816 % \caption{Fibonacci input coroutine, 3 states, internal variables}817 % \label{f:cfa-fibonacci}818 998 \end{figure} 819 999 820 Using a coroutine, it is possible to express the Fibonacci formula directly without any of the C problems. 821 Figure~\ref{f:Coroutine3States} creates a @coroutine@ type, @`coroutine` Fib { int fn; }@, which provides communication, @fn@, for the \newterm{coroutine main}, @main@, which runs on the coroutine stack, and possibly multiple interface routines, \eg @next@. 822 Like the structure in Figure~\ref{f:ExternalState}, the coroutine type allows multiple instances, where instances of this type are passed to the (overloaded) coroutine main. 823 The coroutine main's stack holds the state for the next generation, @f1@ and @f2@, and the code represents the three states in the Fibonacci formula via the three suspend points, to context switch back to the caller's @resume@. 824 The interface routine @next@, takes a Fibonacci instance and context switches to it using @resume@; 825 on restart, the Fibonacci field, @fn@, contains the next value in the sequence, which is returned. 826 The first @resume@ is special because it allocates the coroutine stack and cocalls its coroutine main on that stack; 827 when the coroutine main returns, its stack is deallocated. 828 Hence, @Fib@ is an object at creation, transitions to a coroutine on its first resume, and transitions back to an object when the coroutine main finishes. 829 Figure~\ref{f:Coroutine1State} shows the coroutine version of the C version in Figure~\ref{f:ExternalState}. 
830 Coroutine generators are called \newterm{output coroutines} because values are only returned. 831 832 Figure~\ref{f:CFAFmt} shows an \newterm{input coroutine}, @Format@, for restructuring text into groups of characters of fixed-size blocks. 833 For example, the input of the left is reformatted into the output on the right. 834 \begin{quote} 835 \tt 836 \begin{tabular}{@{}l|l@{}} 837 \multicolumn{1}{c|}{\textbf{\textrm{input}}} & \multicolumn{1}{c}{\textbf{\textrm{output}}} \\ 838 abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz 839 & 840 \begin{tabular}[t]{@{}lllll@{}} 841 abcd & efgh & ijkl & mnop & qrst \\ 842 uvwx & yzab & cdef & ghij & klmn \\ 843 opqr & stuv & wxyz & & 844 \end{tabular} 845 \end{tabular} 846 \end{quote} 847 The example takes advantage of resuming a coroutine in the constructor to prime the loops so the first character sent for formatting appears inside the nested loops. 848 The destructor provides a newline, if formatted text ends with a full line. 849 Figure~\ref{f:CFmt} shows the C equivalent formatter, where the loops of the coroutine are flattened (linearized) and rechecked on each call because execution location is not retained between calls. 850 (Linearized code is the bane of device drivers.) 851 852 \begin{figure} 853 \centering 1000 \bigskip 1001 854 1002 \newbox\myboxA 855 1003 \begin{lrbox}{\myboxA} 856 1004 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 857 `coroutine` Fmt { 858 char ch; // communication variables 859 int g, b; // needed in destructor 860 }; 861 void main( Fmt & fmt ) with( fmt ) { 1005 `coroutine` Fib { int fn; }; 1006 void main( Fib & fib ) with( fib ) { 1007 fn = 0; int fn1 = fn; `suspend`; 1008 fn = 1; int fn2 = fn1; fn1 = fn; `suspend`; 862 1009 for () { 863 for ( g = 0; g < 5; g += 1 ) { // groups 864 for ( b = 0; b < 4; b += 1 ) { // blocks 865 `suspend();` 866 sout | ch; } // print character 867 sout | " "; } // block separator 868 sout | nl; } // group separator 869 } 870 void ?{}( Fmt & fmt ) { `resume( fmt );` } // prime 871 void ^?{}( Fmt & fmt ) with( fmt ) { // destructor 872 if ( g != 0 || b != 0 ) // special case 873 sout | nl; } 874 void send( Fmt & fmt, char c ) { fmt.ch = c; `resume( fmt )`; } 1010 fn = fn1 + fn2; fn2 = fn1; fn1 = fn; `suspend`; } 1011 } 1012 int next( Fib & fib ) with( fib ) { `resume( fib );` return fn; } 875 1013 int main() { 876 Fmt fmt; 877 sout | nlOff; // turn off auto newline 878 for ( 41 ) 879 send( fmt, 'a' ); 1014 Fib f1, f2; 1015 for ( 10 ) 1016 sout | next( f1 ) | next( f2 ); 880 1017 } 881 1018 \end{cfa} 882 1019 \end{lrbox} 883 884 1020 \newbox\myboxB 885 1021 \begin{lrbox}{\myboxB} 886 1022 \begin{python}[aboveskip=0pt,belowskip=0pt] 887 1023 888 889 890 def Fmt(): 891 try: 892 while True: 893 for g in range( 5 ): 894 for b in range( 4 ): 895 896 print( `(yield)`, end='' ) 897 print( ' ', end='' ) 898 print() 899 900 901 except GeneratorExit: 902 if g != 0 | b != 0: 903 print() 904 905 906 fmt = Fmt() 907 `next( fmt )` # prime 908 for i in range( 41 ): 909 `fmt.send( 'a' );` # send to yield 1024 def Fibonacci(): 1025 fn = 0; fn1 = fn; `yield fn` # suspend 1026 fn = 1; fn2 = fn1; fn1 = fn; `yield fn` 1027 while True: 1028 fn = fn1 + fn2; fn2 = fn1; fn1 = fn; `yield fn` 1029 1030 1031 f1 = Fibonacci() 1032 f2 = Fibonacci() 1033 for i in range( 10 ): 1034 print( `next( f1 )`, `next( f2 )` ) # resume 910 1035 911 1036 \end{python} 912 1037 \end{lrbox} 913 \subfloat[\CFA]{\label{f:C FAFmt}\usebox\myboxA}1038 \subfloat[\CFA]{\label{f:Coroutine3States}\usebox\myboxA} 914 1039 \qquad 915 
\subfloat[Python]{\label{f:C Fmt}\usebox\myboxB}916 \caption{ Output formatting text}917 \label{f: fmt-line}1040 \subfloat[Python]{\label{f:Coroutine1State}\usebox\myboxB} 1041 \caption{Fibonacci input coroutine, 3 states, internal variables} 1042 \label{f:cfa-fibonacci} 918 1043 \end{figure} 919 920 The previous examples are \newterm{asymmetric (semi) coroutine}s because one coroutine always calls a resuming routine for another coroutine, and the resumed coroutine always suspends back to its last resumer, similar to call/return for normal routines. 921 However, @resume@ and @suspend@ context switch among existing stack-frames, rather than create new ones so there is no stack growth. 922 \newterm{Symmetric (full) coroutine}s have a coroutine call to a resuming routine for another coroutine, and its coroutine main calls another resuming routine, which eventually forms a resuming-call cycle. 923 (The trivial cycle is a coroutine resuming itself.) 924 This control flow is similar to recursion for normal routines, but again there is no stack growth from the context switch. 1044 \end{comment} 925 1045 926 1046 \begin{figure} … … 930 1050 \begin{cfa} 931 1051 `coroutine` Prod { 932 Cons & c; 1052 Cons & c; // communication 933 1053 int N, money, receipt; 934 1054 }; … … 951 1071 } 952 1072 void start( Prod & prod, int N, Cons &c ) { 953 &prod.c = &c; // reassignable reference1073 &prod.c = &c; 954 1074 prod.[N, receipt] = [N, 0]; 955 1075 `resume( prod );` … … 964 1084 \begin{cfa} 965 1085 `coroutine` Cons { 966 Prod & p; 1086 Prod & p; // communication 967 1087 int p1, p2, status; 968 1088 bool done; … … 972 1092 cons.[status, done ] = [0, false]; 973 1093 } 974 void ^?{}( Cons & cons ) {}975 1094 void main( Cons & cons ) with( cons ) { 976 1095 // 1st resume starts here … … 994 1113 `resume( cons );` 995 1114 } 1115 996 1116 \end{cfa} 997 1117 \end{tabular} 998 1118 \caption{Producer / consumer: resume-resume cycle, bi-directional communication} 999 1119 \label{f:ProdCons} 1120 1121 \medskip 1122 1123 \begin{center} 1124 \input{FullProdConsStack.pstex_t} 1125 \end{center} 1126 \vspace*{-10pt} 1127 \caption{Producer / consumer runtime stacks} 1128 \label{f:ProdConsRuntimeStacks} 1129 1130 \medskip 1131 1132 \begin{center} 1133 \input{FullCoroutinePhases.pstex_t} 1134 \end{center} 1135 \vspace*{-10pt} 1136 \caption{Ping / Pong coroutine steps} 1137 \label{f:PingPongFullCoroutineSteps} 1000 1138 \end{figure} 1001 1139 1002 Figure~\ref{f:ProdCons} shows a producer/consumer symmetric-coroutine performing bi-directional communication.1003 Since the solution involves a full-coroutining cycle, the program main creates one coroutine in isolation, passes this coroutine to its partner, and closes the cycle at the call to @start@.1004 The @start@ routine communicates both the number of elements to be produced and the consumer into the producer's coroutine-structure.1005 The n the @resume@ to@prod@ creates @prod@'s stack with a frame for @prod@'s coroutine main at the top, and context switches to it.1006 @prod@'s coroutine main starts, creates local variables that are retained between coroutine activations, and executes $N$ iterations, each generating two random values, calling the consumer to deliver the values, and printing the status returned from the consumer.1140 Figure~\ref{f:ProdCons} shows the ping-pong example in Figure~\ref{f:CFAPingPongGen} extended into a producer/consumer symmetric-coroutine performing bidirectional communication. 
1141 This example is illustrative because both producer/consumer have two interface functions with @resume@s that suspend execution in these interface (helper) functions. 1142 The program main creates the producer coroutine, passes it to the consumer coroutine in its initialization, and closes the cycle at the call to @start@ along with the number of items to be produced. 1143 The first @resume@ of @prod@ creates @prod@'s stack with a frame for @prod@'s coroutine main at the top, and context switches to it. 1144 @prod@'s coroutine main starts, creates local-state variables that are retained between coroutine activations, and executes $N$ iterations, each generating two random values, calling the consumer to deliver the values, and printing the status returned from the consumer. 1007 1145 1008 1146 The producer call to @delivery@ transfers values into the consumer's communication variables, resumes the consumer, and returns the consumer status. 1009 For the first resume, @cons@'s stack is initialized, creating localvariables retained between subsequent activations of the coroutine.1147 On the first resume, @cons@'s stack is created and initialized, holding local-state variables retained between subsequent activations of the coroutine. 1010 1148 The consumer iterates until the @done@ flag is set, prints the values delivered by the producer, increments status, and calls back to the producer via @payment@, and on return from @payment@, prints the receipt from the producer and increments @money@ (inflation). 1011 1149 The call from the consumer to @payment@ introduces the cycle between producer and consumer. 1012 1150 When @payment@ is called, the consumer copies values into the producer's communication variable and a resume is executed. 1013 1151 The context switch restarts the producer at the point where it last context switched, so it continues in @delivery@ after the resume. 1014 1015 1152 @delivery@ returns the status value in @prod@'s coroutine main, where the status is printed. 1016 1153 The loop then repeats calling @delivery@, where each call resumes the consumer coroutine. … … 1018 1155 The consumer increments and returns the receipt to the call in @cons@'s coroutine main. 1019 1156 The loop then repeats calling @payment@, where each call resumes the producer coroutine. 1020 1021 After iterating $N$ times, the producer calls @stop@. 1022 The @done@ flag is set to stop the consumer's execution and a resume is executed. 1023 The context switch restarts @cons@ in @payment@ and it returns with the last receipt. 1024 The consumer terminates its loops because @done@ is true, its @main@ terminates, so @cons@ transitions from a coroutine back to an object, and @prod@ reactivates after the resume in @stop@. 1025 @stop@ returns and @prod@'s coroutine main terminates. 1026 The program main restarts after the resume in @start@. 1027 @start@ returns and the program main terminates. 1028 1029 One \emph{killer} application for a coroutine is device drivers, which at one time caused 70\%-85\% of failures in Windows/Linux~\cite{Swift05}. 
1030 Many device drivers are a finite state-machine parsing a protocol, e.g.: 1031 \begin{tabbing} 1032 \ldots STX \= \ldots message \ldots \= ESC \= ETX \= \ldots message \ldots \= ETX \= 2-byte crc \= \ldots \kill 1033 \ldots STX \> \ldots message \ldots \> ESC \> ETX \> \ldots message \ldots \> ETX \> 2-byte crc \> \ldots 1034 \end{tabbing} 1035 where a network message begins with the control character STX and ends with an ETX, followed by a 2-byte cyclic-redundancy check. 1036 Control characters may appear in a message if preceded by an ESC. 1037 Because FSMs can be complex and occur frequently in important domains, direct support of the coroutine is crucial in a systems programminglanguage. 1038 1039 \begin{figure} 1040 \begin{cfa} 1041 enum Status { CONT, MSG, ESTX, ELNTH, ECRC }; 1042 `coroutine` Driver { 1043 Status status; 1044 char * msg, byte; 1045 }; 1046 void ?{}( Driver & d, char * m ) { d.msg = m; } $\C[3.0in]{// constructor}$ 1047 Status next( Driver & d, char b ) with( d ) { $\C{// 'with' opens scope}$ 1048 byte = b; `resume( d );` return status; 1049 } 1050 void main( Driver & d ) with( d ) { 1051 enum { STX = '\002', ESC = '\033', ETX = '\003', MaxMsg = 64 }; 1052 unsigned short int crc; $\C{// error checking}$ 1053 msg: for () { $\C{// parse message}$ 1054 status = CONT; 1055 unsigned int lnth = 0, sum = 0; 1056 while ( byte != STX ) `suspend();` 1057 emsg: for () { 1058 `suspend();` $\C{// process byte}$ 1059 choose ( byte ) { $\C{// switch with default break}$ 1060 case STX: 1061 status = ESTX; `suspend();` continue msg; 1062 case ETX: 1063 break emsg; 1064 case ESC: 1065 suspend(); 1066 } // choose 1067 if ( lnth >= MaxMsg ) { $\C{// buffer full ?}$ 1068 status = ELNTH; `suspend();` continue msg; } 1069 msg[lnth++] = byte; 1070 sum += byte; 1071 } // for 1072 msg[lnth] = '\0'; $\C{// terminate string}\CRT$ 1073 `suspend();` 1074 crc = (unsigned char)byte << 8; // prevent sign extension for signed char 1075 `suspend();` 1076 status = (crc | (unsigned char)byte) == sum ? MSG : ECRC; 1077 `suspend();` 1078 } // for 1079 } 1080 \end{cfa} 1081 \caption{Device driver for simple communication protocol} 1082 \end{figure} 1157 Figure~\ref{f:ProdConsRuntimeStacks} shows the runtime stacks of the program main, and the coroutine mains for @prod@ and @cons@ during the cycling. 1158 1159 Terminating a coroutine cycle is more complex than a generator cycle, because it requires context switching to the program main's \emph{stack} to shutdown the program, whereas generators started by the program main run on its stack. 1160 Furthermore, each deallocated coroutine must guarantee all destructors are run for object allocated in the coroutine type \emph{and} allocated on the coroutine's stack at the point of suspension, which can be arbitrarily deep. 1161 When a coroutine's main ends, its stack is already unwound so any stack allocated objects with destructors have been finalized. 1162 The na\"{i}ve semantics for coroutine-cycle termination is context switch to the last resumer, like executing a @suspend@/@return@ in a generator. 1163 However, for coroutines, the last resumer is \emph{not} implicitly below the current stack frame, as for generators, because each coroutine's stack is independent. 1164 Unfortunately, it is impossible to determine statically if a coroutine is in a cycle and unrealistic to check dynamically (graph-cycle problem). 1165 Hence, a compromise solution is necessary that works for asymmetric (acyclic) and symmetric (cyclic) coroutines. 
1166 1167 Our solution for coroutine termination works well for the most common asymmetric and symmetric coroutine usage-patterns. 1168 For asymmetric coroutines, it is common for the first resumer (starter) coroutine to be the only resumer. 1169 All previous generators converted to coroutines have this property. 1170 For symmetric coroutines, it is common for the cycle creator to persist for the life-time of the cycle. 1171 Hence, the starter coroutine is remembered on the first resume and ending the coroutine resumes the starter. 1172 Figure~\ref{f:ProdConsRuntimeStacks} shows this semantic by the dashed lines from the end of the coroutine mains: @prod@ starts @cons@ so @cons@ resumes @prod@ at the end, and the program main starts @prod@ so @prod@ resumes the program main at the end. 1173 For other scenarios, it is always possible to devise a solution with additional programming effort. 1174 1175 The producer/consumer example does not illustrate the full power of the starter semantics because @cons@ always ends first. 1176 Assume generator @PingPong@ is converted to a coroutine. 1177 Figure~\ref{f:PingPongFullCoroutineSteps} shows the creation, starter, and cyclic execution steps of the coroutine version. 1178 The program main creates (declares) coroutine instances @ping@ and @pong@. 1179 Next, program main resumes @ping@, making it @ping@'s starter, and @ping@'s main resumes @pong@'s main, making it @pong@'s starter. 1180 Execution forms a cycle when @pong@ resumes @ping@, and cycles $N$ times. 1181 By adjusting $N$ for either @ping@/@pong@, it is possible to have either one finish first, instead of @pong@ always ending first. 1182 If @pong@ ends first, it resumes its starter @ping@ in its coroutine main, then @ping@ ends and resumes its starter the program main in function @start@. 1183 If @ping@ ends first, it resumes its starter the program main in function @start@. 1184 Regardless of the cycle complexity, the starter stack always leads back to the program main, but the stack can be entered at an arbitrary point. 1185 Once back at the program main, coroutines @ping@ and @pong@ are deallocated. 1186 For generators, deallocation runs the destructors for all objects in the generator type. 1187 For coroutines, deallocation deals with objects in the coroutine type and must also run the destructors for any objects pending on the coroutine's stack for any unterminated coroutine. 1188 Hence, if a coroutine's destructor detects the coroutine is not ended, it implicitly raises a cancellation exception (uncatchable exception) at the coroutine and resumes it so the cancellation exception can propagate to the root of the coroutine's stack destroying all local variable on the stack. 1189 So the \CFA semantics for the generator and coroutine, ensure both can be safely deallocated at any time, regardless of their current state, like any other aggregate object. 1190 Explicitly raising normal exceptions at another coroutine can replace flag variables, like @stop@, \eg @prod@ raises a @stop@ exception at @cons@ after it finishes generating values and resumes @cons@, which catches the @stop@exception to terminate its loop. 1191 1192 Finally, there is an interesting effect for @suspend@ with symmetric coroutines. 1193 A coroutine must retain its last resumer to suspend back because the resumer is on a different stack. 1194 These reverse pointers allow @suspend@ to cycle \emph{backwards}, which may be useful in certain cases. 
1195 However, there is an anomaly if a coroutine resumes itself, because it overwrites its last resumer with itself, losing the ability to resume the last external resumer. 1196 To prevent losing this information, a self-resume does not overwrite the last resumer. 1083 1197 1084 1198 … … 1086 1200 1087 1201 A significant implementation challenge for coroutines (and threads, see Section~\ref{threads}) is adding extra fields and executing code after/before the coroutine constructor/destructor and coroutine main to create/initialize/de-initialize/destroy extra fields and the stack. 1088 There are several solutions to this problem and the chosen option forced the \CFA coroutine design.1089 1090 Object-oriented inheritance provides extra fields and code in a restricted context, but it requires programmers to explicitly performthe inheritance:1202 There are several solutions to this problem and the chosen option directed the \CFA coroutine design. 1203 1204 For object-oriented languages, inheritance can be used to provide extra fields and code, but it requires explicitly writing the inheritance: 1091 1205 \begin{cfa}[morekeywords={class,inherits}] 1092 1206 class mycoroutine inherits baseCoroutine { ... } 1093 1207 \end{cfa} 1094 and the programming language (and possibly its tool set, \eg debugger) may need to understand @baseCoroutine@ because of the stack.1208 In addition, the programming language and possibly its tool set, \eg debugger, @valgrind@ need to understand @baseCoroutine@ because of special stack property of coroutines. 1095 1209 Furthermore, the execution of constructors/destructors is in the wrong order for certain operations. 1096 1210 For example, for threads if the thread is implicitly started, it must start \emph{after} all constructors, because the thread relies on a completely initialized object, but the inherited constructor runs \emph{before} the derived. … … 1103 1217 } 1104 1218 \end{cfa} 1105 which also requires an explicit declaration that must be the last oneto ensure correct initialization order.1106 However, there is nothing preventing wrong placement or multiple declarations .1107 1108 For coroutines as for threads, many implementations are based on routine pointers or routineobjects~\cite{Butenhof97, C++14, MS:VisualC++, BoostCoroutines15}.1219 which also requires an explicit declaration that must be last to ensure correct initialization order. 1220 However, there is nothing preventing wrong placement or multiple declarations of @baseCoroutine@. 1221 1222 For coroutines, as for threads, many implementations are based on function pointers or function objects~\cite{Butenhof97, C++14, MS:VisualC++, BoostCoroutines15}. 1109 1223 For example, Boost implements coroutines in terms of four functor object-types: 1110 1224 \begin{cfa} … … 1114 1228 symmetric_coroutine<>::yield_type 1115 1229 \end{cfa} 1116 Similarly, the canonical threading paradigm is often based on routine pointers, \eg @pthreads@~\cite{Butenhof97}, \Csharp~\cite{Csharp}, Go~\cite{Go}, and Scala~\cite{Scala}. 1117 However, the generic thread-handle (identifier) is limited (few operations), unless it is wrapped in a custom type. 1230 1231 Figure~\ref{f:CoroutineMemoryLayout} shows different memory-layout options for a coroutine (where a task is similar). 1232 The coroutine handle is the @coroutine@ instance containing programmer specified global/communication variables. 
1233 The coroutine descriptor contains all implicit declarations needed by the runtime, \eg @suspend@/@resume@, and can be part of the coroutine handle or separate.\footnote{ 1234 We are examining variable-sized structures (VLS), where fields can be variable-sized structures or arrays. 1235 Once allocated, a VLS is fixed sized.} 1236 The coroutine stack can appear in a number of locations and forms, fixed or variable sized. 1237 Hence, the coroutine's stack could be a VLS on the allocating stack, provided the allocating stack is large enough. 1238 For stack allocation, allocation/deallocation only requires a few arithmetic operation to compute the size and adjust the stack point, modulo any constructor costs. 1239 For heap allocation, allocation/deallocation is an expensive heap operation (where the heap can be a shared resource), modulo any constructor costs. 1240 For heap stack allocation, it is also possible to use a split (segmented) stack calling-convention, available with gcc and clang, so the stack is variable sized. 1241 Currently, \CFA supports stack/heap allocated descriptors but only heap allocated stacks; 1242 split-stack allocation is under development. 1243 In \CFA debug-mode, a fixed-sized stack is terminated with a write-only page, which catches most stack overflows. 1244 1245 \begin{figure} 1246 \centering 1247 \input{corlayout.pstex_t} 1248 \caption{Coroutine memory layout} 1249 \label{f:CoroutineMemoryLayout} 1250 \end{figure} 1251 1252 Similarly, the canonical threading paradigm is often based on function pointers, \eg @pthreads@~\cite{Butenhof97}, \Csharp~\cite{Csharp}, Go~\cite{Go}, and Scala~\cite{Scala}. 1253 However, the generic thread-handle (identifier) is limited (few operations), unless it is wrapped in a custom type, as in the pthreads approach. 1118 1254 \begin{cfa} 1119 1255 void mycor( coroutine_t cid, void * arg ) { … … 1127 1263 } 1128 1264 \end{cfa} 1129 Since the custom type is simple to write in \CFA and solves several issues, added support for routine/lambda-based coroutines adds very little.1265 Since the custom type is simple to write in \CFA and solves several issues, added support for function/lambda-based coroutines adds very little. 1130 1266 1131 1267 Note, the type @coroutine_t@ must be an abstract handle to the coroutine, because the coroutine descriptor and its stack are non-copyable. -
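To ground the discussion of handles, stacks, and the special first resume, the following is a minimal stackful-coroutine sketch in plain C using POSIX @ucontext@. It is an illustration only: none of these names come from the \CFA runtime, it uses the na\"{i}ve termination semantics (@uc_link@ returns to the \emph{last} resumer rather than the starter), and it omits error handling and nested-resume bookkeeping.
\begin{cfa}
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

typedef struct Coroutine {
	ucontext_t ctx, last;                   // coroutine context and last resumer
	int started;
	void (*main)( struct Coroutine * );     // coroutine main
	void * stack;
} Coroutine;

static Coroutine * current;                 // coroutine being activated

static void trampoline( void ) {            // runs coroutine main on its own stack
	Coroutine * c = current;
	c->main( c );
}                                           // falling off the end resumes uc_link

void coro_suspend( Coroutine * c ) {
	swapcontext( &c->ctx, &c->last );       // back to last resumer
}

void coro_resume( Coroutine * c ) {
	if ( ! c->started ) {                   // first resume creates the stack
		c->started = 1;
		c->stack = malloc( 64 * 1024 );
		getcontext( &c->ctx );
		c->ctx.uc_stack.ss_sp = c->stack;
		c->ctx.uc_stack.ss_size = 64 * 1024;
		c->ctx.uc_link = &c->last;          // naive termination: back to last resumer
		makecontext( &c->ctx, trampoline, 0 );
	}
	current = c;
	swapcontext( &c->last, &c->ctx );       // activate the coroutine
}

static void counter( Coroutine * c ) {      // example coroutine main
	for ( int i = 0; i < 3; i += 1 ) { printf( "%d\n", i ); coro_suspend( c ); }
}

int main() {
	Coroutine c = { .main = counter };
	for ( int i = 0; i < 3; i += 1 ) coro_resume( &c );
	free( c.stack );
}
\end{cfa}
The first @coro_resume@ allocates the stack and starts the coroutine main, mirroring the special first @resume@ described earlier; a full implementation must additionally handle stack deallocation, destructors for objects pending on the stack, and the starter bookkeeping discussed above.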
doc/papers/concurrency/examples/Fib.c
r3cd5fdd rc6a1e8a 1 1 #include <stdio.h> 2 2 3 #define FIB_INIT { 0, 1 } 4 typedef struct { int fn1, fn; } Fib; 3 typedef struct { 4 int fn1, fn; 5 } Fib; 6 7 #define FibCtor { 1, 0 } 8 5 9 int fib( Fib * f ) { 6 7 int ret = f->fn1; 8 f->fn1 = f->fn; 9 f->fn = ret + f->fn; 10 11 return ret; 10 int fn = f->fn; f->fn = f->fn1; 11 f->fn1 = f->fn + fn; 12 return fn; 12 13 } 13 14 int main() { 14 Fib f1 = F IB_INIT, f2 = FIB_INIT;15 Fib f1 = FibCtor, f2 = FibCtor; 15 16 for ( int i = 0; i < 10; i += 1 ) { 16 17 printf( "%d %d\n", fib( &f1 ), fib( &f2 ) ); -
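A quick way to check the rewritten fib() above (this driver is illustrative and not part of the changeset): with FibCtor initializing fn1/fn to 1/0, successive calls yield the usual sequence starting at 0.

#include <assert.h>
#include <stdio.h>

typedef struct { int fn1, fn; } Fib;
#define FibCtor { 1, 0 }

int fib( Fib * f ) {                       // same logic as the revised Fib.c above
	int fn = f->fn; f->fn = f->fn1; f->fn1 = f->fn + fn;
	return fn;
}

int main() {
	int expected[10] = { 0, 1, 1, 2, 3, 5, 8, 13, 21, 34 };
	Fib f = FibCtor;
	for ( int i = 0; i < 10; i += 1 ) assert( fib( &f ) == expected[i] );
	printf( "first 10 values match\n" );
}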
doc/papers/concurrency/examples/Fib.cfa
r3cd5fdd rc6a1e8a 13 13 } 14 14 15 #define F IB_INIT{ 0, 1 }16 typedef struct { int fn 1, fn2; } Fib;15 #define FibCtor { 0, 1 } 16 typedef struct { int fn, fn1; } Fib; 17 17 int fib_state( Fib & f ) with( f ) { 18 int ret = fn2; 19 int fn = fn1 + fn2; 20 fn2 = fn1; fn1 = fn; 21 return ret; 18 int fn0 = fn1 + fn2; fn2 = fn1; fn = fn0; 19 return fn1; 22 20 } 23 21 … … 30 28 } 31 29 } 32 int next( Fib1 & fib ) with( fib ) { resume( fib ); returnfn; }30 int ?()( Fib1 & fib ) with( fib ) { return resume( fib ).fn; } 33 31 34 coroutine Fib2 { int ret; };// used for communication32 coroutine Fib2 { int fn; }; // used for communication 35 33 void main( Fib2 & fib ) with( fib ) { // called on first resume 36 int fn , fn1 = 1, fn2 = 0;// precompute first two states34 int fn1 = 1, fn2 = 0; // precompute first two states 37 35 for () { 38 ret = fn2;39 36 fn = fn1 + fn2; fn2 = fn1; fn1 = fn; // general case 40 37 suspend(); // restart last resume 41 38 } 42 39 } 43 int next( Fib2 & fib ) with( fib ) { 44 resume( fib ); // restart last suspend 45 return ret; 40 int ?()( Fib2 & fib ) with( fib ) { 41 return resume( fib ).fn; // restart last suspend 42 } 43 int ?()( Fib2 & fib, int N ) with( fib ) { 44 for ( N - 1 ) fib(); 45 return fib(); 46 } 47 double ?()( Fib2 & fib ) with( fib ) { 48 return (int)(fib()) / 3.14159; // restart last suspend 46 49 } 47 50 … … 51 54 sout | nl; 52 55 53 Fib f1 = F IB_INIT, f2 = FIB_INIT;56 Fib f1 = FibCtor, f2 = FibCtor; 54 57 for ( 10 ) 55 58 sout | fib_state( f1 ) | fib_state( f2 ); … … 58 61 Fib1 f1, f2; 59 62 for ( 10 ) 60 sout | next( f1 ) | next( f2);63 sout | f1() | f2(); 61 64 sout | nl; 62 65 63 Fib2 f1 , f2;66 Fib2 f12, f22; 64 67 for ( 10 ) 65 sout | next( (Fib2 &)f1 ) | next( (Fib2 &)f2 );68 sout | (int)f12() | (double)f12() | f22( 2 ); 66 69 } 67 70 -
doc/papers/concurrency/examples/Fib2.cfa
r3cd5fdd rc6a1e8a 10 10 // Created On : Thu Apr 26 23:20:08 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 22 17:26:41201913 // Update Count : 2812 // Last Modified On : Sat May 18 08:55:59 2019 13 // Update Count : 36 14 14 // 15 15 … … 17 17 #include <coroutine.hfa> 18 18 19 coroutine Fibonacci { int fn1 ; };// used for communication19 coroutine Fibonacci { int fn1, fn; }; // used for communication 20 20 21 21 void main( Fibonacci & fib ) with( fib ) { // called on first resume 22 int fn; 23 [fn1, fn] = [0, 1]; // precompute first two states 22 [fn1, fn] = [1, 0]; // precompute first two states 24 23 for () { 25 24 suspend(); // restart last resume … … 29 28 30 29 int ?()( Fibonacci & fib ) with( fib ) { // function call operator 31 resume( fib ); // restart last suspend 32 return fn1; 30 return resume( fib ).fn1; // restart last suspend 33 31 } 34 32 … … 36 34 Fibonacci f1, f2; 37 35 for ( 10 ) { // print N Fibonacci values 38 sout | f1() | f2();36 sout | resume( f1 ).fn | resume( f2 ).fn; 39 37 } // for 40 38 } … … 42 40 // Local Variables: // 43 41 // tab-width: 4 // 44 // compile-command: "cfa fibonacci_1.cfa" //42 // compile-command: "cfa Fib2.cfa" // 45 43 // End: // -
libcfa/src/iostream.cfa
r3cd5fdd rc6a1e8a 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue May 21 13:01:26201913 // Update Count : 67412 // Last Modified On : Tue Jun 4 17:32:34 2019 13 // Update Count : 761 14 14 // 15 15 … … 20 20 #include <stdbool.h> // true/false 21 21 //#include <string.h> // strlen, strcmp 22 extern size_t strlen (const char *__s) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1))); 22 23 extern int strcmp (const char *__s1, const char *__s2) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2))); 23 extern size_t strlen (const char *__s) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1))); 24 extern char *strcpy (char *__restrict __dest, const char *__restrict __src) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2))); 25 extern void *memcpy (void *__restrict __dest, const void *__restrict __src, size_t __n) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2))); 24 26 #include <float.h> // DBL_DIG, LDBL_DIG 25 27 #include <math.h> // isfinite 26 28 #include <complex.h> // creal, cimag 27 } 29 } // extern "C" 28 30 29 31 forall( dtype ostype | ostream( ostype ) ) { … … 198 200 if ( sepPrt( os ) ) fmt( os, "%s", sepGetCur( os ) ); 199 201 // os | crealf( fc ) | nonl; 200 float f = crealf( fc ); 201 PrintWithDP( os, "%g", f ); 202 f = cimagf( fc ); 203 PrintWithDP( os, "%+g", f ); 202 PrintWithDP( os, "%g", crealf( fc ) ); 203 PrintWithDP( os, "%+g", cimagf( fc ) ); 204 204 fmt( os, "i" ); 205 205 return os; … … 212 212 if ( sepPrt( os ) ) fmt( os, "%s", sepGetCur( os ) ); 213 213 // os | creal( dc ) | nonl; 214 double d = creal( dc ); 215 PrintWithDP( os, "%.*lg", d, DBL_DIG ); 216 d = cimag( dc ); 217 PrintWithDP( os, "%+.*lg", d, DBL_DIG ); 214 PrintWithDP( os, "%.*lg", creal( dc ), DBL_DIG ); 215 PrintWithDP( os, "%+.*lg", cimag( dc ), DBL_DIG ); 218 216 fmt( os, "i" ); 219 217 return os; … … 226 224 if ( sepPrt( os ) ) fmt( os, "%s", sepGetCur( os ) ); 227 225 // os | creall( ldc ) || nonl; 228 long double ld = creall( ldc ); 229 PrintWithDP( os, "%.*Lg", ld, LDBL_DIG ); 230 ld = cimagl( ldc ); 231 PrintWithDP( os, "%+.*Lg", ld, LDBL_DIG ); 226 PrintWithDP( os, "%.*Lg", creall( ldc ), LDBL_DIG ); 227 PrintWithDP( os, "%+.*Lg", cimagl( ldc ), LDBL_DIG ); 232 228 fmt( os, "i" ); 233 229 return os; … … 395 391 } // distribution 396 392 393 394 //*********************************** Integral *********************************** 395 396 static const char * shortbin[] = { "0", "1", "10", "11", "100", "101", "110", "111", "1000", "1001", "1010", "1011", "1100", "1101", "1110", "1111" }; 397 static const char * longbin[] = { "0000", "0001", "0010", "0011", "0100", "0101", "0110", "0111", "1000", "1001", "1010", "1011", "1100", "1101", "1110", "1111" }; 398 399 // Default prefix for non-decimal prints is 0b, 0, 0x. 
400 #define IntegralFMTImpl( T, CODE, IFMTNP, IFMTP ) \ 401 forall( dtype ostype | ostream( ostype ) ) { \ 402 ostype & ?|?( ostype & os, _Ostream_Manip(T) f ) { \ 403 if ( sepPrt( os ) ) fmt( os, "%s", sepGetCur( os ) ); \ 404 \ 405 if ( f.base == 'b' || f.base == 'B' ) { /* bespoke binary format */ \ 406 int bits; \ 407 if ( f.val == (T){0} ) bits = 1; /* force at least one bit to print */ \ 408 else bits = sizeof(long long int) * 8 - __builtin_clzll( f.val ); /* position of most significant bit */ \ 409 bits = bits > sizeof(f.val) * 8 ? sizeof(f.val) * 8 : bits; \ 410 int spaces = f.wd - bits; /* can be negative */ \ 411 if ( ! f.flags.nobsdp ) { spaces -= 2; } /* base prefix takes space */ \ 412 /* printf( "%d %d\n", bits, spaces ); */ \ 413 if ( ! f.flags.left ) { /* right justified ? */ \ 414 /* Note, base prefix then zero padding or spacing then prefix. */ \ 415 if ( f.flags.pad0 || f.flags.pc ) { \ 416 if ( ! f.flags.nobsdp ) { fmt( os, "0%c", f.base ); } \ 417 if ( f.flags.pc ) spaces = f.pc - bits; \ 418 if ( spaces > 0 ) fmt( os, "%0*d", spaces, 0 ); /* zero pad */ \ 419 } else { \ 420 if ( spaces > 0 ) fmt( os, "%*s", spaces, " " ); /* space pad */ \ 421 if ( ! f.flags.nobsdp ) { fmt( os, "0%c", f.base ); } \ 422 } /* if */ \ 423 } else if ( ! f.flags.nobsdp ) { \ 424 fmt( os, "0%c", f.base ); \ 425 } /* if */ \ 426 int shift = (bits - 1) / 4 * 4; /* floor( bits - 1, 4 ) */ \ 427 typeof( f.val ) temp = f.val; \ 428 fmt( os, "%s", shortbin[(temp >> shift) & 0xf] ); \ 429 for () { \ 430 shift -= 4; \ 431 if ( shift < 0 ) break; \ 432 temp = f.val; \ 433 fmt( os, "%s", longbin[(temp >> shift) & 0xf] ); \ 434 } /* for */ \ 435 if ( f.flags.left && spaces > 0 ) fmt( os, "%*s", spaces, " " ); \ 436 return os; \ 437 } /* if */ \ 438 \ 439 char fmtstr[sizeof(IFMTP)]; /* sizeof includes '\0' */ \ 440 if ( ! f.flags.pc ) memcpy( &fmtstr, IFMTNP, sizeof(IFMTNP) ); \ 441 else memcpy( &fmtstr, IFMTP, sizeof(IFMTP) ); \ 442 int star = 4; /* position before first '*' */ \ 443 \ 444 /* Insert flags into spaces before '*', from right to left. */ \ 445 if ( ! f.flags.nobsdp ) { fmtstr[star] = '#'; star -= 1; } \ 446 if ( f.flags.left ) { fmtstr[star] = '-'; star -= 1; } \ 447 if ( f.flags.sign && f.base == CODE ) { fmtstr[star] = '+'; star -= 1; } \ 448 if ( f.flags.pad0 && ! f.flags.pc ) { fmtstr[star] = '0'; star -= 1; } \ 449 fmtstr[star] = '%'; \ 450 \ 451 if ( ! f.flags.pc ) { /* no precision */ \ 452 /* printf( "%s\n", &fmtstr[star] ); */ \ 453 fmtstr[sizeof(IFMTNP)-2] = f.base; /* sizeof includes '\0' */ \ 454 fmt( os, &fmtstr[star], f.wd, f.val ); \ 455 } else { /* precision */ \ 456 fmtstr[sizeof(IFMTP)-2] = f.base; /* sizeof includes '\0' */ \ 457 /* printf( "%s\n", &fmtstr[star] ); */ \ 458 fmt( os, &fmtstr[star], f.wd, f.pc, f.val ); \ 459 } /* if */ \ 460 return os; \ 461 } /* ?|? 
*/ \ 462 void ?|?( ostype & os, _Ostream_Manip(T) f ) { (ostype &)(os | f); nl( os ); } \ 463 } // distribution 464 465 IntegralFMTImpl( signed char, 'd', "% *hh ", "% *.*hh " ) 466 IntegralFMTImpl( unsigned char, 'u', "% *hh ", "% *.*hh " ) 467 IntegralFMTImpl( signed short int, 'd', "% *h ", "% *.*h " ) 468 IntegralFMTImpl( unsigned short int, 'u', "% *h ", "% *.*h " ) 469 IntegralFMTImpl( signed int, 'd', "% * ", "% *.* " ) 470 IntegralFMTImpl( unsigned int, 'u', "% * ", "% *.* " ) 471 IntegralFMTImpl( signed long int, 'd', "% *l ", "% *.*l " ) 472 IntegralFMTImpl( unsigned long int, 'u', "% *l ", "% *.*l " ) 473 IntegralFMTImpl( signed long long int, 'd', "% *ll ", "% *.*ll " ) 474 IntegralFMTImpl( unsigned long long int, 'u', "% *ll ", "% *.*ll " ) 475 476 //*********************************** Floating Point *********************************** 477 478 #define PrintWithDP2( os, format, val, ... ) \ 479 { \ 480 enum { size = 48 }; \ 481 char buf[size]; \ 482 int bufbeg = 0, i, len = snprintf( buf, size, format, ##__VA_ARGS__, val ); \ 483 if ( isfinite( val ) && (f.base != 'g' || f.pc != 0) ) { /* if number, print decimal point */ \ 484 for ( i = 0; i < len && buf[i] != '.' && buf[i] != 'e' && buf[i] != 'E'; i += 1 ); /* decimal point or scientific ? */ \ 485 if ( i == len && ! f.flags.nobsdp ) { \ 486 if ( ! f.flags.left ) { \ 487 buf[i] = '.'; buf[i + 1] = '\0'; \ 488 if ( buf[0] == ' ' ) bufbeg = 1; /* decimal point within width */ \ 489 } else { \ 490 for ( i = 0; i < len && buf[i] != ' '; i += 1 ); /* trailing blank ? */ \ 491 buf[i] = '.'; \ 492 if ( i == len ) buf[i + 1] = '\0'; \ 493 } /* if */ \ 494 } /* if */ \ 495 } /* if */ \ 496 fmt( os, "%s", &buf[bufbeg] ); \ 497 } 498 499 #define FloatingPointFMTImpl( T, DFMTNP, DFMTP ) \ 500 forall( dtype ostype | ostream( ostype ) ) { \ 501 ostype & ?|?( ostype & os, _Ostream_Manip(T) f ) { \ 502 if ( sepPrt( os ) ) fmt( os, "%s", sepGetCur( os ) ); \ 503 char fmtstr[sizeof(DFMTP)]; /* sizeof includes '\0' */ \ 504 if ( ! f.flags.pc ) memcpy( &fmtstr, DFMTNP, sizeof(DFMTNP) ); \ 505 else memcpy( &fmtstr, DFMTP, sizeof(DFMTP) ); \ 506 int star = 4; /* position before first '*' */ \ 507 \ 508 /* Insert flags into spaces before '*', from right to left. */ \ 509 if ( f.flags.left ) { fmtstr[star] = '-'; star -= 1; } \ 510 if ( f.flags.sign ) { fmtstr[star] = '+'; star -= 1; } \ 511 if ( f.flags.pad0 ) { fmtstr[star] = '0'; star -= 1; } \ 512 fmtstr[star] = '%'; \ 513 \ 514 if ( ! f.flags.pc ) { /* no precision */ \ 515 fmtstr[sizeof(DFMTNP)-2] = f.base; /* sizeof includes '\0' */ \ 516 /* printf( "%g %d %s\n", f.val, f.wd, &fmtstr[star]); */ \ 517 PrintWithDP2( os, &fmtstr[star], f.val, f.wd ) \ 518 } else { /* precision */ \ 519 fmtstr[sizeof(DFMTP)-2] = f.base; /* sizeof includes '\0' */ \ 520 /* printf( "%g %d %d %s\n", f.val, f.wd, f.pc, &fmtstr[star] ); */ \ 521 PrintWithDP2( os, &fmtstr[star], f.val, f.wd, f.pc ) \ 522 } /* if */ \ 523 return os; \ 524 } /* ?|? 
*/ \ 525 void ?|?( ostype & os, _Ostream_Manip(T) f ) { (ostype &)(os | f); nl( os ); } \ 526 } // distribution 527 528 FloatingPointFMTImpl( double, "% * ", "% *.* " ) 529 FloatingPointFMTImpl( long double, "% *L ", "% *.*L " ) 530 531 //*********************************** Character *********************************** 532 533 forall( dtype ostype | ostream( ostype ) ) { 534 ostype & ?|?( ostype & os, _Ostream_Manip(char) f ) { 535 if ( f.base != 'c' ) { // bespoke binary/octal/hex format 536 _Ostream_Manip(unsigned char) fmtuc @= { f.val, f.wd, f.pc, f.base, {'\0'} }; 537 fmtuc.flags.pc = f.flags.pc; 538 fmtuc.flags.nobsdp = f.flags.nobsdp; 539 // os | fmtuc | nonl; 540 (ostype &)(os | fmtuc); 541 return os; 542 } // if 543 544 if ( sepPrt( os ) ) fmt( os, "%s", sepGetCur( os ) ); 545 546 #define CFMTNP "% * " 547 char fmtstr[sizeof(CFMTNP)]; // sizeof includes '\0' 548 memcpy( &fmtstr, CFMTNP, sizeof(CFMTNP) ); 549 int star = 1; // position before first '*' 550 551 // Insert flags into spaces before '*', from right to left. 552 if ( f.flags.left ) { fmtstr[star] = '-'; star -= 1; } 553 fmtstr[star] = '%'; 554 555 fmtstr[sizeof(CFMTNP)-2] = f.base; // sizeof includes '\0' 556 // printf( "%d %s\n", f.wd, &fmtstr[star] ); 557 fmt( os, &fmtstr[star], f.wd, f.val ); 558 return os; 559 } // ?|? 560 void ?|?( ostype & os, _Ostream_Manip(char) f ) { (ostype &)(os | f); nl( os ); } 561 } // distribution 562 563 //*********************************** C String *********************************** 564 565 forall( dtype ostype | ostream( ostype ) ) { 566 ostype & ?|?( ostype & os, _Ostream_Manip(const char *) f ) { 567 if ( ! f.val ) return os; // null pointer ? 568 569 if ( f.base != 's' ) { // bespoke binary/octal/hex format 570 _Ostream_Manip(unsigned char) fmtuc @= { 0, f.wd, f.pc, f.base, {'\0'} }; 571 fmtuc.flags.pc = f.flags.pc; 572 fmtuc.flags.nobsdp = f.flags.nobsdp; 573 for ( unsigned int i = 0; f.val[i] != '\0'; i += 1 ) { 574 fmtuc.val = f.val[i]; 575 // os | fmtuc | nonl; 576 (ostype &)(os | fmtuc); 577 } // for 578 return os; 579 } // if 580 581 if ( sepPrt( os ) ) fmt( os, "%s", sepGetCur( os ) ); 582 583 #define SFMTNP "% * " 584 #define SFMTP "% *.* " 585 char fmtstr[sizeof(SFMTP)]; // sizeof includes '\0' 586 if ( ! f.flags.pc ) memcpy( &fmtstr, SFMTNP, sizeof(SFMTNP) ); 587 else memcpy( &fmtstr, SFMTP, sizeof(SFMTP) ); 588 int star = 1; // position before first '*' 589 590 // Insert flags into spaces before '*', from right to left. 591 if ( f.flags.left ) { fmtstr[star] = '-'; star -= 1; } 592 fmtstr[star] = '%'; 593 594 if ( ! f.flags.pc ) { // no precision 595 // printf( "%d %s\n", f.wd, &fmtstr[star] ); 596 fmtstr[sizeof(SFMTNP)-2] = f.base; // sizeof includes '\0' 597 fmt( os, &fmtstr[star], f.wd, f.val ); 598 } else { // precision 599 fmtstr[sizeof(SFMTP)-2] = f.base; // sizeof includes '\0' 600 // printf( "%d %d %s\n", f.wd, f.pc, &fmtstr[star] ); 601 fmt( os, &fmtstr[star], f.wd, f.pc, f.val ); 602 } // if 603 return os; 604 } // ?|? 
605 void ?|?( ostype & os, _Ostream_Manip(const char *) f ) { (ostype &)(os | f); nl( os ); } 606 } // distribution 607 397 608 //--------------------------------------- 398 609 … … 410 621 } // distribution 411 622 412 //--------------------------------------- 623 624 //*********************************** Istream *********************************** 625 413 626 414 627 forall( dtype istype | istream( istype ) ) { … … 437 650 438 651 istype & ?|?( istype & is, signed char & sc ) { 439 fmt( is, "%hh d", &sc );652 fmt( is, "%hhi", &sc ); 440 653 return is; 441 654 } // ?|? 442 655 443 656 istype & ?|?( istype & is, unsigned char & usc ) { 444 fmt( is, "%hh u", &usc );657 fmt( is, "%hhi", &usc ); 445 658 return is; 446 659 } // ?|? 447 660 448 661 istype & ?|?( istype & is, short int & si ) { 449 fmt( is, "%h d", &si );662 fmt( is, "%hi", &si ); 450 663 return is; 451 664 } // ?|? 452 665 453 666 istype & ?|?( istype & is, unsigned short int & usi ) { 454 fmt( is, "%h u", &usi );667 fmt( is, "%hi", &usi ); 455 668 return is; 456 669 } // ?|? 457 670 458 671 istype & ?|?( istype & is, int & i ) { 459 fmt( is, "% d", &i );672 fmt( is, "%i", &i ); 460 673 return is; 461 674 } // ?|? 462 675 463 676 istype & ?|?( istype & is, unsigned int & ui ) { 464 fmt( is, "% u", &ui );677 fmt( is, "%i", &ui ); 465 678 return is; 466 679 } // ?|? 467 680 468 681 istype & ?|?( istype & is, long int & li ) { 469 fmt( is, "%l d", &li );682 fmt( is, "%li", &li ); 470 683 return is; 471 684 } // ?|? 472 685 473 686 istype & ?|?( istype & is, unsigned long int & ulli ) { 474 fmt( is, "%l u", &ulli );687 fmt( is, "%li", &ulli ); 475 688 return is; 476 689 } // ?|? 477 690 478 691 istype & ?|?( istype & is, long long int & lli ) { 479 fmt( is, "%ll d", &lli );692 fmt( is, "%lli", &lli ); 480 693 return is; 481 694 } // ?|? 482 695 483 696 istype & ?|?( istype & is, unsigned long long int & ulli ) { 484 fmt( is, "%ll u", &ulli );697 fmt( is, "%lli", &ulli ); 485 698 return is; 486 699 } // ?|? 
… … 505 718 istype & ?|?( istype & is, float _Complex & fc ) { 506 719 float re, im; 507 fmt( is, "% g%gi", &re, &im );720 fmt( is, "%f%fi", &re, &im ); 508 721 fc = re + im * _Complex_I; 509 722 return is; … … 561 774 } // cstr 562 775 776 #if 0 777 forall( dtype istype | istream( istype ) ) 778 istype & ?|?( istype & is, _Istream_skip skip ) { 779 fmt( is, skip.s, "" ); // no input arguments 780 return is; 781 } // skip 782 783 forall( dtype istype | istream( istype ) ) 784 istype & ?|?( istype & is, _Istream_incl incl ) { 785 size_t len = strlen( incl.scanset ) + 4; // extras: "%[]\0" 786 char fmtstr[len]; 787 fmtstr[0] = '%'; fmtstr[1] = '['; 788 strcpy( &fmtstr[2], incl.scanset ); // after '[', copy includes '\0' 789 fmtstr[len - 2] = ']'; fmtstr[len - 1] = '\0'; 790 fmt( is, fmtstr, incl.s ); 791 return is; 792 } // incl 793 794 forall( dtype istype | istream( istype ) ) 795 istype & ?|?( istype & is, _Istream_excl excl ) { 796 size_t len = strlen( excl.scanset ); 797 char fmtstr[len+5]; 798 fmtstr[0] = '%'; fmtstr[1] = '['; fmtstr[2] = '^'; 799 strcpy( &fmtstr[3], excl.scanset ); // after '^', copy includes '\0' 800 fmtstr[len - 2] = ']'; fmtstr[len - 1] = '\0'; 801 fmt( is, fmtstr, excl.s ); 802 return is; 803 } // excl 804 805 forall( dtype istype | istream( istype ) ) 806 istype & ?|?( istype & is, _Istream_cstr cstr ) { 807 fmt( is, "%s", cstr.s ); 808 return is; 809 } // cstr 810 811 _Istream_cstrW cstr( char * s, int size ) { return (_Istream_cstrW){ s, size }; } 812 forall( dtype istype | istream( istype ) ) 813 istype & ?|?( istype & is, _Istream_cstrW cstr ) { 814 enum { size = 16 }; 815 char fmtstr[size]; 816 sprintf( fmtstr, "%%%ds", cstr.size ); 817 fmt( is, fmtstr, cstr.s ); 818 return is; 819 } // cstr 820 821 forall( dtype istype | istream( istype ) ) istype & ?|?( istype &, _Istream_Cstr ) { 822 823 } 824 825 //*********************************** Manipulators *********************************** 826 827 #define InputFMTImpl( T, CODE ) \ 828 forall( dtype istype | istream( istype ) ) \ 829 istype & ?|?( istype & is, _Istream_Manip(T) f ) { \ 830 enum { size = 16 }; \ 831 char fmtstr[size]; \ 832 if ( f.wd == -1 ) { \ 833 snprintf( fmtstr, size, "%%%s%s", f.ignore ? "*" : "", CODE ); \ 834 } else { \ 835 snprintf( fmtstr, size, "%%%s%d%s", f.ignore ? "*" : "", f.wd, CODE ); \ 836 } /* if */ \ 837 /* printf( "%d %s %p\n", f.wd, fmtstr, &f.val ); */ \ 838 fmt( is, fmtstr, &f.val ); \ 839 return is; \ 840 } /* ?|? */ 841 842 InputFMTImpl( char, "c" ) 843 InputFMTImpl( signed char, "hhi" ) 844 InputFMTImpl( unsigned char, "hhi" ) 845 InputFMTImpl( signed short int, "hi" ) 846 InputFMTImpl( unsigned short int, "hi" ) 847 InputFMTImpl( signed int, "i" ) 848 InputFMTImpl( unsigned int, "i" ) 849 InputFMTImpl( signed long int, "li" ) 850 InputFMTImpl( unsigned long int, "li" ) 851 InputFMTImpl( signed long long int, "lli" ) 852 InputFMTImpl( unsigned long long int, "lli" ) 853 854 InputFMTImpl( float, "f" ) 855 InputFMTImpl( double, "lf" ) 856 InputFMTImpl( long double, "Lf" ) 857 858 InputFMTImpl( float _Complex, "ff" ) 859 InputFMTImpl( double _Complex, "lf" ) 860 InputFMTImpl( long double _Complex, "Lf" ) 861 #endif // 0 862 563 863 // Local Variables: // 564 864 // tab-width: 4 // -
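The istream changes above replace the "d"/"u" conversions with "i"; the difference matters on input because scanf's "%i" accepts decimal, octal (leading 0), and hexadecimal (leading 0x) literals, while "%d" accepts only decimal digits. A small standalone illustration (not part of the library):

#include <stdio.h>

int main() {
	int d = -1, i = -1;
	sscanf( "0x1f", "%d", &d );            // "%d" stops at 'x', leaving d == 0
	sscanf( "0x1f", "%i", &i );            // "%i" reads the whole literal, i == 31
	printf( "%d %d\n", d, i );             // prints "0 31"
}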
libcfa/src/iostream.hfa
r3cd5fdd rc6a1e8a 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // iostream --7 // iostream.hfa -- 8 8 // 9 9 // Author : Peter A. Buhr 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat May 11 10:31:27201913 // Update Count : 2 3212 // Last Modified On : Tue Jun 4 17:33:43 2019 13 // Update Count : 286 14 14 // 15 15 … … 17 17 18 18 #include "iterator.hfa" 19 20 21 //*********************************** Ostream *********************************** 22 19 23 20 24 trait ostream( dtype ostype ) { … … 146 150 } // distribution 147 151 148 //--------------------------------------- 152 //*********************************** Manipulators *********************************** 153 154 forall( otype T ) 155 struct _Ostream_Manip { 156 T val; // polymorphic base-type 157 unsigned char wd, pc; // width, precision 158 char base; // numeric base / floating-point style 159 union { 160 unsigned char all; 161 struct { 162 unsigned char pc:1; // precision specified 163 unsigned char left:1; // left justify 164 unsigned char nobsdp:1; // base prefix / decimal point 165 unsigned char sign:1; // plus / minus sign 166 unsigned char pad0:1; // zero pad 167 } flags; 168 }; 169 }; // _Ostream_Manip 170 171 //*********************************** Integral *********************************** 172 173 #define IntegralFMTDecl( T, CODE ) \ 174 static inline { \ 175 _Ostream_Manip(T) bin( T v ) { return (_Ostream_Manip(T))@{ v, 1, 0, 'b', { .all : 0 } }; } \ 176 _Ostream_Manip(T) oct( T v ) { return (_Ostream_Manip(T))@{ v, 1, 0, 'o', { .all : 0 } }; } \ 177 _Ostream_Manip(T) hex( T v ) { return (_Ostream_Manip(T))@{ v, 1, 0, 'x', { .all : 0 } }; } \ 178 _Ostream_Manip(T) wd( unsigned char w, T v ) { return (_Ostream_Manip(T))@{ v, w, 0, CODE, { .all : 0 } }; } \ 179 _Ostream_Manip(T) wd( unsigned char w, unsigned char p, T v ) { return (_Ostream_Manip(T))@{ v, w, p, CODE, { .flags.pc : true } }; } \ 180 _Ostream_Manip(T) & wd( unsigned char w, _Ostream_Manip(T) & fmt ) with(fmt) { wd = w; return fmt; } \ 181 _Ostream_Manip(T) & wd( unsigned char w, unsigned char p, _Ostream_Manip(T) & fmt ) with(fmt) { wd = w; pc = p; flags.pc = true; return fmt; } \ 182 _Ostream_Manip(T) & left( _Ostream_Manip(T) & fmt ) { fmt.flags.left = true; return fmt; } \ 183 _Ostream_Manip(T) & upcase( _Ostream_Manip(T) & fmt ) { if ( fmt.base == 'x' || fmt.base == 'b' ) fmt.base -= 32; /* upper case */ return fmt; } \ 184 _Ostream_Manip(T) & nobase( _Ostream_Manip(T) & fmt ) { fmt.flags.nobsdp = true; return fmt; } \ 185 _Ostream_Manip(T) & pad0( _Ostream_Manip(T) & fmt ) { fmt.flags.pad0 = true; return fmt; } \ 186 _Ostream_Manip(T) sign( T v ) { return (_Ostream_Manip(T))@{ v, 1, 0, CODE, { .flags.sign : true } }; } \ 187 _Ostream_Manip(T) & sign( _Ostream_Manip(T) & fmt ) { fmt.flags.sign = true; return fmt; } \ 188 } \ 189 forall( dtype ostype | ostream( ostype ) ) { \ 190 ostype & ?|?( ostype & os, _Ostream_Manip(T) f ); \ 191 void ?|?( ostype & os, _Ostream_Manip(T) f ); \ 192 } 193 194 IntegralFMTDecl( signed char, 'd' ) 195 IntegralFMTDecl( unsigned char, 'u' ) 196 IntegralFMTDecl( signed short int, 'd' ) 197 IntegralFMTDecl( unsigned short int, 'u' ) 198 IntegralFMTDecl( signed int, 'd' ) 199 IntegralFMTDecl( unsigned int, 'u' ) 200 IntegralFMTDecl( signed long int, 'd' ) 201 IntegralFMTDecl( unsigned long int, 'u' ) 202 IntegralFMTDecl( signed long long int, 'd' ) 203 IntegralFMTDecl( unsigned long long int, 'u' ) 204 205 //*********************************** 
Floating Point *********************************** 206 207 // Default suffix for values with no fraction is "." 208 #define FloatingPointFMTDecl( T ) \ 209 static inline { \ 210 _Ostream_Manip(T) sci( T v ) { return (_Ostream_Manip(T))@{ v, 1, 0, 'e', { .all : 0 } }; } \ 211 _Ostream_Manip(T) hex( T v ) { return (_Ostream_Manip(T))@{ v, 1, 0, 'a', { .all : 0 } }; } \ 212 _Ostream_Manip(T) wd( unsigned char w, T v ) { return (_Ostream_Manip(T))@{ v, w, 0, 'f', { .all : 0 } }; } \ 213 _Ostream_Manip(T) wd( unsigned char w, unsigned char p, T v ) { return (_Ostream_Manip(T))@{ v, w, p, 'f', { .flags.pc : true } }; } \ 214 _Ostream_Manip(T) ws( unsigned char w, unsigned char p, T v ) { return (_Ostream_Manip(T))@{ v, w, p, 'g', { .flags.pc : true } }; } \ 215 _Ostream_Manip(T) & wd( unsigned char w, _Ostream_Manip(T) & fmt ) with(fmt) { wd = w; return fmt; } \ 216 _Ostream_Manip(T) & wd( unsigned char w, unsigned char p, _Ostream_Manip(T) & fmt ) with(fmt) { wd = w; pc = p; flags.pc = true; return fmt; } \ 217 _Ostream_Manip(T) & left( _Ostream_Manip(T) & fmt ) { fmt.flags.left = true; return fmt; } \ 218 _Ostream_Manip(T) upcase( T v ) { return (_Ostream_Manip(T))@{ v, 1, 0, 'G', { .all : 0 } }; } \ 219 _Ostream_Manip(T) & upcase( _Ostream_Manip(T) & fmt ) { fmt.base -= 32; /* upper case */ return fmt; } \ 220 _Ostream_Manip(T) & pad0( _Ostream_Manip(T) & fmt ) { fmt.flags.pad0 = true; return fmt; } \ 221 _Ostream_Manip(T) sign( T v ) { return (_Ostream_Manip(T))@{ v, 1, 0, 'g', { .flags.sign : true } }; } \ 222 _Ostream_Manip(T) & sign( _Ostream_Manip(T) & fmt ) { fmt.flags.sign = true; return fmt; } \ 223 _Ostream_Manip(T) nodp( T v ) { return (_Ostream_Manip(T))@{ v, 1, 0, 'g', { .flags.nobsdp : true } }; } \ 224 _Ostream_Manip(T) & nodp( _Ostream_Manip(T) & fmt ) { fmt.flags.nobsdp = true; return fmt; } \ 225 } \ 226 forall( dtype ostype | ostream( ostype ) ) { \ 227 ostype & ?|?( ostype & os, _Ostream_Manip(T) f ); \ 228 void ?|?( ostype & os, _Ostream_Manip(T) f ); \ 229 } 230 231 FloatingPointFMTDecl( double ) 232 FloatingPointFMTDecl( long double ) 233 234 //*********************************** Character *********************************** 235 236 static inline { 237 _Ostream_Manip(char) bin( char v ) { return (_Ostream_Manip(char))@{ v, 1, 0, 'b', { .all : 0 } }; } 238 _Ostream_Manip(char) oct( char v ) { return (_Ostream_Manip(char))@{ v, 1, 0, 'o', { .all : 0 } }; } 239 _Ostream_Manip(char) hex( char v ) { return (_Ostream_Manip(char))@{ v, 1, 0, 'x', { .all : 0 } }; } 240 _Ostream_Manip(char) wd( unsigned char w, char v ) { return (_Ostream_Manip(char))@{ v, w, 0, 'c', { .all : 0 } }; } 241 _Ostream_Manip(char) & wd( unsigned char w, _Ostream_Manip(char) & fmt ) with(fmt) { wd = w; return fmt; } 242 _Ostream_Manip(char) & left( _Ostream_Manip(char) & fmt ) { fmt.flags.left = true; return fmt; } 243 _Ostream_Manip(char) & upcase( _Ostream_Manip(char) & fmt ) { if ( fmt.base == 'x' || fmt.base == 'b' ) fmt.base -= 32; /* upper case */ return fmt; } 244 _Ostream_Manip(char) & nobase( _Ostream_Manip(char) & fmt ) { fmt.flags.nobsdp = true; return fmt; } 245 } // distribution 246 forall( dtype ostype | ostream( ostype ) ) { 247 ostype & ?|?( ostype & os, _Ostream_Manip(char) f ); 248 void ?|?( ostype & os, _Ostream_Manip(char) f ); 249 } 250 251 //*********************************** C String *********************************** 252 253 static inline { 254 _Ostream_Manip(const char *) bin( const char * v ) { return (_Ostream_Manip(const char *))@{ v, 1, 0, 'b', { .all : 0 } }; } 255 
_Ostream_Manip(const char *) oct( const char * v ) { return (_Ostream_Manip(const char *))@{ v, 1, 0, 'o', { .all : 0 } }; } 256 _Ostream_Manip(const char *) hex( const char * v ) { return (_Ostream_Manip(const char *))@{ v, 1, 0, 'x', { .all : 0 } }; } 257 _Ostream_Manip(const char *) wd( unsigned char w, const char * v ) { return (_Ostream_Manip(const char *))@{ v, w, 0, 's', { .all : 0 } }; } 258 _Ostream_Manip(const char *) wd( unsigned char w, unsigned char p, const char * v ) { return (_Ostream_Manip(const char *))@{ v, w, p, 's', { .flags.pc : true } }; } 259 _Ostream_Manip(const char *) & wd( unsigned char w, _Ostream_Manip(const char *) & fmt ) with(fmt) { wd = w; return fmt; } 260 _Ostream_Manip(const char *) & wd( unsigned char w, unsigned char p, _Ostream_Manip(const char *) & fmt ) with(fmt) { wd = w; pc = p; flags.pc = true; return fmt; } 261 _Ostream_Manip(const char *) & left( _Ostream_Manip(const char *) & fmt ) { fmt.flags.left = true; return fmt; } 262 _Ostream_Manip(const char *) & nobase( _Ostream_Manip(const char *) & fmt ) { fmt.flags.nobsdp = true; return fmt; } 263 } // distribution 264 forall( dtype ostype | ostream( ostype ) ) { 265 ostype & ?|?( ostype & os, _Ostream_Manip(const char *) f ); 266 void ?|?( ostype & os, _Ostream_Manip(const char *) f ); 267 } 268 269 270 //*********************************** Istream *********************************** 271 149 272 150 273 trait istream( dtype istype ) { … … 204 327 forall( dtype istype | istream( istype ) ) istype & ?|?( istype &, _Istream_cstrC ); 205 328 329 #if 0 330 struct _Istream_Cstr { 331 char * s, * scanset; 332 int wd; // width 333 bool ignore; // no input argument 334 }; 335 static inline _Istream_Cstr skip( char * s ) { return (_Istream_Cstr){ 0p, s, -1, false }; } 336 static inline _Istream_Cstr incl( const char * scanset, char * s ) { return (_Istream_Cstr){ s, scanset, -1, false }; } 337 static inline _Istream_Cstr excl( const char * scanset, char * s ) { return (_Istream_Cstr){ s, scanset, -1, false }; } 338 static inline _Istream_Cstr cstr( char * s ) { return (_Istream_Cstr){ s, 0p, -1, false }; } 339 static inline _Istream_Cstr ignore( char * s ) { return (_Istream_Cstr)@{ s, 0p, -1, true }; } 340 static inline _Istream_Cstr ignore( _Istream_Cstr & fmt ) { fmt.ignore = true; return fmt; } 341 static inline _Istream_Cstr wd( unsigned int w, char * s ) { return (_Istream_Cstr)@{ s, 0p, w, false }; } 342 forall( dtype istype | istream( istype ) ) istype & ?|?( istype &, _Istream_Cstr ); 343 344 //*********************************** Manipulators *********************************** 345 346 forall( otype T ) 347 struct _Istream_Manip { 348 T & val; // polymorphic base-type 349 int wd; // width 350 bool ignore; // no input argument 351 }; // _Istream_Manip 352 353 #define InputFMTDecl( T ) \ 354 static inline _Istream_Manip(T) ignore( T & v ) { return (_Istream_Manip(T))@{ v, -1, true }; } \ 355 static inline _Istream_Manip(T) ignore( _Istream_Manip(T) & fmt ) { fmt.ignore = true; return fmt; } \ 356 static inline _Istream_Manip(T) wd( unsigned int w, T & v ) { return (_Istream_Manip(T))@{ v, w, false }; } \ 357 forall( dtype istype | istream( istype ) ) { \ 358 istype & ?|?( istype & is, _Istream_Manip(T) f ); \ 359 } 360 361 InputFMTDecl( char ) 362 InputFMTDecl( signed char ) 363 InputFMTDecl( unsigned char ) 364 InputFMTDecl( signed short int ) 365 InputFMTDecl( unsigned short int ) 366 InputFMTDecl( signed int ) 367 InputFMTDecl( unsigned int ) 368 InputFMTDecl( signed long int ) 369 InputFMTDecl( 
unsigned long int ) 370 InputFMTDecl( signed long long int ) 371 InputFMTDecl( unsigned long long int ) 372 373 InputFMTDecl( float ) 374 InputFMTDecl( double ) 375 InputFMTDecl( long double ) 376 377 InputFMTDecl( float _Complex ) 378 InputFMTDecl( double _Complex ) 379 InputFMTDecl( long double _Complex ) 380 #endif // 0 381 382 383 //*********************************** Time *********************************** 384 206 385 207 386 #include <time_t.hfa> // Duration (constructors) / Time (constructors) -
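For readers more familiar with printf, the output manipulators declared above roughly correspond to the format strings below, which is close to what the iostream.cfa implementation builds internally; the mapping is approximate and only a few manipulators are shown (binary has no printf equivalent and is generated bespoke).

#include <stdio.h>

int main() {
	printf( "%#x\n", 27 );                 // hex( 27 ), with base prefix 0x
	printf( "%#o\n", 27 );                 // oct( 27 ), with base prefix 0
	printf( "%10d|\n", 27 );               // wd( 10, 27 ), right justified
	printf( "%-10d|\n", 27 );              // left( wd( 10, 27 ) )
	printf( "%010d|\n", 27 );              // pad0( wd( 10, 27 ) )
	printf( "%+d\n", 27 );                 // sign( 27 )
	printf( "%10.3f|\n", 3.5 );            // wd( 10, 3, 3.5 ), width and precision
	printf( "%e\n", 3.5 );                 // sci( 3.5 )
}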
src/Parser/parser.yy
r3cd5fdd rc6a1e8a 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed May 15 21:25:27 201913 // Update Count : 4 29612 // Last Modified On : Tue May 28 17:06:37 2019 13 // Update Count : 4354 14 14 // 15 15 … … 278 278 %token OTYPE FTYPE DTYPE TTYPE TRAIT // CFA 279 279 %token SIZEOF OFFSETOF 280 // %token SUSPEND RESUME // CFA 280 281 %token ATTRIBUTE EXTENSION // GCC 281 282 %token IF ELSE SWITCH CASE DEFAULT DO WHILE FOR BREAK CONTINUE GOTO RETURN … … 482 483 %precedence '}' 483 484 %precedence '(' 485 486 // %precedence RESUME 487 // %precedence '{' 488 // %precedence ')' 484 489 485 490 %locations // support location tracking for error messages … … 599 604 $$ = new ExpressionNode( $5 ); 600 605 } 606 // | RESUME '(' comma_expression ')' 607 // { SemanticError( yylloc, "Resume expression is currently unimplemented." ); $$ = nullptr; } 608 // | RESUME '(' comma_expression ')' compound_statement 609 // { SemanticError( yylloc, "Resume expression is currently unimplemented." ); $$ = nullptr; } 601 610 ; 602 611 … … 1263 1272 | RETURN comma_expression_opt ';' 1264 1273 { $$ = new StatementNode( build_return( $2 ) ); } 1265 | RETURN '{' initializer_list_opt comma_opt '}' 1274 | RETURN '{' initializer_list_opt comma_opt '}' ';' 1266 1275 { SemanticError( yylloc, "Initializer return is currently unimplemented." ); $$ = nullptr; } 1276 // | SUSPEND ';' 1277 // { SemanticError( yylloc, "Suspend expression is currently unimplemented." ); $$ = nullptr; } 1278 // | SUSPEND compound_statement ';' 1279 // { SemanticError( yylloc, "Suspend expression is currently unimplemented." ); $$ = nullptr; } 1267 1280 | THROW assignment_expression_opt ';' // handles rethrow 1268 1281 { $$ = new StatementNode( build_throw( $2 ) ); } … … 2158 2171 2159 2172 bit_subrange_size: 2160 ':' constant_expression2173 ':' assignment_expression 2161 2174 { $$ = $2; } 2162 2175 ; -
src/main.cc
r3cd5fdd rc6a1e8a 10 10 // Created On : Fri May 15 23:12:02 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Jun 5 13:48:41201913 // Update Count : 60 012 // Last Modified On : Wed Jun 5 20:35:13 2019 13 // Update Count : 601 14 14 // 15 15 … … 162 162 backtrace( 6 ); // skip first 6 stack frames 163 163 signal( SIGABRT, SIG_DFL); // reset default signal handler 164 164 raise( SIGABRT ); // reraise SIGABRT 165 165 } // sigAbortHandler 166 166
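The sigAbortHandler hunk above uses a pattern worth spelling out: after doing its own work (here, printing a backtrace), the handler restores the default disposition and re-raises the signal, so the process still terminates with the default SIGABRT action (core dump). A standalone illustration, not part of the changeset and with a hypothetical handler name:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void on_abort( int sig ) {
	fprintf( stderr, "cleanup before abort\n" );  // stand-in for printing a backtrace
	signal( sig, SIG_DFL );                       // reset default signal handler
	raise( sig );                                 // reraise SIGABRT => default action
}

int main() {
	signal( SIGABRT, on_abort );
	abort();                                      // handler runs, then the process aborts
}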