Changeset 63238a4
- Timestamp: Jun 26, 2018, 5:16:23 PM
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children: 28f3a19, 6d6cf5a
- Parents: d286cf68, 3d56d15b
- Files: 19 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to the full set of changes relative to either parent.
doc/papers/OOPSLA17/Makefile
rd286cf68 → r63238a4

   DOCUMENT = generic_types.pdf
+  BASE = ${basename ${DOCUMENT}}

   # Directives #
…
   clean :
-  	@rm -frv ${DOCUMENT} ${basename ${DOCUMENT}}.ps ${Build}
+  	@rm -frv ${DOCUMENT} ${BASE}.ps ${Build}

   # File Dependencies #

-  ${DOCUMENT} : ${basename ${DOCUMENT}}.ps
+  ${DOCUMENT} : ${BASE}.ps
   	ps2pdf $<

-  ${basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi
+  ${BASE}.ps : ${BASE}.dvi
   	dvips ${Build}/$< -o $@

-  ${basename ${DOCUMENT}}.dvi : Makefile ${Build} ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ../../bibliography/pl.bib
+  ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
+  	../../bibliography/pl.bib | ${Build}
   # Must have *.aux file containing citations for bibtex
   	if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
…
   ## Define the default recipes.

-  ${Build} :
+  ${Build} :                                   (whitespace-only change)
   	mkdir -p ${Build}
…
   	gnuplot -e Build="'${Build}/'" evaluation/timing.gp

-  %.tex : %.fig
+  %.tex : %.fig | ${Build}
   	fig2dev -L eepic $< > ${Build}/$@

-  %.ps : %.fig
+  %.ps : %.fig | ${Build}
   	fig2dev -L ps $< > ${Build}/$@

-  %.pstex : %.fig
+  %.pstex : %.fig | ${Build}
   	fig2dev -L pstex $< > ${Build}/$@
   	fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
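A recurring change in this hunk is moving ${Build} behind a "|", turning it into an order-only prerequisite. A minimal sketch of the difference (the target and file names below are hypothetical, not from this Makefile):

   # An order-only prerequisite (after the |) must exist before the recipe runs,
   # but its timestamp never makes the target out of date, so files later added
   # to the directory do not trigger pointless rebuilds.
   ${Build} :
   	mkdir -p ${Build}

   out.pdf : in.tex | ${Build}        # ${Build} is created first if missing
   	pdflatex -output-directory=${Build} in.tex

With a normal prerequisite, every update to the directory's timestamp would re-run the recipe even though nothing the target actually depends on has changed.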
doc/papers/concurrency/Makefile
rd286cf68 → r63238a4

   FIGURES = ${addsuffix .tex, \
-  	monitor \
-  	ext_monitor \
   	int_monitor \
   	dependency \
…
   PICTURES = ${addsuffix .pstex, \
+  	monitor \
+  	ext_monitor \
   	system \
   	monitor_structs \
…
   	dvips ${Build}/$< -o $@

-  ${BASE}.dvi : Makefile ${Build} ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
-  	annex/local.bib ../../bibliography/pl.bib
+  ${BASE}.dvi : Makefile ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
+  	annex/local.bib ../../bibliography/pl.bib | ${Build}
   # Must have *.aux file containing citations for bibtex
   	if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
-  	${BibTeX} ${Build}/${basename $@}
+  	-${BibTeX} ${Build}/${basename $@}
   # Some citations reference others so run again to resolve these citations
   	${LaTeX} ${basename $@}.tex
-  	${BibTeX} ${Build}/${basename $@}
+  	-${BibTeX} ${Build}/${basename $@}
   # Run again to finish citations
   	${LaTeX} ${basename $@}.tex
…
   ## Define the default recipes.

-  ${Build} :
+  ${Build} :                                   (whitespace-only change)
   	mkdir -p ${Build}

-  ${BASE}.out.ps : ${Build}
+  ${BASE}.out.ps : | ${Build}
   	ln -fs ${Build}/Paper.out.ps .

-  WileyNJD-AMA.bst :
+  WileyNJD-AMA.bst :                           (whitespace-only change)
   	ln -fs ../AMA/AMA-stix/ama/WileyNJD-AMA.bst .

-  %.tex : %.fig ${Build}
+  %.tex : %.fig | ${Build}
   	fig2dev -L eepic $< > ${Build}/$@

-  %.ps : %.fig ${Build}
+  %.ps : %.fig | ${Build}
   	fig2dev -L ps $< > ${Build}/$@

-  %.pstex : %.fig ${Build}
+  %.pstex : %.fig | ${Build}
   	fig2dev -L pstex $< > ${Build}/$@
   	fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
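Besides the same order-only "| ${Build}" change, this hunk prefixes the BibTeX steps with "-". In make, a "-" at the start of a recipe line tells make to ignore a non-zero exit status from that command rather than aborting. A small sketch with hypothetical names:

   refs :
   	-bibtex ${Build}/Paper     # may exit non-zero on the first pass (no citations in the .aux yet); make continues
   	latex Paper.tex            # later passes resolve the citations

This matches the recipe comments above: the first LaTeX/BibTeX passes are expected to be incomplete, so a BibTeX failure should not stop the chain.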
doc/papers/concurrency/Paper.tex
rd286cf68 r63238a4 271 271 Hence, there are two problems to be solved: concurrency and parallelism. 272 272 While these two concepts are often combined, they are distinct, requiring different tools~\cite[\S~2]{Buhr05a}. 273 Concurrency tools handle synchronization and mutual exclusion, while parallelism tools handle performance, costand resource utilization.273 Concurrency tools handle mutual exclusion and synchronization, while parallelism tools handle performance, cost, and resource utilization. 274 274 275 275 The proposed concurrency API is implemented in a dialect of C, called \CFA. … … 282 282 Extended versions and explanation of the following code examples are available at the \CFA website~\cite{Cforall} or in Moss~\etal~\cite{Moss18}. 283 283 284 \CFA is a nextension of ISO-C, and hence, supports all C paradigms.284 \CFA is a non-object-oriented extension of ISO-C, and hence, supports all C paradigms. 285 285 %It is a non-object-oriented system-language, meaning most of the major abstractions have either no runtime overhead or can be opted out easily. 286 Like C, the b asics of \CFA revolve aroundstructures and routines.286 Like C, the building blocks of \CFA are structures and routines. 287 287 Virtually all of the code generated by the \CFA translator respects C memory layouts and calling conventions. 288 288 While \CFA is not an object-oriented language, lacking the concept of a receiver (\eg @this@) and nominal inheritance-relationships, C does have a notion of objects: ``region of data storage in the execution environment, the contents of which can represent values''~\cite[3.15]{C11}. … … 296 296 int x = 1, y = 2, z = 3; 297 297 int * p1 = &x, ** p2 = &p1, *** p3 = &p2, $\C{// pointers to x}$ 298 `&` r1 = x, `&&` r2 = r1,`&&&` r3 = r2; $\C{// references to x}$298 `&` r1 = x, `&&` r2 = r1, `&&&` r3 = r2; $\C{// references to x}$ 299 299 int * p4 = &z, `&` r4 = z; 300 300 … … 411 411 \end{cquote} 412 412 Overloading is important for \CFA concurrency since the runtime system relies on creating different types to represent concurrency objects. 413 Therefore, overloading is necessary to prevent the need forlong prefixes and other naming conventions to prevent name clashes.413 Therefore, overloading eliminates long prefixes and other naming conventions to prevent name clashes. 414 414 As seen in Section~\ref{basics}, routine @main@ is heavily overloaded. 415 416 Variable overloading is useful in the parallel semantics of the @with@ statement for fields with the same name: 415 For example, variable overloading is useful in the parallel semantics of the @with@ statement for fields with the same name: 417 416 \begin{cfa} 418 417 struct S { int `i`; int j; double m; } s; … … 428 427 } 429 428 \end{cfa} 430 For parallel semantics, both @s.i@ and @t.i@ are visible the same type, so only @i@ is ambiguous without qualification.429 For parallel semantics, both @s.i@ and @t.i@ are visible with the same type, so only @i@ is ambiguous without qualification. 431 430 432 431 … … 468 467 \end{cquote} 469 468 While concurrency does not use operator overloading directly, it provides an introduction for the syntax of constructors. 
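The constructor and destructor operators referred to here (their definitions sit in the elided lines of this section) use the operator names ?{} and ^?{}. A minimal sketch for readers of this diff, with an illustrative type and field names not taken from the hunk:

\begin{cfa}
#include <stdlib.h>                             // malloc, free
struct VLA { int len, * data; };                // illustrative type
void ?{}( VLA & vla ) with( vla ) {             // default constructor
	len = 10;  data = (int *)malloc( len * sizeof(int) );
}
void ^?{}( VLA & vla ) { free( vla.data ); }    // destructor
int main() {
	VLA x;                                      // implicit constructor call ?{}( x )
}                                               // implicit destructor call ^?{}( x )
\end{cfa}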
470 471 472 \subsection{Parametric Polymorphism}473 \label{s:ParametricPolymorphism}474 475 The signature feature of \CFA is parametric-polymorphic routines~\cite{} with routines generalized using a @forall@ clause (giving the language its name), which allow separately compiled routines to support generic usage over multiple types.476 For example, the following sum routine works for any type that supports construction from 0 and addition:477 \begin{cfa}478 forall( otype T | { void `?{}`( T *, zero_t ); T `?+?`( T, T ); } ) // constraint type, 0 and +479 T sum( T a[$\,$], size_t size ) {480 `T` total = { `0` }; $\C{// initialize by 0 constructor}$481 for ( size_t i = 0; i < size; i += 1 )482 total = total `+` a[i]; $\C{// select appropriate +}$483 return total;484 }485 S sa[5];486 int i = sum( sa, 5 ); $\C{// use S's 0 construction and +}$487 \end{cfa}488 489 \CFA provides \newterm{traits} to name a group of type assertions, where the trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each routine declaration:490 \begin{cfa}491 trait `sumable`( otype T ) {492 void `?{}`( T &, zero_t ); $\C{// 0 literal constructor}$493 T `?+?`( T, T ); $\C{// assortment of additions}$494 T ?+=?( T &, T );495 T ++?( T & );496 T ?++( T & );497 };498 forall( otype T `| sumable( T )` ) $\C{// use trait}$499 T sum( T a[$\,$], size_t size );500 \end{cfa}501 502 Assertions can be @otype@ or @dtype@.503 @otype@ refers to a ``complete'' object, \ie an object has a size, default constructor, copy constructor, destructor and an assignment operator.504 @dtype@ only guarantees an object has a size and alignment.505 506 Using the return type for discrimination, it is possible to write a type-safe @alloc@ based on the C @malloc@:507 \begin{cfa}508 forall( dtype T | sized(T) ) T * alloc( void ) { return (T *)malloc( sizeof(T) ); }509 int * ip = alloc(); $\C{// select type and size from left-hand side}$510 double * dp = alloc();511 struct S {...} * sp = alloc();512 \end{cfa}513 where the return type supplies the type/size of the allocation, which is impossible in most type systems.514 469 515 470 … … 540 495 \CFA also provides @new@ and @delete@, which behave like @malloc@ and @free@, in addition to constructing and destructing objects: 541 496 \begin{cfa} 542 { struct S s = {10}; $\C{// allocation, call constructor}$543 ... 497 { 498 ... struct S s = {10}; ... $\C{// allocation, call constructor}$ 544 499 } $\C{// deallocation, call destructor}$ 545 500 struct S * s = new(); $\C{// allocation, call constructor}$ … … 547 502 delete( s ); $\C{// deallocation, call destructor}$ 548 503 \end{cfa} 549 \CFA concurrency uses object lifetime as a means of synchronization and/or mutual exclusion. 504 \CFA concurrency uses object lifetime as a means of mutual exclusion and/or synchronization. 505 506 507 \subsection{Parametric Polymorphism} 508 \label{s:ParametricPolymorphism} 509 510 The signature feature of \CFA is parametric-polymorphic routines~\cite{} with routines generalized using a @forall@ clause (giving the language its name), which allow separately compiled routines to support generic usage over multiple types. 
511 For example, the following sum routine works for any type that supports construction from 0 and addition: 512 \begin{cfa} 513 forall( otype T | { void `?{}`( T *, zero_t ); T `?+?`( T, T ); } ) // constraint type, 0 and + 514 T sum( T a[$\,$], size_t size ) { 515 `T` total = { `0` }; $\C{// initialize by 0 constructor}$ 516 for ( size_t i = 0; i < size; i += 1 ) 517 total = total `+` a[i]; $\C{// select appropriate +}$ 518 return total; 519 } 520 S sa[5]; 521 int i = sum( sa, 5 ); $\C{// use S's 0 construction and +}$ 522 \end{cfa} 523 The builtin type @zero_t@ (and @one_t@) overload constant 0 (and 1) for a new types, where both 0 and 1 have special meaning in C. 524 525 \CFA provides \newterm{traits} to name a group of type assertions, where the trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each routine declaration: 526 \begin{cfa} 527 trait `sumable`( otype T ) { 528 void `?{}`( T &, zero_t ); $\C{// 0 literal constructor}$ 529 T `?+?`( T, T ); $\C{// assortment of additions}$ 530 T ?+=?( T &, T ); 531 T ++?( T & ); 532 T ?++( T & ); 533 }; 534 forall( otype T `| sumable( T )` ) $\C{// use trait}$ 535 T sum( T a[$\,$], size_t size ); 536 \end{cfa} 537 538 Assertions can be @otype@ or @dtype@. 539 @otype@ refers to a ``complete'' object, \ie an object has a size, default constructor, copy constructor, destructor and an assignment operator. 540 @dtype@ only guarantees an object has a size and alignment. 541 542 Using the return type for discrimination, it is possible to write a type-safe @alloc@ based on the C @malloc@: 543 \begin{cfa} 544 forall( dtype T | sized(T) ) T * alloc( void ) { return (T *)malloc( sizeof(T) ); } 545 int * ip = alloc(); $\C{// select type and size from left-hand side}$ 546 double * dp = alloc(); 547 struct S {...} * sp = alloc(); 548 \end{cfa} 549 where the return type supplies the type/size of the allocation, which is impossible in most type systems. 550 550 551 551 … … 727 727 728 728 Using a coroutine, it is possible to express the Fibonacci formula directly without any of the C problems. 729 Figure~\ref{f:Coroutine3States} creates a @coroutine@ type: 730 \begin{cfa} 731 `coroutine` Fib { int fn; }; 732 \end{cfa} 733 which provides communication, @fn@, for the \newterm{coroutine main}, @main@, which runs on the coroutine stack, and possibly multiple interface routines @next@. 729 Figure~\ref{f:Coroutine3States} creates a @coroutine@ type, @`coroutine` Fib { int fn; }@, which provides communication, @fn@, for the \newterm{coroutine main}, @main@, which runs on the coroutine stack, and possibly multiple interface routines, \eg @next@. 734 730 Like the structure in Figure~\ref{f:ExternalState}, the coroutine type allows multiple instances, where instances of this type are passed to the (overloaded) coroutine main. 735 The coroutine main's stack holds the state for the next generation, @f1@ and @f2@, and the code has the three suspend points, representing the three states in the Fibonacci formula, to context switch back to the caller's resume.731 The coroutine main's stack holds the state for the next generation, @f1@ and @f2@, and the code has the three suspend points, representing the three states in the Fibonacci formula, to context switch back to the caller's @resume@. 736 732 The interface routine @next@, takes a Fibonacci instance and context switches to it using @resume@; 737 733 on restart, the Fibonacci field, @fn@, contains the next value in the sequence, which is returned. 
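The figure referenced above (Figure f:Coroutine3States) is elided from this hunk; the following sketch reconstructs the three-state coroutine it describes, using only the names mentioned in the prose (fn, next) and omitting the runtime include:

\begin{cfa}
coroutine Fib { int fn; };                      // communication variable
void main( Fib & fib ) with( fib ) {            // coroutine main, runs on the coroutine's stack
	int f1, f2;
	fn = 0;  f1 = fn;  suspend();               // 1st state
	fn = 1;  f2 = f1;  f1 = fn;  suspend();     // 2nd state
	for ( ;; ) {
		fn = f1 + f2;  f2 = f1;  f1 = fn;  suspend();  // 3rd state
	}
}
int next( Fib & fib ) {
	resume( fib );                              // context switch to the coroutine
	return fib.fn;                              // fn holds the next value on return
}
\end{cfa}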
… … 843 839 \end{figure} 844 840 845 The previous examples are \newterm{asymmetric (semi) coroutine}s because one coroutine always calls a resuming routine for another coroutine, and the resumed coroutine always suspends back to its last resumer, similar to call/return for normal routines 846 However, there is no stack growth because @resume@/@suspend@ context switch to existing stack-frames rather than create new ones.841 The previous examples are \newterm{asymmetric (semi) coroutine}s because one coroutine always calls a resuming routine for another coroutine, and the resumed coroutine always suspends back to its last resumer, similar to call/return for normal routines. 842 However,@resume@/@suspend@ context switch to existing stack-frames rather than create new ones so there is no stack growth. 847 843 \newterm{Symmetric (full) coroutine}s have a coroutine call a resuming routine for another coroutine, which eventually forms a resuming-call cycle. 848 844 (The trivial cycle is a coroutine resuming itself.) … … 933 929 The producer call to @delivery@ transfers values into the consumer's communication variables, resumes the consumer, and returns the consumer status. 934 930 For the first resume, @cons@'s stack is initialized, creating local variables retained between subsequent activations of the coroutine. 935 The consumer iterates until the @done@ flag is set, prints , increments status, and calls back to the producer via @payment@, and on return from @payment@, prints the receipt from the producer and increments @money@ (inflation).931 The consumer iterates until the @done@ flag is set, prints the values delivered by the producer, increments status, and calls back to the producer via @payment@, and on return from @payment@, prints the receipt from the producer and increments @money@ (inflation). 936 932 The call from the consumer to the @payment@ introduces the cycle between producer and consumer. 937 933 When @payment@ is called, the consumer copies values into the producer's communication variable and a resume is executed. … … 963 959 \end{cfa} 964 960 and the programming language (and possibly its tool set, \eg debugger) may need to understand @baseCoroutine@ because of the stack. 965 Furthermore, the execution of constructs/destructors is in the wrong order for certain operations , \eg for threads;966 \eg,if the thread is implicitly started, it must start \emph{after} all constructors, because the thread relies on a completely initialized object, but the inherited constructor runs \emph{before} the derived.961 Furthermore, the execution of constructs/destructors is in the wrong order for certain operations. 962 For example, for threads if the thread is implicitly started, it must start \emph{after} all constructors, because the thread relies on a completely initialized object, but the inherited constructor runs \emph{before} the derived. 967 963 968 964 An alternatively is composition: … … 984 980 symmetric_coroutine<>::yield_type 985 981 \end{cfa} 986 Similarly, the canonical threading paradigm is often based on routine pointers, \eg @pthread @~\cite{pthreads}, \Csharp~\cite{Csharp}, Go~\cite{Go}, and Scala~\cite{Scala}.982 Similarly, the canonical threading paradigm is often based on routine pointers, \eg @pthreads@~\cite{pthreads}, \Csharp~\cite{Csharp}, Go~\cite{Go}, and Scala~\cite{Scala}. 987 983 However, the generic thread-handle (identifier) is limited (few operations), unless it is wrapped in a custom type. 
988 984 \begin{cfa} … … 1001 997 Note, the type @coroutine_t@ must be an abstract handle to the coroutine, because the coroutine descriptor and its stack are non-copyable. 1002 998 Copying the coroutine descriptor results in copies being out of date with the current state of the stack. 1003 Correspondingly, copying the stack results is copies being out of date with coroutine descriptor, and pointers in the stack being out of date to data on the stack.999 Correspondingly, copying the stack results is copies being out of date with the coroutine descriptor, and pointers in the stack being out of date to data on the stack. 1004 1000 (There is no mechanism in C to find all stack-specific pointers and update them as part of a copy.) 1005 1001 … … 1015 1011 Furthermore, implementing coroutines without language supports also displays the power of a programming language. 1016 1012 While this is ultimately the option used for idiomatic \CFA code, coroutines and threads can still be constructed without using the language support. 1017 The reserved keyword eases use for the common cases.1013 The reserved keyword simply eases use for the common cases. 1018 1014 1019 1015 Part of the mechanism to generalize coroutines is using a \CFA trait, which defines a coroutine as anything satisfying the trait @is_coroutine@, and this trait is used to restrict coroutine-manipulation routines: … … 1030 1026 The @main@ routine has no return value or additional parameters because the coroutine type allows an arbitrary number of interface routines with corresponding arbitrary typed input/output values versus fixed ones. 1031 1027 The generic routines @suspend@ and @resume@ can be redefined, but any object passed to them is a coroutine since it must satisfy the @is_coroutine@ trait to compile. 1032 The advantage of this approach is that users can easily create different types of coroutines, for example,changing the memory layout of a coroutine is trivial when implementing the @get_coroutine@ routine, and possibly redefining @suspend@ and @resume@.1028 The advantage of this approach is that users can easily create different types of coroutines, \eg changing the memory layout of a coroutine is trivial when implementing the @get_coroutine@ routine, and possibly redefining @suspend@ and @resume@. 1033 1029 The \CFA keyword @coroutine@ implicitly implements the getter and forward declarations required for implementing the coroutine main: 1034 1030 \begin{cquote} … … 1098 1094 The difference is that a coroutine borrows a thread from its caller, so the first thread resuming a coroutine creates an instance of @main@; 1099 1095 whereas, a user thread receives its own thread from the runtime system, which starts in @main@ as some point after the thread constructor is run.\footnote{ 1100 The \lstinline@main@ routine is already a special routine in C (where the program begins), so it is a natural extension of the semantics to use overloading to declare mains for different coroutines/threads (the normal main being the main of the initial thread).}1096 The \lstinline@main@ routine is already a special routine in C, \ie where the program's initial thread begins, so it is a natural extension of this semantics to use overloading to declare \lstinline@main@s for user coroutines and threads.} 1101 1097 No return value or additional parameters are necessary for this routine because the task type allows an arbitrary number of interface routines with corresponding arbitrary typed input/output values. 
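As a reference point for the thread semantics described above, a minimal sketch of a user thread's lifetime (the Hello type is illustrative and the CFA runtime include is omitted): the thread starts once construction completes, and its destructor performs the join.

\begin{cfa}
#include <stdio.h>
thread Hello {};                                // thread type (illustrative)
void main( Hello & hello ) {                    // thread main, overloaded on Hello
	printf( "hello from a user thread\n" );
}
int main() {
	Hello h;                                    // thread starts after the constructor completes
	printf( "hello from the initial thread\n" );
}                                               // destructor implicitly joins h before deallocation
\end{cfa}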
1102 1098 … … 1189 1185 void main( Adder & adder ) with( adder ) { 1190 1186 subtotal = 0; 1191 for ( int c = 0; c < cols; c += 1 ) { 1192 subtotal += row[c]; 1193 } 1187 for ( int c = 0; c < cols; c += 1 ) { subtotal += row[c]; } 1194 1188 } 1195 1189 int main() { … … 1216 1210 1217 1211 Uncontrolled non-deterministic execution is meaningless. 1218 To reestablish meaningful execution requires mechanisms to reintroduce determinism (\ie restrict non-determinism), called mutual exclusion and synchronization, where mutual exclusion is an access-control mechanism on data shared by threads, and synchronization is a timing relationship among threads~\cite[\S~4]{Buhr05a}.1212 To reestablish meaningful execution requires mechanisms to reintroduce determinism, \ie restrict non-determinism, called mutual exclusion and synchronization, where mutual exclusion is an access-control mechanism on data shared by threads, and synchronization is a timing relationship among threads~\cite[\S~4]{Buhr05a}. 1219 1213 Since many deterministic challenges appear with the use of mutable shared state, some languages/libraries disallow it, \eg Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, Akka~\cite{Akka} (Scala). 1220 In these paradigms, interaction among concurrent objects is performed by stateless message-passing~\cite{Thoth,Harmony,V-Kernel} or other paradigms closely relate to networking concepts (\eg channels~\cite{CSP,Go}).1221 However, in call/return-based languages, these approaches force a clear distinction (\ie introduce a new programming paradigm) between regular and concurrent computation (\ie routine call versus message passing).1214 In these paradigms, interaction among concurrent objects is performed by stateless message-passing~\cite{Thoth,Harmony,V-Kernel} or other paradigms closely relate to networking concepts, \eg channels~\cite{CSP,Go}. 1215 However, in call/return-based languages, these approaches force a clear distinction, \ie introduce a new programming paradigm, between regular and concurrent computation, \eg routine call versus message passing. 1222 1216 Hence, a programmer must learn and manipulate two sets of design patterns. 1223 1217 While this distinction can be hidden away in library code, effective use of the library still has to take both paradigms into account. … … 1244 1238 However, many solutions exist for mutual exclusion, which vary in terms of performance, flexibility and ease of use. 1245 1239 Methods range from low-level locks, which are fast and flexible but require significant attention for correctness, to higher-level concurrency techniques, which sacrifice some performance to improve ease of use. 1246 Ease of use comes by either guaranteeing some problems cannot occur (\eg deadlock free), or by offering a more explicit coupling between shared data and critical section.1247 For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations (\eg reading/writing)for numerical types.1240 Ease of use comes by either guaranteeing some problems cannot occur, \eg deadlock free, or by offering a more explicit coupling between shared data and critical section. 1241 For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations, \eg reading/writing, for numerical types. 1248 1242 However, a significant challenge with locks is composability because it takes careful organization for multiple locks to be used while preventing deadlock. 
1249 1243 Easing composability is another feature higher-level mutual-exclusion mechanisms can offer. … … 1254 1248 Synchronization enforces relative ordering of execution, and synchronization tools provide numerous mechanisms to establish these timing relationships. 1255 1249 Low-level synchronization primitives offer good performance and flexibility at the cost of ease of use; 1256 higher-level mechanisms often simplify usage by adding better coupling between synchronization and data (\eg message passing), or offering a simpler solution to otherwise involved challenges, \eg barrier lock.1250 higher-level mechanisms often simplify usage by adding better coupling between synchronization and data, \eg message passing, or offering a simpler solution to otherwise involved challenges, \eg barrier lock. 1257 1251 Often synchronization is used to order access to a critical section, \eg ensuring a reader thread is the next kind of thread to enter a critical section. 1258 1252 If a writer thread is scheduled for next access, but another reader thread acquires the critical section first, that reader has \newterm{barged}. … … 1272 1266 The strong association with the call/return paradigm eases programmability, readability and maintainability, at a slight cost in flexibility and efficiency. 1273 1267 1274 Note, like coroutines/threads, both locks and monitors require an abstract handle to reference them, because at their core, both mechanisms are manipulating non-copyable shared 1268 Note, like coroutines/threads, both locks and monitors require an abstract handle to reference them, because at their core, both mechanisms are manipulating non-copyable shared-state. 1275 1269 Copying a lock is insecure because it is possible to copy an open lock and then use the open copy when the original lock is closed to simultaneously access the shared data. 1276 1270 Copying a monitor is secure because both the lock and shared data are copies, but copying the shared data is meaningless because it no longer represents a unique entity. … … 1375 1369 \end{cfa} 1376 1370 (While object-oriented monitors can be extended with a mutex qualifier for multiple-monitor members, no prior example of this feature could be found.) 1377 In practice, writing multi-locking routines that do not deadlock sis tricky.1371 In practice, writing multi-locking routines that do not deadlock is tricky. 1378 1372 Having language support for such a feature is therefore a significant asset for \CFA. 1379 1373 1380 1374 The capability to acquire multiple locks before entering a critical section is called \newterm{bulk acquire}. 1381 In previous example, \CFA guarantees the order of acquisition is consistent across calls to different routines using the same monitors as arguments.1375 In the previous example, \CFA guarantees the order of acquisition is consistent across calls to different routines using the same monitors as arguments. 1382 1376 This consistent ordering means acquiring multiple monitors is safe from deadlock. 1383 1377 However, users can force the acquiring order. … … 1395 1389 In the calls to @bar@ and @baz@, the monitors are acquired in opposite order. 
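The declarations of bar and baz are elided from this hunk; a plausible shape, flagged here as hypothetical, is two mutex routines whose monitor parameters appear in opposite orders and which both call a two-monitor routine:

\begin{cfa}
monitor M {};
void foo( M & mutex m1, M & mutex m2 );         // mutex routine using both monitors (hypothetical)
void bar( M & mutex m1, M & mutex m2 ) {        // monitors acquired in the order m1, m2
	foo( m1, m2 );
}
void baz( M & mutex m2, M & mutex m1 ) {        // monitors acquired in the opposite order
	foo( m1, m2 );
}
\end{cfa}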
1396 1390 1397 However, such use leads to lock acquiring order problems resulting in deadlock~\cite{Lister77}, where detecting it requires dynamically tracking of monitor calls, and dealing with it requires implementrollback semantics~\cite{Dice10}.1391 However, such use leads to lock acquiring order problems resulting in deadlock~\cite{Lister77}, where detecting it requires dynamically tracking of monitor calls, and dealing with it requires rollback semantics~\cite{Dice10}. 1398 1392 In \CFA, safety is guaranteed by using bulk acquire of all monitors to shared objects, whereas other monitor systems provide no aid. 1399 1393 While \CFA provides only a partial solution, the \CFA partial solution handles many useful cases. … … 1440 1434 1441 1435 1442 \section{ InternalScheduling}1443 \label{s: InternalScheduling}1436 \section{Scheduling} 1437 \label{s:Scheduling} 1444 1438 1445 1439 While monitor mutual-exclusion provides safe access to shared data, the monitor data may indicate that a thread accessing it cannot proceed. … … 1454 1448 The appropriate condition lock is signalled to unblock an opposite kind of thread after an element is inserted/removed from the buffer. 1455 1449 Signalling is unconditional, because signalling an empty condition lock does nothing. 1450 1456 1451 Signalling semantics cannot have the signaller and signalled thread in the monitor simultaneously, which means: 1457 1452 \begin{enumerate} … … 1463 1458 The signalling thread blocks but is marked for urgrent unblocking at the next scheduling point and the signalled thread continues. 1464 1459 \end{enumerate} 1465 The first approach is too restrictive, as it precludes solving a reasonable class of problems (\eg dating service).1460 The first approach is too restrictive, as it precludes solving a reasonable class of problems, \eg dating service. 1466 1461 \CFA supports the next two semantics as both are useful. 1467 1462 Finally, while it is common to store a @condition@ as a field of the monitor, in \CFA, a @condition@ variable can be created/stored independently. … … 1539 1534 If the buffer is full, only calls to @remove@ can acquire the buffer, and if the buffer is empty, only calls to @insert@ can acquire the buffer. 1540 1535 Threads making calls to routines that are currently excluded block outside (external) of the monitor on a calling queue, versus blocking on condition queues inside (internal) of the monitor. 1536 % External scheduling is more constrained and explicit, which helps programmers reduce the non-deterministic nature of concurrency. 1537 External scheduling allows users to wait for events from other threads without concern of unrelated events occurring. 1538 The mechnaism can be done in terms of control flow, \eg Ada @accept@ or \uC @_Accept@, or in terms of data, \eg Go channels. 1539 While both mechanisms have strengths and weaknesses, this project uses a control-flow mechanism to stay consistent with other language semantics. 1540 Two challenges specific to \CFA for external scheduling are loose object-definitions (see Section~\ref{s:LooseObjectDefinitions}) and multiple-monitor routines (see Section~\ref{s:Multi-MonitorScheduling}). 
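A sketch of the externally scheduled bounded buffer described above (buffer size and field names are illustrative; the CFA runtime include is omitted). When the buffer is full, insert accepts only a call to remove, and vice versa:

\begin{cfa}
monitor Buffer {
	int elements[10];
	int front, back, count;
};
int remove( Buffer & mutex buf );                // forward declaration for waitfor
void insert( Buffer & mutex buf, int elem ) with( buf ) {
	if ( count == 10 ) waitfor( remove, buf );   // buffer full: accept only remove
	elements[back] = elem;  back = ( back + 1 ) % 10;  count += 1;
}
int remove( Buffer & mutex buf ) with( buf ) {
	if ( count == 0 ) waitfor( insert, buf );    // buffer empty: accept only insert
	int elem = elements[front];  front = ( front + 1 ) % 10;  count -= 1;
	return elem;
}
\end{cfa}

Because the monitor is non-barging, an if suffices where a lock-based solution would need a loop re-checking the condition.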
1541 1541 1542 1542 For internal scheduling, non-blocking signalling (as in the producer/consumer example) is used when the signaller is providing the cooperation for a waiting thread; 1543 1543 the signaller enters the monitor and changes state, detects a waiting threads that can use the state, performs a non-blocking signal on the condition queue for the waiting thread, and exits the monitor to run concurrently. 1544 The waiter unblocks next, takes the state, and exits the monitor.1544 The waiter unblocks next, uses/takes the state, and exits the monitor. 1545 1545 Blocking signalling is the reverse, where the waiter is providing the cooperation for the signalling thread; 1546 1546 the signaller enters the monitor, detects a waiting thread providing the necessary state, performs a blocking signal to place it on the urgent queue and unblock the waiter. 1547 The waiter changes state and exits the monitor, and the signaller unblocks next from the urgent queue to take the state.1547 The waiter changes state and exits the monitor, and the signaller unblocks next from the urgent queue to use/take the state. 1548 1548 1549 1549 Figure~\ref{f:DatingService} shows a dating service demonstrating the two forms of signalling: non-blocking and blocking. 1550 1550 The dating service matches girl and boy threads with matching compatibility codes so they can exchange phone numbers. 1551 1551 A thread blocks until an appropriate partner arrives. 1552 The complexity is exchanging phone number in the monitor, 1553 While the non-barging monitor prevents a caller from stealing a phone number, the monitor mutual-exclusion property 1554 1555 The dating service is an example of a monitor that cannot be written using external scheduling because: 1556 1557 The example in table \ref{tbl:datingservice} highlights the difference in behaviour. 1558 As mentioned, @signal@ only transfers ownership once the current critical section exits; this behaviour requires additional synchronization when a two-way handshake is needed. 1559 To avoid this explicit synchronization, the @condition@ type offers the @signal_block@ routine, which handles the two-way handshake as shown in the example. 1560 This feature removes the need for a second condition variables and simplifies programming. 1561 Like every other monitor semantic, @signal_block@ uses barging prevention, which means mutual-exclusion is baton-passed both on the front end and the back end of the call to @signal_block@, meaning no other thread can acquire the monitor either before or after the call. 1552 The complexity is exchanging phone number in the monitor because the monitor mutual-exclusion property prevents exchanging numbers. 1553 For internal scheduling, the @exchange@ condition is necessary to block the thread finding the match, while the matcher unblocks to take the oppose number, post its phone number, and unblock the partner. 1554 For external scheduling, the implicit urgent-condition replaces the explict @exchange@-condition and @signal_block@ puts the finding thread on the urgent condition and unblocks the matcher.. 1555 1556 The dating service is an example of a monitor that cannot be written using external scheduling because it requires knowledge of calling parameters to make scheduling decisions, and parameters of waiting threads are unavailable; 1557 as well, an arriving thread may not find a partner and must wait, which requires a condition variable, and condition variables imply internal scheduling. 
1562 1558 1563 1559 \begin{figure} … … 1655 1651 } 1656 1652 \end{cfa} 1657 must have acquired monitor locks that are greater than or equal to the number of locks for the waiting thread signalled from the front of the condition queue. 1658 In general, the signaller does not know the order of waiting threads, so in general, it must acquire the maximum number of mutex locks for the worst-case waiting thread. 1653 must have acquired monitor locks that are greater than or equal to the number of locks for the waiting thread signalled from the condition queue. 1659 1654 1660 1655 Similarly, for @waitfor( rtn )@, the default semantics is to atomically block the acceptor and release all acquired mutex types in the parameter list, \ie @waitfor( rtn, m1, m2 )@. … … 1667 1662 void foo( M & mutex m1, M & mutex m2 ) { 1668 1663 ... wait( `e, m1` ); ... $\C{// release m1, keeping m2 acquired )}$ 1669 void ba z( M & mutex m1, M & mutex m2 ) { $\C{// must acquire m1 and m2 )}$1664 void bar( M & mutex m1, M & mutex m2 ) { $\C{// must acquire m1 and m2 )}$ 1670 1665 ... signal( `e` ); ... 1671 1666 \end{cfa} 1672 The @wait@ only releases @m1@ so the signalling thread cannot acquire both @m1@ and @m2@ to enter @ba z@ to get to the @signal@.1667 The @wait@ only releases @m1@ so the signalling thread cannot acquire both @m1@ and @m2@ to enter @bar@ to get to the @signal@. 1673 1668 While deadlock issues can occur with multiple/nesting acquisition, this issue results from the fact that locks, and by extension monitors, are not perfectly composable. 1674 1669 … … 1755 1750 However, Figure~\ref{f:OtherWaitingThread} shows this solution is complex depending on other waiters, resulting is choices when the signaller finishes the inner mutex-statement. 1756 1751 The singaller can retain @m2@ until completion of the outer mutex statement and pass the locks to waiter W1, or it can pass @m2@ to waiter W2 after completing the inner mutex-statement, while continuing to hold @m1@. 1757 In the latter case, waiter W2 must eventually pass @m2@ to waiter W1, which is complex because W 2 may have waited before W1 so it is unaware of W1.1752 In the latter case, waiter W2 must eventually pass @m2@ to waiter W1, which is complex because W1 may have waited before W2, so W2 is unaware of it. 1758 1753 Furthermore, there is an execution sequence where the signaller always finds waiter W2, and hence, waiter W1 starves. 1759 1754 … … 1856 1851 The extra challenge is that this dependency graph is effectively post-mortem, but the runtime system needs to be able to build and solve these graphs as the dependencies unfold. 1857 1852 Resolving dependency graphs being a complex and expensive endeavour, this solution is not the preferred one. 
1858 1859 \subsubsection{Partial Signalling} \label{partial-sig}1860 1853 \end{comment} 1861 1854 1862 1855 1856 \begin{comment} 1863 1857 \section{External scheduling} \label{extsched} 1864 1858 1865 An alternative to internal scheduling is external scheduling (see Table~\ref{tbl:sched}).1866 1867 \begin{comment}1868 1859 \begin{table} 1869 1860 \begin{tabular}{|c|c|c|} … … 1929 1920 \label{tbl:sched} 1930 1921 \end{table} 1931 \end{comment}1932 1933 This method is more constrained and explicit, which helps users reduce the non-deterministic nature of concurrency.1934 Indeed, as the following examples demonstrate, external scheduling allows users to wait for events from other threads without the concern of unrelated events occurring.1935 External scheduling can generally be done either in terms of control flow (\eg Ada with @accept@, \uC with @_Accept@) or in terms of data (\eg Go with channels).1936 Of course, both of these paradigms have their own strengths and weaknesses, but for this project, control-flow semantics was chosen to stay consistent with the rest of the languages semantics.1937 Two challenges specific to \CFA arise when trying to add external scheduling with loose object definitions and multiple-monitor routines.1938 The previous example shows a simple use @_Accept@ versus @wait@/@signal@ and its advantages.1939 Note that while other languages often use @accept@/@select@ as the core external scheduling keyword, \CFA uses @waitfor@ to prevent name collisions with existing socket \textbf{api}s.1940 1922 1941 1923 For the @P@ member above using internal scheduling, the call to @wait@ only guarantees that @V@ is the last routine to access the monitor, allowing a third routine, say @isInUse()@, acquire mutual exclusion several times while routine @P@ is waiting. 1942 1924 On the other hand, external scheduling guarantees that while routine @P@ is waiting, no other routine than @V@ can acquire the monitor. 1943 1944 % ====================================================================== 1945 % ====================================================================== 1925 \end{comment} 1926 1927 1946 1928 \subsection{Loose Object Definitions} 1947 % ====================================================================== 1948 % ====================================================================== 1949 In \uC, a monitor class declaration includes an exhaustive list of monitor operations. 1950 Since \CFA is not object oriented, monitors become both more difficult to implement and less clear for a user: 1951 1952 \begin{cfa} 1953 monitor A {}; 1954 1955 void f(A & mutex a); 1956 void g(A & mutex a) { 1957 waitfor(f); // Obvious which f() to wait for 1958 } 1959 1960 void f(A & mutex a, int); // New different F added in scope 1961 void h(A & mutex a) { 1962 waitfor(f); // Less obvious which f() to wait for 1963 } 1964 \end{cfa} 1965 1966 Furthermore, external scheduling is an example where implementation constraints become visible from the interface. 1967 Here is the cfa-code for the entering phase of a monitor: 1968 \begin{center} 1969 \begin{tabular}{l} 1970 \begin{cfa} 1971 if monitor is free 1972 enter 1973 elif already own the monitor 1974 continue 1975 elif monitor accepts me 1976 enter 1977 else 1978 block 1979 \end{cfa} 1980 \end{tabular} 1981 \end{center} 1929 \label{s:LooseObjectDefinitions} 1930 1931 In an object-oriented programming-language, a class includes an exhaustive list of operations. 
1932 However, new members can be added via static inheritance or dynaic members, \eg JavaScript~\cite{JavaScript}. 1933 Similarly, monitor routines can be added at any time in \CFA, making it less clear for programmers and more difficult to implement. 1934 \begin{cfa} 1935 monitor M {}; 1936 void `f`( M & mutex m ); 1937 void g( M & mutex m ) { waitfor( `f` ); } $\C{// clear which f}$ 1938 void `f`( M & mutex m, int ); $\C{// different f}$ 1939 void h( M & mutex m ) { waitfor( `f` ); } $\C{// unclear which f}$ 1940 \end{cfa} 1941 Hence, the cfa-code for the entering a monitor looks like: 1942 \begin{cfa} 1943 if ( $\textrm{\textit{monitor is free}}$ ) $\LstCommentStyle{// \color{red}enter}$ 1944 else if ( $\textrm{\textit{already own monitor}}$ ) $\LstCommentStyle{// \color{red}continue}$ 1945 else if ( $\textrm{\textit{monitor accepts me}}$ ) $\LstCommentStyle{// \color{red}enter}$ 1946 else $\LstCommentStyle{// \color{red}block}$ 1947 \end{cfa} 1982 1948 For the first two conditions, it is easy to implement a check that can evaluate the condition in a few instructions. 1983 However, a fast check for @monitor accepts me@is much harder to implement depending on the constraints put on the monitors.1984 Indeed, monitors are often expressed as an entry queue and some acceptor queue as in Figure~\ref{fig:ClassicalMonitor}.1949 However, a fast check for \emph{monitor accepts me} is much harder to implement depending on the constraints put on the monitors. 1950 Figure~\ref{fig:ClassicalMonitor} shows monitors are often expressed as an entry (calling) queue, some acceptor queues, and an urgent stack/queue. 1985 1951 1986 1952 \begin{figure} 1987 1953 \centering 1988 \subfloat[Classical Monitor] {1954 \subfloat[Classical monitor] { 1989 1955 \label{fig:ClassicalMonitor} 1990 {\resizebox{0.45\textwidth}{!}{\input{monitor }}}1956 {\resizebox{0.45\textwidth}{!}{\input{monitor.pstex_t}}} 1991 1957 }% subfloat 1992 \q quad1993 \subfloat[ bulk acquire Monitor] {1958 \quad 1959 \subfloat[Bulk acquire monitor] { 1994 1960 \label{fig:BulkMonitor} 1995 {\resizebox{0.45\textwidth}{!}{\input{ext_monitor }}}1961 {\resizebox{0.45\textwidth}{!}{\input{ext_monitor.pstex_t}}} 1996 1962 }% subfloat 1997 \caption{External Scheduling Monitor} 1963 \caption{Monitor Implementation} 1964 \label{f:MonitorImplementation} 1998 1965 \end{figure} 1999 1966 2000 There are other alternatives to these pictures, but in the case of the left picture, implementing a fast accept check is relatively easy. 2001 Restricted to a fixed number of mutex members, N, the accept check reduces to updating a bitmask when the acceptor queue changes, a check that executes in a single instruction even with a fairly large number (\eg 128) of mutex members. 2002 This approach requires a unique dense ordering of routines with an upper-bound and that ordering must be consistent across translation units. 2003 For OO languages these constraints are common, since objects only offer adding member routines consistently across translation units via inheritance. 2004 However, in \CFA users can extend objects with mutex routines that are only visible in certain translation unit. 2005 This means that establishing a program-wide dense-ordering among mutex routines can only be done in the program linking phase, and still could have issues when using dynamically shared objects. 2006 2007 The alternative is to alter the implementation as in Figure~\ref{fig:BulkMonitor}. 
2008 Here, the mutex routine called is associated with a thread on the entry queue while a list of acceptable routines is kept separate. 2009 Generating a mask dynamically means that the storage for the mask information can vary between calls to @waitfor@, allowing for more flexibility and extensions. 2010 Storing an array of accepted routine pointers replaces the single instruction bitmask comparison with dereferencing a pointer followed by a linear search. 2011 Furthermore, supporting nested external scheduling (\eg listing \ref{f:nest-ext}) may now require additional searches for the @waitfor@ statement to check if a routine is already queued. 2012 1967 For a fixed (small) number of mutex routines (\eg 128), the accept check reduces to a bitmask of allowed callers, which can be checked with a single instruction. 1968 This approach requires a unique dense ordering of routines with a small upper-bound and the ordering must be consistent across translation units. 1969 For object-oriented languages these constraints are common, but \CFA mutex routines can be added in any scope and are only visible in certain translation unit, precluding program-wide dense-ordering among mutex routines. 1970 1971 Figure~\ref{fig:BulkMonitor} shows the \CFA monitor implementation. 1972 The mutex routine called is associated with each thread on the entry queue, while a list of acceptable routines is kept separately. 1973 The accepted list is a variable-sized array of accepted routine pointers, so the single instruction bitmask comparison is replaced by dereferencing a pointer followed by a linear search. 1974 1975 \begin{comment} 2013 1976 \begin{figure} 2014 1977 \begin{cfa}[caption={Example of nested external scheduling},label={f:nest-ext}] … … 2034 1997 In the end, the most flexible approach has been chosen since it allows users to write programs that would otherwise be hard to write. 2035 1998 This decision is based on the assumption that writing fast but inflexible locks is closer to a solved problem than writing locks that are as flexible as external scheduling in \CFA. 2036 2037 % ====================================================================== 2038 % ====================================================================== 1999 \end{comment} 2000 2001 2039 2002 \subsection{Multi-Monitor Scheduling} 2040 % ====================================================================== 2041 % ====================================================================== 2003 \label{s:Multi-MonitorScheduling} 2042 2004 2043 2005 External scheduling, like internal scheduling, becomes significantly more complex when introducing multi-monitor syntax. 
2044 Even in the simplest possible case, somenew semantics needs to be established:2006 Even in the simplest possible case, new semantics needs to be established: 2045 2007 \begin{cfa} 2046 2008 monitor M {}; 2047 2048 void f(M & mutex a); 2049 2050 void g(M & mutex b, M & mutex c) { 2051 waitfor(f); // two monitors M => unknown which to pass to f(M & mutex) 2052 } 2053 \end{cfa} 2054 The obvious solution is to specify the correct monitor as follows: 2055 2009 void f( M & mutex m1 ); 2010 void g( M & mutex m1, M & mutex m2 ) { 2011 waitfor( f ); $\C{// pass m1 or m2 to f?}$ 2012 } 2013 \end{cfa} 2014 The solution is for the programmer to disambiguate: 2015 \begin{cfa} 2016 waitfor( f, m2 ); $\C{// wait for call to f with argument m2}$ 2017 \end{cfa} 2018 Routine @g@ has acquired both locks, so when routine @f@ is called, the lock for monitor @m2@ is passed from @g@ to @f@ (while @g@ still holds lock @m1@). 2019 This behaviour can be extended to the multi-monitor @waitfor@ statement. 2056 2020 \begin{cfa} 2057 2021 monitor M {}; 2058 2059 void f(M & mutex a); 2060 2061 void g(M & mutex a, M & mutex b) { 2062 // wait for call to f with argument b 2063 waitfor(f, b); 2064 } 2065 \end{cfa} 2066 This syntax is unambiguous. 2067 Both locks are acquired and kept by @g@. 2068 When routine @f@ is called, the lock for monitor @b@ is temporarily transferred from @g@ to @f@ (while @g@ still holds lock @a@). 2069 This behaviour can be extended to the multi-monitor @waitfor@ statement as follows. 2070 2071 \begin{cfa} 2072 monitor M {}; 2073 2074 void f(M & mutex a, M & mutex b); 2075 2076 void g(M & mutex a, M & mutex b) { 2077 // wait for call to f with arguments a and b 2078 waitfor(f, a, b); 2079 } 2080 \end{cfa} 2081 2082 Note that the set of monitors passed to the @waitfor@ statement must be entirely contained in the set of monitors already acquired in the routine. @waitfor@ used in any other context is undefined behaviour. 2022 void f( M & mutex m1, M & mutex m2 ); 2023 void g( M & mutex m1, M & mutex m2 ) { 2024 waitfor( f, m1, m2 ); $\C{// wait for call to f with arguments m1 and m2}$ 2025 } 2026 \end{cfa} 2027 Again, the set of monitors passed to the @waitfor@ statement must be entirely contained in the set of monitors already acquired by accepting routine. 2083 2028 2084 2029 An important behaviour to note is when a set of monitors only match partially: 2085 2086 2030 \begin{cfa} 2087 2031 mutex struct A {}; 2088 2089 2032 mutex struct B {}; 2090 2091 void g(A & mutex a, B & mutex b) { 2092 waitfor(f, a, b); 2093 } 2094 2033 void g( A & mutex m1, B & mutex m2 ) { 2034 waitfor( f, m1, m2 ); 2035 } 2095 2036 A a1, a2; 2096 2037 B b; 2097 2098 2038 void foo() { 2099 g(a1, b); // block on accept 2100 } 2101 2039 g( a1, b ); // block on accept 2040 } 2102 2041 void bar() { 2103 f( a2, b); // fulfill cooperation2042 f( a2, b ); // fulfill cooperation 2104 2043 } 2105 2044 \end{cfa} … … 2108 2047 It is also important to note that in the case of external scheduling the order of parameters is irrelevant; @waitfor(f,a,b)@ and @waitfor(f,b,a)@ are indistinguishable waiting condition. 
2109 2048 2110 % ====================================================================== 2111 % ====================================================================== 2049 2112 2050 \subsection{\protect\lstinline|waitfor| Semantics} 2113 % ======================================================================2114 % ======================================================================2115 2051 2116 2052 Syntactically, the @waitfor@ statement takes a routine identifier and a set of monitors. … … 2211 2147 \end{figure} 2212 2148 2213 % ====================================================================== 2214 % ====================================================================== 2149 2215 2150 \subsection{Waiting For The Destructor} 2216 % ====================================================================== 2217 % ====================================================================== 2151 2218 2152 An interesting use for the @waitfor@ statement is destructor semantics. 2219 2153 Indeed, the @waitfor@ statement can accept any @mutex@ routine, which includes the destructor (see section \ref{data}). … … 2242 2176 2243 2177 2244 % ###### # ###### # # # ####### # ### ##### # #2245 % # # # # # # # # # # # # # # # ## ##2246 % # # # # # # # # # # # # # # # # # #2247 % ###### # # ###### # # # # ##### # # ##### # # #2248 % # ####### # # ####### # # # # # # # #2249 % # # # # # # # # # # # # # # # #2250 % # # # # # # # ####### ####### ####### ####### ### ##### # #2251 2178 \section{Parallelism} 2179 2252 2180 Historically, computer performance was about processor speeds and instruction counts. 2253 2181 However, with heat dissipation being a direct consequence of speed increase, parallelism has become the new source for increased performance~\cite{Sutter05, Sutter05b}. … … 2259 2187 While there are many variations of the presented paradigms, most of these variations do not actually change the guarantees or the semantics, they simply move costs in order to achieve better performance for certain workloads. 2260 2188 2189 2261 2190 \section{Paradigms} 2191 2192 2262 2193 \subsection{User-Level Threads} 2194 2263 2195 A direct improvement on the \textbf{kthread} approach is to use \textbf{uthread}. 2264 2196 These threads offer most of the same features that the operating system already provides but can be used on a much larger scale. … … 2269 2201 Examples of languages that support \textbf{uthread} are Erlang~\cite{Erlang} and \uC~\cite{uC++book}. 2270 2202 2203 2271 2204 \subsection{Fibers : User-Level Threads Without Preemption} \label{fibers} 2205 2272 2206 A popular variant of \textbf{uthread} is what is often referred to as \textbf{fiber}. 2273 2207 However, \textbf{fiber} do not present meaningful semantic differences with \textbf{uthread}. … … 2278 2212 An example of a language that uses fibers is Go~\cite{Go} 2279 2213 2214 2280 2215 \subsection{Jobs and Thread Pools} 2216 2281 2217 An approach on the opposite end of the spectrum is to base parallelism on \textbf{pool}. 2282 2218 Indeed, \textbf{pool} offer limited flexibility but at the benefit of a simpler user interface. … … 2289 2225 The gold standard of this implementation is Intel's TBB library~\cite{TBB}. 2290 2226 2227 2291 2228 \subsection{Paradigm Performance} 2229 2292 2230 While the choice between the three paradigms listed above may have significant performance implications, it is difficult to pin down the performance implications of choosing a model at the language level. 
2293 2231 Indeed, in many situations one of these paradigms may show better performance but it all strongly depends on the workload. … … 2297 2235 Finally, if the units of uninterrupted work are large, enough the paradigm choice is largely amortized by the actual work done. 2298 2236 2237 2299 2238 \section{The \protect\CFA\ Kernel : Processors, Clusters and Threads}\label{kernel} 2239 2300 2240 A \textbf{cfacluster} is a group of \textbf{kthread} executed in isolation. \textbf{uthread} are scheduled on the \textbf{kthread} of a given \textbf{cfacluster}, allowing organization between \textbf{uthread} and \textbf{kthread}. 2301 2241 It is important that \textbf{kthread} belonging to a same \textbf{cfacluster} have homogeneous settings, otherwise migrating a \textbf{uthread} from one \textbf{kthread} to the other can cause issues. … … 2305 2245 Currently \CFA only supports one \textbf{cfacluster}, the initial one. 2306 2246 2247 2307 2248 \subsection{Future Work: Machine Setup}\label{machine} 2249 2308 2250 While this was not done in the context of this paper, another important aspect of clusters is affinity. 2309 2251 While many common desktop and laptop PCs have homogeneous CPUs, other devices often have more heterogeneous setups. … … 2311 2253 OS support for CPU affinity is now common~\cite{affinityLinux, affinityWindows, affinityFreebsd, affinityNetbsd, affinityMacosx}, which means it is both possible and desirable for \CFA to offer an abstraction mechanism for portable CPU affinity. 2312 2254 2255 2313 2256 \subsection{Paradigms}\label{cfaparadigms} 2257 2314 2258 Given these building blocks, it is possible to reproduce all three of the popular paradigms. 2315 2259 Indeed, \textbf{uthread} is the default paradigm in \CFA. … … 2319 2263 2320 2264 2321 2322 2265 \section{Behind the Scenes} 2266 2323 2267 There are several challenges specific to \CFA when implementing concurrency. 2324 2268 These challenges are a direct result of bulk acquire and loose object definitions. … … 2337 2281 Note that since the major contributions of this paper are extending monitor semantics to bulk acquire and loose object definitions, any challenges that are not resulting of these characteristics of \CFA are considered as solved problems and therefore not discussed. 2338 2282 2339 % ====================================================================== 2340 % ====================================================================== 2283 2341 2284 \section{Mutex Routines} 2342 % ======================================================================2343 % ======================================================================2344 2285 2345 2286 The first step towards the monitor implementation is simple @mutex@ routines. … … 2376 2317 \end{figure} 2377 2318 2319 2378 2320 \subsection{Details: Interaction with polymorphism} 2321 2379 2322 Depending on the choice of semantics for when monitor locks are acquired, interaction between monitors and \CFA's concept of polymorphism can be more complex to support. 2380 2323 However, it is shown that entry-point locking solves most of the issues. … … 2456 2399 Furthermore, entry-point locking requires less code generation since any useful routine is called multiple times but there is only one entry point for many call sites. 
2457 2400 2458 % ====================================================================== 2459 % ====================================================================== 2401 2460 2402 \section{Threading} \label{impl:thread} 2461 % ======================================================================2462 % ======================================================================2463 2403 2464 2404 Figure \ref{fig:system1} shows a high-level picture if the \CFA runtime system in regards to concurrency. … … 2473 2413 \end{figure} 2474 2414 2415 2475 2416 \subsection{Processors} 2417 2476 2418 Parallelism in \CFA is built around using processors to specify how much parallelism is desired. \CFA processors are object wrappers around kernel threads, specifically @pthread@s in the current implementation of \CFA. 2477 2419 Indeed, any parallelism must go through operating-system libraries. … … 2481 2423 Processors internally use coroutines to take advantage of the existing context-switching semantics. 2482 2424 2425 2483 2426 \subsection{Stack Management} 2427 2484 2428 One of the challenges of this system is to reduce the footprint as much as possible. 2485 2429 Specifically, all @pthread@s created also have a stack created with them, which should be used as much as possible. … … 2488 2432 In order to respect C user expectations, the stack of the initial kernel thread, the main stack of the program, is used by the main user thread rather than the main processor, which can grow very large. 2489 2433 2434 2490 2435 \subsection{Context Switching} 2436 2491 2437 As mentioned in section \ref{coroutine}, coroutines are a stepping stone for implementing threading, because they share the same mechanism for context-switching between different stacks. 2492 2438 To improve performance and simplicity, context-switching is implemented using the following assumption: all context-switches happen inside a specific routine call. … … 2502 2448 This option is not currently present in \CFA, but the changes required to add it are strictly additive. 2503 2449 2450 2504 2451 \subsection{Preemption} \label{preemption} 2452 2505 2453 Finally, an important aspect for any complete threading system is preemption. 2506 2454 As mentioned in section \ref{basics}, preemption introduces an extra degree of uncertainty, which enables users to have multiple threads interleave transparently, rather than having to cooperate among threads for proper scheduling and CPU distribution. … … 2536 2484 Indeed, @sigwait@ can differentiate signals sent from @pthread_sigqueue@ from signals sent from alarms or the kernel. 2537 2485 2486 2538 2487 \subsection{Scheduler} 2539 2488 Finally, an aspect that was not mentioned yet is the scheduling algorithm. … … 2541 2490 Further discussion on scheduling is present in section \ref{futur:sched}. 
2542 2491 2543 % ====================================================================== 2544 % ====================================================================== 2492 2545 2493 \section{Internal Scheduling} \label{impl:intsched} 2546 % ====================================================================== 2547 % ====================================================================== 2494 2548 2495 The following figure is the traditional illustration of a monitor (repeated from page~\pageref{fig:ClassicalMonitor} for convenience): 2549 2496 2550 2497 \begin{figure} 2551 2498 \begin{center} 2552 {\resizebox{0.4\textwidth}{!}{\input{monitor }}}2499 {\resizebox{0.4\textwidth}{!}{\input{monitor.pstex_t}}} 2553 2500 \end{center} 2554 2501 \caption{Traditional illustration of a monitor} -
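For readers following the internal-scheduling figure referenced above, a loose C++ analogue of a task blocking on a monitor condition and later being signalled is sketched below. std::condition_variable uses signal-and-continue semantics, so it only approximates the urgent-stack behaviour shown in the figure; the names are hypothetical and the sketch is not CFA code.

// A waiter atomically releases the monitor and blocks on a condition;
// a signaller makes the condition true and wakes it.
#include <condition_variable>
#include <mutex>
#include <thread>
#include <iostream>

std::mutex monitor_lock;
std::condition_variable non_empty;   // hypothetical condition "A" from the figure
bool ready = false;

void waiter() {
    std::unique_lock<std::mutex> lock( monitor_lock );
    non_empty.wait( lock, []{ return ready; } );   // release monitor + block
    std::cout << "resumed inside the monitor\n";
}

void signaller() {
    std::lock_guard<std::mutex> lock( monitor_lock );
    ready = true;
    non_empty.notify_one();                        // move the waiter toward re-entry
}

int main() {
    std::thread t1( waiter ), t2( signaller );
    t1.join(); t2.join();
}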
doc/papers/concurrency/figures/ext_monitor.fig
rd286cf68 r63238a4 8 8 -2 9 9 1200 2 10 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 3150.000 3450.000 3150 3150 2850 3450 3150375011 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 3150.000 4350.000 3150 4050 2850 4350 3150465012 6 5850 1950 6150225013 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 2100 105 105 6000 2100 6105220514 4 1 -1 0 0 0 10 0.0000 2 105 90 60002160 d\00110 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 1575.000 3450.000 1575 3150 1275 3450 1575 3750 11 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 1575.000 4350.000 1575 4050 1275 4350 1575 4650 12 6 4275 1950 4575 2250 13 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4425 2100 105 105 4425 2100 4530 2205 14 4 1 -1 0 0 0 10 0.0000 2 105 90 4425 2160 d\001 15 15 -6 16 6 5100 2100 5400 240017 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 5250 2250 105 105 5250 2250 5355 225018 4 1 -1 0 0 0 10 0.0000 2 105 120 5250 2295 X\00116 6 4275 1650 4575 1950 17 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4425 1800 105 105 4425 1800 4530 1905 18 4 1 -1 0 0 0 10 0.0000 2 105 90 4425 1860 b\001 19 19 -6 20 6 5100 1800 5400 2100 21 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 5250 1950 105 105 5250 1950 5355 1950 22 4 1 -1 0 0 0 10 0.0000 2 105 120 5250 2010 Y\001 20 6 1495 5445 5700 5655 21 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 1575 5550 80 80 1575 5550 1655 5630 22 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 2925 5550 105 105 2925 5550 3030 5655 23 1 3 0 1 -1 -1 0 0 4 0.000 1 0.0000 4425 5550 105 105 4425 5550 4530 5655 24 4 0 -1 0 0 0 12 0.0000 2 135 1035 3150 5625 blocked task\001 25 4 0 -1 0 0 0 12 0.0000 2 135 870 1725 5625 active task\001 26 4 0 -1 0 0 0 12 0.0000 2 135 1050 4650 5625 routine mask\001 23 27 -6 24 6 5850 1650 6150 1950 25 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 1800 105 105 6000 1800 6105 1905 26 4 1 -1 0 0 0 10 0.0000 2 105 90 6000 1860 b\001 28 6 3525 1800 3825 2400 29 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 3675 1950 105 105 3675 1950 3780 1950 30 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 31 3525 1800 3825 1800 3825 2400 3525 2400 3525 1800 32 4 1 4 0 0 0 10 0.0000 2 105 120 3675 2010 Y\001 27 33 -6 28 6 3070 5445 7275 5655 29 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 3150 5550 80 80 3150 5550 3230 5630 30 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4500 5550 105 105 4500 5550 4605 5655 31 1 3 0 1 -1 -1 0 0 4 0.000 1 0.0000 6000 5550 105 105 6000 5550 6105 5655 32 4 0 -1 0 0 0 12 0.0000 2 135 1035 4725 5625 blocked task\001 33 4 0 -1 0 0 0 12 0.0000 2 135 870 3300 5625 active task\001 34 4 0 -1 0 0 0 12 0.0000 2 135 1050 6225 5625 routine mask\001 34 6 3525 2100 3825 2400 35 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 3675 2250 105 105 3675 2250 3780 2250 36 4 1 4 0 0 0 10 0.0000 2 105 120 3675 2295 X\001 35 37 -6 36 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3300 3600 105 105 3300 3600 3405370537 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3600 3600 105 105 3600 3600 3705370538 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6600 3900 105 105 6600 3900 6705400539 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6900 3900 105 105 6900 3900 7005400540 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 2700 105 105 6000 2700 6105280541 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 2400 105 105 6000 2400 6105250542 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 5100 4575 80 80 5100 4575 5180465538 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1725 3600 105 105 1725 3600 1830 3705 39 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 2025 3600 105 105 2025 3600 2130 3705 40 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5025 3900 105 105 5025 3900 5130 4005 41 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5325 3900 105 105 5325 3900 5430 4005 42 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4425 2700 105 105 4425 2700 4530 2805 43 1 3 0 1 -1 -1 0 0 -1 0.000 
1 0.0000 4425 2400 105 105 4425 2400 4530 2505 44 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 3525 4575 80 80 3525 4575 3605 4655 43 45 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 44 4050 2925 5475 2925 5475 3225 4050 3225 4050292546 2475 2925 3900 2925 3900 3225 2475 3225 2475 2925 45 47 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 46 3150 3750 3750 3750 3750 4050 3150405048 1575 3750 2175 3750 2175 4050 1575 4050 47 49 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 3 48 3150 3450 3750 3450 3900367550 1575 3450 2175 3450 2325 3675 49 51 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 50 3750 3150 3600337552 2175 3150 2025 3375 51 53 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 3 52 3150 4350 3750 4350 3900457554 1575 4350 2175 4350 2325 4575 53 55 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 54 3750 4050 3600427556 2175 4050 2025 4275 55 57 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 56 3150 4650 3750 4650 3750 4950 4950495058 1575 4650 2175 4650 2175 4950 3375 4950 57 59 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 58 6450 3750 6300397560 4875 3750 4725 3975 59 61 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 60 4950 4950 5175510062 3375 4950 3600 5100 61 63 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 9 62 5250 4950 6450 4950 6450 4050 7050 4050 7050 3750 6450375063 6450 2850 6150 2850 6150165064 3675 4950 4875 4950 4875 4050 5475 4050 5475 3750 4875 3750 65 4875 2850 4575 2850 4575 1650 64 66 2 2 1 1 -1 -1 0 0 -1 4.000 0 0 0 0 0 5 65 5850 4200 5850 3300 4350 3300 4350 4200 5850420066 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1267 4275 4200 4275 3300 2775 3300 2775 4200 4275 4200 68 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 67 69 1 1 1.00 60.00 120.00 68 7 1 1.00 60.00 120.00 69 5250 3150 5250 2400 70 3675 3075 3675 2400 71 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 72 4125 2850 4575 3000 70 73 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 71 3150 3150 3750 3150 3750 2850 5700 2850 5700 1650 72 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 73 5700 2850 6150 3000 74 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 75 5100 1800 5400 1800 5400 2400 5100 2400 5100 1800 76 4 1 -1 0 0 0 10 0.0000 2 75 75 6000 2745 a\001 77 4 1 -1 0 0 0 10 0.0000 2 75 75 6000 2445 c\001 78 4 1 -1 0 0 0 12 0.0000 2 135 315 5100 5325 exit\001 79 4 1 -1 0 0 0 12 0.0000 2 135 135 3300 3075 A\001 80 4 1 -1 0 0 0 12 0.0000 2 135 795 3300 4875 condition\001 81 4 1 -1 0 0 0 12 0.0000 2 135 135 3300 5100 B\001 82 4 0 -1 0 0 0 12 0.0000 2 135 420 6600 3675 stack\001 83 4 0 -1 0 0 0 12 0.0000 2 180 750 6600 3225 acceptor/\001 84 4 0 -1 0 0 0 12 0.0000 2 180 750 6600 3450 signalled\001 85 4 1 -1 0 0 0 12 0.0000 2 135 795 3300 2850 condition\001 86 4 1 -1 0 0 0 12 0.0000 2 165 420 6000 1350 entry\001 87 4 1 -1 0 0 0 12 0.0000 2 135 495 6000 1575 queue\001 88 4 0 -1 0 0 0 12 0.0000 2 135 525 6300 2400 arrival\001 89 4 0 -1 0 0 0 12 0.0000 2 135 630 6300 2175 order of\001 90 4 1 -1 0 0 0 12 0.0000 2 135 525 5100 3675 shared\001 91 4 1 -1 0 0 0 12 0.0000 2 135 735 5100 3975 variables\001 92 4 0 0 50 -1 0 11 0.0000 2 165 855 4275 3150 Acceptables\001 93 4 0 0 50 -1 0 11 0.0000 2 120 165 5775 2700 W\001 94 4 0 0 50 -1 0 11 0.0000 2 120 135 5775 2400 X\001 95 4 0 0 50 -1 0 11 0.0000 2 120 105 5775 2100 Z\001 96 4 0 0 50 -1 0 11 0.0000 2 120 135 5775 1800 Y\001 74 1575 3150 2175 3150 2175 2850 4125 2850 4125 1650 75 4 1 -1 0 0 0 10 0.0000 2 75 75 4425 2745 a\001 76 4 1 -1 0 0 0 10 0.0000 2 75 75 4425 2445 c\001 77 4 1 -1 0 0 0 12 0.0000 2 135 315 3525 5325 exit\001 78 4 1 -1 0 0 0 12 0.0000 2 135 135 1725 3075 A\001 79 4 1 -1 0 0 0 12 0.0000 2 135 795 1725 4875 condition\001 80 4 1 -1 0 0 0 12 0.0000 2 135 135 1725 5100 B\001 
81 4 0 -1 0 0 0 12 0.0000 2 135 420 5025 3675 stack\001 82 4 0 -1 0 0 0 12 0.0000 2 180 750 5025 3225 acceptor/\001 83 4 0 -1 0 0 0 12 0.0000 2 180 750 5025 3450 signalled\001 84 4 1 -1 0 0 0 12 0.0000 2 135 795 1725 2850 condition\001 85 4 1 -1 0 0 0 12 0.0000 2 165 420 4425 1350 entry\001 86 4 1 -1 0 0 0 12 0.0000 2 135 495 4425 1575 queue\001 87 4 0 -1 0 0 0 12 0.0000 2 135 525 4725 2400 arrival\001 88 4 0 -1 0 0 0 12 0.0000 2 135 630 4725 2175 order of\001 89 4 1 -1 0 0 0 12 0.0000 2 135 525 3525 3675 shared\001 90 4 1 -1 0 0 0 12 0.0000 2 135 735 3525 3975 variables\001 91 4 0 4 50 -1 0 11 0.0000 2 120 135 4150 1875 Y\001 92 4 0 4 50 -1 0 11 0.0000 2 120 105 4150 2175 Z\001 93 4 0 4 50 -1 0 11 0.0000 2 120 135 4150 2475 X\001 94 4 0 4 50 -1 0 11 0.0000 2 120 165 4150 2775 W\001 95 4 0 -1 0 0 3 12 0.0000 2 150 540 5025 4275 urgent\001 96 4 1 0 50 -1 0 11 0.0000 2 165 600 3150 3150 accepted\001 -
doc/papers/concurrency/figures/monitor.fig
rd286cf68 r63238a4 72 72 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 73 73 3600 1500 3600 2100 4200 2100 4200 900 74 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 475 2700 1500 2700 2100 3300 2100 3300 150076 74 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 9 77 75 3600 4200 4800 4200 4800 3300 5400 3300 5400 3000 4800 3000 … … 79 77 2 2 1 1 -1 -1 0 0 -1 4.000 0 0 0 0 0 5 80 78 4200 3450 4200 2550 2700 2550 2700 3450 4200 3450 79 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 80 2700 1500 2700 2100 3300 2100 3300 1500 81 81 4 1 -1 0 0 0 10 0.0000 2 75 75 4350 1995 a\001 82 82 4 1 -1 0 0 0 10 0.0000 2 75 75 4350 1695 c\001 … … 89 89 4 0 -1 0 0 0 12 0.0000 2 180 750 4950 2700 signalled\001 90 90 4 1 -1 0 0 0 12 0.0000 2 135 795 1650 2100 condition\001 91 4 1 -10 0 0 12 0.0000 2 135 135 2550 1425 X\00192 4 1 -10 0 0 12 0.0000 2 135 135 3450 1425 Y\00191 4 1 4 0 0 0 12 0.0000 2 135 135 2550 1425 X\001 92 4 1 4 0 0 0 12 0.0000 2 135 135 3450 1425 Y\001 93 93 4 1 -1 0 0 0 12 0.0000 2 165 420 4350 600 entry\001 94 94 4 1 -1 0 0 0 12 0.0000 2 135 495 4350 825 queue\001 … … 100 100 4 1 -1 0 0 0 10 0.0000 2 75 75 3450 1995 c\001 101 101 4 1 -1 0 0 0 12 0.0000 2 135 570 3000 1200 queues\001 102 4 0 -1 0 0 3 12 0.0000 2 150 540 4950 3525 urgent\001 -
doc/papers/general/Makefile
rd286cf68 r63238a4 59 59 dvips ${Build}/$< -o $@ 60 60 61 ${BASE}.dvi : Makefile ${B uild} ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \62 ../../bibliography/pl.bib 61 ${BASE}.dvi : Makefile ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 62 ../../bibliography/pl.bib | ${Build} 63 63 # Must have *.aux file containing citations for bibtex 64 64 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi … … 75 75 mkdir -p ${Build} 76 76 77 ${BASE}.out.ps : ${Build}77 ${BASE}.out.ps : | ${Build} 78 78 ln -fs ${Build}/Paper.out.ps . 79 79 … … 84 84 gnuplot -e Build="'${Build}/'" evaluation/timing.gp 85 85 86 %.tex : %.fig ${Build}86 %.tex : %.fig | ${Build} 87 87 fig2dev -L eepic $< > ${Build}/$@ 88 88 89 %.ps : %.fig ${Build}89 %.ps : %.fig | ${Build} 90 90 fig2dev -L ps $< > ${Build}/$@ 91 91 92 %.pstex : %.fig ${Build}92 %.pstex : %.fig | ${Build} 93 93 fig2dev -L pstex $< > ${Build}/$@ 94 94 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
doc/proposals/ctordtor/Makefile
rd286cf68 r63238a4 1 ## Define the appropriateconfiguration variables.1 ## Define the configuration variables. 2 2 3 MACROS = ../../LaTeXmacros 4 BIB = ../../bibliography 3 Build = build 4 Figures = figures 5 Macros = ../../LaTeXmacros 6 Bib = ../../bibliography 5 7 6 TeXLIB = .:$ (MACROS):$(MACROS)/listings:$(MACROS)/enumitem:$(BIB)/:7 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error 8 TeXLIB = .:${Macros}:${MACROS}/listings:${MACROS}/enumitem:${Bib}/: 9 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build} 8 10 BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex 11 12 MAKEFLAGS = --no-print-directory # --silent 13 VPATH = ${Build} ${Figures} 9 14 10 15 ## Define the text source files. … … 29 34 30 35 DOCUMENT = ctor.pdf 36 BASE = ${basename ${DOCUMENT}} 31 37 32 38 # Directives # 39 40 .PHONY : all clean # not file names 33 41 34 42 all : ${DOCUMENT} 35 43 36 44 clean : 37 rm -f *.bbl *.aux *.dvi *.idx *.ilg *.ind *.brf *.out *.log *.toc *.blg *.pstex_t *.cf \ 38 ${FIGURES} ${PICTURES} ${PROGRAMS} ${GRAPHS} ${basename ${DOCUMENT}}.ps ${DOCUMENT} 45 @rm -frv ${DOCUMENT} ${BASE}.ps ${Build} 39 46 40 47 # File Dependencies # 41 48 42 ${DOCUMENT} : ${ basename ${DOCUMENT}}.ps49 ${DOCUMENT} : ${BASE}.ps 43 50 ps2pdf $< 44 51 45 ${ basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi46 dvips $ < -o $@52 ${BASE}.ps : ${BASE}.dvi 53 dvips ${Build}/$< -o $@ 47 54 48 ${ basename ${DOCUMENT}}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex\49 $ (MACROS)/common.tex $(MACROS)/indexstyle $(BIB)/cfa.bib55 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 56 ${Macros}/common.tex ${Macros}/indexstyle ${Bib}/pl.bib | ${Build} 50 57 # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run. 51 if [ ! -r ${basename $@}.ind ] ; then touch${basename $@}.ind ; fi58 #if [ ! -r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi 52 59 # Must have *.aux file containing citations for bibtex 53 60 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi 54 -${BibTeX} ${ basename $@}55 # Some citations reference others so run stepsagain to resolve these citations61 -${BibTeX} ${Build}/${basename $@} 62 # Some citations reference others so run again to resolve these citations 56 63 ${LaTeX} ${basename $@}.tex 57 -${BibTeX} ${ basename $@}64 -${BibTeX} ${Build}/${basename $@} 58 65 # Make index from *.aux entries and input index at end of document 59 makeindex -s $(MACROS)/indexstyle ${basename $@}.idx 66 #makeindex -s ${Macros}/indexstyle ${Build}/${basename $@}.idx 67 # Run again to finish citations 60 68 ${LaTeX} ${basename $@}.tex 61 69 # Run again to get index title into table of contents … … 67 75 ## Define the default recipes. 68 76 69 %.tex : %.fig 70 fig2dev -L eepic $< > $@77 ${Build}: 78 mkdir -p ${Build} 71 79 72 %. ps : %.fig73 fig2dev -L ps $< >$@80 %.tex : %.fig | ${Build} 81 fig2dev -L eepic $< > ${Build}/$@ 74 82 75 %.pstex : %.fig 76 fig2dev -L pstex $< > $@ 77 fig2dev -L pstex_t -p $@ $< > $@_t 83 %.ps : %.fig | ${Build} 84 fig2dev -L ps $< > ${Build}/$@ 85 86 %.pstex : %.fig | ${Build} 87 fig2dev -L pstex $< > ${Build}/$@ 88 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t 78 89 79 90 # Local Variables: # -
doc/proposals/ctordtor/ctor.tex
rd286cf68 r63238a4 1 % inline code ©...© (copyright symbol) emacs: C-q M-)2 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-.3 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_4 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-"5 % LaTex escape §...§ (section symbol) emacs: C-q M-'6 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^7 % math escape $...$ (dollar symbol)8 9 1 \documentclass[twoside,11pt]{article} 10 2 … … 15 7 \usepackage{textcomp} 16 8 \usepackage[latin1]{inputenc} 9 17 10 \usepackage{fullpage,times,comment} 18 11 \usepackage{epic,eepic} 19 \usepackage{upquote} 12 \usepackage{upquote} % switch curled `'" to straight 20 13 \usepackage{calc} 21 14 \usepackage{xspace} 22 15 \usepackage{graphicx} 23 \usepackage{varioref} 24 \usepackage{listings} 25 \usepackage[flushmargin]{footmisc} 16 \usepackage{varioref} % extended references 17 \usepackage{listings} % format program code 18 \usepackage[flushmargin]{footmisc} % support label/reference in footnote 26 19 \usepackage{latexsym} % \Box glyph 27 20 \usepackage{mathptmx} % better math font with "times" … … 34 27 \renewcommand{\UrlFont}{\small\sf} 35 28 36 \setlength{\topmargin}{-0.45in} 29 \setlength{\topmargin}{-0.45in} % move running title into header 37 30 \setlength{\headsep}{0.25in} 38 31 … … 43 36 44 37 \interfootnotelinepenalty=10000 38 39 \CFAStyle % use default CFA format-style 40 % inline code ©...© (copyright symbol) emacs: C-q M-) 41 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-. 42 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_ 43 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-" 44 % LaTex escape §...§ (section symbol) emacs: C-q M-' 45 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^ 46 % math escape $...$ (dollar symbol) 47 45 48 46 49 \title{ … … 83 86 \thispagestyle{plain} 84 87 \pagenumbering{arabic} 85 86 88 87 89 -
doc/proposals/tuples/Makefile
rd286cf68 r63238a4 1 ## Define the appropriateconfiguration variables.1 ## Define the configuration variables. 2 2 3 MACROS = ../../LaTeXmacros 4 BIB = ../../bibliography 3 Build = build 4 Figures = figures 5 Macros = ../../LaTeXmacros 6 Bib = ../../bibliography 5 7 6 TeXLIB = .:$ (MACROS):$(MACROS)/listings:$(MACROS)/enumitem:$(BIB)/:7 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error 8 TeXLIB = .:${Macros}:${MACROS}/listings:${MACROS}/enumitem:${Bib}/: 9 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build} 8 10 BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex 11 12 MAKEFLAGS = --no-print-directory --silent # 13 VPATH = ${Build} ${Figures} 9 14 10 15 ## Define the text source files. … … 29 34 30 35 DOCUMENT = tuples.pdf 36 BASE = ${basename ${DOCUMENT}} 31 37 32 38 # Directives # 39 40 .PHONY : all clean # not file names 33 41 34 42 all : ${DOCUMENT} 35 43 36 44 clean : 37 rm -f *.bbl *.aux *.dvi *.idx *.ilg *.ind *.brf *.out *.log *.toc *.blg *.pstex_t *.cf \ 38 ${FIGURES} ${PICTURES} ${PROGRAMS} ${GRAPHS} ${basename ${DOCUMENT}}.ps ${DOCUMENT} 45 @rm -frv ${DOCUMENT} ${BASE}.ps ${Build} 39 46 40 47 # File Dependencies # 41 48 42 ${DOCUMENT} : ${ basename ${DOCUMENT}}.ps49 ${DOCUMENT} : ${BASE}.ps 43 50 ps2pdf $< 44 51 45 ${ basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi46 dvips $ < -o $@52 ${BASE}.ps : ${BASE}.dvi 53 dvips ${Build}/$< -o $@ 47 54 48 ${ basename ${DOCUMENT}}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex\49 $ (MACROS)/common.tex $(MACROS)/indexstyle $(BIB)/cfa.bib55 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 56 ${Macros}/common.tex ${Macros}/indexstyle ${Bib}/pl.bib | ${Build} 50 57 # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run. 51 if [ ! -r ${basename $@}.ind ] ; then touch${basename $@}.ind ; fi58 #if [ ! -r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi 52 59 # Must have *.aux file containing citations for bibtex 53 60 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi 54 -${BibTeX} ${ basename $@}55 # Some citations reference others so run stepsagain to resolve these citations61 -${BibTeX} ${Build}/${basename $@} 62 # Some citations reference others so run again to resolve these citations 56 63 ${LaTeX} ${basename $@}.tex 57 -${BibTeX} ${ basename $@}64 -${BibTeX} ${Build}/${basename $@} 58 65 # Make index from *.aux entries and input index at end of document 59 makeindex -s $(MACROS)/indexstyle ${basename $@}.idx 66 #makeindex -s ${Macros}/indexstyle ${Build}/${basename $@}.idx 67 # Run again to finish citations 60 68 ${LaTeX} ${basename $@}.tex 61 69 # Run again to get index title into table of contents … … 67 75 ## Define the default recipes. 68 76 69 %.tex : %.fig 70 fig2dev -L eepic $< > $@77 ${Build}: 78 mkdir -p ${Build} 71 79 72 %. ps : %.fig73 fig2dev -L ps $< >$@80 %.tex : %.fig | ${Build} 81 fig2dev -L eepic $< > ${Build}/$@ 74 82 75 %.pstex : %.fig 76 fig2dev -L pstex $< > $@ 77 fig2dev -L pstex_t -p $@ $< > $@_t 83 %.ps : %.fig | ${Build} 84 fig2dev -L ps $< > ${Build}/$@ 85 86 %.pstex : %.fig | ${Build} 87 fig2dev -L pstex $< > ${Build}/$@ 88 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t 78 89 79 90 # Local Variables: # -
doc/proposals/tuples/tuples.tex
rd286cf68 r63238a4 1 % inline code ©...© (copyright symbol) emacs: C-q M-)2 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-.3 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_4 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-"5 % LaTex escape §...§ (section symbol) emacs: C-q M-'6 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^7 % math escape $...$ (dollar symbol)8 9 1 \documentclass[twoside,11pt]{article} 10 2 … … 15 7 \usepackage{textcomp} 16 8 \usepackage[latin1]{inputenc} 9 17 10 \usepackage{fullpage,times,comment} 18 11 \usepackage{epic,eepic} 19 \usepackage{upquote} 12 \usepackage{upquote} % switch curled `'" to straight 20 13 \usepackage{calc} 21 14 \usepackage{xspace} 22 15 \usepackage{graphicx} 23 \usepackage{varioref} 24 \usepackage{listings} 25 \usepackage[flushmargin]{footmisc} 16 \usepackage{varioref} % extended references 17 \usepackage{listings} % format program code 18 \usepackage[flushmargin]{footmisc} % support label/reference in footnote 26 19 \usepackage{latexsym} % \Box glyph 27 20 \usepackage{mathptmx} % better math font with "times" … … 34 27 \renewcommand{\UrlFont}{\small\sf} 35 28 36 \setlength{\topmargin}{-0.45in} 29 \setlength{\topmargin}{-0.45in} % move running title into header 37 30 \setlength{\headsep}{0.25in} 38 31 … … 42 35 43 36 \interfootnotelinepenalty=10000 37 38 \CFAStyle % use default CFA format-style 39 % inline code ©...© (copyright symbol) emacs: C-q M-) 40 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-. 41 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_ 42 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-" 43 % LaTex escape §...§ (section symbol) emacs: C-q M-' 44 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^ 45 % math escape $...$ (dollar symbol) 46 44 47 45 48 \title{ -
doc/refrat/Makefile
rd286cf68 r63238a4 53 53 dvips ${Build}/$< -o $@ 54 54 55 ${BASE}.dvi : Makefile ${ Build} ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \56 ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib 55 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 56 ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib | ${Build} 57 57 # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run. 58 58 if [ ! -r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi … … 78 78 mkdir -p ${Build} 79 79 80 %.tex : %.fig ${Build}80 %.tex : %.fig | ${Build} 81 81 fig2dev -L eepic $< > ${Build}/$@ 82 82 83 %.ps : %.fig ${Build}83 %.ps : %.fig | ${Build} 84 84 fig2dev -L ps $< > ${Build}/$@ 85 85 86 %.pstex : %.fig ${Build}86 %.pstex : %.fig | ${Build} 87 87 fig2dev -L pstex $< > ${Build}/$@ 88 88 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
doc/theses/aaron_moss/comp_II/Makefile
rd286cf68 r63238a4 32 32 33 33 DOCUMENT = comp_II.pdf 34 BASE = ${basename ${DOCUMENT}} 34 35 35 36 # Directives # … … 40 41 41 42 clean : 42 @rm -frv ${DOCUMENT} ${ basename ${DOCUMENT}}.ps ${Build}43 @rm -frv ${DOCUMENT} ${BASE}.ps ${Build} 43 44 44 45 # File Dependencies # 45 46 46 ${DOCUMENT} : ${ basename ${DOCUMENT}}.ps47 ${DOCUMENT} : ${BASE}.ps 47 48 ps2pdf $< 48 49 49 ${ basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi50 ${BASE}.ps : ${BASE}.dvi 50 51 dvips ${Build}/$< -o $@ 51 52 52 ${ basename ${DOCUMENT}}.dvi : Makefile ${Build}${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \53 ${Macros}/common.tex ${Macros}/indexstyle ../../../bibliography/pl.bib 53 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 54 ${Macros}/common.tex ${Macros}/indexstyle ../../../bibliography/pl.bib | ${Build} 54 55 # Must have *.aux file containing citations for bibtex 55 56 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi … … 66 67 mkdir -p ${Build} 67 68 68 %.tex : %.fig 69 %.tex : %.fig ${Build} 69 70 fig2dev -L eepic $< > ${Build}/$@ 70 71 71 %.ps : %.fig 72 %.ps : %.fig | ${Build} 72 73 fig2dev -L ps $< > ${Build}/$@ 73 74 74 %.pstex : %.fig 75 %.pstex : %.fig | ${Build} 75 76 fig2dev -L pstex $< > ${Build}/$@ 76 77 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
doc/theses/thierry_delisle/Makefile
rd286cf68 r63238a4 51 51 52 52 DOCUMENT = thesis.pdf 53 BASE = ${basename ${DOCUMENT}} 53 54 54 55 # Directives # … … 59 60 60 61 clean : 61 @rm -frv ${DOCUMENT} ${ basename ${DOCUMENT}}.ps ${Build}62 @rm -frv ${DOCUMENT} ${BASE}.ps ${Build} 62 63 63 64 # File Dependencies # 64 65 65 ${DOCUMENT} : ${ basename ${DOCUMENT}}.ps66 ${DOCUMENT} : ${BASE}.ps 66 67 ps2pdf $< 67 68 68 ${ basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi69 ${BASE}.ps : ${BASE}.dvi 69 70 dvips ${Build}/$< -o $@ 70 71 71 ${ basename ${DOCUMENT}}.dvi : Makefile ${Build}${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \72 ${Macros}/common.tex ${Macros}/indexstyle annex/local.bib ../../bibliography/pl.bib 72 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 73 ${Macros}/common.tex ${Macros}/indexstyle annex/local.bib ../../bibliography/pl.bib | ${Build} 73 74 # Must have *.aux file containing citations for bibtex 74 75 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi … … 91 92 fig2dev -L eepic $< > ${Build}/$@ 92 93 93 %.ps : %.fig ${Build}94 %.ps : %.fig | ${Build} 94 95 fig2dev -L ps $< > ${Build}/$@ 95 96 96 %.pstex : %.fig ${Build}97 %.pstex : %.fig | ${Build} 97 98 fig2dev -L pstex $< > ${Build}/$@ 98 99 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
doc/user/Makefile
rd286cf68 r63238a4 57 57 dvips ${Build}/$< -o $@ 58 58 59 ${BASE}.dvi : Makefile ${ Build} ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \60 ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib 59 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 60 ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib | ${Build} 61 61 # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run. 62 62 if [ ! -r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi … … 79 79 mkdir -p ${Build} 80 80 81 %.tex : %.fig ${Build}81 %.tex : %.fig | ${Build} 82 82 fig2dev -L eepic $< > ${Build}/$@ 83 83 84 %.ps : %.fig ${Build}84 %.ps : %.fig | ${Build} 85 85 fig2dev -L ps $< > ${Build}/$@ 86 86 87 %.pstex : %.fig ${Build}87 %.pstex : %.fig | ${Build} 88 88 fig2dev -L pstex $< > ${Build}/$@ 89 89 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
src/Parser/TypedefTable.cc
rd286cf68 r63238a4 10 10 // Created On : Sat May 16 15:20:13 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Jun 7 13:17:56201813 // Update Count : 19212 // Last Modified On : Fri Jun 22 06:14:39 2018 13 // Update Count : 206 14 14 // 15 15 … … 78 78 debugPrint( cerr << "Adding current at " << locn << " " << identifier << " as " << kindName( kind ) << " scope " << scope << endl ); 79 79 auto ret = kindTable.insertAt( scope, identifier, kind ); 80 if ( ! ret.second ) ret.first->second = kind; // exists => update 80 //if ( ! ret.second ) ret.first->second = kind; // exists => update 81 assert( ret.first->second == kind ); // exists 81 82 } // TypedefTable::addToScope 82 83 83 84 void TypedefTable::addToEnclosingScope( const string & identifier, int kind, const char * locn __attribute__((unused)) ) { 84 assert( kindTable.currentScope() >= 1 );85 auto scope = kindTable.currentScope() - 1 ;86 debugPrint( cerr << "Adding enclosing at " << locn << " " << identifier << " as " << kindName( kind ) << " scope " << scope << endl );85 assert( kindTable.currentScope() >= 1 + level ); 86 auto scope = kindTable.currentScope() - 1 - level; 87 debugPrint( cerr << "Adding enclosing at " << locn << " " << identifier << " as " << kindName( kind ) << " scope " << scope << " level " << level << endl ); 87 88 auto ret = kindTable.insertAt( scope, identifier, kind ); 88 89 if ( ! ret.second ) ret.first->second = kind; // exists => update … … 91 92 void TypedefTable::enterScope() { 92 93 kindTable.beginScope(); 93 debugPrint( cerr << "Entering scope " << kindTable.currentScope() << endl ); 94 debugPrint( print() ); 94 debugPrint( cerr << "Entering scope " << kindTable.currentScope() << endl; print() ); 95 95 } // TypedefTable::enterScope 96 96 97 97 void TypedefTable::leaveScope() { 98 debugPrint( cerr << "Leaving scope " << kindTable.currentScope() << endl ); 99 debugPrint( print() ); 98 debugPrint( cerr << "Leaving scope " << kindTable.currentScope() << endl; print() ); 100 99 kindTable.endScope(); 101 100 } // TypedefTable::leaveScope … … 114 113 --scope; 115 114 debugPrint( cerr << endl << "[" << scope << "]" ); 116 } 115 } // while 117 116 debugPrint( cerr << endl ); 118 } 117 } // TypedefTable::print 119 118 120 119 // Local Variables: // -
src/Parser/TypedefTable.h
rd286cf68 r63238a4 10 10 // Created On : Sat May 16 15:24:36 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Jun 7 12:10:17201813 // Update Count : 8 512 // Last Modified On : Fri Jun 22 05:29:58 2018 13 // Update Count : 86 14 14 // 15 15 … … 25 25 typedef ScopedMap< std::string, int > KindTable; 26 26 KindTable kindTable; 27 unsigned int level = 0; 27 28 public: 28 29 ~TypedefTable(); … … 37 38 void leaveScope(); 38 39 40 void up() { level += 1; } 41 void down() { level -= 1; } 42 39 43 void print( void ) const; 40 44 }; // TypedefTable -
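The TypedefTable changes above add an up()/down() pair that biases addToEnclosingScope() by an extra level, so names declared inside an extern "X" or distribution block can be inserted into a scope further out than the immediately enclosing one. The following self-contained C++ mock-up imitates that scope arithmetic; it is an illustration only, not the ScopedMap-based implementation in the CFA source.

// Hand-rolled sketch of the "level" bias added to the typedef table.
#include <cassert>
#include <map>
#include <string>
#include <vector>

struct KindTable {
    std::vector<std::map<std::string, int>> scopes;   // innermost scope at the back
    unsigned level = 0;                                // extra outward bias from up()/down()

    KindTable() { scopes.emplace_back(); }             // global scope

    void enterScope() { scopes.emplace_back(); }
    void leaveScope() { scopes.pop_back(); }
    void up()   { level += 1; }                        // entering extern "X" / distribution body
    void down() { level -= 1; }                        // leaving it

    void addToEnclosingScope( const std::string & id, int kind ) {
        assert( scopes.size() >= 2 + level );           // an enclosing scope must exist
        scopes[scopes.size() - 2 - level][id] = kind;   // skip "level" additional scopes
    }
};

int main() {
    KindTable t;
    t.enterScope();            // scope opened for an extern "C" { ... } style block
    t.up();                    // declarations inside should land one level further out
    t.enterScope();            // scope opened around an external_definition
    t.addToEnclosingScope( "size_t", /* TYPEDEFname */ 1 );
    t.leaveScope();
    t.down();
    t.leaveScope();
    return t.scopes[0].count( "size_t" ) == 1 ? 0 : 1;   // name visible at file scope
}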
src/Parser/lex.ll
rd286cf68 r63238a4 10 10 * Created On : Sat Sep 22 08:58:10 2001 11 11 * Last Modified By : Peter A. Buhr 12 * Last Modified On : Thu Jun 7 08:27:40201813 * Update Count : 6 7912 * Last Modified On : Wed Jun 20 09:08:28 2018 13 * Update Count : 682 14 14 */ 15 15 … … 25 25 //**************************** Includes and Defines **************************** 26 26 27 // trigger before each matching rule's action 28 #define YY_USER_ACTION \ 29 yylloc.first_line = yylineno; \ 30 yylloc.first_column = column; \ 31 column += yyleng; \ 32 yylloc.last_column = column; \ 33 yylloc.last_line = yylineno; \ 34 yylloc.filename = yyfilename ? yyfilename : ""; 27 35 unsigned int column = 0; // position of the end of the last token parsed 28 #define YY_USER_ACTION yylloc.first_line = yylineno; yylloc.first_column = column; column += yyleng; yylloc.last_column = column; yylloc.last_line = yylineno; yylloc.filename = yyfilename ? yyfilename : ""; // trigger before each matching rule's action29 36 30 37 #include <string> … … 49 56 #define NUMERIC_RETURN(x) rm_underscore(); RETURN_VAL( x ) // numeric constant 50 57 #define KEYWORD_RETURN(x) RETURN_CHAR( x ) // keyword 51 #define QKEYWORD_RETURN(x) typedefTable.isKind( yytext ); RETURN_VAL(x);// quasi-keyword58 #define QKEYWORD_RETURN(x) RETURN_VAL(x); // quasi-keyword 52 59 #define IDENTIFIER_RETURN() RETURN_VAL( typedefTable.isKind( yytext ) ) 53 60 #define ATTRIBUTE_RETURN() RETURN_VAL( ATTR_IDENTIFIER ) -
src/Parser/parser.yy
rd286cf68 r63238a4 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Jun 7 10:07:12201813 // Update Count : 35 2712 // Last Modified On : Fri Jun 22 13:59:11 2018 13 // Update Count : 3586 14 14 // 15 15 … … 136 136 } // build_postfix_name 137 137 138 bool forall = false, xxx = false ;// aggregate have one or more forall qualifiers ?138 bool forall = false, xxx = false, yyy = false; // aggregate have one or more forall qualifiers ? 139 139 140 140 // https://www.gnu.org/software/bison/manual/bison.html#Location-Type … … 304 304 %type<en> enumerator_value_opt 305 305 306 %type<decl> exception_declaration external_definition external_definition_list external_definition_list_no_pop_push external_definition_list_opt 306 %type<decl> external_definition external_definition_list external_definition_list_opt 307 308 %type<decl> exception_declaration 307 309 308 310 %type<decl> field_declaration field_declaration_list_opt field_declarator_opt field_declaring_list … … 1821 1823 ; 1822 1824 1825 fred: 1826 // empty 1827 { yyy = false; } 1828 ; 1829 1823 1830 aggregate_type: // struct, union 1824 1831 aggregate_key attribute_list_opt '{' field_declaration_list_opt '}' 1825 1832 { $$ = DeclarationNode::newAggregate( $1, new string( DeclarationNode::anonymous.newName() ), nullptr, $4, true )->addQualifiers( $2 ); } 1826 | aggregate_key attribute_list_opt no_attr_identifier 1833 | aggregate_key attribute_list_opt no_attr_identifier fred 1827 1834 { 1828 1835 typedefTable.makeTypedef( *$3, forall ? TYPEGENname : TYPEDEFname ); // create typedef … … 1831 1838 } 1832 1839 '{' field_declaration_list_opt '}' 1833 { $$ = DeclarationNode::newAggregate( $1, $3, nullptr, $ 6, true )->addQualifiers( $2 ); }1834 | aggregate_key attribute_list_opt type_name 1840 { $$ = DeclarationNode::newAggregate( $1, $3, nullptr, $7, true )->addQualifiers( $2 ); } 1841 | aggregate_key attribute_list_opt type_name fred 1835 1842 { 1836 1843 typedefTable.makeTypedef( *$3->type->symbolic.name, forall ? TYPEGENname : TYPEDEFname ); // create typedef … … 1839 1846 } 1840 1847 '{' field_declaration_list_opt '}' 1841 { $$ = DeclarationNode::newAggregate( $1, $3->type->symbolic.name, nullptr, $ 6, true )->addQualifiers( $2 ); }1848 { $$ = DeclarationNode::newAggregate( $1, $3->type->symbolic.name, nullptr, $7, true )->addQualifiers( $2 ); } 1842 1849 | aggregate_key attribute_list_opt '(' type_list ')' '{' field_declaration_list_opt '}' // CFA 1843 1850 { $$ = DeclarationNode::newAggregate( $1, new string( DeclarationNode::anonymous.newName() ), $4, $7, false )->addQualifiers( $2 ); } … … 1846 1853 1847 1854 aggregate_type_nobody: // struct, union - {...} 1848 aggregate_key attribute_list_opt no_attr_identifier 1855 aggregate_key attribute_list_opt no_attr_identifier fred 1849 1856 { 1850 1857 typedefTable.makeTypedef( *$3, forall ? 
TYPEGENname : TYPEDEFname ); … … 1853 1860 $$ = DeclarationNode::newAggregate( $1, $3, nullptr, nullptr, false )->addQualifiers( $2 ); 1854 1861 } 1855 | aggregate_key attribute_list_opt type_name 1862 | aggregate_key attribute_list_opt type_name fred 1856 1863 { 1857 1864 // Create new generic declaration with same name as previous forward declaration, where the IDENTIFIER is … … 1867 1874 aggregate_key: 1868 1875 STRUCT 1869 { $$ = DeclarationNode::Struct; }1876 { yyy = true; $$ = DeclarationNode::Struct; } 1870 1877 | UNION 1871 { $$ = DeclarationNode::Union; }1878 { yyy = true; $$ = DeclarationNode::Union; } 1872 1879 | EXCEPTION 1873 { $$ = DeclarationNode::Exception; }1880 { yyy = true; $$ = DeclarationNode::Exception; } 1874 1881 | COROUTINE 1875 { $$ = DeclarationNode::Coroutine; }1882 { yyy = true; $$ = DeclarationNode::Coroutine; } 1876 1883 | MONITOR 1877 { $$ = DeclarationNode::Monitor; }1884 { yyy = true; $$ = DeclarationNode::Monitor; } 1878 1885 | THREAD 1879 { $$ = DeclarationNode::Thread; }1886 { yyy = true; $$ = DeclarationNode::Thread; } 1880 1887 ; 1881 1888 … … 2322 2329 2323 2330 translation_unit: 2324 // empty 2325 {} // empty input file 2331 // empty, input file 2326 2332 | external_definition_list 2327 2333 { parseTree = parseTree ? parseTree->appendList( $1 ) : $1; } … … 2337 2343 ; 2338 2344 2339 // SKULLDUGGERY: Declarations in extern "X" and distribution need to be added to the current lexical scope.2340 // However, external_definition_list creates a new scope around each external_definition, but the pop loses all the2341 // types in the extern "X" and distribution at the end of the block. This version of external_definition_list does2342 2343 // not do push/pop for declarations at the level of the extern "X" and distribution block. Any recursive uses of2344 // external_definition_list within the extern "X" and distribution block correctly pushes/pops for that scope level.2345 external_definition_list_no_pop_push:2346 external_definition2347 | external_definition_list_no_pop_push2348 { forall = xxx; }2349 external_definition2350 { $$ = $1 ? $1->appendList( $3 ) : $3; }2351 ;2352 2353 2345 external_definition_list_opt: 2354 2346 // empty 2355 2347 { $$ = nullptr; } 2356 | external_definition_list_no_pop_push 2348 | external_definition_list 2349 ; 2350 2351 up: 2352 { typedefTable.up(); } 2353 ; 2354 2355 down: 2356 { typedefTable.down(); } 2357 2357 ; 2358 2358 … … 2374 2374 linkage = LinkageSpec::linkageUpdate( yylloc, linkage, $2 ); 2375 2375 } 2376 '{' external_definition_list_opt'}'2376 '{' up external_definition_list_opt down '}' 2377 2377 { 2378 2378 linkage = linkageStack.top(); 2379 2379 linkageStack.pop(); 2380 $$ = $ 5;2380 $$ = $6; 2381 2381 } 2382 2382 | type_qualifier_list 2383 { if ( $1->type->forall ) xxx = forall = true; } // remember generic type 2384 '{' external_definition_list_opt '}' // CFA, namespace 2385 { 2386 for ( DeclarationNode * iter = $4; iter != nullptr; iter = (DeclarationNode *)iter->get_next() ) { 2383 { 2384 if ( $1->type->qualifiers.val ) { SemanticError( yylloc, "CV qualifiers cannot be distributed; only storage-class and forall qualifiers." 
); } 2385 if ( $1->type->forall ) xxx = forall = true; // remember generic type 2386 } 2387 '{' up external_definition_list_opt down '}' // CFA, namespace 2388 { 2389 for ( DeclarationNode * iter = $5; iter != nullptr; iter = (DeclarationNode *)iter->get_next() ) { 2387 2390 if ( isMangled( iter->linkage ) ) { // ignore extern "C" 2388 2391 iter->addQualifiers( $1->clone() ); … … 2391 2394 xxx = false; 2392 2395 delete $1; 2393 $$ = $ 4;2396 $$ = $5; 2394 2397 } 2395 2398 | declaration_qualifier_list 2396 { if ( $1->type->forall ) xxx = forall = true; } // remember generic type 2397 '{' external_definition_list_opt '}' // CFA, namespace 2398 { 2399 for ( DeclarationNode * iter = $4; iter != nullptr; iter = (DeclarationNode *)iter->get_next() ) { 2399 { 2400 if ( $1->type->qualifiers.val ) { SemanticError( yylloc, "CV qualifiers cannot be distributed; only storage-class and forall qualifiers." ); } 2401 if ( $1->type->forall ) xxx = forall = true; // remember generic type 2402 } 2403 '{' up external_definition_list_opt down '}' // CFA, namespace 2404 { 2405 for ( DeclarationNode * iter = $5; iter != nullptr; iter = (DeclarationNode *)iter->get_next() ) { 2400 2406 if ( isMangled( iter->linkage ) ) { // ignore extern "C" 2401 2407 iter->addQualifiers( $1->clone() ); … … 2404 2410 xxx = false; 2405 2411 delete $1; 2406 $$ = $ 4;2412 $$ = $5; 2407 2413 } 2408 2414 | declaration_qualifier_list type_qualifier_list 2409 2415 { 2410 // forall must be in the type_qualifier_list2411 if ( $2->type->forall ) xxx = forall = true; // remember generic type2412 } 2413 '{' external_definition_list_opt '}'// CFA, namespace2414 { 2415 for ( DeclarationNode * iter = $ 5; iter != nullptr; iter = (DeclarationNode *)iter->get_next() ) {2416 if ( ($1->type && $1->type->qualifiers.val) || $2->type->qualifiers.val ) { SemanticError( yylloc, "CV qualifiers cannot be distributed; only storage-class and forall qualifiers." ); } 2417 if ( ($1->type && $1->type->forall) || $2->type->forall ) xxx = forall = true; // remember generic type 2418 } 2419 '{' up external_definition_list_opt down '}' // CFA, namespace 2420 { 2421 for ( DeclarationNode * iter = $6; iter != nullptr; iter = (DeclarationNode *)iter->get_next() ) { 2416 2422 if ( isMangled( iter->linkage ) && isMangled( $2->linkage ) ) { // ignore extern "C" 2417 2423 iter->addQualifiers( $1->clone() ); … … 2422 2428 delete $1; 2423 2429 delete $2; 2424 $$ = $ 5;2430 $$ = $6; 2425 2431 } 2426 2432 ; -
src/SynTree/Type.cc
rd286cf68 r63238a4 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Sep 25 15:16:32 201713 // Update Count : 3 812 // Last Modified On : Fri Jun 22 10:17:19 2018 13 // Update Count : 39 14 14 // 15 15 #include "Type.h" … … 69 69 70 70 // These must remain in the same order as the corresponding bit fields. 71 const char * Type::FuncSpecifiersNames[] = { "inline", " fortran", "_Noreturn" };71 const char * Type::FuncSpecifiersNames[] = { "inline", "_Noreturn", "fortran" }; 72 72 const char * Type::StorageClassesNames[] = { "extern", "static", "auto", "register", "_Thread_local" }; 73 73 const char * Type::QualifiersNames[] = { "const", "restrict", "volatile", "lvalue", "mutex", "_Atomic" };
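The Type.cc hunk above reorders FuncSpecifiersNames so the strings line up with the bit-field order ("fortran" had been listed before "_Noreturn"). The invariant is easy to see in a cut-down C++ example; the enum below is a simplified stand-in for, not a copy of, the actual Type.h bit fields.

// When flag names are looked up by bit position, the string array must list
// the names in exactly the same order as the corresponding flag bits.
#include <iostream>

enum FuncSpecifier : unsigned { Inline = 1u << 0, Noreturn = 1u << 1, Fortran = 1u << 2 };

// Must remain in the same order as the bits above.
const char * FuncSpecifiersNames[] = { "inline", "_Noreturn", "fortran" };

void print( unsigned specifiers ) {
    for ( unsigned bit = 0; bit < 3; bit += 1 )
        if ( specifiers & (1u << bit) ) std::cout << FuncSpecifiersNames[bit] << ' ';
    std::cout << '\n';
}

int main() {
    print( Inline | Noreturn );   // prints "inline _Noreturn", not "inline fortran"
}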