- Timestamp:
- Oct 29, 2019, 4:01:24 PM (6 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 773db65, 9421f3d8
- Parents:
- 7951100 (diff), 8364209 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- doc
- Files:
-
- 157 added
- 6 deleted
- 23 edited
- 95 moved
-
LaTeXmacros/common.tex (modified) (3 diffs)
-
LaTeXmacros/lstlang.sty (modified) (2 diffs)
-
bibliography/pl.bib (modified) (99 diffs)
-
papers/AMA/AMA-stix/ama/WileyNJD-v2.cls (modified) (5 diffs)
-
papers/OOPSLA17/Makefile (modified) (4 diffs)
-
papers/concurrency/Makefile (modified) (4 diffs)
-
papers/concurrency/Paper.tex (modified) (33 diffs)
-
papers/concurrency/SPEOldPaper.pdf (added)
-
papers/concurrency/annex/local.bib (modified) (2 diffs)
-
papers/concurrency/examples/C++Cor-ts.cpp (added)
-
papers/concurrency/examples/Fib.c (added)
-
papers/concurrency/examples/Fib.cfa (added)
-
papers/concurrency/examples/Fib.cpp (added)
-
papers/concurrency/examples/Fib.py (added)
-
papers/concurrency/examples/Fib.sim (added)
-
papers/concurrency/examples/Fib1.c (added)
-
papers/concurrency/examples/Fib2.c (added)
-
papers/concurrency/examples/Fib2.cfa (added)
-
papers/concurrency/examples/Fib2.cpp (added)
-
papers/concurrency/examples/Fib2.py (added)
-
papers/concurrency/examples/Fib3.c (added)
-
papers/concurrency/examples/Fib3.cc (added)
-
papers/concurrency/examples/FibRefactor.py (added)
-
papers/concurrency/examples/Fmt.sim (added)
-
papers/concurrency/examples/Format.c (added)
-
papers/concurrency/examples/Format.cc (added)
-
papers/concurrency/examples/Format.cfa (added)
-
papers/concurrency/examples/Format.cpp (added)
-
papers/concurrency/examples/Format.data (added)
-
papers/concurrency/examples/Format.py (added)
-
papers/concurrency/examples/Format.sim (added)
-
papers/concurrency/examples/Format1.c (added)
-
papers/concurrency/examples/PingPong.c (added)
-
papers/concurrency/examples/PingPong.cc (added)
-
papers/concurrency/examples/Pingpong.cc (added)
-
papers/concurrency/examples/Pingpong.cfa (added)
-
papers/concurrency/examples/Pingpong.py (added)
-
papers/concurrency/examples/Pingpong2.cfa (added)
-
papers/concurrency/examples/ProdCons.cfa (added)
-
papers/concurrency/examples/ProdCons.cpp (added)
-
papers/concurrency/examples/ProdCons.py (added)
-
papers/concurrency/examples/ProdCons.sim (added)
-
papers/concurrency/examples/RWMonitor.cfa (added)
-
papers/concurrency/examples/Refactor.py (added)
-
papers/concurrency/examples/counter.cpp (added)
-
papers/concurrency/figures/CondSigWait.fig (added)
-
papers/concurrency/figures/FullCoroutinePhases.fig (added)
-
papers/concurrency/figures/FullProdConsStack.fig (added)
-
papers/concurrency/figures/RunTimeStructure.fig (added)
-
papers/concurrency/figures/corlayout.fig (added)
-
papers/concurrency/figures/ext_monitor.fig (modified) (1 diff)
-
papers/concurrency/figures/monitor.fig (modified) (1 diff)
-
papers/concurrency/figures/monitor.old.fig (added)
-
papers/concurrency/mail (added)
-
papers/concurrency/mail2 (added)
-
papers/concurrency/style/cfa-format.tex (deleted)
-
papers/general/.gitignore (modified) (1 diff)
-
papers/general/Makefile (modified) (5 diffs)
-
papers/general/Paper.tex (modified) (124 diffs)
-
papers/general/fig.tex (added)
-
proposals/approx-equal.md (added)
-
proposals/ctordtor/Makefile (modified) (3 diffs)
-
proposals/ctordtor/ctor.tex (modified) (5 diffs)
-
proposals/flags.md (modified) (2 diffs)
-
proposals/interned_string.cc (added)
-
proposals/interned_string.h (added)
-
proposals/operator-defaults.md (added)
-
proposals/specialized_casts.md (added)
-
proposals/tuples/Makefile (modified) (3 diffs)
-
proposals/tuples/tuples.tex (modified) (4 diffs)
-
proposals/unicode.html (added)
-
proposals/user_conversions.md (modified) (9 diffs)
-
proposals/virtual.txt (deleted)
-
proposals/vtable.md (added)
-
refrat/Makefile (modified) (2 diffs)
-
theses/aaron_moss_PhD/comp_II/.gitignore (moved) (moved from doc/theses/aaron_moss/comp_II/.gitignore )
-
theses/aaron_moss_PhD/comp_II/Efficient Type Resolution in Cforall.pptx (moved) (moved from doc/theses/aaron_moss/comp_II/Efficient Type Resolution in Cforall.pptx )
-
theses/aaron_moss_PhD/comp_II/Makefile (moved) (moved from doc/theses/aaron_moss/comp_II/Makefile ) (3 diffs)
-
theses/aaron_moss_PhD/comp_II/comp_II.tex (moved) (moved from doc/theses/aaron_moss/comp_II/comp_II.tex )
-
theses/aaron_moss_PhD/comp_II/conversion_dag.eps (moved) (moved from doc/theses/aaron_moss/comp_II/conversion_dag.eps )
-
theses/aaron_moss_PhD/comp_II/conversion_dag.odg (moved) (moved from doc/theses/aaron_moss/comp_II/conversion_dag.odg )
-
theses/aaron_moss_PhD/comp_II/conversion_dag.png (moved) (moved from doc/theses/aaron_moss/comp_II/conversion_dag.png )
-
theses/aaron_moss_PhD/comp_II/resolution_dag.eps (moved) (moved from doc/theses/aaron_moss/comp_II/resolution_dag.eps )
-
theses/aaron_moss_PhD/comp_II/resolution_dag.odg (moved) (moved from doc/theses/aaron_moss/comp_II/resolution_dag.odg )
-
theses/aaron_moss_PhD/comp_II/resolution_dag.png (moved) (moved from doc/theses/aaron_moss/comp_II/resolution_dag.png )
-
theses/aaron_moss_PhD/comp_II/resolution_dag2.odg (moved) (moved from doc/theses/aaron_moss/comp_II/resolution_dag2.odg )
-
theses/aaron_moss_PhD/comp_II/resolution_dag2.png (moved) (moved from doc/theses/aaron_moss/comp_II/resolution_dag2.png )
-
theses/aaron_moss_PhD/phd/.gitignore (added)
-
theses/aaron_moss_PhD/phd/Makefile (added)
-
theses/aaron_moss_PhD/phd/background.tex (added)
-
theses/aaron_moss_PhD/phd/cfa-macros.tex (added)
-
theses/aaron_moss_PhD/phd/code/bespoke-generic.c (added)
-
theses/aaron_moss_PhD/phd/code/cfa-generic.cfa (added)
-
theses/aaron_moss_PhD/phd/code/macro-generic.c (added)
-
theses/aaron_moss_PhD/phd/code/void-generic.c (added)
-
theses/aaron_moss_PhD/phd/conclusion.tex (added)
-
theses/aaron_moss_PhD/phd/evaluation/algo-summary.dat (added)
-
theses/aaron_moss_PhD/phd/evaluation/algo-summary.gp (added)
-
theses/aaron_moss_PhD/phd/evaluation/bu-summary.dat (added)
-
theses/aaron_moss_PhD/phd/evaluation/cfa-cc/cfa-bu.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/cfa-cc/cfa-co.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/cfa-cc/cfa-dca.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/cfa-cc/cfa-def.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/cfa-cc/cfa-imm.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/cfa-mem-by-time.tsv (added)
-
theses/aaron_moss_PhD/phd/evaluation/cfa-mem.tsv (added)
-
theses/aaron_moss_PhD/phd/evaluation/cfa-plots.gp (added)
-
theses/aaron_moss_PhD/phd/evaluation/cfa-time.tsv (added)
-
theses/aaron_moss_PhD/phd/evaluation/data.xlsx (added)
-
theses/aaron_moss_PhD/phd/evaluation/generic-timing.dat (added)
-
theses/aaron_moss_PhD/phd/evaluation/generic-timing.gp (added)
-
theses/aaron_moss_PhD/phd/evaluation/mem-by-max-assns.tsv (added)
-
theses/aaron_moss_PhD/phd/evaluation/mem-by-max-depth.tsv (added)
-
theses/aaron_moss_PhD/phd/evaluation/metric-plots.gp (added)
-
theses/aaron_moss_PhD/phd/evaluation/per-prob-scatter.gp (added)
-
theses/aaron_moss_PhD/phd/evaluation/per-prob.gp (added)
-
theses/aaron_moss_PhD/phd/evaluation/per-prob.tsv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/imgui-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/io1-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/io2-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/kernel-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/math1-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/math2-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/math3-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/math4-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/minmax-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/preemption-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/rational-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/searchsort-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/per_prob/swap-per-prob.csv (added)
-
theses/aaron_moss_PhD/phd/evaluation/time-by-max-assns.tsv (added)
-
theses/aaron_moss_PhD/phd/evaluation/time-by-max-depth.tsv (added)
-
theses/aaron_moss_PhD/phd/experiments.tex (added)
-
theses/aaron_moss_PhD/phd/figures/bilson-conv-graph.eps (added)
-
theses/aaron_moss_PhD/phd/figures/bilson-conv-graph.odg (added)
-
theses/aaron_moss_PhD/phd/figures/extended-conv-graph.eps (added)
-
theses/aaron_moss_PhD/phd/figures/extended-conv-graph.odg (added)
-
theses/aaron_moss_PhD/phd/figures/persistent-union-find.eps (added)
-
theses/aaron_moss_PhD/phd/figures/persistent-union-find.odg (added)
-
theses/aaron_moss_PhD/phd/figures/resolution-dag.eps (added)
-
theses/aaron_moss_PhD/phd/figures/resolution-dag.odg (added)
-
theses/aaron_moss_PhD/phd/figures/safe-conv-graph.eps (added)
-
theses/aaron_moss_PhD/phd/figures/safe-conv-graph.odg (added)
-
theses/aaron_moss_PhD/phd/figures/union-find-with-classes.eps (added)
-
theses/aaron_moss_PhD/phd/figures/union-find-with-classes.odg (added)
-
theses/aaron_moss_PhD/phd/frontpgs.tex (added)
-
theses/aaron_moss_PhD/phd/generic-bench.tex (added)
-
theses/aaron_moss_PhD/phd/generic-types.tex (added)
-
theses/aaron_moss_PhD/phd/introduction.tex (added)
-
theses/aaron_moss_PhD/phd/macros.tex (added)
-
theses/aaron_moss_PhD/phd/resolution-heuristics.tex (added)
-
theses/aaron_moss_PhD/phd/thesis.tex (added)
-
theses/aaron_moss_PhD/phd/timeline.md (added)
-
theses/aaron_moss_PhD/phd/type-environment.tex (added)
-
theses/lynn_tran_SE499/Bibliography.bib (added)
-
theses/lynn_tran_SE499/Chapters/CFA.tex (added)
-
theses/lynn_tran_SE499/Chapters/Conclusion.tex (added)
-
theses/lynn_tran_SE499/Chapters/Demangler.tex (added)
-
theses/lynn_tran_SE499/Chapters/Extensions.tex (added)
-
theses/lynn_tran_SE499/Chapters/GDB.tex (added)
-
theses/lynn_tran_SE499/Chapters/Introduction.tex (added)
-
theses/lynn_tran_SE499/Chapters/uCPP.tex (added)
-
theses/lynn_tran_SE499/LICENSE (added)
-
theses/lynn_tran_SE499/SE499-master.zip (added)
-
theses/lynn_tran_SE499/SE499-master/.gdbinit (added)
-
theses/lynn_tran_SE499/SE499-master/README.md (added)
-
theses/lynn_tran_SE499/SE499-master/test.cc (added)
-
theses/lynn_tran_SE499/SE499-master/utils-gdb.gdb (added)
-
theses/lynn_tran_SE499/SE499-master/utils-gdb.py (added)
-
theses/lynn_tran_SE499/SE499-master/utils.cpp (added)
-
theses/lynn_tran_SE499/Thesis.cls (added)
-
theses/lynn_tran_SE499/lstpatch.sty (added)
-
theses/lynn_tran_SE499/se499-report.tex (added)
-
theses/lynn_tran_SE499/uContext_stack.png (added)
-
theses/lynn_tran_SE499/vector.sty (added)
-
theses/rob_schluntz_MMath/.gitignore (moved) (moved from doc/theses/rob_schluntz/.gitignore )
-
theses/rob_schluntz_MMath/Makefile (moved) (moved from doc/theses/rob_schluntz/Makefile )
-
theses/rob_schluntz_MMath/cfa-format.tex (moved) (moved from doc/theses/rob_schluntz/cfa-format.tex )
-
theses/rob_schluntz_MMath/conclusions.tex (moved) (moved from doc/theses/rob_schluntz/conclusions.tex )
-
theses/rob_schluntz_MMath/ctordtor.tex (moved) (moved from doc/theses/rob_schluntz/ctordtor.tex )
-
theses/rob_schluntz_MMath/examples/conclusions/dtor.c (moved) (moved from doc/theses/rob_schluntz/examples/conclusions/dtor.c )
-
theses/rob_schluntz_MMath/examples/conclusions/except.c (moved) (moved from doc/theses/rob_schluntz/examples/conclusions/except.c )
-
theses/rob_schluntz_MMath/examples/conclusions/except.cc (moved) (moved from doc/theses/rob_schluntz/examples/conclusions/except.cc )
-
theses/rob_schluntz_MMath/examples/ctor/array_ctor.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/array_ctor.c )
-
theses/rob_schluntz_MMath/examples/ctor/copy_ctor.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/copy_ctor.c )
-
theses/rob_schluntz_MMath/examples/ctor/cv_ctor.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/cv_ctor.c )
-
theses/rob_schluntz_MMath/examples/ctor/enum_ctor.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/enum_ctor.c )
-
theses/rob_schluntz_MMath/examples/ctor/expr_ctor.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/expr_ctor.c )
-
theses/rob_schluntz_MMath/examples/ctor/global_ctor.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/global_ctor.c )
-
theses/rob_schluntz_MMath/examples/ctor/hide_ctor.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/hide_ctor.c )
-
theses/rob_schluntz_MMath/examples/ctor/member.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/member.c )
-
theses/rob_schluntz_MMath/examples/ctor/placement_ctor.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/placement_ctor.c )
-
theses/rob_schluntz_MMath/examples/ctor/return_dtor.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/return_dtor.c )
-
theses/rob_schluntz_MMath/examples/ctor/static_ctor.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/static_ctor.c )
-
theses/rob_schluntz_MMath/examples/ctor/union_ctor.c (moved) (moved from doc/theses/rob_schluntz/examples/ctor/union_ctor.c )
-
theses/rob_schluntz_MMath/examples/intro/FileOutputStream.java (moved) (moved from doc/theses/rob_schluntz/examples/intro/FileOutputStream.java )
-
theses/rob_schluntz_MMath/examples/intro/compound_lit.c (moved) (moved from doc/theses/rob_schluntz/examples/intro/compound_lit.c )
-
theses/rob_schluntz_MMath/examples/intro/designation.c (moved) (moved from doc/theses/rob_schluntz/examples/intro/designation.c )
-
theses/rob_schluntz_MMath/examples/intro/ignore.c (moved) (moved from doc/theses/rob_schluntz/examples/intro/ignore.c )
-
theses/rob_schluntz_MMath/examples/intro/ires.java (moved) (moved from doc/theses/rob_schluntz/examples/intro/ires.java )
-
theses/rob_schluntz_MMath/examples/intro/res.java (moved) (moved from doc/theses/rob_schluntz/examples/intro/res.java )
-
theses/rob_schluntz_MMath/examples/intro/res1.java (moved) (moved from doc/theses/rob_schluntz/examples/intro/res1.java )
-
theses/rob_schluntz_MMath/examples/intro/res2.java (moved) (moved from doc/theses/rob_schluntz/examples/intro/res2.java )
-
theses/rob_schluntz_MMath/examples/intro/res3.java (moved) (moved from doc/theses/rob_schluntz/examples/intro/res3.java )
-
theses/rob_schluntz_MMath/examples/intro/tuple.cc (moved) (moved from doc/theses/rob_schluntz/examples/intro/tuple.cc )
-
theses/rob_schluntz_MMath/examples/intro/variadic.java (moved) (moved from doc/theses/rob_schluntz/examples/intro/variadic.java )
-
theses/rob_schluntz_MMath/examples/malloc.cc (moved) (moved from doc/theses/rob_schluntz/examples/malloc.cc )
-
theses/rob_schluntz_MMath/examples/nested.c (moved) (moved from doc/theses/rob_schluntz/examples/nested.c )
-
theses/rob_schluntz_MMath/examples/poly.c (moved) (moved from doc/theses/rob_schluntz/examples/poly.c )
-
theses/rob_schluntz_MMath/examples/scope_guard.h (moved) (moved from doc/theses/rob_schluntz/examples/scope_guard.h )
-
theses/rob_schluntz_MMath/examples/test_scoped_guard.c (moved) (moved from doc/theses/rob_schluntz/examples/test_scoped_guard.c )
-
theses/rob_schluntz_MMath/examples/tuples/assign.c (moved) (moved from doc/theses/rob_schluntz/examples/tuples/assign.c )
-
theses/rob_schluntz_MMath/examples/tuples/cast.c (moved) (moved from doc/theses/rob_schluntz/examples/tuples/cast.c )
-
theses/rob_schluntz_MMath/examples/tuples/ctor.c (moved) (moved from doc/theses/rob_schluntz/examples/tuples/ctor.c )
-
theses/rob_schluntz_MMath/examples/tuples/mrv.c (moved) (moved from doc/theses/rob_schluntz/examples/tuples/mrv.c )
-
theses/rob_schluntz_MMath/examples/tuples/mrv_1.c (moved) (moved from doc/theses/rob_schluntz/examples/tuples/mrv_1.c )
-
theses/rob_schluntz_MMath/examples/tuples/mrv_2.c (moved) (moved from doc/theses/rob_schluntz/examples/tuples/mrv_2.c )
-
theses/rob_schluntz_MMath/examples/tuples/mrv_3.c (moved) (moved from doc/theses/rob_schluntz/examples/tuples/mrv_3.c )
-
theses/rob_schluntz_MMath/examples/tuples/named.c (moved) (moved from doc/theses/rob_schluntz/examples/tuples/named.c )
-
theses/rob_schluntz_MMath/examples/variadic/new.c (moved) (moved from doc/theses/rob_schluntz/examples/variadic/new.c )
-
theses/rob_schluntz_MMath/examples/variadic/print.c (moved) (moved from doc/theses/rob_schluntz/examples/variadic/print.c )
-
theses/rob_schluntz_MMath/examples/variadic/sum1.c (moved) (moved from doc/theses/rob_schluntz/examples/variadic/sum1.c )
-
theses/rob_schluntz_MMath/examples/variadic/sum2.c (moved) (moved from doc/theses/rob_schluntz/examples/variadic/sum2.c )
-
theses/rob_schluntz_MMath/intro.tex (moved) (moved from doc/theses/rob_schluntz/intro.tex )
-
theses/rob_schluntz_MMath/thesis-frontpgs.tex (moved) (moved from doc/theses/rob_schluntz/thesis-frontpgs.tex )
-
theses/rob_schluntz_MMath/thesis.bib (moved) (moved from doc/theses/rob_schluntz/thesis.bib )
-
theses/rob_schluntz_MMath/thesis.tex (moved) (moved from doc/theses/rob_schluntz/thesis.tex )
-
theses/rob_schluntz_MMath/tuples.tex (moved) (moved from doc/theses/rob_schluntz/tuples.tex )
-
theses/rob_schluntz_MMath/variadic.tex (moved) (moved from doc/theses/rob_schluntz/variadic.tex )
-
theses/thierry_delisle/notes/cor-thread-traits.c (deleted)
-
theses/thierry_delisle/notes/lit-review.md (deleted)
-
theses/thierry_delisle/notes/notes.md (deleted)
-
theses/thierry_delisle/style/style.tex (deleted)
-
theses/thierry_delisle_MMath/.gitignore (moved) (moved from doc/theses/thierry_delisle/.gitignore )
-
theses/thierry_delisle_MMath/Makefile (moved) (moved from doc/theses/thierry_delisle/Makefile ) (3 diffs)
-
theses/thierry_delisle_MMath/annex/glossary.tex (moved) (moved from doc/theses/thierry_delisle/annex/glossary.tex )
-
theses/thierry_delisle_MMath/annex/local.bib (moved) (moved from doc/theses/thierry_delisle/annex/local.bib )
-
theses/thierry_delisle_MMath/figures/dependency.fig (moved) (moved from doc/theses/thierry_delisle/figures/dependency.fig )
-
theses/thierry_delisle_MMath/figures/ext_monitor.fig (moved) (moved from doc/theses/thierry_delisle/figures/ext_monitor.fig )
-
theses/thierry_delisle_MMath/figures/int_monitor.fig (moved) (moved from doc/theses/thierry_delisle/figures/int_monitor.fig )
-
theses/thierry_delisle_MMath/figures/monitor.fig (moved) (moved from doc/theses/thierry_delisle/figures/monitor.fig )
-
theses/thierry_delisle_MMath/figures/monitor_structs.fig (moved) (moved from doc/theses/thierry_delisle/figures/monitor_structs.fig )
-
theses/thierry_delisle_MMath/figures/system.fig (moved) (moved from doc/theses/thierry_delisle/figures/system.fig )
-
theses/thierry_delisle_MMath/notes/cor-thread-traits.c (moved) (moved from doc/papers/concurrency/notes/cor-thread-traits.c )
-
theses/thierry_delisle_MMath/notes/lit-review.md (moved) (moved from doc/papers/concurrency/notes/lit-review.md )
-
theses/thierry_delisle_MMath/notes/notes.md (moved) (moved from doc/papers/concurrency/notes/notes.md )
-
theses/thierry_delisle_MMath/style/cfa-format.tex (moved) (moved from doc/theses/thierry_delisle/style/cfa-format.tex )
-
theses/thierry_delisle_MMath/style/style.tex (moved) (moved from doc/papers/concurrency/style/style.tex )
-
theses/thierry_delisle_MMath/text/basics.tex (moved) (moved from doc/theses/thierry_delisle/text/basics.tex )
-
theses/thierry_delisle_MMath/text/cforall.tex (moved) (moved from doc/theses/thierry_delisle/text/cforall.tex )
-
theses/thierry_delisle_MMath/text/concurrency.tex (moved) (moved from doc/theses/thierry_delisle/text/concurrency.tex )
-
theses/thierry_delisle_MMath/text/frontpgs.tex (moved) (moved from doc/theses/thierry_delisle/text/frontpgs.tex )
-
theses/thierry_delisle_MMath/text/future.tex (moved) (moved from doc/theses/thierry_delisle/text/future.tex )
-
theses/thierry_delisle_MMath/text/internals.tex (moved) (moved from doc/theses/thierry_delisle/text/internals.tex )
-
theses/thierry_delisle_MMath/text/intro.tex (moved) (moved from doc/theses/thierry_delisle/text/intro.tex )
-
theses/thierry_delisle_MMath/text/parallelism.tex (moved) (moved from doc/theses/thierry_delisle/text/parallelism.tex )
-
theses/thierry_delisle_MMath/text/results.tex (moved) (moved from doc/theses/thierry_delisle/text/results.tex )
-
theses/thierry_delisle_MMath/text/together.tex (moved) (moved from doc/theses/thierry_delisle/text/together.tex )
-
theses/thierry_delisle_MMath/thePlan.md (moved) (moved from doc/theses/thierry_delisle/thePlan.md )
-
theses/thierry_delisle_MMath/thesis.tex (moved) (moved from doc/theses/thierry_delisle/thesis.tex )
-
theses/thierry_delisle_MMath/version (moved) (moved from doc/theses/thierry_delisle/version )
-
theses/thierry_delisle_MMath/version.sh (moved) (moved from doc/theses/thierry_delisle/version.sh )
-
theses/thierry_delisle_PhD/code/assert.hpp (added)
-
theses/thierry_delisle_PhD/code/prefetch.cpp (added)
-
theses/thierry_delisle_PhD/code/processor.hpp (added)
-
theses/thierry_delisle_PhD/code/processor_list.hpp (added)
-
theses/thierry_delisle_PhD/code/processor_list_fast.cpp (added)
-
theses/thierry_delisle_PhD/code/processor_list_good.cpp (added)
-
theses/thierry_delisle_PhD/code/relaxed_list.cpp (added)
-
theses/thierry_delisle_PhD/code/relaxed_list.hpp (added)
-
theses/thierry_delisle_PhD/code/utils.hpp (added)
-
user/Makefile (modified) (4 diffs)
-
user/user.tex (modified) (172 diffs)
-
working/exception/impl/Makefile (added)
-
working/exception/impl/exception.c (modified) (4 diffs)
-
working/exception/impl/pdc.s (added)
-
working/exception/impl/pic.s (added)
-
working/exception/impl/test.c (added)
-
working/glen_conversions/float_promo.dia (added)
-
working/glen_conversions/float_promo.png (added)
-
working/glen_conversions/index.html (added)
Legend:
- Unmodified
- Added
- Removed
-
doc/LaTeXmacros/common.tex
r7951100 rb067d9b 11 11 %% Created On : Sat Apr 9 10:06:17 2016 12 12 %% Last Modified By : Peter A. Buhr 13 %% Last Modified On : Mon Mar 19 17:18:23 201814 %% Update Count : 3 7913 %% Last Modified On : Fri May 24 07:59:54 2019 14 %% Update Count : 382 15 15 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 16 16 … … 55 55 \setlength{\parindentlnth}{\parindent} 56 56 57 \newcommand{\LstBasicStyle}[1]{{\lst@basicstyle{#1}}} 57 58 \newcommand{\LstKeywordStyle}[1]{{\lst@basicstyle{\lst@keywordstyle{#1}}}} 58 59 \newcommand{\LstCommentStyle}[1]{{\lst@basicstyle{\lst@commentstyle{#1}}}} … … 60 61 \newlength{\gcolumnposn} % temporary hack because lstlisting does not handle tabs correctly 61 62 \newlength{\columnposn} 62 \setlength{\gcolumnposn}{2. 5in}63 \setlength{\gcolumnposn}{2.75in} 63 64 \setlength{\columnposn}{\gcolumnposn} 64 65 \newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}} -
doc/LaTeXmacros/lstlang.sty
r7951100 rb067d9b 8 8 %% Created On : Sat May 13 16:34:42 2017 9 9 %% Last Modified By : Peter A. Buhr 10 %% Last Modified On : Fri Apr 6 23:44:50 201811 %% Update Count : 2 010 %% Last Modified On : Tue Jan 8 14:40:33 2019 11 %% Update Count : 21 12 12 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 13 13 … … 114 114 _Alignas, _Alignof, __alignof, __alignof__, asm, __asm, __asm__, __attribute, __attribute__, 115 115 auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, __const, __const__, 116 coroutine, disable, dtype, enable, __extension__, exception, fallthrough, fallthru, finally,116 coroutine, disable, dtype, enable, exception, __extension__, fallthrough, fallthru, finally, 117 117 __float80, float80, __float128, float128, forall, ftype, _Generic, _Imaginary, __imag, __imag__, 118 118 inline, __inline, __inline__, __int128, int128, __label__, monitor, mutex, _Noreturn, one_t, or, -
doc/bibliography/pl.bib
r7951100 rb067d9b 21 21 % toplas: ACM Trans. on Prog. Lang. & Sys. 22 22 % tcs: Theoretical Computer Science 23 @string{ieeepds="IEEE Transactions on Parallel and Distributed Systems"} 24 % @string{ieeepds="IEEE Trans. Parallel Distrib. Syst."} 25 @string{ieeese="IEEE Transactions on Software Engineering"} 26 % @string{ieeese="IEEE Trans. Softw. Eng."} 27 @string{spe="Software---\-Practice and Experience"} 28 % @string{spe="Softw. Pract. Exp."} 29 @string{ccpe="Concurrency and Computation: Practice and Experience"} 30 % @string{ccpe="Concurrency Comput: Pract Experience"} 31 @string{sigplan="SIGPLAN Notices"} 32 % @string{sigplan="SIGPLAN Not."} 33 @string{joop="Journal of Object-Oriented Programming"} 34 % @string{joop="J. of Object-Oriented Program."} 23 24 string{ieeepds="IEEE Transactions on Parallel and Distributed Systems"} 25 @string{ieeepds="IEEE Trans. Parallel Distrib. Syst."} 26 string{ieeese="IEEE Transactions on Software Engineering"} 27 @string{ieeese="IEEE Trans. Softw. Eng."} 28 string{spe="Software---\-Practice and Experience"} 29 @string{spe="Softw. Pract. Exper."} 30 string{ccpe="Concurrency and Computation: Practice and Experience"} 31 @string{ccpe="Concurrency Comput.: Pract. Exper."} 32 string{sigplan="SIGPLAN Notices"} 33 @string{sigplan="SIGPLAN Not."} 34 string{joop="Journal of Object-Oriented Programming"} 35 @string{joop="J. of Object-Oriented Program."} 35 36 @string{popl="Conference Record of the ACM Symposium on Principles of Programming Languages"} 36 37 @string{osr="Operating Systems Review"} 37 38 @string{pldi="Programming Language Design and Implementation"} 38 39 @string{toplas="Transactions on Programming Languages and Systems"} 39 @string{mathann="Mathematische Annalen"}40 %@string{mathann="Math. Ann."}40 string{mathann="Mathematische Annalen"} 41 @string{mathann="Math. 
Ann."} 41 42 42 43 % A … … 329 330 contributer = {pabuhr@plg}, 330 331 author = {Nissim Francez}, 331 title = {Another Advantage of Key word Notation for Parameter Communication with Subprograms},332 title = {Another Advantage of Keyword Notation for Parameter Communication with Subprograms}, 332 333 journal = cacm, 333 334 volume = 20, … … 566 567 } 567 568 569 @inproceedings {Qin18, 570 author = {Henry Qin and Qian Li and Jacqueline Speiser and Peter Kraft and John Ousterhout}, 571 title = {Arachne: Core-Aware Thread Management}, 572 booktitle = {13th {USENIX} Symp. on Oper. Sys. Design and Impl. ({OSDI} 18)}, 573 year = {2018}, 574 address = {Carlsbad, CA}, 575 pages = {145-160}, 576 publisher = {{USENIX} Association}, 577 note = {\href{https://www.usenix.org/conference/osdi18/presentation/qin}{https://\-www.usenix.org/\-conference/\-osdi18/\-presentation/\-qin}}, 578 } 579 568 580 @article{Kessels82, 569 581 keywords = {concurrency, critical section}, … … 653 665 author = {Joung, Yuh-Jzer}, 654 666 title = {Asynchronous group mutual exclusion}, 655 journal = {Distributed Computing}, 667 journal = {Dist. Comput.}, 668 optjournal = {Distributed Computing}, 656 669 year = {2000}, 657 670 month = {Nov}, … … 700 713 701 714 % B 715 716 @article{Michael13, 717 contributer = {pabuhr@plg}, 718 author = {Maged M. Michael}, 719 title = {The Balancing Act of Choosing Nonblocking Features}, 720 journal = cacm, 721 volume = 56, 722 number = 9, 723 month = sep, 724 year = 2013, 725 pages = {46--53}, 726 publisher = {ACM}, 727 address = {New York, NY, USA}, 728 } 702 729 703 730 @incollection{beta:old, … … 782 809 time computable inheritance hierarchy. 783 810 }, 784 comment = {811 comment = { 785 812 Classes are predicates; if object {\tt o} is in class {\tt C}, then 786 813 {\tt C} is true of {\tt o}. 
Classes are combined with {\tt :AND}, … … 804 831 year = 2015, 805 832 howpublished= {\href{http://www.boost.org/doc/libs/1_61_0/libs/coroutine/doc/html/index.html} 806 {{http://www.boost.org/\-doc/\-libs/1\_61\_0/\-libs/\-coroutine/\-doc/\-html/\-index.html}}}, 807 optnote = {Accessed: 2016-09}, 833 {http://www.boost.org/\-doc/\-libs/1\_61\_0/\-libs/\-coroutine/\-doc/\-html/\-index.html}}, 834 } 835 836 @misc{BoostThreads, 837 keywords = {Boost Thread Library}, 838 contributer = {pabuhr@plg}, 839 author = {Anthony Williams and Vicente J. Botet Escriba}, 840 title = {Boost Thread Library}, 841 year = 2015, 842 howpublished= {\href{https://www.boost.org/doc/libs/1_61_0/doc/html/thread.html} 843 {https://\-www.boost.org/\-doc/\-libs/\-1\_61\_0/\-doc/\-html/\-thread.html}}, 808 844 } 809 845 … … 816 852 month = oct, 817 853 type = {Diplomarbeit}, 818 note = { {\small\textsf{ftp://\-plg.uwaterloo.ca/\-pub/\-theses/\-KrischerThesis.ps.gz}}},854 note = {\href{https://plg.uwaterloo.ca/~usystem/theses/KrischerThesis.pdf}{https://\-plg.uwaterloo.ca/\-$\sim$usystem/\-theses/\-KrischerThesis.pdf}}, 819 855 } 820 856 … … 907 943 } 908 944 945 @article{Moss18, 946 keywords = {type systems, polymorphism, tuples, Cforall}, 947 contributer = {pabuhr@plg}, 948 author = {Aaron Moss and Robert Schluntz and Peter A. 
Buhr}, 949 title = {\textsf{C}$\mathbf{\forall}$ : Adding Modern Programming Language Features to {C}}, 950 journal = spe, 951 volume = 48, 952 number = 12, 953 month = dec, 954 year = 2018, 955 pages = {2111-2146}, 956 note = {\href{http://dx.doi.org/10.1002/spe.2624}{http://\-dx.doi.org/\-10.1002/\-spe.2624}}, 957 } 958 959 @misc{CforallBenchMarks, 960 contributer = {pabuhr@plg}, 961 key = {Cforall Benchmarks}, 962 author = {{\textsf{C}{$\mathbf{\forall}$} Benchmarks}}, 963 howpublished= {\href{https://plg.uwaterloo.ca/~cforall/benchmark.tar}{https://\-plg.uwaterloo.ca/\-$\sim$cforall/\-benchmark.tar}}, 964 } 965 909 966 @misc{Cforall, 967 contributer = {pabuhr@plg}, 910 968 key = {Cforall}, 911 title = {\textsf{C}{$\mathbf{\forall}$} Features}, 912 howpublished= {\url{https://plg.uwaterloo.ca/~cforall/features}}, 913 optnote = {Accessed: 2018-01-01}, 969 author = {{\textsf{C}{$\mathbf{\forall}$} Features}}, 970 howpublished= {\href{https://plg.uwaterloo.ca/~cforall/features}{https://\-plg.uwaterloo.ca/\-$\sim$cforall/\-features}}, 971 } 972 973 @misc{CFAStackEvaluation, 974 contributer = {a3moss@plg}, 975 author = {Aaron Moss}, 976 title = {\textsf{C}$\mathbf{\forall}$ Stack Evaluation Programs}, 977 year = 2018, 978 howpublished= {\href{https://cforall.uwaterloo.ca/CFAStackEvaluation.zip}{https://cforall.uwaterloo.ca/\-CFAStackEvaluation.zip}}, 914 979 } 915 980 … … 925 990 } 926 991 927 @misc{CFAStackEvaluation, 992 @phdthesis{Moss19, 993 keywords = {type system, generic type, resolution algorithm, type environment, Cforall}, 928 994 author = {Aaron Moss}, 929 title = {\textsf{C}$\mathbf{\forall}$ Stack Evaluation Programs}, 930 year = 2018, 931 howpublished= {\href{https://plg.uwaterloo.ca/~cforall/evaluation.zip}{https://plg.uwaterloo.ca/\-\-$\sim$cforall/\-StackEvaluation.zip}}, 932 optnote = {[Accessed May 2018]}, 933 } 934 935 @article{Moss18, 936 keywords = {concurrency, C++}, 937 contributer = {pabuhr@plg}, 938 author = {Aaron Moss and Robert Schluntz 
and Peter A. Buhr}, 939 title = {\textsf{C}$\mathbf{\forall}$ : Adding Modern Programming Language Features to C}, 940 year = 2018, 941 journal = spe, 942 note = {Accepted, to appear}, 995 title = {\textsf{C}$\mathbf{\forall}$ Type System Implementation}, 996 school = {School of Computer Science, University of Waterloo}, 997 year = 2019, 998 optaddress = {Waterloo, Ontario, Canada, N2L 3G1}, 999 note = {\href{https://uwspace.uwaterloo.ca/handle/10012/14584}{https://\-uwspace.uwaterloo.ca/\-handle/\-10012/\-14584}}, 943 1000 } 944 1001 … … 962 1019 comment = { 963 1020 The evidence given is thin. 964 } 1021 }, 965 1022 } 966 1023 … … 972 1029 journal = {Dr. Dobb's Journal of Software Tools}, 973 1030 year = 1989, 974 month = feb, volume = 14, number = 2, pages = {45-51}, 1031 month = feb, 1032 volume = 14, 1033 number = 2, 1034 pages = {45-51}, 975 1035 comment = { 976 1036 A light-weight multitasking kernel for MS-DOS. A task\_control … … 1048 1108 year = 2006, 1049 1109 edition = {4th}, 1110 } 1111 1112 @techreport{Prokopec11, 1113 keywords = {ctrie, concurrent map}, 1114 contributer = {a3moss@uwaterloo.ca}, 1115 title ={Cache-aware lock-free concurrent hash tries}, 1116 author ={Prokopec, Aleksandar and Bagwell, Phil and Odersky, Martin}, 1117 institution ={EPFL}, 1118 year ={2011} 1050 1119 } 1051 1120 … … 1084 1153 month = oct, 1085 1154 year = 2001, 1086 note = {\href{http://plg.uwaterloo.ca/~cforall/cfa.ps}{http://\-plg.uwaterloo.ca/\- \char`\~cforall/\-cfa.ps}},1155 note = {\href{http://plg.uwaterloo.ca/~cforall/cfa.ps}{http://\-plg.uwaterloo.ca/\-$\sim$cforall/\-cfa.ps}}, 1087 1156 } 1088 1157 … … 1096 1165 year = 1998, 1097 1166 note = {{\small\textsf{ftp://\-plg.uwaterloo.ca/\-pub/\-Cforall/\-refrat.ps.gz}}}, 1167 } 1168 1169 @phdthesis{Norrish98, 1170 title = {C formalised in HOL}, 1171 author = {Norrish, Michael}, 1172 year = {1998}, 1173 school = {University of Cambridge} 1174 } 1175 1176 @inproceedings{Tarditi18, 1177 keywords = {Checked C}, 1178 
contributer = {a3moss@uwaterloo.ca}, 1179 author = {Tarditi, David and Elliott, Archibald Samuel and Ruef, Andrew and Hicks, Michael}, 1180 title = {Checked C: Making C Safe by Extension}, 1181 booktitle = {2018 IEEE Cybersecurity Development (SecDev)}, 1182 publisher = {IEEE}, 1183 year = {2018}, 1184 month = sep, 1185 pages = {53-60}, 1186 url = {https://www.microsoft.com/en-us/research/publication/checkedc-making-c-safe-by-extension/}, 1187 } 1188 1189 @misc{Clang, 1190 keywords = {clang}, 1191 contributer = {a3moss@uwaterloo.ca}, 1192 title = {Clang: a {C} language family frontend for {LLVM}}, 1193 howpublished= {\href{https://clang.llvm.org/}{https://\-clang.llvm.org/}} 1098 1194 } 1099 1195 … … 1183 1279 that is ``compiled''. 1184 1280 }, 1185 comment = {1186 Imagine the program, including the subroutines, spread out over a1187 table, with the compiler dropping Jello on the parts as they are1188 compiled. At first little drops appear in seemingly random places.1189 These get bigger and combine with other drops to form growing1190 globs. When two globs meet, ripples will go out through each as1191 they adjust to each other's presence, although the parts of the1192 globs that formed first are less affected by the ripples. 
When1193 compilation is complete, there is one congealed mass.1194 }1195 1281 } 1196 1282 … … 1206 1292 number = 11, 1207 1293 pages = {853-860}, 1294 } 1295 1296 @inproceedings{Odersky01, 1297 keywords = {Scala}, 1298 contributer = {a3moss@uwaterloo.ca}, 1299 author = {Odersky, Martin and Zenger, Christoph and Zenger, Matthias}, 1300 title = {Colored Local Type Inference}, 1301 booktitle = {Proceedings of the 28th ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages}, 1302 series = {POPL '01}, 1303 year = {2001}, 1304 isbn = {1-58113-336-7}, 1305 location = {London, United Kingdom}, 1306 pages = {41--53}, 1307 numpages = {13}, 1308 url = {http://doi.acm.org/10.1145/360204.360207}, 1309 doi = {10.1145/360204.360207}, 1310 acmid = {360207}, 1311 publisher = {ACM}, 1312 address = {New York, NY, USA}, 1208 1313 } 1209 1314 … … 1268 1373 journal = sigplan, 1269 1374 year = 1986, 1270 month = oct, volume = 21, number = 10, pages = {19-28}, 1375 month = oct, 1376 volume = 21, 1377 number = 10, 1378 pages = {19-28}, 1271 1379 note = {Object Oriented Programming Workshop} 1272 1380 } … … 1388 1496 Process-valued expressions and process variables. Processes have 1389 1497 execution priority: Create {\em process-type-name}(args) [with 1390 priority(p)], 1391 and the priority can be changed on the fly. Complicated guard/ 1392 screen structure on accept: accept {\em transaction}(param names) 1498 priority(p)], and the priority can be changed on the fly. Complicated 1499 guard/screen structure on accept: accept {\em transaction}(param names) 1393 1500 [suchthat (exp)] [by (exp)] [compoundstatement]. Accepts cannot 1394 1501 appear in functions! Can specify timeouts on transaction calls. … … 1425 1532 Many errors in the two solutions. 1426 1533 } 1534 } 1535 1536 @misc{NThreadCode13, 1537 keywords = {N-thread software-solution mutual exclusion}, 1538 contributer = {pabuhr@plg}, 1539 key = {concurrent locking}, 1540 author = {Peter A. Buhr and David Dice and Wim H. 
Hesselink}, 1541 title = {concurrent-locking}, 1542 howpublished= {\href{https://github.com/pabuhr/concurrent-locking}{https://\-github.com/\-pabuhr/\-concurrent-locking}}, 1427 1543 } 1428 1544 … … 1491 1607 } 1492 1608 1493 @ techreport{uC++,1609 @manual{uC++, 1494 1610 keywords = {C++, concurrency, light-weight process, shared memory}, 1495 1611 contributer = {pabuhr@plg}, 1612 key = {uC++}, 1496 1613 author = {Peter A. Buhr}, 1497 1614 title = {$\mu${C}{\kern-.1em\hbox{\large\texttt{+\kern-.25em+}}} Annotated Reference Manual, Version 7.0.0}, 1498 institution = {School of Computer Science, University of Waterloo}, 1499 address = {Waterloo, Ontario, Canada, N2L 3G1}, 1500 month = dec, 1501 year = 2017, 1502 note = {\href{http://plg.uwaterloo.ca/~usystem/pub/uSystem/u++-7.0.0.sh}{http://\-plg.\-uwaterloo.\-ca/\-$\sim$usystem/\-pub/\-uSystem/\-u++-7.0.0.sh}}, 1615 organization= {University of Waterloo}, 1616 month = sep, 1617 year = 2018, 1618 note = {\href{https://plg.uwaterloo.ca/~usystem/pub/uSystem/uC++.pdf}{https://\-plg.uwaterloo.ca/\-$\sim$usystem/\-pub/\-uSystem/uC++.pdf}}, 1503 1619 } 1504 1620 … … 1567 1683 } 1568 1684 1685 @mastersthesis{Sun15, 1686 author = {Sun, Xianda}, 1687 title = {Concurrent High-performance Persistent Hash Table In {J}ava}, 1688 school = {School of Computer Sc., University of Waterloo}, 1689 year = 2015, 1690 optaddress = {Waterloo, Ontario, Canada, N2L 3G1}, 1691 note = {\href{https://uwspace.uwaterloo.ca/handle/10012/10013}{https://\-uwspace.uwaterloo.ca/\-handle/\-10012/\-10013}}, 1692 } 1693 1569 1694 @book{Andrews91:book, 1570 1695 keywords = {concurrency}, … … 1575 1700 address = {Redwood City}, 1576 1701 year = 1991, 1702 } 1703 1704 @inproceedings{Prokopec12, 1705 keywords = {ctrie, hash trie, concurrent map}, 1706 contributer = {a3moss@uwaterloo.ca}, 1707 title = {Concurrent tries with efficient non-blocking snapshots}, 1708 author = {Prokopec, Aleksandar and Bronson, Nathan Grasso and Bagwell, Phil and Odersky, Martin}, 
1709 booktitle = {ACM SIGPLAN Notices}, 1710 volume = {47}, 1711 number = {8}, 1712 pages = {151--160}, 1713 year = {2012}, 1714 organization={ACM} 1577 1715 } 1578 1716 … … 1591 1729 1592 1730 @mastersthesis{Delisle18, 1593 author = {Thierry Delisle }, 1731 keywords = {concurrency, Cforall}, 1732 contributer = {pabuhr@plg}, 1733 author = {Thierry Delisle}, 1594 1734 title = {Concurrency in \textsf{C}$\mathbf{\forall}$}, 1595 1735 school = {School of Computer Science, University of Waterloo}, 1596 1736 year = 2018, 1597 address = {Waterloo, Ontario, Canada, N2L 3G1},1737 optaddress = {Waterloo, Ontario, Canada, N2L 3G1}, 1598 1738 note = {\href{https://uwspace.uwaterloo.ca/handle/10012/12888}{https://\-uwspace.uwaterloo.ca/\-handle/\-10012/\-12888}}, 1739 } 1740 1741 @article{Delisle19, 1742 keywords = {concurrency, Cforall}, 1743 contributer = {pabuhr@plg}, 1744 author = {Thierry Delisle and Peter A. Buhr}, 1745 title = {Advanced Control-flow and Concurrency in \textsf{C}$\mathbf{\forall}$}, 1746 year = 2019, 1747 journal = spe, 1748 pages = {1-33}, 1749 note = {submitted}, 1599 1750 } 1600 1751 … … 1690 1841 howpublished= {\href{https://www.airs.com/blog/archives/428} 1691 1842 {https://www.airs.com/\-blog/\-archives/\-428}}, 1692 optnote = {Accessed: 2018-05},1693 1843 } 1694 1844 … … 1771 1921 author = {Glen Ditchfield}, 1772 1922 title = {Conversions for \textsf{C}$\mathbf{\forall}$}, 1773 note = {\href{http://plg.uwaterloo.ca/~cforall/Conversions/index.html}{http://\-plg.uwaterloo.ca/\- \textasciitildecforall/\-Conversions/\-index.html}},1923 note = {\href{http://plg.uwaterloo.ca/~cforall/Conversions/index.html}{http://\-plg.uwaterloo.ca/\-$\sim$cforall/\-Conversions/\-index.html}}, 1774 1924 month = {Nov}, 1775 1925 year = {2002}, … … 1786 1936 year = 1965, 1787 1937 note = {Reprinted in \cite{Genuys68} pp. 
43--112.} 1938 } 1939 1940 @inproceedings{Adya02, 1941 contributer = {pabuhr@plg}, 1942 author = {Adya, Atul and Howell, Jon and Theimer, Marvin and Bolosky, William J. and Douceur, John R.}, 1943 title = {Cooperative Task Management Without Manual Stack Management}, 1944 booktitle = {Proceedings of the General Track of the Annual Conference on USENIX Annual Technical Conference}, 1945 series = {ATEC '02}, 1946 year = {2002}, 1947 pages = {289-302}, 1948 publisher = {USENIX Association}, 1949 address = {Berkeley, CA, USA}, 1950 } 1951 1952 @misc{CoroutineTS, 1953 keywords = {Coroutines TS, C++20, working draft}, 1954 contributer = {pabuhr@plg}, 1955 author = {Gor Nishanov}, 1956 title = {Merge Coroutines TS into C++20 Working Draft}, 1957 year = 2019, 1958 month = feb, 1959 howpublished= {\href{http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p0912r5.html} 1960 {http://\-www.open-std.org/\-jtc1/\-sc22/\-wg21/\-docs/\-papers/\-2019/p0912r5.html}}, 1961 } 1962 1963 @manual{C++20Coroutine19, 1964 keywords = {coroutine}, 1965 contributer = {pabuhr@plg}, 1966 title = {Coroutines (C++20)}, 1967 organization= {cppreference.com}, 1968 month = apr, 1969 year = 2019, 1970 note = {\href{https://en.cppreference.com/w/cpp/language/coroutines}{https://\-en.cppreference.com/\-w/\-cpp/\-language/\-coroutines}}, 1788 1971 } 1789 1972 … … 1833 2016 } 1834 2017 1835 @article{Moore75, 1836 keywords = {approximation methods, integrated circuits}, 1837 contributer = {pabuhr@plg}, 1838 author = {Gordon E. 
Moore}, 1839 title = {Progress in Digital Integrated Electronics}, 1840 journal = {Technical Digest, International Electron Devices Meeting, IEEE}, 1841 year = 1975, 1842 pages = {11-13}, 2018 @misc{CS343, 2019 keywords = {uC++ teaching}, 2020 contributer = {pabuhr@plg}, 2021 key = {Peter Buhr}, 2022 title = {CS343}, 2023 year = 2018, 2024 howpublished= {\href{https://www.student.cs.uwaterloo.ca/~cs343}{https://\-www.student.cs.uwaterloo.ca/\-$\sim$cs343}}, 1843 2025 } 1844 2026 … … 1899 2081 note = {Svensk Standard SS 63 61 14}, 1900 2082 year = 1987, 1901 abstract = { 1902 Standard for the programming language SIMULA. Written in English. 1903 } 2083 abstract = {Standard for the programming language SIMULA. Written in English.} 2084 } 2085 2086 @article{Galil91, 2087 keywords = {union-find}, 2088 contributer = {a3moss@uwaterloo.ca}, 2089 title = {Data structures and algorithms for disjoint set union problems}, 2090 author = {Galil, Zvi and Italiano, Giuseppe F}, 2091 journal = {ACM Computing Surveys (CSUR)}, 2092 volume = 23, 2093 number = 3, 2094 pages = {319--344}, 2095 year = 1991, 2096 publisher = {ACM}, 1904 2097 } 1905 2098 … … 2041 2234 year = {1998}, 2042 2235 pages = {393-407}, 2236 } 2237 2238 @book{Aho74, 2239 keywords = {algorithms, textbook, union-find}, 2240 contributer = {a3moss@uwaterloo.ca}, 2241 title = {The Design and Analysis of Computer Algorithms}, 2242 author = {Aho, Alfred V and Hopcroft, John E and Ullman, Jeffrey D}, 2243 year = {1974}, 2244 publisher = {Addison-Wesley}, 2245 address = {Reading, MA, USA} 2043 2246 } 2044 2247 … … 2194 2397 } 2195 2398 2399 @article{Ritchie93, 2400 keywords = {C, history}, 2401 contributer = {pabuhr@plg}, 2402 author = {Ritchie, Dennis M.}, 2403 title = {The Development of the {C} Language}, 2404 journal = sigplan, 2405 volume = 28, 2406 number = 3, 2407 month = mar, 2408 year = 1993, 2409 pages = {201--208}, 2410 url = {http://doi.acm.org/10.1145/155360.155580}, 2411 publisher = {ACM}, 2412 address = {New 
York, NY, USA}, 2413 } 2414 2196 2415 @article{design, 2197 2416 keywords = {Smalltalk, designing classes}, … … 2201 2420 journal = joop, 2202 2421 year = 1988, 2203 volume = 1, number = 2, pages = {22-35}, 2422 volume = 1, 2423 number = 2, 2424 pages = {22-35}, 2204 2425 comment = { 2205 2426 Abstract classes represent standard protocols. ``It is better to … … 2286 2507 year = 1990, 2287 2508 pages = {315-323} 2509 } 2510 2511 @misc{Dotty-github, 2512 keywords = {dotty,scala}, 2513 contributer = {a3moss@uwaterloo.ca}, 2514 author = {Martin Odersky}, 2515 title = {Dotty}, 2516 howpublished= {\href{https://github.com/lampepfl/dotty}{https://\-github.com/\-lampepfl/\-dotty}}, 2517 note = {Acessed: 2019-02-22} 2288 2518 } 2289 2519 … … 2394 2624 } 2395 2625 2626 @article{Tarjan75, 2627 keywords = {union-find}, 2628 contributer = {a3moss@uwaterloo.ca}, 2629 author = {Tarjan, Robert Endre}, 2630 title = {Efficiency of a Good But Not Linear Set Union Algorithm}, 2631 journal = {J. ACM}, 2632 issue_date = {April 1975}, 2633 volume = {22}, 2634 number = {2}, 2635 month = apr, 2636 year = {1975}, 2637 issn = {0004-5411}, 2638 pages = {215--225}, 2639 numpages = {11}, 2640 url = {http://doi.acm.org/10.1145/321879.321884}, 2641 doi = {10.1145/321879.321884}, 2642 acmid = {321884}, 2643 publisher = {ACM}, 2644 address = {New York, NY, USA}, 2645 } 2646 2396 2647 @book{Eiffel, 2397 2648 keywords = {Eiffel}, … … 2412 2663 journal = ipl, 2413 2664 year = 1980, 2414 month = apr, volume = 10, number = 3, pages = {120-123}, 2665 month = apr, 2666 volume = 10, 2667 number = 3, 2668 pages = {120-123}, 2415 2669 comment = { 2416 2670 The ``two-pass'' algorithm. 
An upward pass over a parse tree … … 2446 2700 } 2447 2701 2448 @ InProceedings{chambers89a,2702 @inproceedings{chambers89a, 2449 2703 keywords = {maps, delegation}, 2450 2704 author = "Craig Chambers and David Ungar and Elgin Lee", 2451 title = "An Efficient Implementation of {SELF}, a Dynamically-Typed 2452 Object-Oriented Language Based on Prototypes", 2705 title = "An Efficient Implementation of {SELF}, a Dynamically-Typed Object-Oriented Language Based on Prototypes", 2453 2706 crossref = "OOPSLA89", 2454 2707 pages = {49-70} 2455 2708 } 2456 2709 2710 @misc{Turley99, 2711 keywords = {embedded system, micrprocessor}, 2712 contributer = {pabuhr@plg}, 2713 author = {Jim Turley}, 2714 title = {Embedded Processors by the Numbers}, 2715 year = 1999, 2716 month = may, 2717 note = {Electronic Engineering Times}, 2718 howpublished= {\href{https://www.eetimes.com/author.asp?sectionid=36&doc_id=1287712} 2719 {https://\-www.eetimes.com/\-author.asp?sectionid=\-36&doc_id=1287712}}, 2720 } 2721 2457 2722 @article{oop:encapsulation, 2458 2723 keywords = {Encapsulation, Inheritance, Subclasses, Multiple Inheritance}, 2459 2724 contributer = {gjditchfield@plg}, 2460 2725 author = {Alan Snyder}, 2461 title = {Encapsulation and Inheritance in Object-Oriented Programming 2462 Languages}, 2726 title = {Encapsulation and Inheritance in Object-Oriented Programming Languages}, 2463 2727 journal = sigplan, 2464 2728 volume = {21}, number = {11}, … … 2495 2759 title = {Encapsulators: A New Software Paradigm in Smalltalk-80}, 2496 2760 journal = sigplan, 2497 volume = {21}, number = {11}, 2761 volume = {21}, 2762 number = {11}, 2498 2763 pages = {341-346}, 2499 month = nov, year = 1986, 2764 month = nov, 2765 year = 1986, 2500 2766 comment = { 2501 2767 Encapsulators are objects that surround other objects. 
… … 2568 2834 year = 1979, 2569 2835 pages = {24-32} 2836 } 2837 2838 @inproceedings{XaaS, 2839 keywords = {Everything as a Service, Anything as a Service, Cloud computing, SOA}, 2840 contributer = {pabuhr@plg}, 2841 author = {Duan, Yucong and Fu, Guohua and Zhou, Nianjun and Sun, Xiaobing and Narendra, Nanjangud C. and Hu, Bo}, 2842 title = {Everything As a Service (XaaS) on the Cloud: Origins, Current and Future Trends}, 2843 booktitle = {Proceedings of the 2015 IEEE 8th International Conference on Cloud Computing}, 2844 series = {CLOUD'15}, 2845 year = {2015}, 2846 pages = {621--628}, 2847 publisher = {IEEE Computer Society}, 2848 address = {Washington, DC, USA}, 2570 2849 } 2571 2850 … … 2779 3058 title = {Extending Modula-2 to Build Large, Integrated Systems}, 2780 3059 journal = {IEEE Software}, 2781 month = nov, year = 1986, 2782 volume = 3, number = 6, pages = {46-57}, 3060 month = nov, 3061 year = 1986, 3062 volume = 3, 3063 number = 6, 3064 pages = {46-57}, 2783 3065 comment = { 2784 3066 Exceptions can have a parameter. Procedures can declare the … … 2813 3095 year = 2014, 2814 3096 howpublished= {\href{https://gcc.gnu.org/onlinedocs/gcc-4.7.2/gcc/C-Extensions.html}{https://\-gcc.gnu.org/\-onlinedocs/\-gcc-4.7.2/\-gcc/\-C\-Extensions.html}}, 2815 optnote = {Accessed: 2017-04-02},2816 3097 } 2817 3098 … … 2825 3106 year = 1988, 2826 3107 pages = {143-149} 3108 } 3109 3110 @inproceedings{Patwary10, 3111 keywords = {union-find}, 3112 contributer = {a3moss@uwaterloo.ca}, 3113 author = {Patwary, Md. 
Mostofa Ali and Blair, Jean and Manne, Fredrik}, 3114 editor = {Festa, Paola}, 3115 title = {Experiments on Union-Find Algorithms for the Disjoint-Set Data Structure}, 3116 booktitle = {Experimental Algorithms}, 3117 year = 2010, 3118 publisher = {Springer Berlin Heidelberg}, 3119 address = {Berlin, Heidelberg}, 3120 pages = {411--423}, 3121 isbn = {978-3-642-13193-6} 2827 3122 } 2828 3123 … … 2860 3155 keywords = {concurrency, mutual exclusion, performance experiment, software solutions}, 2861 3156 title = {Fast mutual exclusion by the {T}riangle algorithm}, 2862 author = {Wim H. Hesselink and Peter Buhr and David Dice},3157 author = {Wim H. Hesselink and Peter A. Buhr and David Dice}, 2863 3158 journal = ccpe, 2864 3159 volume = 30, … … 2867 3162 month = feb, 2868 3163 publisher = {John Wiley \& Sons}, 2869 note = {\ url{https://doi.org/10.1002/cpe.4183}}3164 note = {\href{https://doi.org/10.1002/cpe.4183}{https://\-doi.org/\-10.1002/\-cpe.4183}} 2870 3165 } 2871 3166 … … 2883 3178 } 2884 3179 3180 @manual{WindowsFibers, 3181 keywords = {threads, fibers}, 3182 contributer = {pabuhr@plg}, 3183 author = {Windows}, 3184 title = {Fibers}, 3185 organization= {Microsoft, Windows Development Center}, 3186 address = {\href{https://docs.microsoft.com/en-us/windows/desktop/ProcThread/fibers}{https://\-docs.microsoft.com/\-en-us/\-windows/\-desktop/\-ProcThread/\-fibers}}, 3187 year = 2018, 3188 } 3189 2885 3190 @inproceedings{F-bound, 2886 3191 keywords = {}, … … 2930 3235 } 2931 3236 3237 @manual{Folly, 3238 keywords = {Folly}, 3239 contributer = {pabuhr@plg}, 3240 author = {Folly}, 3241 title = {Facebook Open-source Library}, 3242 organization= {Facebook}, 3243 address = {\href{https://github.com/facebook/folly}{https://\-github.com/\-facebook/\-folly}}, 3244 year = 2018, 3245 } 3246 3247 @article{Leroy09, 3248 keywords = {C formalization}, 3249 contributer = {a3moss@uwaterloo.ca}, 3250 author = {Leroy, Xavier}, 3251 title = {Formal Verification of a Realistic Compiler}, 
3252 journal = {Commun. ACM}, 3253 issue_date = {July 2009}, 3254 volume = {52}, 3255 number = {7}, 3256 month = jul, 3257 year = {2009}, 3258 issn = {0001-0782}, 3259 pages = {107--115}, 3260 numpages = {9}, 3261 url = {http://doi.acm.org/10.1145/1538788.1538814}, 3262 doi = {10.1145/1538788.1538814}, 3263 acmid = {1538814}, 3264 publisher = {ACM}, 3265 address = {New York, NY, USA}, 3266 } 3267 2932 3268 @manual{Fortran95, 2933 3269 keywords = {Fortran 95}, … … 2950 3286 address = {\href{https://www.iso.org/standard/50459.html}{https://\-www.iso.org/\-standard/\-50459.html}}, 2951 3287 year = 2010, 3288 } 3289 3290 @manual{Fortran18, 3291 keywords = {ISO/IEC Fortran 10}, 3292 contributer = {pabuhr@plg}, 3293 author = {Fortran18}, 3294 title = {Programming Languages -- {Fortran} Part 1:Base Language ISO/IEC 1539-1:2018}, 3295 edition = {4rd}, 3296 publisher = {International Standard Organization}, 3297 address = {\href{https://www.iso.org/standard/72320.html}{https://\-www.iso.org/\-standard/\-72320.html}}, 3298 year = 2018, 2952 3299 } 2953 3300 … … 3170 3517 keywords = {Go programming language}, 3171 3518 contributer = {pabuhr@plg}, 3519 author = {Robert Griesemer and Rob Pike and Ken Thompson}, 3172 3520 title = {{Go} Programming Language}, 3173 author = {Robert Griesemer and Rob Pike and Ken Thompson},3174 3521 organization= {Google}, 3175 3522 year = 2009, … … 3199 3546 year = 2014, 3200 3547 howpublished= {https://developer.gnome.org/gobject/stable/}, 3201 optnote = {Accessed: 2017-04},3202 3548 } 3203 3549 … … 3279 3625 edition = {{S}imon {M}arlow}, 3280 3626 year = 2010, 3281 note = {\href{https://haskell.org/definition/haskell2010.pdf}{https:// haskell.org/\-definition/\-haskell2010.pdf}},3627 note = {\href{https://haskell.org/definition/haskell2010.pdf}{https://\-haskell.org/\-definition/\-haskell2010.pdf}}, 3282 3628 } 3283 3629 … … 3353 3699 } 3354 3700 3701 @article{Hesselink17b, 3702 keywords = {concurrency, mutual exclusion, performance experiment, 
software solutions}, 3703 title = {High-Contention Mutual Exclusion by Elevator Algorithms}, 3704 author = {Peter A. Buhr and David Dice and Wim H. Hesselink}, 3705 journal = ccpe, 3706 volume = 30, 3707 number = 18, 3708 year = 2018, 3709 month = sep, 3710 publisher = {John Wiley \& Sons}, 3711 note = {\href{https://doi.org/10.1002/cpe.4475}{https://\-doi.org/\-10.1002/\-cpe.4475}}, 3712 } 3713 3355 3714 @article{Buhr15a, 3356 3715 keywords = {software solution, mutual exclusion, performance experiment}, … … 3488 3847 publisher = {ACM Press}, 3489 3848 address = {New York, NY, USA}, 3849 } 3850 3851 @article{Galler64, 3852 keywords = {union-find, original}, 3853 contributer = {a3moss@uwaterloo.ca}, 3854 title = {An improved equivalence algorithm}, 3855 author = {Galler, Bernard A and Fisher, Michael J}, 3856 journal = {Communications of the ACM}, 3857 volume = {7}, 3858 number = {5}, 3859 pages = {301--303}, 3860 year = {1964}, 3861 publisher = {ACM} 3862 } 3863 3864 @phdthesis{Barghi18, 3865 keywords = {concurrency, user threads, actors}, 3866 contributer = {pabuhr@plg}, 3867 author = {Saman Barghi}, 3868 title = {Improving the Performance of User-level Runtime Systems for Concurrent Applications}, 3869 school = {School of Computer Science, University of Waterloo}, 3870 year = 2018, 3871 month = sep, 3872 optaddress = {Waterloo, Ontario, Canada, N2L 3G1}, 3873 note = {\href{https://uwspace.uwaterloo.ca/handle/10012/13935}{https://\-uwspace.uwaterloo.ca/\-handle/\-10012/\-13935}}, 3874 } 3875 3876 @article{Swift05, 3877 contributer = {pabuhr@plg}, 3878 author = {Michael M. Swift and Brian N. Bershad and Henry M. 
Levy}, 3879 title = {Improving the Reliability of Commodity Operating Systems}, 3880 journal = tocs, 3881 volume = 23, 3882 number = 1, 3883 month = feb, 3884 year = 2005, 3885 pages = {77-110}, 3490 3886 } 3491 3887 … … 3794 4190 } 3795 4191 4192 @article{Morgado13, 4193 keywords = {expression resolution}, 4194 contributer = {a3moss@uwaterloo.ca}, 4195 title = {Iterative and core-guided {MaxSAT} solving: A survey and assessment}, 4196 author = {Morgado, Antonio and Heras, Federico and Liffiton, Mark and Planes, Jordi and Marques-Silva, Joao}, 4197 journal = {Constraints}, 4198 volume = {18}, 4199 number = {4}, 4200 pages = {478--534}, 4201 year = {2013}, 4202 publisher = {Springer} 4203 } 4204 3796 4205 % J 3797 4206 … … 3817 4226 } 3818 4227 4228 @manual{Java11, 4229 keywords = {Java SE 11}, 4230 contributer = {pabuhr@plg}, 4231 author = {James Gosling and Bill Joy and Guy Steele and Gilad Bracha and Alex Buckley and Daniel Smith}, 4232 title = {{Java} Language Specification}, 4233 publisher = {Oracle}, 4234 month = sep, 4235 year = 2018, 4236 edition = {{J}ava {SE} 11}, 4237 } 4238 4239 @manual{JDK1.1, 4240 keywords = {JDK 1.1}, 4241 contributer = {pabuhr@plg}, 4242 author = {{Multithreading Models}}, 4243 title = {JDK 1.1 for Solaris Developer's Guide}, 4244 publisher = {Oracle}, 4245 address = {\href{https://docs.oracle.com/cd/E19455-01/806-3461/6jck06gqk/index.html#ch2mt-41}{https://\-docs.oracle.com/\-cd/\-E19455-01/\-806-3461/\-6jck06gqk/\-index.html\#ch2mt-41}}, 4246 year = 2010, 4247 } 4248 3819 4249 @manual{JUC, 3820 4250 keywords = {Java concurrency library}, … … 3829 4259 % K 3830 4260 4261 @inproceedings{Buhr96b, 4262 author = {Peter A. Buhr and Martin Karsten and Jun Shih}, 4263 title = {{\small\textsf{KDB}}: A Multi-threaded Debugger for Multi-threaded Applications}, 4264 booktitle = {Proc. of SPDT'96: SIGMETRICS Symp. 
on Parallel and Distributed Tools}, 4265 publisher = {ACM Press}, 4266 address = {Philadelphia, Pennsylvania, U.S.A.}, 4267 month = may, 4268 year = 1996, 4269 pages = {80-87}, 4270 } 4271 3831 4272 @article{Duggan96, 3832 4273 keywords = {concurrency, critical section}, 3833 4274 contributer = {pabuhr@plg}, 3834 author = {Dominic Duggan and G .V. Cormack and John Ophel},4275 author = {Dominic Duggan and Gordon V. Cormack and John Ophel}, 3835 4276 title = {Kinded Type Inference for Parametric Overloading}, 3836 4277 journal = acta, … … 3972 4413 } 3973 4414 4415 @misc{libdill, 4416 keywords = {libdill/libmill Thread Library}, 4417 contributer = {pabuhr@plg}, 4418 author = {Alex Cornejo, et al}, 4419 title = {libdill Thread Library}, 4420 year = 2019, 4421 howpublished= {\href{http://libdill.org/libdill-2.14.tar.gz} 4422 {http://\-libdill.org/\-libdill-2.14.tar.gz}}, 4423 } 4424 3974 4425 @article{Linda, 3975 4426 keywords = {Linda, concurrency}, … … 3985 4436 } 3986 4437 4438 @manual{libmill, 4439 keywords = {libmill}, 4440 contributer = {pabuhr@plg}, 4441 author = {libmill}, 4442 title = {{G}o-style concurrency in {C}, Version 1.18}, 4443 organization= {libmill}, 4444 address = {\href{http://libmill.org/documentation.html}{http://\-libmill.org/\-documentation.html}}, 4445 month = jan, 4446 year = 2017, 4447 } 4448 3987 4449 @book{Weissman67, 3988 4450 keywords = {lisp}, … … 3994 4456 } 3995 4457 4458 @article{Pierce00, 4459 keywords = {Scala}, 4460 contributer = {a3moss@uwaterloo.ca}, 4461 author = {Pierce, Benjamin C. and Turner, David N.}, 4462 title = {Local Type Inference}, 4463 journal = {ACM Trans. Program. Lang. Syst.}, 4464 issue_date = {Jan. 
2000}, 4465 volume = {22}, 4466 number = {1}, 4467 month = jan, 4468 year = {2000}, 4469 issn = {0164-0925}, 4470 pages = {1--44}, 4471 numpages = {44}, 4472 url = {http://doi.acm.org/10.1145/345099.345100}, 4473 doi = {10.1145/345099.345100}, 4474 acmid = {345100}, 4475 publisher = {ACM}, 4476 address = {New York, NY, USA}, 4477 keywords = {polymorphism, subtyping, type inference}, 4478 } 4479 3996 4480 @article{Sundell08, 3997 4481 keywords = {lock free, deque}, … … 4004 4488 year = 2008, 4005 4489 pages = {1008-1020}, 4490 } 4491 4492 @misc{Matsakis17, 4493 keywords = {Rust, Chalk, PROLOG}, 4494 contributer = {a3moss@uwaterloo.ca}, 4495 author = {Nicholas Matsakis}, 4496 title = {Lowering {Rust} traits to logic}, 4497 month = jan, 4498 year = 2017, 4499 howpublished= {\href{http://smallcultfollowing.com/babysteps/blog/2017/01/26/lowering-rust-traits-to-logic/} 4500 {http://smallcultfollowing.com/\-babysteps/\-blog/\-2017/\-01/\-26/\-lowering-rust-traits-to-logic/}}, 4501 optnote = {Accessed: 2019-01}, 4006 4502 } 4007 4503 … … 4020 4516 } 4021 4517 4518 @manual{Lua, 4519 keywords = {Lua}, 4520 contributer = {pabuhr@plg}, 4521 author = {Lua}, 4522 title = {Lua 5.3 Reference Manual}, 4523 address = {\href{https://www.lua.org/manual/5.3}{https://\-www.lua.org/\-manual/\-5.3}}, 4524 year = 2018, 4525 } 4526 4022 4527 % M 4023 4528 … … 4029 4534 publisher = {Motorola}, 4030 4535 year = 1992, 4536 } 4537 4538 @misc{Haberman16, 4539 keywords = {C++ template expansion}, 4540 contributer = {a3moss@uwaterloo.ca}, 4541 author = {Josh Haberman}, 4542 title = {Making arbitrarily-large binaries from fixed-size {C}{\kern-.1em\hbox{\large\texttt{+\kern-.25em+}}} code}, 4543 year = 2016, 4544 howpublished= {\href{http://blog.reverberate.org/2016/01/making-arbitrarily-large-binaries-from.html} 4545 {http://blog.reverberate.org/\-2016/\-01/\-making-arbitrarily-large-binaries-from.html}}, 4546 optnote = {Accessed: 2016-09}, 4031 4547 } 4032 4548 … … 4051 4567 } 4052 4568 4569 
@misc{Marcel, 4570 keywords = {Marcel Thread Library}, 4571 contributer = {pabuhr@plg}, 4572 author = {Gabriel Antoniu, et al}, 4573 title = {Marcel Thread Library}, 4574 year = 2011, 4575 howpublished= {\href{https://gforge.inria.fr/frs/download.php/file/28643/marcel-2.99.3.tar.gz} 4576 {https://\-gforge.inria.fr/\-frs/\-download.php/\-file/\-28643/\-marcel-2.99.3.tar.gz}}, 4577 } 4578 4053 4579 @inproceedings{mprof, 4054 4580 keywords = {malloc}, … … 4067 4593 month = sep, 4068 4594 year = 2006, 4069 note = {\textsf{http://cs.anu.edu.au/\- \char`\~Robin.Garner/\-mmtk-guide.pdf}},4595 note = {\textsf{http://cs.anu.edu.au/\-$\sim$Robin.Garner/\-mmtk-guide.pdf}}, 4070 4596 } 4071 4597 … … 4171 4697 month = sep, 4172 4698 year = 1994, 4173 note = { {\small\textsf{ftp://\-plg.uwaterloo.ca/\-pub/\-uSystem/\-uSystem.ps.gz}}},4699 note = {\href{https://plg.uwaterloo.ca/~usystem/pub/uSystem/uSystem.pdf}{https://\-plg.uwaterloo.ca/\-$\sim$usystem/\-pub/\-uSystem/\-uSystem.pdf}}, 4174 4700 } 4175 4701 … … 4347 4873 } 4348 4874 % editor = {Allen Kent and James G. 
Williams}, 4875 4876 @incollection{MPC, 4877 keywords = {user-level threading}, 4878 contributer = {pabuhr@plg}, 4879 author = {Marc P\'erache and Herv\'e Jourdren and Raymond Namyst}, 4880 title = {MPC: A Unified Parallel Runtime for Clusters of {NUMA} Machines}, 4881 booktitle = {Euro-Par 2008}, 4882 pages = {329-342}, 4883 publisher = {Springer}, 4884 address = {Berlin, Heidelberg}, 4885 year = 2008, 4886 volume = 5168, 4887 series = {Lecture Notes in Computer Science}, 4888 } 4349 4889 4350 4890 @manual{MPI, … … 4515 5055 4516 5056 % N 5057 5058 @techreport{Drepper03, 5059 keywords = {NPTL, threading, 1:1 model}, 5060 contributer = {pabuhr@plg}, 5061 author = {Ulrich Drepper and Ingo Molnar}, 5062 title = {The Native POSIX Thread Library for Linux}, 5063 institution = {Red Hat}, 5064 year = 2003, 5065 note = {\href{http://www.cs.utexas.edu/~witchel/372/lectures/POSIX_Linux_Threading.pdf}{http://www.cs.utexas.edu/\-$\sim$witchel/\-372/\-lectures/\-POSIX\_Linux\_Threading.pdf}}, 5066 } 4517 5067 4518 5068 @article{Haddon77, … … 4641 5191 } 4642 5192 5193 @misc{nginx, 5194 key = {nginx}, 5195 author = {{NGINX}}, 5196 howpublished= {\href{https://www.nginx.com}{https://\-www.nginx.com}}, 5197 } 5198 4643 5199 @article{Herlihy05, 4644 5200 keywords = {Multiprocessors, concurrent data structures, dynamic data structures, memory management, nonblocking synchronization}, … … 4697 5253 year = 1995, 4698 5254 number = 31, 4699 note = {{\small\textsf{http://\-www.cs.wustl.edu/\- \char`\~schmidt/\-PDF/\-IPC\_SAP-92.pdf}}},5255 note = {{\small\textsf{http://\-www.cs.wustl.edu/\-$\sim$schmidt/\-PDF/\-IPC\_SAP-92.pdf}}}, 4700 5256 } 4701 5257 … … 4751 5307 @misc{obj-c-book, 4752 5308 keywords = {objective-c}, 4753 contributor = { a3moss@uwaterloo.ca},5309 contributor = {pabuhr@plg}, 4754 5310 key = {Objective-C}, 4755 title= {Objective-C},5311 author = {Objective-C}, 4756 5312 publisher = {Apple Inc.}, 4757 year = 2015, 4758 howpublished= 
{\href{https://developer.apple.com/library/content/documentation/General/Conceptual/DevPedia-CocoaCore/ObjectiveC.html}{https://developer.apple.com/\-library/\-content/\-documentation/\-General/\-Conceptual/\-DevPedia-\-CocoaCore/\-ObjectiveC.html}}, 4759 optnote = {Accessed: 2018-03} 5313 year = 2014, 5314 howpublished= {\href{https://developer.apple.com/library/archive/documentation/Cocoa/Conceptual/ProgrammingWithObjectiveC}{https://\-developer.apple.com/\-library/archive/\-documentation/\-Cocoa/\-Conceptual/\-ProgrammingWithObjectiveC}}, 4760 5315 } 4761 5316 … … 4766 5321 title = {{X}code 7 Release Notes}, 4767 5322 year = 2015, 4768 howpublished= {\href{https://developer.apple.com/library/content/documentation/Xcode/Conceptual/RN-Xcode-Archive/Chapters/xc7_release_notes.html}{https://developer.apple.com/\-library/\-content/\-documentation/\-Xcode/\-Conceptual/\-RN-Xcode-Archive/\-Chapters/\-xc7\_release\_notes.html}}, 4769 optnote = {Accessed: 2017-04} 5323 howpublished= {\href{https://developer.apple.com/library/content/documentation/Xcode/Conceptual/RN-Xcode-Archive/Chapters/xc7_release_notes.html}{https://\-developer.apple.com/\-library/\-content/\-documentation/\-Xcode/\-Conceptual/\-RN-Xcode-Archive/\-Chapters/\-xc7\_release\_notes.html}}, 4770 5324 } 4771 5325 … … 4893 5447 } 4894 5448 4895 @ techreport{OpenMP,5449 @manual{OpenMP, 4896 5450 keywords = {concurrency, openmp, spmd}, 4897 5451 contributer = {pabuhr@plg}, 4898 author = {OpenMP Architecture Review Board}, 4899 title = {OpenMP Application Program Interface, Version 4.0}, 4900 month = jul, 4901 year = 2013, 4902 note = {\href{http://www.openmp.org/mp-documents/OpenMP4.0.0.pdf}{http://\-www.openmp.org/\-mp-documents/\-OpenMP4.0.0.pdf}}, 4903 } 5452 key = {OpenMP}, 5453 title = {OpenMP Application Program Interface, Version 4.5}, 5454 month = nov, 5455 year = 2015, 5456 note = 
{\href{https://www.openmp.org/wp-content/uploads/openmp-4.5.pdf}{https://\-www.openmp.org/\-wp-content/\-uploads/\-openmp-4.5.pdf}}, 5457 } 5458 5459 @inproceedings{Krebbers14, 5460 keywords = {c formalization}, 5461 contributer = {a3moss@uwaterloo.ca}, 5462 author = {Krebbers, Robbert}, 5463 title = {An Operational and Axiomatic Semantics for Non-determinism and Sequence Points in C}, 5464 booktitle = {Proceedings of the 41st ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages}, 5465 series = {POPL '14}, 5466 year = {2014}, 5467 isbn = {978-1-4503-2544-8}, 5468 location = {San Diego, California, USA}, 5469 pages = {101--112}, 5470 numpages = {12}, 5471 url = {http://doi.acm.org/10.1145/2535838.2535878}, 5472 doi = {10.1145/2535838.2535878}, 5473 acmid = {2535878}, 5474 publisher = {ACM}, 5475 address = {New York, NY, USA}, 5476 } 4904 5477 4905 5478 @book{Deitel04, … … 5266 5839 } 5267 5840 5841 @misc{perf, 5842 contributer = {pabuhr@plg}, 5843 key = {perf}, 5844 author = {perf}, 5845 howpublished= {\href{https://perf.wiki.kernel.org/index.php/Tutorial}{https://\-perf.wiki.kernel.org/\-index.php/\-Tutorial}}, 5846 } 5847 5848 @misc{Verch12, 5849 contributer = {pabuhr@plg}, 5850 author = {Shaun Verch}, 5851 title = {Performance Analysis of 64-Bit Capriccio}, 5852 month = may, 5853 year = 2012, 5854 howpublished= {\href{http://cs.brown.edu/research/pubs/theses/masters/2012/verch.pdf}{http://cs.brown.edu/\-research/\-pubs/\-theses/\-masters/\-2012/\-verch.pdf}}, 5855 } 5856 5268 5857 @article{Anderson90, 5269 5858 keywords = {spin locks, back off, performance}, … … 5277 5866 number = 1, 5278 5867 pages = {6-16}, 5868 } 5869 5870 @inproceedings{Conchon07, 5871 keywords = {persistent array, union-find}, 5872 contributer = {a3moss@uwaterloo.ca}, 5873 title = {A persistent union-find data structure}, 5874 author = {Conchon, Sylvain and Filli{\^a}tre, Jean-Christophe}, 5875 booktitle = {Proceedings of the 2007 workshop on Workshop on ML}, 5876 pages = 
{37--46}, 5877 year = {2007}, 5878 organization= {ACM} 5279 5879 } 5280 5880 … … 5488 6088 } 5489 6089 6090 @inproceedings{Buhr98, 6091 keywords = {profiling, monitoring, visualization}, 6092 contributer = {pabuhr@plg}, 6093 author = {Peter A. Buhr and Robert Denda}, 6094 title = {{$\mu$Profiler} : Profiling User-Level Threads in a Shared-Memory Programming Environment}, 6095 booktitle = {Proc. of 2nd Inter. Symp. on Computing in Object-Oriented Parallel Environments}, 6096 series = {Lecture Notes in Computer Science}, 6097 publisher = {Springer-Verlag}, 6098 volume = 1505, 6099 opteditor = {Dennis Caromel and Rodney R. Oldehoeft and Marydell Tholburn}, 6100 address = {Santa Fe, New Mexico, U.S.A.}, 6101 month = dec, 6102 year = 1998, 6103 pages = {159-166}, 6104 } 6105 5490 6106 @book{PowerPC, 5491 6107 key = {PowerPC processor}, … … 5560 6176 address = {\href{https://www.iso.org/standard/64029.html}{https://\-www.iso.org/\-standard/\-64029.html}}, 5561 6177 year = 2014, 6178 } 6179 6180 @manual{C++17, 6181 keywords = {ISO/IEC C++ 17}, 6182 contributer = {pabuhr@plg}, 6183 key = {C++17}, 6184 title = {{C}{\kern-.1em\hbox{\large\texttt{+\kern-.25em+}}} Programming Language ISO/IEC 14882:2017}, 6185 edition = {5th}, 6186 publisher = {International Standard Organization}, 6187 address = {\href{https://www.iso.org/standard/68564.html}{https://\-www.iso.org/\-standard/\-68564.html}}, 6188 year = 2017, 5562 6189 } 5563 6190 … … 5713 6340 institution = {Carnegie Mellon University}, 5714 6341 year = 1991, 5715 month = feb, number = "CMU-CS-91-106", 6342 month = feb, 6343 number = {CMU-CS-91-106}, 5716 6344 annote = { 5717 6345 Discusses a typed lambda calculus with … … 5753 6381 } 5754 6382 6383 @article{Moore75, 6384 keywords = {approximation methods, integrated circuits}, 6385 contributer = {pabuhr@plg}, 6386 author = {Gordon E. 
Moore}, 6387 title = {Progress in Digital Integrated Electronics}, 6388 journal = {Technical Digest, International Electron Devices Meeting, IEEE}, 6389 year = 1975, 6390 pages = {11-13}, 6391 } 6392 5755 6393 @article{promises, 5756 6394 keywords = {futures, Argus, call streams, rpc}, 5757 6395 contributer = {gjditchfield@plg}, 5758 6396 author = {Barbara Liskov and Liuba Shrira}, 5759 title = {Promises: Linguistic Support for Efficient Asynchronous 5760 Procedure Calls in Distributed Systems}, 6397 title = {Promises: Linguistic Support for Efficient Asynchronous Procedure Calls in Distributed Systems}, 5761 6398 journal = sigplan, 5762 6399 year = 1988, 5763 month = jul, volume = 23, number = 7, pages = {260-267}, 5764 note = {Proceedings of the SIGPLAN '88 Conference on Programming Language 5765 Design and Implementation}, 6400 month = jul, 6401 volume = 23, 6402 number = 7, 6403 pages = {260-267}, 6404 note = {Proceedings of the SIGPLAN '88 Conference on Programming Language Design and Implementation}, 5766 6405 abstract = { 5767 6406 This paper deals with the integration of an efficient asynchronous … … 5813 6452 } 5814 6453 6454 @misc{Pthreads, 6455 keywords = {pthreads, C concurrency}, 6456 contributer = {pabuhr@plg}, 6457 key = {pthreads}, 6458 title = {{Pthread}.h, Specifications Issue 7, {IEEE} Std 1003.1-2017}, 6459 author = {IEEE and {The Open Group}}, 6460 year = 2018, 6461 howpublished= {\href{http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/pthread.h.html} 6462 {http://\-pubs.opengroup.org/\-onlinepubs/\-9699919799/\-basedefs/\-pthread.h.html}}, 6463 } 6464 5815 6465 @manual{Python, 5816 6466 keywords = {Python}, 5817 6467 contributer = {pabuhr@plg}, 5818 title = {Python Reference Manual, Release 2.5},5819 author = {Guido van Rossum},6468 author = {Python}, 6469 title = {Python Language Reference, Release 3.7.2}, 5820 6470 organization= {Python Software Foundation}, 5821 month = sep, 5822 year = 2006, 5823 note = {Fred L. 
Drake, Jr., editor}, 6471 address = {\href{https://docs.python.org/3/reference/index.html}{https://\-docs.python.org/\-3/\-reference/\-index.html}}, 6472 year = 2018, 5824 6473 } 5825 6474 5826 6475 % Q 6476 6477 @inproceedings{Qthreads, 6478 keywords = {user-level threading}, 6479 author = {Kyle B. Wheeler and Richard C. Murphy and Douglas Thain}, 6480 title = {Qthreads: An API for Programming with Millions of Lightweight Threads}, 6481 booktitle = {International Symposium on Parallel and Distributed Processing}, 6482 organization= {IEEE}, 6483 address = {Miami, FL, USA}, 6484 month = apr, 6485 year = 2008, 6486 } 5827 6487 5828 6488 @article{Grossman06, 5829 6489 keywords = {Cyclone, existential types, polymorphism, type variables}, 5830 6490 contributer = {a3moss@plg}, 5831 author = {D .Grossman},6491 author = {Dan Grossman}, 5832 6492 title = {Quantified Types in an Imperative Language}, 5833 6493 journal = toplas, … … 5839 6499 issn = {0164-0925}, 5840 6500 pages = {429-475}, 5841 url = {http://doi.acm.org .proxy.lib.uwaterloo.ca/10.1145/1133651.1133653},6501 url = {http://doi.acm.org/10.1145/1133651.1133653}, 5842 6502 doi = {10.1145/1133651.1133653}, 5843 6503 acmid = {1133653}, … … 5861 6521 } 5862 6522 6523 @manual{Quasar, 6524 keywords = {Quasar}, 6525 contributer = {pabuhr@plg}, 6526 author = {Quasar}, 6527 title = {Quasar Documentation, Release 0.8.0}, 6528 organization= {Parallel Universe}, 6529 address = {\href{http://docs.paralleluniverse.co/quasar}{http://\-docs.paralleluniverse.co/\-quasar}}, 6530 year = 2018, 6531 } 6532 5863 6533 % R 5864 6534 … … 5938 6608 } 5939 6609 6610 @article{Ronsse03, 6611 keywords = {profiling, replay}, 6612 contributer = {pabuhr@plg}, 6613 author = {Michiel Ronsse and De Bosschere, Koen and Mark Christiaens and Jacques Chassin de Kergommeaux and Dieter Kranzlm\"{u}ller}, 6614 title = {Record/Replay for Nondeterministic Program Executions}, 6615 journal = cacm, 6616 month = sep, 6617 year = 2003, 6618 volume = 46, 6619 
number = 9, 6620 pages = {62-67}, 6621 } 6622 5940 6623 @article{Robinson48, 5941 6624 keywords = {recursion, Ackermann function}, … … 5963 6646 } 5964 6647 6648 @article{Hesselink06, 6649 author = {Wim H. Hesselink}, 6650 title = {Refinement Verification of the Lazy Caching Algorithm}, 6651 journal = acta, 6652 year = 2006, 6653 month = oct, 6654 volume = 43, 6655 number = 3, 6656 pages = {195--222}, 6657 } 6658 5965 6659 @article{RemoteRendezvous, 5966 6660 keywords = {rendezvous, concurrency}, … … 5976 6670 } 5977 6671 6672 @mastersthesis{Schuster99, 6673 author = {Oliver Schuster}, 6674 title = {Replay of Concurrent Shared-Memory Programs}, 6675 school = {Universit\"at Mannheim}, 6676 address = {Mannheim, Deutschland}, 6677 year = 1999, 6678 month = apr, 6679 type = {Diplomarbeit}, 6680 note = {\href{https://plg.uwaterloo.ca/~usystem/theses/SchusterThesis.pdf}{https://\-plg.uwaterloo.ca/\-$\sim$usystem/\-theses/\-SchusterThesis.pdf}}, 6681 } 6682 5978 6683 @article{Euclid, 5979 6684 keywords = {Euclid}, … … 6007 6712 school = {School of Computer Science, University of Waterloo}, 6008 6713 year = 2017, 6009 address = {Waterloo, Ontario, Canada, N2L 3G1},6714 optaddress = {Waterloo, Ontario, Canada, N2L 3G1}, 6010 6715 note = {\href{https://uwspace.uwaterloo.ca/handle/10012/11830}{https://\-uwspace.uwaterloo.ca/\-handle/\-10012/\-11830}}, 6011 6716 } … … 6082 6787 contributer = {pabuhr@plg}, 6083 6788 key = {Rust}, 6084 title = { The{R}ust Programming Language},6085 address = {TheRust Project Developers},6789 title = {{R}ust Programming Language}, 6790 optaddress = {Rust Project Developers}, 6086 6791 year = 2015, 6087 6792 note = {\href{https://doc.rust-lang.org/reference.html}{https://\-doc.rust-lang\-.org/\-reference.html}}, 6793 } 6794 6795 @manual{Ruby, 6796 keywords = {Ruby}, 6797 contributer = {pabuhr@plg}, 6798 author = {Ruby}, 6799 title = {Ruby Documentation, Release 2.6.0}, 6800 organization= {Python Software Foundation}, 6801 address = 
{\href{https://www.ruby-lang.org/en/documentation}{https://\-www.ruby-lang.org/\-en/\-documentation}}, 6802 year = 2018, 6088 6803 } 6089 6804 … … 6149 6864 publisher = {Springer}, 6150 6865 note = {Lecture Notes in Computer Science v. 173}, 6866 } 6867 6868 @article{Baker78, 6869 keywords = {Algol display, FUNARG's, Lisp 1.5, deep binding, environment trees, multiprogramming, shallow binding}, 6870 contributer = {a3moss@uwaterloo.ca}, 6871 author = {Baker,Jr., Henry G.}, 6872 title = {Shallow Binding in Lisp 1.5}, 6873 journal = {Commun. ACM}, 6874 issue_date = {July 1978}, 6875 volume = 21, 6876 number = 7, 6877 month = jul, 6878 year = 1978, 6879 issn = {0001-0782}, 6880 pages = {565--569}, 6881 numpages = {5}, 6882 url = {http://doi.acm.org/10.1145/359545.359566}, 6883 doi = {10.1145/359545.359566}, 6884 acmid = {359566}, 6885 publisher = {ACM}, 6886 address = {New York, NY, USA} 6887 } 6888 6889 @article{Baker91, 6890 keywords = {shallow binding, functional arrays}, 6891 contributer = {a3moss@uwaterloo.ca}, 6892 author = {Baker, Henry G.}, 6893 title = {Shallow Binding Makes Functional Arrays Fast}, 6894 journal = {SIGPLAN Not.}, 6895 issue_date = {Aug. 
1991}, 6896 volume = 26, 6897 number = 8, 6898 month = aug, 6899 year = 1991, 6900 issn = {0362-1340}, 6901 pages = {145--147}, 6902 numpages = {3}, 6903 url = {http://doi.acm.org/10.1145/122598.122614}, 6904 doi = {10.1145/122598.122614}, 6905 acmid = {122614}, 6906 publisher = {ACM}, 6907 address = {New York, NY, USA}, 6151 6908 } 6152 6909 … … 6782 7539 } 6783 7540 7541 @article{SysVABI, 7542 keywords = {System V ABI}, 7543 contributer = {a3moss@uwaterloo.ca}, 7544 title = {System {V} application binary interface}, 7545 author = {Matz, Michael and Hubicka, Jan and Jaeger, Andreas and Mitchell, Mark}, 7546 journal = {AMD64 Architecture Processor Supplement, Draft v0}, 7547 volume = {99}, 7548 year = {2013} 7549 } 7550 6784 7551 % T 6785 7552 … … 6834 7601 contributer = {pabuhr@plg}, 6835 7602 key = {TIOBE Index}, 6836 title = {{TIOBE} Index},7603 author = {{TIOBE Index}}, 6837 7604 howpublished= {\href{http://www.tiobe.com/tiobe_index}{http://\-www.tiobe.com/\-tiobe\_index}}, 6838 optnote = {Accessed: 2018-09}, 7605 } 7606 7607 @misc{ThreadModel, 7608 contributer = {pabuhr@plg}, 7609 key = {ThreadModel}, 7610 title = {Thread (computing)}, 7611 author = {{Threading Model}}, 7612 howpublished= {\href{https://en.wikipedia.org/wiki/Thread_(computing)}{https://\-en.wikipedia.org/\-wiki/\-Thread\_(computing)}}, 6839 7613 } 6840 7614 … … 6974 7748 year = 1980 6975 7749 } 7750 7751 @misc{TraceCompass, 7752 contributer = {pabuhr@plg}, 7753 key = {Trace Compass}, 7754 author = {{T}race {C}ompass}, 7755 howpublished= {\href{https://projects.eclipse.org/proposals/trace-compass}{https://\-projects.eclipse.org/\-proposals/\-trace-compass}}, 7756 } 6976 7757 6977 7758 @article{Leroy00, … … 6989 7770 Argues against declaring exceptions on routine definitions. 
6990 7771 }, 7772 } 7773 7774 @techreport{Black90, 7775 title = {Typechecking polymorphism in {Emerald}}, 7776 author = {Black, Andrew P and Hutchinson, Norman C}, 7777 year = {1990}, 7778 institution = {Cambridge Research Laboratory, Digital Equipment Corporation} 6991 7779 } 6992 7780 … … 7109 7897 title = {Usability Challenges in Exception Handling}, 7110 7898 booktitle = {5th International Workshop on Exception Handling (WEH)}, 7111 o rganization= {16th International Symposium on the Foundations of Software Engineering (FSE 16)},7899 optorganization= {16th International Symposium on the Foundations of Software Engineering (FSE 16)}, 7112 7900 address = {Zurich, Switzerland}, 7113 7901 month = jun, … … 7161 7949 year = 2017, 7162 7950 howpublished= {\url{https://wiki.gnome.org/Projects/Vala/Manual}}, 7163 optnote = {Accessed: 2017-04}7164 7951 } 7165 7952 … … 7253 8040 } 7254 8041 8042 @inproceedings{vonBehren03, 8043 keywords = {threads, events, web server}, 8044 contributer = {pabuhr@plg}, 8045 author = {Rob von Behren and Jeremy Condit and Eric Brewer}, 8046 title = {Why Events Are a Bad Idea (for high-concurrency servers)}, 8047 booktitle = {HotOS IX: The 9th Workshop on Hot Topics in Operating Systems}, 8048 publisher = {USENIX Association}, 8049 address = {Lihue, Hawaii, U.S.A.}, 8050 month = may, 8051 year = 2003, 8052 pages = {19-24}, 8053 } 8054 7255 8055 @techreport{Moss90, 7256 8056 keywords = {Swizzling, database}, … … 7297 8097 } 7298 8098 8099 @article{Tarjan84, 8100 keywords = {union-find}, 8101 contributer = {a3moss@uwaterloo.ca}, 8102 author = {Tarjan, Robert E. and van Leeuwen, Jan}, 8103 title = {Worst-case Analysis of Set Union Algorithms}, 8104 journal = {J. 
ACM}, 8105 issue_date = {April 1984}, 8106 volume = 31, 8107 number = 2, 8108 month = mar, 8109 year = 1984, 8110 issn = {0004-5411}, 8111 pages = {245--281}, 8112 numpages = {37}, 8113 url = {http://doi.acm.org/10.1145/62.2160}, 8114 doi = {10.1145/62.2160}, 8115 acmid = {2160}, 8116 publisher = {ACM}, 8117 address = {New York, NY, USA}, 8118 } 8119 7299 8120 % X 7300 8121 7301 8122 % Y 8123 8124 @article{Boehm12, 8125 keywords = {memory model, race condition}, 8126 contributer = {pabuhr@plg}, 8127 author = {Boehm, Hans-J. and Adve, Sarita V.}, 8128 title = {You Don'T Know Jack About Shared Variables or Memory Models}, 8129 journal = cacm, 8130 volume = 55, 8131 number = 2, 8132 month = feb, 8133 year = 2012, 8134 pages = {48--54}, 8135 publisher = {ACM}, 8136 address = {New York, NY, USA}, 8137 } 7302 8138 7303 8139 % Z … … 7317 8153 year = 1986, 7318 8154 editor = {Norman Meyrowitz}, 7319 publisher = sigplan # " 21(11)",8155 publisher = sigplan, 7320 8156 organization= {Association for Computing Machinery}, 7321 8157 address = {Portland, Oregon}, 7322 month = sep # { 29}8158 month = sep, 7323 8159 } 7324 8160 … … 7329 8165 year = 1987, 7330 8166 editor = {Norman Meyrowitz}, 7331 publisher = sigplan # " 22(12)",8167 publisher = sigplan, 7332 8168 organization= {Association for Computing Machinery}, 7333 8169 address = {Orlando, Florida}, 7334 month = oct # { 4--8}8170 month = oct, 7335 8171 } 7336 8172 … … 7341 8177 year = 1988, 7342 8178 editor = {Norman Meyrowitz}, 7343 publisher = sigplan # " 23(11)",8179 publisher = sigplan, 7344 8180 organization= {Association for Computing Machinery}, 7345 8181 address = {San Diego, California}, 7346 month = sep # { 25--30}8182 month = sep, 7347 8183 } 7348 8184 … … 7353 8189 year = 1989, 7354 8190 editor = {Norman Meyrowitz}, 7355 publisher = sigplan # " 24(10)",8191 publisher = sigplan, 7356 8192 organization= {Association for Computing Machinery}, 7357 8193 address = {New Orleans, Louisiana}, 7358 month = oct # { 
1--6}8194 month = oct, 7359 8195 } 7360 8196 … … 7365 8201 year = 1990, 7366 8202 editor = {Norman Meyrowitz}, 7367 publisher = sigplan # " 25(10)",8203 publisher = sigplan, 7368 8204 organization= {Association for Computing Machinery}, 7369 8205 address = {Ottawa, Canada}, 7370 month = oct # { 21--25}8206 month = oct, 7371 8207 } 7372 8208 … … 7377 8213 year = 1991, 7378 8214 editor = {Andreas Paepcke}, 7379 publisher = sigplan # " 26(11)",8215 publisher = sigplan, 7380 8216 organization= {Association for Computing Machinery}, 7381 8217 address = {Phoenix, Arizona}, 7382 month = oct # { 6--11}7383 } 8218 month = oct, 8219 } -
doc/papers/AMA/AMA-stix/ama/WileyNJD-v2.cls
r7951100 rb067d9b 1854 1854 \vspace*{8.5\p@}% 1855 1855 \rightskip0pt\raggedright\hspace*{7\p@}\hbox{\reset@font\abstractfont{\absheadfont#1}}\par\vskip3pt% LN20feb2016 1856 {\abstractfont\baselineskip15pt\ifFWabstract\hsize\textwidth\fi #2\par\vspace*{0\p@}}%1856 {\abstractfont\baselineskip15pt\ifFWabstract\hsize\textwidth\fi\hsize0.68\textwidth#2\par\vspace*{0\p@}}% 1857 1857 \addcontentsline{toc}{section}{\abstractname}% 1858 1858 }}%\abstract{}% … … 1882 1882 }% 1883 1883 % 1884 \def\fundinginfohead#1{\gdef\@fundinginfo@head{#1}}\fundinginfohead{Funding Information}%1884 \def\fundinginfohead#1{\gdef\@fundinginfo@head{#1}}\fundinginfohead{Funding information}% 1885 1885 \def\fundinginfoheadtext#1{\gdef\@fundinginfo@head@text{#1}}\fundinginfoheadtext{}% 1886 1886 \gdef\@fundinginfo{{% … … 2319 2319 %% Keywords %% 2320 2320 2321 \def\keywords#1{\def\@keywords{{\keywordsheadfont\textbf{KEYWORDS :}\par\removelastskip\nointerlineskip\vskip6pt \keywordsfont#1\par}}}\def\@keywords{}%2321 \def\keywords#1{\def\@keywords{{\keywordsheadfont\textbf{KEYWORDS}\par\removelastskip\nointerlineskip\vskip6pt \keywordsfont#1\par}}}\def\@keywords{}% 2322 2322 2323 2323 \def\@fnsymbol#1{\ifcase#1\or \dagger\or \ddagger\or … … 2444 2444 \@afterheading} 2445 2445 2446 \renewcommand\section{\@startsection{section}{1}{\z@}{-2 7pt \@plus -2pt \@minus -2pt}{12\p@}{\sectionfont}}%2447 \renewcommand\subsection{\@startsection{subsection}{2}{\z@}{-2 3pt \@plus -2pt \@minus -2pt}{5\p@}{\subsectionfont}}%2446 \renewcommand\section{\@startsection{section}{1}{\z@}{-25pt \@plus -2pt \@minus -2pt}{12\p@}{\sectionfont}}% 2447 \renewcommand\subsection{\@startsection{subsection}{2}{\z@}{-22pt \@plus -2pt \@minus -2pt}{5\p@}{\subsectionfont}}% 2448 2448 \renewcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}{-20pt \@plus -2pt \@minus -2pt}{2\p@}{\subsubsectionfont}}% 2449 2449 % … … 3406 3406 \hskip-\parindentvalue\fbox{\vbox{\noindent\@jnlcitation}}}% 3407 3407 3408 
\AtEndDocument{\ifappendixsec\else\printjnlcitation\fi}%3408 %\AtEndDocument{\ifappendixsec\else\printjnlcitation\fi}% 3409 3409 3410 3410 %% Misc math macros %% -
doc/papers/OOPSLA17/Makefile
r7951100 rb067d9b 33 33 34 34 DOCUMENT = generic_types.pdf 35 BASE = ${basename ${DOCUMENT}} 35 36 36 37 # Directives # … … 41 42 42 43 clean : 43 @rm -frv ${DOCUMENT} ${ basename ${DOCUMENT}}.ps ${Build}44 @rm -frv ${DOCUMENT} ${BASE}.ps ${Build} 44 45 45 46 # File Dependencies # 46 47 47 ${DOCUMENT} : ${ basename ${DOCUMENT}}.ps48 ${DOCUMENT} : ${BASE}.ps 48 49 ps2pdf $< 49 50 50 ${ basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi51 ${BASE}.ps : ${BASE}.dvi 51 52 dvips ${Build}/$< -o $@ 52 53 53 ${basename ${DOCUMENT}}.dvi : Makefile ${Build} ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ../../bibliography/pl.bib 54 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 55 ../../bibliography/pl.bib | ${Build} 54 56 # Must have *.aux file containing citations for bibtex 55 57 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi … … 63 65 ## Define the default recipes. 64 66 65 ${Build} :67 ${Build} : 66 68 mkdir -p ${Build} 67 69 … … 69 71 gnuplot -e Build="'${Build}/'" evaluation/timing.gp 70 72 71 %.tex : %.fig 73 %.tex : %.fig | ${Build} 72 74 fig2dev -L eepic $< > ${Build}/$@ 73 75 74 %.ps : %.fig 76 %.ps : %.fig | ${Build} 75 77 fig2dev -L ps $< > ${Build}/$@ 76 78 77 %.pstex : %.fig 79 %.pstex : %.fig | ${Build} 78 80 fig2dev -L pstex $< > ${Build}/$@ 79 81 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
doc/papers/concurrency/Makefile
r7951100 rb067d9b 4 4 Figures = figures 5 5 Macros = ../AMA/AMA-stix/ama 6 TeXLIB = .: annex:../../LaTeXmacros:${Macros}:${Build}:../../bibliography:6 TeXLIB = .:../../LaTeXmacros:${Macros}:${Build}: 7 7 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build} 8 BibTeX = BIBINPUTS= ${TeXLIB}&& export BIBINPUTS && bibtex8 BibTeX = BIBINPUTS=annex:../../bibliography: && export BIBINPUTS && bibtex 9 9 10 10 MAKEFLAGS = --no-print-directory # --silent … … 15 15 SOURCES = ${addsuffix .tex, \ 16 16 Paper \ 17 style/style \18 style/cfa-format \19 17 } 20 18 21 19 FIGURES = ${addsuffix .tex, \ 22 monitor \23 ext_monitor \24 20 int_monitor \ 25 21 dependency \ 22 RunTimeStructure \ 26 23 } 27 24 28 25 PICTURES = ${addsuffix .pstex, \ 26 FullProdConsStack \ 27 FullCoroutinePhases \ 28 corlayout \ 29 CondSigWait \ 30 monitor \ 31 ext_monitor \ 29 32 system \ 30 33 monitor_structs \ … … 59 62 dvips ${Build}/$< -o $@ 60 63 61 ${BASE}.dvi : Makefile ${B uild} ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \62 annex/local.bib ../../bibliography/pl.bib 64 ${BASE}.dvi : Makefile ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 65 annex/local.bib ../../bibliography/pl.bib | ${Build} 63 66 # Must have *.aux file containing citations for bibtex 64 67 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi 65 ${BibTeX} ${Build}/${basename $@}68 -${BibTeX} ${Build}/${basename $@} 66 69 # Some citations reference others so run again to resolve these citations 67 70 ${LaTeX} ${basename $@}.tex 68 ${BibTeX} ${Build}/${basename $@}71 -${BibTeX} ${Build}/${basename $@} 69 72 # Run again to finish citations 70 73 ${LaTeX} ${basename $@}.tex … … 72 75 ## Define the default recipes. 73 76 74 ${Build} :77 ${Build} : 75 78 mkdir -p ${Build} 76 79 77 ${BASE}.out.ps :${Build}80 ${BASE}.out.ps : | ${Build} 78 81 ln -fs ${Build}/Paper.out.ps . 
79 82 80 WileyNJD-AMA.bst :83 WileyNJD-AMA.bst : 81 84 ln -fs ../AMA/AMA-stix/ama/WileyNJD-AMA.bst . 82 85 83 %.tex : %.fig ${Build}86 %.tex : %.fig | ${Build} 84 87 fig2dev -L eepic $< > ${Build}/$@ 85 88 86 %.ps : %.fig ${Build}89 %.ps : %.fig | ${Build} 87 90 fig2dev -L ps $< > ${Build}/$@ 88 91 89 %.pstex : %.fig ${Build}92 %.pstex : %.fig | ${Build} 90 93 fig2dev -L pstex $< > ${Build}/$@ 91 94 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
doc/papers/concurrency/Paper.tex
r7951100 rb067d9b 3 3 \articletype{RESEARCH ARTICLE}% 4 4 5 \received{26 April 2016} 6 \revised{6 June 2016} 7 \accepted{6 June 2016} 5 % Referees 6 % Doug Lea, dl@cs.oswego.edu, SUNY Oswego 7 % Herb Sutter, hsutter@microsoft.com, Microsoft Corp 8 % Gor Nishanov, gorn@microsoft.com, Microsoft Corp 9 % James Noble, kjx@ecs.vuw.ac.nz, Victoria University of Wellington, School of Engineering and Computer Science 10 11 \received{XXXXX} 12 \revised{XXXXX} 13 \accepted{XXXXX} 8 14 9 15 \raggedbottom … … 15 21 \usepackage{epic,eepic} 16 22 \usepackage{xspace} 23 \usepackage{enumitem} 17 24 \usepackage{comment} 18 25 \usepackage{upquote} % switch curled `'" to straight … … 21 28 \renewcommand{\thesubfigure}{(\Alph{subfigure})} 22 29 \captionsetup{justification=raggedright,singlelinecheck=false} 23 \usepackage{siunitx} 24 \sisetup{binary-units=true} 30 \usepackage{dcolumn} % align decimal points in tables 31 \usepackage{capt-of} 32 \setlength{\multicolsep}{6.0pt plus 2.0pt minus 1.5pt} 25 33 26 34 \hypersetup{breaklinks=true} … … 32 40 \renewcommand{\linenumberfont}{\scriptsize\sffamily} 33 41 42 \renewcommand{\topfraction}{0.8} % float must be greater than X of the page before it is forced onto its own page 43 \renewcommand{\bottomfraction}{0.8} % float must be greater than X of the page before it is forced onto its own page 44 \renewcommand{\floatpagefraction}{0.8} % float must be greater than X of the page before it is forced onto its own page 34 45 \renewcommand{\textfraction}{0.0} % the entire page maybe devoted to floats with no text on the page at all 35 46 … … 132 143 \makeatother 133 144 134 \newenvironment{cquote}{% 135 \list{}{\lstset{resetmargins=true,aboveskip=0pt,belowskip=0pt}\topsep=3pt\parsep=0pt\leftmargin=\parindentlnth\rightmargin\leftmargin}% 136 \item\relax 137 }{% 138 \endlist 139 }% cquote 145 \newenvironment{cquote} 146 
{\list{}{\lstset{resetmargins=true,aboveskip=0pt,belowskip=0pt}\topsep=3pt\parsep=0pt\leftmargin=\parindentlnth\rightmargin\leftmargin}% 147 \item\relax} 148 {\endlist} 149 150 %\newenvironment{cquote}{% 151 %\list{}{\lstset{resetmargins=true,aboveskip=0pt,belowskip=0pt}\topsep=3pt\parsep=0pt\leftmargin=\parindentlnth\rightmargin\leftmargin}% 152 %\item\relax% 153 %}{% 154 %\endlist% 155 %}% cquote 140 156 141 157 % CFA programming language, based on ANSI C (with some gcc additions) … … 145 161 auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, __const, __const__, 146 162 coroutine, disable, dtype, enable, exception, __extension__, fallthrough, fallthru, finally, 147 __float80, float80, __float128, float128, forall, ftype, _Generic, _Imaginary, __imag, __imag__,163 __float80, float80, __float128, float128, forall, ftype, generator, _Generic, _Imaginary, __imag, __imag__, 148 164 inline, __inline, __inline__, __int128, int128, __label__, monitor, mutex, _Noreturn, one_t, or, 149 165 otype, restrict, __restrict, __restrict__, __signed, __signed__, _Static_assert, thread, 150 166 _Thread_local, throw, throwResume, timeout, trait, try, ttype, typeof, __typeof, __typeof__, 151 167 virtual, __volatile, __volatile__, waitfor, when, with, zero_t}, 152 moredirectives={defined,include_next}% 168 moredirectives={defined,include_next}, 169 % replace/adjust listing characters that look bad in sanserif 170 literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1 171 {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1 172 {<}{\textrm{\textless}}1 {>}{\textrm{\textgreater}}1 173 {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1ex][c]{\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex{\textrm{\textgreater}}}2, 153 174 } 154 175 … … 167 188 aboveskip=4pt, % spacing above/below code block 168 189 belowskip=3pt, 169 % replace/adjust listing characters that 
look bad in sanserif170 literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1171 {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1172 {<}{\textrm{\textless}}1 {>}{\textrm{\textgreater}}1173 {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1ex][c]{\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex{\textrm{\textgreater}}}2,174 190 moredelim=**[is][\color{red}]{`}{`}, 175 191 }% lstset … … 197 213 } 198 214 215 % Go programming language: https://github.com/julienc91/listings-golang/blob/master/listings-golang.sty 216 \lstdefinelanguage{Golang}{ 217 morekeywords=[1]{package,import,func,type,struct,return,defer,panic,recover,select,var,const,iota,}, 218 morekeywords=[2]{string,uint,uint8,uint16,uint32,uint64,int,int8,int16,int32,int64, 219 bool,float32,float64,complex64,complex128,byte,rune,uintptr, error,interface}, 220 morekeywords=[3]{map,slice,make,new,nil,len,cap,copy,close,true,false,delete,append,real,imag,complex,chan,}, 221 morekeywords=[4]{for,break,continue,range,goto,switch,case,fallthrough,if,else,default,}, 222 morekeywords=[5]{Println,Printf,Error,}, 223 sensitive=true, 224 morecomment=[l]{//}, 225 morecomment=[s]{/*}{*/}, 226 morestring=[b]', 227 morestring=[b]", 228 morestring=[s]{`}{`}, 229 % replace/adjust listing characters that look bad in sanserif 230 literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1 231 {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1 232 {<}{\textrm{\textless}}1 {>}{\textrm{\textgreater}}1 233 {<-}{\makebox[2ex][c]{\textrm{\textless}\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}}2, 234 } 235 199 236 \lstnewenvironment{cfa}[1][] 200 237 {\lstset{#1}} … … 207 244 {} 208 245 \lstnewenvironment{Go}[1][] 209 {\lstset{#1}} 246 {\lstset{language=Golang,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}} 247 {} 248 
\lstnewenvironment{python}[1][] 249 {\lstset{language=python,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}} 210 250 {} 211 251 … … 222 262 } 223 263 224 \title{\texorpdfstring{Concurrency in \protect\CFA}{Concurrency in Cforall}} 264 \newbox\myboxA 265 \newbox\myboxB 266 \newbox\myboxC 267 \newbox\myboxD 268 269 \title{\texorpdfstring{Advanced Control-flow and Concurrency in \protect\CFA}{Advanced Control-flow in Cforall}} 225 270 226 271 \author[1]{Thierry Delisle} … … 232 277 \corres{*Peter A. Buhr, Cheriton School of Computer Science, University of Waterloo, 200 University Avenue West, Waterloo, ON, N2L 3G1, Canada. \email{pabuhr{\char`\@}uwaterloo.ca}} 233 278 234 \fundingInfo{Natural Sciences and Engineering Research Council of Canada}279 % \fundingInfo{Natural Sciences and Engineering Research Council of Canada} 235 280 236 281 \abstract[Summary]{ 237 \CFA is a modern, polymorphic, \emph{non-object-oriented} extension of the C programming language. 238 This paper discusses the design of the concurrency and parallelism features in \CFA, and the concurrent runtime-system. 239 These features are created from scratch as ISO C lacks concurrency, relying largely on the pthreads library. 240 Coroutines and lightweight (user) threads are introduced into the language. 241 In addition, monitors are added as a high-level mechanism for mutual exclusion and synchronization. 242 A unique contribution is allowing multiple monitors to be safely acquired simultaneously. 243 All features respect the expectations of C programmers, while being fully integrate with the \CFA polymorphic type-system and other language features. 244 Finally, experimental results are presented to compare the performance of the new features with similar mechanisms in other concurrent programming-languages. 282 \CFA is a polymorphic, non-object-oriented, concurrent, backwards-compatible extension of the C programming language. 
283 This paper discusses the design philosophy and implementation of its advanced control-flow and concurrent/parallel features, along with the supporting runtime written in \CFA. 284 These features are created from scratch as ISO C has only low-level and/or unimplemented concurrency, so C programmers continue to rely on library features like pthreads. 285 \CFA introduces modern language-level control-flow mechanisms, like generators, coroutines, user-level threading, and monitors for mutual exclusion and synchronization. 286 % Library extension for executors, futures, and actors are built on these basic mechanisms. 287 The runtime provides significant programmer simplification and safety by eliminating spurious wakeup and monitor barging. 288 The runtime also ensures multiple monitors can be safely acquired \emph{simultaneously} (deadlock free), and this feature is fully integrated with all monitor synchronization mechanisms. 289 All control-flow features integrate with the \CFA polymorphic type-system and exception handling, while respecting the expectations and style of C programmers. 290 Experimental results show comparable performance of the new features with similar mechanisms in other concurrent programming languages. 245 291 }% 246 292 247 \keywords{ concurrency, parallelism, coroutines, threads, monitors, runtime, C, Cforall}293 \keywords{generator, coroutine, concurrency, parallelism, thread, monitor, runtime, C, \CFA (Cforall)} 248 294 249 295 … … 256 302 \section{Introduction} 257 303 258 This paper provides a minimal concurrency \newterm{Application Program Interface} (API) that is simple, efficient and can be used to build other concurrency features. 259 While the simplest concurrency system is a thread and a lock, this low-level approach is hard to master. 260 An easier approach for programmers is to support higher-level constructs as the basis of concurrency. 
261 Indeed, for highly productive concurrent programming, high-level approaches are much more popular~\cite{Hochstein05}. 262 Examples of high-level approaches are task (work) based~\cite{TBB}, implicit threading~\cite{OpenMP}, monitors~\cite{Java}, channels~\cite{CSP,Go}, and message passing~\cite{Erlang,MPI}. 263 264 The following terminology is used. 265 A \newterm{thread} is a fundamental unit of execution that runs a sequence of code and requires a stack to maintain state. 266 Multiple simultaneous threads give rise to \newterm{concurrency}, which requires locking to ensure safe communication and access to shared data. 267 % Correspondingly, concurrency is defined as the concepts and challenges that occur when multiple independent (sharing memory, timing dependencies, \etc) concurrent threads are introduced. 268 \newterm{Locking}, and by extension \newterm{locks}, are defined as a mechanism to prevent progress of threads to provide safety. 269 \newterm{Parallelism} is running multiple threads simultaneously. 270 Parallelism implies \emph{actual} simultaneous execution, where concurrency only requires \emph{apparent} simultaneous execution. 271 As such, parallelism only affects performance, which is observed through differences in space and/or time at runtime. 272 273 Hence, there are two problems to be solved: concurrency and parallelism. 274 While these two concepts are often combined, they are distinct, requiring different tools~\cite[\S~2]{Buhr05a}. 275 Concurrency tools handle synchronization and mutual exclusion, while parallelism tools handle performance, cost and resource utilization. 276 277 The proposed concurrency API is implemented in a dialect of C, called \CFA. 278 The paper discusses how the language features are added to the \CFA translator with respect to parsing, semantic, and type checking, and the corresponding high-performance runtime-library to implement the concurrency features. 
279 280 281 \section{\CFA Overview} 282 283 The following is a quick introduction to the \CFA language, specifically tailored to the features needed to support concurrency. 284 Extended versions and explanation of the following code examples are available at the \CFA website~\cite{Cforall} or in Moss~\etal~\cite{Moss18}. 285 286 \CFA is an extension of ISO-C, and hence, supports all C paradigms. 287 %It is a non-object-oriented system-language, meaning most of the major abstractions have either no runtime overhead or can be opted out easily. 288 Like C, the basics of \CFA revolve around structures and routines. 289 Virtually all of the code generated by the \CFA translator respects C memory layouts and calling conventions. 290 While \CFA is not an object-oriented language, lacking the concept of a receiver (\eg @this@) and nominal inheritance-relationships, C does have a notion of objects: ``region of data storage in the execution environment, the contents of which can represent values''~\cite[3.15]{C11}. 291 While some \CFA features are common in object-oriented programming-languages, they are an independent capability allowing \CFA to adopt them while retaining a procedural paradigm. 292 293 294 \subsection{References} 295 296 \CFA provides multi-level rebindable references, as an alternative to pointers, which significantly reduces syntactic noise. 
297 \begin{cfa} 298 int x = 1, y = 2, z = 3; 299 int * p1 = &x, ** p2 = &p1, *** p3 = &p2, $\C{// pointers to x}$ 300 `&` r1 = x, `&&` r2 = r1, `&&&` r3 = r2; $\C{// references to x}$ 301 int * p4 = &z, `&` r4 = z; 302 303 *p1 = 3; **p2 = 3; ***p3 = 3; // change x 304 r1 = 3; r2 = 3; r3 = 3; // change x: implicit dereferences *r1, **r2, ***r3 305 **p3 = &y; *p3 = &p4; // change p1, p2 306 `&`r3 = &y; `&&`r3 = &`&`r4; // change r1, r2: cancel implicit dereferences (&*)**r3, (&(&*)*)*r3, &(&*)r4 307 \end{cfa} 308 A reference is a handle to an object, like a pointer, but is automatically dereferenced by the specified number of levels. 309 Referencing (address-of @&@) a reference variable cancels one of the implicit dereferences, until there are no more implicit references, after which normal expression behaviour applies. 310 311 312 \subsection{\texorpdfstring{\protect\lstinline{with} Statement}{with Statement}} 313 \label{s:WithStatement} 314 315 Heterogeneous data is aggregated into a structure/union. 316 To reduce syntactic noise, \CFA provides a @with@ statement (see Pascal~\cite[\S~4.F]{Pascal}) to elide aggregate field-qualification by opening a scope containing the field identifiers. 317 \begin{cquote} 318 \vspace*{-\baselineskip}%??? 319 \lstDeleteShortInline@% 320 \begin{cfa} 321 struct S { char c; int i; double d; }; 322 struct T { double m, n; }; 323 // multiple aggregate parameters 324 \end{cfa} 325 \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}} 326 \begin{cfa} 327 void f( S & s, T & t ) { 328 `s.`c; `s.`i; `s.`d; 329 `t.`m; `t.`n; 330 } 331 \end{cfa} 332 & 333 \begin{cfa} 334 void f( S & s, T & t ) `with ( s, t )` { 335 c; i; d; // no qualification 336 m; n; 337 } 338 \end{cfa} 339 \end{tabular} 340 \lstMakeShortInline@% 341 \end{cquote} 342 Object-oriented programming languages only provide implicit qualification for the receiver. 
343 344 In detail, the @with@ statement has the form: 345 \begin{cfa} 346 $\emph{with-statement}$: 347 'with' '(' $\emph{expression-list}$ ')' $\emph{compound-statement}$ 348 \end{cfa} 349 and may appear as the body of a routine or nested within a routine body. 350 Each expression in the expression-list provides a type and object. 351 The type must be an aggregate type. 352 (Enumerations are already opened.) 353 The object is the implicit qualifier for the open structure-fields. 354 All expressions in the expression list are open in parallel within the compound statement, which is different from Pascal, which nests the openings from left to right. 355 356 357 \subsection{Overloading} 358 359 \CFA maximizes the ability to reuse names via overloading to aggressively address the naming problem. 360 Both variables and routines may be overloaded, where selection is based on types, and number of returns (as in Ada~\cite{Ada}) and arguments. 361 \begin{cquote} 362 \vspace*{-\baselineskip}%??? 363 \lstDeleteShortInline@% 364 \begin{cfa} 365 // selection based on type 366 \end{cfa} 367 \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}} 368 \begin{cfa} 369 const short int `MIN` = -32768; 370 const int `MIN` = -2147483648; 371 const long int `MIN` = -9223372036854775808L; 372 \end{cfa} 373 & 374 \begin{cfa} 375 short int si = `MIN`; 376 int i = `MIN`; 377 long int li = `MIN`; 378 \end{cfa} 379 \end{tabular} 380 \begin{cfa} 381 // selection based on type and number of parameters 382 \end{cfa} 383 \begin{tabular}{@{}l@{\hspace{2.7\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}} 384 \begin{cfa} 385 void `f`( void ); 386 void `f`( char ); 387 void `f`( int, double ); 388 \end{cfa} 389 & 390 \begin{cfa} 391 `f`(); 392 `f`( 'a' ); 393 `f`( 3, 5.2 ); 394 \end{cfa} 395 \end{tabular} 396 \begin{cfa} 397 // selection based on type and number of returns 398 \end{cfa} 399 \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}} 400 
\begin{cfa} 401 char `f`( int ); 402 double `f`( int ); 403 [char, double] `f`( int ); 404 \end{cfa} 405 & 406 \begin{cfa} 407 char c = `f`( 3 ); 408 double d = `f`( 3 ); 409 [d, c] = `f`( 3 ); 410 \end{cfa} 411 \end{tabular} 412 \lstMakeShortInline@% 413 \end{cquote} 414 Overloading is important for \CFA concurrency since the runtime system relies on creating different types to represent concurrency objects. 415 Therefore, overloading is necessary to prevent the need for long prefixes and other naming conventions to prevent name clashes. 416 As seen in Section~\ref{basics}, routine @main@ is heavily overloaded. 417 418 Variable overloading is useful in the parallel semantics of the @with@ statement for fields with the same name: 419 \begin{cfa} 420 struct S { int `i`; int j; double m; } s; 421 struct T { int `i`; int k; int m; } t; 422 with ( s, t ) { 423 j + k; $\C{// unambiguous, s.j + t.k}$ 424 m = 5.0; $\C{// unambiguous, s.m = 5.0}$ 425 m = 1; $\C{// unambiguous, t.m = 1}$ 426 int a = m; $\C{// unambiguous, a = t.m }$ 427 double b = m; $\C{// unambiguous, b = s.m}$ 428 int c = `s.i` + `t.i`; $\C{// unambiguous, qualification}$ 429 (double)m; $\C{// unambiguous, cast s.m}$ 430 } 431 \end{cfa} 432 For parallel semantics, both @s.i@ and @t.i@ are visible the same type, so only @i@ is ambiguous without qualification. 433 434 435 \subsection{Operators} 436 437 Overloading also extends to operators. 438 Operator-overloading syntax creates a routine name with an operator symbol and question marks for the operands: 439 \begin{cquote} 440 \lstDeleteShortInline@% 441 \begin{tabular}{@{}ll@{\hspace{\parindentlnth}}|@{\hspace{\parindentlnth}}l@{}} 442 \begin{cfa} 443 int ++? (int op); 444 int ?++ (int op); 445 int `?+?` (int op1, int op2); 446 int ?<=?(int op1, int op2); 447 int ?=? 
(int & op1, int op2); 448 int ?+=?(int & op1, int op2); 449 \end{cfa} 450 & 451 \begin{cfa} 452 // unary prefix increment 453 // unary postfix increment 454 // binary plus 455 // binary less than 456 // binary assignment 457 // binary plus-assignment 458 \end{cfa} 459 & 460 \begin{cfa} 461 struct S { int i, j; }; 462 S `?+?`( S op1, S op2) { // add two structures 463 return (S){op1.i + op2.i, op1.j + op2.j}; 464 } 465 S s1 = {1, 2}, s2 = {2, 3}, s3; 466 s3 = s1 `+` s2; // compute sum: s3 == {2, 5} 467 \end{cfa} 468 \end{tabular} 469 \lstMakeShortInline@% 470 \end{cquote} 471 While concurrency does not use operator overloading directly, it provides an introduction for the syntax of constructors. 472 473 474 \subsection{Parametric Polymorphism} 475 \label{s:ParametricPolymorphism} 476 477 The signature feature of \CFA is parametric-polymorphic routines~\cite{} with routines generalized using a @forall@ clause (giving the language its name), which allow separately compiled routines to support generic usage over multiple types. 
478 For example, the following sum routine works for any type that supports construction from 0 and addition \commenttd{constructors have not been introduced yet.}: 479 \begin{cfa} 480 forall( otype T | { void `?{}`( T *, zero_t ); T `?+?`( T, T ); } ) // constraint type, 0 and + 481 T sum( T a[$\,$], size_t size ) { 482 `T` total = { `0` }; $\C{// initialize by 0 constructor}$ 483 for ( size_t i = 0; i < size; i += 1 ) 484 total = total `+` a[i]; $\C{// select appropriate +}$ 485 return total; 486 } 487 S sa[5]; 488 int i = sum( sa, 5 ); $\C{// use S's 0 construction and +}$ 489 \end{cfa} 490 491 \CFA provides \newterm{traits} to name a group of type assertions, where the trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each routine declaration: 492 \begin{cfa} 493 trait `sumable`( otype T ) { 494 void `?{}`( T &, zero_t ); $\C{// 0 literal constructor}$ 495 T `?+?`( T, T ); $\C{// assortment of additions}$ 496 T ?+=?( T &, T ); 497 T ++?( T & ); 498 T ?++( T & ); 499 }; 500 forall( otype T `| sumable( T )` ) $\C{// use trait}$ 501 T sum( T a[$\,$], size_t size ); 502 \end{cfa} 503 504 Assertions can be @otype@ or @dtype@. 505 @otype@ refers to a ``complete'' object, \ie an object has a size, default constructor, copy constructor, destructor and an assignment operator. 506 @dtype@ only guarantees an object has a size and alignment. 507 508 Using the return type for discrimination, it is possible to write a type-safe @alloc@ based on the C @malloc@: 509 \begin{cfa} 510 forall( dtype T | sized(T) ) T * alloc( void ) { return (T *)malloc( sizeof(T) ); } 511 int * ip = alloc(); $\C{// select type and size from left-hand side}$ 512 double * dp = alloc(); 513 struct S {...} * sp = alloc(); 514 \end{cfa} 515 where the return type supplies the type/size of the allocation, which is impossible in most type systems. 
516 517 518 \subsection{Constructors / Destructors} 519 520 Object lifetime is a challenge in non-managed programming languages. 521 \CFA responds with \CC-like constructors and destructors: 522 \begin{cfa} 523 struct VLA { int len, * data; }; $\C{// variable length array of integers}$ 524 void ?{}( VLA & vla ) with ( vla ) { len = 10; data = alloc( len ); } $\C{// default constructor}$ 525 void ?{}( VLA & vla, int size, char fill ) with ( vla ) { len = size; data = alloc( len, fill ); } // initialization 526 void ?{}( VLA & vla, VLA other ) { vla.len = other.len; vla.data = other.data; } $\C{// copy, shallow}$ 527 void ^?{}( VLA & vla ) with ( vla ) { free( data ); } $\C{// destructor}$ 528 { 529 VLA x, y = { 20, 0x01 }, z = y; $\C{// z points to y}$ 530 // x{}; y{ 20, 0x01 }; z{ z, y }; 531 ^x{}; $\C{// deallocate x}$ 532 x{}; $\C{// reallocate x}$ 533 z{ 5, 0xff }; $\C{// reallocate z, not pointing to y}$ 534 ^y{}; $\C{// deallocate y}$ 535 y{ x }; $\C{// reallocate y, points to x}$ 536 x{}; $\C{// reallocate x, not pointing to y}$ 537 // ^z{}; ^y{}; ^x{}; 538 } 539 \end{cfa} 540 Like \CC, construction is implicit on allocation (stack/heap) and destruction is implicit on deallocation. 541 The object and all their fields are constructed/destructed. 542 \CFA also provides @new@ and @delete@, which behave like @malloc@ and @free@, in addition to constructing and destructing objects: 543 \begin{cfa} 544 { struct S s = {10}; $\C{// allocation, call constructor}$ 545 ... 546 } $\C{// deallocation, call destructor}$ 547 struct S * s = new(); $\C{// allocation, call constructor}$ 548 ... 549 delete( s ); $\C{// deallocation, call destructor}$ 550 \end{cfa} 551 \CFA concurrency uses object lifetime as a means of synchronization and/or mutual exclusion. 552 553 554 \section{Concurrency Basics}\label{basics} 555 556 At its core, concurrency is based on multiple call-stacks and scheduling threads executing on these stacks. 
557 Multiple call stacks (or contexts) and a single thread of execution, called \newterm{coroutining}~\cite{Conway63,Marlin80}, does \emph{not} imply concurrency~\cite[\S~2]{Buhr05a}. 558 In coroutining, the single thread is self-scheduling across the stacks, so execution is deterministic, \ie given fixed inputs, the execution path to the outputs is fixed and predictable. 559 A \newterm{stackless} coroutine executes on the caller's stack~\cite{Python} but this approach is restrictive, \eg preventing modularization and supporting only iterator/generator-style programming; 560 a \newterm{stackfull} coroutine executes on its own stack, allowing full generality. 561 Only stackfull coroutines are a stepping-stone to concurrency. 562 563 The transition to concurrency, even for execution with a single thread and multiple stacks, occurs when coroutines also context switch to a scheduling oracle, introducing non-determinism from the coroutine perspective~\cite[\S~3]{Buhr05a}. 564 Therefore, a minimal concurrency system is possible using coroutines (see Section \ref{coroutine}) in conjunction with a scheduler to decide where to context switch next. 565 The resulting execution system now follows a cooperative threading-model, called \newterm{non-preemptive scheduling}. 566 567 Because the scheduler is special, it can either be a stackless or stackfull coroutine. \commenttd{I dislike this sentence, it seems imply 1-step vs 2-step but also seems to say that some kind of coroutine is required, which is not the case.} 568 For stackless, the scheduler performs scheduling on the stack of the current coroutine and switches directly to the next coroutine, so there is one context switch. 569 For stackfull, the current coroutine switches to the scheduler, which performs scheduling, and it then switches to the next coroutine, so there are two context switches. 570 A stackfull scheduler is often used for simplicity and security, even through there is a slightly higher runtime-cost. 
\commenttd{I'm not a fan of the fact that we don't quantify this but yet imply it is negligeable.} 571 572 Regardless of the approach used, a subset of concurrency related challenges start to appear. 573 For the complete set of concurrency challenges to occur, the missing feature is \newterm{preemption}, where context switching occurs randomly between any two instructions, often based on a timer interrupt, called \newterm{preemptive scheduling}. 574 While a scheduler introduces uncertainty in the order of execution, preemption introduces uncertainty where context switches occur. 575 Interestingly, uncertainty is necessary for the runtime (operating) system to give the illusion of parallelism on a single processor and increase performance on multiple processors. 576 The reason is that only the runtime has complete knowledge about resources and how to best utilized them. 577 However, the introduction of unrestricted non-determinism results in the need for \newterm{mutual exclusion} and \newterm{synchronization} to restrict non-determinism for correctness; 578 otherwise, it is impossible to write meaningful programs. 579 Optimal performance in concurrent applications is often obtained by having as much non-determinism as correctness allows. 580 581 582 \subsection{\protect\CFA's Thread Building Blocks} 583 584 An important missing feature in C is threading\footnote{While the C11 standard defines a ``threads.h'' header, it is minimal and defined as optional. 585 As such, library support for threading is far from widespread. 586 At the time of writing the paper, neither \protect\lstinline|gcc| nor \protect\lstinline|clang| support ``threads.h'' in their standard libraries.}. 587 In modern programming languages, a lack of threading is unacceptable~\cite{Sutter05, Sutter05b}, and therefore existing and new programming languages must have tools for writing efficient concurrent programs to take advantage of parallelism. 
588 As an extension of C, \CFA needs to express these concepts in a way that is as natural as possible to programmers familiar with imperative languages. 589 Furthermore, because C is a system-level language, programmers expect to choose precisely which features they need and which cost they are willing to pay. 590 Hence, concurrent programs should be written using high-level mechanisms, and only step down to lower-level mechanisms when performance bottlenecks are encountered. 591 592 593 \subsection{Coroutines: A Stepping Stone}\label{coroutine} 594 595 While the focus of this discussion is concurrency and parallelism, it is important to address coroutines, which are a significant building block of a concurrency system. 596 Coroutines are generalized routines allowing execution to be temporarily suspend and later resumed. 597 Hence, unlike a normal routine, a coroutine may not terminate when it returns to its caller, allowing it to be restarted with the values and execution location present at the point of suspension. 598 This capability is accomplish via the coroutine's stack, where suspend/resume context switch among stacks. 599 Because threading design-challenges are present in coroutines, their design effort is relevant, and this effort can be easily exposed to programmers giving them a useful new programming paradigm because a coroutine handles the class of problems that need to retain state between calls, \eg plugins, device drivers, and finite-state machines. 600 Therefore, the core \CFA coroutine-API for has two fundamental features: independent call-stacks and @suspend@/@resume@ operations. 601 602 For example, a problem made easier with coroutines is unbounded generators, \eg generating an infinite sequence of Fibonacci numbers, where Figure~\ref{f:C-fibonacci} shows conventional approaches for writing a Fibonacci generator in C. 
603 \begin{displaymath} 604 \mathsf{fib}(n) = \left \{ 605 \begin{array}{ll} 606 0 & n = 0 \\ 607 1 & n = 1 \\ 608 \mathsf{fib}(n-1) + \mathsf{fib}(n-2) & n \ge 2 \\ 609 \end{array} 610 \right. 611 \end{displaymath} 612 Figure~\ref{f:GlobalVariables} illustrates the following problems: 613 unique unencapsulated global variables necessary to retain state between calls; 614 only one Fibonacci generator; 615 execution state must be explicitly retained via explicit state variables. 616 Figure~\ref{f:ExternalState} addresses these issues: 617 unencapsulated program global variables become encapsulated structure variables; 618 unique global variables are replaced by multiple Fibonacci objects; 619 explicit execution state is removed by precomputing the first two Fibonacci numbers and returning $\mathsf{fib}(n-2)$. 304 This paper discusses the design philosophy and implementation of advanced language-level control-flow and concurrent/parallel features in \CFA~\cite{Moss18,Cforall} and its runtime, which is written entirely in \CFA. 305 \CFA is a modern, polymorphic, non-object-oriented\footnote{ 306 \CFA has features often associated with object-oriented programming languages, such as constructors, destructors, virtuals and simple inheritance. 307 However, functions \emph{cannot} be nested in structures, so there is no lexical binding between a structure and set of functions (member/method) implemented by an implicit \lstinline@this@ (receiver) parameter.}, 308 backwards-compatible extension of the C programming language. 309 In many ways, \CFA is to C as Scala~\cite{Scala} is to Java, providing a \emph{research vehicle} for new typing and control-flow capabilities on top of a highly popular programming language allowing immediate dissemination. 310 Within the \CFA framework, new control-flow features are created from scratch because ISO \Celeven defines only a subset of the \CFA extensions, where the overlapping features are concurrency~\cite[\S~7.26]{C11}. 
311 However, \Celeven concurrency is largely wrappers for a subset of the pthreads library~\cite{Butenhof97,Pthreads}, and \Celeven and pthreads concurrency is simple, based on thread fork/join in a function and mutex/condition locks, which is low-level and error-prone; 312 no high-level language concurrency features are defined. 313 Interestingly, almost a decade after publication of the \Celeven standard, neither gcc-8, clang-9 nor msvc-19 (most recent versions) support the \Celeven include @threads.h@, indicating little interest in the C11 concurrency approach (possibly because the effort to add concurrency to \CC). 314 Finally, while the \Celeven standard does not state a threading model, the historical association with pthreads suggests implementations would adopt kernel-level threading (1:1)~\cite{ThreadModel}. 315 316 In contrast, there has been a renewed interest during the past decade in user-level (M:N, green) threading in old and new programming languages. 317 As multi-core hardware became available in the 1980/90s, both user and kernel threading were examined. 318 Kernel threading was chosen, largely because of its simplicity and fit with the simpler operating systems and hardware architectures at the time, which gave it a performance advantage~\cite{Drepper03}. 319 Libraries like pthreads were developed for C, and the Solaris operating-system switched from user (JDK 1.1~\cite{JDK1.1}) to kernel threads. 320 As a result, languages like Java, Scala, Objective-C~\cite{obj-c-book}, \CCeleven~\cite{C11}, and C\#~\cite{Csharp} adopt the 1:1 kernel-threading model, with a variety of presentation mechanisms. 321 From 2000 onwards, languages like Go~\cite{Go}, Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, D~\cite{D}, and \uC~\cite{uC++,uC++book} have championed the M:N user-threading model, and many user-threading libraries have appeared~\cite{Qthreads,MPC,Marcel}, including putting green threads back into Java~\cite{Quasar}. 
322 The main argument for user-level threading is that it is lighter weight than kernel threading (locking and context switching do not cross the kernel boundary), so there is less restriction on programming styles that encourage large numbers of threads performing medium work units to facilitate load balancing by the runtime~\cite{Verch12}. 323 As well, user-threading facilitates a simpler concurrency approach using thread objects that leverage sequential patterns versus events with call-backs~\cite{Adya02,vonBehren03}. 324 Finally, performant user-threading implementations (both time and space) meet or exceed direct kernel-threading implementations, while achieving the programming advantages of high concurrency levels and safety. 325 326 A further effort over the past two decades is the development of language memory models to deal with the conflict between language features and compiler/hardware optimizations, \ie some language features are unsafe in the presence of aggressive sequential optimizations~\cite{Buhr95a,Boehm05}. 327 The consequence is that a language must provide sufficient tools to program around safety issues, as inline and library code is all sequential to the compiler. 328 One solution is low-level qualifiers and functions (\eg @volatile@ and atomics) allowing \emph{programmers} to explicitly write safe (race-free~\cite{Boehm12}) programs. 329 A safer solution is high-level language constructs so the \emph{compiler} knows the optimization boundaries, and hence, provides implicit safety. 330 This problem is best known with respect to concurrency, but applies to other complex control-flow, like exceptions\footnote{ 331 \CFA exception handling will be presented in a separate paper. 
332 The key feature that dovetails with this paper is nonlocal exceptions allowing exceptions to be raised across stacks, with synchronous exceptions raised among coroutines and asynchronous exceptions raised among threads, similar to that in \uC~\cite[\S~5]{uC++} 333 } and coroutines. 334 Finally, language solutions allow matching constructs with language paradigm, \ie imperative and functional languages often have different presentations of the same concept to fit their programming model. 335 336 Finally, it is important for a language to provide safety over performance \emph{as the default}, allowing careful reduction of safety for performance when necessary. 337 Two concurrency violations of this philosophy are \emph{spurious wakeup} (random wakeup~\cite[\S~8]{Buhr05a}) and \emph{barging}\footnote{ 338 The notion of competitive succession instead of direct handoff, \ie a lock owner releases the lock and an arriving thread acquires it ahead of preexisting waiter threads. 339 } (signals-as-hints~\cite[\S~8]{Buhr05a}), where one is a consequence of the other, \ie once there is spurious wakeup, signals-as-hints follow. 340 However, spurious wakeup is \emph{not} a foundational concurrency property~\cite[\S~8]{Buhr05a}, it is a performance design choice. 341 Similarly, signals-as-hints are often a performance decision. 342 We argue removing spurious wakeup and signals-as-hints make concurrent programming significantly safer because it removes local non-determinism and matches with programmer expectation. 343 (Author experience teaching concurrency is that students are highly confused by these semantics.) 344 Clawing back performance, when local non-determinism is unimportant, should be an option not the default. 
345 346 \begin{comment} 347 Most augmented traditional (Fortran 18~\cite{Fortran18}, Cobol 14~\cite{Cobol14}, Ada 12~\cite{Ada12}, Java 11~\cite{Java11}) and new languages (Go~\cite{Go}, Rust~\cite{Rust}, and D~\cite{D}), except \CC, diverge from C with different syntax and semantics, only interoperate indirectly with C, and are not systems languages, for those with managed memory. 348 As a result, there is a significant learning curve to move to these languages, and C legacy-code must be rewritten. 349 While \CC, like \CFA, takes an evolutionary approach to extend C, \CC's constantly growing complex and interdependent features-set (\eg objects, inheritance, templates, etc.) mean idiomatic \CC code is difficult to use from C, and C programmers must expend significant effort learning \CC. 350 Hence, rewriting and retraining costs for these languages, even \CC, are prohibitive for companies with a large C software-base. 351 \CFA with its orthogonal feature-set, its high-performance runtime, and direct access to all existing C libraries circumvents these problems. 352 \end{comment} 353 354 \CFA embraces user-level threading, language extensions for advanced control-flow, and safety as the default. 355 We present comparative examples so the reader can judge if the \CFA control-flow extensions are better and safer than those in other concurrent, imperative programming languages, and perform experiments to show the \CFA runtime is competitive with other similar mechanisms. 356 The main contributions of this work are: 357 \begin{itemize}[topsep=3pt,itemsep=1pt] 358 \item 359 language-level generators, coroutines and user-level threading, which respect the expectations of C programmers. 360 \item 361 monitor synchronization without barging, and the ability to safely acquire multiple monitors \emph{simultaneously} (deadlock free), while seamlessly integrating these capabilities with all monitor synchronization mechanisms. 
362 \item 363 providing statically type-safe interfaces that integrate with the \CFA polymorphic type-system and other language features. 364 % \item 365 % library extensions for executors, futures, and actors built on the basic mechanisms. 366 \item 367 a runtime system with no spurious wakeup. 368 \item 369 a dynamic partitioning mechanism to segregate the execution environment for specialized requirements. 370 % \item 371 % a non-blocking I/O library 372 \item 373 experimental results showing comparable performance of the new features with similar mechanisms in other programming languages. 374 \end{itemize} 375 376 Section~\ref{s:StatefulFunction} begins advanced control by introducing sequential functions that retain data and execution state between calls, which produces constructs @generator@ and @coroutine@. 377 Section~\ref{s:Concurrency} begins concurrency, or how to create (fork) and destroy (join) a thread, which produces the @thread@ construct. 378 Section~\ref{s:MutualExclusionSynchronization} discusses the two mechanisms to restrict nondeterminism when controlling shared access to resources (mutual exclusion) and timing relationships among threads (synchronization). 379 Section~\ref{s:Monitor} shows how both mutual exclusion and synchronization are safely embedded in the @monitor@ and @thread@ constructs. 380 Section~\ref{s:CFARuntimeStructure} describes the large-scale mechanism to structure (cluster) threads and virtual processors (kernel threads). 381 Section~\ref{s:Performance} uses a series of microbenchmarks to compare \CFA threading with pthreads, Java OpenJDK-9, Go 1.12.6 and \uC 7.0.0. 382 383 384 \section{Stateful Function} 385 \label{s:StatefulFunction} 386 387 The stateful function is an old idea~\cite{Conway63,Marlin80} that is new again~\cite{C++20Coroutine19}, where execution is temporarily suspended and later resumed, \eg plugin, device driver, finite-state machine. 
388 Hence, a stateful function may not end when it returns to its caller, allowing it to be restarted with the data and execution location present at the point of suspension. 389 This capability is accomplished by retaining a data/execution \emph{closure} between invocations. 390 If the closure is fixed size, we call it a \emph{generator} (or \emph{stackless}), and its control flow is restricted, \eg suspending outside the generator is prohibited. 391 If the closure is variable size, we call it a \emph{coroutine} (or \emph{stackful}), and as the name implies, often implemented with a separate stack with no programming restrictions. 392 Hence, refactoring a stackless coroutine may require changing it to stackful. 393 A foundational property of all \emph{stateful functions} is that resume/suspend \emph{do not} cause incremental stack growth, \ie resume/suspend operations are remembered through the closure not the stack. 394 As well, activating a stateful function is \emph{asymmetric} or \emph{symmetric}, identified by resume/suspend (no cycles) and resume/resume (cycles). 395 A fixed closure activated by modified call/return is faster than a variable closure activated by context switching. 396 Additionally, any storage management for the closure (especially in unmanaged languages, \ie no garbage collection) must also be factored into design and performance. 397 Therefore, selecting between stackless and stackful semantics is a tradeoff between programming requirements and performance, where stackless is faster and stackful is more general. 398 Note, creation cost is amortized across usage, so activation cost is usually the dominant factor. 
620 399 621 400 \begin{figure} 622 401 \centering 623 \newbox\myboxA624 402 \begin{lrbox}{\myboxA} 625 403 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 626 `int f1, f2, state = 1;` // single global variables 627 int fib() { 628 int fn; 629 `switch ( state )` { // explicit execution state 630 case 1: fn = 0; f1 = fn; state = 2; break; 631 case 2: fn = 1; f2 = f1; f1 = fn; state = 3; break; 632 case 3: fn = f1 + f2; f2 = f1; f1 = fn; break; 633 } 404 typedef struct { 405 int fn1, fn; 406 } Fib; 407 #define FibCtor { 1, 0 } 408 int fib( Fib * f ) { 409 410 411 412 int fn = f->fn; f->fn = f->fn1; 413 f->fn1 = f->fn + fn; 634 414 return fn; 415 635 416 } 636 417 int main() { 637 638 for ( int i = 0; i < 10; i += 1 ) {639 printf( "%d \n", fib() );640 }418 Fib f1 = FibCtor, f2 = FibCtor; 419 for ( int i = 0; i < 10; i += 1 ) 420 printf( "%d %d\n", 421 fib( &f1 ), fib( &f2 ) ); 641 422 } 642 423 \end{cfa} 643 424 \end{lrbox} 644 425 645 \newbox\myboxB646 426 \begin{lrbox}{\myboxB} 647 427 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 648 #define FIB_INIT `{ 0, 1 }` 649 typedef struct { int f2, f1; } Fib; 650 int fib( Fib * f ) { 651 652 int ret = f->f2; 653 int fn = f->f1 + f->f2; 654 f->f2 = f->f1; f->f1 = fn; 655 656 return ret; 428 `generator` Fib { 429 int fn1, fn; 430 }; 431 432 void `main(Fib & fib)` with(fib) { 433 434 [fn1, fn] = [1, 0]; 435 for () { 436 `suspend;` 437 [fn1, fn] = [fn, fn + fn1]; 438 439 } 657 440 } 658 441 int main() { 659 Fib f1 = FIB_INIT, f2 = FIB_INIT; 660 for ( int i = 0; i < 10; i += 1 ) { 661 printf( "%d %d\n", fib( &f1 ), fib( &f2 ) ); 442 Fib f1, f2; 443 for ( 10 ) 444 sout | `resume( f1 )`.fn 445 | `resume( f2 )`.fn; 446 } 447 \end{cfa} 448 \end{lrbox} 449 450 \begin{lrbox}{\myboxC} 451 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 452 typedef struct { 453 int fn1, fn; void * `next`; 454 } Fib; 455 #define FibCtor { 1, 0, NULL } 456 Fib * comain( Fib * f ) { 457 if ( f->next ) goto *f->next; 458 f->next = &&s1; 459 for ( ;; ) { 460 return f; 461 s1:; 
int fn = f->fn + f->fn1; 462 f->fn1 = f->fn; f->fn = fn; 662 463 } 663 464 } 465 int main() { 466 Fib f1 = FibCtor, f2 = FibCtor; 467 for ( int i = 0; i < 10; i += 1 ) 468 printf("%d %d\n",comain(&f1)->fn, 469 comain(&f2)->fn); 470 } 664 471 \end{cfa} 665 472 \end{lrbox} 666 473 667 \subfloat[3 States: global variables]{\label{f:GlobalVariables}\usebox\myboxA} 668 \qquad 669 \subfloat[1 State: external variables]{\label{f:ExternalState}\usebox\myboxB} 670 \caption{C Fibonacci Implementations} 671 \label{f:C-fibonacci} 474 \subfloat[C asymmetric generator]{\label{f:CFibonacci}\usebox\myboxA} 475 \hspace{3pt} 476 \vrule 477 \hspace{3pt} 478 \subfloat[\CFA asymmetric generator]{\label{f:CFAFibonacciGen}\usebox\myboxB} 479 \hspace{3pt} 480 \vrule 481 \hspace{3pt} 482 \subfloat[C generator implementation]{\label{f:CFibonacciSim}\usebox\myboxC} 483 \caption{Fibonacci (output) asymmetric generator} 484 \label{f:FibonacciAsymmetricGenerator} 672 485 673 486 \bigskip 674 487 675 \newbox\myboxA676 488 \begin{lrbox}{\myboxA} 677 489 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 678 `coroutine` Fib { int fn; }; 679 void main( Fib & fib ) with( fib ) { 680 int f1, f2; 681 fn = 0; f1 = fn; `suspend()`; 682 fn = 1; f2 = f1; f1 = fn; `suspend()`; 683 for ( ;; ) { 684 fn = f1 + f2; f2 = f1; f1 = fn; `suspend()`; 490 `generator Fmt` { 491 char ch; 492 int g, b; 493 }; 494 void ?{}( Fmt & fmt ) { `resume(fmt);` } // constructor 495 void ^?{}( Fmt & f ) with(f) { $\C[1.75in]{// destructor}$ 496 if ( g != 0 || b != 0 ) sout | nl; } 497 void `main( Fmt & f )` with(f) { 498 for () { $\C{// until destructor call}$ 499 for ( ; g < 5; g += 1 ) { $\C{// groups}$ 500 for ( ; b < 4; b += 1 ) { $\C{// blocks}$ 501 `suspend;` $\C{// wait for character}$ 502 while ( ch == '\n' ) `suspend;` // ignore 503 sout | ch; // newline 504 } sout | " "; // block spacer 505 } sout | nl; // group newline 685 506 } 686 507 } 687 int next( Fib & fib ) with( fib ) {688 `resume( fib );`689 return fn;690 }691 508 int 
main() { 692 Fib f1, f2; 693 for ( int i = 1; i <= 10; i += 1 ) { 694 sout | next( f1 ) | next( f2 ) | endl; 509 Fmt fmt; $\C{// fmt constructor called}$ 510 for () { 511 sin | fmt.ch; $\C{// read into generator}$ 512 if ( eof( sin ) ) break; 513 `resume( fmt );` 695 514 } 696 } 515 516 } $\C{// fmt destructor called}\CRT$ 697 517 \end{cfa} 698 518 \end{lrbox} 699 \newbox\myboxB 519 700 520 \begin{lrbox}{\myboxB} 701 521 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 702 `coroutine` Fib { int ret; }; 703 void main( Fib & f ) with( fib ) { 704 int fn, f1 = 1, f2 = 0; 522 typedef struct { 523 void * next; 524 char ch; 525 int g, b; 526 } Fmt; 527 void comain( Fmt * f ) { 528 if ( f->next ) goto *f->next; 529 f->next = &&s1; 705 530 for ( ;; ) { 706 ret = f2; 707 708 fn = f1 + f2; f2 = f1; f1 = fn; `suspend();` 531 for ( f->g = 0; f->g < 5; f->g += 1 ) { 532 for ( f->b = 0; f->b < 4; f->b += 1 ) { 533 return; 534 s1:; while ( f->ch == '\n' ) return; 535 printf( "%c", f->ch ); 536 } printf( " " ); 537 } printf( "\n" ); 709 538 } 710 539 } 711 int next( Fib & fib ) with( fib ) { 712 `resume( fib );` 713 return ret; 714 } 715 716 717 718 719 720 540 int main() { 541 Fmt fmt = { NULL }; comain( &fmt ); // prime 542 for ( ;; ) { 543 scanf( "%c", &fmt.ch ); 544 if ( feof( stdin ) ) break; 545 comain( &fmt ); 546 } 547 if ( fmt.g != 0 || fmt.b != 0 ) printf( "\n" ); 548 } 721 549 \end{cfa} 722 550 \end{lrbox} 723 \subfloat[3 States, internal variables]{\label{f:Coroutine3States}\usebox\myboxA} 724 \qquad\qquad 725 \subfloat[1 State, internal variables]{\label{f:Coroutine1State}\usebox\myboxB} 726 \caption{\CFA Coroutine Fibonacci Implementations} 727 \label{f:fibonacci-cfa} 551 552 \subfloat[\CFA asymmetric generator]{\label{f:CFAFormatGen}\usebox\myboxA} 553 \hspace{3pt} 554 \vrule 555 \hspace{3pt} 556 \subfloat[C generator simulation]{\label{f:CFormatSim}\usebox\myboxB} 557 \hspace{3pt} 558 \caption{Formatter (input) asymmetric generator} 559 
\label{f:FormatterAsymmetricGenerator} 728 560 \end{figure} 729 561 730 Using a coroutine, it is possible to express the Fibonacci formula directly without any of the C problems. 731 Figure~\ref{f:Coroutine3States} creates a @coroutine@ type: 732 \begin{cfa} 733 `coroutine` Fib { int fn; }; 734 \end{cfa} 735 which provides communication, @fn@, for the \newterm{coroutine main}, @main@, which runs on the coroutine stack, and possibly multiple interface routines @next@. 736 Like the structure in Figure~\ref{f:ExternalState}, the coroutine type allows multiple instances, where instances of this type are passed to the (overloaded) coroutine main. 737 The coroutine main's stack holds the state for the next generation, @f1@ and @f2@, and the code has the three suspend points, representing the three states in the Fibonacci formula, to context switch back to the caller's resume. 738 The interface routine @next@, takes a Fibonacci instance and context switches to it using @resume@; 739 on restart, the Fibonacci field, @fn@, contains the next value in the sequence, which is returned. 740 The first @resume@ is special because it cocalls the coroutine at its coroutine main and allocates the stack; 741 when the coroutine main returns, its stack is deallocated. 742 Hence, @Fib@ is an object at creation, transitions to a coroutine on its first resume, and transitions back to an object when the coroutine main finishes. 743 Figure~\ref{f:Coroutine1State} shows the coroutine version of the C version in Figure~\ref{f:ExternalState}. 744 Coroutine generators are called \newterm{output coroutines} because values are only returned. 745 746 Figure~\ref{f:CFAFmt} shows an \newterm{input coroutine}, @Format@, for restructuring text into groups of characters of fixed-size blocks. 747 For example, the input of the left is reformatted into the output on the right. 
748 \begin{quote} 562 Stateful functions appear as generators, coroutines, and threads, where presentations are based on function objects or pointers~\cite{Butenhof97, C++14, MS:VisualC++, BoostCoroutines15}. 563 For example, Python presents generators as a function object: 564 \begin{python} 565 def Gen(): 566 ... `yield val` ... 567 gen = Gen() 568 for i in range( 10 ): 569 print( next( gen ) ) 570 \end{python} 571 Boost presents coroutines in terms of four functor object-types: 572 \begin{cfa} 573 asymmetric_coroutine<>::pull_type 574 asymmetric_coroutine<>::push_type 575 symmetric_coroutine<>::call_type 576 symmetric_coroutine<>::yield_type 577 \end{cfa} 578 and many languages present threading using function pointers, @pthreads@~\cite{Butenhof97}, \Csharp~\cite{Csharp}, Go~\cite{Go}, and Scala~\cite{Scala}, \eg pthreads: 579 \begin{cfa} 580 void * rtn( void * arg ) { ... } 581 int i = 3, rc; 582 pthread_t t; $\C{// thread id}$ 583 `rc = pthread_create( &t, rtn, (void *)i );` $\C{// create and initialized task, type-unsafe input parameter}$ 584 \end{cfa} 585 % void mycor( pthread_t cid, void * arg ) { 586 % int * value = (int *)arg; $\C{// type unsafe, pointer-size only}$ 587 % // thread body 588 % } 589 % int main() { 590 % int input = 0, output; 591 % coroutine_t cid = coroutine_create( &mycor, (void *)&input ); $\C{// type unsafe, pointer-size only}$ 592 % coroutine_resume( cid, (void *)input, (void **)&output ); $\C{// type unsafe, pointer-size only}$ 593 % } 594 \CFA's preferred presentation model for generators/coroutines/threads is a hybrid of objects and functions, with an object-oriented flavour. 595 Essentially, the generator/coroutine/thread function is semantically coupled with a generator/coroutine/thread custom type. 596 The custom type solves several issues, while accessing the underlying mechanisms used by the custom types is still allowed. 
597 598 599 \subsection{Generator} 600 601 Stackless generators have the potential to be very small and fast, \ie as small and fast as function call/return for both creation and execution. 602 The \CFA goal is to achieve this performance target, possibly at the cost of some semantic complexity. 603 A series of different kinds of generators and their implementation demonstrate how this goal is accomplished. 604 605 Figure~\ref{f:FibonacciAsymmetricGenerator} shows an unbounded asymmetric generator for an infinite sequence of Fibonacci numbers written in C and \CFA, with a simple C implementation for the \CFA version. 606 This generator is an \emph{output generator}, producing a new result on each resumption. 607 To compute Fibonacci, the previous two values in the sequence are retained to generate the next value, \ie @fn1@ and @fn@, plus the execution location where control restarts when the generator is resumed, \ie top or middle. 608 An additional requirement is the ability to create an arbitrary number of generators (of any kind), \ie retaining one state in global variables is insufficient; 609 hence, state is retained in a closure between calls. 610 Figure~\ref{f:CFibonacci} shows the C approach of manually creating the closure in structure @Fib@, and multiple instances of this closure provide multiple Fibonacci generators. 611 The C version only has the middle execution state because the top execution state is declaration initialization. 612 Figure~\ref{f:CFAFibonacciGen} shows the \CFA approach, which also has a manual closure, but replaces the structure with a custom \CFA @generator@ type. 613 This generator type is then connected to a function that \emph{must be named \lstinline|main|},\footnote{ 614 The name \lstinline|main| has special meaning in C, specifically the function where a program starts execution. 
615 Hence, overloading this name for other starting points (generator/coroutine/thread) is a logical extension.} 616 called a \emph{generator main}, which takes as its only parameter a reference to the generator type. 617 The generator main contains @suspend@ statements that suspend execution without ending the generator versus @return@. 618 For the Fibonacci generator-main,\footnote{ 619 The \CFA \lstinline|with| opens an aggregate scope making its fields directly accessible, like Pascal \lstinline|with|, but using parallel semantics. 620 Multiple aggregates may be opened.} 621 the top initialization state appears at the start and the middle execution state is denoted by statement @suspend@. 622 Any local variables in @main@ \emph{are not retained} between calls; 623 hence local variables are only for temporary computations \emph{between} suspends. 624 All retained state \emph{must} appear in the generator's type. 625 As well, generator code containing a @suspend@ cannot be refactored into a helper function called by the generator, because @suspend@ is implemented via @return@, so a return from the helper function goes back to the current generator not the resumer. 626 The generator is started by calling function @resume@ with a generator instance, which begins execution at the top of the generator main, and subsequent @resume@ calls restart the generator at its point of last suspension. 627 Resuming an ended (returned) generator is undefined. 628 Function @resume@ returns its argument generator so it can be cascaded in an expression, in this case to print the next Fibonacci value @fn@ computed in the generator instance. 629 Figure~\ref{f:CFibonacciSim} shows the C implementation of the \CFA generator only needs one additional field, @next@, to handle retention of execution state. 630 The computed @goto@ at the start of the generator main, which branches after the previous suspend, adds very little cost to the resume call. 
631 Finally, an explicit generator type provides both design and performance benefits, such as multiple type-safe interface functions taking and returning arbitrary types.\footnote{ 632 The \CFA operator syntax uses \lstinline|?| to denote operands, which allows precise definitions for pre, post, and infix operators, \eg \lstinline|++?|, \lstinline|?++|, and \lstinline|?+?|, in addition \lstinline|?\{\}| denotes a constructor, as in \lstinline|foo `f` = `\{`...`\}`|, \lstinline|^?\{\}| denotes a destructor, and \lstinline|?()| is \CC function call \lstinline|operator()|. 633 }% 634 \begin{cfa} 635 int ?()( Fib & fib ) { return `resume( fib )`.fn; } $\C[3.9in]{// function-call interface}$ 636 int ?()( Fib & fib, int N ) { for ( N - 1 ) `fib()`; return `fib()`; } $\C{// use function-call interface to skip N values}$ 637 double ?()( Fib & fib ) { return (int)`fib()` / 3.14159; } $\C{// different return type, cast prevents recursive call}\CRT$ 638 sout | (int)f1() | (double)f1() | f2( 2 ); // alternative interface, cast selects call based on return type, step 2 values 639 \end{cfa} 640 Now, the generator can be a separately compiled opaque-type only accessed through its interface functions. 641 For contrast, Figure~\ref{f:PythonFibonacci} shows the equivalent Python Fibonacci generator, which does not use a generator type, and hence only has a single interface, but an implicit closure. 642 643 Having to manually create the generator closure by moving local-state variables into the generator type is an additional programmer burden. 644 (This restriction is removed by the coroutine in Section~\ref{s:Coroutine}.) 645 This requirement follows from the generality of variable-size local-state, \eg local state with a variable-length array requires dynamic allocation because the array size is unknown at compile time. 646 However, dynamic allocation significantly increases the cost of generator creation/destruction and is a showstopper for embedded real-time programming. 
647 But more importantly, the size of the generator type is tied to the local state in the generator main, which precludes separate compilation of the generator main, \ie a generator must be inlined or local state must be dynamically allocated. 648 With respect to safety, we believe static analysis can discriminate local state from temporary variables in a generator, \ie variable usage spanning @suspend@, and generate a compile-time error. 649 Finally, our current experience is that most generator problems have simple data state, including local state, but complex execution state, so the burden of creating the generator type is small. 650 As well, C programmers are not afraid of this kind of semantic programming requirement, if it results in very small, fast generators. 651 652 Figure~\ref{f:CFAFormatGen} shows an asymmetric \newterm{input generator}, @Fmt@, for restructuring text into groups of characters of fixed-size blocks, \ie the input on the left is reformatted into the output on the right, where newlines are ignored. 653 \begin{center} 749 654 \tt 750 655 \begin{tabular}{@{}l|l@{}} 751 656 \multicolumn{1}{c|}{\textbf{\textrm{input}}} & \multicolumn{1}{c}{\textbf{\textrm{output}}} \\ 752 abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz 657 \begin{tabular}[t]{@{}ll@{}} 658 abcdefghijklmnopqrstuvwxyz \\ 659 abcdefghijklmnopqrstuvwxyz 660 \end{tabular} 753 661 & 754 662 \begin{tabular}[t]{@{}lllll@{}} … … 758 666 \end{tabular} 759 667 \end{tabular} 760 \end{quote} 761 The example takes advantage of resuming a coroutine in the constructor to prime the loops so the first character sent for formatting appears inside the nested loops. 762 The destruction provides a newline if formatted text ends with a full line. 763 Figure~\ref{f:CFmt} shows the C equivalent formatter, where the loops of the coroutine are flatten (linearized) and rechecked on each call because execution location is not retained between calls. 
668 \end{center} 669 The example takes advantage of resuming a generator in the constructor to prime the loops so the first character sent for formatting appears inside the nested loops. 670 The destructor provides a newline, if formatted text ends with a full line. 671 Figure~\ref{f:CFormatSim} shows the C implementation of the \CFA input generator with one additional field and the computed @goto@. 672 For contrast, Figure~\ref{f:PythonFormatter} shows the equivalent Python format generator with the same properties as the Fibonacci generator. 673 674 Figure~\ref{f:DeviceDriverGen} shows a \emph{killer} asymmetric generator, a device-driver, because device drivers caused 70\%-85\% of failures in Windows/Linux~\cite{Swift05}. 675 Device drivers follow the pattern of simple data state but complex execution state, \ie finite state-machine (FSM) parsing a protocol. 676 For example, the following protocol: 677 \begin{center} 678 \ldots\, STX \ldots\, message \ldots\, ESC ETX \ldots\, message \ldots\, ETX 2-byte crc \ldots 679 \end{center} 680 is a network message beginning with the control character STX, ending with an ETX, and followed by a 2-byte cyclic-redundancy check. 681 Control characters may appear in a message if preceded by an ESC. 682 When a message byte arrives, it triggers an interrupt, and the operating system services the interrupt by calling the device driver with the byte read from a hardware register. 683 The device driver returns a status code of its current state, and when a complete message is obtained, the operating system knows the message is in the message buffer. 684 Hence, the device driver is an input/output generator. 685 686 Note, the cost of creating and resuming the device-driver generator, @Driver@, is virtually identical to call/return, so performance in an operating-system kernel is excellent. 
687 As well, the data state is small, where variables @byte@ and @msg@ are communication variables for passing in message bytes and returning the message, and variables @lnth@, @crc@, and @sum@ are local variable that must be retained between calls and are manually hoisted into the generator type. 688 % Manually, detecting and hoisting local-state variables is easy when the number is small. 689 In contrast, the execution state is large, with one @resume@ and seven @suspend@s. 690 Hence, the key benefits of the generator are correctness, safety, and maintenance because the execution states are transcribed directly into the programming language rather than using a table-driven approach. 691 Because FSMs can be complex and frequently occur in important domains, direct generator support is important in a system programming language. 764 692 765 693 \begin{figure} … … 767 695 \newbox\myboxA 768 696 \begin{lrbox}{\myboxA} 697 \begin{python}[aboveskip=0pt,belowskip=0pt] 698 def Fib(): 699 fn1, fn = 0, 1 700 while True: 701 `yield fn1` 702 fn1, fn = fn, fn1 + fn 703 f1 = Fib() 704 f2 = Fib() 705 for i in range( 10 ): 706 print( next( f1 ), next( f2 ) ) 707 708 709 710 711 712 713 \end{python} 714 \end{lrbox} 715 716 \newbox\myboxB 717 \begin{lrbox}{\myboxB} 718 \begin{python}[aboveskip=0pt,belowskip=0pt] 719 def Fmt(): 720 try: 721 while True: 722 for g in range( 5 ): 723 for b in range( 4 ): 724 print( `(yield)`, end='' ) 725 print( ' ', end='' ) 726 print() 727 except GeneratorExit: 728 if g != 0 | b != 0: 729 print() 730 fmt = Fmt() 731 `next( fmt )` # prime, next prewritten 732 for i in range( 41 ): 733 `fmt.send( 'a' );` # send to yield 734 \end{python} 735 \end{lrbox} 736 \subfloat[Fibonacci]{\label{f:PythonFibonacci}\usebox\myboxA} 737 \hspace{3pt} 738 \vrule 739 \hspace{3pt} 740 \subfloat[Formatter]{\label{f:PythonFormatter}\usebox\myboxB} 741 \caption{Python generator} 742 \label{f:PythonGenerator} 743 744 \bigskip 745 746 \begin{tabular}{@{}l|l@{}} 769 747 
\begin{cfa}[aboveskip=0pt,belowskip=0pt] 770 `coroutine` Format { 771 char ch; // used for communication 772 int g, b; // global because used in destructor 748 enum Status { CONT, MSG, ESTX, 749 ELNTH, ECRC }; 750 `generator` Driver { 751 Status status; 752 unsigned char byte, * msg; // communication 753 unsigned int lnth, sum; // local state 754 unsigned short int crc; 773 755 }; 774 void main( Format & fmt ) with( fmt ) { 775 for ( ;; ) { 776 for ( g = 0; g < 5; g += 1 ) { // group 777 for ( b = 0; b < 4; b += 1 ) { // block 778 `suspend();` 779 sout | ch; // separator 756 void ?{}( Driver & d, char * m ) { d.msg = m; } 757 Status next( Driver & d, char b ) with( d ) { 758 byte = b; `resume( d );` return status; 759 } 760 void main( Driver & d ) with( d ) { 761 enum { STX = '\002', ESC = '\033', 762 ETX = '\003', MaxMsg = 64 }; 763 msg: for () { // parse message 764 status = CONT; 765 lnth = 0; sum = 0; 766 while ( byte != STX ) `suspend;` 767 emsg: for () { 768 `suspend;` // process byte 769 \end{cfa} 770 & 771 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 772 choose ( byte ) { // switch with implicit break 773 case STX: 774 status = ESTX; `suspend;` continue msg; 775 case ETX: 776 break emsg; 777 case ESC: 778 `suspend;` 780 779 } 781 sout | " "; // separator 780 if ( lnth >= MaxMsg ) { // buffer full ? 781 status = ELNTH; `suspend;` continue msg; } 782 msg[lnth++] = byte; 783 sum += byte; 782 784 } 783 sout | endl; 785 msg[lnth] = '\0'; // terminate string 786 `suspend;` 787 crc = byte << 8; 788 `suspend;` 789 status = (crc | byte) == sum ? 
MSG : ECRC; 790 `suspend;` 784 791 } 785 792 } 786 void ?{}( Format & fmt ) { `resume( fmt );` } 787 void ^?{}( Format & fmt ) with( fmt ) { 788 if ( g != 0 || b != 0 ) sout | endl; 789 } 790 void format( Format & fmt ) { 791 `resume( fmt );` 793 \end{cfa} 794 \end{tabular} 795 \caption{Device-driver generator for communication protocol} 796 \label{f:DeviceDriverGen} 797 \end{figure} 798 799 Figure~\ref{f:CFAPingPongGen} shows a symmetric generator, where the generator resumes another generator, forming a resume/resume cycle. 800 (The trivial cycle is a generator resuming itself.) 801 This control flow is similar to recursion for functions but without stack growth. 802 The steps for symmetric control-flow are creating, executing, and terminating the cycle. 803 Constructing the cycle must deal with definition-before-use to close the cycle, \ie, the first generator must know about the last generator, which is not within scope. 804 (This issue occurs for any cyclic data structure.) 805 % The example creates all the generators and then assigns the partners that form the cycle. 806 % Alternatively, the constructor can assign the partners as they are declared, except the first, and the first-generator partner is set after the last generator declaration to close the cycle. 807 Once the cycle is formed, the program main resumes one of the generators, and the generators can then traverse an arbitrary cycle using @resume@ to activate partner generator(s). 808 Terminating the cycle is accomplished by @suspend@ or @return@, both of which go back to the stack frame that started the cycle (program main in the example). 809 The starting stack-frame is below the last active generator because the resume/resume cycle does not grow the stack. 810 Also, since local variables are not retained in the generator function, it does not contain any objects with destructors that must be called, so the cost is the same as a function return. 
811 Destructor cost occurs when the generator instance is deallocated, which is easily controlled by the programmer. 812 813 Figure~\ref{f:CPingPongSim} shows the implementation of the symmetric generator, where the complexity is the @resume@, which needs an extension to the calling convention to perform a forward rather than backward jump. 814 This jump-starts at the top of the next generator main to re-execute the normal calling convention to make space on the stack for its local variables. 815 However, before the jump, the caller must reset its stack (and any registers) equivalent to a @return@, but subsequently jump forward. 816 This semantics is basically a tail-call optimization, which compilers already perform. 817 The example shows the assembly code to undo the generator's entry code before the direct jump. 818 This assembly code depends on what entry code is generated, specifically if there are local variables and the level of optimization. 819 To provide this new calling convention requires a mechanism built into the compiler, which is beyond the scope of \CFA at this time. 820 Nevertheless, it is possible to hand generate any symmetric generators for proof of concept and performance testing. 821 A compiler could also eliminate other artifacts in the generator simulation to further increase performance, \eg LLVM has various coroutine support~\cite{CoroutineTS}, and \CFA can leverage this support should it fork @clang@. 
822 823 \begin{figure} 824 \centering 825 \begin{lrbox}{\myboxA} 826 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 827 `generator PingPong` { 828 const char * name; 829 int N; 830 int i; // local state 831 PingPong & partner; // rebindable reference 832 }; 833 834 void `main( PingPong & pp )` with(pp) { 835 for ( ; i < N; i += 1 ) { 836 sout | name | i; 837 `resume( partner );` 838 } 792 839 } 793 840 int main() { 794 Format fmt; 795 eof: for ( ;; ) { 796 sin | fmt.ch; 797 if ( eof( sin ) ) break eof; 798 format( fmt ); 841 enum { N = 5 }; 842 PingPong ping = {"ping",N,0}, pong = {"pong",N,0}; 843 &ping.partner = &pong; &pong.partner = &ping; 844 `resume( ping );` 845 } 846 \end{cfa} 847 \end{lrbox} 848 849 \begin{lrbox}{\myboxB} 850 \begin{cfa}[escapechar={},aboveskip=0pt,belowskip=0pt] 851 typedef struct PingPong { 852 const char * name; 853 int N, i; 854 struct PingPong * partner; 855 void * next; 856 } PingPong; 857 #define PPCtor(name, N) {name,N,0,NULL,NULL} 858 void comain( PingPong * pp ) { 859 if ( pp->next ) goto *pp->next; 860 pp->next = &&cycle; 861 for ( ; pp->i < pp->N; pp->i += 1 ) { 862 printf( "%s %d\n", pp->name, pp->i ); 863 asm( "mov %0,%%rdi" : "=m" (pp->partner) ); 864 asm( "mov %rdi,%rax" ); 865 asm( "popq %rbx" ); 866 asm( "jmp comain" ); 867 cycle: ; 868 } 869 } 870 \end{cfa} 871 \end{lrbox} 872 873 \subfloat[\CFA symmetric generator]{\label{f:CFAPingPongGen}\usebox\myboxA} 874 \hspace{3pt} 875 \vrule 876 \hspace{3pt} 877 \subfloat[C generator simulation]{\label{f:CPingPongSim}\usebox\myboxB} 878 \hspace{3pt} 879 \caption{Ping-Pong symmetric generator} 880 \label{f:PingPongSymmetricGenerator} 881 \end{figure} 882 883 Finally, part of this generator work was inspired by the recent \CCtwenty generator proposal~\cite{C++20Coroutine19} (which they call coroutines). 884 Our work provides the same high-performance asymmetric generators as \CCtwenty, and extends their work with symmetric generators. 
885 An additional \CCtwenty generator feature allows @suspend@ and @resume@ to be followed by a restricted compound statement that is executed after the current generator has reset its stack but before calling the next generator, specified with \CFA syntax: 886 \begin{cfa} 887 ... suspend`{ ... }`; 888 ... resume( C )`{ ... }` ... 889 \end{cfa} 890 Since the current generator's stack is released before calling the compound statement, the compound statement can only reference variables in the generator's type. 891 This feature is useful when a generator is used in a concurrent context to ensure it is stopped before releasing a lock in the compound statement, which might immediately allow another thread to resume the generator. 892 Hence, this mechanism provides a general and safe handoff of the generator among competing threads. 893 894 895 \subsection{Coroutine} 896 \label{s:Coroutine} 897 898 Stackful coroutines extend generator semantics, \ie there is an implicit closure and @suspend@ may appear in a helper function called from the coroutine main. 899 A coroutine is specified by replacing @generator@ with @coroutine@ for the type. 900 Coroutine generality results in higher cost for creation, due to dynamic stack allocation, execution, due to context switching among stacks, and terminating, due to possible stack unwinding and dynamic stack deallocation. 901 A series of different kinds of coroutines and their implementations demonstrate how coroutines extend generators. 902 903 First, the previous generator examples are converted to their coroutine counterparts, allowing local-state variables to be moved from the generator type into the coroutine main. 904 \begin{description} 905 \item[Fibonacci] 906 Move the declaration of @fn1@ to the start of coroutine main. 907 \begin{cfa}[xleftmargin=0pt] 908 void main( Fib & fib ) with(fib) { 909 `int fn1;` 910 \end{cfa} 911 \item[Formatter] 912 Move the declaration of @g@ and @b@ to the for loops in the coroutine main. 
913 \begin{cfa}[xleftmargin=0pt] 914 for ( `g`; 5 ) { 915 for ( `b`; 4 ) { 916 \end{cfa} 917 \item[Device Driver] 918 Move the declaration of @lnth@ and @sum@ to their points of initialization. 919 \begin{cfa}[xleftmargin=0pt] 920 status = CONT; 921 `unsigned int lnth = 0, sum = 0;` 922 ... 923 `unsigned short int crc = byte << 8;` 924 \end{cfa} 925 \item[PingPong] 926 Move the declaration of @i@ to the for loop in the coroutine main. 927 \begin{cfa}[xleftmargin=0pt] 928 void main( PingPong & pp ) with(pp) { 929 for ( `i`; N ) { 930 \end{cfa} 931 \end{description} 932 It is also possible to refactor code containing local-state and @suspend@ statements into a helper function, like the computation of the CRC for the device driver. 933 \begin{cfa} 934 unsigned int Crc() { 935 `suspend;` 936 unsigned short int crc = byte << 8; 937 `suspend;` 938 status = (crc | byte) == sum ? MSG : ECRC; 939 return crc; 940 } 941 \end{cfa} 942 A call to this function is placed at the end of the driver's coroutine-main. 943 For complex finite-state machines, refactoring is part of normal program abstraction, especially when code is used in multiple places. 944 Again, this complexity is usually associated with execution state rather than data state. 945 946 \begin{comment} 947 Figure~\ref{f:Coroutine3States} creates a @coroutine@ type, @`coroutine` Fib { int fn; }@, which provides communication, @fn@, for the \newterm{coroutine main}, @main@, which runs on the coroutine stack, and possibly multiple interface functions, \eg @next@. 948 Like the structure in Figure~\ref{f:ExternalState}, the coroutine type allows multiple instances, where instances of this type are passed to the (overloaded) coroutine main. 949 The coroutine main's stack holds the state for the next generation, @f1@ and @f2@, and the code represents the three states in the Fibonacci formula via the three suspend points, to context switch back to the caller's @resume@. 
950 The interface function @next@, takes a Fibonacci instance and context switches to it using @resume@; 951 on restart, the Fibonacci field, @fn@, contains the next value in the sequence, which is returned. 952 The first @resume@ is special because it allocates the coroutine stack and cocalls its coroutine main on that stack; 953 when the coroutine main returns, its stack is deallocated. 954 Hence, @Fib@ is an object at creation, transitions to a coroutine on its first resume, and transitions back to an object when the coroutine main finishes. 955 Figure~\ref{f:Coroutine1State} shows the coroutine version of the C version in Figure~\ref{f:ExternalState}. 956 Coroutine generators are called \newterm{output coroutines} because values are only returned. 957 958 \begin{figure} 959 \centering 960 \newbox\myboxA 961 % \begin{lrbox}{\myboxA} 962 % \begin{cfa}[aboveskip=0pt,belowskip=0pt] 963 % `int fn1, fn2, state = 1;` // single global variables 964 % int fib() { 965 % int fn; 966 % `switch ( state )` { // explicit execution state 967 % case 1: fn = 0; fn1 = fn; state = 2; break; 968 % case 2: fn = 1; fn2 = fn1; fn1 = fn; state = 3; break; 969 % case 3: fn = fn1 + fn2; fn2 = fn1; fn1 = fn; break; 970 % } 971 % return fn; 972 % } 973 % int main() { 974 % 975 % for ( int i = 0; i < 10; i += 1 ) { 976 % printf( "%d\n", fib() ); 977 % } 978 % } 979 % \end{cfa} 980 % \end{lrbox} 981 \begin{lrbox}{\myboxA} 982 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 983 #define FibCtor { 0, 1 } 984 typedef struct { int fn1, fn; } Fib; 985 int fib( Fib * f ) { 986 987 int ret = f->fn1; 988 f->fn1 = f->fn; 989 f->fn = ret + f->fn; 990 return ret; 991 } 992 993 994 995 int main() { 996 Fib f1 = FibCtor, f2 = FibCtor; 997 for ( int i = 0; i < 10; i += 1 ) { 998 printf( "%d %d\n", 999 fib( &f1 ), fib( &f2 ) ); 799 1000 } 800 1001 } … … 805 1006 \begin{lrbox}{\myboxB} 806 1007 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 807 struct Format { 808 char ch; 809 int g, b; 810 }; 811 void format( struct 
Format * fmt ) { 812 if ( fmt->ch != -1 ) { // not EOF ? 813 printf( "%c", fmt->ch ); 814 fmt->b += 1; 815 if ( fmt->b == 4 ) { // block 816 printf( " " ); // separator 817 fmt->b = 0; 818 fmt->g += 1; 819 } 820 if ( fmt->g == 5 ) { // group 821 printf( "\n" ); // separator 822 fmt->g = 0; 823 } 824 } else { 825 if ( fmt->g != 0 || fmt->b != 0 ) printf( "\n" ); 1008 `coroutine` Fib { int fn1; }; 1009 void main( Fib & fib ) with( fib ) { 1010 int fn; 1011 [fn1, fn] = [0, 1]; 1012 for () { 1013 `suspend;` 1014 [fn1, fn] = [fn, fn1 + fn]; 826 1015 } 827 1016 } 1017 int ?()( Fib & fib ) with( fib ) { 1018 return `resume( fib )`.fn1; 1019 } 828 1020 int main() { 829 struct Format fmt = { 0, 0, 0 }; 830 for ( ;; ) { 831 scanf( "%c", &fmt.ch ); 832 if ( feof( stdin ) ) break; 833 format( &fmt ); 834 } 835 fmt.ch = -1; 836 format( &fmt ); 837 } 1021 Fib f1, f2; 1022 for ( 10 ) { 1023 sout | f1() | f2(); 1024 } 1025 1026 838 1027 \end{cfa} 839 1028 \end{lrbox} 840 \subfloat[\CFA Coroutine]{\label{f:CFAFmt}\usebox\myboxA} 1029 1030 \newbox\myboxC 1031 \begin{lrbox}{\myboxC} 1032 \begin{python}[aboveskip=0pt,belowskip=0pt] 1033 1034 def Fib(): 1035 1036 fn1, fn = 0, 1 1037 while True: 1038 `yield fn1` 1039 fn1, fn = fn, fn1 + fn 1040 1041 1042 // next prewritten 1043 1044 1045 f1 = Fib() 1046 f2 = Fib() 1047 for i in range( 10 ): 1048 print( next( f1 ), next( f2 ) ) 1049 1050 1051 1052 \end{python} 1053 \end{lrbox} 1054 1055 \subfloat[C]{\label{f:GlobalVariables}\usebox\myboxA} 1056 \hspace{3pt} 1057 \vrule 1058 \hspace{3pt} 1059 \subfloat[\CFA]{\label{f:ExternalState}\usebox\myboxB} 1060 \hspace{3pt} 1061 \vrule 1062 \hspace{3pt} 1063 \subfloat[Python]{\label{f:ExternalState}\usebox\myboxC} 1064 \caption{Fibonacci generator} 1065 \label{f:C-fibonacci} 1066 \end{figure} 1067 1068 \bigskip 1069 1070 \newbox\myboxA 1071 \begin{lrbox}{\myboxA} 1072 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 1073 `coroutine` Fib { int fn; }; 1074 void main( Fib & fib ) with( fib ) { 1075 fn = 0; 
int fn1 = fn; `suspend`; 1076 fn = 1; int fn2 = fn1; fn1 = fn; `suspend`; 1077 for () { 1078 fn = fn1 + fn2; fn2 = fn1; fn1 = fn; `suspend`; } 1079 } 1080 int next( Fib & fib ) with( fib ) { `resume( fib );` return fn; } 1081 int main() { 1082 Fib f1, f2; 1083 for ( 10 ) 1084 sout | next( f1 ) | next( f2 ); 1085 } 1086 \end{cfa} 1087 \end{lrbox} 1088 \newbox\myboxB 1089 \begin{lrbox}{\myboxB} 1090 \begin{python}[aboveskip=0pt,belowskip=0pt] 1091 1092 def Fibonacci(): 1093 fn = 0; fn1 = fn; `yield fn` # suspend 1094 fn = 1; fn2 = fn1; fn1 = fn; `yield fn` 1095 while True: 1096 fn = fn1 + fn2; fn2 = fn1; fn1 = fn; `yield fn` 1097 1098 1099 f1 = Fibonacci() 1100 f2 = Fibonacci() 1101 for i in range( 10 ): 1102 print( `next( f1 )`, `next( f2 )` ) # resume 1103 1104 \end{python} 1105 \end{lrbox} 1106 \subfloat[\CFA]{\label{f:Coroutine3States}\usebox\myboxA} 841 1107 \qquad 842 \subfloat[ C Linearized]{\label{f:CFmt}\usebox\myboxB}843 \caption{F ormatting text into lines of 5 blocks of 4 characters.}844 \label{f: fmt-line}1108 \subfloat[Python]{\label{f:Coroutine1State}\usebox\myboxB} 1109 \caption{Fibonacci input coroutine, 3 states, internal variables} 1110 \label{f:cfa-fibonacci} 845 1111 \end{figure} 846 847 The previous examples are \newterm{asymmetric (semi) coroutine}s because one coroutine always calls a resuming routine for another coroutine, and the resumed coroutine always suspends back to its last resumer, similar to call/return for normal routines 848 However, there is no stack growth because @resume@/@suspend@ context switch to existing stack-frames rather than create new ones. 849 \newterm{Symmetric (full) coroutine}s have a coroutine call a resuming routine for another coroutine, which eventually forms a resuming-call cycle. 850 (The trivial cycle is a coroutine resuming itself.) 851 This control flow is similar to recursion for normal routines, but again there is no stack growth from the context switch. 
1112 \end{comment} 852 1113 853 1114 \begin{figure} … … 857 1118 \begin{cfa} 858 1119 `coroutine` Prod { 859 Cons & c; 1120 Cons & c; // communication 860 1121 int N, money, receipt; 861 1122 }; 862 1123 void main( Prod & prod ) with( prod ) { 863 1124 // 1st resume starts here 864 for ( i nt i = 0; i < N; i += 1) {1125 for ( i; N ) { 865 1126 int p1 = random( 100 ), p2 = random( 100 ); 866 sout | p1 | " " | p2 | endl;1127 sout | p1 | " " | p2; 867 1128 int status = delivery( c, p1, p2 ); 868 sout | " $" | money | endl | status | endl;1129 sout | " $" | money | nl | status; 869 1130 receipt += 1; 870 1131 } 871 1132 stop( c ); 872 sout | "prod stops" | endl;1133 sout | "prod stops"; 873 1134 } 874 1135 int payment( Prod & prod, int money ) { … … 891 1152 \begin{cfa} 892 1153 `coroutine` Cons { 893 Prod & p; 1154 Prod & p; // communication 894 1155 int p1, p2, status; 895 _Bool done;1156 bool done; 896 1157 }; 897 1158 void ?{}( Cons & cons, Prod & p ) { 898 &cons.p = &p; 1159 &cons.p = &p; // reassignable reference 899 1160 cons.[status, done ] = [0, false]; 900 1161 } 901 void ^?{}( Cons & cons ) {}902 1162 void main( Cons & cons ) with( cons ) { 903 1163 // 1st resume starts here 904 1164 int money = 1, receipt; 905 1165 for ( ; ! 
done; ) { 906 sout | p1 | " " | p2 | endl | " $" | money | endl;1166 sout | p1 | " " | p2 | nl | " $" | money; 907 1167 status += 1; 908 1168 receipt = payment( p, money ); 909 sout | " #" | receipt | endl;1169 sout | " #" | receipt; 910 1170 money += 1; 911 1171 } 912 sout | "cons stops" | endl;1172 sout | "cons stops"; 913 1173 } 914 1174 int delivery( Cons & cons, int p1, int p2 ) { … … 921 1181 `resume( cons );` 922 1182 } 1183 923 1184 \end{cfa} 924 1185 \end{tabular} 925 \caption{Producer / consumer: resume-resume cycle, bi -directional communication}1186 \caption{Producer / consumer: resume-resume cycle, bidirectional communication} 926 1187 \label{f:ProdCons} 927 1188 \end{figure} 928 1189 929 Figure~\ref{f:ProdCons} shows a producer/consumer symmetric-coroutine performing bi-directional communication.930 Since the solution involves a full-coroutining cycle, the program main creates one coroutine in isolation, passes this coroutine to its partner, and closes the cycle at the call to @start@.931 The @start@ routine communicates both the number of elements to be produced and the consumer into the producer's coroutine structure.932 The n the @resume@ to@prod@ creates @prod@'s stack with a frame for @prod@'s coroutine main at the top, and context switches to it.933 @prod@'s coroutine main starts, creates local variables that are retained between coroutine activations, and executes $N$ iterations, each generating two random values, calling the consumer to deliver the values, and printing the status returned from the consumer.1190 Figure~\ref{f:ProdCons} shows the ping-pong example in Figure~\ref{f:CFAPingPongGen} extended into a producer/consumer symmetric-coroutine performing bidirectional communication. 1191 This example is illustrative because both producer/consumer have two interface functions with @resume@s that suspend execution in these interface (helper) functions. 
1192 The program main creates the producer coroutine, passes it to the consumer coroutine in its initialization, and closes the cycle at the call to @start@ along with the number of items to be produced. 1193 The first @resume@ of @prod@ creates @prod@'s stack with a frame for @prod@'s coroutine main at the top, and context switches to it. 1194 @prod@'s coroutine main starts, creates local-state variables that are retained between coroutine activations, and executes $N$ iterations, each generating two random values, calling the consumer to deliver the values, and printing the status returned from the consumer. 934 1195 935 1196 The producer call to @delivery@ transfers values into the consumer's communication variables, resumes the consumer, and returns the consumer status. 936 For the first resume, @cons@'s stack is initialized, creating localvariables retained between subsequent activations of the coroutine.937 The consumer iterates until the @done@ flag is set, prints , increments status, and calls back to the producer via @payment@, and on return from @payment@, prints the receipt from the producer and increments @money@ (inflation).938 The call from the consumer to the@payment@ introduces the cycle between producer and consumer.1197 On the first resume, @cons@'s stack is created and initialized, holding local-state variables retained between subsequent activations of the coroutine. 1198 The consumer iterates until the @done@ flag is set, prints the values delivered by the producer, increments status, and calls back to the producer via @payment@, and on return from @payment@, prints the receipt from the producer and increments @money@ (inflation). 1199 The call from the consumer to @payment@ introduces the cycle between producer and consumer. 939 1200 When @payment@ is called, the consumer copies values into the producer's communication variable and a resume is executed. 
940 The context switch restarts the producer at the point where it was last context switched, so it continues in @delivery@ after the resume. 941 1201 The context switch restarts the producer at the point where it last context switched, so it continues in @delivery@ after the resume. 942 1202 @delivery@ returns the status value in @prod@'s coroutine main, where the status is printed. 943 1203 The loop then repeats calling @delivery@, where each call resumes the consumer coroutine. … … 945 1205 The consumer increments and returns the receipt to the call in @cons@'s coroutine main. 946 1206 The loop then repeats calling @payment@, where each call resumes the producer coroutine. 947 948 After iterating $N$ times, the producer calls @stop@. 949 The @done@ flag is set to stop the consumer's execution and a resume is executed. 950 The context switch restarts @cons@ in @payment@ and it returns with the last receipt. 951 The consumer terminates its loops because @done@ is true, its @main@ terminates, so @cons@ transitions from a coroutine back to an object, and @prod@ reactivates after the resume in @stop@. 952 @stop@ returns and @prod@'s coroutine main terminates. 953 The program main restarts after the resume in @start@. 954 @start@ returns and the program main terminates. 955 956 957 \subsection{Coroutine Implementation} 958 959 A significant implementation challenge for coroutines (and threads, see section \ref{threads}) is adding extra fields and executing code after/before the coroutine constructor/destructor and coroutine main to create/initialize/de-initialize/destroy extra fields and the stack. 960 There are several solutions to this problem and the chosen option forced the \CFA coroutine design. 961 962 Object-oriented inheritance provides extra fields and code in a restricted context, but it requires programmers to explicitly perform the inheritance: 963 \begin{cfa} 964 struct mycoroutine $\textbf{\textsf{inherits}}$ baseCoroutine { ... 
} 965 \end{cfa} 966 and the programming language (and possibly its tool set, \eg debugger) may need to understand @baseCoroutine@ because of the stack. 967 Furthermore, the execution of constructs/destructors is in the wrong order for certain operations, \eg for threads; 968 \eg, if the thread is implicitly started, it must start \emph{after} all constructors, because the thread relies on a completely initialized object, but the inherited constructor runs \emph{before} the derived. 969 970 An alternative is composition: 971 \begin{cfa} 972 struct mycoroutine { 973 ... // declarations 1207 Figure~\ref{f:ProdConsRuntimeStacks} shows the runtime stacks of the program main, and the coroutine mains for @prod@ and @cons@ during the cycling. 1208 1209 \begin{figure} 1210 \begin{center} 1211 \input{FullProdConsStack.pstex_t} 1212 \end{center} 1213 \vspace*{-10pt} 1214 \caption{Producer / consumer runtime stacks} 1215 \label{f:ProdConsRuntimeStacks} 1216 1217 \medskip 1218 1219 \begin{center} 1220 \input{FullCoroutinePhases.pstex_t} 1221 \end{center} 1222 \vspace*{-10pt} 1223 \caption{Ping / Pong coroutine steps} 1224 \label{f:PingPongFullCoroutineSteps} 1225 \end{figure} 1226 1227 Terminating a coroutine cycle is more complex than a generator cycle, because it requires context switching to the program main's \emph{stack} to shut down the program, whereas generators started by the program main run on its stack. 1228 Furthermore, each deallocated coroutine must guarantee all destructors are run for objects allocated in the coroutine type \emph{and} allocated on the coroutine's stack at the point of suspension, which can be arbitrarily deep. 1229 When a coroutine's main ends, its stack is already unwound so any stack allocated objects with destructors have been finalized. 1230 The na\"{i}ve semantics for coroutine-cycle termination is to context switch to the last resumer, like executing a @suspend@/@return@ in a generator. 
1231 However, for coroutines, the last resumer is \emph{not} implicitly below the current stack frame, as for generators, because each coroutine's stack is independent. 1232 Unfortunately, it is impossible to determine statically if a coroutine is in a cycle and unrealistic to check dynamically (graph-cycle problem). 1233 Hence, a compromise solution is necessary that works for asymmetric (acyclic) and symmetric (cyclic) coroutines. 1234 1235 Our solution is to context switch back to the first resumer (starter) once the coroutine ends. 1236 This semantics works well for the most common asymmetric and symmetric coroutine usage patterns. 1237 For asymmetric coroutines, it is common for the first resumer (starter) coroutine to be the only resumer. 1238 All previous generators converted to coroutines have this property. 1239 For symmetric coroutines, it is common for the cycle creator to persist for the lifetime of the cycle. 1240 Hence, the starter coroutine is remembered on the first resume and ending the coroutine resumes the starter. 1241 Figure~\ref{f:ProdConsRuntimeStacks} shows this semantic by the dashed lines from the end of the coroutine mains: @prod@ starts @cons@ so @cons@ resumes @prod@ at the end, and the program main starts @prod@ so @prod@ resumes the program main at the end. 1242 For other scenarios, it is always possible to devise a solution with additional programming effort, such as forcing the cycle forward (backward) to a safe point before starting termination. 1243 1244 The producer/consumer example does not illustrate the full power of the starter semantics because @cons@ always ends first. 1245 Assume generator @PingPong@ is converted to a coroutine. 1246 Figure~\ref{f:PingPongFullCoroutineSteps} shows the creation, starter, and cyclic execution steps of the coroutine version. 1247 The program main creates (declares) coroutine instances @ping@ and @pong@. 
1248 Next, program main resumes @ping@, making it @ping@'s starter, and @ping@'s main resumes @pong@'s main, making it @pong@'s starter. 1249 Execution forms a cycle when @pong@ resumes @ping@, and cycles $N$ times. 1250 By adjusting $N$ for either @ping@/@pong@, it is possible to have either one finish first, instead of @pong@ always ending first. 1251 If @pong@ ends first, it resumes its starter @ping@ in its coroutine main, then @ping@ ends and resumes its starter the program main in function @start@. 1252 If @ping@ ends first, it resumes its starter the program main in function @start@. 1253 Regardless of the cycle complexity, the starter stack always leads back to the program main, but the stack can be entered at an arbitrary point. 1254 Once back at the program main, coroutines @ping@ and @pong@ are deallocated. 1255 For generators, deallocation runs the destructors for all objects in the generator type. 1256 For coroutines, deallocation deals with objects in the coroutine type and must also run the destructors for any objects pending on the coroutine's stack for any unterminated coroutine. 1257 Hence, if a coroutine's destructor detects the coroutine is not ended, it implicitly raises a cancellation exception (uncatchable exception) at the coroutine and resumes it so the cancellation exception can propagate to the root of the coroutine's stack destroying all local variables on the stack. 1258 So the \CFA semantics for the generator and coroutine ensure both can be safely deallocated at any time, regardless of their current state, like any other aggregate object. 1259 Explicitly raising normal exceptions at another coroutine can replace flag variables, like @stop@, \eg @prod@ raises a @stop@ exception at @cons@ after it finishes generating values and resumes @cons@, which catches the @stop@ exception to terminate its loop. 1260 1261 Finally, there is an interesting effect for @suspend@ with symmetric coroutines. 
1262 A coroutine must retain its last resumer to suspend back because the resumer is on a different stack. 1263 These reverse pointers allow @suspend@ to cycle \emph{backwards}, which may be useful in certain cases. 1264 However, there is an anomaly if a coroutine resumes itself, because it overwrites its last resumer with itself, losing the ability to resume the last external resumer. 1265 To prevent losing this information, a self-resume does not overwrite the last resumer. 1266 1267 1268 \subsection{Generator / Coroutine Implementation} 1269 1270 A significant implementation challenge for generators/coroutines (and threads in Section~\ref{s:threads}) is adding extra fields to the custom types and related functions, \eg inserting code after/before the coroutine constructor/destructor and @main@ to create/initialize/de-initialize/destroy any extra fields, \eg stack. 1271 There are several solutions to these problems, which follow from the object-oriented flavour of adopting custom types. 
1279 Alternatives, such as explicitly starting threads as in Java, are repetitive and forgetting to call start is a common source of errors. 1280 An alternative is composition: 1281 \begin{cfa} 1282 struct myCoroutine { 1283 ... // declaration/communication variables 974 1284 baseCoroutine dummy; // composition, last declaration 975 1285 } 976 1286 \end{cfa} 977 which also requires an explicit declaration that must be the last oneto ensure correct initialization order.1287 which also requires an explicit declaration that must be last to ensure correct initialization order. 978 1288 However, there is nothing preventing wrong placement or multiple declarations. 979 1289 980 For coroutines as for threads, many implementations are based on routine pointers or routine objects~\cite{Butenhof97, C++14, MS:VisualC++, BoostCoroutines15}. 981 For example, Boost implements coroutines in terms of four functor object-types: 982 \begin{cfa} 983 asymmetric_coroutine<>::pull_type 984 asymmetric_coroutine<>::push_type 985 symmetric_coroutine<>::call_type 986 symmetric_coroutine<>::yield_type 987 \end{cfa} 988 Similarly, the canonical threading paradigm is often based on routine pointers, \eg @pthread@~\cite{pthreads}, \Csharp~\cite{Csharp}, Go~\cite{Go}, and Scala~\cite{Scala}. 989 However, the generic thread-handle (identifier) is limited (few operations), unless it is wrapped in a custom type. 990 \begin{cfa} 991 void mycor( coroutine_t cid, void * arg ) { 992 int * value = (int *)arg; $\C{// type unsafe, pointer-size only}$ 993 // Coroutine body 994 } 995 int main() { 996 int input = 0, output; 997 coroutine_t cid = coroutine_create( &mycor, (void *)&input ); $\C{// type unsafe, pointer-size only}$ 998 coroutine_resume( cid, (void *)input, (void **)&output ); $\C{// type unsafe, pointer-size only}$ 999 } 1000 \end{cfa} 1001 Since the custom type is simple to write in \CFA and solves several issues, added support for routine/lambda-based coroutines adds very little. 
1002 1003 Note, the type @coroutine_t@ must be an abstract handle to the coroutine, because the coroutine descriptor and its stack are non-copyable. 1004 Copying the coroutine descriptor results in copies being out of date with the current state of the stack. 1005 Correspondingly, copying the stack results is copies being out of date with coroutine descriptor, and pointers in the stack being out of date to data on the stack. 1006 (There is no mechanism in C to find all stack-specific pointers and update them as part of a copy.) 1007 1008 The selected approach is to use language support by introducing a new kind of aggregate (structure): 1009 \begin{cfa} 1010 coroutine Fibonacci { 1011 int fn; // communication variables 1012 }; 1013 \end{cfa} 1014 The @coroutine@ keyword means the compiler (and tool set) can find and inject code where needed. 1015 The downside of this approach is that it makes coroutine a special case in the language. 1016 Users wanting to extend coroutines or build their own for various reasons can only do so in ways offered by the language. 1017 Furthermore, implementing coroutines without language supports also displays the power of a programming language. 1018 While this is ultimately the option used for idiomatic \CFA code, coroutines and threads can still be constructed without using the language support. 1019 The reserved keyword eases use for the common cases. 1020 1021 Part of the mechanism to generalize coroutines is using a \CFA trait, which defines a coroutine as anything satisfying the trait @is_coroutine@, and this trait is used to restrict coroutine-manipulation routines: 1290 \CFA custom types make any special properties explicit to the language and its tool chain, \eg the language code-generator knows where to inject code 1291 % and when it is unsafe to perform certain optimizations, 1292 and IDEs using simple parsing can find and manipulate types with special properties. 
1293 The downside of this approach is that it makes custom types a special case in the language. 1294 Users wanting to extend custom types or build their own can only do so in ways offered by the language. 1295 Furthermore, implementing custom types without language support may display the power of a programming language. 1296 \CFA blends the two approaches, providing custom type for idiomatic \CFA code, while extending and building new custom types is still possible, similar to Java concurrency with builtin and library. 1297 1298 Part of the mechanism to generalize custom types is the \CFA trait~\cite[\S~2.3]{Moss18}, \eg the definition for custom-type @coroutine@ is anything satisfying the trait @is_coroutine@, and this trait both enforces and restricts the coroutine-interface functions. 1022 1299 \begin{cfa} 1023 1300 trait is_coroutine( `dtype` T ) { … … 1025 1302 coroutine_desc * get_coroutine( T & ); 1026 1303 }; 1027 forall( `dtype` T | is_coroutine(T) ) void suspend( T & ); 1028 forall( `dtype` T | is_coroutine(T) ) void resume( T & ); 1029 \end{cfa} 1030 The @dtype@ property of the trait ensures the coroutine descriptor is non-copyable, so all coroutines must be passed by reference (pointer). 1031 The routine definitions ensures there is a statically-typed @main@ routine that is the starting point (first stack frame) of a coroutine, and a mechanism to get (read) the currently executing coroutine handle. 1032 The @main@ routine has no return value or additional parameters because the coroutine type allows an arbitrary number of interface routines with corresponding arbitrary typed input/output values versus fixed ones. 1033 The generic routines @suspend@ and @resume@ can be redefined, but any object passed to them is a coroutine since it must satisfy the @is_coroutine@ trait to compile. 
1034 The advantage of this approach is that users can easily create different types of coroutines, for example, changing the memory layout of a coroutine is trivial when implementing the @get_coroutine@ routine, and possibly redefining @suspend@ and @resume@. 1035 The \CFA keyword @coroutine@ implicitly implements the getter and forward declarations required for implementing the coroutine main: 1304 forall( `dtype` T | is_coroutine(T) ) void $suspend$( T & ), resume( T & ); 1305 \end{cfa} 1306 Note, copying generators/coroutines/threads is not meaningful. 1307 For example, both the resumer and suspender descriptors can have bidirectional pointers; 1308 copying these coroutines does not update the internal pointers so behaviour of both copies would be difficult to understand. 1309 Furthermore, two coroutines cannot logically execute on the same stack. 1310 A deep coroutine copy, which copies the stack, is also meaningless in an unmanaged language (no garbage collection), like C, because the stack may contain pointers to object within it that require updating for the copy. 1311 The \CFA @dtype@ property provides no \emph{implicit} copying operations and the @is_coroutine@ trait provides no \emph{explicit} copying operations, so all coroutines must be passed by reference (pointer). 1312 The function definitions ensure there is a statically typed @main@ function that is the starting point (first stack frame) of a coroutine, and a mechanism to get (read) the coroutine descriptor from its handle. 1313 The @main@ function has no return value or additional parameters because the coroutine type allows an arbitrary number of interface functions with corresponding arbitrary typed input/output values versus fixed ones. 1314 The advantage of this approach is that users can easily create different types of coroutines, \eg changing the memory layout of a coroutine is trivial when implementing the @get_coroutine@ function, and possibly redefining \textsf{suspend} and @resume@. 
1315 1316 The \CFA custom-type @coroutine@ implicitly implements the getter and forward declarations for the coroutine main. 1036 1317 \begin{cquote} 1037 1318 \begin{tabular}{@{}ccc@{}} … … 1069 1350 \end{tabular} 1070 1351 \end{cquote} 1071 The combination of these two approaches allows an easy and concise specification to coroutining (and concurrency) for normal users, while more advanced users have tighter control on memory layout and initialization. 1072 1073 1074 \subsection{Thread Interface} 1075 \label{threads} 1076 1077 Both user and kernel threads are supported, where user threads provide concurrency and kernel threads provide parallelism. 1078 Like coroutines and for the same design reasons, the selected approach for user threads is to use language support by introducing a new kind of aggregate (structure) and a \CFA trait: 1352 The combination of custom types and fundamental @trait@ description of these types allows a concise specification for programmers and tools, while more advanced programmers can have tighter control over memory layout and initialization. 1353 1354 Figure~\ref{f:CoroutineMemoryLayout} shows different memory-layout options for a coroutine (where a task is similar). 1355 The coroutine handle is the @coroutine@ instance containing programmer specified type global/communication variables across interface functions. 1356 The coroutine descriptor contains all implicit declarations needed by the runtime, \eg @suspend@/@resume@, and can be part of the coroutine handle or separate. 1357 The coroutine stack can appear in a number of locations and be fixed or variable sized. 1358 Hence, the coroutine's stack could be a VLS\footnote{ 1359 We are examining variable-sized structures (VLS), where fields can be variable-sized structures or arrays. 1360 Once allocated, a VLS is fixed sized.} 1361 on the allocating stack, provided the allocating stack is large enough. 
1362 For a VLS stack, allocation/deallocation is an inexpensive adjustment of the stack pointer, modulo any stack constructor costs (\eg initial frame setup). 1363 For heap stack allocation, allocation/deallocation is an expensive heap allocation (where the heap can be a shared resource), modulo any stack constructor costs. 1364 With heap stack allocation, it is also possible to use a split (segmented) stack calling convention, available with gcc and clang, so the stack is variable sized. 1365 Currently, \CFA supports stack/heap allocated descriptors but only fixed-sized heap allocated stacks. 1366 In \CFA debug-mode, the fixed-sized stack is terminated with a write-only page, which catches most stack overflows. 1367 Experience teaching concurrency with \uC~\cite{CS343} shows fixed-sized stacks are rarely an issue for students. 1368 Split-stack allocation is under development but requires recompilation of legacy code, which may be impossible. 1369 1370 \begin{figure} 1371 \centering 1372 \input{corlayout.pstex_t} 1373 \caption{Coroutine memory layout} 1374 \label{f:CoroutineMemoryLayout} 1375 \end{figure} 1376 1377 1378 \section{Concurrency} 1379 \label{s:Concurrency} 1380 1381 Concurrency is nondeterministic scheduling of independent sequential execution paths (threads), where each thread has its own stack. 1382 A single thread with multiple call stacks, \newterm{coroutining}~\cite{Conway63,Marlin80}, does \emph{not} imply concurrency~\cite[\S~2]{Buhr05a}. 1383 In coroutining, coroutines self-schedule the thread across stacks so execution is deterministic. 1384 (It is \emph{impossible} to generate a concurrency error when coroutining.) 1385 However, coroutines are a stepping stone towards concurrency. 1386 1387 The transition to concurrency, even for a single thread with multiple stacks, occurs when coroutines context switch to a \newterm{scheduling coroutine}, introducing non-determinism from the coroutine perspective~\cite[\S~3]{Buhr05a}.
1388 Therefore, a minimal concurrency system requires coroutines \emph{in conjunction with a nondeterministic scheduler}. 1389 The resulting execution system now follows a cooperative threading model~\cite{Adya02,libdill}, called \newterm{non-preemptive scheduling}. 1390 Adding \newterm{preemption} introduces non-cooperative scheduling, where context switching occurs randomly between any two instructions often based on a timer interrupt, called \newterm{preemptive scheduling}. 1391 While a scheduler introduces uncertain execution among explicit context switches, preemption introduces uncertainty by introducing implicit context switches. 1392 Uncertainty gives the illusion of parallelism on a single processor and provides a mechanism to access and increase performance on multiple processors. 1393 The reason is that the scheduler/runtime have complete knowledge about resources and how to best utilize them. 1394 However, the introduction of unrestricted nondeterminism results in the need for \newterm{mutual exclusion} and \newterm{synchronization}, which restrict nondeterminism for correctness; 1395 otherwise, it is impossible to write meaningful concurrent programs. 1396 Optimal concurrent performance is often obtained by having as much nondeterminism as mutual exclusion and synchronization correctness allow. 1397 1398 A scheduler can either be stackless or stackful. 1399 For stackless, the scheduler performs scheduling on the stack of the current coroutine and switches directly to the next coroutine, so there is one context switch. 1400 For stackful, the current coroutine switches to the scheduler, which performs scheduling, and it then switches to the next coroutine, so there are two context switches. 1401 The \CFA runtime uses a stackful scheduler for uniformity and security. 1402 1403 1404 \subsection{Thread} 1405 \label{s:threads} 1406 1407 Threading needs the ability to start a thread and wait for its completion.
1408 A common API for this ability is @fork@ and @join@. 1409 \begin{cquote} 1410 \begin{tabular}{@{}lll@{}} 1411 \multicolumn{1}{c}{\textbf{Java}} & \multicolumn{1}{c}{\textbf{\Celeven}} & \multicolumn{1}{c}{\textbf{pthreads}} \\ 1412 \begin{cfa} 1413 class MyTask extends Thread {...} 1414 mytask t = new MyTask(...); 1415 `t.start();` // start 1416 // concurrency 1417 `t.join();` // wait 1418 \end{cfa} 1419 & 1420 \begin{cfa} 1421 class MyTask { ... } // functor 1422 MyTask mytask; 1423 `thread t( mytask, ... );` // start 1424 // concurrency 1425 `t.join();` // wait 1426 \end{cfa} 1427 & 1428 \begin{cfa} 1429 void * rtn( void * arg ) {...} 1430 pthread_t t; int i = 3; 1431 `pthread_create( &t, rtn, (void *)i );` // start 1432 // concurrency 1433 `pthread_join( t, NULL );` // wait 1434 \end{cfa} 1435 \end{tabular} 1436 \end{cquote} 1437 \CFA has a simpler approach using a custom @thread@ type and leveraging declaration semantics (allocation/deallocation), where threads implicitly @fork@ after construction and @join@ before destruction. 1438 \begin{cfa} 1439 thread MyTask {}; 1440 void main( MyTask & this ) { ... } 1441 int main() { 1442 MyTask team`[10]`; $\C[2.5in]{// allocate stack-based threads, implicit start after construction}$ 1443 // concurrency 1444 } $\C{// deallocate stack-based threads, implicit joins before destruction}$ 1445 \end{cfa} 1446 This semantic ensures a thread is started and stopped exactly once, eliminating some programming error, and scales to multiple threads for basic (termination) synchronization. 1447 For block allocation to arbitrary depth, including recursion, threads are created/destroyed in a lattice structure (tree with top and bottom). 1448 Arbitrary topologies are possible using dynamic allocation, allowing threads to outlive their declaration scope, identical to normal dynamic allocation. 1449 \begin{cfa} 1450 MyTask * factory( int N ) { ... 
return `anew( N )`; } $\C{// allocate heap-based threads, implicit start after construction}$ 1451 int main() { 1452 MyTask * team = factory( 10 ); 1453 // concurrency 1454 `delete( team );` $\C{// deallocate heap-based threads, implicit joins before destruction}\CRT$ 1455 } 1456 \end{cfa} 1457 1458 Figure~\ref{s:ConcurrentMatrixSummation} shows concurrently adding the rows of a matrix and then totalling the subtotals sequentially, after all the row threads have terminated. 1459 The program uses heap-based threads because each thread needs different constructor values. 1460 (Python provides a simple iteration mechanism to initialize array elements to different values allowing stack allocation.) 1461 The allocation/deallocation pattern appears unusual because allocated objects are immediately deallocated without any intervening code. 1462 However, for threads, the deletion provides implicit synchronization, which is the intervening code. 1463 % While the subtotals are added in linear order rather than completion order, which slightly inhibits concurrency, the computation is restricted by the critical-path thread (\ie the thread that takes the longest), and so any inhibited concurrency is very small as totalling the subtotals is trivial. 
1464 1465 \begin{figure} 1466 \begin{cfa} 1467 `thread` Adder { int * row, cols, & subtotal; } $\C{// communication variables}$ 1468 void ?{}( Adder & adder, int row[], int cols, int & subtotal ) { 1469 adder.[ row, cols, &subtotal ] = [ row, cols, &subtotal ]; 1470 } 1471 void main( Adder & adder ) with( adder ) { 1472 subtotal = 0; 1473 for ( c; cols ) { subtotal += row[c]; } 1474 } 1475 int main() { 1476 const int rows = 10, cols = 1000; 1477 int matrix[rows][cols], subtotals[rows], total = 0; 1478 // read matrix 1479 Adder * adders[rows]; 1480 for ( r; rows; ) { $\C{// start threads to sum rows}$ 1481 adders[r] = `new( matrix[r], cols, &subtotals[r] );` 1482 } 1483 for ( r; rows ) { $\C{// wait for threads to finish}$ 1484 `delete( adders[r] );` $\C{// termination join}$ 1485 total += subtotals[r]; $\C{// total subtotal}$ 1486 } 1487 sout | total; 1488 } 1489 \end{cfa} 1490 \caption{Concurrent matrix summation} 1491 \label{s:ConcurrentMatrixSummation} 1492 \end{figure} 1493 1494 1495 \subsection{Thread Implementation} 1496 1497 Threads in \CFA are user level run by runtime kernel threads (see Section~\ref{s:CFARuntimeStructure}), where user threads provide concurrency and kernel threads provide parallelism. 1498 Like coroutines, and for the same design reasons, \CFA provides a custom @thread@ type and a @trait@ to enforce and restrict the task-interface functions. 1079 1499 \begin{cquote} 1080 1500 \begin{tabular}{@{}c@{\hspace{3\parindentlnth}}c@{}} 1081 1501 \begin{cfa} 1082 1502 thread myThread { 1083 //communication variables1503 ... 
// declaration/communication variables 1084 1504 }; 1085 1505 … … 1089 1509 \begin{cfa} 1090 1510 trait is_thread( `dtype` T ) { 1091 void main( T & );1092 thread_desc * get_thread( T & );1093 void ^?{}( T & `mutex` );1511 void main( T & ); 1512 thread_desc * get_thread( T & ); 1513 void ^?{}( T & `mutex` ); 1094 1514 }; 1095 1515 \end{cfa} 1096 1516 \end{tabular} 1097 1517 \end{cquote} 1098 (The qualifier @mutex@ for the destructor parameter is discussed in Section~\ref{s:Monitors}.) 1099 Like a coroutine, the statically-typed @main@ routine is the starting point (first stack frame) of a user thread. 1100 The difference is that a coroutine borrows a thread from its caller, so the first thread resuming a coroutine creates an instance of @main@; 1101 whereas, a user thread receives its own thread from the runtime system, which starts in @main@ as some point after the thread constructor is run.\footnote{ 1102 The \lstinline@main@ routine is already a special routine in C (where the program begins), so it is a natural extension of the semantics to use overloading to declare mains for different coroutines/threads (the normal main being the main of the initial thread).} 1103 No return value or additional parameters are necessary for this routine because the task type allows an arbitrary number of interface routines with corresponding arbitrary typed input/output values. 1104 1105 \begin{comment} % put in appendix with coroutine version ??? 1106 As such the @main@ routine of a thread can be defined as 1107 \begin{cfa} 1108 thread foo {}; 1109 1110 void main(foo & this) { 1111 sout | "Hello World!" | endl; 1112 } 1113 \end{cfa} 1114 1115 In this example, threads of type @foo@ start execution in the @void main(foo &)@ routine, which prints @"Hello World!".@ While this paper encourages this approach to enforce strongly typed programming, users may prefer to use the routine-based thread semantics for the sake of simplicity. 
1116 With the static semantics it is trivial to write a thread type that takes a routine pointer as a parameter and executes it on its stack asynchronously. 1117 \begin{cfa} 1118 typedef void (*voidRtn)(int); 1119 1120 thread RtnRunner { 1121 voidRtn func; 1122 int arg; 1123 }; 1124 1125 void ?{}(RtnRunner & this, voidRtn inRtn, int arg) { 1126 this.func = inRtn; 1127 this.arg = arg; 1128 } 1129 1130 void main(RtnRunner & this) { 1131 // thread starts here and runs the routine 1132 this.func( this.arg ); 1133 } 1134 1135 void hello(/*unused*/ int) { 1136 sout | "Hello World!" | endl; 1137 } 1138 1139 int main() { 1140 RtnRunner f = {hello, 42}; 1141 return 0? 1142 } 1143 \end{cfa} 1144 A consequence of the strongly typed approach to main is that memory layout of parameters and return values to/from a thread are now explicitly specified in the \textbf{api}. 1145 \end{comment} 1146 1147 For user threads to be useful, it must be possible to start and stop the underlying thread, and wait for it to complete execution. 1148 While using an API such as @fork@ and @join@ is relatively common, such an interface is awkward and unnecessary. 1149 A simple approach is to use allocation/deallocation principles, and have threads implicitly @fork@ after construction and @join@ before destruction. 1150 \begin{cfa} 1151 thread World {}; 1152 void main( World & this ) { 1153 sout | "World!" | endl; 1154 } 1155 int main() { 1156 World w`[10]`; $\C{// implicit forks after creation}$ 1157 sout | "Hello " | endl; $\C{// "Hello " and 10 "World!" printed concurrently}$ 1158 } $\C{// implicit joins before destruction}$ 1159 \end{cfa} 1160 This semantics ensures a thread is started and stopped exactly once, eliminating some programming error, and scales to multiple threads for basic (termination) synchronization. 
1161 This tree-structure (lattice) create/delete from C block-structure is generalized by using dynamic allocation, so threads can outlive the scope in which they are created, much like dynamically allocating memory lets objects outlive the scope in which they are created. 1162 \begin{cfa} 1163 int main() { 1164 MyThread * heapLived; 1165 { 1166 MyThread blockLived; $\C{// fork block-based thread}$ 1167 heapLived = `new`( MyThread ); $\C{// fork heap-based thread}$ 1168 ... 1169 } $\C{// join block-based thread}$ 1170 ... 1171 `delete`( heapLived ); $\C{// join heap-based thread}$ 1172 } 1173 \end{cfa} 1174 The heap-based approach allows arbitrary thread-creation topologies, with respect to fork/join-style concurrency. 1175 1176 Figure~\ref{s:ConcurrentMatrixSummation} shows concurrently adding the rows of a matrix and then totalling the subtotals sequential, after all the row threads have terminated. 1177 The program uses heap-based threads because each thread needs different constructor values. 1178 (Python provides a simple iteration mechanism to initialize array elements to different values allowing stack allocation.) 1179 The allocation/deallocation pattern appears unusual because allocated objects are immediately deleted without any intervening code. 1180 However, for threads, the deletion provides implicit synchronization, which is the intervening code. 1181 While the subtotals are added in linear order rather than completion order, which slight inhibits concurrency, the computation is restricted by the critical-path thread (\ie the thread that takes the longest), and so any inhibited concurrency is very small as totalling the subtotals is trivial. 
1182 1183 \begin{figure} 1184 \begin{cfa} 1185 thread Adder { 1186 int * row, cols, & subtotal; $\C{// communication}$ 1187 }; 1188 void ?{}( Adder & adder, int row[], int cols, int & subtotal ) { 1189 adder.[ row, cols, &subtotal ] = [ row, cols, &subtotal ]; 1190 } 1191 void main( Adder & adder ) with( adder ) { 1192 subtotal = 0; 1193 for ( int c = 0; c < cols; c += 1 ) { 1194 subtotal += row[c]; 1195 } 1196 } 1197 int main() { 1198 const int rows = 10, cols = 1000; 1199 int matrix[rows][cols], subtotals[rows], total = 0; 1200 // read matrix 1201 Adder * adders[rows]; 1202 for ( int r = 0; r < rows; r += 1 ) { $\C{// start threads to sum rows}$ 1203 adders[r] = new( matrix[r], cols, &subtotals[r] ); 1204 } 1205 for ( int r = 0; r < rows; r += 1 ) { $\C{// wait for threads to finish}$ 1206 delete( adders[r] ); $\C{// termination join}$ 1207 total += subtotals[r]; $\C{// total subtotal}$ 1208 } 1209 sout | total | endl; 1210 } 1211 \end{cfa} 1212 \caption{Concurrent Matrix Summation} 1213 \label{s:ConcurrentMatrixSummation} 1214 \end{figure} 1518 Like coroutines, the @dtype@ property prevents \emph{implicit} copy operations and the @is_thread@ trait provides no \emph{explicit} copy operations, so threads must be passed by reference (pointer). 1519 Similarly, the function definitions ensure there is a statically typed @main@ function that is the thread starting point (first stack frame), a mechanism to get (read) the thread descriptor from its handle, and a special destructor to prevent deallocation while the thread is executing. 1520 (The qualifier @mutex@ for the destructor parameter is discussed in Section~\ref{s:Monitor}.) 1521 The difference between the coroutine and thread is that a coroutine borrows a thread from its caller, so the first thread resuming a coroutine creates the coroutine's stack and starts running the coroutine main on the stack; 1522 whereas, a thread is scheduled for execution in @main@ immediately after its constructor is run.
1523 No return value or additional parameters are necessary for this function because the @thread@ type allows an arbitrary number of interface functions with corresponding arbitrary typed input/output values. 1215 1524 1216 1525 1217 1526 \section{Mutual Exclusion / Synchronization} 1218 1219 Uncontrolled non-deterministic execution is meaningless. 1220 To reestablish meaningful execution requires mechanisms to reintroduce determinism (\ie restrict non-determinism), called mutual exclusion and synchronization, where mutual exclusion is an access-control mechanism on data shared by threads, and synchronization is a timing relationship among threads~\cite[\S~4]{Buhr05a}.1221 Since many deterministic challenges appear with the use of mutable shared state, some languages/libraries disallow it, \eg Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, Akka~\cite{Akka} (Scala).1222 In these paradigms, interaction among concurrent objects is performed by stateless message-passing~\cite{Thoth,Harmony,V-Kernel} or other paradigms closely relate to networking concepts (\eg channels~\cite{CSP,Go}).1223 However, in call/return-based languages, these approaches force a clear distinction (\ie introduce a new programming paradigm) between regular and concurrent computation (\ie routine call versus message passing).1224 Hence, a programmer must learn and manipulate two sets of design patterns.1527 \label{s:MutualExclusionSynchronization} 1528 1529 Unrestricted nondeterminism is meaningless as there is no way to know when the result is completed without synchronization. 1530 To produce meaningful execution requires clawing back some determinism using mutual exclusion and synchronization, where mutual exclusion provides access control for threads using shared data, and synchronization is a timing relationship among threads~\cite[\S~4]{Buhr05a}. 
1531 Some concurrent systems eliminate mutable shared-state by switching to stateless communication like message passing~\cite{Thoth,Harmony,V-Kernel,MPI} (Erlang, MPI), channels~\cite{CSP} (CSP,Go), actors~\cite{Akka} (Akka, Scala), or functional techniques (Haskell). 1532 However, these approaches introduce a new communication mechanism for concurrency different from the standard communication using function call/return. 1533 Hence, a programmer must learn and manipulate two sets of design/programming patterns. 1225 1534 While this distinction can be hidden away in library code, effective use of the library still has to take both paradigms into account. 1226 In contrast, approaches based on stateful l models more closely resemble the standard call/return programming-model, resulting in a single programming paradigm.1227 1228 At the lowest level, concurrent control is implemented by atomic operations, upon which different kinds of lock s mechanismare constructed, \eg semaphores~\cite{Dijkstra68b}, barriers, and path expressions~\cite{Campbell74}.1535 In contrast, approaches based on stateful models more closely resemble the standard call/return programming model, resulting in a single programming paradigm. 1536 1537 At the lowest level, concurrent control is implemented by atomic operations, upon which different kinds of locking mechanisms are constructed, \eg semaphores~\cite{Dijkstra68b}, barriers, and path expressions~\cite{Campbell74}. 1229 1538 However, for productivity it is always desirable to use the highest-level construct that provides the necessary efficiency~\cite{Hochstein05}. 1230 1539 A newer approach for restricting non-determinism is transactional memory~\cite{Herlihy93}. 
1231 While this approach is pursued in hardware~\cite{Nakaike15} and system languages, like \CC~\cite{Cpp-Transactions}, the performance and feature set is still too restrictive to be the main concurrency paradigm for system languages, which is why it was rejected as the core paradigm for concurrency in \CFA.1540 While this approach is pursued in hardware~\cite{Nakaike15} and system languages, like \CC~\cite{Cpp-Transactions}, the performance and feature set is still too restrictive to be the main concurrency paradigm for system languages, which is why it is rejected as the core paradigm for concurrency in \CFA. 1232 1541 1233 1542 One of the most natural, elegant, and efficient mechanisms for mutual exclusion and synchronization for shared-memory systems is the \emph{monitor}. 1234 First proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}, many concurrent programming -languages provide monitors as an explicit language construct: \eg Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java}.1543 First proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}, many concurrent programming languages provide monitors as an explicit language construct: \eg Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java}. 1235 1544 In addition, operating-system kernels and device drivers have a monitor-like structure, although they often use lower-level primitives such as mutex locks or semaphores to simulate monitors. 
1236 For these reasons, \CFA selected monitors as the core high-level concurrency -construct, upon which higher-level approaches can be easily constructed.1545 For these reasons, \CFA selected monitors as the core high-level concurrency construct, upon which higher-level approaches can be easily constructed. 1237 1546 1238 1547 1239 1548 \subsection{Mutual Exclusion} 1240 1549 1241 A group of instructions manipulating a specific instance of shared data that must be performed atomically is called an (individual) \newterm{critical-section}~\cite{Dijkstra65}. 1242 The generalization is called a \newterm{group critical-section}~\cite{Joung00}, where multiple tasks with the same session may use the resource simultaneously, but different sessions may not use the resource simultaneously. 1243 The readers/writer problem~\cite{Courtois71} is an instance of a group critical-section, where readers have the same session and all writers have a unique session. 1244 \newterm{Mutual exclusion} enforces that the correct kind and number of threads are using a critical section. 1550 A group of instructions manipulating a specific instance of shared data that must be performed atomically is called a \newterm{critical section}~\cite{Dijkstra65}, which is enforced by \newterm{simple mutual-exclusion}. 1551 The generalization is called a \newterm{group critical-section}~\cite{Joung00}, where multiple tasks with the same session use the resource simultaneously and different sessions are segregated, which is enforced by \newterm{complex mutual-exclusion} providing the correct kind and number of threads using a group critical-section. 1552 The readers/writer problem~\cite{Courtois71} is an instance of a group critical-section, where readers share a session but writers have a unique session. 1245 1553 1246 1554 However, many solutions exist for mutual exclusion, which vary in terms of performance, flexibility and ease of use. 
1247 1555 Methods range from low-level locks, which are fast and flexible but require significant attention for correctness, to higher-level concurrency techniques, which sacrifice some performance to improve ease of use. 1248 Ease of use comes by either guaranteeing some problems cannot occur (\eg deadlock free), or by offering a more explicit coupling between shared data and critical section.1249 For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations (\eg reading/writing)for numerical types.1556 Ease of use comes by either guaranteeing some problems cannot occur, \eg deadlock free, or by offering a more explicit coupling between shared data and critical section. 1557 For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations, \eg reading/writing, for numerical types. 1250 1558 However, a significant challenge with locks is composability because it takes careful organization for multiple locks to be used while preventing deadlock. 1251 1559 Easing composability is another feature higher-level mutual-exclusion mechanisms can offer. … … 1256 1564 Synchronization enforces relative ordering of execution, and synchronization tools provide numerous mechanisms to establish these timing relationships. 
1257 1565 Low-level synchronization primitives offer good performance and flexibility at the cost of ease of use; 1258 higher-level mechanisms often simplify usage by adding better coupling between synchronization and data (\eg message passing), or offering a simpler solution to otherwise involved challenges, \eg barrier lock.1259 Often synchronization is used to order access to a critical section, \eg ensuring a reader thread is the next kind of thread to enter a critical section.1260 If a writer thread is scheduled for next access, but another reader thread acquires the critical section first, that reader has \newterm{barged}.1566 higher-level mechanisms often simplify usage by adding better coupling between synchronization and data, \eg receive-specific versus receive-any thread in message passing or offering specialized solutions, \eg barrier lock. 1567 Often synchronization is used to order access to a critical section, \eg ensuring a waiting writer thread enters the critical section before a calling reader thread. 1568 If the calling reader is scheduled before the waiting writer, the reader has barged. 1261 1569 Barging can result in staleness/freshness problems, where a reader barges ahead of a writer and reads temporally stale data, or a writer barges ahead of another writer overwriting data with a fresh value preventing the previous value from ever being read (lost computation). 1262 Preventing or detecting barging is an involved challenge with low-level locks, which can be made much easier by higher-level constructs. 1263 This challenge is often split into two different approaches: barging avoidance and barging prevention. 1264 Algorithms that allow a barger, but divert it until later using current synchronization state (flags), are avoiding the barger; 1265 algorithms that preclude a barger from entering during synchronization in the critical section prevent barging completely. 
1266 Techniques like baton-pass locks~\cite{Andrews89} between threads instead of unconditionally releasing locks is an example of barging prevention. 1267 1268 1269 \section{Monitors} 1270 \label{s:Monitors} 1271 1272 A \textbf{monitor} is a set of routines that ensure mutual exclusion when accessing shared state. 1273 More precisely, a monitor is a programming technique that binds mutual exclusion to routine scope, as opposed to locks, where mutual-exclusion is defined by acquire/release calls, independent of lexical context (analogous to block and heap storage allocation). 1274 The strong association with the call/return paradigm eases programmability, readability and maintainability, at a slight cost in flexibility and efficiency. 1275 1276 Note, like coroutines/threads, both locks and monitors require an abstract handle to reference them, because at their core, both mechanisms are manipulating non-copyable shared state. 1277 Copying a lock is insecure because it is possible to copy an open lock and then use the open copy when the original lock is closed to simultaneously access the shared data. 1278 Copying a monitor is secure because both the lock and shared data are copies, but copying the shared data is meaningless because it no longer represents a unique entity. 1279 As for coroutines/tasks, a non-copyable (@dtype@) trait is used to capture this requirement, so all locks/monitors must be passed by reference (pointer). 1570 Preventing or detecting barging is an involved challenge with low-level locks, which is made easier through higher-level constructs. 1571 This challenge is often split into two different approaches: barging avoidance and prevention. 1572 Algorithms that unconditionally releasing a lock for competing threads to acquire use barging avoidance during synchronization to force a barging thread to wait; 1573 algorithms that conditionally hold locks during synchronization, \eg baton-passing~\cite{Andrews89}, prevent barging completely. 
1574 1575 1576 \section{Monitor} 1577 \label{s:Monitor} 1578 1579 A \textbf{monitor} is a set of functions that ensure mutual exclusion when accessing shared state. 1580 More precisely, a monitor is a programming technique that implicitly binds mutual exclusion to static function scope, as opposed to locks, where mutual-exclusion is defined by acquire/release calls, independent of lexical context (analogous to block and heap storage allocation). 1581 Restricting acquire/release points eases programming, comprehension, and maintenance, at a slight cost in flexibility and efficiency. 1582 \CFA uses a custom @monitor@ type and leverages declaration semantics (deallocation) to protect active or waiting threads in a monitor. 1583 1584 The following is a \CFA monitor implementation of an atomic counter. 1585 \begin{cfa}[morekeywords=nomutex] 1586 `monitor` Aint { int cnt; }; $\C[4.25in]{// atomic integer counter}$ 1587 int ++?( Aint & `mutex`$\(_{opt}\)$ this ) with( this ) { return ++cnt; } $\C{// increment}$ 1588 int ?=?( Aint & `mutex`$\(_{opt}\)$ lhs, int rhs ) with( lhs ) { cnt = rhs; } $\C{// conversions with int}\CRT$ 1589 int ?=?( int & lhs, Aint & `mutex`$\(_{opt}\)$ rhs ) with( rhs ) { lhs = cnt; } 1590 \end{cfa} 1591 % The @Aint@ constructor, @?{}@, uses the \lstinline[morekeywords=nomutex]@nomutex@ qualifier indicating mutual exclusion is unnecessary during construction because an object is inaccessible (private) until after it is initialized. 1592 % (While a constructor may publish its address into a global variable, doing so generates a race-condition.) 1593 The prefix increment operation, @++?@, is normally @mutex@, indicating mutual exclusion is necessary during function execution, to protect the incrementing from race conditions, unless there is an atomic increment instruction for the implementation type. 
1594 The assignment operators provide bidirectional conversion between an atomic and normal integer without accessing field @cnt@; 1595 these operations only need @mutex@, if reading/writing the implementation type is not atomic. 1596 The atomic counter is used without any explicit mutual-exclusion and provides thread-safe semantics, which is similar to the \CC template @std::atomic@. 1597 \begin{cfa} 1598 int i = 0, j = 0, k = 5; 1599 Aint x = { 0 }, y = { 0 }, z = { 5 }; $\C{// no mutex required}$ 1600 ++x; ++y; ++z; $\C{// safe increment by multiple threads}$ 1601 x = 2; y = i; z = k; $\C{// conversions}$ 1602 i = x; j = y; k = z; 1603 \end{cfa} 1604 1605 \CFA monitors have \newterm{multi-acquire} semantics so the thread in the monitor may acquire it multiple times without deadlock, allowing recursion and calling other interface functions. 1606 \begin{cfa} 1607 monitor M { ... } m; 1608 void foo( M & mutex m ) { ... } $\C{// acquire mutual exclusion}$ 1609 void bar( M & mutex m ) { $\C{// acquire mutual exclusion}$ 1610 ... `bar( m );` ... `foo( m );` ... $\C{// reacquire mutual exclusion}$ 1611 } 1612 \end{cfa} 1613 \CFA monitors also ensure the monitor lock is released regardless of how an acquiring function ends (normal or exceptional), and returning a shared variable is safe via copying before the lock is released. 1614 Similar safety is offered by \emph{explicit} mechanisms like \CC RAII; 1615 monitor \emph{implicit} safety ensures no programmer usage errors. 1616 Furthermore, RAII mechanisms cannot handle complex synchronization within a monitor, where the monitor lock may not be released on function exit because it is passed to an unblocking thread; 1617 RAII is purely a mutual-exclusion mechanism (see Section~\ref{s:Scheduling}). 1618 1619 1620 \subsection{Monitor Implementation} 1621 1622 For the same design reasons, \CFA provides a custom @monitor@ type and a @trait@ to enforce and restrict the monitor-interface functions. 
1623 \begin{cquote} 1624 \begin{tabular}{@{}c@{\hspace{3\parindentlnth}}c@{}} 1625 \begin{cfa} 1626 monitor M { 1627 ... // shared data 1628 }; 1629 1630 \end{cfa} 1631 & 1280 1632 \begin{cfa} 1281 1633 trait is_monitor( `dtype` T ) { … … 1284 1636 }; 1285 1637 \end{cfa} 1638 \end{tabular} 1639 \end{cquote} 1640 The @dtype@ property prevents \emph{implicit} copy operations and the @is_monitor@ trait provides no \emph{explicit} copy operations, so monitors must be passed by reference (pointer). 1641 % Copying a lock is insecure because it is possible to copy an open lock and then use the open copy when the original lock is closed to simultaneously access the shared data. 1642 % Copying a monitor is secure because both the lock and shared data are copies, but copying the shared data is meaningless because it no longer represents a unique entity. 1643 Similarly, the function definitions ensure there is a mechanism to get (read) the monitor descriptor from its handle, and a special destructor to prevent deallocation if a thread is using the shared data. 1644 The custom monitor type also inserts any locks needed to implement the mutual exclusion semantics. 1286 1645 1287 1646 … … 1289 1648 \label{s:MutexAcquisition} 1290 1649 1291 While correctness implicitly implies a monitor's mutual exclusion is acquired and released, there are implementation options about when and where the locking/unlocking occurs.1650 While the monitor lock provides mutual exclusion for shared data, there are implementation options for when and where the locking/unlocking occurs. 1292 1651 (Much of this discussion also applies to basic locks.) 1293 For example, a monitor may need to be passed through multiple helper routines before it becomes necessary to acquire the monitor mutual-exclusion. 
1294 \begin{cfa}[morekeywords=nomutex] 1295 monitor Aint { int cnt; }; $\C{// atomic integer counter}$ 1296 void ?{}( Aint & `nomutex` this ) with( this ) { cnt = 0; } $\C{// constructor}$ 1297 int ?=?( Aint & `mutex`$\(_{opt}\)$ lhs, int rhs ) with( lhs ) { cnt = rhs; } $\C{// conversions}$ 1298 void ?{}( int & this, Aint & `mutex`$\(_{opt}\)$ v ) { this = v.cnt; } 1299 int ?=?( int & lhs, Aint & `mutex`$\(_{opt}\)$ rhs ) with( rhs ) { lhs = cnt; } 1300 int ++?( Aint & `mutex`$\(_{opt}\)$ this ) with( this ) { return ++cnt; } $\C{// increment}$ 1301 \end{cfa} 1302 The @Aint@ constructor, @?{}@, uses the \lstinline[morekeywords=nomutex]@nomutex@ qualifier indicating mutual exclusion is unnecessary during construction because an object is inaccessible (private) until after it is initialized. 1303 (While a constructor may publish its address into a global variable, doing so generates a race-condition.) 1304 The conversion operators for initializing and assigning with a normal integer only need @mutex@, if reading/writing the implementation type is not atomic. 1305 Finally, the prefix increment operator, @++?@, is normally @mutex@ to protect the incrementing from race conditions, unless there is an atomic increment instruction for the implementation type. 1306 1307 The atomic counter is used without any explicit mutual-exclusion and provides thread-safe semantics, which is similar to the \CC template @std::atomic@. 1308 \begin{cfa} 1309 Aint x, y, z; 1310 ++x; ++y; ++z; $\C{// safe increment by multiple threads}$ 1311 x = 2; y = 2; z = 2; $\C{// conversions}$ 1312 int i = x, j = y, k = z; 1313 i = x; j = y; k = z; 1314 \end{cfa} 1315 1316 For maximum usability, monitors have \newterm{multi-acquire} semantics allowing a thread to acquire it multiple times without deadlock. 
1317 For example, atomically printing the contents of a binary tree: 1318 \begin{cfa} 1319 monitor Tree { 1320 Tree * left, right; 1321 // value 1322 }; 1323 void print( Tree & mutex tree ) { $\C{// prefix traversal}$ 1324 // write value 1325 print( tree->left ); $\C{// multiply acquire monitor lock on each recursion}$ 1326 print( tree->right ); 1327 } 1328 \end{cfa} 1329 1330 Mandatory monitor qualifiers have the benefit of being self-documented, but requiring both @mutex@ and \lstinline[morekeywords=nomutex]@nomutex@ for all monitor parameter is redundant. 1331 Instead, one of qualifier semantics can be the default, and the other required. 1332 For example, assume the safe @mutex@ option for a monitor parameter because assuming \lstinline[morekeywords=nomutex]@nomutex@ may cause subtle errors. 1333 On the other hand, assuming \lstinline[morekeywords=nomutex]@nomutex@ is the \emph{normal} parameter behaviour, stating explicitly ``this parameter is not special''. 1652 For example, a monitor may be passed through multiple helper functions before it is necessary to acquire the monitor's mutual exclusion. 1653 1654 The benefit of mandatory monitor qualifiers is self-documentation, but requiring both @mutex@ and \lstinline[morekeywords=nomutex]@nomutex@ for all monitor parameters is redundant. 1655 Instead, the semantics has one qualifier as the default and the other required. 1656 For example, make the safe @mutex@ qualifier the default because assuming \lstinline[morekeywords=nomutex]@nomutex@ may cause subtle errors. 1657 Alternatively, make the unsafe \lstinline[morekeywords=nomutex]@nomutex@ qualifier the default because it is the \emph{normal} parameter semantics while @mutex@ parameters are rare. 1334 1658 Providing a default qualifier implies knowing whether a parameter is a monitor. 
1335 Since \CFA relies heavily on traits as an abstraction mechanism, the distinction between a type that is a monitor and a type that looks like a monitor can become blurred.1659 Since \CFA relies heavily on traits as an abstraction mechanism, types can coincidentally match the monitor trait but not be a monitor, similar to inheritance where a shape and playing card can both be drawable. 1336 1660 For this reason, \CFA requires programmers to identify the kind of parameter with the @mutex@ keyword and uses no keyword to mean \lstinline[morekeywords=nomutex]@nomutex@. 1337 1661 1338 1662 The next semantic decision is establishing which parameter \emph{types} may be qualified with @mutex@. 1339 Given: 1663 The following has monitor parameter types that are composed of multiple objects. 1340 1664 \begin{cfa} 1341 1665 monitor M { ... } 1342 int f1( M & mutex m ); 1343 int f2( M * mutex m ); 1344 int f3( M * mutex m[] ); 1345 int f4( stack( M * ) & mutex m ); 1346 \end{cfa} 1347 the issue is that some of these parameter types are composed of multiple objects. 1348 For @f1@, there is only a single parameter object. 1349 Adding indirection in @f2@ still identifies a single object. 1350 However, the matrix in @f3@ introduces multiple objects. 1351 While shown shortly, multiple acquisition is possible; 1352 however array lengths are often unknown in C. 1353 This issue is exacerbated in @f4@, where the data structure must be safely traversed to acquire all of its elements. 1354 1355 To make the issue tractable, \CFA only acquires one monitor per parameter with at most one level of indirection. 1356 However, the C type-system has an ambiguity with respect to arrays. 1357 Is the argument for @f2@ a single object or an array of objects? 1358 If it is an array, only the first element of the array is acquired, which seems unsafe; 1359 hence, @mutex@ is disallowed for array parameters. 
1360 \begin{cfa} 1361 int f1( M & mutex m ); $\C{// allowed: recommended case}$ 1362 int f2( M * mutex m ); $\C{// disallowed: could be an array}$ 1363 int f3( M mutex m[$\,$] ); $\C{// disallowed: array length unknown}$ 1364 int f4( M ** mutex m ); $\C{// disallowed: could be an array}$ 1365 int f5( M * mutex m[$\,$] ); $\C{// disallowed: array length unknown}$ 1366 \end{cfa} 1367 % Note, not all array routines have distinct types: @f2@ and @f3@ have the same type, as do @f4@ and @f5@. 1368 % However, even if the code generation could tell the difference, the extra information is still not sufficient to extend meaningfully the monitor call semantic. 1369 1370 For object-oriented monitors, calling a mutex member \emph{implicitly} acquires mutual exclusion of the receiver object, @`rec`.foo(...)@. 1371 \CFA has no receiver, and hence, must use an explicit mechanism to specify which object has mutual exclusion acquired. 1372 A positive consequence of this design decision is the ability to support multi-monitor routines. 1373 \begin{cfa} 1374 int f( M & mutex x, M & mutex y ); $\C{// multiple monitor parameter of any type}$ 1375 M m1, m2; 1376 f( m1, m2 ); 1377 \end{cfa} 1378 (While object-oriented monitors can be extended with a mutex qualifier for multiple-monitor members, no prior example of this feature could be found.) 1379 In practice, writing multi-locking routines that do not deadlock is tricky. 1380 Having language support for such a feature is therefore a significant asset for \CFA. 1381 1382 The capability to acquire multiple locks before entering a critical section is called \newterm{bulk acquire}. 1383 In the previous example, \CFA guarantees the order of acquisition is consistent across calls to different routines using the same monitors as arguments. 1384 This consistent ordering means acquiring multiple monitors is safe from deadlock. 1385 However, users can force the acquiring order. 
1386 For example, notice the use of @mutex@/\lstinline[morekeywords=nomutex]@nomutex@ and how this affects the acquiring order: 1387 \begin{cfa} 1388 void foo( M & mutex m1, M & mutex m2 ); $\C{// acquire m1 and m2}$ 1666 int f1( M & mutex m ); $\C{// single parameter object}$ 1667 int f2( M * mutex m ); $\C{// single or multiple parameter object}$ 1668 int f3( M * mutex m[$\,$] ); $\C{// multiple parameter object}$ 1669 int f4( stack( M * ) & mutex m ); $\C{// multiple parameters object}$ 1670 \end{cfa} 1671 Function @f1@ has a single parameter object, while @f2@'s indirection could be a single or multi-element array, where static array size is often unknown in C. 1672 Function @f3@ has a multiple object matrix, and @f4@ a multiple object data structure. 1673 While shown shortly, multiple object acquisition is possible, but the number of objects must be statically known. 1674 Therefore, \CFA only acquires one monitor per parameter with at most one level of indirection, excluding pointers as it is impossible to statically determine the size. 1675 1676 For object-oriented monitors, \eg Java, calling a mutex member \emph{implicitly} acquires mutual exclusion of the receiver object, @`rec`.foo(...)@. 1677 \CFA has no receiver, and hence, the explicit @mutex@ qualifier is used to specify which objects acquire mutual exclusion. 1678 A positive consequence of this design decision is the ability to support multi-monitor functions,\footnote{ 1679 While object-oriented monitors can be extended with a mutex qualifier for multiple-monitor members, no prior example of this feature could be found.} 1680 called \newterm{bulk acquire}. 1681 \CFA guarantees acquisition order is consistent across calls to @mutex@ functions using the same monitors as arguments, so acquiring multiple monitors is safe from deadlock. 
1682 Figure~\ref{f:BankTransfer} shows a trivial solution to the bank transfer problem~\cite{BankTransfer}, where two resources must be locked simultaneously, using \CFA monitors with implicit locking and \CC with explicit locking. 1683 A \CFA programmer only has to manage when to acquire mutual exclusion; 1684 a \CC programmer must select the correct lock and acquisition mechanism from a panoply of locking options. 1685 Making good choices for common cases in \CFA simplifies the programming experience and enhances safety. 1686 1687 \begin{figure} 1688 \centering 1689 \begin{lrbox}{\myboxA} 1690 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 1691 monitor BankAccount { 1692 1693 int balance; 1694 } b1 = { 0 }, b2 = { 0 }; 1695 void deposit( BankAccount & `mutex` b, 1696 int deposit ) with(b) { 1697 balance += deposit; 1698 } 1699 void transfer( BankAccount & `mutex` my, 1700 BankAccount & `mutex` your, int me2you ) { 1701 1702 deposit( my, -me2you ); // debit 1703 deposit( your, me2you ); // credit 1704 } 1705 `thread` Person { BankAccount & b1, & b2; }; 1706 void main( Person & person ) with(person) { 1707 for ( 10_000_000 ) { 1708 if ( random() % 3 ) deposit( b1, 3 ); 1709 if ( random() % 3 ) transfer( b1, b2, 7 ); 1710 } 1711 } 1712 int main() { 1713 `Person p1 = { b1, b2 }, p2 = { b2, b1 };` 1714 1715 } // wait for threads to complete 1716 \end{cfa} 1717 \end{lrbox} 1718 1719 \begin{lrbox}{\myboxB} 1720 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 1721 struct BankAccount { 1722 `recursive_mutex m;` 1723 int balance = 0; 1724 } b1, b2; 1725 void deposit( BankAccount & b, int deposit ) { 1726 `scoped_lock lock( b.m );` 1727 b.balance += deposit; 1728 } 1729 void transfer( BankAccount & my, 1730 BankAccount & your, int me2you ) { 1731 `scoped_lock lock( my.m, your.m );` 1732 deposit( my, -me2you ); // debit 1733 deposit( your, me2you ); // credit 1734 } 1735 1736 void person( BankAccount & b1, BankAccount & b2 ) { 1737 for ( int i = 0; i < 10$'$000$'$000; i += 1 ) { 1738 if 
( random() % 3 ) deposit( b1, 3 ); 1739 if ( random() % 3 ) transfer( b1, b2, 7 ); 1740 } 1741 } 1742 int main() { 1743 `thread p1(person, ref(b1), ref(b2)), p2(person, ref(b2), ref(b1));` 1744 `p1.join(); p2.join();` 1745 } 1746 \end{cfa} 1747 \end{lrbox} 1748 1749 \subfloat[\CFA]{\label{f:CFABank}\usebox\myboxA} 1750 \hspace{3pt} 1751 \vrule 1752 \hspace{3pt} 1753 \subfloat[\CC]{\label{f:C++Bank}\usebox\myboxB} 1754 \hspace{3pt} 1755 \caption{Bank transfer problem} 1756 \label{f:BankTransfer} 1757 \end{figure} 1758 1759 Users can still force the acquiring order by using @mutex@/\lstinline[morekeywords=nomutex]@nomutex@. 1760 \begin{cfa} 1761 void foo( M & mutex m1, M & mutex m2 ); $\C{// acquire m1 and m2}$ 1389 1762 void bar( M & mutex m1, M & /* nomutex */ m2 ) { $\C{// acquire m1}$ 1390 ... foo( m1, m2 ); ... $\C{// acquire m2}$1763 ... foo( m1, m2 ); ... $\C{// acquire m2}$ 1391 1764 } 1392 1765 void baz( M & /* nomutex */ m1, M & mutex m2 ) { $\C{// acquire m2}$ 1393 ... foo( m1, m2 ); ... $\C{// acquire m1}$ 1394 } 1395 \end{cfa} 1396 The multi-acquire semantics allows @bar@ or @baz@ to acquire a monitor lock and reacquire it in @foo@. 1397 In the calls to @bar@ and @baz@, the monitors are acquired in opposite order. 1398 1399 However, such use leads to lock acquiring order problems resulting in deadlock~\cite{Lister77}, where detecting it requires dynamically tracking of monitor calls, and dealing with it requires implement rollback semantics~\cite{Dice10}. 1400 In \CFA, safety is guaranteed by using bulk acquire of all monitors to shared objects, whereas other monitor systems provide no aid. 1401 While \CFA provides only a partial solution, the \CFA partial solution handles many useful cases. 1402 \begin{cfa} 1403 monitor Bank { ... 
}; 1404 void deposit( Bank & `mutex` b, int deposit ); 1405 void transfer( Bank & `mutex` mybank, Bank & `mutex` yourbank, int me2you) { 1406 deposit( mybank, `-`me2you ); $\C{// debit}$ 1407 deposit( yourbank, me2you ); $\C{// credit}$ 1408 } 1409 \end{cfa} 1410 This example shows a trivial solution to the bank-account transfer problem~\cite{BankTransfer}. 1411 Without multi- and bulk acquire, the solution to this problem requires careful engineering. 1412 1413 1414 \subsection{\protect\lstinline|mutex| statement} \label{mutex-stmt} 1415 1416 The monitor call-semantics associate all locking semantics to routines. 1417 Like Java, \CFA offers an alternative @mutex@ statement to reduce refactoring and naming. 1766 ... foo( m1, m2 ); ... $\C{// acquire m1}$ 1767 } 1768 \end{cfa} 1769 The bulk-acquire semantics allow @bar@ or @baz@ to acquire a monitor lock and reacquire it in @foo@. 1770 The calls to @bar@ and @baz@ acquired the monitors in opposite order, possibly resulting in deadlock. 1771 However, this case is the simplest instance of the \emph{nested-monitor problem}~\cite{Lister77}, where monitors are acquired in sequence versus bulk. 1772 Detecting the nested-monitor problem requires dynamic tracking of monitor calls, and dealing with it requires rollback semantics~\cite{Dice10}. 1773 \CFA does not deal with this fundamental problem. 1774 1775 Finally, like Java, \CFA offers an alternative @mutex@ statement to reduce refactoring and naming. 1418 1776 \begin{cquote} 1419 \begin{tabular}{@{}c|@{\hspace{\parindentlnth}}c@{}} 1420 routine call & @mutex@ statement \\ 1421 \begin{cfa} 1422 monitor M {}; 1777 \renewcommand{\arraystretch}{0.0} 1778 \begin{tabular}{@{}l@{\hspace{3\parindentlnth}}l@{}} 1779 \multicolumn{1}{c}{\textbf{\lstinline@mutex@ call}} & \multicolumn{1}{c}{\lstinline@mutex@ \textbf{statement}} \\ 1780 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 1781 monitor M { ... 
}; 1423 1782 void foo( M & mutex m1, M & mutex m2 ) { 1424 1783 // critical section … … 1429 1788 \end{cfa} 1430 1789 & 1431 \begin{cfa} 1790 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 1432 1791 1433 1792 void bar( M & m1, M & m2 ) { … … 1442 1801 1443 1802 1444 \section{Internal Scheduling} 1445 \label{s:InternalScheduling} 1446 1447 While monitor mutual-exclusion provides safe access to shared data, the monitor data may indicate that a thread accessing it cannot proceed, \eg a bounded buffer, Figure~\ref{f:GenericBoundedBuffer}, may be full/empty so produce/consumer threads must block. 1803 \subsection{Scheduling} 1804 \label{s:Scheduling} 1805 1806 % There are many aspects of scheduling in a concurrency system, all related to resource utilization by waiting threads, \ie which thread gets the resource next. 1807 % Different forms of scheduling include access to processors by threads (see Section~\ref{s:RuntimeStructureCluster}), another is access to a shared resource by a lock or monitor. 1808 This section discusses monitor scheduling for waiting threads eligible for entry, \ie which thread gets the shared resource next. (See Section~\ref{s:RuntimeStructureCluster} for scheduling threads on virtual processors.) 1809 While monitor mutual-exclusion provides safe access to shared data, the monitor data may indicate that a thread accessing it cannot proceed, \eg a bounded buffer may be full/empty so produce/consumer threads must block. 1448 1810 Leaving the monitor and trying again (busy waiting) is impractical for high-level programming. 
1449 Monitors eliminate busy waiting by providing internal synchronization to schedule threads needing access to the shared data, where the synchronization is blocking (threads are parked)versus spinning.1450 The synchronization is generally achieved with internal~\cite{Hoare74} or external~\cite[\S~2.9.2]{uC++} scheduling, where \newterm{scheduling} is defined as indicating which thread acquires the critical section next.1811 Monitors eliminate busy waiting by providing synchronization to schedule threads needing access to the shared data, where threads block versus spinning. 1812 Synchronization is generally achieved with internal~\cite{Hoare74} or external~\cite[\S~2.9.2]{uC++} scheduling. 1451 1813 \newterm{Internal scheduling} is characterized by each thread entering the monitor and making an individual decision about proceeding or blocking, while \newterm{external scheduling} is characterized by an entering thread making a decision about proceeding for itself and on behalf of other threads attempting entry. 1452 1453 Figure~\ref{f:BBInt} shows a \CFA bounded-buffer with internal scheduling, where producers/consumers enter the monitor, see the buffer is full/empty, and block on an appropriate condition lock, @full@/@empty@. 1454 The @wait@ routine atomically blocks the calling thread and implicitly releases the monitor lock(s) for all monitors in the routine's parameter list. 1455 The appropriate condition lock is signalled to unblock an opposite kind of thread after an element is inserted/removed from the buffer. 1456 Signalling is unconditional, because signalling an empty condition lock does nothing. 1457 Signalling semantics cannot have the signaller and signalled thread in the monitor simultaneously, which means: 1458 \begin{enumerate} 1459 \item 1460 The signalling thread returns immediately, and the signalled thread continues. 
1461 \item 1462 The signalling thread continues and the signalled thread is marked for urgent unblocking at the next scheduling point (exit/wait). 1463 \item 1464 The signalling thread blocks but is marked for urgent unblocking at the next scheduling point and the signalled thread continues. 1465 \end{enumerate} 1466 The first approach is too restrictive, as it precludes solving a reasonable class of problems (\eg dating service). 1467 \CFA supports the next two semantics as both are useful. 1468 Finally, while it is common to store a @condition@ as a field of the monitor, in \CFA, a @condition@ variable can be created/stored independently. 1469 Furthermore, a condition variable is tied to a \emph{group} of monitors on first use (called \newterm{branding}), which means that using internal scheduling with distinct sets of monitors requires one condition variable per set of monitors. 1814 Finally, \CFA monitors do not allow calling threads to barge ahead of signalled threads, which simplifies synchronization among threads in the monitor and increases correctness. 1815 If barging is allowed, synchronization between a signaller and signallee is difficult, often requiring additional flags and multiple unblock/block cycles. 1816 In fact, signals-as-hints is completely opposite from that proposed by Hoare in the seminal paper on monitors~\cite[p.~550]{Hoare74}. 1817 % \begin{cquote} 1818 % However, we decree that a signal operation be followed immediately by resumption of a waiting program, without possibility of an intervening procedure call from yet a third program. 
1819 % It is only in this way that a waiting program has an absolute guarantee that it can acquire the resource just released by the signalling program without any danger that a third program will interpose a monitor entry and seize the resource instead.~\cite[p.~550]{Hoare74} 1820 % \end{cquote} 1821 Furthermore, \CFA concurrency has no spurious wakeup~\cite[\S~9]{Buhr05a}, which eliminates an implicit form of self barging. 1822 Hence, a \CFA @wait@ statement is not enclosed in a @while@ loop retesting a blocking predicate, which can cause thread starvation due to barging. 1823 1824 Figure~\ref{f:MonitorScheduling} shows general internal/external scheduling (for the bounded-buffer example in Figure~\ref{f:InternalExternalScheduling}). 1825 External calling threads block on the calling queue, if the monitor is occupied, otherwise they enter in FIFO order. 1826 Internal threads block on condition queues via @wait@ and reenter from the condition in FIFO order. 1827 Alternatively, internal threads block on urgent from the @signal_block@ or @waitfor@, and reenter implicitly when the monitor becomes empty, \ie, the thread in the monitor exits or waits. 1828 1829 There are three signalling mechanisms to unblock waiting threads to enter the monitor. 1830 Note, signalling cannot have the signaller and signalled thread in the monitor simultaneously because of the mutual exclusion, so either the signaller or signallee can proceed. 1831 For internal scheduling, threads are unblocked from condition queues using @signal@, where the signallee is moved to urgent and the signaller continues (solid line). 1832 Multiple signals move multiple signallees to urgent until the condition is empty. 1833 When the signaller exits or waits, a thread blocked on urgent is processed before calling threads to prevent barging. 1834 (Java conceptually moves the signalled thread to the calling queue, and hence, allows barging.) 
1835 The alternative unblock is in the opposite order using @signal_block@, where the signaller is moved to urgent and the signallee continues (dashed line), and is implicitly unblocked from urgent when the signallee exits or waits. 1836 1837 For external scheduling, the condition queues are not used; 1838 instead threads are unblocked directly from the calling queue using @waitfor@ based on function names requesting mutual exclusion. 1839 (The linear search through the calling queue to locate a particular call can be reduced to $O(1)$.) 1840 The @waitfor@ has the same semantics as @signal_block@, where the signalled thread executes before the signallee, which waits on urgent. 1841 Executing multiple @waitfor@s from different signalled functions causes the calling threads to move to urgent. 1842 External scheduling requires urgent to be a stack, because the signaller expects to execute immediately after the specified monitor call has exited or waited. 1843 Internal scheduling behaves the same for an urgent stack or queue, except for multiple signalling, where the threads unblock from urgent in reverse order from signalling. 1844 If the restart order is important, multiple signalling by a signal thread can be transformed into daisy-chain signalling among threads, where each thread signals the next thread. 1845 We tried both a stack for @waitfor@ and queue for signalling, but that resulted in complex semantics about which thread enters next. 1846 Hence, \CFA uses a single urgent stack to correctly handle @waitfor@ and adequately support both forms of signalling. 
1847 1848 \begin{figure} 1849 \centering 1850 % \subfloat[Scheduling Statements] { 1851 % \label{fig:SchedulingStatements} 1852 % {\resizebox{0.45\textwidth}{!}{\input{CondSigWait.pstex_t}}} 1853 \input{CondSigWait.pstex_t} 1854 % }% subfloat 1855 % \quad 1856 % \subfloat[Bulk acquire monitor] { 1857 % \label{fig:BulkMonitor} 1858 % {\resizebox{0.45\textwidth}{!}{\input{ext_monitor.pstex_t}}} 1859 % }% subfloat 1860 \caption{Monitor Scheduling} 1861 \label{f:MonitorScheduling} 1862 \end{figure} 1863 1864 Figure~\ref{f:BBInt} shows a \CFA generic bounded-buffer with internal scheduling, where producers/consumers enter the monitor, detect the buffer is full/empty, and block on an appropriate condition variable, @full@/@empty@. 1865 The @wait@ function atomically blocks the calling thread and implicitly releases the monitor lock(s) for all monitors in the function's parameter list. 1866 The appropriate condition variable is signalled to unblock an opposite kind of thread after an element is inserted/removed from the buffer. 1867 Signalling is unconditional, because signalling an empty condition variable does nothing. 1868 It is common to declare condition variables as monitor fields to prevent shared access, hence no locking is required for access as the conditions are protected by the monitor lock. 1869 In \CFA, a condition variable can be created/stored independently. 1870 % To still prevent expensive locking on access, a condition variable is tied to a \emph{group} of monitors on first use, called \newterm{branding}, resulting in a low-cost boolean test to detect sharing from other monitors. 1871 1872 % Signalling semantics cannot have the signaller and signalled thread in the monitor simultaneously, which means: 1873 % \begin{enumerate} 1874 % \item 1875 % The signalling thread returns immediately and the signalled thread continues. 
1876 % \item 1877 % The signalling thread continues and the signalled thread is marked for urgent unblocking at the next scheduling point (exit/wait). 1878 % \item 1879 % The signalling thread blocks but is marked for urgent unblocking at the next scheduling point and the signalled thread continues. 1880 % \end{enumerate} 1881 % The first approach is too restrictive, as it precludes solving a reasonable class of problems, \eg dating service (see Figure~\ref{f:DatingService}). 1882 % \CFA supports the next two semantics as both are useful. 1470 1883 1471 1884 \begin{figure} … … 1481 1894 }; 1482 1895 void ?{}( Buffer(T) & buffer ) with(buffer) { 1483 [front, back, count]= 0;1896 front = back = count = 0; 1484 1897 } 1485 1486 1898 void insert( Buffer(T) & mutex buffer, T elem ) 1487 1899 with(buffer) { … … 1500 1912 \end{lrbox} 1501 1913 1914 % \newbox\myboxB 1915 % \begin{lrbox}{\myboxB} 1916 % \begin{cfa}[aboveskip=0pt,belowskip=0pt] 1917 % forall( otype T ) { // distribute forall 1918 % monitor Buffer { 1919 % 1920 % int front, back, count; 1921 % T elements[10]; 1922 % }; 1923 % void ?{}( Buffer(T) & buffer ) with(buffer) { 1924 % [front, back, count] = 0; 1925 % } 1926 % T remove( Buffer(T) & mutex buffer ); // forward 1927 % void insert( Buffer(T) & mutex buffer, T elem ) 1928 % with(buffer) { 1929 % if ( count == 10 ) `waitfor( remove, buffer )`; 1930 % // insert elem into buffer 1931 % 1932 % } 1933 % T remove( Buffer(T) & mutex buffer ) with(buffer) { 1934 % if ( count == 0 ) `waitfor( insert, buffer )`; 1935 % // remove elem from buffer 1936 % 1937 % return elem; 1938 % } 1939 % } 1940 % \end{cfa} 1941 % \end{lrbox} 1942 1502 1943 \newbox\myboxB 1503 1944 \begin{lrbox}{\myboxB} 1504 1945 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 1505 forall( otype T ) { // distribute forall 1506 monitor Buffer { 1507 1508 int front, back, count; 1509 T elements[10]; 1510 }; 1511 void ?{}( Buffer(T) & buffer ) with(buffer) { 1512 [front, back, count] = 0; 1513 } 1514 T 
remove( Buffer(T) & mutex buffer ); // forward 1515 void insert( Buffer(T) & mutex buffer, T elem ) 1516 with(buffer) { 1517 if ( count == 10 ) `waitfor( remove, buffer )`; 1518 // insert elem into buffer 1519 1520 } 1521 T remove( Buffer(T) & mutex buffer ) with(buffer) { 1522 if ( count == 0 ) `waitfor( insert, buffer )`; 1523 // remove elem from buffer 1524 1525 return elem; 1526 } 1527 } 1946 monitor ReadersWriter { 1947 int rcnt, wcnt; // readers/writer using resource 1948 }; 1949 void ?{}( ReadersWriter & rw ) with(rw) { 1950 rcnt = wcnt = 0; 1951 } 1952 void EndRead( ReadersWriter & mutex rw ) with(rw) { 1953 rcnt -= 1; 1954 } 1955 void EndWrite( ReadersWriter & mutex rw ) with(rw) { 1956 wcnt = 0; 1957 } 1958 void StartRead( ReadersWriter & mutex rw ) with(rw) { 1959 if ( wcnt > 0 ) `waitfor( EndWrite, rw );` 1960 rcnt += 1; 1961 } 1962 void StartWrite( ReadersWriter & mutex rw ) with(rw) { 1963 if ( wcnt > 0 ) `waitfor( EndWrite, rw );` 1964 else while ( rcnt > 0 ) `waitfor( EndRead, rw );` 1965 wcnt = 1; 1966 } 1967 1528 1968 \end{cfa} 1529 1969 \end{lrbox} 1530 1970 1531 \subfloat[Internal Scheduling]{\label{f:BBInt}\usebox\myboxA} 1532 %\qquad 1533 \subfloat[External Scheduling]{\label{f:BBExt}\usebox\myboxB} 1534 \caption{Generic Bounded-Buffer} 1535 \label{f:GenericBoundedBuffer} 1971 \subfloat[Generic bounded buffer, internal scheduling]{\label{f:BBInt}\usebox\myboxA} 1972 \hspace{3pt} 1973 \vrule 1974 \hspace{3pt} 1975 \subfloat[Readers / writer lock, external scheduling]{\label{f:RWExt}\usebox\myboxB} 1976 1977 \caption{Internal / external scheduling} 1978 \label{f:InternalExternalScheduling} 1536 1979 \end{figure} 1537 1980 1538 Figure~\ref{f:BBExt} shows a \CFA bounded-buffer with external scheduling, where producers/consumers detecting a full/empty buffer block and prevent more producers/consumers from entering the monitor until the buffer has a free/empty slot. 
1539 External scheduling is controlled by the @waitfor@ statement, which atomically blocks the calling thread, releases the monitor lock, and restricts the routine calls that can next acquire mutual exclusion. 1981 Figure~\ref{f:BBInt} can be transformed into external scheduling by removing the condition variables and signals/waits, and adding the following lines at the locations of the current @wait@s in @insert@/@remove@, respectively. 1982 \begin{cfa}[aboveskip=2pt,belowskip=1pt] 1983 if ( count == 10 ) `waitfor( remove, buffer )`; | if ( count == 0 ) `waitfor( insert, buffer )`; 1984 \end{cfa} 1985 Here, the producers/consumers detect a full/\-empty buffer and prevent more producers/consumers from entering the monitor until there is a free/empty slot in the buffer. 1986 External scheduling is controlled by the @waitfor@ statement, which atomically blocks the calling thread, releases the monitor lock, and restricts the function calls that can next acquire mutual exclusion. 1540 1987 If the buffer is full, only calls to @remove@ can acquire the buffer, and if the buffer is empty, only calls to @insert@ can acquire the buffer. 1541 Threads making calls to routines that are currently excluded block outside (externally) of the monitor on a calling queue, versus blocking on condition queues inside the monitor. 1542 1543 Both internal and external scheduling extend to multiple monitors in a natural way. 1544 \begin{cfa} 1545 monitor M { `condition e`; ... }; 1546 void foo( M & mutex m1, M & mutex m2 ) { 1547 ... wait( `e` ); ... $\C{// wait( e, m1, m2 )}$ 1548 ... wait( `e, m1` ); ... 1549 ... wait( `e, m2` ); ... 1550 } 1551 1552 void rtn$\(_1\)$( M & mutex m1, M & mutex m2 ); 1553 void rtn$\(_2\)$( M & mutex m1 ); 1554 void bar( M & mutex m1, M & mutex m2 ) { 1555 ... waitfor( `rtn` ); ... $\C{// waitfor( rtn\(_1\), m1, m2 )}$ 1556 ... waitfor( `rtn, m1` ); ... 
$\C{// waitfor( rtn\(_2\), m1 )}$ 1557 } 1558 \end{cfa} 1559 For @wait( e )@, the default semantics is to atomically block the signaller and release all acquired mutex types in the parameter list, \ie @wait( e, m1, m2 )@. 1560 To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @wait( e, m1 )@. 1561 Wait statically verifies the released monitors are the acquired mutex-parameters so unconditional release is safe. 1562 Similarly, for @waitfor( rtn, ... )@, the default semantics is to atomically block the acceptor and release all acquired mutex types in the parameter list, \ie @waitfor( rtn, m1, m2 )@. 1563 To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @waitfor( rtn, m1 )@. 1564 Waitfor statically verifies the released monitors are the same as the acquired mutex-parameters of the given routine or routine pointer. 1565 To statically verify the released monitors match with the accepted routine's mutex parameters, the routine (pointer) prototype must be accessible. 1566 1567 Given the ability to release a subset of acquired monitors can result in a \newterm{nested monitor}~\cite{Lister77} deadlock. 1568 \begin{cfa} 1569 void foo( M & mutex m1, M & mutex m2 ) { 1570 ... wait( `e, m1` ); ... $\C{// release m1, keeping m2 acquired )}$ 1571 void baz( M & mutex m1, M & mutex m2 ) { $\C{// must acquire m1 and m2 )}$ 1572 ... signal( `e` ); ... 1573 \end{cfa} 1574 The @wait@ only releases @m1@ so the signalling thread cannot acquire both @m1@ and @m2@ to enter @baz@ to get to the @signal@. 1575 While deadlock issues can occur with multiple/nesting acquisition, this issue results from the fact that locks, and by extension monitors, are not perfectly composable. 1576 1577 Finally, an important aspect of monitor implementation is barging, \ie can calling threads barge ahead of signalled threads? 
1578 If barging is allowed, synchronization between a signaller and signallee is difficult, often requiring multiple unblock/block cycles (looping around a wait rechecking if a condition is met). 1579 \begin{quote} 1580 However, we decree that a signal operation be followed immediately by resumption of a waiting program, without possibility of an intervening procedure call from yet a third program. 1581 It is only in this way that a waiting program has an absolute guarantee that it can acquire the resource just released by the signalling program without any danger that a third program will interpose a monitor entry and seize the resource instead.~\cite[p.~550]{Hoare74} 1582 \end{quote} 1583 \CFA scheduling \emph{precludes} barging, which simplifies synchronization among threads in the monitor and increases correctness. 1584 For example, there are no loops in either bounded buffer solution in Figure~\ref{f:GenericBoundedBuffer}. 1585 Supporting barging prevention as well as extending internal scheduling to multiple monitors is the main source of complexity in the design and implementation of \CFA concurrency. 1586 1587 1588 \subsection{Barging Prevention} 1589 1590 Figure~\ref{f:BargingPrevention} shows \CFA code where bulk acquire adds complexity to the internal-signalling semantics. 1591 The complexity begins at the end of the inner @mutex@ statement, where the semantics of internal scheduling need to be extended for multiple monitors. 1592 The problem is that bulk acquire is used in the inner @mutex@ statement where one of the monitors is already acquired. 1593 When the signalling thread reaches the end of the inner @mutex@ statement, it should transfer ownership of @m1@ and @m2@ to the waiting thread to prevent barging into the outer @mutex@ statement by another thread. 1594 However, both the signalling and signalled threads still need monitor @m1@. 
1595 1596 \begin{figure} 1597 \newbox\myboxA 1598 \begin{lrbox}{\myboxA} 1599 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 1600 monitor M m1, m2; 1601 condition c; 1602 mutex( m1 ) { 1603 ... 1604 mutex( m1, m2 ) { 1605 ... `wait( c )`; // block and release m1, m2 1606 // m1, m2 acquired 1607 } // $\LstCommentStyle{\color{red}release m2}$ 1608 // m1 acquired 1609 } // release m1 1610 \end{cfa} 1611 \end{lrbox} 1612 1613 \newbox\myboxB 1614 \begin{lrbox}{\myboxB} 1615 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 1616 1617 1618 mutex( m1 ) { 1619 ... 1620 mutex( m1, m2 ) { 1621 ... `signal( c )`; ... 1622 // m1, m2 acquired 1623 } // $\LstCommentStyle{\color{red}release m2}$ 1624 // m1 acquired 1625 } // release m1 1626 \end{cfa} 1627 \end{lrbox} 1628 1629 \newbox\myboxC 1630 \begin{lrbox}{\myboxC} 1631 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 1632 1633 1634 mutex( m1 ) { 1635 ... `wait( c )`; ... 1636 // m1 acquired 1637 } // $\LstCommentStyle{\color{red}release m1}$ 1638 1639 1640 1641 1642 \end{cfa} 1643 \end{lrbox} 1644 1645 \begin{cquote} 1646 \subfloat[Waiting Thread]{\label{f:WaitingThread}\usebox\myboxA} 1647 \hspace{2\parindentlnth} 1648 \subfloat[Signalling Thread]{\label{f:SignallingThread}\usebox\myboxB} 1649 \hspace{2\parindentlnth} 1650 \subfloat[Other Waiting Thread]{\label{f:SignallingThread}\usebox\myboxC} 1651 \end{cquote} 1652 \caption{Barging Prevention} 1653 \label{f:BargingPrevention} 1654 \end{figure} 1655 1656 The obvious solution to the problem of multi-monitor scheduling is to keep ownership of all locks until the last lock is ready to be transferred. 1657 It can be argued that that moment is when the last lock is no longer needed, because this semantics fits most closely to the behaviour of single-monitor scheduling. 
1658 This solution has the main benefit of transferring ownership of groups of monitors, which simplifies the semantics from multiple objects to a single group of objects, effectively making the existing single-monitor semantic viable by simply changing monitors to monitor groups. 1659 This solution releases the monitors once every monitor in a group can be released. 1660 However, since some monitors are never released (\eg the monitor of a thread), this interpretation means a group might never be released. 1661 A more interesting interpretation is to transfer the group until all its monitors are released, which means the group is not passed further and a thread can retain its locks. 1662 1663 However, listing \ref{f:int-secret} shows this solution can become much more complicated depending on what is executed while secretly holding B at line \ref{line:secret}, while avoiding the need to transfer ownership of a subset of the condition monitors. 1664 Figure~\ref{f:dependency} shows a slightly different example where a third thread is waiting on monitor @A@, using a different condition variable. 1665 Because the third thread is signalled when secretly holding @B@, the goal becomes unreachable. 1666 Depending on the order of signals (listing \ref{f:dependency} line \ref{line:signal-ab} and \ref{line:signal-a}) two cases can happen: 1667 1668 \begin{comment} 1669 \paragraph{Case 1: thread $\alpha$ goes first.} In this case, the problem is that monitor @A@ needs to be passed to thread $\beta$ when thread $\alpha$ is done with it. 1670 \paragraph{Case 2: thread $\beta$ goes first.} In this case, the problem is that monitor @B@ needs to be retained and passed to thread $\alpha$ along with monitor @A@, which can be done directly or possibly using thread $\beta$ as an intermediate. 1671 \\ 1672 1673 Note that ordering is not determined by a race condition but by whether signalled threads are enqueued in FIFO or FILO order. 
1674 However, regardless of the answer, users can move line \ref{line:signal-a} before line \ref{line:signal-ab} and get the reverse effect for listing \ref{f:dependency}. 1675 1676 In both cases, the threads need to be able to distinguish, on a per monitor basis, which ones need to be released and which ones need to be transferred, which means knowing when to release a group becomes complex and inefficient (see next section) and therefore effectively precludes this approach. 1677 1678 1679 \subsubsection{Dependency graphs} 1680 1681 \begin{figure} 1682 \begin{multicols}{3} 1683 Thread $\alpha$ 1684 \begin{cfa}[numbers=left, firstnumber=1] 1685 acquire A 1686 acquire A & B 1687 wait A & B 1688 release A & B 1689 release A 1690 \end{cfa} 1691 \columnbreak 1692 Thread $\gamma$ 1693 \begin{cfa}[numbers=left, firstnumber=6, escapechar=|] 1694 acquire A 1695 acquire A & B 1696 |\label{line:signal-ab}|signal A & B 1697 |\label{line:release-ab}|release A & B 1698 |\label{line:signal-a}|signal A 1699 |\label{line:release-a}|release A 1700 \end{cfa} 1701 \columnbreak 1702 Thread $\beta$ 1703 \begin{cfa}[numbers=left, firstnumber=12, escapechar=|] 1704 acquire A 1705 wait A 1706 |\label{line:release-aa}|release A 1707 \end{cfa} 1708 \end{multicols} 1709 \begin{cfa}[caption={Pseudo-code for the three thread example.},label={f:dependency}] 1710 \end{cfa} 1711 \begin{center} 1712 \input{dependency} 1713 \end{center} 1714 \caption{Dependency graph of the statements in listing \ref{f:dependency}} 1715 \label{fig:dependency} 1716 \end{figure} 1717 1718 In listing \ref{f:int-bulk-cfa}, there is a solution that satisfies both barging prevention and mutual exclusion. 1719 If ownership of both monitors is transferred to the waiter when the signaller releases @A & B@ and then the waiter transfers back ownership of @A@ back to the signaller when it releases it, then the problem is solved (@B@ is no longer in use at this point). 
1720 Dynamically finding the correct order is therefore the second possible solution. 1721 The problem is effectively resolving a dependency graph of ownership requirements. 1722 Here even the simplest of code snippets requires two transfers and has a super-linear complexity. 1723 This complexity can be seen in listing \ref{f:explosion}, which is just a direct extension to three monitors, requires at least three ownership transfer and has multiple solutions. 1724 Furthermore, the presence of multiple solutions for ownership transfer can cause deadlock problems if a specific solution is not consistently picked; In the same way that multiple lock acquiring order can cause deadlocks. 1725 \begin{figure} 1726 \begin{multicols}{2} 1727 \begin{cfa} 1728 acquire A 1729 acquire B 1730 acquire C 1731 wait A & B & C 1732 release C 1733 release B 1734 release A 1735 \end{cfa} 1736 1737 \columnbreak 1738 1739 \begin{cfa} 1740 acquire A 1741 acquire B 1742 acquire C 1743 signal A & B & C 1744 release C 1745 release B 1746 release A 1747 \end{cfa} 1748 \end{multicols} 1749 \begin{cfa}[caption={Extension to three monitors of listing \ref{f:int-bulk-cfa}},label={f:explosion}] 1750 \end{cfa} 1751 \end{figure} 1752 1753 Given the three threads example in listing \ref{f:dependency}, figure \ref{fig:dependency} shows the corresponding dependency graph that results, where every node is a statement of one of the three threads, and the arrows the dependency of that statement (\eg $\alpha1$ must happen before $\alpha2$). 1754 The extra challenge is that this dependency graph is effectively post-mortem, but the runtime system needs to be able to build and solve these graphs as the dependencies unfold. 1755 Resolving dependency graphs being a complex and expensive endeavour, this solution is not the preferred one. 1756 1757 \subsubsection{Partial Signalling} \label{partial-sig} 1758 \end{comment} 1759 1760 Finally, the solution that is chosen for \CFA is to use partial signalling. 
1761 Again using listing \ref{f:int-bulk-cfa}, the partial signalling solution transfers ownership of monitor @B@ at lines \ref{line:signal1} to the waiter but does not wake the waiting thread since it is still using monitor @A@. 1762 Only when it reaches line \ref{line:lastRelease} does it actually wake up the waiting thread. 1763 This solution has the benefit that complexity is encapsulated into only two actions: passing monitors to the next owner when they should be released and conditionally waking threads if all conditions are met. 1764 This solution has a much simpler implementation than a dependency graph solving algorithms, which is why it was chosen. 1765 Furthermore, after being fully implemented, this solution does not appear to have any significant downsides. 1766 1767 Using partial signalling, listing \ref{f:dependency} can be solved easily: 1768 \begin{itemize} 1769 \item When thread $\gamma$ reaches line \ref{line:release-ab} it transfers monitor @B@ to thread $\alpha$ and continues to hold monitor @A@. 1770 \item When thread $\gamma$ reaches line \ref{line:release-a} it transfers monitor @A@ to thread $\beta$ and wakes it up. 1771 \item When thread $\beta$ reaches line \ref{line:release-aa} it transfers monitor @A@ to thread $\alpha$ and wakes it up. 1772 \end{itemize} 1773 1774 1775 \subsection{Signalling: Now or Later} 1988 Threads calling excluded functions block outside of (external to) the monitor on the calling queue, versus blocking on condition queues inside of (internal to) the monitor. 1989 Figure~\ref{f:RWExt} shows a readers/writer lock written using external scheduling, where a waiting reader detects a writer using the resource and restricts further calls until the writer exits by calling @EndWrite@. 1990 The writer does a similar action for each reader or writer using the resource. 1991 Note, no new calls to @StartRead@/@StartWrite@ may occur when waiting for the call to @EndRead@/@EndWrite@. 
1992 External scheduling allows waiting for events from other threads while restricting unrelated events, that would otherwise have to wait on conditions in the monitor. 1993 The mechanism can be done in terms of control flow, \eg Ada @accept@ or \uC @_Accept@, or in terms of data, \eg Go @select@ on channels. 1994 While both mechanisms have strengths and weaknesses, this project uses the control-flow mechanism to be consistent with other language features. 1995 % Two challenges specific to \CFA for external scheduling are loose object-definitions (see Section~\ref{s:LooseObjectDefinitions}) and multiple-monitor functions (see Section~\ref{s:Multi-MonitorScheduling}). 1996 1997 Figure~\ref{f:DatingService} shows a dating service demonstrating non-blocking and blocking signalling. 1998 The dating service matches girl and boy threads with matching compatibility codes so they can exchange phone numbers. 1999 A thread blocks until an appropriate partner arrives. 2000 The complexity is exchanging phone numbers in the monitor because of the mutual-exclusion property. 2001 For signal scheduling, the @exchange@ condition is necessary to block the thread finding the match, while the matcher unblocks to take the opposite number, post its phone number, and unblock the partner. 2002 For signal-block scheduling, the implicit urgent-queue replaces the explicit @exchange@-condition and @signal_block@ puts the finding thread on the urgent condition and unblocks the matcher. 2003 The dating service is an example of a monitor that cannot be written using external scheduling because it requires knowledge of calling parameters to make scheduling decisions, and parameters of waiting threads are unavailable; 2004 as well, an arriving thread may not find a partner and must wait, which requires a condition variable, and condition variables imply internal scheduling. 
2005 Furthermore, barging corrupts the dating service during an exchange because a barger may also match and change the phone numbers, invalidating the previous exchange phone number. 2006 Putting loops around the @wait@s does not correct the problem; 2007 the simple solution must be restructured to account for barging. 1776 2008 1777 2009 \begin{figure} … … 1784 2016 int GirlPhNo, BoyPhNo; 1785 2017 condition Girls[CCodes], Boys[CCodes]; 1786 condition exchange;2018 `condition exchange;` 1787 2019 }; 1788 2020 int girl( DS & mutex ds, int phNo, int ccode ) { … … 1790 2022 wait( Girls[ccode] ); 1791 2023 GirlPhNo = phNo; 1792 exchange.signal();2024 `signal( exchange );` 1793 2025 } else { 1794 2026 GirlPhNo = phNo; 1795 signal( Boys[ccode] );1796 exchange.wait();1797 } // if2027 `signal( Boys[ccode] );` 2028 `wait( exchange );` 2029 } 1798 2030 return BoyPhNo; 1799 2031 } … … 1820 2052 } else { 1821 2053 GirlPhNo = phNo; // make phone number available 1822 signal_block( Boys[ccode] );// restart boy2054 `signal_block( Boys[ccode] );` // restart boy 1823 2055 1824 2056 } // if … … 1834 2066 \qquad 1835 2067 \subfloat[\lstinline@signal_block@]{\label{f:DatingSignalBlock}\usebox\myboxB} 1836 \caption{Dating service .}1837 \label{f:Dating service}2068 \caption{Dating service} 2069 \label{f:DatingService} 1838 2070 \end{figure} 1839 2071 1840 An important note is that, until now, signalling a monitor was a delayed operation. 1841 The ownership of the monitor is transferred only when the monitor would have otherwise been released, not at the point of the @signal@ statement. 1842 However, in some cases, it may be more convenient for users to immediately transfer ownership to the thread that is waiting for cooperation, which is achieved using the @signal_block@ routine. 1843 1844 The example in table \ref{tbl:datingservice} highlights the difference in behaviour. 
1845 As mentioned, @signal@ only transfers ownership once the current critical section exits; this behaviour requires additional synchronization when a two-way handshake is needed. 1846 To avoid this explicit synchronization, the @condition@ type offers the @signal_block@ routine, which handles the two-way handshake as shown in the example. 1847 This feature removes the need for a second condition variables and simplifies programming. 1848 Like every other monitor semantic, @signal_block@ uses barging prevention, which means mutual-exclusion is baton-passed both on the front end and the back end of the call to @signal_block@, meaning no other thread can acquire the monitor either before or after the call. 1849 1850 % ====================================================================== 1851 % ====================================================================== 1852 \section{External scheduling} \label{extsched} 1853 % ====================================================================== 1854 % ====================================================================== 1855 An alternative to internal scheduling is external scheduling (see Table~\ref{tbl:sched}). 
1856 1857 \begin{comment} 1858 \begin{table} 1859 \begin{tabular}{|c|c|c|} 1860 Internal Scheduling & External Scheduling & Go\\ 1861 \hline 1862 \begin{uC++}[tabsize=3] 1863 _Monitor Semaphore { 1864 condition c; 1865 bool inUse; 1866 public: 1867 void P() { 1868 if(inUse) 1869 wait(c); 1870 inUse = true; 1871 } 1872 void V() { 1873 inUse = false; 1874 signal(c); 1875 } 1876 } 1877 \end{uC++}&\begin{uC++}[tabsize=3] 1878 _Monitor Semaphore { 1879 1880 bool inUse; 1881 public: 1882 void P() { 1883 if(inUse) 1884 _Accept(V); 1885 inUse = true; 1886 } 1887 void V() { 1888 inUse = false; 1889 1890 } 1891 } 1892 \end{uC++}&\begin{Go}[tabsize=3] 1893 type MySem struct { 1894 inUse bool 1895 c chan bool 1896 } 1897 1898 // acquire 1899 func (s MySem) P() { 1900 if s.inUse { 1901 select { 1902 case <-s.c: 1903 } 1904 } 1905 s.inUse = true 1906 } 1907 1908 // release 1909 func (s MySem) V() { 1910 s.inUse = false 1911 1912 // This actually deadlocks 1913 // when single thread 1914 s.c <- false 1915 } 1916 \end{Go} 2072 In summation, for internal scheduling, non-blocking signalling (as in the producer/consumer example) is used when the signaller is providing the cooperation for a waiting thread; 2073 the signaller enters the monitor and changes state, detects a waiting threads that can use the state, performs a non-blocking signal on the condition queue for the waiting thread, and exits the monitor to run concurrently. 2074 The waiter unblocks next from the urgent queue, uses/takes the state, and exits the monitor. 2075 Blocking signal is the reverse, where the waiter is providing the cooperation for the signalling thread; 2076 the signaller enters the monitor, detects a waiting thread providing the necessary state, performs a blocking signal to place it on the urgent queue and unblock the waiter. 2077 The waiter changes state and exits the monitor, and the signaller unblocks next from the urgent queue to use/take the state. 
2078 2079 Both internal and external scheduling extend to multiple monitors in a natural way. 2080 \begin{cquote} 2081 \begin{tabular}{@{}l@{\hspace{3\parindentlnth}}l@{}} 2082 \begin{cfa} 2083 monitor M { `condition e`; ... }; 2084 void foo( M & mutex m1, M & mutex m2 ) { 2085 ... wait( `e` ); ... // wait( e, m1, m2 ) 2086 ... wait( `e, m1` ); ... 2087 ... wait( `e, m2` ); ... 2088 } 2089 \end{cfa} 2090 & 2091 \begin{cfa} 2092 void rtn$\(_1\)$( M & mutex m1, M & mutex m2 ); 2093 void rtn$\(_2\)$( M & mutex m1 ); 2094 void bar( M & mutex m1, M & mutex m2 ) { 2095 ... waitfor( `rtn` ); ... // $\LstCommentStyle{waitfor( rtn\(_1\), m1, m2 )}$ 2096 ... waitfor( `rtn, m1` ); ... // $\LstCommentStyle{waitfor( rtn\(_2\), m1 )}$ 2097 } 2098 \end{cfa} 1917 2099 \end{tabular} 1918 \caption{Different forms of scheduling.} 1919 \label{tbl:sched} 1920 \end{table} 1921 \end{comment} 1922 1923 This method is more constrained and explicit, which helps users reduce the non-deterministic nature of concurrency. 1924 Indeed, as the following examples demonstrate, external scheduling allows users to wait for events from other threads without the concern of unrelated events occurring. 1925 External scheduling can generally be done either in terms of control flow (\eg Ada with @accept@, \uC with @_Accept@) or in terms of data (\eg Go with channels). 1926 Of course, both of these paradigms have their own strengths and weaknesses, but for this project, control-flow semantics was chosen to stay consistent with the rest of the languages semantics. 1927 Two challenges specific to \CFA arise when trying to add external scheduling with loose object definitions and multiple-monitor routines. 1928 The previous example shows a simple use @_Accept@ versus @wait@/@signal@ and its advantages. 1929 Note that while other languages often use @accept@/@select@ as the core external scheduling keyword, \CFA uses @waitfor@ to prevent name collisions with existing socket \textbf{api}s. 
1930 1931 For the @P@ member above using internal scheduling, the call to @wait@ only guarantees that @V@ is the last routine to access the monitor, allowing a third routine, say @isInUse()@, acquire mutual exclusion several times while routine @P@ is waiting. 1932 On the other hand, external scheduling guarantees that while routine @P@ is waiting, no other routine than @V@ can acquire the monitor. 1933 1934 % ====================================================================== 1935 % ====================================================================== 1936 \subsection{Loose Object Definitions} 1937 % ====================================================================== 1938 % ====================================================================== 1939 In \uC, a monitor class declaration includes an exhaustive list of monitor operations. 1940 Since \CFA is not object oriented, monitors become both more difficult to implement and less clear for a user: 1941 1942 \begin{cfa} 1943 monitor A {}; 1944 1945 void f(A & mutex a); 1946 void g(A & mutex a) { 1947 waitfor(f); // Obvious which f() to wait for 1948 } 1949 1950 void f(A & mutex a, int); // New different F added in scope 1951 void h(A & mutex a) { 1952 waitfor(f); // Less obvious which f() to wait for 1953 } 1954 \end{cfa} 1955 1956 Furthermore, external scheduling is an example where implementation constraints become visible from the interface. 1957 Here is the cfa-code for the entering phase of a monitor: 1958 \begin{center} 1959 \begin{tabular}{l} 1960 \begin{cfa} 1961 if monitor is free 1962 enter 1963 elif already own the monitor 1964 continue 1965 elif monitor accepts me 1966 enter 1967 else 1968 block 1969 \end{cfa} 1970 \end{tabular} 1971 \end{center} 1972 For the first two conditions, it is easy to implement a check that can evaluate the condition in a few instructions. 1973 However, a fast check for @monitor accepts me@ is much harder to implement depending on the constraints put on the monitors. 
1974 Indeed, monitors are often expressed as an entry queue and some acceptor queue as in Figure~\ref{fig:ClassicalMonitor}. 2100 \end{cquote} 2101 For @wait( e )@, the default semantics is to atomically block the signaller and release all acquired mutex parameters, \ie @wait( e, m1, m2 )@. 2102 To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @wait( e, m1 )@. 2103 Wait cannot statically verify the released monitors are the acquired mutex-parameters without disallowing separately compiled helper functions calling @wait@. 2104 While \CC supports bulk locking, @wait@ only accepts a single lock for a condition variable, so bulk locking with condition variables is asymmetric. 2105 Finally, a signaller, 2106 \begin{cfa} 2107 void baz( M & mutex m1, M & mutex m2 ) { 2108 ... signal( e ); ... 2109 } 2110 \end{cfa} 2111 must have acquired at least the same locks as the waiting thread signalled from a condition queue to allow the locks to be passed, and hence, prevent barging. 2112 2113 Similarly, for @waitfor( rtn )@, the default semantics is to atomically block the acceptor and release all acquired mutex parameters, \ie @waitfor( rtn, m1, m2 )@. 2114 To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @waitfor( rtn, m1 )@. 2115 @waitfor@ does statically verify the monitor types passed are the same as the acquired mutex-parameters of the given function or function pointer, hence the function (pointer) prototype must be accessible. 2116 % When an overloaded function appears in an @waitfor@ statement, calls to any function with that name are accepted. 2117 % The rationale is that members with the same name should perform a similar function, and therefore, all should be eligible to accept a call. 
2118 Overloaded functions can be disambiguated using a cast 2119 \begin{cfa} 2120 void rtn( M & mutex m ); 2121 `int` rtn( M & mutex m ); 2122 waitfor( (`int` (*)( M & mutex ))rtn, m ); 2123 \end{cfa} 2124 2125 The ability to release a subset of acquired monitors can result in a \newterm{nested monitor}~\cite{Lister77} deadlock. 2126 \begin{cfa} 2127 void foo( M & mutex m1, M & mutex m2 ) { 2128 ... wait( `e, m1` ); ... $\C{// release m1, keeping m2 acquired )}$ 2129 void bar( M & mutex m1, M & mutex m2 ) { $\C{// must acquire m1 and m2 )}$ 2130 ... signal( `e` ); ... 2131 \end{cfa} 2132 The @wait@ only releases @m1@ so the signalling thread cannot acquire @m1@ and @m2@ to enter @bar@ and @signal@ the condition. 2133 While deadlock can occur with multiple/nesting acquisition, this is a consequence of locks, and by extension monitors, not being perfectly composable. 2134 2135 2136 2137 \subsection{\texorpdfstring{Extended \protect\lstinline@waitfor@}{Extended waitfor}} 2138 2139 Figure~\ref{f:ExtendedWaitfor} shows the extended form of the @waitfor@ statement to conditionally accept one of a group of mutex functions, with an optional statement to be performed \emph{after} the mutex function finishes. 2140 For a @waitfor@ clause to be executed, its @when@ must be true and an outstanding call to its corresponding member(s) must exist. 2141 The \emph{conditional-expression} of a @when@ may call a function, but the function must not block or context switch. 2142 If there are multiple acceptable mutex calls, selection occurs top-to-bottom (prioritized) among the @waitfor@ clauses, whereas some programming languages with similar mechanisms accept nondeterministically for this case, \eg Go \lstinline[morekeywords=select]@select@. 2143 If some accept guards are true and there are no outstanding calls to these members, the acceptor is blocked until a call to one of these members is made. 2144 If there is a @timeout@ clause, it provides an upper bound on waiting. 
2145 If all the accept guards are false, the statement does nothing, unless there is a terminating @else@ clause with a true guard, which is executed instead. 2146 Hence, the terminating @else@ clause allows a conditional attempt to accept a call without blocking. 2147 If both @timeout@ and @else@ clause are present, the @else@ must be conditional, or the @timeout@ is never triggered. 2148 There is also a traditional future wait queue (not shown) (\eg Microsoft (@WaitForMultipleObjects@)), to wait for a specified number of future elements in the queue. 1975 2149 1976 2150 \begin{figure} 1977 2151 \centering 1978 \subfloat[Classical Monitor] { 1979 \label{fig:ClassicalMonitor} 1980 {\resizebox{0.45\textwidth}{!}{\input{monitor}}} 1981 }% subfloat 1982 \qquad 1983 \subfloat[bulk acquire Monitor] { 1984 \label{fig:BulkMonitor} 1985 {\resizebox{0.45\textwidth}{!}{\input{ext_monitor}}} 1986 }% subfloat 1987 \caption{External Scheduling Monitor} 2152 \begin{cfa} 2153 `when` ( $\emph{conditional-expression}$ ) $\C{// optional guard}$ 2154 waitfor( $\emph{mutex-member-name}$ ) $\emph{statement}$ $\C{// action after call}$ 2155 `or` `when` ( $\emph{conditional-expression}$ ) $\C{// any number of functions}$ 2156 waitfor( $\emph{mutex-member-name}$ ) $\emph{statement}$ 2157 `or` ... 2158 `when` ( $\emph{conditional-expression}$ ) $\C{// optional guard}$ 2159 `timeout` $\emph{statement}$ $\C{// optional terminating timeout clause}$ 2160 `when` ( $\emph{conditional-expression}$ ) $\C{// optional guard}$ 2161 `else` $\emph{statement}$ $\C{// optional terminating clause}$ 2162 \end{cfa} 2163 \caption{Extended \protect\lstinline@waitfor@} 2164 \label{f:ExtendedWaitfor} 1988 2165 \end{figure} 1989 2166 1990 There are other alternatives to these pictures, but in the case of the left picture, implementing a fast accept check is relatively easy. 
1991 Restricted to a fixed number of mutex members, N, the accept check reduces to updating a bitmask when the acceptor queue changes, a check that executes in a single instruction even with a fairly large number (\eg 128) of mutex members. 1992 This approach requires a unique dense ordering of routines with an upper-bound and that ordering must be consistent across translation units. 1993 For OO languages these constraints are common, since objects only offer adding member routines consistently across translation units via inheritance. 1994 However, in \CFA users can extend objects with mutex routines that are only visible in certain translation unit. 1995 This means that establishing a program-wide dense-ordering among mutex routines can only be done in the program linking phase, and still could have issues when using dynamically shared objects. 1996 1997 The alternative is to alter the implementation as in Figure~\ref{fig:BulkMonitor}. 1998 Here, the mutex routine called is associated with a thread on the entry queue while a list of acceptable routines is kept separate. 1999 Generating a mask dynamically means that the storage for the mask information can vary between calls to @waitfor@, allowing for more flexibility and extensions. 2000 Storing an array of accepted routine pointers replaces the single instruction bitmask comparison with dereferencing a pointer followed by a linear search. 2001 Furthermore, supporting nested external scheduling (\eg listing \ref{f:nest-ext}) may now require additional searches for the @waitfor@ statement to check if a routine is already queued. 2167 Note, a group of conditional @waitfor@ clauses is \emph{not} the same as a group of @if@ statements, \eg: 2168 \begin{cfa} 2169 if ( C1 ) waitfor( mem1 ); when ( C1 ) waitfor( mem1 ); 2170 else if ( C2 ) waitfor( mem2 ); or when ( C2 ) waitfor( mem2 ); 2171 \end{cfa} 2172 The left example only accepts @mem1@ if @C1@ is true or only @mem2@ if @C2@ is true. 
2173 The right example accepts either @mem1@ or @mem2@ if @C1@ and @C2@ are true. 2174 2175 An interesting use of @waitfor@ is accepting the @mutex@ destructor to know when an object is deallocated, \eg assume the bounded buffer is restructured from a monitor to a thread with the following @main@. 2176 \begin{cfa} 2177 void main( Buffer(T) & buffer ) with(buffer) { 2178 for () { 2179 `waitfor( ^?{}, buffer )` break; 2180 or when ( count != 20 ) waitfor( insert, buffer ) { ... } 2181 or when ( count != 0 ) waitfor( remove, buffer ) { ... } 2182 } 2183 // clean up 2184 } 2185 \end{cfa} 2186 When the program main deallocates the buffer, it first calls the buffer's destructor, which is accepted, the destructor runs, and the buffer is deallocated. 2187 However, the buffer thread cannot continue after the destructor call because the object is gone; 2188 hence, clean up in @main@ cannot occur, which means destructors for local objects are not run. 2189 To make this useful capability work, the semantics for accepting the destructor is the same as @signal@, \ie the destructor call is placed on urgent and the acceptor continues execution, which ends the loop, cleans up, and the thread terminates. 2190 Then, the destructor caller unblocks from urgent to deallocate the object. 2191 Accepting the destructor is the idiomatic way in \CFA to terminate a thread performing direct communication. 2192 2193 2194 \subsection{Bulk Barging Prevention} 2195 2196 Figure~\ref{f:BulkBargingPrevention} shows \CFA code where bulk acquire adds complexity to the internal-signalling semantics. 2197 The complexity begins at the end of the inner @mutex@ statement, where the semantics of internal scheduling need to be extended for multiple monitors. 2198 The problem is that bulk acquire is used in the inner @mutex@ statement where one of the monitors is already acquired.
2199 When the signalling thread reaches the end of the inner @mutex@ statement, it should transfer ownership of @m1@ and @m2@ to the waiting threads to prevent barging into the outer @mutex@ statement by another thread. 2200 However, both the signalling and waiting threads W1 and W2 need some subset of monitors @m1@ and @m2@. 2201 \begin{cquote} 2202 condition c: (order 1) W2(@m2@), W1(@m1@,@m2@)\ \ \ or\ \ \ (order 2) W1(@m1@,@m2@), W2(@m2@) \\ 2203 S: acq. @m1@ $\rightarrow$ acq. @m1,m2@ $\rightarrow$ @signal(c)@ $\rightarrow$ rel. @m2@ $\rightarrow$ pass @m2@ unblock W2 (order 2) $\rightarrow$ rel. @m1@ $\rightarrow$ pass @m1,m2@ unblock W1 \\ 2204 \hspace*{2.75in}$\rightarrow$ rel. @m1@ $\rightarrow$ pass @m1,m2@ unblock W1 (order 1) 2205 \end{cquote} 2002 2206 2003 2207 \begin{figure} 2004 \begin{cfa}[caption={Example of nested external scheduling},label={f:nest-ext}] 2005 monitor M {}; 2006 void foo( M & mutex a ) {} 2007 void bar( M & mutex b ) { 2008 // Nested in the waitfor(bar, c) call 2009 waitfor(foo, b); 2010 } 2011 void baz( M & mutex c ) { 2012 waitfor(bar, c); 2013 } 2014 2015 \end{cfa} 2208 \newbox\myboxA 2209 \begin{lrbox}{\myboxA} 2210 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 2211 monitor M m1, m2; 2212 condition c; 2213 mutex( m1 ) { // $\LstCommentStyle{\color{red}outer}$ 2214 ... 2215 mutex( m1, m2 ) { // $\LstCommentStyle{\color{red}inner}$ 2216 ... `signal( c )`; ... 2217 // m1, m2 still acquired 2218 } // $\LstCommentStyle{\color{red}release m2}$ 2219 // m1 acquired 2220 } // release m1 2221 \end{cfa} 2222 \end{lrbox} 2223 2224 \newbox\myboxB 2225 \begin{lrbox}{\myboxB} 2226 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 2227 2228 2229 mutex( m1 ) { 2230 ... 2231 mutex( m1, m2 ) { 2232 ... 
`wait( c )`; // release m1, m2 2233 // m1, m2 reacquired 2234 } // $\LstCommentStyle{\color{red}release m2}$ 2235 // m1 acquired 2236 } // release m1 2237 \end{cfa} 2238 \end{lrbox} 2239 2240 \newbox\myboxC 2241 \begin{lrbox}{\myboxC} 2242 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 2243 2244 2245 mutex( m2 ) { 2246 ... `wait( c )`; // release m2 2247 // m2 reacquired 2248 } // $\LstCommentStyle{\color{red}release m2}$ 2249 2250 2251 2252 2253 \end{cfa} 2254 \end{lrbox} 2255 2256 \begin{cquote} 2257 \subfloat[Signalling Thread (S)]{\label{f:SignallingThread}\usebox\myboxA} 2258 \hspace{3\parindentlnth} 2259 \subfloat[Waiting Thread (W1)]{\label{f:WaitingThread}\usebox\myboxB} 2260 \hspace{2\parindentlnth} 2261 \subfloat[Waiting Thread (W2)]{\label{f:OtherWaitingThread}\usebox\myboxC} 2262 \end{cquote} 2263 \caption{Bulk Barging Prevention} 2264 \label{f:BulkBargingPrevention} 2016 2265 \end{figure} 2017 2266 2018 Note that in the right picture, tasks need to always keep track of the monitors associated with mutex routines, and the routine mask needs to have both a routine pointer and a set of monitors, as is discussed in the next section. 2019 These details are omitted from the picture for the sake of simplicity. 2020 2021 At this point, a decision must be made between flexibility and performance. 2022 Many design decisions in \CFA achieve both flexibility and performance, for example polymorphic routines add significant flexibility but inlining them means the optimizer can easily remove any runtime cost. 2023 Here, however, the cost of flexibility cannot be trivially removed. 2024 In the end, the most flexible approach has been chosen since it allows users to write programs that would otherwise be hard to write. 2025 This decision is based on the assumption that writing fast but inflexible locks is closer to a solved problem than writing locks that are as flexible as external scheduling in \CFA. 
2026 2027 % ====================================================================== 2028 % ====================================================================== 2267 One scheduling solution is for the signaller S to keep ownership of all locks until the last lock is ready to be transferred, because this semantics fits most closely to the behaviour of single-monitor scheduling. 2268 However, this solution is inefficient if W2 waited first and can be immediately passed @m2@ when released, while S retains @m1@ until completion of the outer mutex statement. 2269 If W1 waited first, the signaller must retain @m1@ and @m2@ until completion of the outer mutex statement and then pass both to W1. 2270 % Furthermore, there is an execution sequence where the signaller always finds waiter W2, and hence, waiter W1 starves. 2271 To support this efficient semantics (and prevent barging), the implementation maintains a list of monitors acquired for each blocked thread. 2272 When a signaller exits or waits in a monitor function/statement, the front waiter on urgent is unblocked if all its monitors are released. 2273 Implementing a fast subset check for the necessary released monitors is important. 2274 % The benefit is encapsulating complexity into only two actions: passing monitors to the next owner when they should be released and conditionally waking threads if all conditions are met. 2275 2276 2277 \subsection{Loose Object Definitions} 2278 \label{s:LooseObjectDefinitions} 2279 2280 In an object-oriented programming language, a class includes an exhaustive list of operations. 2281 A new class can add members via static inheritance but the subclass still has an exhaustive list of operations. 2282 (Dynamic member adding, \eg JavaScript~\cite{JavaScript}, is not considered.)
2283 In the object-oriented scenario, the type and all its operators are always present at compilation (even separate compilation), so it is possible to number the operations in a bit mask and use an $O(1)$ compare with a similar bit mask created for the operations specified in a @waitfor@. 2284 2285 However, in \CFA, monitor functions can be statically added/removed in translation units, making a fast subset check difficult. 2286 \begin{cfa} 2287 monitor M { ... }; // common type, included in .h file 2288 translation unit 1 2289 void `f`( M & mutex m ); 2290 void g( M & mutex m ) { waitfor( `f`, m ); } 2291 translation unit 2 2292 void `f`( M & mutex m ); $\C{// replacing f and g for type M in this translation unit}$ 2293 void `g`( M & mutex m ); 2294 void h( M & mutex m ) { waitfor( `f`, m ) or waitfor( `g`, m ); } $\C{// extending type M in this translation unit}$ 2295 \end{cfa} 2296 The @waitfor@ statements in each translation unit cannot form a unique bit-mask because the monitor type does not carry that information. 2297 Hence, function pointers are used to identify the functions listed in the @waitfor@ statement, stored in a variable-sized array. 2298 Then, the same implementation approach used for the urgent stack is used for the calling queue. 2299 Each caller has a list of monitors acquired, and the @waitfor@ statement performs a (usually short) linear search matching functions in the @waitfor@ list with called functions, and then verifying the associated mutex locks can be transferred. 2300 (A possible way to construct a dense mapping is at link or load-time.) 2301 2302 2029 2303 \subsection{Multi-Monitor Scheduling} 2030 % ====================================================================== 2031 % ====================================================================== 2032 2033 External scheduling, like internal scheduling, becomes significantly more complex when introducing multi-monitor syntax.
2034 Even in the simplest possible case, some new semantics needs to be established: 2035 \begin{cfa} 2036 monitor M {}; 2037 2038 void f(M & mutex a); 2039 2040 void g(M & mutex b, M & mutex c) { 2041 waitfor(f); // two monitors M => unknown which to pass to f(M & mutex) 2042 } 2043 \end{cfa} 2044 The obvious solution is to specify the correct monitor as follows: 2045 2046 \begin{cfa} 2047 monitor M {}; 2048 2049 void f(M & mutex a); 2050 2051 void g(M & mutex a, M & mutex b) { 2052 // wait for call to f with argument b 2053 waitfor(f, b); 2054 } 2055 \end{cfa} 2056 This syntax is unambiguous. 2057 Both locks are acquired and kept by @g@. 2058 When routine @f@ is called, the lock for monitor @b@ is temporarily transferred from @g@ to @f@ (while @g@ still holds lock @a@). 2059 This behaviour can be extended to the multi-monitor @waitfor@ statement as follows. 2060 2061 \begin{cfa} 2062 monitor M {}; 2063 2064 void f(M & mutex a, M & mutex b); 2065 2066 void g(M & mutex a, M & mutex b) { 2067 // wait for call to f with arguments a and b 2068 waitfor(f, a, b); 2069 } 2070 \end{cfa} 2071 2072 Note that the set of monitors passed to the @waitfor@ statement must be entirely contained in the set of monitors already acquired in the routine. @waitfor@ used in any other context is undefined behaviour. 2073 2074 An important behaviour to note is when a set of monitors only match partially: 2075 2076 \begin{cfa} 2077 mutex struct A {}; 2078 2079 mutex struct B {}; 2080 2081 void g(A & mutex a, B & mutex b) { 2082 waitfor(f, a, b); 2083 } 2084 2085 A a1, a2; 2086 B b; 2087 2088 void foo() { 2089 g(a1, b); // block on accept 2090 } 2091 2092 void bar() { 2093 f(a2, b); // fulfill cooperation 2094 } 2095 \end{cfa} 2096 While the equivalent can happen when using internal scheduling, the fact that conditions are specific to a set of monitors means that users have to use two different condition variables. 
2097 In both cases, partially matching monitor sets does not wakeup the waiting thread. 2098 It is also important to note that in the case of external scheduling the order of parameters is irrelevant; @waitfor(f,a,b)@ and @waitfor(f,b,a)@ are indistinguishable waiting condition. 2099 2100 % ====================================================================== 2101 % ====================================================================== 2102 \subsection{\protect\lstinline|waitfor| Semantics} 2103 % ====================================================================== 2104 % ====================================================================== 2105 2106 Syntactically, the @waitfor@ statement takes a routine identifier and a set of monitors. 2107 While the set of monitors can be any list of expressions, the routine name is more restricted because the compiler validates at compile time the validity of the routine type and the parameters used with the @waitfor@ statement. 2108 It checks that the set of monitors passed in matches the requirements for a routine call. 2109 Figure~\ref{f:waitfor} shows various usages of the waitfor statement and which are acceptable. 2110 The choice of the routine type is made ignoring any non-@mutex@ parameter. 2111 One limitation of the current implementation is that it does not handle overloading, but overloading is possible. 2304 \label{s:Multi-MonitorScheduling} 2305 2306 External scheduling, like internal scheduling, becomes significantly more complex for multi-monitor semantics. 2307 Even in the simplest case, new semantics need to be established. 2308 \begin{cfa} 2309 monitor M { ... 
}; 2310 void f( M & mutex m1 ); 2311 void g( M & mutex m1, M & mutex m2 ) { `waitfor( f );` } $\C{// pass m1 or m2 to f?}$ 2312 \end{cfa} 2313 The solution is for the programmer to disambiguate: 2314 \begin{cfa} 2315 waitfor( f, `m2` ); $\C{// wait for call to f with argument m2}$ 2316 \end{cfa} 2317 Both locks are acquired by function @g@, so when function @f@ is called, the lock for monitor @m2@ is passed from @g@ to @f@, while @g@ still holds lock @m1@. 2318 This behaviour can be extended to the multi-monitor @waitfor@ statement. 2319 \begin{cfa} 2320 monitor M { ... }; 2321 void f( M & mutex m1, M & mutex m2 ); 2322 void g( M & mutex m1, M & mutex m2 ) { waitfor( f, `m1, m2` ); $\C{// wait for call to f with arguments m1 and m2}$ 2323 \end{cfa} 2324 Again, the set of monitors passed to the @waitfor@ statement must be entirely contained in the set of monitors already acquired by the accepting function. 2325 Also, the order of the monitors in a @waitfor@ statement is unimportant. 2326 2327 Figure~\ref{f:UnmatchedMutexSets} shows an example where, for internal and external scheduling with multiple monitors, a signalling or accepting thread must match exactly, \ie partial matching results in waiting. 2328 For both examples, the set of monitors is disjoint so unblocking is impossible. 
2329 2112 2330 \begin{figure} 2113 \begin{cfa}[caption={Various correct and incorrect uses of the waitfor statement},label={f:waitfor}] 2114 monitor A{}; 2115 monitor B{}; 2116 2117 void f1( A & mutex ); 2118 void f2( A & mutex, B & mutex ); 2119 void f3( A & mutex, int ); 2120 void f4( A & mutex, int ); 2121 void f4( A & mutex, double ); 2122 2123 void foo( A & mutex a1, A & mutex a2, B & mutex b1, B & b2 ) { 2124 A * ap = & a1; 2125 void (*fp)( A & mutex ) = f1; 2126 2127 waitfor(f1, a1); // Correct : 1 monitor case 2128 waitfor(f2, a1, b1); // Correct : 2 monitor case 2129 waitfor(f3, a1); // Correct : non-mutex arguments are ignored 2130 waitfor(f1, *ap); // Correct : expression as argument 2131 2132 waitfor(f1, a1, b1); // Incorrect : Too many mutex arguments 2133 waitfor(f2, a1); // Incorrect : Too few mutex arguments 2134 waitfor(f2, a1, a2); // Incorrect : Mutex arguments don't match 2135 waitfor(f1, 1); // Incorrect : 1 not a mutex argument 2136 waitfor(f9, a1); // Incorrect : f9 routine does not exist 2137 waitfor(*fp, a1 ); // Incorrect : fp not an identifier 2138 waitfor(f4, a1); // Incorrect : f4 ambiguous 2139 2140 waitfor(f2, a1, b2); // Undefined behaviour : b2 not mutex 2141 } 2142 \end{cfa} 2331 \centering 2332 \begin{lrbox}{\myboxA} 2333 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 2334 monitor M1 {} m11, m12; 2335 monitor M2 {} m2; 2336 condition c; 2337 void f( M1 & mutex m1, M2 & mutex m2 ) { 2338 signal( c ); 2339 } 2340 void g( M1 & mutex m1, M2 & mutex m2 ) { 2341 wait( c ); 2342 } 2343 g( `m11`, m2 ); // block on wait 2344 f( `m12`, m2 ); // cannot fulfil 2345 \end{cfa} 2346 \end{lrbox} 2347 2348 \begin{lrbox}{\myboxB} 2349 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 2350 monitor M1 {} m11, m12; 2351 monitor M2 {} m2; 2352 2353 void f( M1 & mutex m1, M2 & mutex m2 ) { 2354 2355 } 2356 void g( M1 & mutex m1, M2 & mutex m2 ) { 2357 waitfor( f, m1, m2 ); 2358 } 2359 g( `m11`, m2 ); // block on accept 2360 f( `m12`, m2 ); // cannot fulfil 2361 
\end{cfa} 2362 \end{lrbox} 2363 \subfloat[Internal scheduling]{\label{f:InternalScheduling}\usebox\myboxA} 2364 \hspace{3pt} 2365 \vrule 2366 \hspace{3pt} 2367 \subfloat[External scheduling]{\label{f:ExternalScheduling}\usebox\myboxB} 2368 \caption{Unmatched \protect\lstinline@mutex@ sets} 2369 \label{f:UnmatchedMutexSets} 2143 2370 \end{figure} 2144 2371 2145 Finally, for added flexibility, \CFA supports constructing a complex @waitfor@ statement using the @or@, @timeout@ and @else@. 2146 Indeed, multiple @waitfor@ clauses can be chained together using @or@; this chain forms a single statement that uses baton pass to any routine that fits one of the routine+monitor set passed in. 2147 To enable users to tell which accepted routine executed, @waitfor@s are followed by a statement (including the null statement @;@) or a compound statement, which is executed after the clause is triggered. 2148 A @waitfor@ chain can also be followed by a @timeout@, to signify an upper bound on the wait, or an @else@, to signify that the call should be non-blocking, which checks for a matching routine call already arrived and otherwise continues. 2149 Any and all of these clauses can be preceded by a @when@ condition to dynamically toggle the accept clauses on or off based on some current state. 2150 Figure~\ref{f:waitfor2} demonstrates several complex masks and some incorrect ones. 2372 2373 \subsection{\texorpdfstring{\protect\lstinline@mutex@ Threads}{mutex Threads}} 2374 2375 Threads in \CFA can also be monitors to allow \emph{direct communication} among threads, \ie threads can have mutex functions that are called by other threads. 2376 Hence, all monitor features are available when using threads. 2377 Figure~\ref{f:DirectCommunication} shows a comparison of direct call communication in \CFA with direct channel communication in Go. 2378 (Ada provides a similar mechanism to the \CFA direct communication.) 
2379 The program main in both programs communicates directly with the other thread versus indirect communication where two threads interact through a passive monitor. 2380 Both direct and indirection thread communication are valuable tools in structuring concurrent programs. 2151 2381 2152 2382 \begin{figure} 2153 \lstset{language=CFA,deletedelim=**[is][]{`}{`}} 2154 \begin{cfa} 2155 monitor A{}; 2156 2157 void f1( A & mutex ); 2158 void f2( A & mutex ); 2159 2160 void foo( A & mutex a, bool b, int t ) { 2161 waitfor(f1, a); $\C{// Correct : blocking case}$ 2162 2163 waitfor(f1, a) { $\C{// Correct : block with statement}$ 2164 sout | "f1" | endl; 2383 \centering 2384 \begin{lrbox}{\myboxA} 2385 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 2386 2387 struct Msg { int i, j; }; 2388 thread GoRtn { int i; float f; Msg m; }; 2389 void mem1( GoRtn & mutex gortn, int i ) { gortn.i = i; } 2390 void mem2( GoRtn & mutex gortn, float f ) { gortn.f = f; } 2391 void mem3( GoRtn & mutex gortn, Msg m ) { gortn.m = m; } 2392 void ^?{}( GoRtn & mutex ) {} 2393 2394 void main( GoRtn & gortn ) with( gortn ) { // thread starts 2395 2396 for () { 2397 2398 `waitfor( mem1, gortn )` sout | i; // wait for calls 2399 or `waitfor( mem2, gortn )` sout | f; 2400 or `waitfor( mem3, gortn )` sout | m.i | m.j; 2401 or `waitfor( ^?{}, gortn )` break; 2402 2165 2403 } 2166 waitfor(f1, a) { $\C{// Correct : block waiting for f1 or f2}$ 2167 sout | "f1" | endl; 2168 } or waitfor(f2, a) { 2169 sout | "f2" | endl; 2404 2405 } 2406 int main() { 2407 GoRtn gortn; $\C[2.0in]{// start thread}$ 2408 `mem1( gortn, 0 );` $\C{// different calls}\CRT$ 2409 `mem2( gortn, 2.5 );` 2410 `mem3( gortn, (Msg){1, 2} );` 2411 2412 2413 } // wait for completion 2414 \end{cfa} 2415 \end{lrbox} 2416 2417 \begin{lrbox}{\myboxB} 2418 \begin{Go}[aboveskip=0pt,belowskip=0pt] 2419 func main() { 2420 type Msg struct{ i, j int } 2421 2422 ch1 := make( chan int ) 2423 ch2 := make( chan float32 ) 2424 ch3 := make( chan Msg ) 2425 hand 
:= make( chan string ) 2426 shake := make( chan string ) 2427 gortn := func() { $\C[1.5in]{// thread starts}$ 2428 var i int; var f float32; var m Msg 2429 L: for { 2430 select { $\C{// wait for messages}$ 2431 case `i = <- ch1`: fmt.Println( i ) 2432 case `f = <- ch2`: fmt.Println( f ) 2433 case `m = <- ch3`: fmt.Println( m ) 2434 case `<- hand`: break L $\C{// sentinel}$ 2435 } 2436 } 2437 `shake <- "SHAKE"` $\C{// completion}$ 2170 2438 } 2171 waitfor(f1, a); or else; $\C{// Correct : non-blocking case}$ 2172 2173 waitfor(f1, a) { $\C{// Correct : non-blocking case}$ 2174 sout | "blocked" | endl; 2175 } or else { 2176 sout | "didn't block" | endl; 2439 2440 go gortn() $\C{// start thread}$ 2441 `ch1 <- 0` $\C{// different messages}$ 2442 `ch2 <- 2.5` 2443 `ch3 <- Msg{1, 2}` 2444 `hand <- "HAND"` $\C{// sentinel value}$ 2445 `<- shake` $\C{// wait for completion}\CRT$ 2446 } 2447 \end{Go} 2448 \end{lrbox} 2449 2450 \subfloat[\CFA]{\label{f:CFAwaitfor}\usebox\myboxA} 2451 \hspace{3pt} 2452 \vrule 2453 \hspace{3pt} 2454 \subfloat[Go]{\label{f:Gochannel}\usebox\myboxB} 2455 \caption{Direct communication} 2456 \label{f:DirectCommunication} 2457 \end{figure} 2458 2459 \begin{comment} 2460 The following shows an example of two threads directly calling each other and accepting calls from each other in a cycle. 
2461 \begin{cfa} 2462 \end{cfa} 2463 \vspace{-0.8\baselineskip} 2464 \begin{cquote} 2465 \begin{tabular}{@{}l@{\hspace{3\parindentlnth}}l@{}} 2466 \begin{cfa} 2467 thread Ping {} pi; 2468 void ping( Ping & mutex ) {} 2469 void main( Ping & pi ) { 2470 for ( 10 ) { 2471 `waitfor( ping, pi );` 2472 `pong( po );` 2177 2473 } 2178 waitfor(f1, a) { $\C{// Correct : block at most 10 seconds}$ 2179 sout | "blocked" | endl; 2180 } or timeout( 10`s) { 2181 sout | "didn't block" | endl; 2474 } 2475 int main() {} 2476 \end{cfa} 2477 & 2478 \begin{cfa} 2479 thread Pong {} po; 2480 void pong( Pong & mutex ) {} 2481 void main( Pong & po ) { 2482 for ( 10 ) { 2483 `ping( pi );` 2484 `waitfor( pong, po );` 2182 2485 } 2183 // Correct : block only if b == true if b == false, don't even make the call 2184 when(b) waitfor(f1, a); 2185 2186 // Correct : block only if b == true if b == false, make non-blocking call 2187 waitfor(f1, a); or when(!b) else; 2188 2189 // Correct : block only of t > 1 2190 waitfor(f1, a); or when(t > 1) timeout(t); or else; 2191 2192 // Incorrect : timeout clause is dead code 2193 waitfor(f1, a); or timeout(t); or else; 2194 2195 // Incorrect : order must be waitfor [or waitfor... [or timeout] [or else]] 2196 timeout(t); or waitfor(f1, a); or else; 2197 } 2198 \end{cfa} 2199 \caption{Correct and incorrect uses of the or, else, and timeout clause around a waitfor statement} 2200 \label{f:waitfor2} 2201 \end{figure} 2202 2203 % ====================================================================== 2204 % ====================================================================== 2205 \subsection{Waiting For The Destructor} 2206 % ====================================================================== 2207 % ====================================================================== 2208 An interesting use for the @waitfor@ statement is destructor semantics. 
2209 Indeed, the @waitfor@ statement can accept any @mutex@ routine, which includes the destructor (see section \ref{data}). 2210 However, with the semantics discussed until now, waiting for the destructor does not make any sense, since using an object after its destructor is called is undefined behaviour. 2211 The simplest approach is to disallow @waitfor@ on a destructor. 2212 However, a more expressive approach is to flip ordering of execution when waiting for the destructor, meaning that waiting for the destructor allows the destructor to run after the current @mutex@ routine, similarly to how a condition is signalled. 2213 \begin{figure} 2214 \begin{cfa}[caption={Example of an executor which executes action in series until the destructor is called.},label={f:dtor-order}] 2215 monitor Executer {}; 2216 struct Action; 2217 2218 void ^?{} (Executer & mutex this); 2219 void execute(Executer & mutex this, const Action & ); 2220 void run (Executer & mutex this) { 2221 while(true) { 2222 waitfor(execute, this); 2223 or waitfor(^?{} , this) { 2224 break; 2225 } 2226 } 2227 } 2228 \end{cfa} 2229 \end{figure} 2230 For example, listing \ref{f:dtor-order} shows an example of an executor with an infinite loop, which waits for the destructor to break out of this loop. 2231 Switching the semantic meaning introduces an idiomatic way to terminate a task and/or wait for its termination via destruction. 2232 2233 2234 % ###### # ###### # # # ####### # ### ##### # # 2235 % # # # # # # # # # # # # # # # ## ## 2236 % # # # # # # # # # # # # # # # # # # 2237 % ###### # # ###### # # # # ##### # # ##### # # # 2238 % # ####### # # ####### # # # # # # # # 2239 % # # # # # # # # # # # # # # # # 2240 % # # # # # # # ####### ####### ####### ####### ### ##### # # 2241 \section{Parallelism} 2242 Historically, computer performance was about processor speeds and instruction counts. 
2243 However, with heat dissipation being a direct consequence of speed increase, parallelism has become the new source for increased performance~\cite{Sutter05, Sutter05b}. 2244 In this decade, it is no longer reasonable to create a high-performance application without caring about parallelism. 2245 Indeed, parallelism is an important aspect of performance and more specifically throughput and hardware utilization. 2246 The lowest-level approach of parallelism is to use \textbf{kthread} in combination with semantics like @fork@, @join@, \etc. 2247 However, since these have significant costs and limitations, \textbf{kthread} are now mostly used as an implementation tool rather than a user oriented one. 2248 There are several alternatives to solve these issues that all have strengths and weaknesses. 2249 While there are many variations of the presented paradigms, most of these variations do not actually change the guarantees or the semantics, they simply move costs in order to achieve better performance for certain workloads. 2250 2251 \section{Paradigms} 2252 \subsection{User-Level Threads} 2253 A direct improvement on the \textbf{kthread} approach is to use \textbf{uthread}. 2254 These threads offer most of the same features that the operating system already provides but can be used on a much larger scale. 2255 This approach is the most powerful solution as it allows all the features of multithreading, while removing several of the more expensive costs of kernel threads. 2256 The downside is that almost none of the low-level threading problems are hidden; users still have to think about data races, deadlocks and synchronization issues. 2257 These issues can be somewhat alleviated by a concurrency toolkit with strong guarantees, but the parallelism toolkit offers very little to reduce complexity in itself. 2258 2259 Examples of languages that support \textbf{uthread} are Erlang~\cite{Erlang} and \uC~\cite{uC++book}. 
2260 2261 \subsection{Fibers : User-Level Threads Without Preemption} \label{fibers} 2262 A popular variant of \textbf{uthread} is what is often referred to as \textbf{fiber}. 2263 However, \textbf{fiber} do not present meaningful semantic differences with \textbf{uthread}. 2264 The significant difference between \textbf{uthread} and \textbf{fiber} is the lack of \textbf{preemption} in the latter. 2265 Advocates of \textbf{fiber} list their high performance and ease of implementation as major strengths, but the performance difference between \textbf{uthread} and \textbf{fiber} is controversial, and the ease of implementation, while true, is a weak argument in the context of language design. 2266 Therefore this proposal largely ignores fibers. 2267 2268 An example of a language that uses fibers is Go~\cite{Go}. 2269 2270 \subsection{Jobs and Thread Pools} 2271 An approach on the opposite end of the spectrum is to base parallelism on \textbf{pool}. 2272 Indeed, \textbf{pool} offer limited flexibility but at the benefit of a simpler user interface. 2273 In \textbf{pool} based systems, users express parallelism as units of work, called jobs, and a dependency graph (either explicit or implicit) that ties them together. 2274 This approach means users need not worry about concurrency but significantly limits the interaction that can occur among jobs. 2275 Indeed, any \textbf{job} that blocks also blocks the underlying worker, which effectively means the CPU utilization, and therefore throughput, suffers noticeably. 2276 It can be argued that a solution to this problem is to use more workers than available cores. 2277 However, unless the number of jobs and the number of workers are comparable, having a significant number of blocked jobs always results in idle cores. 2278 2279 The gold standard of this implementation is Intel's TBB library~\cite{TBB}.
2280 2281 \subsection{Paradigm Performance} 2282 While the choice between the three paradigms listed above may have significant performance implications, it is difficult to pin down the performance implications of choosing a model at the language level. 2283 Indeed, in many situations one of these paradigms may show better performance but it all strongly depends on the workload. 2284 Having a large amount of mostly independent units of work to execute almost guarantees equivalent performance across paradigms and that the \textbf{pool}-based system has the best efficiency thanks to the lower memory overhead (\ie no thread stack per job). 2285 However, interactions among jobs can easily exacerbate contention. 2286 User-level threads allow fine-grain context switching, which results in better resource utilization, but a context switch is more expensive and the extra control means users need to tweak more variables to get the desired performance. 2287 Finally, if the units of uninterrupted work are large enough, the paradigm choice is largely amortized by the actual work done. 2288 2289 \section{The \protect\CFA\ Kernel : Processors, Clusters and Threads}\label{kernel} 2290 A \textbf{cfacluster} is a group of \textbf{kthread} executed in isolation. \textbf{uthread} are scheduled on the \textbf{kthread} of a given \textbf{cfacluster}, allowing organization between \textbf{uthread} and \textbf{kthread}. 2291 It is important that \textbf{kthread} belonging to the same \textbf{cfacluster} have homogeneous settings, otherwise migrating a \textbf{uthread} from one \textbf{kthread} to the other can cause issues. 2292 A \textbf{cfacluster} also offers a pluggable scheduler that can optimize the workload generated by the \textbf{uthread}. 2293 2294 \textbf{cfacluster} have not been fully implemented in the context of this paper. 2295 Currently \CFA only supports one \textbf{cfacluster}, the initial one.
2296 2297 \subsection{Future Work: Machine Setup}\label{machine} 2298 While this was not done in the context of this paper, another important aspect of clusters is affinity. 2299 While many common desktop and laptop PCs have homogeneous CPUs, other devices often have more heterogeneous setups. 2300 For example, a system using \textbf{numa} configurations may benefit from users being able to tie clusters and/or kernel threads to certain CPU cores. 2301 OS support for CPU affinity is now common~\cite{affinityLinux, affinityWindows, affinityFreebsd, affinityNetbsd, affinityMacosx}, which means it is both possible and desirable for \CFA to offer an abstraction mechanism for portable CPU affinity. 2302 2303 \subsection{Paradigms}\label{cfaparadigms} 2304 Given these building blocks, it is possible to reproduce all three of the popular paradigms. 2305 Indeed, \textbf{uthread} is the default paradigm in \CFA. 2306 However, disabling \textbf{preemption} on a cluster means threads effectively become fibers. 2307 Since several \textbf{cfacluster} with different scheduling policy can coexist in the same application, this allows \textbf{fiber} and \textbf{uthread} to coexist in the runtime of an application. 2308 Finally, it is possible to build executors for thread pools from \textbf{uthread} or \textbf{fiber}, which includes specialized jobs like actors~\cite{Actors}. 2309 2310 2311 2312 \section{Behind the Scenes} 2313 There are several challenges specific to \CFA when implementing concurrency. 2314 These challenges are a direct result of bulk acquire and loose object definitions. 2315 These two constraints are the root cause of most design decisions in the implementation. 2316 Furthermore, to avoid contention from dynamically allocating memory in a concurrent environment, the internal-scheduling design is (almost) entirely free of mallocs. 
2317 This approach avoids the chicken and egg problem~\cite{Chicken} of having a memory allocator that relies on the threading system and a threading system that relies on the runtime. 2318 This extra goal means that memory management is a constant concern in the design of the system. 2319 2320 The main memory concern for concurrency is queues. 2321 All blocking operations are made by parking threads onto queues and all queues are designed with intrusive nodes, where each node has pre-allocated link fields for chaining, to avoid the need for memory allocation. 2322 Since several concurrency operations can use an unbound amount of memory (depending on bulk acquire), statically defining information in the intrusive fields of threads is insufficient. The only way to use a variable amount of memory without requiring memory allocation is to pre-allocate large buffers of memory eagerly and store the information in these buffers. 2323 Conveniently, the call stack fits that description and is easy to use, which is why it is used heavily in the implementation of internal scheduling, particularly variable-length arrays. 2324 Since stack allocation is based on scopes, the first step of the implementation is to identify the scopes that are available to store the information, and which of these can have a variable-length array. 2325 The threads and the condition both have a fixed amount of memory, while @mutex@ routines and blocking calls allow for an unbound amount, within the stack size. 2326 2327 Note that since the major contributions of this paper are extending monitor semantics to bulk acquire and loose object definitions, any challenges that do not result from these characteristics of \CFA are considered solved problems and therefore not discussed. 
2328 2329 % ====================================================================== 2330 % ====================================================================== 2331 \section{Mutex Routines} 2332 % ====================================================================== 2333 % ====================================================================== 2334 2335 The first step towards the monitor implementation is simple @mutex@ routines. 2336 In the single monitor case, mutual-exclusion is done using the entry/exit procedure in listing \ref{f:entry1}. 2337 The entry/exit procedures do not have to be extended to support multiple monitors. 2338 Indeed it is sufficient to enter/leave monitors one-by-one as long as the order is correct to prevent deadlock~\cite{Havender68}. 2339 In \CFA, ordering of monitor acquisition relies on memory ordering. 2340 This approach is sufficient because all objects are guaranteed to have distinct non-overlapping memory layouts and mutual-exclusion for a monitor is only defined for its lifetime, meaning that destroying a monitor while it is acquired is undefined behaviour. 2341 When a mutex call is made, the concerned monitors are aggregated into a variable-length pointer array and sorted based on pointer values. 2342 This array persists for the entire duration of the mutual-exclusion and its ordering is reused extensively. 
2343 \begin{figure} 2344 \begin{multicols}{2} 2345 Entry 2346 \begin{cfa} 2347 if monitor is free 2348 enter 2349 elif already own the monitor 2350 continue 2351 else 2352 block 2353 increment recursions 2354 \end{cfa} 2355 \columnbreak 2356 Exit 2357 \begin{cfa} 2358 decrement recursion 2359 if recursion == 0 2360 if entry queue not empty 2361 wake-up thread 2362 \end{cfa} 2363 \end{multicols} 2364 \begin{cfa}[caption={Initial entry and exit routine for monitors},label={f:entry1}] 2365 \end{cfa} 2366 \end{figure} 2367 2368 \subsection{Details: Interaction with polymorphism} 2369 Depending on the choice of semantics for when monitor locks are acquired, interaction between monitors and \CFA's concept of polymorphism can be more complex to support. 2370 However, it is shown that entry-point locking solves most of the issues. 2371 2372 First of all, interaction between @otype@ polymorphism (see Section~\ref{s:ParametricPolymorphism}) and monitors is impossible since monitors do not support copying. 2373 Therefore, the main question is how to support @dtype@ polymorphism. 2374 It is important to present the difference between the two acquiring options: \textbf{callsite-locking} and entry-point locking, \ie acquiring the monitors before making a mutex routine-call or as the first operation of the mutex routine-call. 2375 For example: 2486 } 2487 2488 \end{cfa} 2489 \end{tabular} 2490 \end{cquote} 2491 % \lstMakeShortInline@% 2492 % \caption{Threads ping/pong using external scheduling} 2493 % \label{f:pingpong} 2494 % \end{figure} 2495 Note, the ping/pong threads are globally declared, @pi@/@po@, and hence, start (and possibly complete) before the program main starts. 2496 \end{comment} 2497 2498 2499 \subsection{Execution Properties} 2500 2501 Table~\ref{t:ObjectPropertyComposition} shows how the \CFA high-level constructs cover 3 fundamental execution properties: thread, stateful function, and mutual exclusion. 
2502 Case 1 is a basic object, with none of the new execution properties. 2503 Case 2 allows @mutex@ calls to Case 1 to protect shared data. 2504 Case 3 allows stateful functions to suspend/resume but restricts operations because the state is stackless. 2505 Case 4 allows @mutex@ calls to Case 3 to protect shared data. 2506 Cases 5 and 6 are the same as 3 and 4 without restriction because the state is stackful. 2507 Cases 7 and 8 are rejected because a thread cannot execute without a stackful state in a preemptive environment when context switching from the signal handler. 2508 Cases 9 and 10 have a stackful thread without and with @mutex@ calls. 2509 For situations where threads do not require direct communication, case 9 provides faster creation/destruction by eliminating @mutex@ setup. 2510 2376 2511 \begin{table} 2377 \begin{center} 2378 \begin{tabular}{|c|c|c|} 2379 Mutex & \textbf{callsite-locking} & \textbf{entry-point-locking} \\ 2380 call & cfa-code & cfa-code \\ 2512 \caption{Object property composition} 2513 \centering 2514 \label{t:ObjectPropertyComposition} 2515 \renewcommand{\arraystretch}{1.25} 2516 %\setlength{\tabcolsep}{5pt} 2517 \begin{tabular}{c|c||l|l} 2518 \multicolumn{2}{c||}{object properties} & \multicolumn{2}{c}{mutual exclusion} \\ 2381 2519 \hline 2382 \begin{cfa}[tabsize=3] 2383 void foo(monitor& mutex a){ 2384 2385 // Do Work 2386 //... 2387 2388 } 2389 2390 void main() { 2391 monitor a; 2392 2393 foo(a); 2394 2395 } 2396 \end{cfa} & \begin{cfa}[tabsize=3] 2397 foo(& a) { 2398 2399 // Do Work 2400 //... 2401 2402 } 2403 2404 main() { 2405 monitor a; 2406 acquire(a); 2407 foo(a); 2408 release(a); 2409 } 2410 \end{cfa} & \begin{cfa}[tabsize=3] 2411 foo(& a) { 2412 acquire(a); 2413 // Do Work 2414 //... 
2415 release(a); 2416 } 2417 2418 main() { 2419 monitor a; 2420 2421 foo(a); 2422 2423 } 2424 \end{cfa} 2425 \end{tabular} 2426 \end{center} 2427 \caption{Call-site vs entry-point locking for mutex calls} 2428 \label{tbl:locking-site} 2429 \end{table} 2430 2431 Note the @mutex@ keyword relies on the type system, which means that in cases where a generic monitor-routine is desired, writing the mutex routine is possible with the proper trait, \eg: 2432 \begin{cfa} 2433 // Incorrect: T may not be monitor 2434 forall(dtype T) 2435 void foo(T * mutex t); 2436 2437 // Correct: this routine only works on monitors (any monitor) 2438 forall(dtype T | is_monitor(T)) 2439 void bar(T * mutex t); 2440 \end{cfa} 2441 2442 Both entry point and \textbf{callsite-locking} are feasible implementations. 2443 The current \CFA implementation uses entry-point locking because it requires less work when using \textbf{raii}, effectively transferring the burden of implementation to object construction/destruction. 2444 It is harder to use \textbf{raii} for call-site locking, as it does not necessarily have an existing scope that matches exactly the scope of the mutual exclusion, \ie the routine body. 2445 For example, the monitor call can appear in the middle of an expression. 2446 Furthermore, entry-point locking requires less code generation since any useful routine is called multiple times but there is only one entry point for many call sites. 2447 2448 % ====================================================================== 2449 % ====================================================================== 2450 \section{Threading} \label{impl:thread} 2451 % ====================================================================== 2452 % ====================================================================== 2453 2454 Figure \ref{fig:system1} shows a high-level picture of the \CFA runtime system with regard to concurrency. 
2455 Each component of the picture is explained in detail in the following sections. 2456 2457 \begin{figure} 2458 \begin{center} 2459 {\resizebox{\textwidth}{!}{\input{system.pstex_t}}} 2460 \end{center} 2461 \caption{Overview of the entire system} 2462 \label{fig:system1} 2463 \end{figure} 2464 2465 \subsection{Processors} 2466 Parallelism in \CFA is built around using processors to specify how much parallelism is desired. \CFA processors are object wrappers around kernel threads, specifically @pthread@s in the current implementation of \CFA. 2467 Indeed, any parallelism must go through operating-system libraries. 2468 However, \textbf{uthread} are still the main source of concurrency, processors are simply the underlying source of parallelism. 2469 Indeed, processor \textbf{kthread} simply fetch a \textbf{uthread} from the scheduler and run it; they are effectively executors for user-threads. 2470 The main benefit of this approach is that it offers a well-defined boundary between kernel code and user code, for example, kernel thread quiescing, scheduling and interrupt handling. 2471 Processors internally use coroutines to take advantage of the existing context-switching semantics. 2472 2473 \subsection{Stack Management} 2474 One of the challenges of this system is to reduce the footprint as much as possible. 2475 Specifically, all @pthread@s created also have a stack created with them, which should be used as much as possible. 2476 Normally, coroutines also create their own stack to run on, however, in the case of the coroutines used for processors, these coroutines run directly on the \textbf{kthread} stack, effectively stealing the processor stack. 2477 The exception to this rule is the Main Processor, \ie the initial \textbf{kthread} that is given to any program. 
2478 In order to respect C user expectations, the stack of the initial kernel thread, the main stack of the program, is used by the main user thread rather than the main processor, which can grow very large. 2479 2480 \subsection{Context Switching} 2481 As mentioned in section \ref{coroutine}, coroutines are a stepping stone for implementing threading, because they share the same mechanism for context-switching between different stacks. 2482 To improve performance and simplicity, context-switching is implemented using the following assumption: all context-switches happen inside a specific routine call. 2483 This assumption means that the context-switch only has to copy the callee-saved registers onto the stack and then switch the stack registers with the ones of the target coroutine/thread. 2484 Note that the instruction pointer can be left untouched since the context-switch is always inside the same routine. 2485 Threads, however, do not context-switch between each other directly. 2486 They context-switch to the scheduler. 2487 This method is called a 2-step context-switch and has the advantage of having a clear distinction between user code and the kernel where scheduling and other system operations happen. 2488 Obviously, this doubles the context-switch cost because threads must context-switch to an intermediate stack. 2489 The alternative 1-step context-switch uses the stack of the ``from'' thread to schedule and then context-switches directly to the ``to'' thread. 2490 However, the performance of the 2-step context-switch is still superior to a @pthread_yield@ (see section \ref{results}). 2491 Additionally, for users in need of optimal performance, it is important to note that having a 2-step context-switch as the default does not prevent \CFA from offering a 1-step context-switch (akin to the Microsoft @SwitchToFiber@~\cite{switchToWindows} routine). 2492 This option is not currently present in \CFA, but the changes required to add it are strictly additive. 
2493 2494 \subsection{Preemption} \label{preemption} 2495 Finally, an important aspect for any complete threading system is preemption. 2496 As mentioned in section \ref{basics}, preemption introduces an extra degree of uncertainty, which enables users to have multiple threads interleave transparently, rather than having to cooperate among threads for proper scheduling and CPU distribution. 2497 Indeed, preemption is desirable because it adds a degree of isolation among threads. 2498 In a fully cooperative system, any thread that runs a long loop can starve other threads, while in a preemptive system, starvation can still occur but it does not rely on every thread having to yield or block on a regular basis, which significantly reduces the programmer's burden. 2499 Obviously, preemption is not optimal for every workload. 2500 However any preemptive system can become a cooperative system by making the time slices extremely large. 2501 Therefore, \CFA uses a preemptive threading system. 2502 2503 Preemption in \CFA\footnote{Note that the implementation of preemption is strongly tied with the underlying threading system. 2504 For this reason, only the Linux implementation is covered; \CFA does not run on Windows at the time of writing} is based on kernel timers, which are used to run a discrete-event simulation. 2505 Every processor keeps track of the current time and registers an expiration time with the preemption system. 2506 When the preemption system receives a change in preemption, it inserts the time in a sorted order and sets a kernel timer for the closest one, effectively stepping through preemption events on each signal sent by the timer. 2507 These timers use the Linux signal {\tt SIGALRM}, which is delivered to the process rather than the kernel-thread. 
2508 This results in an implementation problem, because when delivering signals to a process, the kernel can deliver the signal to any kernel thread for which the signal is not blocked, \ie: 2509 \begin{quote} 2510 A process-directed signal may be delivered to any one of the threads that does not currently have the signal blocked. 2511 If more than one of the threads has the signal unblocked, then the kernel chooses an arbitrary thread to which to deliver the signal. 2512 SIGNAL(7) - Linux Programmer's Manual 2513 \end{quote} 2514 For the sake of simplicity, and in order to prevent the case of having two threads receiving alarms simultaneously, \CFA programs block the {\tt SIGALRM} signal on every kernel thread except one. 2515 2516 Now because of how involuntary context-switches are handled, the kernel thread handling {\tt SIGALRM} cannot also be a processor thread. 2517 Hence, involuntary context-switching is done by sending signal {\tt SIGUSR1} to the corresponding proces\-sor and having the thread yield from inside the signal handler. 2518 This approach effectively context-switches away from the signal handler back to the kernel and the signal handler frame is eventually unwound when the thread is scheduled again. 2519 As a result, a signal handler can start on one kernel thread and terminate on a second kernel thread (but the same user thread). 2520 It is important to note that signal handlers save and restore signal masks because user-thread migration can cause a signal mask to migrate from one kernel thread to another. 2521 This behaviour is only a problem if all kernel threads, among which a user thread can migrate, differ in terms of signal masks\footnote{Sadly, official POSIX documentation is silent on what distinguishes ``async-signal-safe'' routines from other routines}. 2522 However, since the kernel thread handling preemption requires a different signal mask, executing user threads on the kernel-alarm thread can cause deadlocks. 
2523 For this reason, the alarm thread is in a tight loop around a system call to @sigwaitinfo@, requiring very little CPU time for preemption. 2524 One final detail about the alarm thread is how to wake it when additional communication is required (\eg on thread termination). 2525 This unblocking is also done using {\tt SIGALRM}, but sent through the @pthread_sigqueue@. 2526 Indeed, @sigwait@ can differentiate signals sent from @pthread_sigqueue@ from signals sent from alarms or the kernel. 2527 2528 \subsection{Scheduler} 2529 Finally, an aspect that was not mentioned yet is the scheduling algorithm. 2530 Currently, the \CFA scheduler uses a single ready queue for all processors, which is the simplest approach to scheduling. 2531 Further discussion on scheduling is present in section \ref{futur:sched}. 2532 2533 % ====================================================================== 2534 % ====================================================================== 2535 \section{Internal Scheduling} \label{impl:intsched} 2536 % ====================================================================== 2537 % ====================================================================== 2538 The following figure is the traditional illustration of a monitor (repeated from page~\pageref{fig:ClassicalMonitor} for convenience): 2539 2540 \begin{figure} 2541 \begin{center} 2542 {\resizebox{0.4\textwidth}{!}{\input{monitor}}} 2543 \end{center} 2544 \caption{Traditional illustration of a monitor} 2545 \end{figure} 2546 2547 This picture has several components, the two most important being the entry queue and the AS-stack. 2548 The entry queue is an (almost) FIFO list where threads waiting to enter are parked, while the acceptor/signaller (AS) stack is a FILO list used for threads that have been signalled or otherwise marked as running next. 2549 2550 For \CFA, this picture does not have support for blocking multiple monitors on a single condition. 
2551 To support bulk acquire two changes to this picture are required. 2552 First, it is no longer helpful to attach the condition to \emph{a single} monitor. 2553 Secondly, the thread waiting on the condition has to be separated across multiple monitors, seen in figure \ref{fig:monitor_cfa}. 2554 2555 \begin{figure} 2556 \begin{center} 2557 {\resizebox{0.8\textwidth}{!}{\input{int_monitor}}} 2558 \end{center} 2559 \caption{Illustration of \CFA Monitor} 2560 \label{fig:monitor_cfa} 2561 \end{figure} 2562 2563 This picture and the proper entry and leave algorithms (see listing \ref{f:entry2}) is the fundamental implementation of internal scheduling. 2564 Note that when a thread is moved from the condition to the AS-stack, it is conceptually split into N pieces, where N is the number of monitors specified in the parameter list. 2565 The thread is woken up when all the pieces have popped from the AS-stacks and made active. 2566 In this picture, the threads are split into halves but this is only because there are two monitors. 2567 For a specific signalling operation every monitor needs a piece of thread on its AS-stack. 2568 2569 \begin{figure} 2570 \begin{multicols}{2} 2571 Entry 2572 \begin{cfa} 2573 if monitor is free 2574 enter 2575 elif already own the monitor 2576 continue 2577 else 2578 block 2579 increment recursion 2580 2581 \end{cfa} 2582 \columnbreak 2583 Exit 2584 \begin{cfa} 2585 decrement recursion 2586 if recursion == 0 2587 if signal_stack not empty 2588 set_owner to thread 2589 if all monitors ready 2590 wake-up thread 2591 2592 if entry queue not empty 2593 wake-up thread 2594 \end{cfa} 2595 \end{multicols} 2596 \begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling},label={f:entry2}] 2597 \end{cfa} 2598 \end{figure} 2599 2600 The solution discussed in \ref{s:InternalScheduling} can be seen in the exit routine of listing \ref{f:entry2}. 
2601 Basically, the solution boils down to having a separate data structure for the condition queue and the AS-stack, and unconditionally transferring ownership of the monitors but only unblocking the thread when the last monitor has transferred ownership. 2602 This solution is deadlock safe as well as preventing any potential barging. 2603 The data structures used for the AS-stack are reused extensively for external scheduling, but in the case of internal scheduling, the data is allocated using variable-length arrays on the call stack of the @wait@ and @signal_block@ routines. 2604 2605 \begin{figure} 2606 \begin{center} 2607 {\resizebox{0.8\textwidth}{!}{\input{monitor_structs.pstex_t}}} 2608 \end{center} 2609 \caption{Data structures involved in internal/external scheduling} 2610 \label{fig:structs} 2611 \end{figure} 2612 2613 Figure \ref{fig:structs} shows a high-level representation of these data structures. 2614 The main idea behind them is that, a thread cannot contain an arbitrary number of intrusive ``next'' pointers for linking onto monitors. 2615 The @condition node@ is the data structure that is queued onto a condition variable and, when signalled, the condition queue is popped and each @condition criterion@ is moved to the AS-stack. 2616 Once all the criteria have been popped from their respective AS-stacks, the thread is woken up, which is what is shown in listing \ref{f:entry2}. 2617 2618 % ====================================================================== 2619 % ====================================================================== 2620 \section{External Scheduling} 2621 % ====================================================================== 2622 % ====================================================================== 2623 Similarly to internal scheduling, external scheduling for multiple monitors relies on the idea that waiting-thread queues are no longer specific to a single monitor, as mentioned in section \ref{extsched}. 
2624 For internal scheduling, these queues are part of condition variables, which are still unique for a given scheduling operation (\ie no signal statement uses multiple conditions). 2625 However, in the case of external scheduling, there is no equivalent object which is associated with @waitfor@ statements. 2626 This absence means the queues holding the waiting threads must be stored inside at least one of the monitors that is acquired. 2627 These monitors are the only objects that have sufficient lifetime and are available on both sides of the @waitfor@ statement. 2628 This requires an algorithm to choose which monitor holds the relevant queue. 2629 It is also important that said algorithm be independent of the order in which users list parameters. 2630 The proposed algorithm is to fall back on monitor lock ordering (sorting by address) and specify that the monitor that is acquired first is the one with the relevant waiting queue. 2631 This assumes that the lock acquiring order is static for the lifetime of all concerned objects but that is a reasonable constraint. 2632 2633 This algorithm choice has two consequences: 2634 \begin{itemize} 2635 \item The queue of the monitor with the lowest address is no longer a true FIFO queue because threads can be moved to the front of the queue. 2636 These queues need to contain a set of monitors for each of the waiting threads. 2637 Therefore, another thread whose set contains the same lowest address monitor but different lower priority monitors may arrive first but enter the critical section after a thread with the correct pairing. 2638 \item The queue of the lowest priority monitor is both required and potentially unused. 
2639 Indeed, since it is not known at compile time which monitor is the monitor which has the lowest address, every monitor needs to have the correct queues even though it is possible that some queues go unused for the entire duration of the program, for example if a monitor is only used in a specific pair. 2640 \end{itemize} 2641 Therefore, the following modifications need to be made to support external scheduling: 2642 \begin{itemize} 2643 \item The threads waiting on the entry queue need to keep track of which routine they are trying to enter, and using which set of monitors. 2644 The @mutex@ routine already has all the required information on its stack, so the thread only needs to keep a pointer to that information. 2645 \item The monitors need to keep a mask of acceptable routines. 2646 This mask contains for each acceptable routine, a routine pointer and an array of monitors to go with it. 2647 It also needs storage to keep track of which routine was accepted. 2648 Since this information is not specific to any monitor, the monitors actually contain a pointer to an integer on the stack of the waiting thread. 2649 Note that if a thread has acquired two monitors but executes a @waitfor@ with only one monitor as a parameter, setting the mask of acceptable routines to both monitors will not cause any problems since the extra monitor will not change ownership regardless. 2650 This becomes relevant when @when@ clauses affect the number of monitors passed to a @waitfor@ statement. 2651 \item The entry/exit routines need to be updated as shown in listing \ref{f:entry3}. 2652 \end{itemize} 2653 2654 \subsection{External Scheduling - Destructors} 2655 Finally, to support the ordering inversion of destructors, the code generation needs to be modified to use a special entry routine. 2656 This routine is needed because of the storage requirements of the call order inversion. 
2657 Indeed, when waiting for the destructors, storage is needed for the waiting context and the lifetime of said storage needs to outlive the waiting operation it is needed for. 2658 For regular @waitfor@ statements, the call stack of the routine itself matches this requirement but it is no longer the case when waiting for the destructor since it is pushed on to the AS-stack for later. 2659 The @waitfor@ semantics can then be adjusted correspondingly, as seen in listing \ref{f:entry-dtor} 2660 2661 \begin{figure} 2662 \begin{multicols}{2} 2663 Entry 2664 \begin{cfa} 2665 if monitor is free 2666 enter 2667 elif already own the monitor 2668 continue 2669 elif matches waitfor mask 2670 push criteria to AS-stack 2671 continue 2672 else 2673 block 2674 increment recursion 2675 \end{cfa} 2676 \columnbreak 2677 Exit 2678 \begin{cfa} 2679 decrement recursion 2680 if recursion == 0 2681 if signal_stack not empty 2682 set_owner to thread 2683 if all monitors ready 2684 wake-up thread 2685 endif 2686 endif 2687 2688 if entry queue not empty 2689 wake-up thread 2690 endif 2691 \end{cfa} 2692 \end{multicols} 2693 \begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling and external scheduling},label={f:entry3}] 2694 \end{cfa} 2695 \end{figure} 2696 2697 \begin{figure} 2698 \begin{multicols}{2} 2699 Destructor Entry 2700 \begin{cfa} 2701 if monitor is free 2702 enter 2703 elif already own the monitor 2704 increment recursion 2705 return 2706 create wait context 2707 if matches waitfor mask 2708 reset mask 2709 push self to AS-stack 2710 baton pass 2711 else 2712 wait 2713 increment recursion 2714 \end{cfa} 2715 \columnbreak 2716 Waitfor 2717 \begin{cfa} 2718 if matching thread is already there 2719 if found destructor 2720 push destructor to AS-stack 2721 unlock all monitors 2722 else 2723 push self to AS-stack 2724 baton pass 2725 endif 2726 return 2727 endif 2728 if non-blocking 2729 Unlock all monitors 2730 Return 2731 endif 2732 2733 push self to 
AS-stack 2734 set waitfor mask 2735 block 2736 return 2737 \end{cfa} 2738 \end{multicols} 2739 \begin{cfa}[caption={Pseudo code for the \protect\lstinline|waitfor| routine and the \protect\lstinline|mutex| entry routine for destructors},label={f:entry-dtor}] 2740 \end{cfa} 2741 \end{figure} 2742 2743 2744 % ====================================================================== 2745 % ====================================================================== 2746 \section{Putting It All Together} 2747 % ====================================================================== 2748 % ====================================================================== 2749 2750 2751 \section{Threads As Monitors} 2752 As it was subtly alluded in section \ref{threads}, @thread@s in \CFA are in fact monitors, which means that all monitor features are available when using threads. 2753 For example, here is a very simple two thread pipeline that could be used for a simulator of a game engine: 2754 \begin{figure} 2755 \begin{cfa}[caption={Toy simulator using \protect\lstinline|thread|s and \protect\lstinline|monitor|s.},label={f:engine-v1}] 2756 // Visualization declaration 2757 thread Renderer {} renderer; 2758 Frame * simulate( Simulator & this ); 2759 2760 // Simulation declaration 2761 thread Simulator{} simulator; 2762 void render( Renderer & this ); 2763 2764 // Blocking call used as communication 2765 void draw( Renderer & mutex this, Frame * frame ); 2766 2767 // Simulation loop 2768 void main( Simulator & this ) { 2769 while( true ) { 2770 Frame * frame = simulate( this ); 2771 draw( renderer, frame ); 2772 } 2773 } 2774 2775 // Rendering loop 2776 void main( Renderer & this ) { 2777 while( true ) { 2778 waitfor( draw, this ); 2779 render( this ); 2780 } 2781 } 2782 \end{cfa} 2783 \end{figure} 2784 One of the obvious complaints of the previous code snippet (other than its toy-like simplicity) is that it does not handle exit conditions and just goes on forever. 
2785 Luckily, the monitor semantics can also be used to clearly enforce a shutdown order in a concise manner: 2786 \begin{figure} 2787 \begin{cfa}[caption={Same toy simulator with proper termination condition.},label={f:engine-v2}] 2788 // Visualization declaration 2789 thread Renderer {} renderer; 2790 Frame * simulate( Simulator & this ); 2791 2792 // Simulation declaration 2793 thread Simulator{} simulator; 2794 void render( Renderer & this ); 2795 2796 // Blocking call used as communication 2797 void draw( Renderer & mutex this, Frame * frame ); 2798 2799 // Simulation loop 2800 void main( Simulator & this ) { 2801 while( true ) { 2802 Frame * frame = simulate( this ); 2803 draw( renderer, frame ); 2804 2805 // Exit main loop after the last frame 2806 if( frame->is_last ) break; 2807 } 2808 } 2809 2810 // Rendering loop 2811 void main( Renderer & this ) { 2812 while( true ) { 2813 waitfor( draw, this ); 2814 or waitfor( ^?{}, this ) { 2815 // Add an exit condition 2816 break; 2817 } 2818 2819 render( this ); 2820 } 2821 } 2822 2823 // Call destructor for simulator once simulator finishes 2824 // Call destructor for renderer to signify shutdown 2825 \end{cfa} 2826 \end{figure} 2827 2828 \section{Fibers \& Threads} 2829 As mentioned in section \ref{preemption}, \CFA uses preemptive threads by default but can use fibers on demand. 2830 Currently, using fibers is done by adding the following line of code to the program~: 2831 \begin{cfa} 2832 unsigned int default_preemption() { 2833 return 0; 2834 } 2835 \end{cfa} 2836 This routine is called by the kernel to fetch the default preemption rate, where 0 signifies an infinite time-slice, \ie no preemption. 
2837 However, once clusters are fully implemented, it will be possible to create fibers and \textbf{uthread} in the same system, as in listing \ref{f:fiber-uthread} 2838 \begin{figure} 2839 \lstset{language=CFA,deletedelim=**[is][]{`}{`}} 2840 \begin{cfa}[caption={Using fibers and \textbf{uthread} side-by-side in \CFA},label={f:fiber-uthread}] 2841 // Cluster forward declaration 2842 struct cluster; 2843 2844 // Processor forward declaration 2845 struct processor; 2846 2847 // Construct clusters with a preemption rate 2848 void ?{}(cluster& this, unsigned int rate); 2849 // Construct processor and add it to cluster 2850 void ?{}(processor& this, cluster& cluster); 2851 // Construct thread and schedule it on cluster 2852 void ?{}(thread& this, cluster& cluster); 2853 2854 // Declare two clusters 2855 cluster thread_cluster = { 10`ms }; // Preempt every 10 ms 2856 cluster fibers_cluster = { 0 }; // Never preempt 2857 2858 // Construct 4 processors 2859 processor processors[4] = { 2860 //2 for the thread cluster 2861 thread_cluster; 2862 thread_cluster; 2863 //2 for the fibers cluster 2864 fibers_cluster; 2865 fibers_cluster; 2866 }; 2867 2868 // Declares thread 2869 thread UThread {}; 2870 void ?{}(UThread& this) { 2871 // Construct underlying thread to automatically 2872 // be scheduled on the thread cluster 2873 (this){ thread_cluster } 2874 } 2875 2876 void main(UThread & this); 2877 2878 // Declares fibers 2879 thread Fiber {}; 2880 void ?{}(Fiber& this) { 2881 // Construct underlying thread to automatically 2882 // be scheduled on the fiber cluster 2883 (this.__thread){ fibers_cluster } 2884 } 2885 2886 void main(Fiber & this); 2887 \end{cfa} 2888 \end{figure} 2889 2890 2891 % ====================================================================== 2892 % ====================================================================== 2893 \section{Performance Results} \label{results} 2894 % ====================================================================== 2895 % 
====================================================================== 2896 \section{Machine Setup} 2897 Table \ref{tab:machine} shows the characteristics of the machine used to run the benchmarks. 2898 All tests were made on this machine. 2899 \begin{table} 2900 \begin{center} 2901 \begin{tabular}{| l | r | l | r |} 2902 \hline 2903 Architecture & x86\_64 & NUMA node(s) & 8 \\ 2904 \hline 2905 CPU op-mode(s) & 32-bit, 64-bit & Model name & AMD Opteron\texttrademark Processor 6380 \\ 2906 \hline 2907 Byte Order & Little Endian & CPU Freq & 2.5\si{\giga\hertz} \\ 2908 \hline 2909 CPU(s) & 64 & L1d cache & \SI{16}{\kibi\byte} \\ 2910 \hline 2911 Thread(s) per core & 2 & L1i cache & \SI{64}{\kibi\byte} \\ 2912 \hline 2913 Core(s) per socket & 8 & L2 cache & \SI{2048}{\kibi\byte} \\ 2914 \hline 2915 Socket(s) & 4 & L3 cache & \SI{6144}{\kibi\byte} \\ 2520 thread & stateful & \multicolumn{1}{c|}{No} & \multicolumn{1}{c}{Yes} \\ 2916 2521 \hline 2917 2522 \hline 2918 Operating system & Ubuntu 16.04.3 LTS & Kernel & Linux 4.4-97-generic\\2523 No & No & \textbf{1}\ \ \ aggregate type & \textbf{2}\ \ \ @monitor@ aggregate type \\ 2919 2524 \hline 2920 Compiler & GCC 6.3 & Translator & CFA 1\\2525 No & Yes (stackless) & \textbf{3}\ \ \ @generator@ & \textbf{4}\ \ \ @monitor@ @generator@ \\ 2921 2526 \hline 2922 Java version & OpenJDK-9 & Go version & 1.9.2\\2527 No & Yes (stackful) & \textbf{5}\ \ \ @coroutine@ & \textbf{6}\ \ \ @monitor@ @coroutine@ \\ 2923 2528 \hline 2529 Yes & No / Yes (stackless) & \textbf{7}\ \ \ {\color{red}rejected} & \textbf{8}\ \ \ {\color{red}rejected} \\ 2530 \hline 2531 Yes & Yes (stackful) & \textbf{9}\ \ \ @thread@ & \textbf{10}\ \ @monitor@ @thread@ \\ 2924 2532 \end{tabular} 2925 \end{center}2926 \caption{Machine setup used for the tests}2927 \label{tab:machine}2928 2533 \end{table} 2929 2534 2930 \section{Micro Benchmarks} 2931 All benchmarks are run using the same harness to produce the results, seen as the @BENCH()@ macro in the following 
examples. 2932 This macro uses the following logic to benchmark the code: 2933 \begin{cfa} 2934 #define BENCH(run, result) \ 2935 before = gettime(); \ 2936 run; \ 2937 after = gettime(); \ 2938 result = (after - before) / N; 2939 \end{cfa} 2940 The method used to get time is @clock_gettime(CLOCK_THREAD_CPUTIME_ID);@. 2941 Each benchmark is using many iterations of a simple call to measure the cost of the call. 2942 The specific number of iterations depends on the specific benchmark. 2943 2944 \subsection{Context-Switching} 2945 The first interesting benchmark is to measure how long context-switches take. 2946 The simplest approach to do this is to yield on a thread, which executes a 2-step context switch. 2947 Yielding causes the thread to context-switch to the scheduler and back, more precisely: from the \textbf{uthread} to the \textbf{kthread} then from the \textbf{kthread} back to the same \textbf{uthread} (or a different one in the general case). 2948 In order to make the comparison fair, coroutines also execute a 2-step context-switch by resuming another coroutine which does nothing but suspending in a tight loop, which is a resume/suspend cycle instead of a yield. 2949 Figure~\ref{f:ctx-switch} shows the code for coroutines and threads with the results in table \ref{tab:ctx-switch}. 2950 All omitted tests are functionally identical to one of these tests. 2951 The difference between coroutines and threads can be attributed to the cost of scheduling. 2535 2536 \subsection{Low-level Locks} 2537 2538 For completeness and efficiency, \CFA provides a standard set of low-level locks: recursive mutex, condition, semaphore, barrier, \etc, and atomic instructions: @fetchAssign@, @fetchAdd@, @testSet@, @compareSet@, \etc. 2539 Some of these low-level mechanism are used in the \CFA runtime, but we strongly advocate using high-level mechanisms whenever possible. 
2540 2541 2542 % \section{Parallelism} 2543 % \label{s:Parallelism} 2544 % 2545 % Historically, computer performance was about processor speeds. 2546 % However, with heat dissipation being a direct consequence of speed increase, parallelism is the new source for increased performance~\cite{Sutter05, Sutter05b}. 2547 % Therefore, high-performance applications must care about parallelism, which requires concurrency. 2548 % The lowest-level approach of parallelism is to use \newterm{kernel threads} in combination with semantics like @fork@, @join@, \etc. 2549 % However, kernel threads are better as an implementation tool because of complexity and higher cost. 2550 % Therefore, different abstractions are often layered onto kernel threads to simplify them, \eg pthreads. 2551 % 2552 % 2553 % \subsection{User Threads} 2554 % 2555 % A direct improvement on kernel threads is user threads, \eg Erlang~\cite{Erlang} and \uC~\cite{uC++book}. 2556 % This approach provides an interface that matches the language paradigms, gives more control over concurrency by the language runtime, and an abstract (and portable) interface to the underlying kernel threads across operating systems. 2557 % In many cases, user threads can be used on a much larger scale (100,000 threads). 2558 % Like kernel threads, user threads support preemption, which maximizes nondeterminism, but increases the potential for concurrency errors: race, livelock, starvation, and deadlock. 2559 % \CFA adopts user-threads to provide more flexibility and a low-cost mechanism to build any other concurrency approach, \eg thread pools and actors~\cite{Actors}. 2560 % 2561 % A variant of user thread is \newterm{fibres}, which removes preemption, \eg Go~\cite{Go} @goroutine@s. 2562 % Like functional programming, which removes mutation and its associated problems, removing preemption from concurrency reduces nondeterminism, making race and deadlock errors more difficult to generate. 
2563 % However, preemption is necessary for fairness and to reduce tail-latency. 2564 % For concurrency that relies on spinning, if all cores spin the system is livelocked, whereas preemption breaks the livelock. 2565 2566 2567 \begin{comment} 2568 \subsection{Thread Pools} 2569 2570 In contrast to direct threading is indirect \newterm{thread pools}, \eg Java @executor@, where small jobs (work units) are inserted into a work pool for execution. 2571 If the jobs are dependent, \ie interact, there is an implicit/explicit dependency graph that ties them together. 2572 While removing direct concurrency, and hence the amount of context switching, thread pools significantly limit the interaction that can occur among jobs. 2573 Indeed, jobs should not block because that also blocks the underlying thread, which effectively means the CPU utilization, and therefore throughput, suffers. 2574 While it is possible to tune the thread pool with sufficient threads, it becomes difficult to obtain high throughput and good core utilization as job interaction increases. 2575 As well, concurrency errors return, which thread pools are supposed to mitigate. 
2576 2952 2577 \begin{figure} 2578 \centering 2579 \begin{tabular}{@{}l|l@{}} 2580 \begin{cfa} 2581 struct Adder { 2582 int * row, cols; 2583 }; 2584 int operator()() { 2585 subtotal = 0; 2586 for ( int c = 0; c < cols; c += 1 ) 2587 subtotal += row[c]; 2588 return subtotal; 2589 } 2590 void ?{}( Adder * adder, int row[$\,$], int cols, int & subtotal ) { 2591 adder.[rows, cols, subtotal] = [rows, cols, subtotal]; 2592 } 2593 2594 2595 2596 2597 \end{cfa} 2598 & 2599 \begin{cfa} 2600 int main() { 2601 const int rows = 10, cols = 10; 2602 int matrix[rows][cols], subtotals[rows], total = 0; 2603 // read matrix 2604 Executor executor( 4 ); // kernel threads 2605 Adder * adders[rows]; 2606 for ( r; rows ) { // send off work for executor 2607 adders[r] = new( matrix[r], cols, &subtotal[r] ); 2608 executor.send( *adders[r] ); 2609 } 2610 for ( r; rows ) { // wait for results 2611 delete( adders[r] ); 2612 total += subtotals[r]; 2613 } 2614 sout | total; 2615 } 2616 \end{cfa} 2617 \end{tabular} 2618 \caption{Executor} 2619 \end{figure} 2620 \end{comment} 2621 2622 2623 \section{Runtime Structure} 2624 \label{s:CFARuntimeStructure} 2625 2626 Figure~\ref{f:RunTimeStructure} illustrates the runtime structure of a \CFA program. 2627 In addition to the new kinds of objects introduced by \CFA, there are two more runtime entities used to control parallel execution: cluster and (virtual) processor. 2628 An executing thread is illustrated by its containment in a processor. 2629 2630 \begin{figure} 2631 \centering 2632 \input{RunTimeStructure} 2633 \caption{\CFA Runtime structure} 2634 \label{f:RunTimeStructure} 2635 \end{figure} 2636 2637 2638 \subsection{Cluster} 2639 \label{s:RuntimeStructureCluster} 2640 2641 A \newterm{cluster} is a collection of threads and virtual processors (abstract kernel-thread) that execute the (user) threads from its own ready queue (like an OS executing kernel threads). 
2642 The purpose of a cluster is to control the amount of parallelism that is possible among threads, plus scheduling and other execution defaults. 2643 The default cluster-scheduler is single-queue multi-server, which provides automatic load-balancing of threads on processors. 2644 However, the design allows changing the scheduler, \eg multi-queue multi-server with work-stealing/sharing across the virtual processors. 2645 If several clusters exist, both threads and virtual processors can be explicitly migrated from one cluster to another. 2646 No automatic load balancing among clusters is performed by \CFA. 2647 2648 When a \CFA program begins execution, it creates a user cluster with a single processor and a special processor to handle preemption that does not execute user threads. 2649 The user cluster is created to contain the application user-threads. 2650 Having all threads execute on the one cluster often maximizes utilization of processors, which minimizes runtime. 2651 However, because of limitations of scheduling requirements (real-time), NUMA architecture, heterogeneous hardware, or issues with the underlying operating system, multiple clusters are sometimes necessary. 2652 2653 2654 \subsection{Virtual Processor} 2655 \label{s:RuntimeStructureProcessor} 2656 2657 A virtual processor is implemented by a kernel thread (\eg UNIX process), which is scheduled for execution on a hardware processor by the underlying operating system. 2658 Programs may use more virtual processors than hardware processors. 2659 On a multiprocessor, kernel threads are distributed across the hardware processors resulting in virtual processors executing in parallel. 2660 (It is possible to use affinity to lock a virtual processor onto a particular hardware processor~\cite{affinityLinux, affinityWindows, affinityFreebsd, affinityNetbsd, affinityMacosx}, which is used when caching issues occur or for heterogeneous hardware processors.) 
2661 The \CFA runtime attempts to block unused processors and unblock processors as the system load increases; 2662 balancing the workload with processors is difficult because it requires future knowledge, \ie what will the application workload do next. 2663 Preemption occurs on virtual processors rather than user threads, via operating-system interrupts. 2664 Thus virtual processors execute user threads, where preemption frequency applies to a virtual processor, so preemption occurs randomly across the executed user threads. 2665 Turning off preemption transforms user threads into fibres. 2666 2667 2668 \begin{comment} 2669 \section{Implementation} 2670 \label{s:Implementation} 2671 2672 A primary implementation challenge is avoiding contention from dynamically allocating memory because of bulk acquire, \eg the internal-scheduling design is (almost) free of allocations. 2673 All blocking operations are made by parking threads onto queues, therefore all queues are designed with intrusive nodes, where each node has preallocated link fields for chaining. 2674 Furthermore, several bulk-acquire operations need a variable amount of memory. 2675 This storage is allocated at the base of a thread's stack before blocking, which means programmers must add a small amount of extra space for stacks. 2676 2677 In \CFA, ordering of monitor acquisition relies on memory ordering to prevent deadlock~\cite{Havender68}, because all objects have distinct non-overlapping memory layouts, and mutual-exclusion for a monitor is only defined for its lifetime. 2678 When a mutex call is made, pointers to the concerned monitors are aggregated into a variable-length array and sorted. 2679 This array persists for the entire duration of the mutual exclusion and is used extensively for synchronization operations. 
2680 2681 To improve performance and simplicity, context switching occurs inside a function call, so only callee-saved registers are copied onto the stack and then the stack register is switched; 2682 the corresponding registers are then restored for the other context. 2683 Note, the instruction pointer is untouched since the context switch is always inside the same function. 2684 Experimental results (not presented) for a stackless or stackful scheduler (1 versus 2 context switches) (see Section~\ref{s:Concurrency}) show the performance is virtually equivalent, because both approaches are dominated by locking to prevent a race condition. 2685 2686 All kernel threads (@pthreads@) created a stack. 2687 Each \CFA virtual processor is implemented as a coroutine and these coroutines run directly on the kernel-thread stack, effectively stealing this stack. 2688 The exception to this rule is the program main, \ie the initial kernel thread that is given to any program. 2689 In order to respect C expectations, the stack of the initial kernel thread is used by program main rather than the main processor, allowing it to grow dynamically as in a normal C program. 2690 \end{comment} 2691 2692 2693 \subsection{Preemption} 2694 2695 Nondeterministic preemption provides fairness from long-running threads, and forces concurrent programmers to write more robust programs, rather than relying on code between cooperative scheduling to be atomic. 2696 This atomic reliance can fail on multi-core machines, because execution across cores is nondeterministic. 2697 A different reason for not supporting preemption is that it significantly complicates the runtime system, \eg Microsoft runtime does not support interrupts and on Linux systems, interrupts are complex (see below). 2698 Preemption is normally handled by setting a countdown timer on each virtual processor. 
2699 When the timer expires, an interrupt is delivered, and the interrupt handler resets the countdown timer, and if the virtual processor is executing in user code, the signal handler performs a user-level context-switch, or if executing in the language runtime kernel, the preemption is ignored or rolled forward to the point where the runtime kernel context switches back to user code. 2700 Multiple signal handlers may be pending. 2701 When control eventually switches back to the signal handler, it returns normally, and execution continues in the interrupted user thread, even though the return from the signal handler may be on a different kernel thread than the one where the signal is delivered. 2702 The only issue with this approach is that signal masks from one kernel thread may be restored on another as part of returning from the signal handler; 2703 therefore, the same signal mask is required for all virtual processors in a cluster. 2704 Because preemption frequency is usually long (1 millisecond) performance cost is negligible. 2705 2706 Linux switched a decade ago from specific to arbitrary process signal-delivery for applications with multiple kernel threads. 2707 \begin{cquote} 2708 A process-directed signal may be delivered to any one of the threads that does not currently have the signal blocked. 2709 If more than one of the threads has the signal unblocked, then the kernel chooses an arbitrary thread to which it will deliver the signal. 2710 SIGNAL(7) - Linux Programmer's Manual 2711 \end{cquote} 2712 Hence, the timer-expiry signal, which is generated \emph{externally} by the Linux kernel to an application, is delivered to any of its Linux subprocesses (kernel threads). 2713 To ensure each virtual processor receives a preemption signal, a discrete-event simulation is run on a special virtual processor, and only it sets and receives timer events. 
2714 Virtual processors register an expiration time with the discrete-event simulator, which is inserted in sorted order. 2715 The simulation sets the countdown timer to the value at the head of the event list, and when the timer expires, all events less than or equal to the current time are processed. 2716 Processing a preemption event sends an \emph{internal} @SIGUSR1@ signal to the registered virtual processor, which is always delivered to that processor. 2717 2718 2719 \subsection{Debug Kernel} 2720 2721 There are two versions of the \CFA runtime kernel: debug and non-debug. 2722 The debugging version has many runtime checks and internal assertions, \eg stack (non-writable) guard page, and checks for stack overflow whenever context switches occur among coroutines and threads, which catches most stack overflows. 2723 After a program is debugged, the non-debugging version can be used to significantly decrease space and increase performance. 2724 2725 2726 \section{Performance} 2727 \label{s:Performance} 2728 2729 To verify the implementation of the \CFA runtime, a series of microbenchmarks are performed comparing \CFA with pthreads, Java OpenJDK-9, Go 1.12.6 and \uC 7.0.0. 2730 For comparison, the package must be multi-processor (M:N), which excludes libdill/libmil~\cite{libdill} (M:1), and use a shared-memory programming model, \eg not message passing. 2731 The benchmark computer is an AMD Opteron\texttrademark\ 6380 NUMA 64-core, 8 socket, 2.5 GHz processor, running Ubuntu 16.04.6 LTS, and \CFA/\uC are compiled with gcc 6.5. 2732 2733 All benchmarks are run using the following harness. (The Java harness is augmented to circumvent JIT issues.) 2734 \begin{cfa} 2735 unsigned int N = 10_000_000; 2736 #define BENCH( `run` ) Time before = getTimeNsec(); `run;` Duration result = (getTimeNsec() - before) / N; 2737 \end{cfa} 2738 The method used to get time is @clock_gettime( CLOCK_REALTIME )@. 
2739 Each benchmark is performed @N@ times, where @N@ varies depending on the benchmark; 2740 the total time is divided by @N@ to obtain the average time for a benchmark. 2741 Each benchmark experiment is run 31 times. 2742 All omitted tests for other languages are functionally identical to the \CFA tests and available online~\cite{CforallBenchMarks}. 2743 % tar --exclude=.deps --exclude=Makefile --exclude=Makefile.in --exclude=c.c --exclude=cxx.cpp --exclude=fetch_add.c -cvhf benchmark.tar benchmark 2744 2745 \paragraph{Object Creation} 2746 2747 Object creation is measured by creating/deleting the specific kind of concurrent object. 2748 Figure~\ref{f:creation} shows the code for \CFA, with results in Table~\ref{tab:creation}. 2749 The only note here is that the call stacks of \CFA coroutines are lazily created, therefore without priming the coroutine to force stack creation, the creation cost is artificially low. 2750 2953 2751 \begin{multicols}{2} 2954 \CFA Coroutines 2955 \begin{cfa} 2956 coroutine GreatSuspender {}; 2957 void main(GreatSuspender& this) { 2958 while(true) { suspend(); } 2959 } 2752 \lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}} 2753 \begin{cfa} 2754 @thread@ MyThread {}; 2755 void @main@( MyThread & ) {} 2960 2756 int main() { 2961 GreatSuspender s; 2962 resume(s); 2757 BENCH( for ( N ) { @MyThread m;@ } ) 2758 sout | result`ns; 2759 } 2760 \end{cfa} 2761 \captionof{figure}{\CFA object-creation benchmark} 2762 \label{f:creation} 2763 2764 \columnbreak 2765 2766 \vspace*{-16pt} 2767 \captionof{table}{Object creation comparison (nanoseconds)} 2768 \label{tab:creation} 2769 2770 \begin{tabular}[t]{@{}r*{3}{D{.}{.}{5.2}}@{}} 2771 \multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\ 2772 \CFA Coroutine Lazy & 13.2 & 13.1 & 0.44 \\ 2773 \CFA Coroutine Eager & 531.3 & 536.0 & 26.54 \\ 2774 \CFA Thread & 2074.9 & 2066.5 & 170.76 \\ 2775 \uC 
Coroutine & 89.6 & 90.5 & 1.83 \\ 2776 \uC Thread & 528.2 & 528.5 & 4.94 \\ 2777 Goroutine & 4068.0 & 4113.1 & 414.55 \\ 2778 Java Thread & 103848.5 & 104295.4 & 2637.57 \\ 2779 Pthreads & 33112.6 & 33127.1 & 165.90 2780 \end{tabular} 2781 \end{multicols} 2782 2783 2784 \paragraph{Context-Switching} 2785 2786 In procedural programming, the cost of a function call is important as modularization (refactoring) increases. 2787 (In many cases, a compiler inlines function calls to eliminate this cost.) 2788 Similarly, when modularization extends to coroutines/tasks, the time for a context switch becomes a relevant factor. 2789 The coroutine test is from resumer to suspender and from suspender to resumer, which is two context switches. 2790 The thread test is using yield to enter and return from the runtime kernel, which is two context switches. 2791 The difference in performance between coroutine and thread context-switch is the cost of scheduling for threads, whereas coroutines are self-scheduling. 2792 Figure~\ref{f:ctx-switch} only shows the \CFA code for coroutines/threads (other systems are similar) with all results in Table~\ref{tab:ctx-switch}. 
2793 2794 \begin{multicols}{2} 2795 \lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}} 2796 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 2797 @coroutine@ C {} c; 2798 void main( C & ) { for ( ;; ) { @suspend;@ } } 2799 int main() { // coroutine test 2800 BENCH( for ( N ) { @resume( c );@ } ) 2801 sout | result`ns; 2802 } 2803 int main() { // task test 2804 BENCH( for ( N ) { @yield();@ } ) 2805 sout | result`ns; 2806 } 2807 \end{cfa} 2808 \captionof{figure}{\CFA context-switch benchmark} 2809 \label{f:ctx-switch} 2810 2811 \columnbreak 2812 2813 \vspace*{-16pt} 2814 \captionof{table}{Context switch comparison (nanoseconds)} 2815 \label{tab:ctx-switch} 2816 \begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}} 2817 \multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\ 2818 C function & 1.8 & 1.8 & 0.01 \\ 2819 \CFA generator & 2.4 & 2.2 & 0.25 \\ 2820 \CFA Coroutine & 36.2 & 36.2 & 0.25 \\ 2821 \CFA Thread & 93.2 & 93.5 & 2.09 \\ 2822 \uC Coroutine & 52.0 & 52.1 & 0.51 \\ 2823 \uC Thread & 96.2 & 96.3 & 0.58 \\ 2824 Goroutine & 141.0 & 141.3 & 3.39 \\ 2825 Java Thread & 374.0 & 375.8 & 10.38 \\ 2826 Pthreads Thread & 361.0 & 365.3 & 13.19 2827 \end{tabular} 2828 \end{multicols} 2829 2830 2831 \paragraph{Mutual-Exclusion} 2832 2833 Uncontented mutual exclusion, which frequently occurs, is measured by entering/leaving a critical section. 2834 For monitors, entering and leaving a monitor function is measured. 2835 To put the results in context, the cost of entering a non-inline function and the cost of acquiring and releasing a @pthread_mutex@ lock is also measured. 2836 Figure~\ref{f:mutex} shows the code for \CFA with all results in Table~\ref{tab:mutex}. 2837 Note, the incremental cost of bulk acquire for \CFA, which is largely a fixed cost for small numbers of mutex objects. 
2838 2839 \begin{multicols}{2} 2840 \lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}} 2841 \begin{cfa} 2842 @monitor@ M {} m1/*, m2, m3, m4*/; 2843 void __attribute__((noinline)) 2844 do_call( M & @mutex m/*, m2, m3, m4*/@ ) {} 2845 int main() { 2963 2846 BENCH( 2964 for(size_t i=0; i<n; i++) { 2965 resume(s); 2966 }, 2967 result 2847 for( N ) do_call( m1/*, m2, m3, m4*/ ); 2968 2848 ) 2969 printf("%llu\n", result); 2970 } 2971 \end{cfa} 2849 sout | result`ns; 2850 } 2851 \end{cfa} 2852 \captionof{figure}{\CFA acquire/release mutex benchmark} 2853 \label{f:mutex} 2854 2972 2855 \columnbreak 2973 \CFA Threads 2974 \begin{cfa} 2975 2976 2977 2978 2979 int main() { 2980 2981 2982 BENCH( 2983 for(size_t i=0; i<n; i++) { 2984 yield(); 2985 }, 2986 result 2987 ) 2988 printf("%llu\n", result); 2989 } 2990 \end{cfa} 2856 2857 \vspace*{-16pt} 2858 \captionof{table}{Mutex comparison (nanoseconds)} 2859 \label{tab:mutex} 2860 \begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}} 2861 \multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\ 2862 test and test-and-test lock & 19.1 & 18.9 & 0.40 \\ 2863 \CFA @mutex@ function, 1 arg. & 45.9 & 46.6 & 1.45 \\ 2864 \CFA @mutex@ function, 2 arg. & 105.0 & 104.7 & 3.08 \\ 2865 \CFA @mutex@ function, 4 arg. & 165.0 & 167.6 & 5.65 \\ 2866 \uC @monitor@ member rtn. 
& 54.0 & 53.7 & 0.82 \\ 2867 Java synchronized method & 31.0 & 31.1 & 0.50 \\ 2868 Pthreads Mutex Lock & 33.6 & 32.6 & 1.14 2869 \end{tabular} 2991 2870 \end{multicols} 2992 \begin{cfa}[caption={\CFA benchmark code used to measure context-switches for coroutines and threads.},label={f:ctx-switch}] 2993 \end{cfa} 2994 \end{figure} 2995 2996 \begin{table} 2997 \begin{center} 2998 \begin{tabular}{| l | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] |} 2999 \cline{2-4} 3000 \multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} \\ 3001 \hline 3002 Kernel Thread & 241.5 & 243.86 & 5.08 \\ 3003 \CFA Coroutine & 38 & 38 & 0 \\ 3004 \CFA Thread & 103 & 102.96 & 2.96 \\ 3005 \uC Coroutine & 46 & 45.86 & 0.35 \\ 3006 \uC Thread & 98 & 99.11 & 1.42 \\ 3007 Goroutine & 150 & 149.96 & 3.16 \\ 3008 Java Thread & 289 & 290.68 & 8.72 \\ 3009 \hline 3010 \end{tabular} 3011 \end{center} 3012 \caption{Context Switch comparison. 3013 All numbers are in nanoseconds(\si{\nano\second})} 3014 \label{tab:ctx-switch} 3015 \end{table} 3016 3017 \subsection{Mutual-Exclusion} 3018 The next interesting benchmark is to measure the overhead to enter/leave a critical-section. 3019 For monitors, the simplest approach is to measure how long it takes to enter and leave a monitor routine. 3020 Figure~\ref{f:mutex} shows the code for \CFA. 3021 To put the results in context, the cost of entering a non-inline routine and the cost of acquiring and releasing a @pthread_mutex@ lock is also measured. 3022 The results can be shown in table \ref{tab:mutex}. 
3023 3024 \begin{figure} 3025 \begin{cfa}[caption={\CFA benchmark code used to measure mutex routines.},label={f:mutex}] 3026 monitor M {}; 3027 void __attribute__((noinline)) call( M & mutex m /*, m2, m3, m4*/ ) {} 3028 3029 int main() { 3030 M m/*, m2, m3, m4*/; 3031 BENCH( 3032 for(size_t i=0; i<n; i++) { 3033 call(m/*, m2, m3, m4*/); 3034 }, 3035 result 3036 ) 3037 printf("%llu\n", result); 3038 } 3039 \end{cfa} 3040 \end{figure} 3041 3042 \begin{table} 3043 \begin{center} 3044 \begin{tabular}{| l | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] |} 3045 \cline{2-4} 3046 \multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} \\ 3047 \hline 3048 C routine & 2 & 2 & 0 \\ 3049 FetchAdd + FetchSub & 26 & 26 & 0 \\ 3050 Pthreads Mutex Lock & 31 & 31.86 & 0.99 \\ 3051 \uC @monitor@ member routine & 30 & 30 & 0 \\ 3052 \CFA @mutex@ routine, 1 argument & 41 & 41.57 & 0.9 \\ 3053 \CFA @mutex@ routine, 2 argument & 76 & 76.96 & 1.57 \\ 3054 \CFA @mutex@ routine, 4 argument & 145 & 146.68 & 3.85 \\ 3055 Java synchronized routine & 27 & 28.57 & 2.6 \\ 3056 \hline 3057 \end{tabular} 3058 \end{center} 3059 \caption{Mutex routine comparison. 3060 All numbers are in nanoseconds(\si{\nano\second})} 3061 \label{tab:mutex} 3062 \end{table} 3063 3064 \subsection{Internal Scheduling} 3065 The internal-scheduling benchmark measures the cost of waiting on and signalling a condition variable. 3066 Figure~\ref{f:int-sched} shows the code for \CFA, with results table \ref{tab:int-sched}. 3067 As with all other benchmarks, all omitted tests are functionally identical to one of these tests. 
3068 3069 \begin{figure} 3070 \begin{cfa}[caption={Benchmark code for internal scheduling},label={f:int-sched}] 2871 2872 2873 \paragraph{External Scheduling} 2874 2875 External scheduling is measured using a cycle of two threads calling and accepting the call using the @waitfor@ statement. 2876 Figure~\ref{f:ext-sched} shows the code for \CFA, with results in Table~\ref{tab:ext-sched}. 2877 Note, the incremental cost of bulk acquire for \CFA, which is largely a fixed cost for small numbers of mutex objects. 2878 2879 \begin{multicols}{2} 2880 \lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}} 2881 \vspace*{-16pt} 2882 \begin{cfa} 3071 2883 volatile int go = 0; 3072 condition c; 3073 monitor M {}; 3074 M m1; 3075 3076 void __attribute__((noinline)) do_call( M & mutex a1 ) { signal(c); } 3077 2884 @monitor@ M {} m; 3078 2885 thread T {}; 3079 void ^?{}( T & mutex this ) {} 3080 void main( T & this ) { 3081 while(go == 0) { yield(); } 3082 while(go == 1) { do_call(m1); } 3083 } 3084 int __attribute__((noinline)) do_wait( M & mutex a1 ) { 3085 go = 1; 3086 BENCH( 3087 for(size_t i=0; i<n; i++) { 3088 wait(c); 3089 }, 3090 result 3091 ) 3092 printf("%llu\n", result); 3093 go = 0; 3094 return 0; 2886 void __attribute__((noinline)) 2887 do_call( M & @mutex@ ) {} 2888 void main( T & ) { 2889 while ( go == 0 ) { yield(); } 2890 while ( go == 1 ) { do_call( m ); } 2891 } 2892 int __attribute__((noinline)) 2893 do_wait( M & @mutex@ m ) { 2894 go = 1; // continue other thread 2895 BENCH( for ( N ) { @waitfor( do_call, m );@ } ) 2896 go = 0; // stop other thread 2897 sout | result`ns; 3095 2898 } 3096 2899 int main() { 3097 2900 T t; 3098 return do_wait(m1); 3099 } 3100 \end{cfa} 3101 \end{figure} 3102 3103 \begin{table} 3104 \begin{center} 3105 \begin{tabular}{| l | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] |} 3106 \cline{2-4} 3107 
\multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} \\ 3108 \hline 3109 Pthreads Condition Variable & 5902.5 & 6093.29 & 714.78 \\ 3110 \uC @signal@ & 322 & 323 & 3.36 \\ 3111 \CFA @signal@, 1 @monitor@ & 352.5 & 353.11 & 3.66 \\ 3112 \CFA @signal@, 2 @monitor@ & 430 & 430.29 & 8.97 \\ 3113 \CFA @signal@, 4 @monitor@ & 594.5 & 606.57 & 18.33 \\ 3114 Java @notify@ & 13831.5 & 15698.21 & 4782.3 \\ 3115 \hline 2901 do_wait( m ); 2902 } 2903 \end{cfa} 2904 \captionof{figure}{\CFA external-scheduling benchmark} 2905 \label{f:ext-sched} 2906 2907 \columnbreak 2908 2909 \vspace*{-16pt} 2910 \captionof{table}{External-scheduling comparison (nanoseconds)} 2911 \label{tab:ext-sched} 2912 \begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}} 2913 \multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\ 2914 \CFA @waitfor@, 1 @monitor@ & 376.4 & 376.8 & 7.63 \\ 2915 \CFA @waitfor@, 2 @monitor@ & 491.4 & 492.0 & 13.31 \\ 2916 \CFA @waitfor@, 4 @monitor@ & 681.0 & 681.7 & 19.10 \\ 2917 \uC @_Accept@ & 331.1 & 331.4 & 2.66 3116 2918 \end{tabular} 3117 \end{ center}3118 \caption{Internal scheduling comparison. 3119 All numbers are in nanoseconds(\si{\nano\second})} 3120 \ label{tab:int-sched}3121 \end{table} 3122 3123 \subsection{External Scheduling} 3124 The Internal scheduling benchmark measures the cost of the @waitfor@ statement (@_Accept@ in \uC).3125 Figure~\ref{f:ext-sched} shows the code for \CFA, with results in table \ref{tab:ext-sched}.3126 As with all other benchmarks, all omitted tests are functionally identical to one of these tests. 3127 3128 \ begin{figure}3129 \begin{cfa} [caption={Benchmark code for external scheduling},label={f:ext-sched}]2919 \end{multicols} 2920 2921 2922 \paragraph{Internal Scheduling} 2923 2924 Internal scheduling is measured using a cycle of two threads signalling and waiting. 
2925 Figure~\ref{f:int-sched} shows the code for \CFA, with results in Table~\ref{tab:int-sched}. 2926 Note, the incremental cost of bulk acquire for \CFA, which is largely a fixed cost for small numbers of mutex objects. 2927 Java scheduling is significantly greater because the benchmark explicitly creates multiple thread in order to prevent the JIT from making the program sequential, \ie removing all locking. 2928 2929 \begin{multicols}{2} 2930 \lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}} 2931 \begin{cfa} 3130 2932 volatile int go = 0; 3131 monitor M {}; 3132 M m1; 2933 @monitor@ M { @condition c;@ } m; 2934 void __attribute__((noinline)) 2935 do_call( M & @mutex@ a1 ) { @signal( c );@ } 3133 2936 thread T {}; 3134 3135 void __attribute__((noinline)) do_call( M & mutex a1 ) {}3136 3137 void ^?{}( T & mutex this ) {}3138 2937 void main( T & this ) { 3139 while(go == 0) { yield(); } 3140 while(go == 1) { do_call(m1); } 3141 } 3142 int __attribute__((noinline)) do_wait( M & mutex a1 ) { 3143 go = 1; 3144 BENCH( 3145 for(size_t i=0; i<n; i++) { 3146 waitfor(call, a1); 3147 }, 3148 result 3149 ) 3150 printf("%llu\n", result); 3151 go = 0; 3152 return 0; 2938 while ( go == 0 ) { yield(); } 2939 while ( go == 1 ) { do_call( m ); } 2940 } 2941 int __attribute__((noinline)) 2942 do_wait( M & mutex m ) with(m) { 2943 go = 1; // continue other thread 2944 BENCH( for ( N ) { @wait( c );@ } ); 2945 go = 0; // stop other thread 2946 sout | result`ns; 3153 2947 } 3154 2948 int main() { 3155 2949 T t; 3156 return do_wait(m1); 3157 } 3158 \end{cfa} 3159 \end{figure} 3160 3161 \begin{table} 3162 \begin{center} 3163 \begin{tabular}{| l | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] |} 3164 \cline{2-4} 3165 \multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} 
\\ 3166 \hline 3167 \uC @Accept@ & 350 & 350.61 & 3.11 \\ 3168 \CFA @waitfor@, 1 @monitor@ & 358.5 & 358.36 & 3.82 \\ 3169 \CFA @waitfor@, 2 @monitor@ & 422 & 426.79 & 7.95 \\ 3170 \CFA @waitfor@, 4 @monitor@ & 579.5 & 585.46 & 11.25 \\ 3171 \hline 2950 do_wait( m ); 2951 } 2952 \end{cfa} 2953 \captionof{figure}{\CFA Internal-scheduling benchmark} 2954 \label{f:int-sched} 2955 2956 \columnbreak 2957 2958 \vspace*{-16pt} 2959 \captionof{table}{Internal-scheduling comparison (nanoseconds)} 2960 \label{tab:int-sched} 2961 \bigskip 2962 2963 \begin{tabular}{@{}r*{3}{D{.}{.}{5.2}}@{}} 2964 \multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\ 2965 \CFA @signal@, 1 @monitor@ & 372.6 & 374.3 & 14.17 \\ 2966 \CFA @signal@, 2 @monitor@ & 492.7 & 494.1 & 12.99 \\ 2967 \CFA @signal@, 4 @monitor@ & 749.4 & 750.4 & 24.74 \\ 2968 \uC @signal@ & 320.5 & 321.0 & 3.36 \\ 2969 Java @notify@ & 10160.5 & 10169.4 & 267.71 \\ 2970 Pthreads Cond. Variable & 4949.6 & 5065.2 & 363 3172 2971 \end{tabular} 3173 \end{center} 3174 \caption{External scheduling comparison. 3175 All numbers are in nanoseconds(\si{\nano\second})} 3176 \label{tab:ext-sched} 3177 \end{table} 3178 3179 3180 \subsection{Object Creation} 3181 Finally, the last benchmark measures the cost of creation for concurrent objects. 3182 Figure~\ref{f:creation} shows the code for @pthread@s and \CFA threads, with results shown in table \ref{tab:creation}. 3183 As with all other benchmarks, all omitted tests are functionally identical to one of these tests. 3184 The only note here is that the call stacks of \CFA coroutines are lazily created, therefore without priming the coroutine, the creation cost is very low. 
3185 3186 \begin{figure} 3187 \begin{center} 3188 @pthread@ 3189 \begin{cfa} 3190 int main() { 3191 BENCH( 3192 for(size_t i=0; i<n; i++) { 3193 pthread_t thread; 3194 if(pthread_create(&thread,NULL,foo,NULL)<0) { 3195 perror( "failure" ); 3196 return 1; 3197 } 3198 3199 if(pthread_join(thread, NULL)<0) { 3200 perror( "failure" ); 3201 return 1; 3202 } 3203 }, 3204 result 3205 ) 3206 printf("%llu\n", result); 3207 } 3208 \end{cfa} 3209 3210 3211 3212 \CFA Threads 3213 \begin{cfa} 3214 int main() { 3215 BENCH( 3216 for(size_t i=0; i<n; i++) { 3217 MyThread m; 3218 }, 3219 result 3220 ) 3221 printf("%llu\n", result); 3222 } 3223 \end{cfa} 3224 \end{center} 3225 \caption{Benchmark code for \protect\lstinline|pthread|s and \CFA to measure object creation} 3226 \label{f:creation} 3227 \end{figure} 3228 3229 \begin{table} 3230 \begin{center} 3231 \begin{tabular}{| l | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] |} 3232 \cline{2-4} 3233 \multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} \\ 3234 \hline 3235 Pthreads & 26996 & 26984.71 & 156.6 \\ 3236 \CFA Coroutine Lazy & 6 & 5.71 & 0.45 \\ 3237 \CFA Coroutine Eager & 708 & 706.68 & 4.82 \\ 3238 \CFA Thread & 1173.5 & 1176.18 & 15.18 \\ 3239 \uC Coroutine & 109 & 107.46 & 1.74 \\ 3240 \uC Thread & 526 & 530.89 & 9.73 \\ 3241 Goroutine & 2520.5 & 2530.93 & 61,56 \\ 3242 Java Thread & 91114.5 & 92272.79 & 961.58 \\ 3243 \hline 3244 \end{tabular} 3245 \end{center} 3246 \caption{Creation comparison. 3247 All numbers are in nanoseconds(\si{\nano\second}).} 3248 \label{tab:creation} 3249 \end{table} 3250 2972 \end{multicols} 3251 2973 3252 2974 3253 2975 \section{Conclusion} 3254 This paper has achieved a minimal concurrency \textbf{api} that is simple, efficient and usable as the basis for higher-level features. 
3255 The approach presented is based on a lightweight thread-system for parallelism, which sits on top of clusters of processors. 3256 This M:N model is judged to be both more efficient and allow more flexibility for users. 3257 Furthermore, this document introduces monitors as the main concurrency tool for users. 3258 This paper also offers a novel approach allowing multiple monitors to be accessed simultaneously without running into the Nested Monitor Problem~\cite{Lister77}. 3259 It also offers a full implementation of the concurrency runtime written entirely in \CFA, effectively the largest \CFA code base to date. 3260 3261 3262 % ====================================================================== 3263 % ====================================================================== 2976 2977 Advanced control-flow will always be difficult, especially when there is temporal ordering and nondeterminism. 2978 However, many systems exacerbate the difficulty through their presentation mechanisms. 2979 This paper shows it is possible to present a hierarchy of control-flow features, generator, coroutine, thread, and monitor, providing an integrated set of high-level, efficient, and maintainable control-flow features. 2980 Eliminated from \CFA are spurious wakeup and barging, which are nonintuitive and lead to errors, and having to work with a bewildering set of low-level locks and acquisition techniques. 2981 \CFA high-level race-free monitors and tasks provide the core mechanisms for mutual exclusion and synchronization, without having to resort to magic qualifiers like @volatile@/@atomic@. 2982 Extending these mechanisms to handle high-level deadlock-free bulk acquire across both mutual exclusion and synchronization is a unique contribution. 2983 The \CFA runtime provides concurrency based on a preemptive M:N user-level threading-system, executing in clusters, which encapsulate scheduling of work on multiple kernel threads providing parallelism. 
2984 The M:N model is judged to be efficient and provide greater flexibility than a 1:1 threading model. 2985 These concepts and the \CFA runtime-system are written in the \CFA language, extensively leveraging the \CFA type-system, which demonstrates the expressiveness of the \CFA language. 2986 Performance comparisons with other concurrent systems/languages show the \CFA approach is competitive across all low-level operations, which translates directly into good performance in well-written concurrent applications. 2987 C programmers should feel comfortable using these mechanisms for developing complex control-flow in applications, with the ability to obtain maximum available performance by selecting mechanisms at the appropriate level of need. 2988 2989 3264 2990 \section{Future Work} 3265 % ====================================================================== 3266 % ====================================================================== 3267 3268 \subsection{Performance} \label{futur:perf} 3269 This paper presents a first implementation of the \CFA concurrency runtime. 3270 Therefore, there is still significant work to improve performance. 3271 Many of the data structures and algorithms may change in the future to more efficient versions. 3272 For example, the number of monitors in a single bulk acquire is only bound by the stack size, this is probably unnecessarily generous. 3273 It may be possible that limiting the number helps increase performance. 3274 However, it is not obvious that the benefit would be significant. 3275 3276 \subsection{Flexible Scheduling} \label{futur:sched} 2991 2992 While control flow in \CFA has a strong start, development is still underway to complete a number of missing features. 2993 2994 \paragraph{Flexible Scheduling} 2995 \label{futur:sched} 2996 3277 2997 An important part of concurrency is scheduling. 3278 2998 Different scheduling algorithms can affect performance (both in terms of average and variation). 
3279 2999 However, no single scheduler is optimal for all workloads and therefore there is value in being able to change the scheduler for given programs. 3280 One solution is to offer various tweaking options to users, allowing the scheduler to be adjusted to the requirements of the workload. 3281 However, in order to be truly flexible, it would be interesting to allow users to add arbitrary data and arbitrary scheduling algorithms. 3282 For example, a web server could attach Type-of-Service information to threads and have a ``ToS aware'' scheduling algorithm tailored to this specific web server. 3283 This path of flexible schedulers will be explored for \CFA. 3284 3285 \subsection{Non-Blocking I/O} \label{futur:nbio} 3286 While most of the parallelism tools are aimed at data parallelism and control-flow parallelism, many modern workloads are not bound on computation but on IO operations, a common case being web servers and XaaS (anything as a service). 3287 These types of workloads often require significant engineering around amortizing costs of blocking IO operations. 3288 At its core, non-blocking I/O is an operating system level feature that allows queuing IO operations (\eg network operations) and registering for notifications instead of waiting for requests to complete. 3289 In this context, the role of the language makes Non-Blocking IO easily available and with low overhead. 3290 The current trend is to use asynchronous programming using tools like callbacks and/or futures and promises, which can be seen in frameworks like Node.js~\cite{NodeJs} for JavaScript, Spring MVC~\cite{SpringMVC} for Java and Django~\cite{Django} for Python. 3291 However, while these are valid solutions, they lead to code that is harder to read and maintain because it is much less linear. 
3292 3293 \subsection{Other Concurrency Tools} \label{futur:tools} 3294 While monitors offer a flexible and powerful concurrent core for \CFA, other concurrency tools are also necessary for a complete multi-paradigm concurrency package. 3295 Examples of such tools can include simple locks and condition variables, futures and promises~\cite{promises}, executors and actors. 3296 These additional features are useful when monitors offer a level of abstraction that is inadequate for certain tasks. 3297 3298 \subsection{Implicit Threading} \label{futur:implcit} 3299 Simpler applications can benefit greatly from having implicit parallelism. 3300 That is, parallelism that does not rely on the user to write concurrency. 3301 This type of parallelism can be achieved both at the language level and at the library level. 3302 The canonical example of implicit parallelism is parallel for loops, which are the simplest example of a divide and conquer algorithms~\cite{uC++book}. 3303 Table \ref{f:parfor} shows three different code examples that accomplish point-wise sums of large arrays. 3304 Note that none of these examples explicitly declare any concurrency or parallelism objects. 3305 3306 \begin{table} 3307 \begin{center} 3308 \begin{tabular}[t]{|c|c|c|} 3309 Sequential & Library Parallel & Language Parallel \\ 3310 \begin{cfa}[tabsize=3] 3311 void big_sum( 3312 int* a, int* b, 3313 int* o, 3314 size_t len) 3315 { 3316 for( 3317 int i = 0; 3318 i < len; 3319 ++i ) 3320 { 3321 o[i]=a[i]+b[i]; 3322 } 3323 } 3324 3325 3326 3327 3328 3329 int* a[10000]; 3330 int* b[10000]; 3331 int* c[10000]; 3332 //... 
fill in a & b 3333 big_sum(a,b,c,10000); 3334 \end{cfa} &\begin{cfa}[tabsize=3] 3335 void big_sum( 3336 int* a, int* b, 3337 int* o, 3338 size_t len) 3339 { 3340 range ar(a, a+len); 3341 range br(b, b+len); 3342 range or(o, o+len); 3343 parfor( ai, bi, oi, 3344 []( int* ai, 3345 int* bi, 3346 int* oi) 3347 { 3348 oi=ai+bi; 3349 }); 3350 } 3351 3352 3353 int* a[10000]; 3354 int* b[10000]; 3355 int* c[10000]; 3356 //... fill in a & b 3357 big_sum(a,b,c,10000); 3358 \end{cfa}&\begin{cfa}[tabsize=3] 3359 void big_sum( 3360 int* a, int* b, 3361 int* o, 3362 size_t len) 3363 { 3364 parfor (ai,bi,oi) 3365 in (a, b, o ) 3366 { 3367 oi = ai + bi; 3368 } 3369 } 3370 3371 3372 3373 3374 3375 3376 3377 int* a[10000]; 3378 int* b[10000]; 3379 int* c[10000]; 3380 //... fill in a & b 3381 big_sum(a,b,c,10000); 3382 \end{cfa} 3383 \end{tabular} 3384 \end{center} 3385 \caption{For loop to sum numbers: Sequential, using library parallelism and language parallelism.} 3386 \label{f:parfor} 3387 \end{table} 3388 3389 Implicit parallelism is a restrictive solution and therefore has its limitations. 3390 However, it is a quick and simple approach to parallelism, which may very well be sufficient for smaller applications and reduces the amount of boilerplate needed to start benefiting from parallelism in modern CPUs. 3391 3392 3393 % A C K N O W L E D G E M E N T S 3394 % ------------------------------- 3000 One solution is to offer various tuning options, allowing the scheduler to be adjusted to the requirements of the workload. 3001 However, to be truly flexible, a pluggable scheduler is necessary. 3002 Currently, the \CFA pluggable scheduler is too simple to handle complex scheduling, \eg quality of service and real-time, where the scheduler must interact with mutex objects to deal with issues like priority inversion~\cite{Buhr00b}. 
3003 3004 \paragraph{Non-Blocking I/O} 3005 \label{futur:nbio} 3006 3007 Many modern workloads are not bound by computation but IO operations, a common case being web servers and XaaS~\cite{XaaS} (anything as a service). 3008 These types of workloads require significant engineering to amortizing costs of blocking IO-operations. 3009 At its core, non-blocking I/O is an operating-system level feature queuing IO operations, \eg network operations, and registering for notifications instead of waiting for requests to complete. 3010 Current trends use asynchronous programming like callbacks, futures, and/or promises, \eg Node.js~\cite{NodeJs} for JavaScript, Spring MVC~\cite{SpringMVC} for Java, and Django~\cite{Django} for Python. 3011 However, these solutions lead to code that is hard to create, read and maintain. 3012 A better approach is to tie non-blocking I/O into the concurrency system to provide ease of use with low overhead, \eg thread-per-connection web-services. 3013 A non-blocking I/O library is currently under development for \CFA. 3014 3015 \paragraph{Other Concurrency Tools} 3016 \label{futur:tools} 3017 3018 While monitors offer flexible and powerful concurrency for \CFA, other concurrency tools are also necessary for a complete multi-paradigm concurrency package. 3019 Examples of such tools can include futures and promises~\cite{promises}, executors and actors. 3020 These additional features are useful for applications that can be constructed without shared data and direct blocking. 3021 As well, new \CFA extensions should make it possible to create a uniform interface for virtually all mutual exclusion, including monitors and low-level locks. 3022 3023 \paragraph{Implicit Threading} 3024 \label{futur:implcit} 3025 3026 Basic concurrent (embarrassingly parallel) applications can benefit greatly from implicit concurrency, where sequential programs are converted to concurrent, possibly with some help from pragmas to guide the conversion. 
3027 This type of concurrency can be achieved both at the language level and at the library level. 3028 The canonical example of implicit concurrency is concurrent nested @for@ loops, which are amenable to divide and conquer algorithms~\cite{uC++book}. 3029 The \CFA language features should make it possible to develop a reasonable number of implicit concurrency mechanism to solve basic HPC data-concurrency problems. 3030 However, implicit concurrency is a restrictive solution with significant limitations, so it can never replace explicit concurrent programming. 3031 3032 3395 3033 \section{Acknowledgements} 3396 3034 3397 Thanks to Aaron Moss, Rob Schluntz and Andrew Beach for their work on the \CFA project as well as all the discussions which helped concretize the ideas in this paper. 3398 Partial funding was supplied by the Natural Sciences and Engineering Research Council of Canada and a corporate partnership with Huawei Ltd. 3399 3400 3401 % B I B L I O G R A P H Y 3402 % ----------------------------- 3403 %\bibliographystyle{plain} 3035 The authors would like to recognize the design assistance of Aaron Moss, Rob Schluntz, Andrew Beach and Michael Brooks on the features described in this paper. 3036 Funding for this project has been provided by Huawei Ltd.\ (\url{http://www.huawei.com}). %, and Peter Buhr is partially funded by the Natural Sciences and Engineering Research Council of Canada. 3037 3038 {% 3039 \fontsize{9bp}{12bp}\selectfont% 3404 3040 \bibliography{pl,local} 3405 3041 }% 3406 3042 3407 3043 \end{document} -
doc/papers/concurrency/annex/local.bib
r7951100 rb067d9b 46 46 title = {Thread Building Blocks}, 47 47 howpublished= {Intel, \url{https://www.threadingbuildingblocks.org}}, 48 note = {Accessed: 2018-3},48 optnote = {Accessed: 2018-3}, 49 49 } 50 50 … … 66 66 } 67 67 68 @ article{BankTransfer,68 @misc{BankTransfer, 69 69 key = {Bank Transfer}, 70 70 keywords = {Bank Transfer}, 71 71 title = {Bank Account Transfer Problem}, 72 publisher = {Wiki Wiki Web}, 73 address = {http://wiki.c2.com}, 72 howpublished = {Wiki Wiki Web, \url{http://wiki.c2.com/?BankAccountTransferProblem}}, 74 73 year = 2010 75 74 } -
doc/papers/concurrency/figures/ext_monitor.fig
r7951100 rb067d9b 8 8 -2 9 9 1200 2 10 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 3150.000 3450.000 3150 3150 2850 3450 3150 375011 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 3150.000 4350.000 3150 4050 2850 4350 3150 465012 6 5850 1950 6150 225013 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 2100 105 105 6000 2100 6105 220514 4 1 -1 0 0 0 10 0.0000 2 105 90 6000 2160 d\00110 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 1500.000 3600.000 1500 3300 1200 3600 1500 3900 11 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 1500.000 4500.000 1500 4200 1200 4500 1500 4800 12 6 4200 2100 4500 2400 13 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 2250 105 105 4350 2250 4455 2355 14 4 1 -1 0 0 0 10 0.0000 2 105 90 4350 2310 d\001 15 15 -6 16 6 5100 2100 5400 240017 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 5250 2250 105 105 5250 2250 5355 225018 4 1 -1 0 0 0 10 0.0000 2 105 120 5250 2295 X\00116 6 4200 1800 4500 2100 17 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 1950 105 105 4350 1950 4455 2055 18 4 1 -1 0 0 0 10 0.0000 2 105 90 4350 2010 b\001 19 19 -6 20 6 5100 1800 5400 2100 21 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 5250 1950 105 105 5250 1950 5355 1950 22 4 1 -1 0 0 0 10 0.0000 2 105 120 5250 2010 Y\001 20 6 1420 5595 5625 5805 21 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 1500 5700 80 80 1500 5700 1580 5780 22 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 2850 5700 105 105 2850 5700 2955 5805 23 1 3 0 1 -1 -1 0 0 4 0.000 1 0.0000 4350 5700 105 105 4350 5700 4455 5805 24 4 0 -1 0 0 0 12 0.0000 2 135 1035 3075 5775 blocked task\001 25 4 0 -1 0 0 0 12 0.0000 2 135 870 1650 5775 active task\001 26 4 0 -1 0 0 0 12 0.0000 2 135 1050 4575 5775 routine mask\001 23 27 -6 24 6 5850 1650 6150 1950 25 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 1800 105 105 6000 1800 6105 1905 26 4 1 -1 0 0 0 10 0.0000 2 105 90 6000 1860 b\001 28 6 3450 1950 3750 2550 29 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 3600 2100 105 105 3600 2100 3705 2100 30 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 31 3450 1950 3750 1950 3750 2550 3450 2550 3450 1950 32 4 1 4 0 0 0 10 0.0000 2 105 120 3600 
2160 Y\001 27 33 -6 28 6 3070 5445 7275 5655 29 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 3150 5550 80 80 3150 5550 3230 5630 30 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4500 5550 105 105 4500 5550 4605 5655 31 1 3 0 1 -1 -1 0 0 4 0.000 1 0.0000 6000 5550 105 105 6000 5550 6105 5655 32 4 0 -1 0 0 0 12 0.0000 2 135 1035 4725 5625 blocked task\001 33 4 0 -1 0 0 0 12 0.0000 2 135 870 3300 5625 active task\001 34 4 0 -1 0 0 0 12 0.0000 2 135 1050 6225 5625 routine mask\001 34 6 3450 2250 3750 2550 35 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 3600 2400 105 105 3600 2400 3705 2400 36 4 1 4 0 0 0 10 0.0000 2 105 120 3600 2445 X\001 35 37 -6 36 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3300 3600 105 105 3300 3600 3405 370537 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3600 3600 105 105 3600 3600 3705 370538 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6600 3900 105 105 6600 3900 6705 400539 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6900 3900 105 105 6900 3900 7005 400540 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 2700 105 105 6000 2700 6105 280541 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 2400 105 105 6000 2400 6105 250542 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 5100 4575 80 80 5100 4575 5180 465538 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1650 3750 105 105 1650 3750 1755 3855 39 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1950 3750 105 105 1950 3750 2055 3855 40 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4950 4050 105 105 4950 4050 5055 4155 41 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5250 4050 105 105 5250 4050 5355 4155 42 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 2850 105 105 4350 2850 4455 2955 43 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 2550 105 105 4350 2550 4455 2655 44 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 3450 4725 80 80 3450 4725 3530 4805 43 45 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 44 4050 2925 5475 2925 5475 3225 4050 3225 4050 292546 2400 3075 3825 3075 3825 3375 2400 3375 2400 3075 45 47 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 46 3150 3750 3750 3750 3750 4050 3150 405048 1500 3900 2100 3900 2100 4200 1500 4200 47 49 2 1 0 1 -1 -1 0 0 -1 
0.000 0 0 -1 0 0 3 48 3150 3450 3750 3450 3900 367550 1500 3600 2100 3600 2250 3825 49 51 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 50 3750 3150 3600 337552 2100 3300 1950 3525 51 53 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 3 52 3150 4350 3750 4350 3900 457554 1500 4500 2100 4500 2250 4725 53 55 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 54 3750 4050 3600 427556 2100 4200 1950 4425 55 57 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 56 3150 4650 3750 4650 3750 4950 4950 495058 1500 4800 2100 4800 2100 5100 3300 5100 57 59 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 58 6450 3750 6300 397560 4800 3900 4650 4125 59 61 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 60 4950 4950 5175 510062 3300 5100 3525 5250 61 63 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 9 62 5250 4950 6450 4950 6450 4050 7050 4050 7050 3750 6450 375063 6450 2850 6150 2850 6150 165064 3600 5100 4800 5100 4800 4200 5400 4200 5400 3900 4800 3900 65 4800 3000 4500 3000 4500 1800 64 66 2 2 1 1 -1 -1 0 0 -1 4.000 0 0 0 0 0 5 65 5850 4200 5850 3300 4350 3300 4350 4200 5850 420066 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1267 4200 4350 4200 3450 2700 3450 2700 4350 4200 4350 68 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 67 69 1 1 1.00 60.00 120.00 68 7 1 1.00 60.00 120.00 69 5250 3150 5250 2400 70 3600 3225 3600 2550 71 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 72 4050 3000 4500 3150 70 73 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 71 3150 3150 3750 3150 3750 2850 5700 2850 5700 1650 72 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 73 5700 2850 6150 3000 74 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 75 5100 1800 5400 1800 5400 2400 5100 2400 5100 1800 76 4 1 -1 0 0 0 10 0.0000 2 75 75 6000 2745 a\001 77 4 1 -1 0 0 0 10 0.0000 2 75 75 6000 2445 c\001 78 4 1 -1 0 0 0 12 0.0000 2 135 315 5100 5325 exit\001 79 4 1 -1 0 0 0 12 0.0000 2 135 135 3300 3075 A\001 80 4 1 -1 0 0 0 12 0.0000 2 135 795 3300 4875 condition\001 81 4 1 -1 0 0 0 12 0.0000 2 135 135 3300 5100 B\001 82 4 0 -1 0 0 0 12 0.0000 2 135 420 6600 3675 stack\001 83 4 0 -1 0 0 0 12 0.0000 2 180 750 
6600 3225 acceptor/\001 84 4 0 -1 0 0 0 12 0.0000 2 180 750 6600 3450 signalled\001 85 4 1 -1 0 0 0 12 0.0000 2 135 795 3300 2850 condition\001 86 4 1 -1 0 0 0 12 0.0000 2 165 420 6000 1350 entry\001 87 4 1 -1 0 0 0 12 0.0000 2 135 495 6000 1575 queue\001 88 4 0 -1 0 0 0 12 0.0000 2 135 525 6300 2400 arrival\001 89 4 0 -1 0 0 0 12 0.0000 2 135 630 6300 2175 order of\001 90 4 1 -1 0 0 0 12 0.0000 2 135 525 5100 3675 shared\001 91 4 1 -1 0 0 0 12 0.0000 2 135 735 5100 3975 variables\001 92 4 0 0 50 -1 0 11 0.0000 2 165 855 4275 3150 Acceptables\001 93 4 0 0 50 -1 0 11 0.0000 2 120 165 5775 2700 W\001 94 4 0 0 50 -1 0 11 0.0000 2 120 135 5775 2400 X\001 95 4 0 0 50 -1 0 11 0.0000 2 120 105 5775 2100 Z\001 96 4 0 0 50 -1 0 11 0.0000 2 120 135 5775 1800 Y\001 74 1500 3300 2100 3300 2100 3000 4050 3000 4050 1800 75 4 1 -1 0 0 0 10 0.0000 2 75 75 4350 2895 a\001 76 4 1 -1 0 0 0 10 0.0000 2 75 75 4350 2595 c\001 77 4 1 -1 0 0 0 12 0.0000 2 135 315 3450 5475 exit\001 78 4 1 -1 0 0 0 12 0.0000 2 135 135 1650 3225 A\001 79 4 1 -1 0 0 0 12 0.0000 2 135 795 1650 5025 condition\001 80 4 1 -1 0 0 0 12 0.0000 2 135 135 1650 5250 B\001 81 4 0 -1 0 0 0 12 0.0000 2 135 420 4950 3825 stack\001 82 4 0 -1 0 0 0 12 0.0000 2 180 750 4950 3375 acceptor/\001 83 4 0 -1 0 0 0 12 0.0000 2 180 750 4950 3600 signalled\001 84 4 1 -1 0 0 0 12 0.0000 2 135 795 1650 3000 condition\001 85 4 0 -1 0 0 0 12 0.0000 2 135 525 4650 2550 arrival\001 86 4 0 -1 0 0 0 12 0.0000 2 135 630 4650 2325 order of\001 87 4 1 -1 0 0 0 12 0.0000 2 135 525 3450 3825 shared\001 88 4 1 -1 0 0 0 12 0.0000 2 135 735 3450 4125 variables\001 89 4 0 4 50 -1 0 11 0.0000 2 120 135 4075 2025 X\001 90 4 0 4 50 -1 0 11 0.0000 2 120 135 4075 2325 Y\001 91 4 0 4 50 -1 0 11 0.0000 2 120 135 4075 2625 Y\001 92 4 0 4 50 -1 0 11 0.0000 2 120 135 4075 2925 X\001 93 4 0 -1 0 0 3 12 0.0000 2 150 540 4950 4425 urgent\001 94 4 1 0 50 -1 0 11 0.0000 2 165 600 3075 3300 accepted\001 95 4 1 -1 0 0 0 12 0.0000 2 165 960 4275 1725 entry queue\001 -
doc/papers/concurrency/figures/monitor.fig
r7951100 rb067d9b 8 8 -2 9 9 1200 2 10 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 1500.000 2700.000 1500 2400 1200 2700 1500 3000 11 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 1500.000 3600.000 1500 3300 1200 3600 1500 3900 12 6 4200 1200 4500 1500 13 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 1350 105 105 4350 1350 4455 1455 14 4 1 -1 0 0 0 10 0.0000 2 105 90 4350 1410 d\001 10 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 1500.000 3300.000 1500 3000 1200 3300 1500 3600 11 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 1500.000 4200.000 1500 3900 1200 4200 1500 4500 12 6 1350 5250 5325 5550 13 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 1500 5400 80 80 1500 5400 1580 5480 14 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 2850 5400 105 105 2850 5400 2955 5505 15 1 3 0 1 -1 -1 0 0 4 0.000 1 0.0000 4350 5400 105 105 4350 5400 4455 5505 16 4 0 -1 0 0 0 12 0.0000 2 180 765 4575 5475 duplicate\001 17 4 0 -1 0 0 0 12 0.0000 2 135 1035 3075 5475 blocked task\001 18 4 0 -1 0 0 0 12 0.0000 2 135 870 1650 5475 active task\001 15 19 -6 16 6 4200 900 4500 120017 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 1 050 105 105 4350 1050 4455 115518 4 1 -1 0 0 0 10 0.0000 2 105 90 4350 1110 b\00120 6 4200 1800 4500 2100 21 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 1950 105 105 4350 1950 4455 2055 22 4 1 -1 0 0 0 10 0.0000 2 105 90 4350 2010 d\001 19 23 -6 20 6 2400 1500 2700 180021 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 2550 1650 105 105 2550 1650 2655 165022 4 1 -1 0 0 0 10 0.0000 2 105 90 2550 1710 b\00124 6 4200 1500 4500 1800 25 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 1650 105 105 4350 1650 4455 1755 26 4 1 -1 0 0 0 10 0.0000 2 105 90 4350 1710 b\001 23 27 -6 24 6 2400 1800 2700 2100 25 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 2550 1950 105 105 2550 1950 2655 1950 26 4 1 -1 0 0 0 10 0.0000 2 75 75 2550 1995 a\001 27 -6 28 6 3300 1500 3600 1800 29 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 3450 1650 105 105 3450 1650 3555 1650 30 4 1 -1 0 0 0 10 0.0000 2 105 90 3450 1710 d\001 31 -6 32 6 1350 4650 5325 4950 33 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 1500 4800 80 80 
1500 4800 1580 4880 34 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 2850 4800 105 105 2850 4800 2955 4905 35 1 3 0 1 -1 -1 0 0 4 0.000 1 0.0000 4350 4800 105 105 4350 4800 4455 4905 36 4 0 -1 0 0 0 12 0.0000 2 180 765 4575 4875 duplicate\001 37 4 0 -1 0 0 0 12 0.0000 2 135 1035 3075 4875 blocked task\001 38 4 0 -1 0 0 0 12 0.0000 2 135 870 1650 4875 active task\001 39 -6 40 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1650 2850 105 105 1650 2850 1755 2955 41 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1950 2850 105 105 1950 2850 2055 2955 42 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4950 3150 105 105 4950 3150 5055 3255 43 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5250 3150 105 105 5250 3150 5355 3255 44 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 1950 105 105 4350 1950 4455 2055 45 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 1650 105 105 4350 1650 4455 1755 46 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 3450 3825 80 80 3450 3825 3530 3905 47 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 3450 1950 105 105 3450 1950 3555 1950 28 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1650 3450 105 105 1650 3450 1755 3555 29 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1950 3450 105 105 1950 3450 2055 3555 30 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4950 3750 105 105 4950 3750 5055 3855 31 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5250 3750 105 105 5250 3750 5355 3855 32 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 3450 4425 80 80 3450 4425 3530 4505 33 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 2550 105 105 4350 2550 4455 2655 34 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4350 2250 105 105 4350 2250 4455 2355 35 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 5 36 1500 3000 2100 3000 2100 2700 2400 2700 2400 2100 37 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 38 1500 3600 2100 3600 2100 3900 1500 3900 39 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 3 40 1500 3300 2100 3300 2250 3525 48 41 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 49 2400 2100 2625 2250 42 2100 3000 1950 3225 43 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 3 44 1500 4200 2100 4200 2250 4425 50 45 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 51 3300 2100 
3525 2250 46 2100 3900 1950 4125 47 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 48 1500 4500 2100 4500 2100 4800 3300 4800 52 49 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 53 4200 2100 4425 2250 54 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 5 55 1500 2400 2100 2400 2100 2100 2400 2100 2400 1500 50 4800 3600 4650 3825 51 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 52 3300 4800 3525 4950 53 2 2 1 1 -1 -1 0 0 -1 4.000 0 0 0 0 0 5 54 4200 4050 4200 3150 2700 3150 2700 4050 4200 4050 56 55 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 57 1500 3000 2100 3000 2100 3300 1500 3300 58 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 3 59 1500 2700 2100 2700 2250 2925 60 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 61 2100 2400 1950 2625 62 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 3 63 1500 3600 2100 3600 2250 3825 64 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 65 2100 3300 1950 3525 66 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 67 1500 3900 2100 3900 2100 4200 3300 4200 68 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 69 4800 3000 4650 3225 70 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2 71 3300 4200 3525 4350 72 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 73 3600 1500 3600 2100 4200 2100 4200 900 74 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4 75 2700 1500 2700 2100 3300 2100 3300 1500 56 3600 2100 3600 2700 4050 2700 4050 1500 76 57 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 9 77 3600 4200 4800 4200 4800 3300 5400 3300 5400 3000 4800 3000 78 4800 2100 4500 2100 4500 900 79 2 2 1 1 -1 -1 0 0 -1 4.000 0 0 0 0 0 5 80 4200 3450 4200 2550 2700 2550 2700 3450 4200 3450 81 4 1 -1 0 0 0 10 0.0000 2 75 75 4350 1995 a\001 82 4 1 -1 0 0 0 10 0.0000 2 75 75 4350 1695 c\001 83 4 1 -1 0 0 0 12 0.0000 2 135 315 3450 4575 exit\001 84 4 1 -1 0 0 0 12 0.0000 2 135 135 1650 2325 A\001 85 4 1 -1 0 0 0 12 0.0000 2 135 795 1650 4125 condition\001 86 4 1 -1 0 0 0 12 0.0000 2 135 135 1650 4350 B\001 87 4 0 -1 0 0 0 12 0.0000 2 135 420 4950 2925 stack\001 88 4 0 -1 0 0 0 12 0.0000 2 180 750 4950 2475 acceptor/\001 89 4 0 -1 0 0 0 12 0.0000 2 180 750 4950 2700 
signalled\001 90 4 1 -1 0 0 0 12 0.0000 2 135 795 1650 2100 condition\001 91 4 1 -1 0 0 0 12 0.0000 2 135 135 2550 1425 X\001 92 4 1 -1 0 0 0 12 0.0000 2 135 135 3450 1425 Y\001 93 4 1 -1 0 0 0 12 0.0000 2 165 420 4350 600 entry\001 94 4 1 -1 0 0 0 12 0.0000 2 135 495 4350 825 queue\001 95 4 0 -1 0 0 0 12 0.0000 2 135 525 4650 1650 arrival\001 96 4 0 -1 0 0 0 12 0.0000 2 135 630 4650 1425 order of\001 97 4 1 -1 0 0 0 12 0.0000 2 135 525 3450 2925 shared\001 98 4 1 -1 0 0 0 12 0.0000 2 135 735 3450 3225 variables\001 99 4 1 -1 0 0 0 12 0.0000 2 120 510 3000 975 mutex\001 100 4 1 -1 0 0 0 10 0.0000 2 75 75 3450 1995 c\001 101 4 1 -1 0 0 0 12 0.0000 2 135 570 3000 1200 queues\001 58 3600 4800 4800 4800 4800 3900 5400 3900 5400 3600 4800 3600 59 4800 2700 4500 2700 4500 1500 60 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 61 4050 2700 4500 2850 62 4 1 -1 0 0 0 12 0.0000 2 135 315 3450 5175 exit\001 63 4 1 -1 0 0 0 12 0.0000 2 135 795 1650 4725 condition\001 64 4 1 -1 0 0 0 12 0.0000 2 135 135 1650 4950 B\001 65 4 0 -1 0 0 0 12 0.0000 2 135 420 4950 3525 stack\001 66 4 0 -1 0 0 0 12 0.0000 2 180 750 4950 3075 acceptor/\001 67 4 0 -1 0 0 0 12 0.0000 2 180 750 4950 3300 signalled\001 68 4 1 -1 0 0 0 12 0.0000 2 135 525 3450 3525 shared\001 69 4 1 -1 0 0 0 12 0.0000 2 135 735 3450 3825 variables\001 70 4 0 -1 0 0 3 12 0.0000 2 150 540 4950 4125 urgent\001 71 4 1 -1 0 0 0 10 0.0000 2 75 75 4350 2595 a\001 72 4 1 -1 0 0 0 10 0.0000 2 75 75 4350 2295 c\001 73 4 0 -1 0 0 0 12 0.0000 2 135 525 4650 2250 arrival\001 74 4 0 -1 0 0 0 12 0.0000 2 135 630 4650 2025 order of\001 75 4 0 4 50 -1 0 11 0.0000 2 120 135 4075 1725 X\001 76 4 0 4 50 -1 0 11 0.0000 2 120 135 4075 2025 Y\001 77 4 0 4 50 -1 0 11 0.0000 2 120 135 4075 2325 Y\001 78 4 0 4 50 -1 0 11 0.0000 2 120 135 4075 2625 X\001 79 4 1 -1 0 0 0 12 0.0000 2 165 960 4275 1425 entry queue\001 -
doc/papers/general/.gitignore
r7951100 rb067d9b 4 4 *.ps 5 5 6 Paper.tex.plain7 6 mail 8 7 Paper.out.ps -
doc/papers/general/Makefile
r7951100 rb067d9b 4 4 Figures = figures 5 5 Macros = ../AMA/AMA-stix/ama 6 TeXLIB = .:${Macros}:${Build}: ../../bibliography:6 TeXLIB = .:${Macros}:${Build}: 7 7 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build} 8 BibTeX = BIBINPUTS= ${TeXLIB}&& export BIBINPUTS && bibtex8 BibTeX = BIBINPUTS=../../bibliography: && export BIBINPUTS && bibtex 9 9 10 10 MAKEFLAGS = --no-print-directory # --silent … … 46 46 47 47 Paper.zip : 48 zip -x general/.gitignore -x general/"*AMA*" -x general/Paper.out.ps -x general/Paper.tex.plain -x general/evaluation.zip -x general/mail -x general/response -x general/test.c -x general/evaluation.zip -x general/Paper.tex.plain -x general/Paper.ps -x general/Paper.pdf -x general/"*build*" -x general/evaluation/.gitignore -x general/evaluation/timing.xlsx -r Paper.zip general 48 zip -x general/.gitignore -x general/Paper.out.ps -x general/Paper.tex.plain -x -x general/WileyNJD-AMA.bst general/"*evaluation*" -x general/evaluation.zip \ 49 -x general/mail -x general/response -x general/test.c -x general/Paper.ps -x general/"*build*" -r Paper.zip general pl.bib 49 50 50 51 evaluation.zip : 51 zip -x evaluation/.gitignore -x evaluation/timing.xlsx -x evaluation/timing.dat -r evaluation.zip evaluation52 zip -x evaluation/.gitignore -x evaluation/timing.xlsx -x evaluation/timing.dat -r evaluation.zip evaluation 52 53 53 54 # File Dependencies # … … 59 60 dvips ${Build}/$< -o $@ 60 61 61 ${BASE}.dvi : Makefile ${B uild} ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \62 ../../bibliography/pl.bib 62 ${BASE}.dvi : Makefile ${BASE}.out.ps ${Macros}/WileyNJD-v2.cls WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 63 ../../bibliography/pl.bib | ${Build} 63 64 # Must have *.aux file containing citations for bibtex 64 65 if [ ! 
-r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi … … 75 76 mkdir -p ${Build} 76 77 77 ${BASE}.out.ps : ${Build}78 ${BASE}.out.ps : | ${Build} 78 79 ln -fs ${Build}/Paper.out.ps . 79 80 … … 84 85 gnuplot -e Build="'${Build}/'" evaluation/timing.gp 85 86 86 %.tex : %.fig ${Build}87 %.tex : %.fig | ${Build} 87 88 fig2dev -L eepic $< > ${Build}/$@ 88 89 89 %.ps : %.fig ${Build}90 %.ps : %.fig | ${Build} 90 91 fig2dev -L ps $< > ${Build}/$@ 91 92 92 %.pstex : %.fig ${Build}93 %.pstex : %.fig | ${Build} 93 94 fig2dev -L pstex $< > ${Build}/$@ 94 95 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
doc/papers/general/Paper.tex
r7951100 rb067d9b 1 1 \documentclass[AMA,STIX1COL]{WileyNJD-v2} 2 \setlength\typewidth{170mm} 3 \setlength\textwidth{170mm} 2 4 3 5 \articletype{RESEARCH ARTICLE}% 4 6 5 \received{26 April 2016} 6 \revised{6 June 2016} 7 \accepted{6 June 2016} 8 7 \received{12 March 2018} 8 \revised{8 May 2018} 9 \accepted{28 June 2018} 10 11 \setlength\typewidth{168mm} 12 \setlength\textwidth{168mm} 9 13 \raggedbottom 10 14 … … 187 191 } 188 192 189 \title{\texorpdfstring{\protect\CFA : Adding Modern Programming Language Features to C}{Cforall : Adding Modern Programming Language Features to C}}193 \title{\texorpdfstring{\protect\CFA : Adding modern programming language features to C}{Cforall : Adding modern programming language features to C}} 190 194 191 195 \author[1]{Aaron Moss} 192 196 \author[1]{Robert Schluntz} 193 \author[1]{Peter A. Buhr *}197 \author[1]{Peter A. Buhr} 194 198 \authormark{MOSS \textsc{et al}} 195 199 196 \address[1]{\orgdiv{Cheriton School of Computer Science}, \orgname{University of Waterloo}, \orgaddress{\state{Waterloo, O N}, \country{Canada}}}197 198 \corres{ *Peter A. Buhr, Cheriton School of Computer Science, University of Waterloo, 200 University Avenue West, Waterloo, ON,N2L 3G1, Canada. \email{pabuhr{\char`\@}uwaterloo.ca}}200 \address[1]{\orgdiv{Cheriton School of Computer Science}, \orgname{University of Waterloo}, \orgaddress{\state{Waterloo, Ontario}, \country{Canada}}} 201 202 \corres{Peter A. Buhr, Cheriton School of Computer Science, University of Waterloo, 200 University Avenue West, Waterloo, ON N2L 3G1, Canada. \email{pabuhr{\char`\@}uwaterloo.ca}} 199 203 200 204 \fundingInfo{Natural Sciences and Engineering Research Council of Canada} 201 205 202 206 \abstract[Summary]{ 203 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from hobby projects to commercial operating-systems. 
204 This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more. 205 Nevertheless, C, first standardized almost forty years ago, lacks many features that make programming in more modern languages safer and more productive. 206 207 The goal of the \CFA project (pronounced ``C-for-all'') is to create an extension of C that provides modern safety and productivity features while still ensuring strong backwards compatibility with C and its programmers. 208 Prior projects have attempted similar goals but failed to honour C programming-style; 209 for instance, adding object-oriented or functional programming with garbage collection is a non-starter for many C developers. 210 Specifically, \CFA is designed to have an orthogonal feature-set based closely on the C programming paradigm, so that \CFA features can be added \emph{incrementally} to existing C code-bases, and C programmers can learn \CFA extensions on an as-needed basis, preserving investment in existing code and programmers. 211 This paper presents a quick tour of \CFA features showing how their design avoids shortcomings of similar features in C and other C-like languages. 212 Finally, experimental results are presented to validate several of the new features. 207 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from hobby projects to commercial operating systems. 208 This installation base and the programmers producing it represent a massive software engineering investment spanning decades and likely to continue for decades more. 209 Nevertheless, C, which was first standardized almost 30 years ago, lacks many features that make programming in more modern languages safer and more productive. 
210 The goal of the \CFA project (pronounced ``C for all'') is to create an extension of C that provides modern safety and productivity features while still ensuring strong backward compatibility with C and its programmers. 211 Prior projects have attempted similar goals but failed to honor the C programming style; 212 for instance, adding object-oriented or functional programming with garbage collection is a nonstarter for many C developers. 213 Specifically, \CFA is designed to have an orthogonal feature set based closely on the C programming paradigm, so that \CFA features can be added \emph{incrementally} to existing C code bases, and C programmers can learn \CFA extensions on an as-needed basis, preserving investment in existing code and programmers. 214 This paper presents a quick tour of \CFA features, showing how their design avoids shortcomings of similar features in C and other C-like languages. 215 Experimental results are presented to validate several of the new features. 213 216 }% 214 217 215 \keywords{ generic types, tuple types, variadic types, polymorphic functions, C, Cforall}218 \keywords{C, Cforall, generic types, polymorphic functions, tuple types, variadic types} 216 219 217 220 218 221 \begin{document} 219 \linenumbers % comment out to turn off line numbering222 %\linenumbers % comment out to turn off line numbering 220 223 221 224 \maketitle 222 225 223 226 227 \vspace*{-10pt} 224 228 \section{Introduction} 225 229 226 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from hobby projects to commercial operating -systems.227 This installation base and the programmers producing it represent a massive software -engineering investment spanning decades and likely to continue for decades more.228 The TIOBE ~\cite{TIOBE} ranks the top 5 most \emph{popular} programming languages as: Java 15\%, \Textbf{C 12\%}, \Textbf{\CC 5.5\%},Python 5\%, \Csharp 4.5\% = 42\%, where 
the next 50 languages are less than 4\% each with a long tail.229 The top 3 rankings over the past 30 years are:230 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from hobby projects to commercial operating systems. 231 This installation base and the programmers producing it represent a massive software engineering investment spanning decades and likely to continue for decades more. 232 The TIOBE index~\cite{TIOBE} ranks the top five most \emph{popular} programming languages as Java 15\%, \Textbf{C 12\%}, \Textbf{\CC 5.5\%}, and Python 5\%, \Csharp 4.5\% = 42\%, where the next 50 languages are less than 4\% each with a long tail. 233 The top three rankings over the past 30 years are as follows. 230 234 \begin{center} 231 235 \setlength{\tabcolsep}{10pt} 232 \lstDeleteShortInline@% 233 \begin{tabular}{@{}rccccccc@{}} 234 & 2018 & 2013 & 2008 & 2003 & 1998 & 1993 & 1988 \\ \hline 235 Java & 1 & 2 & 1 & 1 & 18 & - & - \\ 236 \fontsize{9bp}{11bp}\selectfont 237 \lstDeleteShortInline@% 238 \begin{tabular}{@{}cccccccc@{}} 239 & 2018 & 2013 & 2008 & 2003 & 1998 & 1993 & 1988 \\ 240 Java & 1 & 2 & 1 & 1 & 18 & -- & -- \\ 236 241 \Textbf{C}& \Textbf{2} & \Textbf{1} & \Textbf{2} & \Textbf{2} & \Textbf{1} & \Textbf{1} & \Textbf{1} \\ 237 242 \CC & 3 & 4 & 3 & 3 & 2 & 2 & 5 \\ … … 241 246 Love it or hate it, C is extremely popular, highly used, and one of the few systems languages. 242 247 In many cases, \CC is often used solely as a better C. 
243 Nevertheless, C, first standardized almost fortyyears ago~\cite{ANSI89:C}, lacks many features that make programming in more modern languages safer and more productive.244 245 \CFA (pronounced ``C -for-all'', and written \CFA or Cforall) is an evolutionary extension of the C programming language that adds modern language-features to C, while maintaining source and runtime compatibility in the familiar C programming model.246 The four key design goals for \CFA~\cite{Bilson03} are :247 (1) The behaviour of standard C code must remain the same when translated by a \CFA compiler as when translated by a C compiler;248 (2) Standard C code must be as fast and as small when translated by a \CFA compiler as when translated by a C compiler;249 (3) \CFA code must be at least as portable as standard C code;250 (4) Extensions introduced by \CFA must be translated in the most efficient way possible.251 These goals ensure existing C code-bases can be convertedto \CFA incrementally with minimal effort, and C programmers can productively generate \CFA code without training beyond the features being used.252 \CC is used similarly , but has the disadvantages of multiple legacy design-choices that cannot be updated and active divergence of the language model from C, requiring significant effort and training to incrementally add \CC to a C-based project.253 254 All language sfeatures discussed in this paper are working, except some advanced exception-handling features.255 Not discussed in this paper are the integrated concurrency -constructs and user-level threading-library~\cite{Delisle18}.248 Nevertheless, C, which was first standardized almost 30 years ago~\cite{ANSI89:C}, lacks many features that make programming in more modern languages safer and more productive. 
249 250 \CFA (pronounced ``C for all'' and written \CFA or Cforall) is an evolutionary extension of the C programming language that adds modern language features to C, while maintaining source and runtime compatibility in the familiar C programming model. 251 The four key design goals for \CFA~\cite{Bilson03} are as follows: 252 (1) the behavior of standard C code must remain the same when translated by a \CFA compiler as when translated by a C compiler; 253 (2) the standard C code must be as fast and as small when translated by a \CFA compiler as when translated by a C compiler; 254 (3) the \CFA code must be at least as portable as standard C code; 255 (4) extensions introduced by \CFA must be translated in the most efficient way possible. 256 These goals ensure that the existing C code bases can be converted into \CFA incrementally with minimal effort, and C programmers can productively generate \CFA code without training beyond the features being used. 257 \CC is used similarly but has the disadvantages of multiple legacy design choices that cannot be updated and active divergence of the language model from C, requiring significant effort and training to incrementally add \CC to a C-based project. 258 259 All language features discussed in this paper are working, except some advanced exception-handling features. 260 Not discussed in this paper are the integrated concurrency constructs and user-level threading library~\cite{Delisle18}. 256 261 \CFA is an \emph{open-source} project implemented as a source-to-source translator from \CFA to the gcc-dialect of C~\cite{GCCExtensions}, allowing it to leverage the portability and code optimizations provided by gcc, meeting goals (1)--(3). 
257 Ultimately, a compiler is necessary for advanced features and optimal performance.258 262 % @plg2[9]% cd cfa-cc/src; cloc ArgTweak CodeGen CodeTools Common Concurrency ControlStruct Designators GenPoly InitTweak MakeLibCfa.cc MakeLibCfa.h Parser ResolvExpr SymTab SynTree Tuples driver prelude main.cc 259 263 % ------------------------------------------------------------------------------- … … 270 274 % SUM: 223 8203 8263 46479 271 275 % ------------------------------------------------------------------------------- 272 The \CFA translator is 200+ files and 46,000+ lines of code written in C/\CC. 273 Starting with a translator versus a compiler makes it easier and faster to generate and debug C object-code rather than intermediate, assembler or machine code. 274 The translator design is based on the \emph{visitor pattern}, allowing multiple passes over the abstract code-tree, which works well for incrementally adding new feature through additional visitor passes. 275 At the heart of the translator is the type resolver, which handles the polymorphic function/type overload-resolution. 276 The \CFA translator is 200+ files and 46\,000+ lines of code written in C/\CC. 277 A translator versus a compiler makes it easier and faster to generate and debug the C object code rather than the intermediate, assembler, or machine code; 278 ultimately, a compiler is necessary for advanced features and optimal performance. 279 % The translator design is based on the \emph{visitor pattern}, allowing multiple passes over the abstract code-tree, which works well for incrementally adding new feature through additional visitor passes. 280 Two key translator components are expression analysis, determining expression validity and what operations are required for its implementation, and code generation, dealing with multiple forms of overloading, polymorphism, and multiple return values by converting them into the C code for a C compiler that supports none of these features. 
281 Details of these components are available in chapters 2 and 3 in the work of Bilson~\cite{Bilson03} and form the base for the current \CFA translator. 276 282 % @plg2[8]% cd cfa-cc/src; cloc libcfa 277 283 % ------------------------------------------------------------------------------- … … 288 294 % SUM: 100 1895 2785 11763 289 295 % ------------------------------------------------------------------------------- 290 The \CFA runtime system is 100+ files and 11 ,000+ lines of code, written in \CFA.296 The \CFA runtime system is 100+ files and 11\,000+ lines of code, written in \CFA. 291 297 Currently, the \CFA runtime is the largest \emph{user} of \CFA providing a vehicle to test the language features and implementation. 292 298 % @plg2[6]% cd cfa-cc/src; cloc tests examples benchmark … … 305 311 % SUM: 290 13175 3400 27776 306 312 % ------------------------------------------------------------------------------- 307 The \CFA tests are 290+ files and 27,000+ lines of code. 308 The tests illustrate syntactic and semantic features in \CFA, plus a growing number of runtime benchmarks. 309 The tests check for correctness and are used for daily regression testing of 3800+ commits. 310 311 Finally, it is impossible to describe a programming language without usages before definitions. 312 Therefore, syntax and semantics appear before explanations, and related work (Section~\ref{s:RelatedWork}) is deferred until \CFA is presented; 313 hence, patience is necessary until details are discussed. 314 315 313 % The \CFA tests are 290+ files and 27,000+ lines of code. 314 % The tests illustrate syntactic and semantic features in \CFA, plus a growing number of runtime benchmarks. 315 % The tests check for correctness and are used for daily regression testing of 3800+ commits. 316 317 Finally, it is impossible to describe a programming language without usage before definition. 
318 Therefore, syntax and semantics appear before explanations; 319 hence, patience is necessary until sufficient details are presented and discussed. 320 Similarly, a detailed comparison with other programming languages is postponed until Section~\ref{s:RelatedWork}. 321 322 323 \vspace*{-6pt} 316 324 \section{Polymorphic Functions} 317 325 318 \CFA introduces both ad -hoc and parametric polymorphism to C, with a design originally formalized by Ditchfield~\cite{Ditchfield92},and first implemented by Bilson~\cite{Bilson03}.319 Shortcomings are identified in existing approaches to generic and variadic data types in C-like languages and how these shortcomings are avoided in \CFA.320 Specifically, the solution is both reusable and type -checked, as well as conforming to the design goals of \CFA with ergonomic use of existing C abstractions.326 \CFA introduces both ad hoc and parametric polymorphism to C, with a design originally formalized by Ditchfield~\cite{Ditchfield92} and first implemented by Bilson~\cite{Bilson03}. 327 Shortcomings are identified in the existing approaches to generic and variadic data types in C-like languages and how these shortcomings are avoided in \CFA. 328 Specifically, the solution is both reusable and type checked, as well as conforming to the design goals of \CFA with ergonomic use of existing C abstractions. 321 329 The new constructs are empirically compared with C and \CC approaches via performance experiments in Section~\ref{sec:eval}. 
322 330 323 331 324 \subsection{Name Overloading} 332 \vspace*{-6pt} 333 \subsection{Name overloading} 325 334 \label{s:NameOverloading} 326 335 327 336 \begin{quote} 328 There are only two hard things in Computer Science: cache invalidation and \emph{naming things} --Phil Karlton337 ``There are only two hard things in Computer Science: cache invalidation and \emph{naming things}.''---Phil Karlton 329 338 \end{quote} 330 339 \vspace{-9pt} 331 C already has a limited form of ad -hoc polymorphism in its basic arithmetic operators, which apply to a variety of different types using identical syntax.340 C already has a limited form of ad hoc polymorphism in its basic arithmetic operators, which apply to a variety of different types using identical syntax. 332 341 \CFA extends the built-in operator overloading by allowing users to define overloads for any function, not just operators, and even any variable; 333 342 Section~\ref{sec:libraries} includes a number of examples of how this overloading simplifies \CFA programming relative to C. 334 343 Code generation for these overloaded functions and variables is implemented by the usual approach of mangling the identifier names to include a representation of their type, while \CFA decides which overload to apply based on the same ``usual arithmetic conversions'' used in C to disambiguate operator overloads. 335 As an example: 344 345 \newpage 336 346 \begin{cfa} 337 347 int max = 2147483647; $\C[4in]{// (1)}$ … … 339 349 int max( int a, int b ) { return a < b ? b : a; } $\C{// (3)}$ 340 350 double max( double a, double b ) { return a < b ? 
b : a; } $\C{// (4)}\CRT$ 341 max( 7, -max ); $\C {// uses (3) and (1), by matching int from constant 7}$351 max( 7, -max ); $\C[3in]{// uses (3) and (1), by matching int from constant 7}$ 342 352 max( max, 3.14 ); $\C{// uses (4) and (2), by matching double from constant 3.14}$ 343 353 max( max, -max ); $\C{// ERROR, ambiguous}$ 344 int m = max( max, -max ); $\C{// uses (3) and (1) twice, by matching return type} $354 int m = max( max, -max ); $\C{// uses (3) and (1) twice, by matching return type}\CRT$ 345 355 \end{cfa} 346 356 … … 348 358 In some cases, hundreds of names can be reduced to tens, resulting in a significant cognitive reduction. 349 359 In the above, the name @max@ has a consistent meaning, and a programmer only needs to remember the single concept: maximum. 350 To prevent significant ambiguities, \CFA uses the return type in selecting overloads, \eg in the assignment to @m@, the compiler use @m@'s type to unambiguously select the most appropriate call to function @max@ (as does Ada).360 To prevent significant ambiguities, \CFA uses the return type in selecting overloads, \eg in the assignment to @m@, the compiler uses @m@'s type to unambiguously select the most appropriate call to function @max@ (as does Ada). 351 361 As is shown later, there are a number of situations where \CFA takes advantage of available type information to disambiguate, where other programming languages generate ambiguities. 352 362 353 \Celeven added @_Generic@ expressions ~\cite[\S~6.5.1.1]{C11}, which is used with preprocessor macros to provide ad-hoc polymorphism;363 \Celeven added @_Generic@ expressions (see section~6.5.1.1 of the ISO/IEC 9899~\cite{C11}), which is used with preprocessor macros to provide ad hoc polymorphism; 354 364 however, this polymorphism is both functionally and ergonomically inferior to \CFA name overloading. 
355 The macro wrapping the generic expression imposes some limitations; 356 \eg, it cannot implement the example above, because the variables @max@ are ambiguous with the functions @max@. 365 The macro wrapping the generic expression imposes some limitations, for instance, it cannot implement the example above, because the variables @max@ are ambiguous with the functions @max@. 357 366 Ergonomic limitations of @_Generic@ include the necessity to put a fixed list of supported types in a single place and manually dispatch to appropriate overloads, as well as possible namespace pollution from the dispatch functions, which must all have distinct names. 358 \CFA supports @_Generic@ expressions for backward s compatibility, but it is an unnecessary mechanism. \TODO{actually implement that}367 \CFA supports @_Generic@ expressions for backward compatibility, but it is an unnecessary mechanism. 359 368 360 369 % http://fanf.livejournal.com/144696.html … … 363 372 364 373 365 \subsection{\texorpdfstring{\protect\lstinline{forall} Functions}{forall Functions}} 374 \vspace*{-10pt} 375 \subsection{\texorpdfstring{\protect\lstinline{forall} functions}{forall functions}} 366 376 \label{sec:poly-fns} 367 377 368 The signature feature of \CFA is parametric-polymorphic functions~\cite{forceone:impl,Cormack90,Duggan96} with functions generalized using a @forall@ clause (giving the language its name) :378 The signature feature of \CFA is parametric-polymorphic functions~\cite{forceone:impl,Cormack90,Duggan96} with functions generalized using a @forall@ clause (giving the language its name). 369 379 \begin{cfa} 370 380 `forall( otype T )` T identity( T val ) { return val; } … … 373 383 This @identity@ function can be applied to any complete \newterm{object type} (or @otype@). 374 384 The type variable @T@ is transformed into a set of additional implicit parameters encoding sufficient information about @T@ to create and return a variable of that type. 
375 The \CFA implementation passes the size and alignment of the type represented by an @otype@ parameter, as well as an assignment operator, constructor, copy constructor and destructor.376 If this extra information is not needed, \egfor a pointer, the type parameter can be declared as a \newterm{data type} (or @dtype@).377 378 In \CFA, the polymorphic runtime -cost is spread over each polymorphic call, because more arguments are passed to polymorphic functions;379 the experiments in Section~\ref{sec:eval} show this overhead is similar to \CC virtual -function calls.380 A design advantage is that, unlike \CC template -functions, \CFA polymorphic-functions are compatible with C \emph{separate compilation}, preventing compilation and code bloat.381 382 Since bare polymorphic -types provide a restricted set of available operations, \CFA provides a \newterm{type assertion}~\cite[pp.~37-44]{Alphard} mechanism to provide further type information, where type assertions may be variable or function declarations that depend on a polymorphic type-variable.383 For example, the function @twice@ can be defined using the \CFA syntax for operator overloading :385 The \CFA implementation passes the size and alignment of the type represented by an @otype@ parameter, as well as an assignment operator, constructor, copy constructor, and destructor. 386 If this extra information is not needed, for instance, for a pointer, the type parameter can be declared as a \newterm{data type} (or @dtype@). 387 388 In \CFA, the polymorphic runtime cost is spread over each polymorphic call, because more arguments are passed to polymorphic functions; 389 the experiments in Section~\ref{sec:eval} show this overhead is similar to \CC virtual function calls. 390 A design advantage is that, unlike \CC template functions, \CFA polymorphic functions are compatible with C \emph{separate compilation}, preventing compilation and code bloat. 
391 392 Since bare polymorphic types provide a restricted set of available operations, \CFA provides a \newterm{type assertion}~\cite[pp.~37-44]{Alphard} mechanism to provide further type information, where type assertions may be variable or function declarations that depend on a polymorphic type variable. 393 For example, the function @twice@ can be defined using the \CFA syntax for operator overloading. 384 394 \begin{cfa} 385 395 forall( otype T `| { T ?+?(T, T); }` ) T twice( T x ) { return x `+` x; } $\C{// ? denotes operands}$ 386 396 int val = twice( twice( 3.7 ) ); $\C{// val == 14}$ 387 397 \end{cfa} 388 which works for any type @T@ with a matching addition operator. 389 The polymorphism is achieved by creating a wrapper function for calling @+@ with @T@ bound to @double@, then passing this function to the first call of @twice@. 390 There is now the option of using the same @twice@ and converting the result to @int@ on assignment, or creating another @twice@ with type parameter @T@ bound to @int@ because \CFA uses the return type~\cite{Cormack81,Baker82,Ada} in its type analysis. 391 The first approach has a late conversion from @double@ to @int@ on the final assignment, while the second has an early conversion to @int@. 392 \CFA minimizes the number of conversions and their potential to lose information, so it selects the first approach, which corresponds with C-programmer intuition. 398 This works for any type @T@ with a matching addition operator. 399 The polymorphism is achieved by creating a wrapper function for calling @+@ with the @T@ bound to @double@ and then passing this function to the first call of @twice@. 400 There is now the option of using the same @twice@ and converting the result into @int@ on assignment or creating another @twice@ with the type parameter @T@ bound to @int@ because \CFA uses the return type~\cite{Cormack81,Baker82,Ada} in its type analysis. 
401 The first approach has a late conversion from @double@ to @int@ on the final assignment, whereas the second has an early conversion to @int@. 402 \CFA minimizes the number of conversions and their potential to lose information; 403 hence, it selects the first approach, which corresponds with C programmer intuition. 393 404 394 405 Crucial to the design of a new programming language are the libraries to access thousands of external software features. 395 Like \CC, \CFA inherits a massive compatible library -base, where other programming languages must rewrite or provide fragile inter-language communication with C.396 A simple example is leveraging the existing type-unsafe (@void *@) C @bsearch@ to binary search a sorted float array :406 Like \CC, \CFA inherits a massive compatible library base, where other programming languages must rewrite or provide fragile interlanguage communication with C. 407 A simple example is leveraging the existing type-unsafe (@void *@) C @bsearch@ to binary search a sorted float array. 397 408 \begin{cfa} 398 409 void * bsearch( const void * key, const void * base, size_t nmemb, size_t size, … … 404 415 double * val = (double *)bsearch( &key, vals, 10, sizeof(vals[0]), comp ); $\C{// search sorted array}$ 405 416 \end{cfa} 406 which can be augmented simply with generalized, type-safe, \CFA-overloaded wrappers: 417 This can be augmented simply with generalized, type-safe, \CFA-overloaded wrappers. 407 418 \begin{cfa} 408 419 forall( otype T | { int ?<?( T, T ); } ) T * bsearch( T key, const T * arr, size_t size ) { … … 418 429 \end{cfa} 419 430 The nested function @comp@ provides the hidden interface from typed \CFA to untyped (@void *@) C, plus the cast of the result. 420 Providing a hidden @comp@ function in \CC is awkward as lambdas do not use C calling-conventions and template declarations cannot appear at block scope. 421 As well, an alternate kind of return is made available: position versus pointer to found element. 
422 \CC's type-system cannot disambiguate between the two versions of @bsearch@ because it does not use the return type in overload resolution, nor can \CC separately compile a template @bsearch@. 431 % FIX 432 Providing a hidden @comp@ function in \CC is awkward as lambdas do not use C calling conventions and template declarations cannot appear in block scope. 433 In addition, an alternate kind of return is made available: position versus pointer to found element. 434 \CC's type system cannot disambiguate between the two versions of @bsearch@ because it does not use the return type in overload resolution, nor can \CC separately compile a template @bsearch@. 423 435 424 436 \CFA has replacement libraries condensing hundreds of existing C functions into tens of \CFA overloaded functions, all without rewriting the actual computations (see Section~\ref{sec:libraries}). … … 430 442 \end{cfa} 431 443 432 Call -site inferencing and nested functions provide a localized form of inheritance.444 Call site inferencing and nested functions provide a localized form of inheritance. 433 445 For example, the \CFA @qsort@ only sorts in ascending order using @<@. 434 However, it is trivial to locally change this behavio ur:446 However, it is trivial to locally change this behavior. 435 447 \begin{cfa} 436 448 forall( otype T | { int ?<?( T, T ); } ) void qsort( const T * arr, size_t size ) { /* use C qsort */ } 437 449 int main() { 438 int ?<?( double x, double y ) { return x `>` y; } $\C{// locally override behavio ur}$450 int ?<?( double x, double y ) { return x `>` y; } $\C{// locally override behavior}$ 439 451 qsort( vals, 10 ); $\C{// descending sort}$ 440 452 } 441 453 \end{cfa} 442 454 The local version of @?<?@ performs @?>?@ overriding the built-in @?<?@ so it is passed to @qsort@. 
443 Hence, programmers can easily form local environments, adding and modifying appropriate functions, to maximize reuse of other existing functions and types.444 445 To reduce duplication, it is possible to distribute a group of @forall@ (and storage-class qualifiers) over functions/types, s o each block declaration is prefixed by the group (see example in Appendix~\ref{s:CforallStack}).455 Therefore, programmers can easily form local environments, adding and modifying appropriate functions, to maximize the reuse of other existing functions and types. 456 457 To reduce duplication, it is possible to distribute a group of @forall@ (and storage-class qualifiers) over functions/types, such that each block declaration is prefixed by the group (see the example in Appendix~\ref{s:CforallStack}). 446 458 \begin{cfa} 447 459 forall( otype `T` ) { $\C{// distribution block, add forall qualifier to declarations}$ … … 454 466 455 467 456 \vspace*{-2pt}457 468 \subsection{Traits} 458 469 459 \CFA provides \newterm{traits} to name a group of type assertions, where the trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each function declaration: 460 470 \CFA provides \newterm{traits} to name a group of type assertions, where the trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each function declaration. 461 471 \begin{cquote} 462 472 \lstDeleteShortInline@% … … 485 495 \end{cquote} 486 496 487 Note ,the @sumable@ trait does not include a copy constructor needed for the right side of @?+=?@ and return;488 it is provided by @otype@, which is syntactic sugar for the following trait :497 Note that the @sumable@ trait does not include a copy constructor needed for the right side of @?+=?@ and return; 498 it is provided by @otype@, which is syntactic sugar for the following trait. 
489 499 \begin{cfa} 490 500 trait otype( dtype T | sized(T) ) { // sized is a pseudo-trait for types with known size and alignment … … 495 505 }; 496 506 \end{cfa} 497 Given the information provided for an @otype@, variables of polymorphic type can be treated as if they were a complete type: stack -allocatable, default or copy-initialized, assigned, and deleted.498 499 In summation, the \CFA type -system uses \newterm{nominal typing} for concrete types, matching with the C type-system, and \newterm{structural typing} for polymorphic types.507 Given the information provided for an @otype@, variables of polymorphic type can be treated as if they were a complete type: stack allocatable, default or copy initialized, assigned, and deleted. 508 509 In summation, the \CFA type system uses \newterm{nominal typing} for concrete types, matching with the C type system, and \newterm{structural typing} for polymorphic types. 500 510 Hence, trait names play no part in type equivalence; 501 511 the names are simply macros for a list of polymorphic assertions, which are expanded at usage sites. 502 Nevertheless, trait names form a logical subtype -hierarchy with @dtype@ at the top, where traits often contain overlapping assertions, \eg operator @+@.503 Traits are used like interfaces in Java or abstract base -classes in \CC, but without the nominal inheritance-relationships.504 Instead, each polymorphic function (or generic type) defines the structural type needed for its execution (polymorphic type -key), and this key is fulfilled at each call site from the lexical environment, which is similar toGo~\cite{Go} interfaces.505 Hence, new lexical scopes and nested functions are used extensively to create local subtypes, as in the @qsort@ example, without having to manage a nominal -inheritance hierarchy.512 Nevertheless, trait names form a logical subtype hierarchy with @dtype@ at the top, where traits often contain overlapping assertions, \eg operator @+@. 
513 Traits are used like interfaces in Java or abstract base classes in \CC, but without the nominal inheritance relationships. 514 Instead, each polymorphic function (or generic type) defines the structural type needed for its execution (polymorphic type key), and this key is fulfilled at each call site from the lexical environment, which is similar to the Go~\cite{Go} interfaces. 515 Hence, new lexical scopes and nested functions are used extensively to create local subtypes, as in the @qsort@ example, without having to manage a nominal inheritance hierarchy. 506 516 % (Nominal inheritance can be approximated with traits using marker variables or functions, as is done in Go.) 507 517 … … 534 544 535 545 A significant shortcoming of standard C is the lack of reusable type-safe abstractions for generic data structures and algorithms. 536 Broadly speaking, there are three approaches to implement abstract data -structures in C.537 One approach is to write bespoke data -structures for each context in which they are needed.538 While this approach is flexible and supports integration with the C type -checker and tooling, it is also tedious and error-prone, especially for more complex data structures.539 A second approach is to use @void *@-based polymorphism, \eg the C standard -library functions @bsearch@ and @qsort@, which allowreuse of code with common functionality.540 However, basing all polymorphism on @void *@ eliminates the type -checker's ability to ensure that argument types are properly matched, often requiring a number of extra function parameters, pointer indirection, and dynamic allocation that is not otherwiseneeded.541 A third approach to generic code is to use preprocessor macros, which does allow the generated code to be both generic and type -checked, but errors may be difficult to interpret.546 Broadly speaking, there are three approaches to implement abstract data structures in C. 
547 One approach is to write bespoke data structures for each context in which they are needed. 548 While this approach is flexible and supports integration with the C type checker and tooling, it is also tedious and error prone, especially for more complex data structures. 549 A second approach is to use @void *@-based polymorphism, \eg the C standard library functions @bsearch@ and @qsort@, which allow for the reuse of code with common functionality. 550 However, basing all polymorphism on @void *@ eliminates the type checker's ability to ensure that argument types are properly matched, often requiring a number of extra function parameters, pointer indirection, and dynamic allocation that is otherwise not needed. 551 A third approach to generic code is to use preprocessor macros, which does allow the generated code to be both generic and type checked, but errors may be difficult to interpret. 542 552 Furthermore, writing and using preprocessor macros is unnatural and inflexible. 543 553 544 \CC, Java, and other languages use \newterm{generic types} to produce type-safe abstract data -types.545 \CFA generic types integrate efficiently and naturally with the existing polymorphic functions, while retaining backward scompatibility with C and providing separate compilation.554 \CC, Java, and other languages use \newterm{generic types} to produce type-safe abstract data types. 555 \CFA generic types integrate efficiently and naturally with the existing polymorphic functions, while retaining backward compatibility with C and providing separate compilation. 546 556 However, for known concrete parameters, the generic-type definition can be inlined, like \CC templates. 
547 557 548 A generic type can be declared by placing a @forall@ specifier on a @struct@ or @union@ declaration , and instantiated using a parenthesized list of types after the type name:558 A generic type can be declared by placing a @forall@ specifier on a @struct@ or @union@ declaration and instantiated using a parenthesized list of types after the type name. 549 559 \begin{cquote} 550 560 \lstDeleteShortInline@% … … 574 584 575 585 \CFA classifies generic types as either \newterm{concrete} or \newterm{dynamic}. 576 Concrete types have a fixed memory layout regardless of type parameters, wh iledynamic types vary in memory layout depending on their type parameters.586 Concrete types have a fixed memory layout regardless of type parameters, whereas dynamic types vary in memory layout depending on their type parameters. 577 587 A \newterm{dtype-static} type has polymorphic parameters but is still concrete. 578 588 Polymorphic pointers are an example of dtype-static types; 579 given some type variable @T@, @T@ is a polymorphic type, as is @T *@, but @T *@ has a fixed size and can thereforebe represented by @void *@ in code generation.580 581 \CFA generic types also allow checked argument -constraints.582 For example, the following declaration of a sorted set -type ensures the set key supports equality and relational comparison:589 given some type variable @T@, @T@ is a polymorphic type, as is @T *@, but @T *@ has a fixed size and can, therefore, be represented by @void *@ in code generation. 590 591 \CFA generic types also allow checked argument constraints. 592 For example, the following declaration of a sorted set type ensures the set key supports equality and relational comparison. 
583 593 \begin{cfa} 584 594 forall( otype Key | { _Bool ?==?(Key, Key); _Bool ?<?(Key, Key); } ) struct sorted_set; … … 586 596 587 597 588 \subsection{Concrete Generic-Types}589 590 The \CFA translator template -expands concrete generic-types into new structure types, affording maximal inlining.591 To enable inter -operation among equivalent instantiations of a generic type, the translator saves the set of instantiations currently in scope and reuses the generated structure declarations where appropriate.592 A function declaration that accepts or returns a concrete generic -type produces a declaration for the instantiated structure in the same scope, which all callers may reuse.593 For example, the concrete instantiation for @pair( const char *, int )@ is :598 \subsection{Concrete generic types} 599 600 The \CFA translator template expands concrete generic types into new structure types, affording maximal inlining. 601 To enable interoperation among equivalent instantiations of a generic type, the translator saves the set of instantiations currently in scope and reuses the generated structure declarations where appropriate. 602 A function declaration that accepts or returns a concrete generic type produces a declaration for the instantiated structure in the same scope, which all callers may reuse. 603 For example, the concrete instantiation for @pair( const char *, int )@ is 594 604 \begin{cfa} 595 605 struct _pair_conc0 { … … 598 608 \end{cfa} 599 609 600 A concrete generic -type with dtype-static parameters is also expanded to a structure type, but this type is used for all matching instantiations.601 In the above example, the @pair( F *, T * )@ parameter to @value@ is such a type; its expansion is below and it is used as the type of the variables @q@ and @r@ as well, with casts for member access where appropriate:610 A concrete generic type with dtype-static parameters is also expanded to a structure type, but this type is used for all matching instantiations. 
611 In the above example, the @pair( F *, T * )@ parameter to @value@ is such a type; its expansion is below, and it is used as the type of the variables @q@ and @r@ as well, with casts for member access where appropriate. 602 612 \begin{cfa} 603 613 struct _pair_conc1 { … … 607 617 608 618 609 \subsection{Dynamic Generic-Types} 610 611 Though \CFA implements concrete generic-types efficiently, it also has a fully general system for dynamic generic types. 612 As mentioned in Section~\ref{sec:poly-fns}, @otype@ function parameters (in fact all @sized@ polymorphic parameters) come with implicit size and alignment parameters provided by the caller. 613 Dynamic generic-types also have an \newterm{offset array} containing structure-member offsets. 614 A dynamic generic-@union@ needs no such offset array, as all members are at offset 0, but size and alignment are still necessary. 615 Access to members of a dynamic structure is provided at runtime via base-displacement addressing with the structure pointer and the member offset (similar to the @offsetof@ macro), moving a compile-time offset calculation to runtime. 619 \subsection{Dynamic generic types} 620 621 Though \CFA implements concrete generic types efficiently, it also has a fully general system for dynamic generic types. 622 As mentioned in Section~\ref{sec:poly-fns}, @otype@ function parameters (in fact, all @sized@ polymorphic parameters) come with implicit size and alignment parameters provided by the caller. 623 Dynamic generic types also have an \newterm{offset array} containing structure-member offsets. 624 A dynamic generic @union@ needs no such offset array, as all members are at offset 0, but size and alignment are still necessary. 625 Access to members of a dynamic structure is provided at runtime via base displacement addressing 626 % FIX 627 using the structure pointer and the member offset (similar to the @offsetof@ macro), moving a compile-time offset calculation to runtime. 
616 628 617 629 The offset arrays are statically generated where possible. 618 If a dynamic generic -type is declared to be passed or returned by value from a polymorphic function, the translator can safely assume the generic type is complete (\ie has a known layout) at any call-site, and the offset array is passed from the caller;630 If a dynamic generic type is declared to be passed or returned by value from a polymorphic function, the translator can safely assume that the generic type is complete (\ie has a known layout) at any call site, and the offset array is passed from the caller; 619 631 if the generic type is concrete at the call site, the elements of this offset array can even be statically generated using the C @offsetof@ macro. 620 As an example, the body of the second @value@ function is implemented as :632 As an example, the body of the second @value@ function is implemented as 621 633 \begin{cfa} 622 634 _assign_T( _retval, p + _offsetof_pair[1] ); $\C{// return *p.second}$ 623 635 \end{cfa} 624 @_assign_T@ is passed in as an implicit parameter from @otype T@, and takes two @T *@ (@void *@ in the generated code), a destination and a source; @_retval@ is the pointer to a caller-allocated buffer for the return value, the usual \CFA method to handle dynamically-sized return types. 625 @_offsetof_pair@ is the offset array passed into @value@; this array is generated at the call site as: 636 \newpage 637 \noindent 638 Here, @_assign_T@ is passed in as an implicit parameter from @otype T@, and takes two @T *@ (@void *@ in the generated code), a destination and a source, and @_retval@ is the pointer to a caller-allocated buffer for the return value, the usual \CFA method to handle dynamically sized return types. 
639 @_offsetof_pair@ is the offset array passed into @value@; 640 this array is generated at the call site as 626 641 \begin{cfa} 627 642 size_t _offsetof_pair[] = { offsetof( _pair_conc0, first ), offsetof( _pair_conc0, second ) } 628 643 \end{cfa} 629 644 630 In some cases the offset arrays cannot be statically generated.631 For instance, modularity is generally provided in C by including an opaque forward -declaration of a structure and associated accessor and mutator functions in a header file, with the actual implementations in a separately-compiled @.c@ file.632 \CFA supports this pattern for generic types, but the caller does not know the actual layout or size of the dynamic generic -type,and only holds it by a pointer.645 In some cases, the offset arrays cannot be statically generated. 646 For instance, modularity is generally provided in C by including an opaque forward declaration of a structure and associated accessor and mutator functions in a header file, with the actual implementations in a separately compiled @.c@ file. 647 \CFA supports this pattern for generic types, but the caller does not know the actual layout or size of the dynamic generic type and only holds it by a pointer. 633 648 The \CFA translator automatically generates \newterm{layout functions} for cases where the size, alignment, and offset array of a generic struct cannot be passed into a function from that function's caller. 634 649 These layout functions take as arguments pointers to size and alignment variables and a caller-allocated array of member offsets, as well as the size and alignment of all @sized@ parameters to the generic structure (un@sized@ parameters are forbidden from being used in a context that affects layout). … … 640 655 Whether a type is concrete, dtype-static, or dynamic is decided solely on the @forall@'s type parameters. 
641 656 This design allows opaque forward declarations of generic types, \eg @forall(otype T)@ @struct Box@ -- like in C, all uses of @Box(T)@ can be separately compiled, and callers from other translation units know the proper calling conventions to use. 642 If the definition of a structure type is included in deciding whether a generic type is dynamic or concrete, some further types may be recognized as dtype-static (\eg @forall(otype T)@ @struct unique_ptr { T * p }@ does not depend on @T@ for its layout, but the existence of an @otype@ parameter means that it \emph{could}.), but preserving separate compilation (and the associated C compatibility) in the existing design is judged to be an appropriate trade-off. 657 If the definition of a structure type is included in deciding whether a generic type is dynamic or concrete, some further types may be recognized as dtype-static (\eg @forall(otype T)@ @struct unique_ptr { T * p }@ does not depend on @T@ for its layout, but the existence of an @otype@ parameter means that it \emph{could}.); 658 however, preserving separate compilation (and the associated C compatibility) in the existing design is judged to be an appropriate trade-off. 643 659 644 660 … … 653 669 } 654 670 \end{cfa} 655 Since @pair( T *, T * )@ is a concrete type, there are no implicit parameters passed to @lexcmp@, so the generated code is identical to a function written in standard C using @void *@, yet the \CFA version is type-checked to ensure the members of both pairs and the arguments to the comparison function match in type. 656 657 Another useful pattern enabled by reused dtype-static type instantiations is zero-cost \newterm{tag-structures}. 
658 Sometimes information is only used for type-checking and can be omitted at runtime, \eg: 671 Since @pair( T *, T * )@ is a concrete type, there are no implicit parameters passed to @lexcmp@; 672 hence, the generated code is identical to a function written in standard C using @void *@, yet the \CFA version is type checked to ensure members of both pairs and arguments to the comparison function match in type. 673 674 Another useful pattern enabled by reused dtype-static type instantiations is zero-cost \newterm{tag structures}. 675 Sometimes, information is only used for type checking and can be omitted at runtime. 659 676 \begin{cquote} 660 677 \lstDeleteShortInline@% … … 675 692 half_marathon; 676 693 scalar(litres) two_pools = pool + pool; 677 `marathon + pool;` // ERROR, mismatched types694 `marathon + pool;` // ERROR, mismatched types 678 695 \end{cfa} 679 696 \end{tabular} 680 697 \lstMakeShortInline@% 681 698 \end{cquote} 682 @scalar@ is a dtype-static type, so all uses have a single structure definition, containing @unsigned long@, and can share the same implementations of common functions like @?+?@. 699 Here, @scalar@ is a dtype-static type; 700 hence, all uses have a single structure definition, containing @unsigned long@, and can share the same implementations of common functions like @?+?@. 683 701 These implementations may even be separately compiled, unlike \CC template functions. 684 However, the \CFA type -checker ensures matching types are used by all calls to @?+?@, preventing nonsensical computations like adding a length to a volume.702 However, the \CFA type checker ensures matching types are used by all calls to @?+?@, preventing nonsensical computations like adding a length to a volume. 685 703 686 704 … … 688 706 \label{sec:tuples} 689 707 690 In many languages, functions can return at mostone value;708 In many languages, functions can return, at most, one value; 691 709 however, many operations have multiple outcomes, some exceptional. 
692 710 Consider C's @div@ and @remquo@ functions, which return the quotient and remainder for a division of integer and float values, respectively. … … 699 717 double r = remquo( 13.5, 5.2, &q ); $\C{// return remainder, alias quotient}$ 700 718 \end{cfa} 701 @div@ aggregates the quotient/remainder in a structure, while@remquo@ aliases a parameter to an argument.719 Here, @div@ aggregates the quotient/remainder in a structure, whereas @remquo@ aliases a parameter to an argument. 702 720 Both approaches are awkward. 703 Alternatively, a programming language can directly support returning multiple values, \eg in \CFA: 721 % FIX 722 Alternatively, a programming language can directly support returning multiple values, \eg \CFA provides the following. 704 723 \begin{cfa} 705 724 [ int, int ] div( int num, int den ); $\C{// return two integers}$ … … 712 731 This approach is straightforward to understand and use; 713 732 therefore, why do few programming languages support this obvious feature or provide it awkwardly? 714 To answer, there are complex consequences that cascade through multiple aspects of the language, especially the type -system.715 This section show these consequences and how \CFA handles them.733 To answer, there are complex consequences that cascade through multiple aspects of the language, especially the type system. 734 This section shows these consequences and how \CFA handles them. 716 735 717 736 718 737 \subsection{Tuple Expressions} 719 738 720 The addition of multiple-return-value functions (MRVF ) are \emph{useless} without a syntax for accepting multiple values at the call-site.739 The addition of multiple-return-value functions (MRVFs) is \emph{useless} without a syntax for accepting multiple values at the call site. 721 740 The simplest mechanism for capturing the return values is variable assignment, allowing the values to be retrieved directly. 
722 741 As such, \CFA allows assigning multiple values from a function into multiple variables, using a square-bracketed list of lvalue expressions (as above), called a \newterm{tuple}. 723 742 724 However, functions also use \newterm{composition} (nested calls), with the direct consequence that MRVFs must also support composition to be orthogonal with single-returning-value functions (SRVF ), \eg:743 However, functions also use \newterm{composition} (nested calls), with the direct consequence that MRVFs must also support composition to be orthogonal with single-returning-value functions (SRVFs), \eg, \CFA provides the following. 725 744 \begin{cfa} 726 745 printf( "%d %d\n", div( 13, 5 ) ); $\C{// return values separated into arguments}$ 727 746 \end{cfa} 728 747 Here, the values returned by @div@ are composed with the call to @printf@ by flattening the tuple into separate arguments. 729 However, the \CFA type-system must support significantly more complex composition :748 However, the \CFA type-system must support significantly more complex composition. 730 749 \begin{cfa} 731 750 [ int, int ] foo$\(_1\)$( int ); $\C{// overloaded foo functions}$ … … 734 753 `bar`( foo( 3 ), foo( 3 ) ); 735 754 \end{cfa} 736 The type-resolver only has the tuple return-types to resolve the call to @bar@ as the @foo@ parameters are identical, which involves unifying the possible @foo@ functions with @bar@'s parameter list. 737 No combination of @foo@s are an exact match with @bar@'s parameters, so the resolver applies C conversions. 755 The type resolver only has the tuple return types to resolve the call to @bar@ as the @foo@ parameters are identical, which involves unifying the possible @foo@ functions with @bar@'s parameter list. 756 No combination of @foo@s is an exact match with @bar@'s parameters; 757 thus, the resolver applies C conversions. 
758 % FIX 738 759 The minimal cost is @bar( foo@$_1$@( 3 ), foo@$_2$@( 3 ) )@, giving (@int@, {\color{ForestGreen}@int@}, @double@) to (@int@, {\color{ForestGreen}@double@}, @double@) with one {\color{ForestGreen}safe} (widening) conversion from @int@ to @double@ versus ({\color{red}@double@}, {\color{ForestGreen}@int@}, {\color{ForestGreen}@int@}) to ({\color{red}@int@}, {\color{ForestGreen}@double@}, {\color{ForestGreen}@double@}) with one {\color{red}unsafe} (narrowing) conversion from @double@ to @int@ and two safe conversions. 739 760 740 761 741 \subsection{Tuple Variables}762 \subsection{Tuple variables} 742 763 743 764 An important observation from function composition is that new variable names are not required to initialize parameters from an MRVF. 744 \CFA also allows declaration of tuple variables that can be initialized from an MRVF, since it can be awkward to declare multiple variables of different types, \eg: 765 \CFA also allows declaration of tuple variables that can be initialized from an MRVF, since it can be awkward to declare multiple variables of different types. 766 \newpage 745 767 \begin{cfa} 746 768 [ int, int ] qr = div( 13, 5 ); $\C{// tuple-variable declaration and initialization}$ 747 769 [ double, double ] qr = div( 13.5, 5.2 ); 748 770 \end{cfa} 749 where the tuple variable-name serves the same purpose as the parameter name(s).771 Here, the tuple variable name serves the same purpose as the parameter name(s). 750 772 Tuple variables can be composed of any types, except for array types, since array sizes are generally unknown in C. 751 773 752 One way to access the tuple -variable components is with assignment or composition:774 One way to access the tuple variable components is with assignment or composition. 
753 775 \begin{cfa} 754 776 [ q, r ] = qr; $\C{// access tuple-variable components}$ 755 777 printf( "%d %d\n", qr ); 756 778 \end{cfa} 757 \CFA also supports \newterm{tuple indexing} to access single components of a tuple expression :779 \CFA also supports \newterm{tuple indexing} to access single components of a tuple expression. 758 780 \begin{cfa} 759 781 [int, int] * p = &qr; $\C{// tuple pointer}$ … … 766 788 767 789 768 \subsection{Flattening and Restructuring}790 \subsection{Flattening and restructuring} 769 791 770 792 In function call contexts, tuples support implicit flattening and restructuring conversions. 771 793 Tuple flattening recursively expands a tuple into the list of its basic components. 772 Tuple structuring packages a list of expressions into a value of tuple type , \eg:794 Tuple structuring packages a list of expressions into a value of tuple type. 773 795 \begin{cfa} 774 796 int f( int, int ); … … 781 803 h( x, y ); $\C{// flatten and structure}$ 782 804 \end{cfa} 783 In the call to @f@, @x@ is implicitly flattened so the components of @x@ are passed as t he two arguments.805 In the call to @f@, @x@ is implicitly flattened so the components of @x@ are passed as two arguments. 784 806 In the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the parameter type of @g@. 785 807 Finally, in the call to @h@, @x@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@. 786 The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both SRVF and MRVF, and with any number of arguments of arbitrarily complex structure. 
787 788 789 \subsection{Tuple Assignment} 790 808 The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both SRVFs and MRVFs with any number of arguments of arbitrarily complex structure. 809 810 811 \subsection{Tuple assignment} 812 813 \enlargethispage{-10pt} 791 814 An assignment where the left side is a tuple type is called \newterm{tuple assignment}. 792 There are two kinds of tuple assignment depending on whether the right side of the assignment operator has a tuple type or a non -tuple type, called \newterm{multiple} and \newterm{mass assignment}, respectively.815 There are two kinds of tuple assignment depending on whether the right side of the assignment operator has a tuple type or a nontuple type, called \newterm{multiple} and \newterm{mass assignment}, respectively. 793 816 \begin{cfa} 794 817 int x = 10; … … 800 823 [y, x] = 3.14; $\C{// mass assignment}$ 801 824 \end{cfa} 802 Both kinds of tuple assignment have parallel semantics, so that each value on the left and right side is evaluated before any assignments occur.825 Both kinds of tuple assignment have parallel semantics, so that each value on the left and right sides is evaluated before any assignments occur. 803 826 As a result, it is possible to swap the values in two variables without explicitly creating any temporary variables or calling a function, \eg, @[x, y] = [y, x]@. 804 827 This semantics means mass assignment differs from C cascading assignment (\eg @a = b = c@) in that conversions are applied in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment. 805 For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, yielding @y == 3.14@ and @x == 3@; 806 whereas, C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, yielding @3@ in @y@ and @x@. 
828 For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, yielding @y == 3.14@ and @x == 3@, whereas C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, yielding @3@ in @y@ and @x@. 807 829 Finally, tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, just like all other assignment expressions in C. 808 This example shows mass, multiple, and cascading assignment used in one expression :830 This example shows mass, multiple, and cascading assignment used in one expression. 809 831 \begin{cfa} 810 832 [void] f( [int, int] ); … … 813 835 814 836 815 \subsection{Member Access}816 817 It is also possible to access multiple members from a single expression using a \newterm{member -access}.818 The result is a single tuple-valued expression whose type is the tuple of the types of the members , \eg:837 \subsection{Member access} 838 839 It is also possible to access multiple members from a single expression using a \newterm{member access}. 840 The result is a single tuple-valued expression whose type is the tuple of the types of the members. 819 841 \begin{cfa} 820 842 struct S { int x; double y; char * z; } s; … … 830 852 [int, int, int] y = x.[2, 0, 2]; $\C{// duplicate: [y.0, y.1, y.2] = [x.2, x.0, x.2]}$ 831 853 \end{cfa} 832 It is also possible for a member access to contain other member accesses , \eg:854 It is also possible for a member access to contain other member accesses. 833 855 \begin{cfa} 834 856 struct A { double i; int j; }; … … 897 919 898 920 Tuples also integrate with \CFA polymorphism as a kind of generic type. 
899 Due to the implicit flattening and structuring conversions involved in argument passing, @otype@ and @dtype@ parameters are restricted to matching only with non -tuple types, \eg:921 Due to the implicit flattening and structuring conversions involved in argument passing, @otype@ and @dtype@ parameters are restricted to matching only with nontuple types. 900 922 \begin{cfa} 901 923 forall( otype T, dtype U ) void f( T x, U * y ); 902 924 f( [5, "hello"] ); 903 925 \end{cfa} 904 where@[5, "hello"]@ is flattened, giving argument list @5, "hello"@, and @T@ binds to @int@ and @U@ binds to @const char@.926 Here, @[5, "hello"]@ is flattened, giving argument list @5, "hello"@, and @T@ binds to @int@ and @U@ binds to @const char@. 905 927 Tuples, however, may contain polymorphic components. 906 928 For example, a plus operator can be written to sum two triples. … … 920 942 g( 5, 10.21 ); 921 943 \end{cfa} 944 \newpage 922 945 Hence, function parameter and return lists are flattened for the purposes of type unification allowing the example to pass expression resolution. 923 946 This relaxation is possible by extending the thunk scheme described by Bilson~\cite{Bilson03}. … … 930 953 931 954 932 \subsection{Variadic Tuples}955 \subsection{Variadic tuples} 933 956 \label{sec:variadic-tuples} 934 957 935 To define variadic functions, \CFA adds a new kind of type parameter, @ttype@ (tuple type).936 Matching against a @ttype@ parameter consumes all remaining argument components and packages them into a tuple, binding to the resulting tuple of types.937 In a given parameter list, there must be at mostone @ttype@ parameter that occurs last, which matches normal variadic semantics, with a strong feeling of similarity to \CCeleven variadic templates.958 To define variadic functions, \CFA adds a new kind of type parameter, \ie @ttype@ (tuple type). 
959 Matching against a @ttype@ parameter consumes all the remaining argument components and packages them into a tuple, binding to the resulting tuple of types. 960 In a given parameter list, there must be, at most, one @ttype@ parameter that occurs last, which matches normal variadic semantics, with a strong feeling of similarity to \CCeleven variadic templates. 938 961 As such, @ttype@ variables are also called \newterm{argument packs}. 939 962 … … 941 964 Since nothing is known about a parameter pack by default, assertion parameters are key to doing anything meaningful. 942 965 Unlike variadic templates, @ttype@ polymorphic functions can be separately compiled. 943 For example, a generalized @sum@ function:966 For example, the following is a generalized @sum@ function. 944 967 \begin{cfa} 945 968 int sum$\(_0\)$() { return 0; } … … 950 973 \end{cfa} 951 974 Since @sum@\(_0\) does not accept any arguments, it is not a valid candidate function for the call @sum(10, 20, 30)@. 952 In order to call @sum@\(_1\), @10@ is matched with @x@, and the argument resolution moves on to the argument pack @rest@, which consumes the remainder of the argument list and @Params@ is bound to @[20, 30]@.975 In order to call @sum@\(_1\), @10@ is matched with @x@, and the argument resolution moves on to the argument pack @rest@, which consumes the remainder of the argument list, and @Params@ is bound to @[20, 30]@. 953 976 The process continues until @Params@ is bound to @[]@, requiring an assertion @int sum()@, which matches @sum@\(_0\) and terminates the recursion. 954 977 Effectively, this algorithm traces as @sum(10, 20, 30)@ $\rightarrow$ @10 + sum(20, 30)@ $\rightarrow$ @10 + (20 + sum(30))@ $\rightarrow$ @10 + (20 + (30 + sum()))@ $\rightarrow$ @10 + (20 + (30 + 0))@. 
955 978 956 It is reasonable to take the @sum@ function a step further to enforce a minimum number of arguments :979 It is reasonable to take the @sum@ function a step further to enforce a minimum number of arguments. 957 980 \begin{cfa} 958 981 int sum( int x, int y ) { return x + y; } … … 961 984 } 962 985 \end{cfa} 963 One more step permits the summation of any sumable type with all arguments of the same type :986 One more step permits the summation of any sumable type with all arguments of the same type. 964 987 \begin{cfa} 965 988 trait sumable( otype T ) { … … 990 1013 This example showcases a variadic-template-like decomposition of the provided argument list. 991 1014 The individual @print@ functions allow printing a single element of a type. 992 The polymorphic @print@ allows printing any list of types, where aseach individual type has a @print@ function.1015 The polymorphic @print@ allows printing any list of types, where each individual type has a @print@ function. 993 1016 The individual print functions can be used to build up more complicated @print@ functions, such as @S@, which cannot be done with @printf@ in C. 994 1017 This mechanism is used to seamlessly print tuples in the \CFA I/O library (see Section~\ref{s:IOLibrary}). 995 1018 996 1019 Finally, it is possible to use @ttype@ polymorphism to provide arbitrary argument forwarding functions. 997 For example, it is possible to write @new@ as a library function :1020 For example, it is possible to write @new@ as a library function. 998 1021 \begin{cfa} 999 1022 forall( otype R, otype S ) void ?{}( pair(R, S) *, R, S ); … … 1004 1027 \end{cfa} 1005 1028 The @new@ function provides the combination of type-safe @malloc@ with a \CFA constructor call, making it impossible to forget constructing dynamically allocated objects. 
1006 This function provides the type -safety of @new@ in \CC, without the need to specify the allocated type again, thanksto return-type inference.1029 This function provides the type safety of @new@ in \CC, without the need to specify the allocated type again, due to return-type inference. 1007 1030 1008 1031 … … 1010 1033 1011 1034 Tuples are implemented in the \CFA translator via a transformation into \newterm{generic types}. 1012 For each $N$, the first time an $N$-tuple is seen in a scope a generic type with $N$ type parameters is generated, \eg: 1035 For each $N$, the first time an $N$-tuple is seen in a scope, a generic type with $N$ type parameters is generated. 1036 For example, the following 1013 1037 \begin{cfa} 1014 1038 [int, int] f() { … … 1017 1041 } 1018 1042 \end{cfa} 1019 is transformed into :1043 is transformed into 1020 1044 \begin{cfa} 1021 1045 forall( dtype T0, dtype T1 | sized(T0) | sized(T1) ) struct _tuple2 { … … 1083 1107 1084 1108 The various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions. 1085 A variable is generated to store the value produced by a statement expression, since its members may need to be constructed with a non -trivial constructor and it may need to be referred to multiple time, \eg in a unique expression.1109 A variable is generated to store the value produced by a statement expression, since its members may need to be constructed with a nontrivial constructor and it may need to be referred to multiple time, \eg in a unique expression. 1086 1110 The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language. 1087 1111 However, there are other places where the \CFA translator makes use of GNU C extensions, such as its use of nested functions, so this restriction is not new. 
… … 1091 1115 \section{Control Structures} 1092 1116 1093 \CFA identifies inconsistent, problematic, and missing control structures in C, a ndextends, modifies, and adds control structures to increase functionality and safety.1094 1095 1096 \subsection{\texorpdfstring{\protect\lstinline {if} Statement}{if Statement}}1097 1098 The @if@ expression allows declarations, similar to @for@ declaration expression:1117 \CFA identifies inconsistent, problematic, and missing control structures in C, as well as extends, modifies, and adds control structures to increase functionality and safety. 1118 1119 1120 \subsection{\texorpdfstring{\protect\lstinline@if@ statement}{if statement}} 1121 1122 The @if@ expression allows declarations, similar to the @for@ declaration expression. 1099 1123 \begin{cfa} 1100 1124 if ( int x = f() ) ... $\C{// x != 0}$ … … 1103 1127 \end{cfa} 1104 1128 Unless a relational expression is specified, each variable is compared not equal to 0, which is the standard semantics for the @if@ expression, and the results are combined using the logical @&&@ operator.\footnote{\CC only provides a single declaration always compared not equal to 0.} 1105 The scope of the declaration(s) is local to the @if@ statement but exist within both the ``then'' and ``else'' clauses.1106 1107 1108 \subsection{\texorpdfstring{\protect\lstinline {switch} Statement}{switch Statement}}1129 The scope of the declaration(s) is local to the @if@ statement but exists within both the ``then'' and ``else'' clauses. 1130 1131 1132 \subsection{\texorpdfstring{\protect\lstinline@switch@ statement}{switch statement}} 1109 1133 1110 1134 There are a number of deficiencies with the C @switch@ statements: enumerating @case@ lists, placement of @case@ clauses, scope of the switch body, and fall through between case clauses. 
1111 1135 1112 C has no shorthand for specifying a list of case values, whether the list is non -contiguous or contiguous\footnote{C provides this mechanism via fall through.}.1113 \CFA provides a shorthand for a non -contiguous list:1136 C has no shorthand for specifying a list of case values, whether the list is noncontiguous or contiguous\footnote{C provides this mechanism via fall through.}. 1137 \CFA provides a shorthand for a noncontiguous list: 1114 1138 \begin{cquote} 1115 1139 \lstDeleteShortInline@% … … 1126 1150 \lstMakeShortInline@% 1127 1151 \end{cquote} 1128 for a contiguous list:\footnote{gcc has the same mechanism but awkward syntax, \lstinline@2 ...42@, as a space is required after a number, otherwise the first period is a decimal point.} 1152 for a contiguous list:\footnote{gcc has the same mechanism but awkward syntax, \lstinline@2 ...42@, as a space is required after a number; 1153 otherwise, the first period is a decimal point.} 1129 1154 \begin{cquote} 1130 1155 \lstDeleteShortInline@% … … 1157 1182 } 1158 1183 \end{cfa} 1159 \CFA precludes this form of transfer \emph{into} a control structure because it causes undefined behaviour, especially with respect to missed initialization, and provides very limited functionality.1160 1161 C allows placement of declaration within the @switch@ body and unreachable code at the start, resulting in undefined behaviour:1184 \CFA precludes this form of transfer \emph{into} a control structure because it causes an undefined behavior, especially with respect to missed initialization, and provides very limited functionality. 1185 1186 C allows placement of declaration within the @switch@ body and unreachable code at the start, resulting in an undefined behavior. 
1162 1187 \begin{cfa} 1163 1188 switch ( x ) { … … 1176 1201 1177 1202 C @switch@ provides multiple entry points into the statement body, but once an entry point is selected, control continues across \emph{all} @case@ clauses until the end of the @switch@ body, called \newterm{fall through}; 1178 @case@ clauses are made disjoint by the @break@ statement. 1203 @case@ clauses are made disjoint by the @break@ 1204 \newpage 1205 \noindent 1206 statement. 1179 1207 While fall through \emph{is} a useful form of control flow, it does not match well with programmer intuition, resulting in errors from missing @break@ statements. 1180 For backward s compatibility, \CFA provides a \emph{new} control structure,@choose@, which mimics @switch@, but reverses the meaning of fall through (see Figure~\ref{f:ChooseSwitchStatements}), similar to Go.1208 For backward compatibility, \CFA provides a \emph{new} control structure, \ie @choose@, which mimics @switch@, but reverses the meaning of fall through (see Figure~\ref{f:ChooseSwitchStatements}), similar to Go. 
1181 1209 1182 1210 \begin{figure} 1183 1211 \centering 1212 \fontsize{9bp}{11bp}\selectfont 1184 1213 \lstDeleteShortInline@% 1185 1214 \begin{tabular}{@{}l|@{\hspace{\parindentlnth}}l@{}} … … 1218 1247 \end{tabular} 1219 1248 \lstMakeShortInline@% 1220 \caption{\lstinline|choose| versus \lstinline|switch| Statements}1249 \caption{\lstinline|choose| versus \lstinline|switch| statements} 1221 1250 \label{f:ChooseSwitchStatements} 1251 \vspace*{-11pt} 1222 1252 \end{figure} 1223 1253 1224 Finally, Figure~\ref{f:FallthroughStatement} shows @fallthrough@ may appear in contexts other than terminating a @case@ clause , and have an explicit transfer label allowing separate cases but common final-code for a set of cases.1254 Finally, Figure~\ref{f:FallthroughStatement} shows @fallthrough@ may appear in contexts other than terminating a @case@ clause and have an explicit transfer label allowing separate cases but common final code for a set of cases. 1225 1255 The target label must be below the @fallthrough@ and may not be nested in a control structure, \ie @fallthrough@ cannot form a loop, and the target label must be at the same or higher level as the containing @case@ clause and located at the same level as a @case@ clause; 1226 1256 the target label may be case @default@, but only associated with the current @switch@/@choose@ statement. 
… … 1228 1258 \begin{figure} 1229 1259 \centering 1260 \fontsize{9bp}{11bp}\selectfont 1230 1261 \lstDeleteShortInline@% 1231 1262 \begin{tabular}{@{}l|@{\hspace{\parindentlnth}}l@{}} … … 1256 1287 \end{tabular} 1257 1288 \lstMakeShortInline@% 1258 \caption{\lstinline|fallthrough| Statement}1289 \caption{\lstinline|fallthrough| statement} 1259 1290 \label{f:FallthroughStatement} 1291 \vspace*{-11pt} 1260 1292 \end{figure} 1261 1293 1262 1294 1263 \subsection{\texorpdfstring{Labelled \protect\lstinline{continue} / \protect\lstinline{break}}{Labelled continue / break}} 1295 \vspace*{-8pt} 1296 \subsection{\texorpdfstring{Labeled \protect\lstinline@continue@ / \protect\lstinline@break@}{Labeled continue / break}} 1264 1297 1265 1298 While C provides @continue@ and @break@ statements for altering control flow, both are restricted to one level of nesting for a particular control structure. 1266 Unfortunately, this restriction forces programmers to use @goto@ to achieve the equivalent control -flow for more than one level of nesting.1267 To prevent having to switch to the @goto@, \CFA extends the @continue@ and @break@ with a target label to support static multi-level exit~\cite{Buhr85}, as in Java.1299 Unfortunately, this restriction forces programmers to use @goto@ to achieve the equivalent control flow for more than one level of nesting. 1300 To prevent having to switch to the @goto@, \CFA extends @continue@ and @break@ with a target label to support static multilevel exit~\cite{Buhr85}, as in Java. 1268 1301 For both @continue@ and @break@, the target label must be directly associated with a @for@, @while@ or @do@ statement; 1269 1302 for @break@, the target label can also be associated with a @switch@, @if@ or compound (@{}@) statement. 
1270 Figure~\ref{f:MultiLevelExit} shows @continue@ and @break@ indicating the specific control structure ,and the corresponding C program using only @goto@ and labels.1271 The innermost loop has 7 exit points, which cause continuation or termination of one or more of the 7 nested control-structures.1303 Figure~\ref{f:MultiLevelExit} shows @continue@ and @break@ indicating the specific control structure and the corresponding C program using only @goto@ and labels. 1304 The innermost loop has seven exit points, which cause a continuation or termination of one or more of the seven nested control structures. 1272 1305 1273 1306 \begin{figure} 1307 \fontsize{9bp}{11bp}\selectfont 1274 1308 \lstDeleteShortInline@% 1275 1309 \begin{tabular}{@{\hspace{\parindentlnth}}l|@{\hspace{\parindentlnth}}l@{\hspace{\parindentlnth}}l@{}} … … 1336 1370 \end{tabular} 1337 1371 \lstMakeShortInline@% 1338 \caption{Multi -level Exit}1372 \caption{Multilevel exit} 1339 1373 \label{f:MultiLevelExit} 1374 \vspace*{-5pt} 1340 1375 \end{figure} 1341 1376 1342 With respect to safety, both label led @continue@ and @break@ are a @goto@ restricted in the following ways:1343 \begin{ itemize}1377 With respect to safety, both labeled @continue@ and @break@ are @goto@ restricted in the following ways. 1378 \begin{list}{$\bullet$}{\topsep=4pt\itemsep=0pt\parsep=0pt} 1344 1379 \item 1345 1380 They cannot create a loop, which means only the looping constructs cause looping. … … 1347 1382 \item 1348 1383 They cannot branch into a control structure. 1349 This restriction prevents missing declarations and/or initializations at the start of a control structure resulting in undefined behaviour. 1350 \end{itemize} 1351 The advantage of the labelled @continue@/@break@ is allowing static multi-level exits without having to use the @goto@ statement, and tying control flow to the target control structure rather than an arbitrary point in a program. 
1352 Furthermore, the location of the label at the \emph{beginning} of the target control structure informs the reader (eye candy) that complex control-flow is occurring in the body of the control structure. 1384 This restriction prevents missing declarations and/or initializations at the start of a control structure resulting in an undefined behavior. 1385 \end{list} 1386 The advantage of the labeled @continue@/@break@ is allowing static multilevel exits without having to use the @goto@ statement and tying control flow to the target control structure rather than an arbitrary point in a program. 1387 Furthermore, the location of the label at the \emph{beginning} of the target control structure informs the reader (eye candy) that complex control flow is 1388 occurring in the body of the control structure. 1353 1389 With @goto@, the label is at the end of the control structure, which fails to convey this important clue early enough to the reader. 1354 Finally, using an explicit target for the transfer instead of an implicit target allows new constructs to be added or removed without affecting existing constructs.1390 Finally, using an explicit target for the transfer instead of an implicit target allows new constructs to be added or removed without affecting the existing constructs. 1355 1391 Otherwise, the implicit targets of the current @continue@ and @break@, \ie the closest enclosing loop or @switch@, change as certain constructs are added or removed. 1356 1392 1357 1393 1358 \subsection{Exception Handling} 1359 1360 The following framework for \CFA exception-handling is in place, excluding some runtime type-information and virtual functions. 1394 \vspace*{-5pt} 1395 \subsection{Exception handling} 1396 1397 The following framework for \CFA exception handling is in place, excluding some runtime type information and virtual functions. 
1361 1398 \CFA provides two forms of exception handling: \newterm{fix-up} and \newterm{recovery} (see Figure~\ref{f:CFAExceptionHandling})~\cite{Buhr92b,Buhr00a}. 1362 Both mechanisms provide dynamic call to a handler using dynamic name -lookup, where fix-up has dynamic return and recovery has static return from the handler.1399 Both mechanisms provide dynamic call to a handler using dynamic name lookup, where fix-up has dynamic return and recovery has static return from the handler. 1363 1400 \CFA restricts exception types to those defined by aggregate type @exception@. 1364 1401 The form of the raise dictates the set of handlers examined during propagation: \newterm{resumption propagation} (@resume@) only examines resumption handlers (@catchResume@); \newterm{terminating propagation} (@throw@) only examines termination handlers (@catch@). 1365 If @resume@ or @throw@ ha ve no exception type, it is a reresume/rethrow, meaning the currentlyexception continues propagation.1402 If @resume@ or @throw@ has no exception type, it is a reresume/rethrow, which means that the current exception continues propagation. 1366 1403 If there is no current exception, the reresume/rethrow results in a runtime error. 
1367 1404 1368 1405 \begin{figure} 1406 \fontsize{9bp}{11bp}\selectfont 1407 \lstDeleteShortInline@% 1369 1408 \begin{cquote} 1370 \lstDeleteShortInline@%1371 1409 \begin{tabular}{@{}l|@{\hspace{\parindentlnth}}l@{}} 1372 1410 \multicolumn{1}{@{}c|@{\hspace{\parindentlnth}}}{\textbf{Resumption}} & \multicolumn{1}{c@{}}{\textbf{Termination}} \\ … … 1399 1437 \end{cfa} 1400 1438 \end{tabular} 1401 \lstMakeShortInline@%1402 1439 \end{cquote} 1403 \caption{\CFA Exception Handling} 1440 \lstMakeShortInline@% 1441 \caption{\CFA exception handling} 1404 1442 \label{f:CFAExceptionHandling} 1443 \vspace*{-5pt} 1405 1444 \end{figure} 1406 1445 1407 The set of exception types in a list of catch clause may include both a resumption and termination handler:1446 The set of exception types in a list of catch clauses may include both a resumption and a termination handler. 1408 1447 \begin{cfa} 1409 1448 try { … … 1419 1458 The termination handler is available because the resumption propagation did not unwind the stack. 1420 1459 1421 An additional feature is conditional matching in a catch clause :1460 An additional feature is conditional matching in a catch clause. 1422 1461 \begin{cfa} 1423 1462 try { … … 1428 1467 catch ( IOError err ) { ... } $\C{// handler error from other files}$ 1429 1468 \end{cfa} 1430 where the throw inserts the failing file-handle into the I/O exception.1431 Conditional catch cannot be trivially mimicked by other mechanisms because once an exception is caught, handler clauses in that @try@ statement are no longer eligible. .1432 1433 The resumption raise can specify an alternate stack on which to raise an exception, called a \newterm{nonlocal raise} :1469 Here, the throw inserts the failing file handle into the I/O exception. 1470 Conditional catch cannot be trivially mimicked by other mechanisms because once an exception is caught, handler clauses in that @try@ statement are no longer eligible. 
1471 1472 The resumption raise can specify an alternate stack on which to raise an exception, called a \newterm{nonlocal raise}. 1434 1473 \begin{cfa} 1435 1474 resume( $\emph{exception-type}$, $\emph{alternate-stack}$ ) … … 1439 1478 Nonlocal raise is restricted to resumption to provide the exception handler the greatest flexibility because processing the exception does not unwind its stack, allowing it to continue after the handler returns. 1440 1479 1441 To facilitate nonlocal raise, \CFA provides dynamic enabling and disabling of nonlocal exception -propagation.1442 The constructs for controlling propagation of nonlocal exceptions are the @enable@ and the @disable@ blocks:1480 To facilitate nonlocal raise, \CFA provides dynamic enabling and disabling of nonlocal exception propagation. 1481 The constructs for controlling propagation of nonlocal exceptions are the @enable@ and @disable@ blocks. 1443 1482 \begin{cquote} 1444 1483 \lstDeleteShortInline@% … … 1446 1485 \begin{cfa} 1447 1486 enable $\emph{exception-type-list}$ { 1448 // allow non -local raise1487 // allow nonlocal raise 1449 1488 } 1450 1489 \end{cfa} … … 1452 1491 \begin{cfa} 1453 1492 disable $\emph{exception-type-list}$ { 1454 // disallow non -local raise1493 // disallow nonlocal raise 1455 1494 } 1456 1495 \end{cfa} … … 1460 1499 The arguments for @enable@/@disable@ specify the exception types allowed to be propagated or postponed, respectively. 1461 1500 Specifying no exception type is shorthand for specifying all exception types. 1462 Both @enable@ and @disable@ blocks can be nested, turning propagation on/off on entry, and on exit, the specified exception types are restored to their prior state. 1463 Coroutines and tasks start with non-local exceptions disabled, allowing handlers to be put in place, before non-local exceptions are explicitly enabled. 
1501 Both @enable@ and @disable@ blocks can be nested; 1502 turning propagation on/off on entry and on exit, the specified exception types are restored to their prior state. 1503 Coroutines and tasks start with nonlocal exceptions disabled, allowing handlers to be put in place, before nonlocal exceptions are explicitly enabled. 1464 1504 \begin{cfa} 1465 1505 void main( mytask & t ) { $\C{// thread starts here}$ 1466 // non -local exceptions disabled1467 try { $\C{// establish handles for non -local exceptions}$1468 enable { $\C{// allow non -local exception delivery}$1506 // nonlocal exceptions disabled 1507 try { $\C{// establish handles for nonlocal exceptions}$ 1508 enable { $\C{// allow nonlocal exception delivery}$ 1469 1509 // task body 1470 1510 } … … 1474 1514 \end{cfa} 1475 1515 1476 Finally, \CFA provides a Java like @finally@ clause after the catch clauses:1516 Finally, \CFA provides a Java-like @finally@ clause after the catch clauses. 1477 1517 \begin{cfa} 1478 1518 try { … … 1483 1523 } 1484 1524 \end{cfa} 1485 The finally clause is always executed, i.e., if the try block ends normally or if an exception is raised.1525 The finally clause is always executed, \ie, if the try block ends normally or if an exception is raised. 1486 1526 If an exception is raised and caught, the handler is run before the finally clause. 1487 1527 Like a destructor (see Section~\ref{s:ConstructorsDestructors}), a finally clause can raise an exception but not if there is an exception being propagated. 1488 Mimicking the @finally@ clause with mechanisms like R AII is non-trivial when there are multiple types and local accesses.1489 1490 1491 \subsection{\texorpdfstring{\protect\lstinline{with} Statement}{with Statement}}1528 Mimicking the @finally@ clause with mechanisms like Resource Aquisition Is Initialization (RAII) is nontrivial when there are multiple types and local accesses. 
1529 1530 1531 \subsection{\texorpdfstring{\protect\lstinline{with} statement}{with statement}} 1492 1532 \label{s:WithStatement} 1493 1533 1494 Heterogeneous data isoften aggregated into a structure/union.1495 To reduce syntactic noise, \CFA provides a @with@ statement (see Pascal~\cite[\S~4.F]{Pascal}) to elide aggregate member-qualification by opening a scope containing the member identifiers.1534 Heterogeneous data are often aggregated into a structure/union. 1535 To reduce syntactic noise, \CFA provides a @with@ statement (see section~4.F in the Pascal User Manual and Report~\cite{Pascal}) to elide aggregate member qualification by opening a scope containing the member identifiers. 1496 1536 \begin{cquote} 1497 1537 \vspace*{-\baselineskip}%??? … … 1521 1561 Object-oriented programming languages only provide implicit qualification for the receiver. 1522 1562 1523 In detail, the @with@ statement has the form :1563 In detail, the @with@ statement has the form 1524 1564 \begin{cfa} 1525 1565 $\emph{with-statement}$: … … 1527 1567 \end{cfa} 1528 1568 and may appear as the body of a function or nested within a function body. 1529 Each expression in the expression -list provides a type and object.1569 Each expression in the expression list provides a type and object. 1530 1570 The type must be an aggregate type. 1531 1571 (Enumerations are already opened.) 1532 The object is the implicit qualifier for the open structure -members.1572 The object is the implicit qualifier for the open structure members. 1533 1573 1534 1574 All expressions in the expression list are open in parallel within the compound statement, which is different from Pascal, which nests the openings from left to right. 1535 The difference between parallel and nesting occurs for members with the same name and type :1575 The difference between parallel and nesting occurs for members with the same name and type. 
1536 1576 \begin{cfa} 1537 1577 struct S { int `i`; int j; double m; } s, w; $\C{// member i has same type in structure types S and T}$ … … 1547 1587 } 1548 1588 \end{cfa} 1549 For parallel semantics, both @s.i@ and @t.i@ are visible , so@i@ is ambiguous without qualification;1550 for nested semantics, @t.i@ hides @s.i@ , so@i@ implies @t.i@.1589 For parallel semantics, both @s.i@ and @t.i@ are visible and, therefore, @i@ is ambiguous without qualification; 1590 for nested semantics, @t.i@ hides @s.i@ and, therefore, @i@ implies @t.i@. 1551 1591 \CFA's ability to overload variables means members with the same name but different types are automatically disambiguated, eliminating most qualification when opening multiple aggregates. 1552 1592 Qualification or a cast is used to disambiguate. 1553 1593 1554 There is an interesting problem between parameters and the function -body @with@, \eg:1594 There is an interesting problem between parameters and the function body @with@. 1555 1595 \begin{cfa} 1556 1596 void ?{}( S & s, int i ) with ( s ) { $\C{// constructor}$ … … 1558 1598 } 1559 1599 \end{cfa} 1560 Here, the assignment @s.i = i@ means @s.i = s.i@, which is meaningless, and there is no mechanism to qualify the parameter @i@, making the assignment impossible using the function -body @with@.1561 To solve this problem, parameters are treated like an initialized aggregate :1600 Here, the assignment @s.i = i@ means @s.i = s.i@, which is meaningless, and there is no mechanism to qualify the parameter @i@, making the assignment impossible using the function body @with@. 
1601 To solve this problem, parameters are treated like an initialized aggregate 1562 1602 \begin{cfa} 1563 1603 struct Params { … … 1566 1606 } params; 1567 1607 \end{cfa} 1568 and implicitly opened \emph{after} a function-body open, to give them higher priority: 1608 \newpage 1609 and implicitly opened \emph{after} a function body open, to give them higher priority 1569 1610 \begin{cfa} 1570 1611 void ?{}( S & s, int `i` ) with ( s ) `{` `with( $\emph{\color{red}params}$ )` { … … 1572 1613 } `}` 1573 1614 \end{cfa} 1574 Finally, a cast may be used to disambiguate among overload variables in a @with@ expression :1615 Finally, a cast may be used to disambiguate among overload variables in a @with@ expression 1575 1616 \begin{cfa} 1576 1617 with ( w ) { ... } $\C{// ambiguous, same name and no context}$ 1577 1618 with ( (S)w ) { ... } $\C{// unambiguous, cast}$ 1578 1619 \end{cfa} 1579 and @with@ expressions may be complex expressions with type reference (see Section~\ref{s:References}) to aggregate :1620 and @with@ expressions may be complex expressions with type reference (see Section~\ref{s:References}) to aggregate 1580 1621 \begin{cfa} 1581 1622 struct S { int i, j; } sv; … … 1601 1642 \CFA attempts to correct and add to C declarations, while ensuring \CFA subjectively ``feels like'' C. 1602 1643 An important part of this subjective feel is maintaining C's syntax and procedural paradigm, as opposed to functional and object-oriented approaches in other systems languages such as \CC and Rust. 1603 Maintaining the C approach means that C coding -patterns remain not only useable but idiomatic in \CFA, reducing the mental burden of retraining C programmers and switching between C and \CFA development.1644 Maintaining the C approach means that C coding patterns remain not only useable but idiomatic in \CFA, reducing the mental burden of retraining C programmers and switching between C and \CFA development. 
1604 1645 Nevertheless, some features from other approaches are undeniably convenient; 1605 1646 \CFA attempts to adapt these features to the C paradigm. 1606 1647 1607 1648 1608 \subsection{Alternative Declaration Syntax}1649 \subsection{Alternative declaration syntax} 1609 1650 1610 1651 C declaration syntax is notoriously confusing and error prone. 1611 For example, many C programmers are confused by a declaration as simple as :1652 For example, many C programmers are confused by a declaration as simple as the following. 1612 1653 \begin{cquote} 1613 1654 \lstDeleteShortInline@% … … 1621 1662 \lstMakeShortInline@% 1622 1663 \end{cquote} 1623 Is this an array of 5 pointers to integers or a pointer to an array of 5integers?1664 Is this an array of five pointers to integers or a pointer to an array of five integers? 1624 1665 If there is any doubt, it implies productivity and safety issues even for basic programs. 1625 1666 Another example of confusion results from the fact that a function name and its parameters are embedded within the return type, mimicking the way the return value is used at the function's call site. 1626 For example, a function returning a pointer to an array of integers is defined and used in the following way :1667 For example, a function returning a pointer to an array of integers is defined and used in the following way. 1627 1668 \begin{cfa} 1628 1669 int `(*`f`())[`5`]` {...}; $\C{// definition}$ … … 1632 1673 While attempting to make the two contexts consistent is a laudable goal, it has not worked out in practice. 1633 1674 1634 \CFA provides its own type, variable and function declarations, using a different syntax~\cite[pp.~856--859]{Buhr94a}. 1635 The new declarations place qualifiers to the left of the base type, while C declarations place qualifiers to the right. 1675 \newpage 1676 \CFA provides its own type, variable, and function declarations, using a different syntax~\cite[pp.~856--859]{Buhr94a}. 
1677 The new declarations place qualifiers to the left of the base type, whereas C declarations place qualifiers to the right. 1636 1678 The qualifiers have the same meaning but are ordered left to right to specify a variable's type. 1637 1679 \begin{cquote} … … 1659 1701 \lstMakeShortInline@% 1660 1702 \end{cquote} 1661 The only exception is bit-field specification, which always appear to the right of the base type.1703 The only exception is bit-field specification, which always appears to the right of the base type. 1662 1704 % Specifically, the character @*@ is used to indicate a pointer, square brackets @[@\,@]@ are used to represent an array or function return value, and parentheses @()@ are used to indicate a function parameter. 1663 1705 However, unlike C, \CFA type declaration tokens are distributed across all variables in the declaration list. 1664 For instance, variables @x@ and @y@ of type pointer to integer are defined in \CFA as follows:1706 For instance, variables @x@ and @y@ of type pointer to integer are defined in \CFA as 1665 1707 \begin{cquote} 1666 1708 \lstDeleteShortInline@% … … 1725 1767 \end{comment} 1726 1768 1727 All specifiers (@extern@, @static@, \etc) and qualifiers (@const@, @volatile@, \etc) are used in the normal way with the new declarations and also appear left to right , \eg:1769 All specifiers (@extern@, @static@, \etc) and qualifiers (@const@, @volatile@, \etc) are used in the normal way with the new declarations and also appear left to right. 
1728 1770 \begin{cquote} 1729 1771 \lstDeleteShortInline@% 1730 1772 \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{\hspace{2\parindentlnth}}l@{}} 1731 1773 \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{\hspace{2\parindentlnth}}}{\textbf{C}} \\ 1732 \begin{cfa} 1774 \begin{cfa}[basicstyle=\linespread{0.9}\fontsize{9bp}{12bp}\selectfont\sf] 1733 1775 extern const * const int x; 1734 1776 static const * [5] const int y; 1735 1777 \end{cfa} 1736 1778 & 1737 \begin{cfa} 1779 \begin{cfa}[basicstyle=\linespread{0.9}\fontsize{9bp}{12bp}\selectfont\sf] 1738 1780 int extern const * const x; 1739 1781 static const int (* const y)[5] 1740 1782 \end{cfa} 1741 1783 & 1742 \begin{cfa} 1784 \begin{cfa}[basicstyle=\linespread{0.9}\fontsize{9bp}{12bp}\selectfont\sf] 1743 1785 // external const pointer to const int 1744 1786 // internal const pointer to array of 5 const int … … 1748 1790 \end{cquote} 1749 1791 Specifiers must appear at the start of a \CFA function declaration\footnote{\label{StorageClassSpecifier} 1750 The placement of a storage-class specifier other than at the beginning of the declaration specifiers in a declaration is an obsolescent feature .~\cite[\S~6.11.5(1)]{C11}}.1792 The placement of a storage-class specifier other than at the beginning of the declaration specifiers in a declaration is an obsolescent feature (see section~6.11.5(1) in ISO/IEC 9899~\cite{C11}).}. 1751 1793 1752 1794 The new declaration syntax can be used in other contexts where types are required, \eg casts and the pseudo-function @sizeof@: … … 1769 1811 1770 1812 The syntax of the new function-prototype declaration follows directly from the new function-definition syntax; 1771 a s well, parameter names are optional, \eg:1813 also, parameter names are optional. 
1772 1814 \begin{cfa} 1773 1815 [ int x ] f ( /* void */ ); $\C[2.5in]{// returning int with no parameters}$ … … 1777 1819 [ * int, int ] j ( int ); $\C{// returning pointer to int and int with int parameter}$ 1778 1820 \end{cfa} 1779 This syntax allows a prototype declaration to be created by cutting and pasting source text from the function-definition header (or vice versa).1780 Like C, it is possible to declare multiple function -prototypes in a single declaration, where the return type is distributed across \emph{all} function names in the declaration list, \eg:1821 This syntax allows a prototype declaration to be created by cutting and pasting the source text from the function-definition header (or vice versa). 1822 Like C, it is possible to declare multiple function prototypes in a single declaration, where the return type is distributed across \emph{all} function names in the declaration list. 1781 1823 \begin{cquote} 1782 1824 \lstDeleteShortInline@% … … 1793 1835 \lstMakeShortInline@% 1794 1836 \end{cquote} 1795 where\CFA allows the last function in the list to define its body.1796 1797 The syntax for pointers to \CFA functions specifies the pointer name on the right , \eg:1837 Here, \CFA allows the last function in the list to define its body. 1838 1839 The syntax for pointers to \CFA functions specifies the pointer name on the right. 1798 1840 \begin{cfa} 1799 1841 * [ int x ] () fp; $\C{// pointer to function returning int with no parameters}$ … … 1802 1844 * [ * int, int ] ( int ) jp; $\C{// pointer to function returning pointer to int and int with int parameter}\CRT$ 1803 1845 \end{cfa} 1804 Note, the name of the function pointer is specified last, as for other variable declarations. 1805 1806 Finally, new \CFA declarations may appear together with C declarations in the same program block, but cannot be mixed within a specific declaration. 
1807 Therefore, a programmer has the option of either continuing to use traditional C declarations or take advantage of the new style. 1808 Clearly, both styles need to be supported for some time due to existing C-style header-files, particularly for UNIX-like systems. 1846 \newpage 1847 \noindent 1848 Note that the name of the function pointer is specified last, as for other variable declarations. 1849 1850 Finally, new \CFA declarations may appear together with C declarations in the same program block but cannot be mixed within a specific declaration. 1851 Therefore, a programmer has the option of either continuing to use traditional C declarations or taking advantage of the new style. 1852 Clearly, both styles need to be supported for some time due to existing C-style header files, particularly for UNIX-like systems. 1809 1853 1810 1854 … … 1814 1858 All variables in C have an \newterm{address}, a \newterm{value}, and a \newterm{type}; 1815 1859 at the position in the program's memory denoted by the address, there exists a sequence of bits (the value), with the length and semantic meaning of this bit sequence defined by the type. 
1816 The C type -system does not always track the relationship between a value and its address;1817 a value that does not have a corresponding address is called a \newterm{rvalue} (for ``right-hand value''), while a value that does have an address is called a\newterm{lvalue} (for ``left-hand value'').1818 For example, in @int x; x = 42;@ the variable expression @x@ on the left-hand -side of the assignment is a lvalue, while the constant expression @42@ on the right-hand-side of the assignment is arvalue.1819 Despite the nomenclature of ``left-hand'' and ``right-hand'', an expression's classification as lvalue or rvalue is entirely dependent on whether it has an address or not; in imperative programming, the address of a value is used for both reading and writing (mutating) a value, and as such, lvalues can be convertedto rvalues and read from, but rvalues cannot be mutated because they lack a location to store the updated value.1860 The C type system does not always track the relationship between a value and its address; 1861 a value that does not have a corresponding address is called an \newterm{rvalue} (for ``right-hand value''), whereas a value that does have an address is called an \newterm{lvalue} (for ``left-hand value''). 1862 For example, in @int x; x = 42;@ the variable expression @x@ on the left-hand side of the assignment is an lvalue, whereas the constant expression @42@ on the right-hand side of the assignment is an rvalue. 1863 Despite the nomenclature of ``left-hand'' and ``right-hand'', an expression's classification as an lvalue or an rvalue is entirely dependent on whether it has an address or not; in imperative programming, the address of a value is used for both reading and writing (mutating) a value, and as such, lvalues can be converted into rvalues and read from, but rvalues cannot be mutated because they lack a location to store the updated value. 
1820 1864 1821 1865 Within a lexical scope, lvalue expressions have an \newterm{address interpretation} for writing a value or a \newterm{value interpretation} to read a value. 1822 For example, in @x = y@, @x@ has an address interpretation, wh ile@y@ has a value interpretation.1866 For example, in @x = y@, @x@ has an address interpretation, whereas @y@ has a value interpretation. 1823 1867 While this duality of interpretation is useful, C lacks a direct mechanism to pass lvalues between contexts, instead relying on \newterm{pointer types} to serve a similar purpose. 1824 1868 In C, for any type @T@ there is a pointer type @T *@, the value of which is the address of a value of type @T@. 1825 A pointer rvalue can be explicitly \newterm{dereferenced} to the pointed-to lvalue with the dereference operator @*?@, while the rvalue representing the address of a lvalue can be obtained with the address-of operator @&?@. 1826 1869 A pointer rvalue can be explicitly \newterm{dereferenced} to the pointed-to lvalue with the dereference operator @*?@, whereas the rvalue representing the address of an lvalue can be obtained with the address-of operator @&?@. 1827 1870 \begin{cfa} 1828 1871 int x = 1, y = 2, * p1, * p2, ** p3; … … 1832 1875 *p2 = ((*p1 + *p2) * (**p3 - *p1)) / (**p3 - 15); 1833 1876 \end{cfa} 1834 1835 1877 Unfortunately, the dereference and address-of operators introduce a great deal of syntactic noise when dealing with pointed-to values rather than pointers, as well as the potential for subtle bugs because of pointer arithmetic. 1836 1878 For both brevity and clarity, it is desirable for the compiler to figure out how to elide the dereference operators in a complex expression such as the assignment to @*p2@ above. 
1837 However, since C defines a number of forms of \newterm{pointer arithmetic}, two similar expressions involving pointers to arithmetic types (\eg @*p1 + x@ and @p1 + x@) may each have well-defined but distinct semantics, introducing the possibility that a programmer may write one when they mean the other ,and precluding any simple algorithm for elision of dereference operators.1879 However, since C defines a number of forms of \newterm{pointer arithmetic}, two similar expressions involving pointers to arithmetic types (\eg @*p1 + x@ and @p1 + x@) may each have well-defined but distinct semantics, introducing the possibility that a programmer may write one when they mean the other and precluding any simple algorithm for elision of dereference operators. 1838 1880 To solve these problems, \CFA introduces reference types @T &@; 1839 a @T &@ has exactly the same value as a @T *@, but where the @T *@ takes the address interpretation by default, a @T &@ takes the value interpretation by default, as below: 1840 1881 a @T &@ has exactly the same value as a @T *@, but where the @T *@ takes the address interpretation by default, a @T &@ takes the value interpretation by default, as below. 1841 1882 \begin{cfa} 1842 1883 int x = 1, y = 2, & r1, & r2, && r3; … … 1846 1887 r2 = ((r1 + r2) * (r3 - r1)) / (r3 - 15); $\C{// implicit dereferencing}$ 1847 1888 \end{cfa} 1848 1849 1889 Except for auto-dereferencing by the compiler, this reference example is exactly the same as the previous pointer example. 
1850 Hence, a reference behaves like a variable name -- an lvalue expression which is interpreted as a value --but also has the type system track the address of that value.1851 One way to conceptualize a reference is via a rewrite rule, where the compiler inserts a dereference operator before the reference variable for each reference qualifier in the reference variable declaration , so the previous example implicitly acts like:1852 1890 Hence, a reference behaves like a variable name---an lvalue expression that is interpreted as a value---but also has the type system track the address of that value. 1891 One way to conceptualize a reference is via a rewrite rule, where the compiler inserts a dereference operator before the reference variable for each reference qualifier in the reference variable declaration; 1892 thus, the previous example implicitly acts like the following. 1853 1893 \begin{cfa} 1854 1894 `*`r2 = ((`*`r1 + `*`r2) * (`**`r3 - `*`r1)) / (`**`r3 - 15); 1855 1895 \end{cfa} 1856 1857 1896 References in \CFA are similar to those in \CC, with important improvements, which can be seen in the example above. 1858 1897 Firstly, \CFA does not forbid references to references. 1859 This provides a much more orthogonal design for library implementors, obviating the need for workarounds such as @std::reference_wrapper@.1898 This provides a much more orthogonal design for library \mbox{implementors}, obviating the need for workarounds such as @std::reference_wrapper@. 1860 1899 Secondly, \CFA references are rebindable, whereas \CC references have a fixed address. 
1861 Rebinding allows \CFA references to be default -initialized (\eg to a null pointer\footnote{1862 While effort has been made into non-null reference checking in \CC and Java, the exercise seems moot for any non -managed languages (C/\CC), given that it only handles one of many different error situations, \eg using a pointer after its storage is deleted.}) and point to different addresses throughout their lifetime, like pointers.1900 Rebinding allows \CFA references to be default initialized (\eg to a null pointer\footnote{ 1901 While effort has been made into non-null reference checking in \CC and Java, the exercise seems moot for any nonmanaged languages (C/\CC), given that it only handles one of many different error situations, \eg using a pointer after its storage is deleted.}) and point to different addresses throughout their lifetime, like pointers. 1863 1902 Rebinding is accomplished by extending the existing syntax and semantics of the address-of operator in C. 1864 1903 1865 In C, the address of a lvalue is always a rvalue, as in general that address is not stored anywhere in memory,and does not itself have an address.1866 In \CFA, the address of a @T &@ is a lvalue @T *@, as the address of the underlying @T@ is stored in the reference,and can thus be mutated there.1904 In C, the address of an lvalue is always an rvalue, as, in general, that address is not stored anywhere in memory and does not itself have an address. 1905 In \CFA, the address of a @T &@ is an lvalue @T *@, as the address of the underlying @T@ is stored in the reference and can thus be mutated there. 1867 1906 The result of this rule is that any reference can be rebound using the existing pointer assignment semantics by assigning a compatible pointer into the address of the reference, \eg @&r1 = &x;@ above. 
1868 1907 This rebinding occurs to an arbitrary depth of reference nesting; 1869 1908 loosely speaking, nested address-of operators produce a nested lvalue pointer up to the depth of the reference. 1870 1909 These explicit address-of operators can be thought of as ``cancelling out'' the implicit dereference operators, \eg @(&`*`)r1 = &x@ or @(&(&`*`)`*`)r3 = &(&`*`)r1@ or even @(&`*`)r2 = (&`*`)`*`r3@ for @&r2 = &r3@. 1871 More precisely: 1910 The precise rules are 1872 1911 \begin{itemize} 1873 1912 \item 1874 if @R@ is an rvalue of type {@T &@$_1 \cdots$@ &@$_r$} where $r \ge 1$ references (@&@ symbols) then @&R@ has type {@T `*`&@$_{\color{red}2} \cdots$@ &@$_{\color{red}r}$}, \\ \ie @T@ pointer with $r-1$ references (@&@ symbols). 1875 1913 If @R@ is an rvalue of type @T &@$_1\cdots$ @&@$_r$, where $r \ge 1$ references (@&@ symbols), than @&R@ has type @T `*`&@$_{\color{red}2}\cdots$ @&@$_{\color{red}r}$, \ie @T@ pointer with $r-1$ references (@&@ symbols). 1876 1914 \item 1877 if @L@ is an lvalue of type {@T &@$_1 \cdots$@ &@$_l$} where $l \ge 0$ references (@&@ symbols) then @&L@ has type {@T `*`&@$_{\color{red}1} \cdots$@ &@$_{\color{red}l}$}, \\\ie @T@ pointer with $l$ references (@&@ symbols).1915 If @L@ is an lvalue of type @T &@$_1\cdots$ @&@$_l$, where $l \ge 0$ references (@&@ symbols), than @&L@ has type @T `*`&@$_{\color{red}1}\cdots$ @&@$_{\color{red}l}$, \ie @T@ pointer with $l$ references (@&@ symbols). 1878 1916 \end{itemize} 1879 Since pointers and references share the same internal representation, code using either is equally performant; in fact the \CFA compiler converts references to pointers internally, and the choice between them is made solely on convenience, \eg many pointer or value accesses. 
1917 Since pointers and references share the same internal representation, code using either is equally performant; 1918 in fact, the \CFA compiler converts references into pointers internally, and the choice between them is made solely on convenience, \eg many pointer or value accesses. 1880 1919 1881 1920 By analogy to pointers, \CFA references also allow cv-qualifiers such as @const@: … … 1892 1931 There are three initialization contexts in \CFA: declaration initialization, argument/parameter binding, and return/temporary binding. 1893 1932 In each of these contexts, the address-of operator on the target lvalue is elided. 1894 The syntactic motivation is clearest when considering overloaded operator -assignment, \eg @int ?+=?(int &, int)@; given @int x, y@, the expected call syntax is @x += y@, not @&x += y@.1895 1896 More generally, this initialization of references from lvalues rather than pointers is an instance of a ``lvalue-to-reference'' conversion rather than an elision of the address-of operator;1933 The syntactic motivation is clearest when considering overloaded operator assignment, \eg @int ?+=?(int &, int)@; given @int x, y@, the expected call syntax is @x += y@, not @&x += y@. 1934 1935 More generally, this initialization of references from lvalues rather than pointers is an instance of an ``lvalue-to-reference'' conversion rather than an elision of the address-of operator; 1897 1936 this conversion is used in any context in \CFA where an implicit conversion is allowed. 
1898 Similarly, use of athe value pointed to by a reference in an rvalue context can be thought of as a ``reference-to-rvalue'' conversion, and \CFA also includes a qualifier-adding ``reference-to-reference'' conversion, analogous to the @T *@ to @const T *@ conversion in standard C.1899 The final reference conversion included in \CFA is ``rvalue-to-reference'' conversion, implemented by means of an implicit temporary.1937 Similarly, use of the value pointed to by a reference in an rvalue context can be thought of as a ``reference-to-rvalue'' conversion, and \CFA also includes a qualifier-adding ``reference-to-reference'' conversion, analogous to the @T *@ to @const T *@ conversion in standard C. 1938 The final reference conversion included in \CFA is an ``rvalue-to-reference'' conversion, implemented by means of an implicit temporary. 1900 1939 When an rvalue is used to initialize a reference, it is instead used to initialize a hidden temporary value with the same lexical scope as the reference, and the reference is initialized to the address of this temporary. 1901 1940 \begin{cfa} … … 1905 1944 f( 3, x + y, (S){ 1.0, 7.0 }, (int [3]){ 1, 2, 3 } ); $\C{// pass rvalue to lvalue \(\Rightarrow\) implicit temporary}$ 1906 1945 \end{cfa} 1907 This allows complex values to be succinctly and efficiently passed to functions, without the syntactic overhead of explicit definition of a temporary variable or the runtime cost of pass-by-value. 1908 \CC allows a similar binding, but only for @const@ references; the more general semantics of \CFA are an attempt to avoid the \newterm{const poisoning} problem~\cite{Taylor10}, in which addition of a @const@ qualifier to one reference requires a cascading chain of added qualifiers. 1909 1910 1911 \subsection{Type Nesting} 1912 1913 Nested types provide a mechanism to organize associated types and refactor a subset of members into a named aggregate (\eg sub-aggregates @name@, @address@, @department@, within aggregate @employe@). 
1914 Java nested types are dynamic (apply to objects), \CC are static (apply to the \lstinline[language=C++]@class@), and C hoists (refactors) nested types into the enclosing scope, meaning there is no need for type qualification. 1915 Since \CFA in not object-oriented, adopting dynamic scoping does not make sense; 1916 instead \CFA adopts \CC static nesting, using the member-selection operator ``@.@'' for type qualification, as does Java, rather than the \CC type-selection operator ``@::@'' (see Figure~\ref{f:TypeNestingQualification}). 1946 This allows complex values to be succinctly and efficiently passed to functions, without the syntactic overhead of the explicit definition of a temporary variable or the runtime cost of pass-by-value. 1947 \CC allows a similar binding, but only for @const@ references; the more general semantics of \CFA are an attempt to avoid the \newterm{const poisoning} problem~\cite{Taylor10}, in which the addition of a @const@ qualifier to one reference requires a cascading chain of added qualifiers. 1948 1949 1950 \subsection{Type nesting} 1951 1952 Nested types provide a mechanism to organize associated types and refactor a subset of members into a named aggregate (\eg subaggregates @name@, @address@, @department@, within aggregate @employe@). 1953 Java nested types are dynamic (apply to objects), \CC are static (apply to the \lstinline[language=C++]@class@), and C hoists (refactors) nested types into the enclosing scope, which means there is no need for type qualification. 1954 Since \CFA in not object oriented, adopting dynamic scoping does not make sense; 1955 instead, \CFA adopts \CC static nesting, using the member-selection operator ``@.@'' for type qualification, as does Java, rather than the \CC type-selection operator ``@::@'' (see Figure~\ref{f:TypeNestingQualification}). 1956 In the C left example, types @C@, @U@ and @T@ are implicitly hoisted outside of type @S@ into the containing block scope. 
1957 In the \CFA right example, the types are not hoisted and accessible. 1958 1917 1959 \begin{figure} 1918 1960 \centering 1961 \fontsize{9bp}{11bp}\selectfont\sf 1919 1962 \lstDeleteShortInline@% 1920 1963 \begin{tabular}{@{}l@{\hspace{3em}}l|l@{}} … … 1978 2021 \end{tabular} 1979 2022 \lstMakeShortInline@% 1980 \caption{Type Nesting / Qualification}2023 \caption{Type nesting / qualification} 1981 2024 \label{f:TypeNestingQualification} 2025 \vspace*{-8pt} 1982 2026 \end{figure} 1983 In the C left example, types @C@, @U@ and @T@ are implicitly hoisted outside of type @S@ into the containing block scope. 1984 In the \CFA right example, the types are not hoisted and accessible. 1985 1986 1987 \subsection{Constructors and Destructors} 2027 2028 2029 \vspace*{-8pt} 2030 \subsection{Constructors and destructors} 1988 2031 \label{s:ConstructorsDestructors} 1989 2032 1990 One of the strengths (and weaknesses) of C is memory-management control, allowing resource release to be precisely specified versus unknown release with garbage-collected memory -management.2033 One of the strengths (and weaknesses) of C is memory-management control, allowing resource release to be precisely specified versus unknown release with garbage-collected memory management. 1991 2034 However, this manual approach is verbose, and it is useful to manage resources other than memory (\eg file handles) using the same mechanism as memory. 1992 \CC addresses these issues using R esource Aquisition Is Initialization (RAII), implemented by means of \newterm{constructor} and \newterm{destructor} functions;2035 \CC addresses these issues using RAII, implemented by means of \newterm{constructor} and \newterm{destructor} functions; 1993 2036 \CFA adopts constructors and destructors (and @finally@) to facilitate RAII. 
1994 While constructors and destructors are a common feature of object-oriented programming -languages, they are an independent capability allowing \CFA to adopt them while retaining a procedural paradigm.1995 Specifically, \CFA constructors and destructors are denoted by name and first parameter -type versus name and nesting in an aggregate type.2037 While constructors and destructors are a common feature of object-oriented programming languages, they are an independent capability allowing \CFA to adopt them while retaining a procedural paradigm. 2038 Specifically, \CFA constructors and destructors are denoted by name and first parameter type versus name and nesting in an aggregate type. 1996 2039 Constructor calls seamlessly integrate with existing C initialization syntax, providing a simple and familiar syntax to C programmers and allowing constructor calls to be inserted into legacy C code with minimal code changes. 1997 2040 … … 2002 2045 The constructor and destructor have return type @void@, and the first parameter is a reference to the object type to be constructed or destructed. 2003 2046 While the first parameter is informally called the @this@ parameter, as in object-oriented languages, any variable name may be used. 2004 Both constructors and destructors allow additional parameters after the @this@ parameter for specifying values for initialization/de -initialization\footnote{2005 Destruction parameters are useful for specifying storage-management actions, such as de -initialize but not deallocate.}.2006 \begin{cfa} 2047 Both constructors and destructors allow additional parameters after the @this@ parameter for specifying values for initialization/deinitialization\footnote{ 2048 Destruction parameters are useful for specifying storage-management actions, such as deinitialize but not deallocate.}. 
2049 \begin{cfa}[basicstyle=\linespread{0.9}\fontsize{9bp}{11bp}\selectfont\sf] 2007 2050 struct VLA { int size, * data; }; $\C{// variable length array of integers}$ 2008 2051 void ?{}( VLA & vla ) with ( vla ) { size = 10; data = alloc( size ); } $\C{// default constructor}$ … … 2013 2056 \end{cfa} 2014 2057 @VLA@ is a \newterm{managed type}\footnote{ 2015 A managed type affects the runtime environment versus a self-contained type.}: a type requiring a non -trivial constructor or destructor, or with a member of a managed type.2058 A managed type affects the runtime environment versus a self-contained type.}: a type requiring a nontrivial constructor or destructor, or with a member of a managed type. 2016 2059 A managed type is implicitly constructed at allocation and destructed at deallocation to ensure proper interaction with runtime resources, in this case, the @data@ array in the heap. 2017 For details of the code-generation placement of implicit constructor and destructor calls among complex executable statements see~\cite[\S~2.2]{Schluntz17}.2018 2019 \CFA also provides syntax for \newterm{initialization} and \newterm{copy} :2060 For details of the code-generation placement of implicit constructor and destructor calls among complex executable statements, see section~2.2 in the work of Schlintz~\cite{Schluntz17}. 2061 2062 \CFA also provides syntax for \newterm{initialization} and \newterm{copy}. 2020 2063 \begin{cfa} 2021 2064 void ?{}( VLA & vla, int size, char fill = '\0' ) { $\C{// initialization}$ … … 2026 2069 } 2027 2070 \end{cfa} 2028 (Note ,the example is purposely simplified using shallow-copy semantics.)2029 An initialization constructor -call has the same syntax as a C initializer, except the initialization values are passed as arguments to a matching constructor (number and type of paremeters).2071 (Note that the example is purposely simplified using shallow-copy semantics.) 
2072 An initialization constructor call has the same syntax as a C initializer, except that the initialization values are passed as arguments to a matching constructor (number and type of parameters). 2030 2073 \begin{cfa} 2031 2074 VLA va = `{` 20, 0 `}`, * arr = alloc()`{` 5, 0 `}`; 2032 2075 \end{cfa} 2033 Note , the use of a \newterm{constructor expression} to initialize the storage from the dynamic storage-allocation.2076 Note the use of a \newterm{constructor expression} to initialize the storage from the dynamic storage allocation. 2034 2077 Like \CC, the copy constructor has two parameters, the second of which is a value parameter with the same type as the first parameter; 2035 2078 appropriate care is taken to not recursively call the copy constructor when initializing the second parameter. … … 2037 2080 \CFA constructors may be explicitly called, like Java, and destructors may be explicitly called, like \CC. 2038 2081 Explicit calls to constructors double as a \CC-style \emph{placement syntax}, useful for construction of members in user-defined constructors and reuse of existing storage allocations. 2039 Like the other operators in \CFA, there is a concise syntax for constructor/destructor function calls :2082 Like the other operators in \CFA, there is a concise syntax for constructor/destructor function calls. 2040 2083 \begin{cfa} 2041 2084 { … … 2053 2096 To provide a uniform type interface for @otype@ polymorphism, the \CFA compiler automatically generates a default constructor, copy constructor, assignment operator, and destructor for all types. 2054 2097 These default functions can be overridden by user-generated versions. 
2055 For compatibility with the standard behavio ur of C, the default constructor and destructor for all basic, pointer, and reference types do nothing, whilethe copy constructor and assignment operator are bitwise copies;2056 if default zero -initialization is desired, the default constructors can be overridden.2098 For compatibility with the standard behavior of C, the default constructor and destructor for all basic, pointer, and reference types do nothing, whereas the copy constructor and assignment operator are bitwise copies; 2099 if default zero initialization is desired, the default constructors can be overridden. 2057 2100 For user-generated types, the four functions are also automatically generated. 2058 2101 @enum@ types are handled the same as their underlying integral type, and unions are also bitwise copied and no-op initialized and destructed. 2059 2102 For compatibility with C, a copy constructor from the first union member type is also defined. 2060 For @struct@ types, each of the four functions areimplicitly defined to call their corresponding functions on each member of the struct.2061 To better simulate the behavio ur of C initializers, a set of \newterm{member constructors} is also generated for structures.2062 A constructor is generated for each non -empty prefix of a structure's member-list to copy-construct the members passed as parameters and default-construct the remaining members.2103 For @struct@ types, each of the four functions is implicitly defined to call their corresponding functions on each member of the struct. 2104 To better simulate the behavior of C initializers, a set of \newterm{member constructors} is also generated for structures. 2105 A constructor is generated for each nonempty prefix of a structure's member list to copy-construct the members passed as parameters and default-construct the remaining members. 
2063 2106 To allow users to limit the set of constructors available for a type, when a user declares any constructor or destructor, the corresponding generated function and all member constructors for that type are hidden from expression resolution; 2064 similarly, the generated default constructor is hidden upon declaration of any constructor.2107 similarly, the generated default constructor is hidden upon the declaration of any constructor. 2065 2108 These semantics closely mirror the rule for implicit declaration of constructors in \CC\cite[p.~186]{ANSI98:C++}. 2066 2109 2067 In some circumstance programmers may not wish to have implicit constructor and destructor generation and calls. 2068 In these cases, \CFA provides the initialization syntax \lstinline|S x `@=` {}|, and the object becomes unmanaged, so implicit constructor and destructor calls are not generated. 2110 In some circumstance, programmers may not wish to have implicit constructor and destructor generation and calls. 2111 In these cases, \CFA provides the initialization syntax \lstinline|S x `@=` {}|, and the object becomes unmanaged; 2112 hence, implicit \mbox{constructor} and destructor calls are not generated. 2069 2113 Any C initializer can be the right-hand side of an \lstinline|@=| initializer, \eg \lstinline|VLA a @= { 0, 0x0 }|, with the usual C initialization semantics. 2070 2114 The same syntax can be used in a compound literal, \eg \lstinline|a = (VLA)`@`{ 0, 0x0 }|, to create a C-style literal. 2071 The point of \lstinline|@=| is to provide a migration path from legacy C code to \CFA, by providing a mechanism to incrementally convert to implicit initialization.2115 The point of \lstinline|@=| is to provide a migration path from legacy C code to \CFA, by providing a mechanism to incrementally convert into implicit initialization. 
2072 2116 2073 2117 … … 2077 2121 \section{Literals} 2078 2122 2079 C already includes limited polymorphism for literals -- @0@ can be either an integer or a pointer literal, depending on context, whilethe syntactic forms of literals of the various integer and float types are very similar, differing from each other only in suffix.2080 In keeping with the general \CFA approach of adding features while respecting the ``C -style'' of doing things, C's polymorphic constants and typed literal syntax are extended to interoperate with user-defined types, while maintaining a backwards-compatible semantics.2123 C already includes limited polymorphism for literals---@0@ can be either an integer or a pointer literal, depending on context, whereas the syntactic forms of literals of the various integer and float types are very similar, differing from each other only in suffix. 2124 In keeping with the general \CFA approach of adding features while respecting the ``C style'' of doing things, C's polymorphic constants and typed literal syntax are extended to interoperate with user-defined types, while maintaining a backward-compatible semantics. 2081 2125 2082 2126 A simple example is allowing the underscore, as in Ada, to separate prefixes, digits, and suffixes in all \CFA constants, \eg @0x`_`1.ffff`_`ffff`_`p`_`128`_`l@, where the underscore is also the standard separator in C identifiers. 2083 \CC uses a single quote as a separator but it is restricted among digits, precluding its use in the literal prefix or suffix, \eg @0x1.ffff@@`'@@ffffp128l@, and causes problems with most IDEs, which must be extended to deal with this alternate use of the single quote.2127 \CC uses a single quote as a separator, but it is restricted among digits, precluding its use in the literal prefix or suffix, \eg @0x1.ffff@@`'@@ffffp128l@, and causes problems with most integrated development environments (IDEs), which must be extended to deal with this alternate use of the single quote. 
2084 2128 2085 2129 … … 2124 2168 2125 2169 In C, @0@ has the special property that it is the only ``false'' value; 2126 by the standard, any value that compares equal to @0@ is false, wh ileany value that compares unequal to @0@ is true.2127 As such, an expression @x@ in any boolean context (such as the condition of an @if@ or @while@ statement, or the arguments to @&&@, @||@, or @?:@\,) can be rewritten as @x != 0@ without changing its semantics.2170 by the standard, any value that compares equal to @0@ is false, whereas any value that compares unequal to @0@ is true. 2171 As such, an expression @x@ in any Boolean context (such as the condition of an @if@ or @while@ statement, or the arguments to @&&@, @||@, or @?:@\,) can be rewritten as @x != 0@ without changing its semantics. 2128 2172 Operator overloading in \CFA provides a natural means to implement this truth-value comparison for arbitrary types, but the C type system is not precise enough to distinguish an equality comparison with @0@ from an equality comparison with an arbitrary integer or pointer. 2129 2173 To provide this precision, \CFA introduces a new type @zero_t@ as the type of literal @0@ (somewhat analagous to @nullptr_t@ and @nullptr@ in \CCeleven); … … 2131 2175 With this addition, \CFA rewrites @if (x)@ and similar expressions to @if ( (x) != 0 )@ or the appropriate analogue, and any type @T@ is ``truthy'' by defining an operator overload @int ?!=?( T, zero_t )@. 2132 2176 \CC makes types truthy by adding a conversion to @bool@; 2133 prior to the addition of explicit cast operators in \CCeleven, this approach had the pitfall of making truthy types transitively convert ableto any numeric type;2177 prior to the addition of explicit cast operators in \CCeleven, this approach had the pitfall of making truthy types transitively convertible into any numeric type; 2134 2178 \CFA avoids this issue. 
2135 2179 … … 2142 2186 2143 2187 2144 \subsection{User Literals}2188 \subsection{User literals} 2145 2189 2146 2190 For readability, it is useful to associate units to scale literals, \eg weight (stone, pound, kilogram) or time (seconds, minutes, hours). 2147 The left of Figure~\ref{f:UserLiteral} shows the \CFA alternative call -syntax (postfix: literal argument before function name), using the backquote, to convert basic literals into user literals.2191 The left of Figure~\ref{f:UserLiteral} shows the \CFA alternative call syntax (postfix: literal argument before function name), using the backquote, to convert basic literals into user literals. 2148 2192 The backquote is a small character, making the unit (function name) predominate. 2149 For examples, the multi -precision integer-type in Section~\ref{s:MultiPrecisionIntegers} has user literals:2193 For examples, the multiprecision integer type in Section~\ref{s:MultiPrecisionIntegers} has the following user literals. 2150 2194 {\lstset{language=CFA,moredelim=**[is][\color{red}]{|}{|},deletedelim=**[is][]{`}{`}} 2151 2195 \begin{cfa} … … 2153 2197 y = "12345678901234567890123456789"|`mp| + "12345678901234567890123456789"|`mp|; 2154 2198 \end{cfa} 2155 Because \CFA uses a standard function, all types and literals are applicable, as well as overloading and conversions, where @?`@ denotes a postfix-function name and @`@ denotes a postfix-function call.2199 Because \CFA uses a standard function, all types and literals are applicable, as well as overloading and conversions, where @?`@ denotes a postfix-function name and @`@ denotes a postfix-function call. 2156 2200 }% 2157 2201 \begin{cquote} … … 2195 2239 \end{cquote} 2196 2240 2197 The right of Figure~\ref{f:UserLiteral} shows the equivalent \CC version using the underscore for the call -syntax.2241 The right of Figure~\ref{f:UserLiteral} shows the equivalent \CC version using the underscore for the call syntax. 
2198 2242 However, \CC restricts the types, \eg @unsigned long long int@ and @long double@ to represent integral and floating literals. 2199 2243 After which, user literals must match (no conversions); … … 2202 2246 \begin{figure} 2203 2247 \centering 2248 \fontsize{9bp}{11bp}\selectfont 2204 2249 \lstset{language=CFA,moredelim=**[is][\color{red}]{|}{|},deletedelim=**[is][]{`}{`}} 2205 2250 \lstDeleteShortInline@% … … 2257 2302 \end{tabular} 2258 2303 \lstMakeShortInline@% 2259 \caption{User Literal}2304 \caption{User literal} 2260 2305 \label{f:UserLiteral} 2261 2306 \end{figure} … … 2265 2310 \label{sec:libraries} 2266 2311 2267 As stated in Section~\ref{sec:poly-fns}, \CFA inherits a large corpus of library code, where other programming languages must rewrite or provide fragile inter -language communication with C.2312 As stated in Section~\ref{sec:poly-fns}, \CFA inherits a large corpus of library code, where other programming languages must rewrite or provide fragile interlanguage communication with C. 2268 2313 \CFA has replacement libraries condensing hundreds of existing C names into tens of \CFA overloaded names, all without rewriting the actual computations. 2269 In many cases, the interface is an inline wrapper providing overloading during compilation but zero cost at runtime.2314 In many cases, the interface is an inline wrapper providing overloading during compilation but of zero cost at runtime. 2270 2315 The following sections give a glimpse of the interface reduction to many C libraries. 2271 2316 In many cases, @signed@/@unsigned@ @char@, @short@, and @_Complex@ functions are available (but not shown) to ensure expression computations remain in a single type, as conversions can distort results. … … 2275 2320 2276 2321 C library @limits.h@ provides lower and upper bound constants for the basic types. 2277 \CFA name overloading is used to condense these typed constants , \eg:2322 \CFA name overloading is used to condense these typed constants. 
2278 2323 \begin{cquote} 2279 2324 \lstDeleteShortInline@% … … 2294 2339 \lstMakeShortInline@% 2295 2340 \end{cquote} 2296 The result is a significant reduction in names to access typed constants , \eg:2341 The result is a significant reduction in names to access typed constants. 2297 2342 \begin{cquote} 2298 2343 \lstDeleteShortInline@% … … 2320 2365 2321 2366 C library @math.h@ provides many mathematical functions. 2322 \CFA function overloading is used to condense these mathematical functions , \eg:2367 \CFA function overloading is used to condense these mathematical functions. 2323 2368 \begin{cquote} 2324 2369 \lstDeleteShortInline@% … … 2339 2384 \lstMakeShortInline@% 2340 2385 \end{cquote} 2341 The result is a significant reduction in names to access math functions , \eg:2386 The result is a significant reduction in names to access math functions. 2342 2387 \begin{cquote} 2343 2388 \lstDeleteShortInline@% … … 2358 2403 \lstMakeShortInline@% 2359 2404 \end{cquote} 2360 While \Celeven has type-generic math ~\cite[\S~7.25]{C11}in @tgmath.h@ to provide a similar mechanism, these macros are limited, matching a function name with a single set of floating type(s).2405 While \Celeven has type-generic math (see section~7.25 of the ISO/IEC 9899\cite{C11}) in @tgmath.h@ to provide a similar mechanism, these macros are limited, matching a function name with a single set of floating type(s). 2361 2406 For example, it is impossible to overload @atan@ for both one and two arguments; 2362 instead the names @atan@ and @atan2@ are required (see Section~\ref{s:NameOverloading}).2363 The key observation is that only a restricted set of type-generic macros areprovided for a limited set of function names, which do not generalize across the type system, as in \CFA.2407 instead, the names @atan@ and @atan2@ are required (see Section~\ref{s:NameOverloading}). 
2408 The key observation is that only a restricted set of type-generic macros is provided for a limited set of function names, which do not generalize across the type system, as in \CFA. 2364 2409 2365 2410 … … 2367 2412 2368 2413 C library @stdlib.h@ provides many general functions. 2369 \CFA function overloading is used to condense these utility functions , \eg:2414 \CFA function overloading is used to condense these utility functions. 2370 2415 \begin{cquote} 2371 2416 \lstDeleteShortInline@% … … 2386 2431 \lstMakeShortInline@% 2387 2432 \end{cquote} 2388 The result is a significant reduction in names to access utility functions, \eg:2433 The result is a significant reduction in names to access the utility functions. 2389 2434 \begin{cquote} 2390 2435 \lstDeleteShortInline@% … … 2405 2450 \lstMakeShortInline@% 2406 2451 \end{cquote} 2407 In addit on, there are polymorphic functions, like @min@ and @max@, that work on any type with operators@?<?@ or @?>?@.2452 In addition, there are polymorphic functions, like @min@ and @max@, that work on any type with operator @?<?@ or @?>?@. 2408 2453 2409 2454 The following shows one example where \CFA \emph{extends} an existing standard C interface to reduce complexity and provide safety. 2410 C/\Celeven provide a number of complex and overlapping storage-management operation to support the following capabilities:2411 \begin{ description}%[topsep=3pt,itemsep=2pt,parsep=0pt]2455 C/\Celeven provide a number of complex and overlapping storage-management operations to support the following capabilities. 2456 \begin{list}{}{\itemsep=0pt\parsep=0pt\labelwidth=0pt\leftmargin\parindent\itemindent-\leftmargin\let\makelabel\descriptionlabel} 2412 2457 \item[fill] 2413 2458 an allocation with a specified character. 2414 2459 \item[resize] 2415 2460 an existing allocation to decrease or increase its size. 
2416 In either case, new storage may or may not be allocated and, if there is a new allocation, as much data from the existing allocation iscopied.2461 In either case, new storage may or may not be allocated, and if there is a new allocation, as much data from the existing allocation are copied. 2417 2462 For an increase in storage size, new storage after the copied data may be filled. 2463 \newpage 2418 2464 \item[align] 2419 2465 an allocation on a specified memory boundary, \eg, an address multiple of 64 or 128 for cache-line purposes. … … 2421 2467 allocation with a specified number of elements. 2422 2468 An array may be filled, resized, or aligned. 2423 \end{ description}2424 Table~\ref{t:StorageManagementOperations} shows the capabilities provided by C/\Celeven allocation -functions and how all the capabilities can be combined into two \CFA functions.2425 \CFA storage-management functions extend the C equivalents by overloading, providing shallow type -safety, and removing the need to specify the base allocation-size.2426 Figure~\ref{f:StorageAllocation} contrasts \CFA and C storage -allocation performing the same operations with the same type safety.2469 \end{list} 2470 Table~\ref{t:StorageManagementOperations} shows the capabilities provided by C/\Celeven allocation functions and how all the capabilities can be combined into two \CFA functions. 2471 \CFA storage-management functions extend the C equivalents by overloading, providing shallow type safety, and removing the need to specify the base allocation size. 2472 Figure~\ref{f:StorageAllocation} contrasts \CFA and C storage allocation performing the same operations with the same type safety. 
2427 2473 2428 2474 \begin{table} 2429 \caption{Storage- Management Operations}2475 \caption{Storage-management operations} 2430 2476 \label{t:StorageManagementOperations} 2431 2477 \centering 2432 2478 \lstDeleteShortInline@% 2433 2479 \lstMakeShortInline~% 2434 \begin{tabular}{@{}r|r|l|l|l|l@{}} 2435 \multicolumn{1}{c}{}& & \multicolumn{1}{c|}{fill} & resize & align & array \\ 2436 \hline 2480 \begin{tabular}{@{}rrllll@{}} 2481 \multicolumn{1}{c}{}& & \multicolumn{1}{c}{fill} & resize & align & array \\ 2437 2482 C & ~malloc~ & no & no & no & no \\ 2438 2483 & ~calloc~ & yes (0 only) & no & no & yes \\ … … 2440 2485 & ~memalign~ & no & no & yes & no \\ 2441 2486 & ~posix_memalign~ & no & no & yes & no \\ 2442 \hline2443 2487 C11 & ~aligned_alloc~ & no & no & yes & no \\ 2444 \hline2445 2488 \CFA & ~alloc~ & yes/copy & no/yes & no & yes \\ 2446 2489 & ~align_alloc~ & yes & no & yes & yes \\ … … 2452 2495 \begin{figure} 2453 2496 \centering 2497 \fontsize{9bp}{11bp}\selectfont 2454 2498 \begin{cfa}[aboveskip=0pt,xleftmargin=0pt] 2455 2499 size_t dim = 10; $\C{// array dimension}$ … … 2489 2533 \end{tabular} 2490 2534 \lstMakeShortInline@% 2491 \caption{\CFA versus C Storage-Allocation}2535 \caption{\CFA versus C storage allocation} 2492 2536 \label{f:StorageAllocation} 2493 2537 \end{figure} 2494 2538 2495 2539 Variadic @new@ (see Section~\ref{sec:variadic-tuples}) cannot support the same overloading because extra parameters are for initialization. 2496 Hence, there are @new@ and @anew@ functions for single and array variables, and the fill value is the arguments to the constructor , \eg:2540 Hence, there are @new@ and @anew@ functions for single and array variables, and the fill value is the arguments to the constructor. 
2497 2541 \begin{cfa} 2498 2542 struct S { int i, j; }; … … 2501 2545 S * as = anew( dim, 2, 3 ); $\C{// each array element initialized to 2, 3}$ 2502 2546 \end{cfa} 2503 Note ,\CC can only initialize array elements via the default constructor.2504 2505 Finally, the \CFA memory -allocator has \newterm{sticky properties} for dynamic storage: fill and alignment are remembered with an object's storage in the heap.2547 Note that \CC can only initialize array elements via the default constructor. 2548 2549 Finally, the \CFA memory allocator has \newterm{sticky properties} for dynamic storage: fill and alignment are remembered with an object's storage in the heap. 2506 2550 When a @realloc@ is performed, the sticky properties are respected, so that new storage is correctly aligned and initialized with the fill character. 2507 2551 … … 2510 2554 \label{s:IOLibrary} 2511 2555 2512 The goal of \CFA I/O is to simplify the common cases, while fully supporting polymorphism and user defined types in a consistent way.2556 The goal of \CFA I/O is to simplify the common cases, while fully supporting polymorphism and user-defined types in a consistent way. 2513 2557 The approach combines ideas from \CC and Python. 2514 2558 The \CFA header file for the I/O library is @fstream@. … … 2539 2583 \lstMakeShortInline@% 2540 2584 \end{cquote} 2541 The \CFA form has half the characters of the \CC form ,and is similar to Python I/O with respect to implicit separators.2585 The \CFA form has half the characters of the \CC form and is similar to Python I/O with respect to implicit separators. 2542 2586 Similar simplification occurs for tuple I/O, which prints all tuple values separated by ``\lstinline[showspaces=true]@, @''. 
2543 2587 \begin{cfa} … … 2572 2616 \lstMakeShortInline@% 2573 2617 \end{cquote} 2574 There is a weak similarity between the \CFA logical-or operator and the Shell pipe -operator for moving data, where data flows in the correct direction for input butthe opposite direction for output.2618 There is a weak similarity between the \CFA logical-or operator and the Shell pipe operator for moving data, where data flow in the correct direction for input but in the opposite direction for output. 2575 2619 \begin{comment} 2576 2620 The implicit separator character (space/blank) is a separator not a terminator. … … 2593 2637 \end{itemize} 2594 2638 \end{comment} 2595 There are functions to set and get the separator string ,and manipulators to toggle separation on and off in the middle of output.2596 2597 2598 \subsection{Multi -precision Integers}2639 There are functions to set and get the separator string and manipulators to toggle separation on and off in the middle of output. 2640 2641 2642 \subsection{Multiprecision integers} 2599 2643 \label{s:MultiPrecisionIntegers} 2600 2644 2601 \CFA has an interface to the G MP multi-precision signed-integers~\cite{GMP}, similar to the \CC interface provided by GMP.2602 The \CFA interface wraps GMP functions into operator functions to make programming with multi -precision integers identical to using fixed-sized integers.2603 The \CFA type name for multi -precision signed-integers is @Int@ and the header file is @gmp@.2604 Figure~\ref{f:GMPInterface} shows a multi -precision factorial-program contrasting the GMP interface in \CFA and C.2605 2606 \begin{figure} 2645 \CFA has an interface to the GNU multiple precision (GMP) signed integers~\cite{GMP}, similar to the \CC interface provided by GMP. 2646 The \CFA interface wraps GMP functions into operator functions to make programming with multiprecision integers identical to using fixed-sized integers. 
2647 The \CFA type name for multiprecision signed integers is @Int@ and the header file is @gmp@. 2648 Figure~\ref{f:GMPInterface} shows a multiprecision factorial program contrasting the GMP interface in \CFA and C. 2649 2650 \begin{figure}[b] 2607 2651 \centering 2652 \fontsize{9bp}{11bp}\selectfont 2608 2653 \lstDeleteShortInline@% 2609 2654 \begin{tabular}{@{}l@{\hspace{3\parindentlnth}}l@{}} … … 2636 2681 \end{tabular} 2637 2682 \lstMakeShortInline@% 2638 \caption{GMP Interface \CFA versus C}2683 \caption{GMP interface \CFA versus C} 2639 2684 \label{f:GMPInterface} 2640 2685 \end{figure} 2641 2686 2642 2687 2688 \vspace{-4pt} 2643 2689 \section{Polymorphism Evaluation} 2644 2690 \label{sec:eval} … … 2649 2695 % Though \CFA provides significant added functionality over C, these features have a low runtime penalty. 2650 2696 % In fact, it is shown that \CFA's generic programming can enable faster runtime execution than idiomatic @void *@-based C code. 2651 The experiment is a set of generic-stack micro -benchmarks~\cite{CFAStackEvaluation} in C, \CFA, and \CC (see implementations in Appendix~\ref{sec:BenchmarkStackImplementations}).2697 The experiment is a set of generic-stack microbenchmarks~\cite{CFAStackEvaluation} in C, \CFA, and \CC (see implementations in Appendix~\ref{sec:BenchmarkStackImplementations}). 2652 2698 Since all these languages share a subset essentially comprising standard C, maximal-performance benchmarks should show little runtime variance, differing only in length and clarity of source code. 2653 2699 A more illustrative comparison measures the costs of idiomatic usage of each language's features. 2654 Figure~\ref{fig:BenchmarkTest} shows the \CFA benchmark tests for a generic stack based on a singly linked -list.2700 Figure~\ref{fig:BenchmarkTest} shows the \CFA benchmark tests for a generic stack based on a singly linked list. 2655 2701 The benchmark test is similar for the other languages. 
2656 2702 The experiment uses element types @int@ and @pair(short, char)@, and pushes $N=40M$ elements on a generic stack, copies the stack, clears one of the stacks, and finds the maximum value in the other stack. 2657 2703 2658 2704 \begin{figure} 2705 \fontsize{9bp}{11bp}\selectfont 2659 2706 \begin{cfa}[xleftmargin=3\parindentlnth,aboveskip=0pt,belowskip=0pt] 2660 2707 int main() { … … 2676 2723 } 2677 2724 \end{cfa} 2678 \caption{\protect\CFA Benchmark Test}2725 \caption{\protect\CFA benchmark test} 2679 2726 \label{fig:BenchmarkTest} 2727 \vspace*{-10pt} 2680 2728 \end{figure} 2681 2729 2682 The structure of each benchmark implemented is :C with @void *@-based polymorphism, \CFA with parametric polymorphism, \CC with templates, and \CC using only class inheritance for polymorphism, called \CCV.2730 The structure of each benchmark implemented is C with @void *@-based polymorphism, \CFA with parametric polymorphism, \CC with templates, and \CC using only class inheritance for polymorphism, called \CCV. 2683 2731 The \CCV variant illustrates an alternative object-oriented idiom where all objects inherit from a base @object@ class, mimicking a Java-like interface; 2684 hence runtime checks are necessary to safely down-cast objects.2685 The most notable difference among the implementations is in memory layout of generic types: \CFA and \CC inline the stack and pair elements into corresponding list and pair nodes, wh ile C and \CCV lack such a capability and instead must store generic objects via pointers to separately-allocated objects.2686 Note , the C benchmark uses unchecked casts as C has no runtime mechanism to perform such checks, while \CFA and \CC provide type-safety statically.2732 hence, runtime checks are necessary to safely downcast objects. 
2733 The most notable difference among the implementations is in memory layout of generic types: \CFA and \CC inline the stack and pair elements into corresponding list and pair nodes, whereas C and \CCV lack such capability and, instead, must store generic objects via pointers to separately allocated objects. 2734 Note that the C benchmark uses unchecked casts as C has no runtime mechanism to perform such checks, whereas \CFA and \CC provide type safety statically. 2687 2735 2688 2736 Figure~\ref{fig:eval} and Table~\ref{tab:eval} show the results of running the benchmark in Figure~\ref{fig:BenchmarkTest} and its C, \CC, and \CCV equivalents. 2689 The graph plots the median of 5consecutive runs of each program, with an initial warm-up run omitted.2737 The graph plots the median of five consecutive runs of each program, with an initial warm-up run omitted. 2690 2738 All code is compiled at \texttt{-O2} by gcc or g++ 6.4.0, with all \CC code compiled as \CCfourteen. 2691 2739 The benchmarks are run on an Ubuntu 16.04 workstation with 16 GB of RAM and a 6-core AMD FX-6300 CPU with 3.5 GHz maximum clock frequency. 
… … 2693 2741 \begin{figure} 2694 2742 \centering 2695 \ input{timing}2696 \caption{Benchmark Timing Results (smaller is better)}2743 \resizebox{0.7\textwidth}{!}{\input{timing}} 2744 \caption{Benchmark timing results (smaller is better)} 2697 2745 \label{fig:eval} 2746 \vspace*{-10pt} 2698 2747 \end{figure} 2699 2748 2700 2749 \begin{table} 2750 \vspace*{-10pt} 2701 2751 \caption{Properties of benchmark code} 2702 2752 \label{tab:eval} 2703 2753 \centering 2754 \vspace*{-4pt} 2704 2755 \newcommand{\CT}[1]{\multicolumn{1}{c}{#1}} 2705 \begin{tabular}{ rrrrr}2706 & \CT{C} & \CT{\CFA} & \CT{\CC} & \CT{\CCV} \\ \hline2707 maximum memory usage (MB) & 10 ,001 & 2,502 & 2,503 & 11,253 \\2756 \begin{tabular}{lrrrr} 2757 & \CT{C} & \CT{\CFA} & \CT{\CC} & \CT{\CCV} \\ 2758 maximum memory usage (MB) & 10\,001 & 2\,502 & 2\,503 & 11\,253 \\ 2708 2759 source code size (lines) & 201 & 191 & 125 & 294 \\ 2709 2760 redundant type annotations (lines) & 27 & 0 & 2 & 16 \\ 2710 2761 binary size (KB) & 14 & 257 & 14 & 37 \\ 2711 2762 \end{tabular} 2763 \vspace*{-16pt} 2712 2764 \end{table} 2713 2765 2714 The C and \CCV variants are generally the slowest with the largest memory footprint, because of their less-efficient memory layout and the pointer-indirection necessary to implement generic types; 2766 \enlargethispage{-10pt} 2767 The C and \CCV variants are generally the slowest with the largest memory footprint, due to their less-efficient memory layout and the pointer indirection necessary to implement generic types; 2715 2768 this inefficiency is exacerbated by the second level of generic types in the pair benchmarks. 
2716 By contrast, the \CFA and \CC variants run in roughly equivalent time for both the integer and pair because of equivalent storage layout, with the inlined libraries (\ie no separate compilation) and greater maturity of the \CC compiler contributing to its lead.2717 \CCV is slower than C largely due to the cost of runtime type -checking of down-casts (implemented with @dynamic_cast@);2769 By contrast, the \CFA and \CC variants run in roughly equivalent time for both the integer and pair because of the equivalent storage layout, with the inlined libraries (\ie no separate compilation) and greater maturity of the \CC compiler contributing to its lead. 2770 \CCV is slower than C largely due to the cost of runtime type checking of downcasts (implemented with @dynamic_cast@). 2718 2771 The outlier for \CFA, pop @pair@, results from the complexity of the generated-C polymorphic code. 2719 2772 The gcc compiler is unable to optimize some dead code and condense nested calls; … … 2721 2774 Finally, the binary size for \CFA is larger because of static linking with the \CFA libraries. 2722 2775 2723 \CFA is also competitive in terms of source code size, measured as a proxy for programmer effort. The line counts in Table~\ref{tab:eval} include implementations of @pair@ and @stack@ types for all four languages for purposes of direct comparison, though it should be noted that \CFA and \CC have pre-written data structures in their standard libraries that programmers would generally use instead. Use of these standard library types has minimal impact on the performance benchmarks, but shrinks the \CFA and \CC benchmarks to 39 and 42 lines, respectively.2776 \CFA is also competitive in terms of source code size, measured as a proxy for programmer effort. 
The line counts in Table~\ref{tab:eval} include implementations of @pair@ and @stack@ types for all four languages for purposes of direct comparison, although it should be noted that \CFA and \CC have prewritten data structures in their standard libraries that programmers would generally use instead. Use of these standard library types has minimal impact on the performance benchmarks, but shrinks the \CFA and \CC benchmarks to 39 and 42 lines, respectively. 2724 2777 The difference between the \CFA and \CC line counts is primarily declaration duplication to implement separate compilation; a header-only \CFA library would be similar in length to the \CC version. 2725 On the other hand, C does not have a generic collections -library in its standard distribution, resulting in frequent reimplementation of such collection types by C programmers.2726 \CCV does not use the \CC standard template library by construction , and in factincludes the definition of @object@ and wrapper classes for @char@, @short@, and @int@ in its line count, which inflates this count somewhat, as an actual object-oriented language would include these in the standard library;2778 On the other hand, C does not have a generic collections library in its standard distribution, resulting in frequent reimplementation of such collection types by C programmers. 2779 \CCV does not use the \CC standard template library by construction and, in fact, includes the definition of @object@ and wrapper classes for @char@, @short@, and @int@ in its line count, which inflates this count somewhat, as an actual object-oriented language would include these in the standard library; 2727 2780 with their omission, the \CCV line count is similar to C. 2728 2781 We justify the given line count by noting that many object-oriented languages do not allow implementing new interfaces on library types without subclassing or wrapper types, which may be similarly verbose. 
2729 2782 2730 Line -count is a fairly rough measure of code complexity; 2731 another important factor is how much type information the programmer must specify manually, especially where that information is not compiler -checked. 2732 Such unchecked type information produces a heavier documentation burden and increased potential for runtime bugs ,and is much less common in \CFA than C, with its manually specified function pointer arguments and format codes, or \CCV, with its extensive use of un-type-checked downcasts, \eg @object@ to @integer@ when popping a stack.2783 Line count is a fairly rough measure of code complexity; 2784 another important factor is how much type information the programmer must specify manually, especially where that information is not compiler checked. 2785 Such unchecked type information produces a heavier documentation burden and increased potential for runtime bugs and is much less common in \CFA than C, with its manually specified function pointer arguments and format codes, or \CCV, with its extensive use of un-type-checked downcasts, \eg @object@ to @integer@ when popping a stack. 2733 2786 To quantify this manual typing, the ``redundant type annotations'' line in Table~\ref{tab:eval} counts the number of lines on which the type of a known variable is respecified, either as a format specifier, explicit downcast, type-specific function, or by name in a @sizeof@, struct literal, or @new@ expression. 2734 The \CC benchmark uses two redundant type annotations to create a new stack nodes, wh ilethe C and \CCV benchmarks have several such annotations spread throughout their code.2787 The \CC benchmark uses two redundant type annotations to create new stack nodes, whereas the C and \CCV benchmarks have several such annotations spread throughout their code. 2735 2788 The \CFA benchmark is able to eliminate all redundant type annotations through use of the polymorphic @alloc@ function discussed in Section~\ref{sec:libraries}. 
2736 2789 2737 We conjecture these results scale across most generic data-types as the underlying polymorphism implement is constant. 2738 2739 2790 We conjecture that these results scale across most generic data types as the underlying polymorphism implementation is constant. 2791 2792 2793 \vspace*{-8pt} 2740 2794 \section{Related Work} 2741 2795 \label{s:RelatedWork} … … 2753 2807 \CC provides three disjoint polymorphic extensions to C: overloading, inheritance, and templates. 2754 2808 The overloading is restricted because resolution does not use the return type, inheritance requires learning object-oriented programming and coping with a restricted nominal-inheritance hierarchy, templates cannot be separately compiled resulting in compilation/code bloat and poor error messages, and determining how these mechanisms interact and which to use is confusing. 2755 In contrast, \CFA has a single facility for polymorphic code supporting type-safe separate -compilation of polymorphic functions and generic (opaque) types, which uniformly leverage the C procedural paradigm.2809 In contrast, \CFA has a single facility for polymorphic code supporting type-safe separate compilation of polymorphic functions and generic (opaque) types, which uniformly leverage the C procedural paradigm. 2756 2810 The key mechanism to support separate compilation is \CFA's \emph{explicit} use of assumed type properties. 2757 Until \CC concepts~\cite{C++Concepts} are standardized (anticipated for \CCtwenty), \CC provides no way to specifythe requirements of a generic function beyond compilation errors during template expansion;2811 Until \CC concepts~\cite{C++Concepts} are standardized (anticipated for \CCtwenty), \CC provides no way of specifying the requirements of a generic function beyond compilation errors during template expansion; 2758 2812 furthermore, \CC concepts are restricted to template polymorphism. 
2759 2813 2760 2814 Cyclone~\cite{Grossman06} also provides capabilities for polymorphic functions and existential types, similar to \CFA's @forall@ functions and generic types. 2761 Cyclone existential types can include function pointers in a construct similar to a virtual function -table, but these pointers must be explicitly initialized at some point in the code,a tedious and potentially error-prone process.2815 Cyclone existential types can include function pointers in a construct similar to a virtual function table, but these pointers must be explicitly initialized at some point in the code, which is a tedious and potentially error-prone process. 2762 2816 Furthermore, Cyclone's polymorphic functions and types are restricted to abstraction over types with the same layout and calling convention as @void *@, \ie only pointer types and @int@. 2763 2817 In \CFA terms, all Cyclone polymorphism must be dtype-static. 2764 2818 While the Cyclone design provides the efficiency benefits discussed in Section~\ref{sec:generic-apps} for dtype-static polymorphism, it is more restrictive than \CFA's general model. 2765 Smith and Volpano~\cite{Smith98} present Polymorphic C, an ML dialect with polymorphic functions, C-like syntax, and pointer types; it lacks many of C's features, however, most notably structure types, and so is not a practical C replacement. 2819 Smith and Volpano~\cite{Smith98} present Polymorphic C, an ML dialect with polymorphic functions, C-like syntax, and pointer types; 2820 it lacks many of C's features, most notably structure types, and hence, is not a practical C replacement. 2766 2821 2767 2822 Objective-C~\cite{obj-c-book} is an industrially successful extension to C. 2768 However, Objective-C is a radical departure from C, using an object-oriented model with message -passing.2823 However, Objective-C is a radical departure from C, using an object-oriented model with message passing. 
2769 2824 Objective-C did not support type-checked generics until recently \cite{xcode7}, historically using less-efficient runtime checking of object types. 2770 The GObject~\cite{GObject} framework also adds object-oriented programming with runtime type-checking and reference-counting garbage -collection to C;2771 these features are more intrusive additions than those provided by \CFA, in addition to the runtime overhead of reference -counting.2772 Vala~\cite{Vala} compiles to GObject-based C, adding the burden of learning a separate language syntax to the aforementioned demerits of GObject as a modernization path for existing C code -bases.2773 Java~\cite{Java8} included generic types in Java~5, which are type -checked at compilation and type-erased at runtime, similar to \CFA's.2774 However, in Java, each object carries its own table of method pointers, wh ile\CFA passes the method pointers separately to maintain a C-compatible layout.2825 The GObject~\cite{GObject} framework also adds object-oriented programming with runtime type-checking and reference-counting garbage collection to C; 2826 these features are more intrusive additions than those provided by \CFA, in addition to the runtime overhead of reference counting. 2827 Vala~\cite{Vala} compiles to GObject-based C, adding the burden of learning a separate language syntax to the aforementioned demerits of GObject as a modernization path for existing C code bases. 2828 Java~\cite{Java8} included generic types in Java~5, which are type checked at compilation and type erased at runtime, similar to \CFA's. 2829 However, in Java, each object carries its own table of method pointers, whereas \CFA passes the method pointers separately to maintain a C-compatible layout. 2775 2830 Java is also a garbage-collected, object-oriented language, with the associated resource usage and C-interoperability burdens. 
2776 2831 2777 D~\cite{D}, Go, and Rust~\cite{Rust} are modern , compiled languages with abstraction features similar to \CFA traits, \emph{interfaces} in D and Goand \emph{traits} in Rust.2832 D~\cite{D}, Go, and Rust~\cite{Rust} are modern compiled languages with abstraction features similar to \CFA traits, \emph{interfaces} in D and Go, and \emph{traits} in Rust. 2778 2833 However, each language represents a significant departure from C in terms of language model, and none has the same level of compatibility with C as \CFA. 2779 2834 D and Go are garbage-collected languages, imposing the associated runtime overhead. 2780 2835 The necessity of accounting for data transfer between managed runtimes and the unmanaged C runtime complicates foreign-function interfaces to C. 2781 2836 Furthermore, while generic types and functions are available in Go, they are limited to a small fixed set provided by the compiler, with no language facility to define more. 2782 D restricts garbage collection to its own heap by default, wh ile Rust is not garbage-collected, and thushas a lighter-weight runtime more interoperable with C.2837 D restricts garbage collection to its own heap by default, whereas Rust is not garbage collected and, thus, has a lighter-weight runtime more interoperable with C. 2783 2838 Rust also possesses much more powerful abstraction capabilities for writing generic code than Go. 2784 On the other hand, Rust's borrow -checker provides strong safety guarantees but is complex and difficult to learn and imposes a distinctly idiomatic programming style.2839 On the other hand, Rust's borrow checker provides strong safety guarantees but is complex and difficult to learn and imposes a distinctly idiomatic programming style. 2785 2840 \CFA, with its more modest safety features, allows direct ports of C code while maintaining the idiomatic style of the original source. 
2786 2841 2787 2842 2788 \subsection{Tuples/Variadics} 2789 2843 \vspace*{-18pt} 2844 \subsection{Tuples/variadics} 2845 2846 \vspace*{-5pt} 2790 2847 Many programming languages have some form of tuple construct and/or variadic functions, \eg SETL, C, KW-C, \CC, D, Go, Java, ML, and Scala. 2791 2848 SETL~\cite{SETL} is a high-level mathematical programming language, with tuples being one of the primary data types. 2792 2849 Tuples in SETL allow subscripting, dynamic expansion, and multiple assignment. 2793 C provides variadic functions through @va_list@ objects, but the programmer is responsible for managing the number of arguments and their types, so the mechanism is type unsafe. 2850 C provides variadic functions through @va_list@ objects, but the programmer is responsible for managing the number of arguments and their types; 2851 thus, the mechanism is type unsafe. 2794 2852 KW-C~\cite{Buhr94a}, a predecessor of \CFA, introduced tuples to C as an extension of the C syntax, taking much of its inspiration from SETL. 2795 2853 The main contributions of that work were adding MRVF, tuple mass and multiple assignment, and record-member access. 2796 \CCeleven introduced @std::tuple@ as a library variadic template structure.2854 \CCeleven introduced @std::tuple@ as a library variadic-template structure. 2797 2855 Tuples are a generalization of @std::pair@, in that they allow for arbitrary length, fixed-size aggregation of heterogeneous values. 2798 2856 Operations include @std::get<N>@ to extract values, @std::tie@ to create a tuple of references used for assignment, and lexicographic comparisons. 
2799 \CCseventeen proposes \emph{structured bindings}~\cite{Sutter15} to eliminate pre -declaring variables anduse of @std::tie@ for binding the results.2800 This extension requires the use of @auto@ to infer the types of the new variables , so complicated expressions with a non-obvious type must be documented with some other mechanism.2857 \CCseventeen proposes \emph{structured bindings}~\cite{Sutter15} to eliminate predeclaring variables and the use of @std::tie@ for binding the results. 2858 This extension requires the use of @auto@ to infer the types of the new variables; hence, complicated expressions with a nonobvious type must be documented with some other mechanism. 2801 2859 Furthermore, structured bindings are not a full replacement for @std::tie@, as it always declares new variables. 2802 2860 Like \CC, D provides tuples through a library variadic-template structure. 2803 2861 Go does not have tuples but supports MRVF. 2804 Java's variadic functions appear similar to C's but are type -safe using homogeneous arrays, which are less useful than \CFA's heterogeneously-typed variadic functions.2862 Java's variadic functions appear similar to C's but are type safe using homogeneous arrays, which are less useful than \CFA's heterogeneously typed variadic functions. 2805 2863 Tuples are a fundamental abstraction in most functional programming languages, such as Standard ML~\cite{sml}, Haskell, and Scala~\cite{Scala}, which decompose tuples using pattern matching. 2806 2864 2807 2865 2866 \vspace*{-18pt} 2808 2867 \subsection{C Extensions} 2809 2868 2810 \CC is the best known C-based language, and is similar to \CFA in that both are extensions to C with source and runtime backwards compatibility. 2811 Specific difference between \CFA and \CC have been identified in prior sections, with a final observation that \CFA has equal or fewer tokens to express the same notion in many cases. 
2869 \vspace*{-5pt} 2870 \CC is the best known C-based language and is similar to \CFA in that both are extensions to C with source and runtime backward compatibility. 2871 Specific differences between \CFA and \CC have been identified in prior sections, with a final observation that \CFA has equal or fewer tokens to express the same notion in many cases. 2812 2872 The key difference in design philosophies is that \CFA is easier for C programmers to understand by maintaining a procedural paradigm and avoiding complex interactions among extensions. 2813 2873 \CC, on the other hand, has multiple overlapping features (such as the three forms of polymorphism), many of which have complex interactions with its object-oriented design. 2814 As a result, \CC has a steep learning curve for even experienced C programmers, especially when attempting to maintain performance equivalent to C legacy -code.2815 2816 There are several other C extension -languages with less usage and even more dramatic changes than \CC.2817 Objective-Cand Cyclone are two other extensions to C with different design goals than \CFA, as discussed above.2874 As a result, \CC has a steep learning curve for even experienced C programmers, especially when attempting to maintain performance equivalent to C legacy code. 2875 2876 There are several other C extension languages with less usage and even more dramatic changes than \CC. 2877 \mbox{Objective-C} and Cyclone are two other extensions to C with different design goals than \CFA, as discussed above. 2818 2878 Other languages extend C with more focused features. 
2819 2879 $\mu$\CC~\cite{uC++book}, CUDA~\cite{Nickolls08}, ispc~\cite{Pharr12}, and Sierra~\cite{Leissa14} add concurrent or data-parallel primitives to C or \CC; 2820 data-parallel features have not yet been added to \CFA, but are easily incorporated within its design, wh ileconcurrency primitives similar to those in $\mu$\CC have already been added~\cite{Delisle18}.2821 Finally, CCured~\cite{Necula02} and Ironclad \CC~\cite{DeLozier13} attempt to provide a more memory-safe C by annotating pointer types with garbage collection information; type-checked polymorphism in \CFA covers several of C's memory-safety issues, but more aggressive approaches such as annotating all pointer types with their nullability or requiring runtime garbage collection are contradictory to \CFA's backward scompatibility goals.2880 data-parallel features have not yet been added to \CFA, but are easily incorporated within its design, whereas concurrency primitives similar to those in $\mu$\CC have already been added~\cite{Delisle18}. 2881 Finally, CCured~\cite{Necula02} and Ironclad \CC~\cite{DeLozier13} attempt to provide a more memory-safe C by annotating pointer types with garbage collection information; type-checked polymorphism in \CFA covers several of C's memory-safety issues, but more aggressive approaches such as annotating all pointer types with their nullability or requiring runtime garbage collection are contradictory to \CFA's backward compatibility goals. 
2822 2882 2823 2883 2824 2884 \section{Conclusion and Future Work} 2825 2885 2826 The goal of \CFA is to provide an evolutionary pathway for large C development -environments to be more productive and safer, while respecting the talent and skill of C programmers.2827 While other programming languages purport to be a better C, they are in factnew and interesting languages in their own right, but not C extensions.2828 The purpose of this paper is to introduce \CFA, and showcase language features that illustrate the \CFA type -system and approaches taken to achieve the goal of evolutionary C extension.2829 The contributions are a powerful type -system using parametric polymorphism and overloading, generic types, tuples, advanced control structures, and extended declarations, which all have complex interactions.2886 The goal of \CFA is to provide an evolutionary pathway for large C development environments to be more productive and safer, while respecting the talent and skill of C programmers. 2887 While other programming languages purport to be a better C, they are, in fact, new and interesting languages in their own right, but not C extensions. 2888 The purpose of this paper is to introduce \CFA, and showcase language features that illustrate the \CFA type system and approaches taken to achieve the goal of evolutionary C extension. 2889 The contributions are a powerful type system using parametric polymorphism and overloading, generic types, tuples, advanced control structures, and extended declarations, which all have complex interactions. 2830 2890 The work is a challenging design, engineering, and implementation exercise. 2831 2891 On the surface, the project may appear as a rehash of similar mechanisms in \CC. 2832 2892 However, every \CFA feature is different than its \CC counterpart, often with extended functionality, better integration with C and its programmers, and always supporting separate compilation. 
2833 All of these new features are being used by the \CFA development -team to build the \CFA runtime-system.2893 All of these new features are being used by the \CFA development team to build the \CFA runtime system. 2834 2894 Finally, we demonstrate that \CFA performance for some idiomatic cases is better than C and close to \CC, showing the design is practically applicable. 2835 2895 2836 While all examples in the paper compile and run, a public beta-release of \CFA will take 6--8 months to reduce compilation time, provide better debugging, and add a few more libraries. 2837 There is also new work on a number of \CFA features, including arrays with size, runtime type-information, virtual functions, user-defined conversions, and modules. 2838 While \CFA polymorphic functions use dynamic virtual-dispatch with low runtime overhead (see Section~\ref{sec:eval}), it is not as low as \CC template-inlining. 2839 Hence it may be beneficial to provide a mechanism for performance-sensitive code. 2840 Two promising approaches are an @inline@ annotation at polymorphic function call sites to create a template-specialization of the function (provided the code is visible) or placing an @inline@ annotation on polymorphic function-definitions to instantiate a specialized version for some set of types (\CC template specialization). 2841 These approaches are not mutually exclusive and allow performance optimizations to be applied only when necessary, without suffering global code-bloat. 2842 In general, we believe separate compilation, producing smaller code, works well with loaded hardware-caches, which may offset the benefit of larger inlined-code. 2896 While all examples in the paper compile and run, there are ongoing efforts to reduce compilation time, provide better debugging, and add more libraries; 2897 when this work is complete in early 2019, a public beta release will be available at \url{https://github.com/cforall/cforall}. 
2898 There is also new work on a number of \CFA features, including arrays with size, runtime type information, virtual functions, user-defined conversions, and modules. 2899 While \CFA polymorphic functions use dynamic virtual dispatch with low runtime overhead (see Section~\ref{sec:eval}), it is not as low as \CC template inlining. 2900 Hence, it may be beneficial to provide a mechanism for performance-sensitive code. 2901 Two promising approaches are an @inline@ annotation at polymorphic function call sites to create a template specialization of the function (provided the code is visible) or placing an @inline@ annotation on polymorphic function definitions to instantiate a specialized version for some set of types (\CC template specialization). 2902 These approaches are not mutually exclusive and allow performance optimizations to be applied only when necessary, without suffering global code bloat. 2903 In general, we believe separate compilation, producing smaller code, works well with loaded hardware caches, which may offset the benefit of larger inlined code. 2843 2904 2844 2905 2845 2906 \section{Acknowledgments} 2846 2907 2847 The authors would like to recognize the design assistance of Glen Ditchfield, Richard Bilson, Thierry Delisle, Andrew Beach and Brice Dobry on the features described in this paper,and thank Magnus Madsen for feedback on the writing.2848 Funding for this project has been provided by Huawei Ltd.\ (\url{http://www.huawei.com}), and Aaron Moss and Peter Buhr are partially funded by the Natural Sciences and Engineering Research Council of Canada.2908 The authors would like to recognize the design assistance of Glen Ditchfield, Richard Bilson, Thierry Delisle, Andrew Beach, and Brice Dobry on the features described in this paper and thank Magnus Madsen for feedback on the writing. 
2909 Funding for this project was provided by Huawei Ltd (\url{http://www.huawei.com}), and Aaron Moss and Peter Buhr were partially funded by the Natural Sciences and Engineering Research Council of Canada. 2849 2910 2850 2911 {% 2851 2912 \fontsize{9bp}{12bp}\selectfont% 2913 \vspace*{-3pt} 2852 2914 \bibliography{pl} 2853 2915 }% … … 2928 2990 2929 2991 2992 \enlargethispage{1000pt} 2930 2993 \subsection{\CFA} 2931 2994 \label{s:CforallStack} … … 2994 3057 2995 3058 3059 \newpage 2996 3060 \subsection{\CC} 2997 3061 -
doc/proposals/ctordtor/Makefile
r7951100 rb067d9b 1 ## Define the appropriateconfiguration variables.1 ## Define the configuration variables. 2 2 3 MACROS = ../../LaTeXmacros 4 BIB = ../../bibliography 3 Build = build 4 Figures = figures 5 Macros = ../../LaTeXmacros 6 Bib = ../../bibliography 5 7 6 TeXLIB = .:$ (MACROS):$(MACROS)/listings:$(MACROS)/enumitem:$(BIB)/:7 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error 8 TeXLIB = .:${Macros}:${MACROS}/listings:${MACROS}/enumitem:${Bib}/: 9 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build} 8 10 BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex 11 12 MAKEFLAGS = --no-print-directory # --silent 13 VPATH = ${Build} ${Figures} 9 14 10 15 ## Define the text source files. … … 29 34 30 35 DOCUMENT = ctor.pdf 36 BASE = ${basename ${DOCUMENT}} 31 37 32 38 # Directives # 39 40 .PHONY : all clean # not file names 33 41 34 42 all : ${DOCUMENT} 35 43 36 44 clean : 37 rm -f *.bbl *.aux *.dvi *.idx *.ilg *.ind *.brf *.out *.log *.toc *.blg *.pstex_t *.cf \ 38 ${FIGURES} ${PICTURES} ${PROGRAMS} ${GRAPHS} ${basename ${DOCUMENT}}.ps ${DOCUMENT} 45 @rm -frv ${DOCUMENT} ${BASE}.ps ${Build} 39 46 40 47 # File Dependencies # 41 48 42 ${DOCUMENT} : ${ basename ${DOCUMENT}}.ps49 ${DOCUMENT} : ${BASE}.ps 43 50 ps2pdf $< 44 51 45 ${ basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi46 dvips $ < -o $@52 ${BASE}.ps : ${BASE}.dvi 53 dvips ${Build}/$< -o $@ 47 54 48 ${ basename ${DOCUMENT}}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex\49 $ (MACROS)/common.tex $(MACROS)/indexstyle $(BIB)/cfa.bib55 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 56 ${Macros}/common.tex ${Macros}/indexstyle ${Bib}/pl.bib | ${Build} 50 57 # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run. 51 if [ ! -r ${basename $@}.ind ] ; then touch${basename $@}.ind ; fi58 #if [ ! 
-r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi 52 59 # Must have *.aux file containing citations for bibtex 53 60 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi 54 -${BibTeX} ${ basename $@}55 # Some citations reference others so run stepsagain to resolve these citations61 -${BibTeX} ${Build}/${basename $@} 62 # Some citations reference others so run again to resolve these citations 56 63 ${LaTeX} ${basename $@}.tex 57 -${BibTeX} ${ basename $@}64 -${BibTeX} ${Build}/${basename $@} 58 65 # Make index from *.aux entries and input index at end of document 59 makeindex -s $(MACROS)/indexstyle ${basename $@}.idx 66 #makeindex -s ${Macros}/indexstyle ${Build}/${basename $@}.idx 67 # Run again to finish citations 60 68 ${LaTeX} ${basename $@}.tex 61 69 # Run again to get index title into table of contents … … 67 75 ## Define the default recipes. 68 76 69 %.tex : %.fig 70 fig2dev -L eepic $< > $@77 ${Build}: 78 mkdir -p ${Build} 71 79 72 %. ps : %.fig73 fig2dev -L ps $< >$@80 %.tex : %.fig | ${Build} 81 fig2dev -L eepic $< > ${Build}/$@ 74 82 75 %.pstex : %.fig 76 fig2dev -L pstex $< > $@ 77 fig2dev -L pstex_t -p $@ $< > $@_t 83 %.ps : %.fig | ${Build} 84 fig2dev -L ps $< > ${Build}/$@ 85 86 %.pstex : %.fig | ${Build} 87 fig2dev -L pstex $< > ${Build}/$@ 88 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t 78 89 79 90 # Local Variables: # -
doc/proposals/ctordtor/ctor.tex
r7951100 rb067d9b 1 % inline code ©...© (copyright symbol) emacs: C-q M-)2 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-.3 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_4 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-"5 % LaTex escape §...§ (section symbol) emacs: C-q M-'6 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^7 % math escape $...$ (dollar symbol)8 9 1 \documentclass[twoside,11pt]{article} 10 2 … … 15 7 \usepackage{textcomp} 16 8 \usepackage[latin1]{inputenc} 9 17 10 \usepackage{fullpage,times,comment} 18 11 \usepackage{epic,eepic} 19 \usepackage{upquote} % switch curled `'" to straight12 \usepackage{upquote} % switch curled `'" to straight 20 13 \usepackage{calc} 21 14 \usepackage{xspace} 22 15 \usepackage{graphicx} 23 \usepackage{varioref} % extended references24 \usepackage{listings} % format program code25 \usepackage[flushmargin]{footmisc} % support label/reference in footnote16 \usepackage{varioref} % extended references 17 \usepackage{listings} % format program code 18 \usepackage[flushmargin]{footmisc} % support label/reference in footnote 26 19 \usepackage{latexsym} % \Box glyph 27 20 \usepackage{mathptmx} % better math font with "times" … … 34 27 \renewcommand{\UrlFont}{\small\sf} 35 28 36 \setlength{\topmargin}{-0.45in} % move running title into header29 \setlength{\topmargin}{-0.45in} % move running title into header 37 30 \setlength{\headsep}{0.25in} 38 31 … … 43 36 44 37 \interfootnotelinepenalty=10000 38 39 \CFAStyle % use default CFA format-style 40 % inline code ©...© (copyright symbol) emacs: C-q M-) 41 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-. 
42 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_ 43 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-" 44 % LaTex escape §...§ (section symbol) emacs: C-q M-' 45 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^ 46 % math escape $...$ (dollar symbol) 47 45 48 46 49 \title{ … … 83 86 \thispagestyle{plain} 84 87 \pagenumbering{arabic} 85 86 88 87 89 -
doc/proposals/flags.md
r7951100 rb067d9b 60 60 ``` 61 61 FunFlags f = some_val(); 62 if ( f ) { sout | "f has some flag(s) set" | endl; }63 if ( f & FOO ) { sout | "f has FOO set" | endl; }62 if ( f ) { sout | "f has some flag(s) set"; } 63 if ( f & FOO ) { sout | "f has FOO set"; } 64 64 f |= FOO; // set FOO 65 65 f -= FOO; // unset FOO … … 88 88 ``` 89 89 FunFlags f = some_val(); 90 if ( f.FOO ) { sout | "f has FOO set" | endl; }90 if ( f.FOO ) { sout | "f has FOO set"; } 91 91 f.FOO = true; // set FOO 92 92 f.FOO = false; // unset FOO -
doc/proposals/tuples/Makefile
r7951100 rb067d9b 1 ## Define the appropriateconfiguration variables.1 ## Define the configuration variables. 2 2 3 MACROS = ../../LaTeXmacros 4 BIB = ../../bibliography 3 Build = build 4 Figures = figures 5 Macros = ../../LaTeXmacros 6 Bib = ../../bibliography 5 7 6 TeXLIB = .:$ (MACROS):$(MACROS)/listings:$(MACROS)/enumitem:$(BIB)/:7 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error 8 TeXLIB = .:${Macros}:${MACROS}/listings:${MACROS}/enumitem:${Bib}/: 9 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build} 8 10 BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex 11 12 MAKEFLAGS = --no-print-directory --silent # 13 VPATH = ${Build} ${Figures} 9 14 10 15 ## Define the text source files. … … 29 34 30 35 DOCUMENT = tuples.pdf 36 BASE = ${basename ${DOCUMENT}} 31 37 32 38 # Directives # 39 40 .PHONY : all clean # not file names 33 41 34 42 all : ${DOCUMENT} 35 43 36 44 clean : 37 rm -f *.bbl *.aux *.dvi *.idx *.ilg *.ind *.brf *.out *.log *.toc *.blg *.pstex_t *.cf \ 38 ${FIGURES} ${PICTURES} ${PROGRAMS} ${GRAPHS} ${basename ${DOCUMENT}}.ps ${DOCUMENT} 45 @rm -frv ${DOCUMENT} ${BASE}.ps ${Build} 39 46 40 47 # File Dependencies # 41 48 42 ${DOCUMENT} : ${ basename ${DOCUMENT}}.ps49 ${DOCUMENT} : ${BASE}.ps 43 50 ps2pdf $< 44 51 45 ${ basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi46 dvips $ < -o $@52 ${BASE}.ps : ${BASE}.dvi 53 dvips ${Build}/$< -o $@ 47 54 48 ${ basename ${DOCUMENT}}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex\49 $ (MACROS)/common.tex $(MACROS)/indexstyle $(BIB)/cfa.bib55 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 56 ${Macros}/common.tex ${Macros}/indexstyle ${Bib}/pl.bib | ${Build} 50 57 # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run. 51 if [ ! -r ${basename $@}.ind ] ; then touch${basename $@}.ind ; fi58 #if [ ! 
-r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi 52 59 # Must have *.aux file containing citations for bibtex 53 60 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi 54 -${BibTeX} ${ basename $@}55 # Some citations reference others so run stepsagain to resolve these citations61 -${BibTeX} ${Build}/${basename $@} 62 # Some citations reference others so run again to resolve these citations 56 63 ${LaTeX} ${basename $@}.tex 57 -${BibTeX} ${ basename $@}64 -${BibTeX} ${Build}/${basename $@} 58 65 # Make index from *.aux entries and input index at end of document 59 makeindex -s $(MACROS)/indexstyle ${basename $@}.idx 66 #makeindex -s ${Macros}/indexstyle ${Build}/${basename $@}.idx 67 # Run again to finish citations 60 68 ${LaTeX} ${basename $@}.tex 61 69 # Run again to get index title into table of contents … … 67 75 ## Define the default recipes. 68 76 69 %.tex : %.fig 70 fig2dev -L eepic $< > $@77 ${Build}: 78 mkdir -p ${Build} 71 79 72 %. ps : %.fig73 fig2dev -L ps $< >$@80 %.tex : %.fig | ${Build} 81 fig2dev -L eepic $< > ${Build}/$@ 74 82 75 %.pstex : %.fig 76 fig2dev -L pstex $< > $@ 77 fig2dev -L pstex_t -p $@ $< > $@_t 83 %.ps : %.fig | ${Build} 84 fig2dev -L ps $< > ${Build}/$@ 85 86 %.pstex : %.fig | ${Build} 87 fig2dev -L pstex $< > ${Build}/$@ 88 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t 78 89 79 90 # Local Variables: # -
doc/proposals/tuples/tuples.tex
r7951100 rb067d9b 1 % inline code ©...© (copyright symbol) emacs: C-q M-)2 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-.3 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_4 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-"5 % LaTex escape §...§ (section symbol) emacs: C-q M-'6 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^7 % math escape $...$ (dollar symbol)8 9 1 \documentclass[twoside,11pt]{article} 10 2 … … 15 7 \usepackage{textcomp} 16 8 \usepackage[latin1]{inputenc} 9 17 10 \usepackage{fullpage,times,comment} 18 11 \usepackage{epic,eepic} 19 \usepackage{upquote} % switch curled `'" to straight12 \usepackage{upquote} % switch curled `'" to straight 20 13 \usepackage{calc} 21 14 \usepackage{xspace} 22 15 \usepackage{graphicx} 23 \usepackage{varioref} % extended references24 \usepackage{listings} % format program code25 \usepackage[flushmargin]{footmisc} % support label/reference in footnote16 \usepackage{varioref} % extended references 17 \usepackage{listings} % format program code 18 \usepackage[flushmargin]{footmisc} % support label/reference in footnote 26 19 \usepackage{latexsym} % \Box glyph 27 20 \usepackage{mathptmx} % better math font with "times" … … 34 27 \renewcommand{\UrlFont}{\small\sf} 35 28 36 \setlength{\topmargin}{-0.45in} % move running title into header29 \setlength{\topmargin}{-0.45in} % move running title into header 37 30 \setlength{\headsep}{0.25in} 38 31 … … 42 35 43 36 \interfootnotelinepenalty=10000 37 38 \CFAStyle % use default CFA format-style 39 % inline code ©...© (copyright symbol) emacs: C-q M-) 40 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-. 41 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_ 42 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-" 43 % LaTex escape §...§ (section symbol) emacs: C-q M-' 44 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^ 45 % math escape $...$ (dollar symbol) 46 44 47 45 48 \title{ -
doc/proposals/user_conversions.md
r7951100 rb067d9b 5 5 There is also a set of _explicit_ conversions that are only allowed through a 6 6 cast expression. 7 Based on Glen's notes on conversions [1], I propose that safe and unsafe 8 conversions be expressed as constructor variants, though I make explicit 9 (cast) conversions a constructor variant as well rather than a dedicated 10 operator. 7 I propose that safe, unsafe, and explicit (cast) conversions be expressed as 8 constructor variants. 11 9 Throughout this article, I will use the following operator names for 12 10 constructors and conversion functions from `From` to `To`: 13 11 14 void ?{} ( To*, To ); // copy constructor 15 void ?{} ( To*, From ); // explicit constructor 16 void ?{explicit} ( To*, From ); // explicit cast conversion 17 void ?{safe} ( To*, From ); // implicit safe conversion 18 void ?{unsafe} ( To*, From ); // implicit unsafe conversion 19 20 [1] http://plg.uwaterloo.ca/~cforall/Conversions/index.html 21 22 Glen's design made no distinction between constructors and unsafe implicit 12 void ?{} ( To&, To ); // copy constructor 13 void ?{} ( To&, From ); // explicit constructor 14 void ?{explicit} ( To&, From ); // explicit cast conversion 15 void ?{safe} ( To&, From ); // implicit safe conversion 16 void ?{unsafe} ( To&, From ); // implicit unsafe conversion 17 18 It has been suggested that all constructors would define unsafe implicit 23 19 conversions; this is elegant, but interacts poorly with tuples. 
24 20 Essentially, without making this distinction, a constructor like the following … … 26 22 multiplying the space of possible interpretations of all functions: 27 23 28 void ?{}( Coord *this, int x, int y );24 void ?{}( Coord& this, int x, int y ); 29 25 30 26 That said, it would certainly be possible to make a multiple-argument implicit … … 32 28 used infrequently: 33 29 34 void ?{unsafe}( Coord *this, int x, int y );30 void ?{unsafe}( Coord& this, int x, int y ); 35 31 36 32 An alternate possibility would be to only count two-arg constructors 37 `void ?{} ( To *, From )` as unsafe conversions; under this semantics, safe and33 `void ?{} ( To&, From )` as unsafe conversions; under this semantics, safe and 38 34 explicit conversions should also have a compiler-enforced restriction to 39 35 ensure that they are two-arg functions (this restriction may be valuable … … 43 39 is convertable to `To`. 44 40 If user-defined conversions are not added to the language, 45 `void ?{} ( To *, From )` may be a suitable representation, relying on41 `void ?{} ( To&, From )` may be a suitable representation, relying on 46 42 conversions on the argument types to account for transitivity. 47 On the other hand, `To*` should perhaps match its target type exactly, so 48 another assertion syntax specific to conversions may be required, e.g. 49 `From -> To`. 43 Since `To&` should be an exact match on `To`, this should put all the implicit 44 conversions on the RHS. 45 On the other hand, under some models (like [1]), implicit conversions are not 46 allowed in assertion parameters, so another assertion syntax specific to 47 conversions may be required, e.g. `From -> To`. 48 It has also been suggested that, for programmer control, no implicit 49 conversions (except, possibly, for polymorphic specialization) should be 50 allowed in resolution of cast operators. 
51 52 [1] ../working/assertion_resolution.md 50 53 51 54 ### Constructor Idiom ### … … 53 56 that we can use the full range of Cforall features for conversions, including 54 57 polymorphism. 55 Glen [1] defines a _constructor idiom_ that can be used to create chains of 56 safe conversions without duplicating code; given a type `Safe` which members 57 of another type `From` can be directly converted to, the constructor idiom 58 allows us to write a conversion for any type `To` which `Safe` converts to: 59 60 forall(otype To | { void ?{safe}( To*, Safe ) }) 61 void ?{safe}( To *this, From that ) { 58 In an earlier version of this proposal, Glen Ditchfield defines a 59 _constructor idiom_ that can be used to create chains of safe conversions 60 without duplicating code; given a type `Safe` which members of another type 61 `From` can be directly converted to, the constructor idiom allows us to write 62 a conversion for any type `To` which `Safe` converts to: 63 64 forall(otype To | { void ?{safe}( To&, Safe ) }) 65 void ?{safe}( To& this, From that ) { 62 66 Safe tmp = /* some expression involving that */; 63 *this = tmp; // usesassertion parameter67 this{ tmp }; // initialize from assertion parameter 64 68 } 65 69 … … 67 71 unsafe conversions. 68 72 73 Glen's original suggestion said the copy constructor for `To` should also be 74 accepted as a resolution for `void ?{safe}( To&, Safe )` (`Safe` == `To`), 75 allowing this same code to be used for the single-step conversion as well. 76 This proposal does come at the cost of an extra copy initialization of the 77 target value, though. 
78 79 Contrariwise, if a monomorphic conversion from `From` to `Safe` is written, 80 e.g: 81 82 void ?{safe}( Safe& this, From that ) { 83 this{ /* some parameters involving that */ }; 84 } 85 86 Then the code for a transitive conversion from `From` to any `To` type 87 convertable from `Safe` is written: 88 89 forall(otype To | { void ?{safe}( To&, Safe ) }) 90 void ?{safe}( To& this, From that ) { 91 Safe tmp = that; // uses monomorphic conversion 92 this{ tmp }; // initialize from assertion parameter 93 } 94 95 Given the entirely-boilerplate nature of this code, but negative performance 96 implications of the unmodified constructor idiom, it might be fruitful to have 97 transitive and single step conversion operators, and let CFA build the 98 transitive conversions; some possible names: 99 100 void ?{safe} (To&, From); void ?{final safe} (To&, From); // single-step 101 void ?{safe*} (To&, From); void ?{safe} (To&, From); // transitive 102 69 103 What selective non-use of the constructor idiom gives us is the ability to 70 104 define a conversion that may only be the *last* conversion in a chain of such. 71 Constructing a conversion graph able to unambiguously represent the full 72 hierarchy of implicit conversions in C is provably impossible using only 73 single-step conversions with no additional information (see Appendix A), but 74 this mechanism is sufficiently powerful (see [1], though the design there has 75 some minor bugs; the general idea is to use the constructor idiom to define 76 two chains of conversions, one among the signed integral types, another among 77 the unsigned, and to use monomorphic conversions to allow conversions between 78 signed and unsigned integer types). 105 One use for this is to solve the problem that `explicit` conversions were 106 added to C++ for, that of conversions to `bool` chaining to become conversions 107 to any arithmetic type. 
108 Another use is to unambiguously represent the full hierarchy of implicit 109 conversions in C by making sign conversions non-transitive, allowing the 110 compiler to resolve e.g. `int -> unsigned long` as 111 `int -> long -> unsigned long` over `int -> unsigned int -> unsigned long`. 112 See [2] for more details. 113 114 [2] ../working/glen_conversions/index.html#usual 79 115 80 116 ### Appendix A: Partial and Total Orders ### … … 153 189 convert from `int` to `unsigned long`, so we just put in a direct conversion 154 190 and make the compiler smart enough to figure out the costs" - this is the 155 approach taken by the existing compi pler, but given that in a user-defined191 approach taken by the existing compiler, but given that in a user-defined 156 192 conversion proposal the users can build an arbitrary graph of conversions, 157 193 this case still needs to be handled. … … 160 196 exists a chain of conversions from `a` to `b` (see Appendix A for description 161 197 of preorders and related constructs). 162 This preorder corresponds roughlyto a more usual type-theoretic concept of198 This preorder roughly corresponds to a more usual type-theoretic concept of 163 199 subtyping ("if I can convert `a` to `b`, `a` is a more specific type than 164 200 `b`"); however, since this graph is arbitrary, it may contain cycles, so if … … 192 228 and so is considered to be the nearer type. 193 229 By transitivity, then, the conversion from `X` to `Y2` should be cheaper than 194 the conversion from `X` to `W`, but in this case the ` X` and `W` are230 the conversion from `X` to `W`, but in this case the `Y2` and `W` are 195 231 incomparable by the conversion preorder, so the tie is broken by the shorter 196 232 path from `X` to `W` in favour of `W`, contradicting the transitivity property -
doc/refrat/Makefile
r7951100 rb067d9b 53 53 dvips ${Build}/$< -o $@ 54 54 55 ${BASE}.dvi : Makefile ${ Build} ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \56 ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib 55 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 56 ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib | ${Build} 57 57 # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run. 58 58 if [ ! -r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi … … 78 78 mkdir -p ${Build} 79 79 80 %.tex : %.fig ${Build}80 %.tex : %.fig | ${Build} 81 81 fig2dev -L eepic $< > ${Build}/$@ 82 82 83 %.ps : %.fig ${Build}83 %.ps : %.fig | ${Build} 84 84 fig2dev -L ps $< > ${Build}/$@ 85 85 86 %.pstex : %.fig ${Build}86 %.pstex : %.fig | ${Build} 87 87 fig2dev -L pstex $< > ${Build}/$@ 88 88 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
doc/theses/aaron_moss_PhD/comp_II/Makefile
r7951100 rb067d9b 32 32 33 33 DOCUMENT = comp_II.pdf 34 BASE = ${basename ${DOCUMENT}} 34 35 35 36 # Directives # … … 40 41 41 42 clean : 42 @rm -frv ${DOCUMENT} ${ basename ${DOCUMENT}}.ps ${Build}43 @rm -frv ${DOCUMENT} ${BASE}.ps ${Build} 43 44 44 45 # File Dependencies # 45 46 46 ${DOCUMENT} : ${ basename ${DOCUMENT}}.ps47 ${DOCUMENT} : ${BASE}.ps 47 48 ps2pdf $< 48 49 49 ${ basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi50 ${BASE}.ps : ${BASE}.dvi 50 51 dvips ${Build}/$< -o $@ 51 52 52 ${ basename ${DOCUMENT}}.dvi : Makefile ${Build}${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \53 ${Macros}/common.tex ${Macros}/indexstyle ../../../bibliography/pl.bib 53 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 54 ${Macros}/common.tex ${Macros}/indexstyle ../../../bibliography/pl.bib | ${Build} 54 55 # Must have *.aux file containing citations for bibtex 55 56 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi … … 66 67 mkdir -p ${Build} 67 68 68 %.tex : %.fig 69 %.tex : %.fig ${Build} 69 70 fig2dev -L eepic $< > ${Build}/$@ 70 71 71 %.ps : %.fig 72 %.ps : %.fig | ${Build} 72 73 fig2dev -L ps $< > ${Build}/$@ 73 74 74 %.pstex : %.fig 75 %.pstex : %.fig | ${Build} 75 76 fig2dev -L pstex $< > ${Build}/$@ 76 77 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
doc/theses/thierry_delisle_MMath/Makefile
r7951100 rb067d9b 51 51 52 52 DOCUMENT = thesis.pdf 53 BASE = ${basename ${DOCUMENT}} 53 54 54 55 # Directives # … … 59 60 60 61 clean : 61 @rm -frv ${DOCUMENT} ${ basename ${DOCUMENT}}.ps ${Build}62 @rm -frv ${DOCUMENT} ${BASE}.ps ${Build} 62 63 63 64 # File Dependencies # 64 65 65 ${DOCUMENT} : ${ basename ${DOCUMENT}}.ps66 ${DOCUMENT} : ${BASE}.ps 66 67 ps2pdf $< 67 68 68 ${ basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi69 ${BASE}.ps : ${BASE}.dvi 69 70 dvips ${Build}/$< -o $@ 70 71 71 ${ basename ${DOCUMENT}}.dvi : Makefile ${Build}${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \72 ${Macros}/common.tex ${Macros}/indexstyle annex/local.bib ../../bibliography/pl.bib 72 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 73 ${Macros}/common.tex ${Macros}/indexstyle annex/local.bib ../../bibliography/pl.bib | ${Build} 73 74 # Must have *.aux file containing citations for bibtex 74 75 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi … … 91 92 fig2dev -L eepic $< > ${Build}/$@ 92 93 93 %.ps : %.fig ${Build}94 %.ps : %.fig | ${Build} 94 95 fig2dev -L ps $< > ${Build}/$@ 95 96 96 %.pstex : %.fig ${Build}97 %.pstex : %.fig | ${Build} 97 98 fig2dev -L pstex $< > ${Build}/$@ 98 99 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
doc/user/Makefile
r7951100 rb067d9b 4 4 Figures = figures 5 5 Macros = ../LaTeXmacros 6 TeXLIB = .:${Macros}:${Build}: ../bibliography:6 TeXLIB = .:${Macros}:${Build}: 7 7 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build} 8 BibTeX = BIBINPUTS= ${TeXLIB}&& export BIBINPUTS && bibtex8 BibTeX = BIBINPUTS=../bibliography: && export BIBINPUTS && bibtex 9 9 10 10 MAKEFLAGS = --no-print-directory --silent # … … 51 51 # File Dependencies # 52 52 53 build/version: ../../configure | ${Build} 54 ../../configure --version | grep "cfa-cc configure" | grep -oEe "([0-9]+\.)+[0-9]+" > $@ 55 53 56 ${DOCUMENT} : ${BASE}.ps 54 57 ps2pdf $< … … 57 60 dvips ${Build}/$< -o $@ 58 61 59 ${BASE}.dvi : Makefile ${ Build} ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \60 ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib 62 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \ 63 ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib build/version | ${Build} 61 64 # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run. 62 65 if [ ! -r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi … … 76 79 ## Define the default recipes. 77 80 78 ${Build} :81 ${Build} : 79 82 mkdir -p ${Build} 80 83 81 %.tex : %.fig ${Build}84 %.tex : %.fig | ${Build} 82 85 fig2dev -L eepic $< > ${Build}/$@ 83 86 84 %.ps : %.fig ${Build}87 %.ps : %.fig | ${Build} 85 88 fig2dev -L ps $< > ${Build}/$@ 86 89 87 %.pstex : %.fig ${Build}90 %.pstex : %.fig | ${Build} 88 91 fig2dev -L pstex $< > ${Build}/$@ 89 92 fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t -
doc/user/user.tex
r7951100 rb067d9b 1 1 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -*- Mode: Latex -*- %%%%%%%%%%%%%%%%%%%%%%%%%%%% 2 %% 2 %% 3 3 %% Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo 4 4 %% 5 5 %% The contents of this file are covered under the licence agreement in the 6 6 %% file "LICENCE" distributed with Cforall. 7 %% 8 %% user.tex -- 9 %% 7 %% 8 %% user.tex -- 9 %% 10 10 %% Author : Peter A. Buhr 11 11 %% Created On : Wed Apr 6 14:53:29 2016 12 12 %% Last Modified By : Peter A. Buhr 13 %% Last Modified On : S un May 6 10:33:53 201814 %% Update Count : 3 31913 %% Last Modified On : Sat Jul 13 18:36:18 2019 14 %% Update Count : 3876 15 15 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 16 16 … … 72 72 73 73 % Names used in the document. 74 \newcommand{\Version}{\input{ ../../version}}74 \newcommand{\Version}{\input{build/version}} 75 75 \newcommand{\Textbf}[2][red]{{\color{#1}{\textbf{#2}}}} 76 76 \newcommand{\Emph}[2][red]{{\color{#1}\textbf{\emph{#2}}}} … … 146 146 \CFA adds many modern programming-language features that directly lead to increased \emph{\Index{safety}} and \emph{\Index{productivity}}, while maintaining interoperability with existing C programs and achieving similar performance. 147 147 Like C, \CFA is a statically typed, procedural (non-\Index{object-oriented}) language with a low-overhead runtime, meaning there is no global \Index{garbage-collection}, but \Index{regional garbage-collection}\index{garbage-collection!regional} is possible. 148 The primary new features include p arametric-polymorphic routines and types, exceptions, concurrency, and modules.148 The primary new features include polymorphic routines and types, exceptions, concurrency, and modules. 
149 149 150 150 One of the main design philosophies of \CFA is to ``\Index{describe not prescribe}'', which means \CFA tries to provide a pathway from low-level C programming to high-level \CFA programming, but it does not force programmers to ``do the right thing''. … … 155 155 As well, new programs can be written in \CFA using a combination of C and \CFA features. 156 156 157 \Index*[C++]{\CC{}} had a similar goal 30 years ago, allowing object-oriented programming to be incrementally added to C.158 However, \CC currently has the disadvantages of a strong object-oriented bias, multiple legacy design-choices that cannot be updated, and active divergence of the language model from C, all of which requiressignificant effort and training to incrementally add \CC to a C-based project.157 \Index*[C++]{\CC{}}~\cite{c++:v1} had a similar goal 30 years ago, allowing object-oriented programming to be incrementally added to C. 158 However, \CC currently has the disadvantages of a strong object-oriented bias, multiple legacy design-choices that cannot be updated, and active divergence of the language model from C, requiring significant effort and training to incrementally add \CC to a C-based project. 159 159 In contrast, \CFA has 30 years of hindsight and a clean starting point. 160 160 161 161 Like \Index*[C++]{\CC{}}, there may be both an old and new ways to achieve the same effect. 162 For example, the following programs compare the \CFA, C, and \CC I/O mechanisms, where the programs output the same result.163 \begin{c quote}162 For example, the following programs compare the C, \CFA, and \CC I/O mechanisms, where the programs output the same result. 
163 \begin{center} 164 164 \begin{tabular}{@{}l@{\hspace{1.5em}}l@{\hspace{1.5em}}l@{}} 165 165 \multicolumn{1}{c@{\hspace{1.5em}}}{\textbf{C}} & \multicolumn{1}{c}{\textbf{\CFA}} & \multicolumn{1}{c}{\textbf{\CC}} \\ … … 178 178 int main( void ) { 179 179 int x = 0, y = 1, z = 2; 180 ®sout | x | y | z | endl;®§\indexc{sout}§180 ®sout | x | y | z;®§\indexc{sout}§ 181 181 } 182 182 \end{cfa} … … 191 191 \end{cfa} 192 192 \end{tabular} 193 \end{c quote}193 \end{center} 194 194 While the \CFA I/O looks similar to the \Index*[C++]{\CC{}} output style, there are important differences, such as automatic spacing between variables as in \Index*{Python} (see~\VRef{s:IOLibrary}). 195 195 196 196 197 \subsection{Background} 197 198 198 199 This document is a programmer reference-manual for the \CFA programming language. 199 The manual covers the core features of the language and runtime-system, with simple examples illustrating syntax and semantics of each feature.200 The manual covers the core features of the language and runtime-system, with simple examples illustrating syntax and semantics of features. 200 201 The manual does not teach programming, \ie how to combine the new constructs to build complex programs. 201 A reader should alreadyhave an intermediate knowledge of control flow, data structures, and concurrency issues to understand the ideas presented, as well as some experience programming in C/\CC.202 The reader must have an intermediate knowledge of control flow, data structures, and concurrency issues to understand the ideas presented, as well as some experience programming in C/\CC. 202 203 Implementers should refer to the \CFA Programming Language Specification for details about the language syntax and semantics. 203 204 Changes to the syntax and additional features are expected to be included in later revisions. 
… … 206 207 \section{Why fix C?} 207 208 208 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems (especially UNIX systems) to hobby projects.209 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from hobby projects to commercial operating-systems. 209 210 This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more. 210 211 Even with all its problems, C continues to be popular because it allows writing software at virtually any level in a computer system without restriction. 211 212 For system programming, where direct access to hardware, storage management, and real-time issues are a requirement, C is usually the only language of choice. 212 The TIOBE index~\cite{TIOBE} for March 2016 showed the following programming-language popularity: \Index*{Java} 20.5\%, C 14.5\%, \Index*[C++]{\CC{}} 6.7\%, \Csharp 4.3\%, \Index*{Python} 4.3\%, where the next 50 languages are less than 3\% eachwith a long tail.213 As well, for 30 years, C has been the number 1 and 2 most popular programming language:213 The TIOBE index~\cite{TIOBE} for July 2018 ranks the top five most \emph{popular} programming languages as \Index*{Java} 16\%, C 14\%, \Index*[C++]{\CC{}} 7.5\%, Python 6\%, Visual Basic 4\% = 47.5\%, where the next 50 languages are less than 4\% each, with a long tail. 
214 The top 3 rankings over the past 30 years are: 214 215 \begin{center} 215 \setlength{\tabcolsep}{1.5ex} 216 \begin{tabular}{@{}r|c|c|c|c|c|c|c@{}} 217 Ranking & 2016 & 2011 & 2006 & 2001 & 1996 & 1991 & 1986 \\ 218 \hline 219 Java & 1 & 1 & 1 & 3 & 29 & - & - \\ 220 \hline 221 \R{C} & \R{2} & \R{2} & \R{2} & \R{1} & \R{1} & \R{1} & \R{1} \\ 222 \hline 223 \CC & 3 & 3 & 3 & 2 & 2 & 2 & 7 \\ 216 \setlength{\tabcolsep}{10pt} 217 \begin{tabular}{@{}rccccccc@{}} 218 & 2018 & 2013 & 2008 & 2003 & 1998 & 1993 & 1988 \\ \hline 219 Java & 1 & 2 & 1 & 1 & 16 & - & - \\ 220 \R{C} & \R{2} & \R{1} & \R{2} & \R{2} & \R{1} & \R{1} & \R{1} \\ 221 \CC & 3 & 4 & 3 & 3 & 2 & 2 & 5 \\ 224 222 \end{tabular} 225 223 \end{center} 226 224 Hence, C is still an extremely important programming language, with double the usage of \Index*[C++]{\CC{}}; in many cases, \CC is often used solely as a better C. 227 225 Love it or hate it, C has been an important and influential part of computer science for 40 years and its appeal is not diminishing. 228 Unfortunately, C has many problems and omissions that make it an unacceptable programming language for modern needs.226 Nevertheless, C has many problems and omissions that make it an unacceptable programming language for modern needs. 229 227 230 228 As stated, the goal of the \CFA project is to engineer modern language-features into C in an evolutionary rather than revolutionary way. … … 236 234 These languages have different syntax and semantics from C, do not interoperate directly with C, and are not systems languages because of restrictive memory-management or garbage collection. 237 235 As a result, there is a significant learning curve to move to these languages, and C legacy-code must be rewritten. 238 These costs can be prohibitive for many companies with a large software-base in C/\CC, and a significant number of programmers require retraining to the new programming language. 
239 240 The result of this project is a language that is largely backwards compatible with \Index*[C11]{\Celeven{}}~\cite{C11}, but fixes many of the well known C problems while containing modern language-features. 241 Without significant extension to the C programming language, it is becoming unable to cope with the needs of modern programming problems and programmers; 236 These costs can be prohibitive for many companies with a large software-base in C/\CC, and a significant number of programmers require retraining in the new programming language. 237 238 The result of this project is a language that is largely backwards compatible with \Index*[C11]{\Celeven{}}~\cite{C11}, but fixes many of the well known C problems while adding modern language-features. 239 To achieve these goals required a significant engineering exercise, where we had to ``think inside the existing C box''. 240 Without these significant extension to C, it is unable to cope with the needs of modern programming problems and programmers; 242 241 as a result, it will fade into disuse. 243 242 Considering the large body of existing C code and programmers, there is significant impetus to ensure C is transformed into a modern programming language. … … 255 254 \begin{lstlisting} 256 255 ®forall( otype T )® T identity( T val ) { return val; } 257 int forty_two = identity( 42 ); §\C{// T is bound to int, forty\_two == 42}§256 int forty_two = identity( 42 ); §\C{// T is bound to int, forty\_two == 42}§ 258 257 \end{lstlisting} 259 258 % extending the C type system with parametric polymorphism and overloading, as opposed to the \Index*[C++]{\CC{}} approach of object-oriented extensions. 
… … 283 282 284 283 double key = 5.0, vals[10] = { /* 10 sorted floating values */ }; 285 double * val = (double *)bsearch( &key, vals, 10, sizeof(vals[0]), comp ); §\C{// search sorted array}§284 double * val = (double *)bsearch( &key, vals, 10, sizeof(vals[0]), comp ); §\C{// search sorted array}§ 286 285 \end{lstlisting} 287 286 which can be augmented simply with a polymorphic, type-safe, \CFA-overloaded wrappers: … … 292 291 293 292 forall( otype T | { int ?<?( T, T ); } ) unsigned int bsearch( T key, const T * arr, size_t size ) { 294 T * result = bsearch( key, arr, size ); §\C{// call first version}§295 return result ? result - arr : size; } §\C{// pointer subtraction includes sizeof(T)}§296 297 double * val = bsearch( 5.0, vals, 10 ); §\C{// selection based on return type}§293 T * result = bsearch( key, arr, size ); §\C{// call first version}§ 294 return result ? result - arr : size; } §\C{// pointer subtraction includes sizeof(T)}§ 295 296 double * val = bsearch( 5.0, vals, 10 ); §\C{// selection based on return type}§ 298 297 int posn = bsearch( 5.0, vals, 10 ); 299 298 \end{lstlisting} … … 307 306 \begin{lstlisting} 308 307 forall( dtype T | sized(T) ) T * malloc( void ) { return (T *)malloc( sizeof(T) ); } 309 int * ip = malloc(); §\C{// select type and size from left-hand side}§308 int * ip = malloc(); §\C{// select type and size from left-hand side}§ 310 309 double * dp = malloc(); 311 310 struct S {...} * sp = malloc(); … … 318 317 Whereas, \CFA wraps each of these routines into ones with the overloaded name ©abs©: 319 318 \begin{cfa} 320 char abs( char );321 ®extern "C" {® int abs( int ); ®}®§\C{// use default C routine for int}§322 long int abs( long int );323 long long int abs( long long int );324 float abs( float );325 double abs( double );326 long double abs( long double );327 float _Complex abs( float _Complex );328 double _Complex abs( double _Complex );329 long double _Complex abs( long double _Complex );319 char ®abs®( char ); 320 extern "C" 
{ int ®abs®( int ); } §\C{// use default C routine for int}§ 321 long int ®abs®( long int ); 322 long long int ®abs®( long long int ); 323 float ®abs®( float ); 324 double ®abs®( double ); 325 long double ®abs®( long double ); 326 float _Complex ®abs®( float _Complex ); 327 double _Complex ®abs®( double _Complex ); 328 long double _Complex ®abs®( long double _Complex ); 330 329 \end{cfa} 331 330 The problem is the name clash between the library routine ©abs© and the \CFA names ©abs©. 332 331 Hence, names appearing in an ©extern "C"© block have \newterm*{C linkage}. 333 332 Then overloading polymorphism uses a mechanism called \newterm{name mangling}\index{mangling!name} to create unique names that are different from C names, which are not mangled. 334 Hence, there is the same need as in \CC, to know if a name is a C or \CFA name, so it can be correctly formed.335 There is no way around this problem, other than C's approach of creating unique names for each pairing of operation and type .333 Hence, there is the same need, as in \CC, to know if a name is a C or \CFA name, so it can be correctly formed. 334 There is no way around this problem, other than C's approach of creating unique names for each pairing of operation and types. 336 335 337 336 This example strongly illustrates a core idea in \CFA: \emph{the \Index{power of a name}}. … … 350 349 \begin{description} 351 350 \item 352 \Indexc{-std=gnu 99}\index{compilation option!-std=gnu99@{©-std=gnu99©}}353 The 1999C standard plus GNU extensions.354 \item 355 \Indexc[deletekeywords=inline]{-fgnu89-inline}\index{compilation option!-fgnu89-inline@{\lstinline[deletekeywords=inline] @-fgnu89-inline@}}356 Use the traditional GNU semantics for inline routines in C 99mode, which allows inline routines in header files.351 \Indexc{-std=gnu11}\index{compilation option!-std=gnu11@{©-std=gnu11©}} 352 The 2011 C standard plus GNU extensions. 
353 \item 354 \Indexc[deletekeywords=inline]{-fgnu89-inline}\index{compilation option!-fgnu89-inline@{\lstinline[deletekeywords=inline]$-fgnu89-inline$}} 355 Use the traditional GNU semantics for inline routines in C11 mode, which allows inline routines in header files. 357 356 \end{description} 358 357 The following new \CFA options are available: … … 427 426 \begin{cfa} 428 427 #ifndef __CFORALL__ 429 #include <stdio.h>§\indexc{stdio.h}§ §\C{// C header file}§428 #include <stdio.h>§\indexc{stdio.h}§ §\C{// C header file}§ 430 429 #else 431 #include <fstream>§\indexc{fstream}§ §\C{// \CFA header file}§430 #include <fstream>§\indexc{fstream}§ §\C{// \CFA header file}§ 432 431 #endif 433 432 \end{cfa} 434 which conditionally includes the correct header file, if the program is compiled using \Indexc{gcc} or \Indexc{cfa}. 433 which conditionally includes the correct header file, if the program is compiled using \Indexc{gcc} or \Indexc{cfa}. 434 435 The \CFA translator has multiple steps. 436 The following flags control how the tranlator works, the stages run, and printing within a stage. 437 The majority of these flags are used by \CFA developers, but some are occasionally useful to programmers. 
438 \begin{description}[topsep=5pt,itemsep=0pt,parsep=0pt] 439 \item 440 \Indexc{-h}\index{translator option!-h@{©-h©}}, \Indexc{--help}\index{translator option!--help@{©--help©}} \, print help message 441 \item 442 \Indexc{-l}\index{translator option!-l@{©-l©}}, \Indexc{--libcfa}\index{translator option!--libcfa@{©--libcfa©}} \, generate libcfa.c 443 \item 444 \Indexc{-L}\index{translator option!-L@{©-L©}}, \Indexc{--linemarks}\index{translator option!--linemarks@{©--linemarks©}} \, generate line marks 445 \item 446 \Indexc{-m}\index{translator option!-m@{©-m©}}, \Indexc{--no-main}\index{translator option!--no-main@{©--no-main©}} \, do not replace main 447 \item 448 \Indexc{-N}\index{translator option!-N@{©-N©}}, \Indexc{--no-linemarks}\index{translator option!--no-linemarks@{©--no-linemarks©}} \, do not generate line marks 449 \item 450 \Indexc{-n}\index{translator option!-n@{©-n©}}, \Indexc{--no-prelude}\index{translator option!--no-prelude@{©--no-prelude©}} \, do not read prelude 451 \item 452 \Indexc{-p}\index{translator option!-p@{©-p©}}, \Indexc{--prototypes}\index{translator option!--prototypes@{©--prototypes©}} \, generate prototypes for prelude functions 453 \item 454 \Indexc{-P}\index{translator option!-P@{©-P©}}, \Indexc{--print}\index{translator option!--print@{©--print©}} \, one of: 455 \begin{description}[topsep=0pt,itemsep=0pt,parsep=0pt] 456 \item 457 \Indexc{altexpr}\index{translator option!-P@{©-P©}!©altexpr©}\index{translator option!--print@{©-print©}!©altexpr©} \, alternatives for expressions 458 \item 459 \Indexc{ascodegen}\index{translator option!-P@{©-P©}!©ascodegen©}\index{translator option!--print@{©-print©}!©ascodegen©} \, as codegen rather than AST 460 \item 461 \Indexc{ast}\index{translator option!-P@{©-P©}!©ast©}\index{translator option!--print@{©-print©}!©ast©} \, AST after parsing 462 \item 463 \Indexc{astdecl}\index{translator option!-P@{©-P©}!©astdecl©}\index{translator option!--print@{©-print©}!©astdecl©} \, AST after declaration 
validation pass 464 \item 465 \Indexc{asterr}\index{translator option!-P@{©-P©}!©asterr©}\index{translator option!--print@{©-print©}!©asterr©} \, AST on error 466 \item 467 \Indexc{astexpr}\index{translator option!-P@{©-P©}!©astexpr©}\index{translator option!--print@{©-print©}!©altexpr©} \, AST after expression analysis 468 \item 469 \Indexc{astgen}\index{translator option!-P@{©-P©}!©astgen©}\index{translator option!--print@{©-print©}!©astgen©} \, AST after instantiate generics 470 \item 471 \Indexc{box}\index{translator option!-P@{©-P©}!©box©}\index{translator option!--print@{©-print©}!©box©} \, before box step 472 \item 473 \Indexc{ctordtor}\index{translator option!-P@{©-P©}!©ctordtor©}\index{translator option!--print@{©-print©}!©ctordtor©} \, after ctor/dtor are replaced 474 \item 475 \Indexc{codegen}\index{translator option!-P@{©-P©}!©codegen©}\index{translator option!--print@{©-print©}!©codegen©} \, before code generation 476 \item 477 \Indexc{declstats}\index{translator option!-P@{©-P©}!©declstats©}\index{translator option!--print@{©-print©}!©declstats©} \, code property statistics 478 \item 479 \Indexc{parse}\index{translator option!-P@{©-P©}!©parse©}\index{translator option!--print@{©-print©}!©parse©} \, yacc (parsing) debug information 480 \item 481 \Indexc{pretty}\index{translator option!-P@{©-P©}!©pretty©}\index{translator option!--print@{©-print©}!©pretty©} \, prettyprint for ascodegen flag 482 \item 483 \Indexc{resolver}\index{translator option!-P@{©-P©}!©resolver©}\index{translator option!--print@{©-print©}!©resolver©} \, before resolver step 484 \item 485 \Indexc{rproto}\index{translator option!-P@{©-P©}!©rproto©}\index{translator option!--print@{©-print©}!©rproto©} \, resolver-proto instance 486 \item 487 \Indexc{rsteps}\index{translator option!-P@{©-P©}!©rsteps©}\index{translator option!--print@{©-print©}!©rsteps©} \, resolver steps 488 \item 489 \Indexc{symevt}\index{translator option!-P@{©-P©}!©symevt©}\index{translator 
option!--print@{©-print©}!©symevt©} \, symbol table events 490 \item 491 \Indexc{tree}\index{translator option!-P@{©-P©}!©tree©}\index{translator option!--print@{©-print©}!©tree©} \, parse tree 492 \item 493 \Indexc{tuple}\index{translator option!-P@{©-P©}!©tuple©}\index{translator option!--print@{©-print©}!©tuple©} \, after tuple expansion 494 \end{description} 495 \item 496 \Indexc{--prelude-dir} <directory> \, prelude directory for debug/nodebug 497 \item 498 \Indexc{-S}\index{translator option!-S@{©-S©}!©counters,heap,time,all,none©}, \Indexc{--statistics}\index{translator option!--statistics@{©--statistics©}!©counters,heap,time,all,none©} <option-list> \, enable profiling information: 499 \begin{description}[topsep=0pt,itemsep=0pt,parsep=0pt] 500 \item 501 \Indexc{counters,heap,time,all,none} 502 \end{description} 503 \item 504 \Indexc{-t}\index{translator option!-t@{©-t©}}, \Indexc{--tree}\index{translator option!--tree@{©--tree©}} build in tree 505 \end{description} 506 507 508 \section{Backquote Identifiers} 509 \label{s:BackquoteIdentifiers} 510 511 \CFA introduces several new keywords (see \VRef{s:CFAKeywords}) that can clash with existing C variable-names in legacy code. 512 Keyword clashes are accommodated by syntactic transformations using the \CFA backquote escape-mechanism: 513 \begin{cfa} 514 int ®`®otype®`® = 3; §\C{// make keyword an identifier}§ 515 double ®`®forall®`® = 3.5; 516 \end{cfa} 517 518 Existing C programs with keyword clashes can be converted by enclosing keyword identifiers in backquotes, and eventually the identifier name can be changed to a non-keyword name. 519 \VRef[Figure]{f:HeaderFileInterposition} shows how clashes in existing C header-files (see~\VRef{s:StandardHeaders}) can be handled using preprocessor \newterm{interposition}: ©#include_next© and ©-I filename©. 520 Several common C header-files with keyword clashes are fixed in the standard \CFA header-library, so there is a seamless programming-experience. 
521 522 \begin{figure} 523 \begin{cfa} 524 // include file uses the CFA keyword "with". 525 #if ! defined( with ) §\C{// nesting ?}§ 526 #define with ®`®with®`® §\C{// make keyword an identifier}§ 527 #define __CFA_BFD_H__ 528 #endif 529 530 ®#include_next <bfdlink.h> §\C{// must have internal check for multiple expansion}§ 531 ® 532 #if defined( with ) && defined( __CFA_BFD_H__ ) §\C{// reset only if set}§ 533 #undef with 534 #undef __CFA_BFD_H__ 535 #endif 536 \end{cfa} 537 \caption{Header-File Interposition} 538 \label{f:HeaderFileInterposition} 539 \end{figure} 435 540 436 541 … … 439 544 Numeric constants are extended to allow \Index{underscore}s\index{constant!underscore}, \eg: 440 545 \begin{cfa} 441 2®_®147®_®483®_®648; §\C{// decimal constant}§442 56®_®ul; §\C{// decimal unsigned long constant}§443 0®_®377; §\C{// octal constant}§444 0x®_®ff®_®ff; §\C{// hexadecimal constant}§445 0x®_®ef3d®_®aa5c; §\C{// hexadecimal constant}§446 3.141®_®592®_®654; §\C{// floating constant}§447 10®_®e®_®+1®_®00; §\C{// floating constant}§448 0x®_®ff®_®ff®_®p®_®3; §\C{// hexadecimal floating}§449 0x®_®1.ffff®_®ffff®_®p®_®128®_®l; §\C{// hexadecimal floating long constant}§450 L®_®§"\texttt{\textbackslash{x}}§®_®§\texttt{ff}§®_®§\texttt{ee}"§; §\C{// wide character constant}§546 2®_®147®_®483®_®648; §\C{// decimal constant}§ 547 56®_®ul; §\C{// decimal unsigned long constant}§ 548 0®_®377; §\C{// octal constant}§ 549 0x®_®ff®_®ff; §\C{// hexadecimal constant}§ 550 0x®_®ef3d®_®aa5c; §\C{// hexadecimal constant}§ 551 3.141®_®592®_®654; §\C{// floating constant}§ 552 10®_®e®_®+1®_®00; §\C{// floating constant}§ 553 0x®_®ff®_®ff®_®p®_®3; §\C{// hexadecimal floating}§ 554 0x®_®1.ffff®_®ffff®_®p®_®128®_®l; §\C{// hexadecimal floating long constant}§ 555 L®_®§"\texttt{\textbackslash{x}}§®_®§\texttt{ff}§®_®§\texttt{ee}"§; §\C{// wide character constant}§ 451 556 \end{cfa} 452 557 The rules for placement of underscores are: … … 469 574 470 575 471 \section{Backquote Identifiers} 472 
\label{s:BackquoteIdentifiers} 473 474 \CFA introduces several new keywords (see \VRef{s:CFAKeywords}) that can clash with existing C variable-names in legacy code. 475 Keyword clashes are accommodated by syntactic transformations using the \CFA backquote escape-mechanism: 476 \begin{cfa} 477 int ®`®otype®`® = 3; §\C{// make keyword an identifier}§ 478 double ®`®forall®`® = 3.5; 479 \end{cfa} 480 481 Existing C programs with keyword clashes can be converted by enclosing keyword identifiers in backquotes, and eventually the identifier name can be changed to a non-keyword name. 482 \VRef[Figure]{f:HeaderFileInterposition} shows how clashes in existing C header-files (see~\VRef{s:StandardHeaders}) can be handled using preprocessor \newterm{interposition}: ©#include_next© and ©-I filename©. 483 Several common C header-files with keyword clashes are fixed in the standard \CFA header-library, so there is a seamless programming-experience. 576 \section{Exponentiation Operator} 577 578 C, \CC, and Java (and many other programming languages) have no exponentiation operator\index{exponentiation!operator}\index{operator!exponentiation}, \ie $x^y$, and instead use a routine, like \Indexc{pow}, to perform the exponentiation operation. 579 \CFA extends the basic operators with the exponentiation operator ©?\?©\index{?\\?@©?\?©} and ©?\=?©\index{?\\=?@©\=?©}, as in, ©x \ y© and ©x \= y©, which means $x^y$ and $x \leftarrow x^y$. 580 The priority of the exponentiation operator is between the cast and multiplicative operators, so that ©w * (int)x \ (int)y * z© is parenthesized as ©((w * (((int)x) \ ((int)y))) * z)©. 581 582 As for \Index{division}, there are exponentiation operators for integral and floating types, including the builtin \Index{complex} types. 583 Integral exponentiation\index{exponentiation!unsigned integral} is performed with repeated multiplication\footnote{The multiplication computation is $O(\log y)$.} (or shifting if the exponent is 2). 
584 Overflow from large exponents or negative exponents returns zero. 585 Floating exponentiation\index{exponentiation!floating} is performed using \Index{logarithm}s\index{exponentiation!logarithm}, so the exponent cannot be negative. 586 \begin{cfa} 587 sout | 1 ®\® 0 | 1 ®\® 1 | 2 ®\® 8 | -4 ®\® 3 | 5 ®\® 3 | 5 ®\® 32 | 5L ®\® 32 | 5L ®\® 64 | -4 ®\® -3 | -4.0 ®\® -3 | 4.0 ®\® 2.1 588 | (1.0f+2.0fi) ®\® (3.0f+2.0fi); 589 1 1 256 -64 125 ®0® 3273344365508751233 ®0® ®0® -0.015625 18.3791736799526 0.264715-1.1922i 590 \end{cfa} 591 Note, ©5 ®\® 32© and ©5L ®\® 64© overflow, and ©-4 ®\® -3© is a fraction but stored in an integer so all three computations generate an integral zero. 592 Parentheses are necessary for complex constants or the expression is parsed as ©1.0f+®(®2.0fi \ 3.0f®)®+2.0fi©. 593 The exponentiation operator is available for all the basic types, but for user-defined types, only the integral-computation version is available. 594 \begin{cfa} 595 forall( otype OT | { void ?{}( OT & this, one_t ); OT ?*?( OT, OT ); } ) 596 OT ?®\®?( OT ep, unsigned int y ); 597 forall( otype OT | { void ?{}( OT & this, one_t ); OT ?*?( OT, OT ); } ) 598 OT ?®\®?( OT ep, unsigned long int y ); 599 \end{cfa} 600 The user type ©T© must define multiplication, one, ©1©, and ©*©. 601 602 603 \section{Control Structures} 604 605 \CFA identifies inconsistent, problematic, and missing control structures in C, and extends, modifies, and adds control structures to increase functionality and safety. 606 607 608 %\subsection{\texorpdfstring{\protect\lstinline@if@/\protect\lstinline@while@ Statement}{if Statement}} 609 \subsection{\texorpdfstring{\LstKeywordStyle{if}/\LstKeywordStyle{while} Statement}{if/while Statement}} 610 611 The ©if©/©while© expression allows declarations, similar to ©for© declaration expression. 612 (Does not make sense for ©do©-©while©.) 613 \begin{cfa} 614 if ( ®int x = f()® ) ... §\C{// x != 0}§ 615 if ( ®int x = f(), y = g()® ) ...
§\C{// x != 0 \&\& y != 0}§ 616 if ( ®int x = f(), y = g(); x < y® ) ... §\C{// relational expression}§ 617 if ( ®struct S { int i; } x = { f() }; x.i < 4® ) §\C{// relational expression}§ 618 619 while ( ®int x = f()® ) ... §\C{// x != 0}§ 620 while ( ®int x = f(), y = g()® ) ... §\C{// x != 0 \&\& y != 0}§ 621 while ( ®int x = f(), y = g(); x < y® ) ... §\C{// relational expression}§ 622 while ( ®struct S { int i; } x = { f() }; x.i < 4® ) ... §\C{// relational expression}§ 623 \end{cfa} 624 Unless a relational expression is specified, each variable is compared not equal to 0, which is the standard semantics for the ©if©/©while© expression, and the results are combined using the logical ©&&© operator.\footnote{\CC only provides a single declaration always compared not equal to 0.} 625 The scope of the declaration(s) is local to the @if@ statement but exist within both the ``then'' and ``else'' clauses. 626 627 628 \subsection{Loop Control} 629 630 The ©for©/©while©/©do-while© loop-control allows empty or simplified ranges (see Figure~\ref{f:LoopControlExamples}). 631 \begin{itemize} 632 \item 633 An empty conditional implies ©1©. 634 \item 635 The up-to range ©~©\index{~@©~©} means exclusive range [M,N). 636 \item 637 The up-to range ©~=©\index{~=@©~=©} means inclusive range [M,N]. 638 \item 639 The down-to range ©-~©\index{-~@©-~©} means exclusive range [N,M). 640 \item 641 The down-to range ©-~=©\index{-~=@©-~=©} means inclusive range [N,M]. 642 \item 643 ©@© means put nothing in this field. 644 \item 645 ©0© is the implicit start value; 646 \item 647 ©1© is the implicit increment value. 648 \item 649 The up-to range uses ©+=© for increment; 650 \item 651 The down-to range uses ©-=© for decrement. 652 \item 653 The loop index is polymorphic in the type of the start value or comparison value when start is implicitly ©0©. 654 \end{itemize} 484 655 485 656 \begin{figure} 486 \begin{cfa} 487 // include file uses the CFA keyword "with". 488 #if ! 
defined( with ) §\C{// nesting ?}§ 489 #define with ®`®with®`® §\C{// make keyword an identifier}§ 490 #define __CFA_BFD_H__ 491 #endif 492 493 ®#include_next <bfdlink.h> §\C{// must have internal check for multiple expansion}§ 494 ® 495 #if defined( with ) && defined( __CFA_BFD_H__ ) §\C{// reset only if set}§ 496 #undef with 497 #undef __CFA_BFD_H__ 498 #endif 499 \end{cfa} 500 \caption{Header-File Interposition} 501 \label{f:HeaderFileInterposition} 657 \begin{cquote} 658 \begin{tabular}{@{}l|l@{}} 659 \multicolumn{1}{c|}{loop control} & \multicolumn{1}{c}{output} \\ 660 \hline 661 \begin{cfa} 662 sout | nlOff; 663 while ®()® { sout | "empty"; break; } sout | nl; 664 do { sout | "empty"; break; } while ®()®; sout | nl; 665 for ®()® { sout | "empty"; break; } sout | nl; 666 for ( ®0® ) { sout | "A"; } sout | "zero" | nl; 667 for ( ®1® ) { sout | "A"; } sout | nl; 668 for ( ®10® ) { sout | "A"; } sout | nl; 669 for ( ®1 ~= 10 ~ 2® ) { sout | "B"; } sout | nl; 670 for ( ®10 -~= 1 ~ 2® ) { sout | "C"; } sout | nl; 671 for ( ®0.5 ~ 5.5® ) { sout | "D"; } sout | nl; 672 for ( ®5.5 -~ 0.5® ) { sout | "E"; } sout | nl; 673 for ( ®i; 10® ) { sout | i; } sout | nl; 674 for ( ®i; 1 ~= 10 ~ 2® ) { sout | i; } sout | nl; 675 for ( ®i; 10 -~= 1 ~ 2® ) { sout | i; } sout | nl; 676 for ( ®i; 0.5 ~ 5.5® ) { sout | i; } sout | nl; 677 for ( ®i; 5.5 -~ 0.5® ) { sout | i; } sout | nl; 678 for ( ®ui; 2u ~= 10u ~ 2u® ) { sout | ui; } sout | nl; 679 for ( ®ui; 10u -~= 2u ~ 2u® ) { sout | ui; } sout | nl; 680 enum { N = 10 }; 681 for ( ®N® ) { sout | "N"; } sout | nl; 682 for ( ®i; N® ) { sout | i; } sout | nl; 683 for ( ®i; N -~ 0® ) { sout | i; } sout | nl; 684 const int start = 3, comp = 10, inc = 2; 685 for ( ®i; start ~ comp ~ inc + 1® ) { sout | i; } sout | nl; 686 for ( ®i; 1 ~ @® ) { if ( i > 10 ) break; 687 sout | i; } sout | nl; 688 for ( ®i; 10 -~ @® ) { if ( i < 0 ) break; 689 sout | i; } sout | nl; 690 for ( ®i; 2 ~ @ ~ 2® ) { if ( i > 10 ) break; 691 sout | i; } sout | 
nl; 692 for ( ®i; 2.1 ~ @ ~ @® ) { if ( i > 10.5 ) break; 693 sout | i; i += 1.7; } sout | nl; 694 for ( ®i; 10 -~ @ ~ 2® ) { if ( i < 0 ) break; 695 sout | i; } sout | nl; 696 for ( ®i; 12.1 ~ @ ~ @® ) { if ( i < 2.5 ) break; 697 sout | i; i -= 1.7; } sout | nl; 698 for ( ®i; 5 : j; -5 ~ @® ) { sout | i | j; } sout | nl; 699 for ( ®i; 5 : j; -5 -~ @® ) { sout | i | j; } sout | nl; 700 for ( ®i; 5 : j; -5 ~ @ ~ 2® ) { sout | i | j; } sout | nl; 701 for ( ®i; 5 : j; -5 -~ @ ~ 2® ) { sout | i | j; } sout | nl; 702 for ( ®j; -5 ~ @ : i; 5® ) { sout | i | j; } sout | nl; 703 for ( ®j; -5 -~ @ : i; 5® ) { sout | i | j; } sout | nl; 704 for ( ®j; -5 ~ @ ~ 2 : i; 5® ) { sout | i | j; } sout | nl; 705 for ( ®j; -5 -~ @ ~ 2 : i; 5® ) { sout | i | j; } sout | nl; 706 for ( ®j; -5 -~ @ ~ 2 : i; 5 : k; 1.5 ~ @® ) { 707 sout | i | j | k; } sout | nl; 708 for ( ®j; -5 -~ @ ~ 2 : k; 1.5 ~ @ : i; 5® ) { 709 sout | i | j | k; } sout | nl; 710 for ( ®k; 1.5 ~ @ : j; -5 -~ @ ~ 2 : i; 5® ) { 711 sout | i | j | k; } sout | nl; 712 \end{cfa} 713 & 714 \begin{cfa} 715 716 empty 717 empty 718 empty 719 zero 720 A 721 A A A A A A A A A A 722 B B B B B 723 C C C C C 724 D D D D D 725 E E E E E 726 0 1 2 3 4 5 6 7 8 9 727 1 3 5 7 9 728 10 8 6 4 2 729 0.5 1.5 2.5 3.5 4.5 730 5.5 4.5 3.5 2.5 1.5 731 2 4 6 8 10 732 10 8 6 4 2 733 734 N N N N N N N N N N 735 0 1 2 3 4 5 6 7 8 9 736 10 9 8 7 6 5 4 3 2 1 737 738 3 6 9 739 740 1 2 3 4 5 6 7 8 9 10 741 742 10 9 8 7 6 5 4 3 2 1 0 743 744 2 4 6 8 10 745 746 2.1 3.8 5.5 7.2 8.9 747 748 10 8 6 4 2 0 749 750 12.1 10.4 8.7 7 5.3 3.6 751 0 -5 1 -4 2 -3 3 -2 4 -1 752 0 -5 1 -6 2 -7 3 -8 4 -9 753 0 -5 1 -3 2 -1 3 1 4 3 754 0 -5 1 -7 2 -9 3 -11 4 -13 755 0 -5 1 -4 2 -3 3 -2 4 -1 756 0 -5 1 -6 2 -7 3 -8 4 -9 757 0 -5 1 -3 2 -1 3 1 4 3 758 0 -5 1 -7 2 -9 3 -11 4 -13 759 760 0 -5 1.5 1 -7 2.5 2 -9 3.5 3 -11 4.5 4 -13 5.5 761 762 0 -5 1.5 1 -7 2.5 2 -9 3.5 3 -11 4.5 4 -13 5.5 763 764 0 -5 1.5 1 -7 2.5 2 -9 3.5 3 -11 4.5 4 -13 5.5 765 \end{cfa} 766 \end{tabular} 
767 \end{cquote} 768 \caption{Loop Control Examples} 769 \label{f:LoopControlExamples} 502 770 \end{figure} 503 771 504 772 505 \section{Exponentiation Operator} 506 507 C, \CC, and Java (and many other programming languages) have no exponentiation operator\index{exponentiation!operator}\index{operator!exponentiation}, \ie $x^y$, and instead use a routine, like \Indexc{pow}, to perform the exponentiation operation. 508 \CFA extends the basic operators with the exponentiation operator ©?\?©\index{?\\?@\lstinline@?\?@} and ©?\=?©\index{?\\=?@\lstinline@?\=?@}, as in, ©x \ y© and ©x \= y©, which means $x^y$ and $x \leftarrow x^y$. 509 The priority of the exponentiation operator is between the cast and multiplicative operators, so that ©w * (int)x \ (int)y * z© is parenthesized as ©((w * (((int)x) \ ((int)y))) * z)©. 510 511 As for \Index{division}, there are exponentiation operators for integral and floating types, including the builtin \Index{complex} types. 512 Unsigned integral exponentiation\index{exponentiation!unsigned integral} is performed with repeated multiplication\footnote{The multiplication computation is $O(\log y)$.} (or shifting if the base is 2). 513 Signed integral exponentiation\index{exponentiation!signed integral} is performed with repeated multiplication (or shifting if the base is 2), but yields a floating result because $x^{-y}=1/x^y$. 514 Hence, it is important to designate exponent integral-constants as unsigned or signed: ©3 \ 3u© return an integral result, while ©3 \ 3© returns a floating result. 515 Floating exponentiation\index{exponentiation!floating} is performed using \Index{logarithm}s\index{exponentiation!logarithm}, so the base cannot be negative. 
516 \begin{cfa} 517 sout | 2 ®\® 8u | 4 ®\® 3u | -4 ®\® 3u | 4 ®\® -3 | -4 ®\® -3 | 4.0 ®\® 2.1 | (1.0f+2.0fi) ®\® (3.0f+2.0fi) | endl; 518 256 64 -64 0.015625 -0.015625 18.3791736799526 0.264715-1.1922i 519 \end{cfa} 520 Parenthesis are necessary for the complex constants or the expresion is parsed as ©1.0f+(2.0fi \ 3.0f)+2.0fi©. 521 The exponentiation operator is available for all the basic types, but for user-defined types, only the integral-computation versions are available. 522 For returning an integral value, the user type ©T© must define multiplication, ©*©, and one, ©1©; 523 for returning a floating value, an additional divide of type ©T© into a ©double© returning a ©double© (©double ?/?( double, T )©) is necessary for negative exponents. 524 525 526 \section{\texorpdfstring{Labelled \protect\lstinline@continue@ / \protect\lstinline@break@}{Labelled continue / break}} 773 %\section{\texorpdfstring{\protect\lstinline@switch@ Statement}{switch Statement}} 774 \subsection{\texorpdfstring{\LstKeywordStyle{switch} Statement}{switch Statement}} 775 776 C allows a number of questionable forms for the ©switch© statement: 777 \begin{enumerate} 778 \item 779 By default, the end of a ©case© clause\footnote{ 780 In this section, the term \emph{case clause} refers to either a ©case© or ©default© clause.} 781 \emph{falls through} to the next ©case© clause in the ©switch© statement; 782 to exit a ©switch© statement from a ©case© clause requires explicitly terminating the clause with a transfer statement, most commonly ©break©: 783 \begin{cfa} 784 switch ( i ) { 785 case 1: 786 ... 787 // fall-through 788 case 2: 789 ... 
790 break; // exit switch statement 791 } 792 \end{cfa} 793 The ability to fall-through to the next clause \emph{is} a useful form of control flow, specifically when a sequence of case actions compound: 794 \begin{cquote} 795 \begin{tabular}{@{}l@{\hspace{3em}}l@{}} 796 \begin{cfa} 797 switch ( argc ) { 798 case 3: 799 // open output file 800 // fall-through 801 case 2: 802 // open input file 803 break; // exit switch statement 804 default: 805 // usage message 806 } 807 \end{cfa} 808 & 809 \begin{cfa} 810 811 if ( argc == 3 ) { 812 // open output file 813 ®// open input file 814 ®} else if ( argc == 2 ) { 815 ®// open input file (duplicate) 816 817 ®} else { 818 // usage message 819 } 820 \end{cfa} 821 \end{tabular} 822 \end{cquote} 823 In this example, case 2 is always done if case 3 is done. 824 This control flow is difficult to simulate with if statements or a ©switch© statement without fall-through as code must be duplicated or placed in a separate routine. 825 C also uses fall-through to handle multiple case-values resulting in the same action: 826 \begin{cfa} 827 switch ( i ) { 828 ®case 1: case 3: case 5:® // odd values 829 // odd action 830 break; 831 ®case 2: case 4: case 6:® // even values 832 // even action 833 break; 834 } 835 \end{cfa} 836 However, this situation is handled in other languages without fall-through by allowing a list of case values. 837 While fall-through itself is not a problem, the problem occurs when fall-through is the default, as this semantics is unintuitive to many programmers and is different from virtually all other programming languages with a ©switch© statement. 838 Hence, default fall-through semantics results in a large number of programming errors as programmers often \emph{forget} the ©break© statement at the end of a ©case© clause, resulting in inadvertent fall-through. 
839 840 \item 841 It is possible to place ©case© clauses on statements nested \emph{within} the body of the ©switch© statement: 842 \begin{cfa} 843 switch ( i ) { 844 case 0: 845 if ( j < k ) { 846 ... 847 ®case 1:® // transfer into "if" statement 848 ... 849 } // if 850 case 2: 851 while ( j < 5 ) { 852 ... 853 ®case 3:® // transfer into "while" statement 854 ... 855 } // while 856 } // switch 857 \end{cfa} 858 The problem with this usage is branching into control structures, which is known to cause both comprehension and technical difficulties. 859 The comprehension problem occurs from the inability to determine how control reaches a particular point due to the number of branches leading to it. 860 The technical problem results from the inability to ensure declaration and initialization of variables when blocks are not entered at the beginning. 861 There are no positive arguments for this kind of control flow, and therefore, there is a strong impetus to eliminate it. 862 Nevertheless, C does have an idiom where this capability is used, known as ``\Index*{Duff's device}''~\cite{Duff83}: 863 \begin{cfa} 864 register int n = (count + 7) / 8; 865 switch ( count % 8 ) { 866 case 0: do{ *to = *from++; 867 case 7: *to = *from++; 868 case 6: *to = *from++; 869 case 5: *to = *from++; 870 case 4: *to = *from++; 871 case 3: *to = *from++; 872 case 2: *to = *from++; 873 case 1: *to = *from++; 874 } while ( --n > 0 ); 875 } 876 \end{cfa} 877 which unrolls a loop N times (N = 8 above) and uses the ©switch© statement to deal with any iterations not a multiple of N. 878 While efficient, this sort of special purpose usage is questionable: 879 \begin{quote} 880 Disgusting, no? But it compiles and runs just fine. I feel a combination of pride and revulsion at this 881 discovery.~\cite{Duff83} 882 \end{quote} 883 \item 884 It is possible to place the ©default© clause anywhere in the list of labelled clauses for a ©switch© statement, rather than only at the end. 
885 Virtually all programming languages with a ©switch© statement require the ©default© clause to appear last in the case-clause list. 886 The logic for this semantics is that after checking all the ©case© clauses without success, the ©default© clause is selected; 887 hence, physically placing the ©default© clause at the end of the ©case© clause list matches with this semantics. 888 This physical placement can be compared to the physical placement of an ©else© clause at the end of a series of connected ©if©/©else© statements. 889 890 \item 891 It is possible to place unreachable code at the start of a ©switch© statement, as in: 892 \begin{cfa} 893 switch ( x ) { 894 ®int y = 1;® §\C{// unreachable initialization}§ 895 ®x = 7;® §\C{// unreachable code without label/branch}§ 896 case 0: ... 897 ... 898 ®int z = 0;® §\C{// unreachable initialization, cannot appear after case}§ 899 z = 2; 900 case 1: 901 ®x = z;® §\C{// without fall through, z is uninitialized}§ 902 } 903 \end{cfa} 904 While the declaration of the local variable ©y© is useful with a scope across all ©case© clauses, the initialization for such a variable is defined to never be executed because control always transfers over it. 905 Furthermore, any statements before the first ©case© clause can only be executed if labelled and transferred to using a ©goto©, either from outside or inside of the ©switch©, both of which are problematic. 906 As well, the declaration of ©z© cannot occur after the ©case© because a label can only be attached to a statement, and without a fall through to case 3, ©z© is uninitialized. 907 The key observation is that the ©switch© statement branches into control structure, \ie there are multiple entry points into its statement body. 
908 \end{enumerate} 909 910 Before discussing potential language changes to deal with these problems, it is worth observing that in a typical C program: 911 \begin{itemize} 912 \item 913 the number of ©switch© statements is small, 914 \item 915 most ©switch© statements are well formed (\ie no \Index*{Duff's device}), 916 \item 917 the ©default© clause is usually written as the last case-clause, 918 \item 919 and there is only a medium amount of fall-through from one ©case© clause to the next, and most of these result from a list of case values executing common code, rather than a sequence of case actions that compound. 920 \end{itemize} 921 These observations put into perspective the \CFA changes to the ©switch©. 922 \begin{enumerate} 923 \item 924 Eliminating default fall-through has the greatest potential for affecting existing code. 925 However, even if fall-through is removed, most ©switch© statements would continue to work because of the explicit transfers already present at the end of each ©case© clause, the common placement of the ©default© clause at the end of the case list, and the most common use of fall-through, \ie a list of ©case© clauses executing common code, \eg: 926 \begin{cfa} 927 case 1: case 2: case 3: ... 928 \end{cfa} 929 still works. 930 Nevertheless, reversing the default action would have a non-trivial effect on case actions that compound, such as the above example of processing shell arguments. 931 Therefore, to preserve backwards compatibility, it is necessary to introduce a new kind of ©switch© statement, called ©choose©, with no implicit fall-through semantics and an explicit fall-through if the last statement of a case-clause ends with the new keyword ©fallthrough©/©fallthru©, \eg: 932 \begin{cfa} 933 ®choose® ( i ) { 934 case 1: case 2: case 3: 935 ... 936 ®// implicit end of switch (break) 937 ®case 5: 938 ... 939 ®fallthru®; §\C{// explicit fall through}§ 940 case 7: 941 ... 
942 ®break® §\C{// explicit end of switch (redundant)}§ 943 default: 944 j = 3; 945 } 946 \end{cfa} 947 Like the ©switch© statement, the ©choose© statement retains the fall-through semantics for a list of ©case© clauses; 948 an implicit ©break© is applied only at the end of the \emph{statements} following a ©case© clause. 949 An explicit ©fallthru© is retained because it is a C-idiom most C programmers expect, and its absence might discourage programmers from using the ©choose© statement. 950 As well, allowing an explicit ©break© from the ©choose© is a carry over from the ©switch© statement, and expected by C programmers. 951 \item 952 \Index*{Duff's device} is eliminated from both ©switch© and ©choose© statements, and only invalidates a small amount of very questionable code. 953 Hence, the ©case© clause must appear at the same nesting level as the ©switch©/©choose© body, as is done in most other programming languages with ©switch© statements. 954 \item 955 The issue of ©default© at locations other than at the end of the case clause can be solved by using good programming style, and there are a few reasonable situations involving fall-through where the ©default© clause needs to appear in locations other than at the end. 956 Therefore, no change is made for this issue. 957 \item 958 Dealing with unreachable code in a ©switch©/©choose© body is solved by restricting declarations and associated initialization to the start of statement body, which is executed \emph{before} the transfer to the appropriate ©case© clause\footnote{ 959 Essentially, these declarations are hoisted before the ©switch©/©choose© statement and both declarations and statement are surrounded by a compound statement.} and precluding statements before the first ©case© clause. 960 Further declarations at the same nesting level as the statement body are disallowed to ensure every transfer into the body is sound.
961 \begin{cfa} 962 switch ( x ) { 963 ®int i = 0;® §\C{// allowed only at start}§ 964 case 0: 965 ... 966 ®int j = 0;® §\C{// disallowed}§ 967 case 1: 968 { 969 ®int k = 0;® §\C{// allowed at different nesting levels}§ 970 ... 971 ®case 2:® §\C{// disallow case in nested statements}§ 972 } 973 ... 974 } 975 \end{cfa} 976 \end{enumerate} 977 978 979 %\section{\texorpdfstring{\protect\lstinline@case@ Clause}{case Clause}} 980 \subsection{\texorpdfstring{\LstKeywordStyle{case} Statement}{case Statement}} 981 982 C restricts the ©case© clause of a ©switch© statement to a single value. 983 For multiple ©case© clauses associated with the same statement, it is necessary to have multiple ©case© clauses rather than multiple values. 984 Requiring a ©case© clause for each value does not seem to be in the spirit of brevity normally associated with C. 985 Therefore, the ©case© clause is extended with a list of values, as in: 986 \begin{cquote} 987 \begin{tabular}{@{}l@{\hspace{3em}}l@{\hspace{2em}}l@{}} 988 \multicolumn{1}{c@{\hspace{3em}}}{\textbf{\CFA}} & \multicolumn{1}{c@{\hspace{2em}}}{\textbf{C}} \\ 989 \begin{cfa} 990 switch ( i ) { 991 case ®1, 3, 5®: 992 ... 993 case ®2, 4, 6®: 994 ... 995 } 996 \end{cfa} 997 & 998 \begin{cfa} 999 switch ( i ) { 1000 case 1: case 3 : case 5: 1001 ... 1002 case 2: case 4 : case 6: 1003 ... 1004 } 1005 \end{cfa} 1006 & 1007 \begin{cfa} 1008 1009 // odd values 1010 1011 // even values 1012 1013 1014 \end{cfa} 1015 \end{tabular} 1016 \end{cquote} 1017 In addition, subranges are allowed to specify case values.\footnote{ 1018 gcc has the same mechanism but awkward syntax, \lstinline@2 ...42@, because a space is required after a number, otherwise the period is a decimal point.} 1019 \begin{cfa} 1020 switch ( i ) { 1021 case ®1~5:® §\C{// 1, 2, 3, 4, 5}§ 1022 ... 1023 case ®10~15:® §\C{// 10, 11, 12, 13, 14, 15}§ 1024 ... 1025 } 1026 \end{cfa} 1027 Lists of subranges are also allowed. 
1028 \begin{cfa} 1029 case ®1~5, 12~21, 35~42®: 1030 \end{cfa} 1031 1032 1033 % for () => for ( ;; ) 1034 % for ( 10 - t ) => for ( typeof(10 - t) ? = 0 ; ? < 10 - t; ? += 1 ) // using 0 and 1 1035 % for ( i ; 10 - t ) => for ( typeof(10 - t) i = 0 ; i < 10 - t; i += 1 ) // using 0 and 1 1036 % for ( T i ; 10 - t ) => for ( T i = 0 ; i < 10 - t; i += 1 ) // using 0 and 1 1037 % for ( 3~9 ) => for ( int ? = 3 ; ? < 9; ? += 1 ) // using 1 1038 % for ( i ; 3~9 ) => for ( int i = 3 ; i < 9; i += 1 ) // using 1 1039 % for ( T i ; 3~9 ) => for ( T i = 3 ; i < 9; i += 1 ) // using 1 1040 1041 1042 %\subsection{\texorpdfstring{Labelled \protect\lstinline@continue@ / \protect\lstinline@break@}{Labelled continue / break}} 1043 \subsection{\texorpdfstring{Labelled \LstKeywordStyle{continue} / \LstKeywordStyle{break} Statement}{Labelled continue / break Statement}} 527 1044 528 1045 While C provides ©continue© and ©break© statements for altering control flow, both are restricted to one level of nesting for a particular control structure. 529 1046 Unfortunately, this restriction forces programmers to use \Indexc{goto} to achieve the equivalent control-flow for more than one level of nesting. 530 To prevent having to switch to the ©goto©, \CFA extends the \Indexc{continue}\index{continue@ \lstinline@continue@!labelled}\index{labelled!continue@©continue©} and \Indexc{break}\index{break@\lstinline@break@!labelled}\index{labelled!break@©break©} with a target label to support static multi-level exit\index{multi-level exit}\index{static multi-level exit}~\cite{Buhr85}, as in Java.1047 To prevent having to switch to the ©goto©, \CFA extends the \Indexc{continue}\index{continue@©continue©!labelled}\index{labelled!continue@©continue©} and \Indexc{break}\index{break@©break©!labelled}\index{labelled!break@©break©} with a target label to support static multi-level exit\index{multi-level exit}\index{static multi-level exit}~\cite{Buhr85}, as in Java. 
531 1048 For both ©continue© and ©break©, the target label must be directly associated with a ©for©, ©while© or ©do© statement; 532 1049 for ©break©, the target label can also be associated with a ©switch©, ©if© or compound (©{}©) statement. … … 613 1130 \end{figure} 614 1131 615 Both labelled ©continue© and ©break© are a ©goto©\index{goto@ \lstinline@goto@!restricted} restricted in the following ways:1132 Both labelled ©continue© and ©break© are a ©goto©\index{goto@©goto©!restricted} restricted in the following ways: 616 1133 \begin{itemize} 617 1134 \item … … 626 1143 With ©goto©, the label is at the end of the control structure, which fails to convey this important clue early enough to the reader. 627 1144 Finally, using an explicit target for the transfer instead of an implicit target allows new constructs to be added or removed without affecting existing constructs. 628 The implicit targets of the current ©continue© and ©break©, \ie the closest enclosing loop or ©switch©, change as certain constructs are added or removed. 629 630 631 \section{\texorpdfstring{\protect\lstinline@switch@ Statement}{switch Statement}} 632 633 C allows a number of questionable forms for the ©switch© statement: 634 \begin{enumerate} 635 \item 636 By default, the end of a ©case© clause\footnote{ 637 In this section, the term \emph{case clause} refers to either a ©case© or ©default© clause.} 638 \emph{falls through} to the next ©case© clause in the ©switch© statement; 639 to exit a ©switch© statement from a ©case© clause requires explicitly terminating the clause with a transfer statement, most commonly ©break©: 640 \begin{cfa} 641 switch ( i ) { 642 case 1: 643 ... 644 // fall-through 645 case 2: 646 ... 
647 break; // exit switch statement 648 } 649 \end{cfa} 650 The ability to fall-through to the next clause \emph{is} a useful form of control flow, specifically when a sequence of case actions compound: 651 \begin{cquote} 652 \begin{tabular}{@{}l@{\hspace{3em}}l@{}} 653 \begin{cfa} 654 switch ( argc ) { 655 case 3: 656 // open output file 657 // fall-through 658 case 2: 659 // open input file 660 break; // exit switch statement 661 default: 662 // usage message 663 } 664 \end{cfa} 665 & 666 \begin{cfa} 667 668 if ( argc == 3 ) { 669 // open output file 670 ®// open input file 671 ®} else if ( argc == 2 ) { 672 ®// open input file (duplicate) 673 674 ®} else { 675 // usage message 676 } 677 \end{cfa} 678 \end{tabular} 679 \end{cquote} 680 In this example, case 2 is always done if case 3 is done. 681 This control flow is difficult to simulate with if statements or a ©switch© statement without fall-through as code must be duplicated or placed in a separate routine. 682 C also uses fall-through to handle multiple case-values resulting in the same action: 683 \begin{cfa} 684 switch ( i ) { 685 ®case 1: case 3: case 5:® // odd values 686 // odd action 687 break; 688 ®case 2: case 4: case 6:® // even values 689 // even action 690 break; 691 } 692 \end{cfa} 693 However, this situation is handled in other languages without fall-through by allowing a list of case values. 694 While fall-through itself is not a problem, the problem occurs when fall-through is the default, as this semantics is unintuitive to many programmers and is different from virtually all other programming languages with a ©switch© statement. 695 Hence, default fall-through semantics results in a large number of programming errors as programmers often \emph{forget} the ©break© statement at the end of a ©case© clause, resulting in inadvertent fall-through. 
696 697 \item 698 It is possible to place ©case© clauses on statements nested \emph{within} the body of the ©switch© statement: 699 \begin{cfa} 700 switch ( i ) { 701 case 0: 702 if ( j < k ) { 703 ... 704 ®case 1:® // transfer into "if" statement 705 ... 706 } // if 707 case 2: 708 while ( j < 5 ) { 709 ... 710 ®case 3:® // transfer into "while" statement 711 ... 712 } // while 713 } // switch 714 \end{cfa} 715 The problem with this usage is branching into control structures, which is known to cause both comprehension and technical difficulties. 716 The comprehension problem occurs from the inability to determine how control reaches a particular point due to the number of branches leading to it. 717 The technical problem results from the inability to ensure declaration and initialization of variables when blocks are not entered at the beginning. 718 There are no positive arguments for this kind of control flow, and therefore, there is a strong impetus to eliminate it. 719 Nevertheless, C does have an idiom where this capability is used, known as ``\Index*{Duff's device}''~\cite{Duff83}: 720 \begin{cfa} 721 register int n = (count + 7) / 8; 722 switch ( count % 8 ) { 723 case 0: do{ *to = *from++; 724 case 7: *to = *from++; 725 case 6: *to = *from++; 726 case 5: *to = *from++; 727 case 4: *to = *from++; 728 case 3: *to = *from++; 729 case 2: *to = *from++; 730 case 1: *to = *from++; 731 } while ( --n > 0 ); 732 } 733 \end{cfa} 734 which unrolls a loop N times (N = 8 above) and uses the ©switch© statement to deal with any iterations not a multiple of N. 735 While efficient, this sort of special purpose usage is questionable: 736 \begin{quote} 737 Disgusting, no? But it compiles and runs just fine. I feel a combination of pride and revulsion at this 738 discovery.~\cite{Duff83} 739 \end{quote} 740 \item 741 It is possible to place the ©default© clause anywhere in the list of labelled clauses for a ©switch© statement, rather than only at the end. 
742 Virtually all programming languages with a ©switch© statement require the ©default© clause to appear last in the case-clause list. 743 The logic for this semantics is that after checking all the ©case© clauses without success, the ©default© clause is selected; 744 hence, physically placing the ©default© clause at the end of the ©case© clause list matches with this semantics. 745 This physical placement can be compared to the physical placement of an ©else© clause at the end of a series of connected ©if©/©else© statements. 746 747 \item 748 It is possible to place unreachable code at the start of a ©switch© statement, as in: 749 \begin{cfa} 750 switch ( x ) { 751 ®int y = 1;® §\C{// unreachable initialization}§ 752 ®x = 7;® §\C{// unreachable code without label/branch}§ 753 case 0: ... 754 ... 755 ®int z = 0;® §\C{// unreachable initialization, cannot appear after case}§ 756 z = 2; 757 case 1: 758 ®x = z;® §\C{// without fall through, z is uninitialized}§ 759 } 760 \end{cfa} 761 While the declaration of the local variable ©y© is useful with a scope across all ©case© clauses, the initialization for such a variable is defined to never be executed because control always transfers over it. 762 Furthermore, any statements before the first ©case© clause can only be executed if labelled and transferred to using a ©goto©, either from outside or inside of the ©switch©, both of which are problematic. 763 As well, the declaration of ©z© cannot occur after the ©case© because a label can only be attached to a statement, and without a fall through to case 1, ©z© is uninitialized. 764 The key observation is that the ©switch© statement branches into control structure, \ie there are multiple entry points into its statement body. 
765 \end{enumerate} 766 767 Before discussing potential language changes to deal with these problems, it is worth observing that in a typical C program: 768 \begin{itemize} 769 \item 770 the number of ©switch© statements is small, 771 \item 772 most ©switch© statements are well formed (\ie no \Index*{Duff's device}), 773 \item 774 the ©default© clause is usually written as the last case-clause, 775 \item 776 and there is only a medium amount of fall-through from one ©case© clause to the next, and most of these result from a list of case values executing common code, rather than a sequence of case actions that compound. 777 \end{itemize} 778 These observations put into perspective the \CFA changes to the ©switch©. 779 \begin{enumerate} 780 \item 781 Eliminating default fall-through has the greatest potential for affecting existing code. 782 However, even if fall-through is removed, most ©switch© statements would continue to work because of the explicit transfers already present at the end of each ©case© clause, the common placement of the ©default© clause at the end of the case list, and the most common use of fall-through, \ie a list of ©case© clauses executing common code, \eg: 783 \begin{cfa} 784 case 1: case 2: case 3: ... 785 \end{cfa} 786 still works. 787 Nevertheless, reversing the default action would have a non-trivial effect on case actions that compound, such as the above example of processing shell arguments. 788 Therefore, to preserve backwards compatibility, it is necessary to introduce a new kind of ©switch© statement, called ©choose©, with no implicit fall-through semantics and an explicit fall-through if the last statement of a case-clause ends with the new keyword ©fallthrough©/©fallthru©, \eg: 789 \begin{cfa} 790 ®choose® ( i ) { 791 case 1: case 2: case 3: 792 ... 793 ®// implicit end of switch (break) 794 ®case 5: 795 ... 796 ®fallthru®; §\C{// explicit fall through}§ 797 case 7: 798 ... 
799 ®break® §\C{// explicit end of switch (redundant)}§ 800 default: 801 j = 3; 802 } 803 \end{cfa} 804 Like the ©switch© statement, the ©choose© statement retains the fall-through semantics for a list of ©case© clauses; 805 an implicit ©break© is applied only at the end of the \emph{statements} following a ©case© clause. 806 An explicit ©fallthru© is retained because it is a C-idiom most C programmers expect, and its absence might discourage programmers from using the ©choose© statement. 807 As well, allowing an explicit ©break© from the ©choose© is a carry over from the ©switch© statement, and expected by C programmers. 808 \item 809 \Index*{Duff's device} is eliminated from both ©switch© and ©choose© statements, and only invalidates a small amount of very questionable code. 810 Hence, the ©case© clause must appear at the same nesting level as the ©switch©/©choose© body, as is done in most other programming languages with ©switch© statements. 811 \item 812 The issue of ©default© at locations other than at the end of the case clause can be solved by using good programming style, and there are a few reasonable situations involving fall-through where the ©default© clause needs to appear in locations other than at the end. 813 Therefore, no change is made for this issue. 814 \item 815 Dealing with unreachable code in a ©switch©/©choose© body is solved by restricting declarations and associated initialization to the start of statement body, which is executed \emph{before} the transfer to the appropriate ©case© clause\footnote{ 816 Essentially, these declarations are hoisted before the ©switch©/©choose© statement and both declarations and statement are surrounded by a compound statement.} and precluding statements before the first ©case© clause. 817 Further declarations at the same nesting level as the statement body are disallowed to ensure every transfer into the body is sound. 
818 \begin{cfa} 819 switch ( x ) { 820 ®int i = 0;® §\C{// allowed only at start}§ 821 case 0: 822 ... 823 ®int j = 0;® §\C{// disallowed}§ 824 case 1: 825 { 826 ®int k = 0;® §\C{// allowed at different nesting levels}§ 827 ... 828 ®case 2:® §\C{// disallow case in nested statements}§ 829 } 830 ... 831 } 832 \end{cfa} 833 \end{enumerate} 834 835 836 \section{\texorpdfstring{\protect\lstinline@case@ Clause}{case Clause}} 837 838 C restricts the ©case© clause of a ©switch© statement to a single value. 839 For multiple ©case© clauses associated with the same statement, it is necessary to have multiple ©case© clauses rather than multiple values. 840 Requiring a ©case© clause for each value does not seem to be in the spirit of brevity normally associated with C. 841 Therefore, the ©case© clause is extended with a list of values, as in: 842 \begin{cquote} 843 \begin{tabular}{@{}l@{\hspace{3em}}l@{\hspace{2em}}l@{}} 844 \multicolumn{1}{c@{\hspace{3em}}}{\textbf{\CFA}} & \multicolumn{1}{c@{\hspace{2em}}}{\textbf{C}} \\ 845 \begin{cfa} 846 switch ( i ) { 847 case ®1, 3, 5®: 848 ... 849 case ®2, 4, 6®: 850 ... 851 } 852 \end{cfa} 853 & 854 \begin{cfa} 855 switch ( i ) { 856 case 1: case 3 : case 5: 857 ... 858 case 2: case 4 : case 6: 859 ... 860 } 861 \end{cfa} 862 & 863 \begin{cfa} 864 865 // odd values 866 867 // even values 868 869 870 \end{cfa} 871 \end{tabular} 872 \end{cquote} 873 In addition, subranges are allowed to specify case values.\footnote{ 874 gcc has the same mechanism but awkward syntax, \lstinline@2 ...42@, because a space is required after a number, otherwise the period is a decimal point.} 875 \begin{cfa} 876 switch ( i ) { 877 case ®1~5:® §\C{// 1, 2, 3, 4, 5}§ 878 ... 879 case ®10~15:® §\C{// 10, 11, 12, 13, 14, 15}§ 880 ... 881 } 882 \end{cfa} 883 Lists of subranges are also allowed. 
884 \begin{cfa} 885 case ®1~5, 12~21, 35~42®: 886 \end{cfa} 887 888 889 \section{\texorpdfstring{\protect\lstinline@with@ Statement}{with Statement}} 1145 Otherwise, the implicit targets of the current ©continue© and ©break©, \ie the closest enclosing loop or ©switch©, change as certain constructs are added or removed. 1146 1147 1148 %\section{\texorpdfstring{\protect\lstinline@with@ Statement}{with Statement}} 1149 \section{\texorpdfstring{\LstKeywordStyle{with} Statement}{with Statement}} 890 1150 \label{s:WithStatement} 891 1151 892 1152 Grouping heterogeneous data into \newterm{aggregate}s (structure/union) is a common programming practice, and an aggregate can be further organized into more complex structures, such as arrays and containers: 893 1153 \begin{cfa} 894 struct S { §\C{// aggregate}§895 char c; §\C{// fields}§1154 struct S { §\C{// aggregate}§ 1155 char c; §\C{// fields}§ 896 1156 int i; 897 1157 double d; … … 902 1162 \begin{cfa} 903 1163 void f( S s ) { 904 `s.`c; `s.`i; `s.`d;§\C{// access containing fields}§1164 ®s.®c; ®s.®i; ®s.®d; §\C{// access containing fields}§ 905 1165 } 906 1166 \end{cfa} … … 909 1169 \begin{C++} 910 1170 struct S { 911 char c; §\C{// fields}§1171 char c; §\C{// fields}§ 912 1172 int i; 913 1173 double d; 914 void f() { §\C{// implicit ``this'' aggregate}§915 `this->`c; `this->`i; `this->`d;§\C{// access containing fields}§1174 void f() { §\C{// implicit ``this'' aggregate}§ 1175 ®this->®c; ®this->®i; ®this->®d; §\C{// access containing fields}§ 916 1176 } 917 1177 } 918 1178 \end{C++} 919 Object-oriented nesting of member functions in a \lstinline[language=C++]@class/struct@ allows eliding \lstinline[language=C++] $this->$because of lexical scoping.1179 Object-oriented nesting of member functions in a \lstinline[language=C++]@class/struct@ allows eliding \lstinline[language=C++]@this->@ because of lexical scoping. 
920 1180 However, for other aggregate parameters, qualification is necessary: 921 1181 \begin{cfa} 922 1182 struct T { double m, n; }; 923 int S::f( T & t ) { §\C{// multiple aggregate parameters}§924 c; i; d; §\C{\color{red}// this--{\textgreater}.c, this--{\textgreater}.i, this--{\textgreater}.d}§925 `t.`m; `t.`n;§\C{// must qualify}§926 } 927 \end{cfa} 928 929 To simplify the programmer experience, \CFA provides a @with@statement (see Pascal~\cite[\S~4.F]{Pascal}) to elide aggregate qualification to fields by opening a scope containing the field identifiers.1183 int S::f( T & t ) { §\C{// multiple aggregate parameters}§ 1184 c; i; d; §\C{\color{red}// this--{\textgreater}.c, this--{\textgreater}.i, this--{\textgreater}.d}§ 1185 ®t.®m; ®t.®n; §\C{// must qualify}§ 1186 } 1187 \end{cfa} 1188 1189 To simplify the programmer experience, \CFA provides a ©with© statement (see Pascal~\cite[\S~4.F]{Pascal}) to elide aggregate qualification to fields by opening a scope containing the field identifiers. 930 1190 Hence, the qualified fields become variables with the side-effect that it is easier to optimize field references in a block. 
931 1191 \begin{cfa} 932 void f( S & this ) `with ( this )` {§\C{// with statement}§933 c; i; d; §\C{\color{red}// this.c, this.i, this.d}§1192 void f( S & this ) ®with ( this )® { §\C{// with statement}§ 1193 c; i; d; §\C{\color{red}// this.c, this.i, this.d}§ 934 1194 } 935 1195 \end{cfa} 936 1196 with the generality of opening multiple aggregate-parameters: 937 1197 \begin{cfa} 938 void f( S & s, T & t ) `with ( s, t )` {§\C{// multiple aggregate parameters}§939 c; i; d; §\C{\color{red}// s.c, s.i, s.d}§940 m; n; §\C{\color{red}// t.m, t.n}§941 } 942 \end{cfa} 943 944 In detail, the @with@statement has the form:1198 void f( S & s, T & t ) ®with ( s, t )® { §\C{// multiple aggregate parameters}§ 1199 c; i; d; §\C{\color{red}// s.c, s.i, s.d}§ 1200 m; n; §\C{\color{red}// t.m, t.n}§ 1201 } 1202 \end{cfa} 1203 1204 In detail, the ©with© statement has the form: 945 1205 \begin{cfa} 946 1206 §\emph{with-statement}§: … … 957 1217 The difference between parallel and nesting occurs for fields with the same name and type: 958 1218 \begin{cfa} 959 struct S { int `i`; int j; double m; } s, w;960 struct T { int `i`; int k; int m; } t, w;1219 struct S { int ®i®; int j; double m; } s, w; 1220 struct T { int ®i®; int k; int m; } t, w; 961 1221 with ( s, t ) { 962 j + k; §\C{// unambiguous, s.j + t.k}§963 m = 5.0; §\C{// unambiguous, s.m = 5.0}§964 m = 1; §\C{// unambiguous, t.m = 1}§965 int a = m; §\C{// unambiguous, a = t.m }§966 double b = m; §\C{// unambiguous, b = s.m}§967 int c = s.i + t.i; §\C{// unambiguous, qualification}§968 (double)m; §\C{// unambiguous, cast}§969 } 970 \end{cfa} 971 For parallel semantics, both @s.i@ and @t.i@ are visible, so @i@is ambiguous without qualification;972 for nested semantics, @t.i@ hides @s.i@, so @i@ implies @t.i@.1222 j + k; §\C{// unambiguous, s.j + t.k}§ 1223 m = 5.0; §\C{// unambiguous, s.m = 5.0}§ 1224 m = 1; §\C{// unambiguous, t.m = 1}§ 1225 int a = m; §\C{// unambiguous, a = t.m }§ 1226 double b = m; §\C{// unambiguous, b = 
s.m}§ 1227 int c = s.i + t.i; §\C{// unambiguous, qualification}§ 1228 (double)m; §\C{// unambiguous, cast}§ 1229 } 1230 \end{cfa} 1231 For parallel semantics, both ©s.i© and ©t.i© are visible, so ©i© is ambiguous without qualification; 1232 for nested semantics, ©t.i© hides ©s.i©, so ©i© implies ©t.i©. 973 1233 \CFA's ability to overload variables means fields with the same name but different types are automatically disambiguated, eliminating most qualification when opening multiple aggregates. 974 1234 Qualification or a cast is used to disambiguate. 975 1235 976 There is an interesting problem between parameters and the function-body @with@, \eg:977 \begin{cfa} 978 void ?{}( S & s, int i ) with ( s ) { §\C{// constructor}§979 `s.i = i;` j = 3; m = 5.5;§\C{// initialize fields}§980 } 981 \end{cfa} 982 Here, the assignment @s.i = i@ means @s.i = s.i@, which is meaningless, and there is no mechanism to qualify the parameter @i@, making the assignment impossible using the function-body @with@.1236 There is an interesting problem between parameters and the function-body ©with©, \eg: 1237 \begin{cfa} 1238 void ?{}( S & s, int i ) with ( s ) { §\C{// constructor}§ 1239 ®s.i = i;® j = 3; m = 5.5; §\C{// initialize fields}§ 1240 } 1241 \end{cfa} 1242 Here, the assignment ©s.i = i© means ©s.i = s.i©, which is meaningless, and there is no mechanism to qualify the parameter ©i©, making the assignment impossible using the function-body ©with©. 983 1243 To solve this problem, parameters are treated like an initialized aggregate: 984 1244 \begin{cfa} … … 990 1250 and implicitly opened \emph{after} a function-body open, to give them higher priority: 991 1251 \begin{cfa} 992 void ?{}( S & s, int `i` ) with ( s ) `with( §\emph{\color{red}params}§ )`{993 s.i = `i`; j = 3; m = 5.5;994 } 995 \end{cfa} 996 Finally, a cast may be used to disambiguate among overload variables in a @with@expression:997 \begin{cfa} 998 with ( w ) { ... 
} §\C{// ambiguous, same name and no context}§999 with ( (S)w ) { ... } §\C{// unambiguous, cast}§1000 \end{cfa} 1001 and @with@expressions may be complex expressions with type reference (see Section~\ref{s:References}) to aggregate:1252 void ?{}( S & s, int ®i® ) with ( s ) ®with( §\emph{\color{red}params}§ )® { 1253 s.i = ®i®; j = 3; m = 5.5; 1254 } 1255 \end{cfa} 1256 Finally, a cast may be used to disambiguate among overload variables in a ©with© expression: 1257 \begin{cfa} 1258 with ( w ) { ... } §\C{// ambiguous, same name and no context}§ 1259 with ( (S)w ) { ... } §\C{// unambiguous, cast}§ 1260 \end{cfa} 1261 and ©with© expressions may be complex expressions with type reference (see Section~\ref{s:References}) to aggregate: 1002 1262 % \begin{cfa} 1003 1263 % struct S { int i, j; } sv; 1004 % with ( sv ) { §\C{// implicit reference}§1264 % with ( sv ) { §\C{// implicit reference}§ 1005 1265 % S & sr = sv; 1006 % with ( sr ) { §\C{// explicit reference}§1266 % with ( sr ) { §\C{// explicit reference}§ 1007 1267 % S * sp = &sv; 1008 % with ( *sp ) { §\C{// computed reference}§1009 % i = 3; j = 4; §\C{\color{red}// sp--{\textgreater}i, sp--{\textgreater}j}§1268 % with ( *sp ) { §\C{// computed reference}§ 1269 % i = 3; j = 4; §\C{\color{red}// sp--{\textgreater}i, sp--{\textgreater}j}§ 1010 1270 % } 1011 % i = 2; j = 3; §\C{\color{red}// sr.i, sr.j}§1271 % i = 2; j = 3; §\C{\color{red}// sr.i, sr.j}§ 1012 1272 % } 1013 % i = 1; j = 2; §\C{\color{red}// sv.i, sv.j}§1273 % i = 1; j = 2; §\C{\color{red}// sv.i, sv.j}§ 1014 1274 % } 1015 1275 % \end{cfa} … … 1019 1279 class C { 1020 1280 int i, j; 1021 int mem() { §\C{\color{red}// implicit "this" parameter}§1022 i = 1; §\C{\color{red}// this->i}§1023 j = 2; §\C{\color{red}// this->j}§1281 int mem() { §\C{\color{red}// implicit "this" parameter}§ 1282 i = 1; §\C{\color{red}// this->i}§ 1283 j = 2; §\C{\color{red}// this->j}§ 1024 1284 } 1025 1285 } … … 1028 1288 \begin{cfa} 1029 1289 struct S { int i, j; }; 1030 
int mem( S & ®this® ) { §\C{// explicit "this" parameter}§1031 ®this.®i = 1; §\C{// "this" is not elided}§1290 int mem( S & ®this® ) { §\C{// explicit "this" parameter}§ 1291 ®this.®i = 1; §\C{// "this" is not elided}§ 1032 1292 ®this.®j = 2; 1033 1293 } … … 1038 1298 \begin{cfa} 1039 1299 int mem( S & this ) ®with( this )® { §\C{// with clause}§ 1040 i = 1; §\C{\color{red}// this.i}§1041 j = 2; §\C{\color{red}// this.j}§1300 i = 1; §\C{\color{red}// this.i}§ 1301 j = 2; §\C{\color{red}// this.j}§ 1042 1302 } 1043 1303 \end{cfa} … … 1056 1316 struct S1 { ... } s1; 1057 1317 struct S2 { ... } s2; 1058 ®with( s1 )® { §\C{// with statement}§1318 ®with( s1 )® { §\C{// with statement}§ 1059 1319 // access fields of s1 without qualification 1060 ®with s2® { §\C{// nesting}§1320 ®with s2® { §\C{// nesting}§ 1061 1321 // access fields of s1 and s2 without qualification 1062 1322 } … … 1113 1373 Non-local transfer can cause stack unwinding, \ie non-local routine termination, depending on the kind of raise. 1114 1374 \begin{cfa} 1115 exception_t E {}; §\C{// exception type}§1375 exception_t E {}; §\C{// exception type}§ 1116 1376 void f(...) { 1117 ... throw E{}; ... §\C{// termination}§1118 ... throwResume E{}; ... §\C{// resumption}§1377 ... throw E{}; ... §\C{// termination}§ 1378 ... throwResume E{}; ... 
§\C{// resumption}§ 1119 1379 } 1120 1380 try { 1121 1381 f(...); 1122 } catch( E e ; §boolean-predicate§ ) { §\C[8cm]{// termination handler}§1382 } catch( E e ; §boolean-predicate§ ) { §\C[8cm]{// termination handler}§ 1123 1383 // recover and continue 1124 } catchResume( E e ; §boolean-predicate§ ) { §\C{// resumption handler}\CRT§1384 } catchResume( E e ; §boolean-predicate§ ) { §\C{// resumption handler}\CRT§ 1125 1385 // repair and return 1126 1386 } finally { … … 1182 1442 For example, a routine returning a \Index{pointer} to an array of integers is defined and used in the following way: 1183 1443 \begin{cfa} 1184 int ®(*®f®())[®5®]® {...}; §\C{// definition}§1185 ... ®(*®f®())[®3®]® += 1; §\C{// usage}§1444 int ®(*®f®())[®5®]® {...}; §\C{// definition}§ 1445 ... ®(*®f®())[®3®]® += 1; §\C{// usage}§ 1186 1446 \end{cfa} 1187 1447 Essentially, the return type is wrapped around the routine name in successive layers (like an \Index{onion}). 1188 While attempting to make the two contexts consistent is a laudable goal, it has not worked out in practice. 1448 While attempting to make the two contexts consistent is a laudable goal, it has not worked out in practice, even though Dennis Ritchie believed otherwise: 1449 \begin{quote} 1450 In spite of its difficulties, I believe that the C's approach to declarations remains plausible, and am comfortable with it; it is a useful unifying principle.~\cite[p.~12]{Ritchie93} 1451 \end{quote} 1189 1452 1190 1453 \CFA provides its own type, variable and routine declarations, using a different syntax. 
… … 1372 1635 *x = 3; // implicit dereference 1373 1636 int * ®const® y = (int *)104; 1374 *y = *x; // implicit dereference1637 *y = *x; // implicit dereference 1375 1638 \end{cfa} 1376 1639 \end{tabular} … … 1386 1649 \hline 1387 1650 \begin{cfa} 1388 lda r1,100 // load address of x1389 ld r2,(r1) // load value of x1390 lda r3,104 // load address of y1391 st r2,(r3) // store x into y1651 lda r1,100 // load address of x 1652 ld r2,(r1) // load value of x 1653 lda r3,104 // load address of y 1654 st r2,(r3) // store x into y 1392 1655 \end{cfa} 1393 1656 & 1394 1657 \begin{cfa} 1395 1658 1396 ld r2,(100) // load value of x1397 1398 st r2,(104) // store x into y1659 ld r2,(100) // load value of x 1660 1661 st r2,(104) // store x into y 1399 1662 \end{cfa} 1400 1663 \end{tabular} … … 1410 1673 \begin{cfa} 1411 1674 int x, y, ®*® p1, ®*® p2, ®**® p3; 1412 p1 = ®&®x; // p1 points to x1413 p2 = p1; // p2 points to x1414 p1 = ®&®y; // p1 points to y1415 p3 = &p2; // p3 points to p21675 p1 = ®&®x; // p1 points to x 1676 p2 = p1; // p2 points to x 1677 p1 = ®&®y; // p1 points to y 1678 p3 = &p2; // p3 points to p2 1416 1679 \end{cfa} 1417 1680 & … … 1424 1687 For example, \Index*{Algol68}~\cite{Algol68} infers pointer dereferencing to select the best meaning for each pointer usage 1425 1688 \begin{cfa} 1426 p2 = p1 + x; §\C{// compiler infers *p2 = *p1 + x;}§1689 p2 = p1 + x; §\C{// compiler infers *p2 = *p1 + x;}§ 1427 1690 \end{cfa} 1428 1691 Algol68 infers the following dereferencing ©*p2 = *p1 + x©, because adding the arbitrary integer value in ©x© to the address of ©p1© and storing the resulting address into ©p2© is an unlikely operation. 1429 Unfortunately, automatic dereferencing does not work in all cases, and so some mechanism is necessary to fix incorrect choices. 1692 Unfortunately, automatic dereferencing does not work in all cases, and so some mechanism is necessary to fix incorrect choices. 
1430 1693 1431 1694 Rather than inferring dereference, most programming languages pick one implicit dereferencing semantics, and the programmer explicitly indicates the other to resolve address-duality. 1432 1695 In C, objects of pointer type always manipulate the pointer object's address: 1433 1696 \begin{cfa} 1434 p1 = p2; §\C{// p1 = p2\ \ rather than\ \ *p1 = *p2}§1435 p2 = p1 + x; §\C{// p2 = p1 + x\ \ rather than\ \ *p2 = *p1 + x}§1697 p1 = p2; §\C{// p1 = p2\ \ rather than\ \ *p1 = *p2}§ 1698 p2 = p1 + x; §\C{// p2 = p1 + x\ \ rather than\ \ *p2 = *p1 + x}§ 1436 1699 \end{cfa} 1437 1700 even though the assignment to ©p2© is likely incorrect, and the programmer probably meant: 1438 1701 \begin{cfa} 1439 p1 = p2; §\C{// pointer address assignment}§1440 ®*®p2 = ®*®p1 + x; §\C{// pointed-to value assignment / operation}§1702 p1 = p2; §\C{// pointer address assignment}§ 1703 ®*®p2 = ®*®p1 + x; §\C{// pointed-to value assignment / operation}§ 1441 1704 \end{cfa} 1442 1705 The C semantics work well for situations where manipulation of addresses is the primary meaning and data is rarely accessed, such as storage management (©malloc©/©free©). 
… … 1455 1718 \begin{cfa} 1456 1719 int x, y, ®&® r1, ®&® r2, ®&&® r3; 1457 ®&®r1 = &x; §\C{// r1 points to x}§1458 ®&®r2 = &r1; §\C{// r2 points to x}§1459 ®&®r1 = &y; §\C{// r1 points to y}§1460 ®&&®r3 = ®&®&r2; §\C{// r3 points to r2}§1720 ®&®r1 = &x; §\C{// r1 points to x}§ 1721 ®&®r2 = &r1; §\C{// r2 points to x}§ 1722 ®&®r1 = &y; §\C{// r1 points to y}§ 1723 ®&&®r3 = ®&®&r2; §\C{// r3 points to r2}§ 1461 1724 r2 = ((r1 + r2) * (r3 - r1)) / (r3 - 15); §\C{// implicit dereferencing}§ 1462 1725 \end{cfa} … … 1474 1737 For a \CFA reference type, the cancellation on the left-hand side of assignment leaves the reference as an address (\Index{lvalue}): 1475 1738 \begin{cfa} 1476 (&®*®)r1 = &x; §\C{// (\&*) cancel giving address in r1 not variable pointed-to by r1}§1739 (&®*®)r1 = &x; §\C{// (\&*) cancel giving address in r1 not variable pointed-to by r1}§ 1477 1740 \end{cfa} 1478 1741 Similarly, the address of a reference can be obtained for assignment or computation (\Index{rvalue}): 1479 1742 \begin{cfa} 1480 (&(&®*®)®*®)r3 = &(&®*®)r2; §\C{// (\&*) cancel giving address in r2, (\&(\&*)*) cancel giving address in r3}§1743 (&(&®*®)®*®)r3 = &(&®*®)r2; §\C{// (\&*) cancel giving address in r2, (\&(\&*)*) cancel giving address in r3}§ 1481 1744 \end{cfa} 1482 1745 Cancellation\index{cancellation!pointer/reference}\index{pointer!cancellation} works to arbitrary depth. 
… … 1486 1749 int x, *p1 = &x, **p2 = &p1, ***p3 = &p2, 1487 1750 &r1 = x, &&r2 = r1, &&&r3 = r2; 1488 ***p3 = 3; §\C{// change x}§1489 r3 = 3; §\C{// change x, ***r3}§1490 **p3 = ...; §\C{// change p1}§1491 &r3 = ...; §\C{// change r1, (\&*)**r3, 1 cancellation}§1492 *p3 = ...; §\C{// change p2}§1493 &&r3 = ...; §\C{// change r2, (\&(\&*)*)*r3, 2 cancellations}§1494 &&&r3 = p3; §\C{// change r3 to p3, (\&(\&(\&*)*)*)r3, 3 cancellations}§1751 ***p3 = 3; §\C{// change x}§ 1752 r3 = 3; §\C{// change x, ***r3}§ 1753 **p3 = ...; §\C{// change p1}§ 1754 &r3 = ...; §\C{// change r1, (\&*)**r3, 1 cancellation}§ 1755 *p3 = ...; §\C{// change p2}§ 1756 &&r3 = ...; §\C{// change r2, (\&(\&*)*)*r3, 2 cancellations}§ 1757 &&&r3 = p3; §\C{// change r3 to p3, (\&(\&(\&*)*)*)r3, 3 cancellations}§ 1495 1758 \end{cfa} 1496 1759 Furthermore, both types are equally performant, as the same amount of dereferencing occurs for both types. … … 1499 1762 As for a pointer type, a reference type may have qualifiers: 1500 1763 \begin{cfa} 1501 const int cx = 5; §\C{// cannot change cx;}§1502 const int & cr = cx; §\C{// cannot change what cr points to}§1503 ®&®cr = &cx; §\C{// can change cr}§1504 cr = 7; §\C{// error, cannot change cx}§1505 int & const rc = x; §\C{// must be initialized}§1506 ®&®rc = &x; §\C{// error, cannot change rc}§1507 const int & const crc = cx; §\C{// must be initialized}§1508 crc = 7; §\C{// error, cannot change cx}§1509 ®&®crc = &cx; §\C{// error, cannot change crc}§1764 const int cx = 5; §\C{// cannot change cx;}§ 1765 const int & cr = cx; §\C{// cannot change what cr points to}§ 1766 ®&®cr = &cx; §\C{// can change cr}§ 1767 cr = 7; §\C{// error, cannot change cx}§ 1768 int & const rc = x; §\C{// must be initialized}§ 1769 ®&®rc = &x; §\C{// error, cannot change rc}§ 1770 const int & const crc = cx; §\C{// must be initialized}§ 1771 crc = 7; §\C{// error, cannot change cx}§ 1772 ®&®crc = &cx; §\C{// error, cannot change crc}§ 1510 1773 \end{cfa} 1511 1774 Hence, for 
type ©& const©, there is no pointer assignment, so ©&rc = &x© is disallowed, and \emph{the address value cannot be the null pointer unless an arbitrary pointer is coerced\index{coercion} into the reference}: 1512 1775 \begin{cfa} 1513 int & const cr = *0; §\C{// where 0 is the int * zero}§1776 int & const cr = *0; §\C{// where 0 is the int * zero}§ 1514 1777 \end{cfa} 1515 1778 Note, constant reference-types do not prevent \Index{addressing errors} because of explicit storage-management: … … 1518 1781 cr = 5; 1519 1782 free( &cr ); 1520 cr = 7; §\C{// unsound pointer dereference}§1783 cr = 7; §\C{// unsound pointer dereference}§ 1521 1784 \end{cfa} 1522 1785 … … 1543 1806 \begin{cfa} 1544 1807 int w, x, y, z, & ar[3] = { x, y, z }; §\C{// initialize array of references}§ 1545 &ar[1] = &w; §\C{// change reference array element}§1546 typeof( ar[1] ) p; §\C{// (gcc) is int, \ie the type of referenced object}§1547 typeof( &ar[1] ) q; §\C{// (gcc) is int \&, \ie the type of reference}§1548 sizeof( ar[1] ) == sizeof( int ); §\C{// is true, \ie the size of referenced object}§1549 sizeof( &ar[1] ) == sizeof( int *) §\C{// is true, \ie the size of a reference}§1808 &ar[1] = &w; §\C{// change reference array element}§ 1809 typeof( ar[1] ) p; §\C{// (gcc) is int, \ie the type of referenced object}§ 1810 typeof( &ar[1] ) q; §\C{// (gcc) is int \&, \ie the type of reference}§ 1811 sizeof( ar[1] ) == sizeof( int ); §\C{// is true, \ie the size of referenced object}§ 1812 sizeof( &ar[1] ) == sizeof( int *) §\C{// is true, \ie the size of a reference}§ 1550 1813 \end{cfa} 1551 1814 … … 1564 1827 Therefore, for pointer/reference initialization, the initializing value must be an address not a value. 
1565 1828 \begin{cfa} 1566 int * p = &x; §\C{// assign address of x}§1567 ®int * p = x;® §\C{// assign value of x}§1568 int & r = x; §\C{// must have address of x}§1829 int * p = &x; §\C{// assign address of x}§ 1830 ®int * p = x;® §\C{// assign value of x}§ 1831 int & r = x; §\C{// must have address of x}§ 1569 1832 \end{cfa} 1570 1833 Like the previous example with C pointer-arithmetic, it is unlikely assigning the value of ©x© into a pointer is meaningful (again, a warning is usually given). … … 1575 1838 Similarly, when a reference type is used for a parameter/return type, the call-site argument does not require a reference operator for the same reason. 1576 1839 \begin{cfa} 1577 int & f( int & r ); §\C{// reference parameter and return}§1578 z = f( x ) + f( y ); §\C{// reference operator added, temporaries needed for call results}§1840 int & f( int & r ); §\C{// reference parameter and return}§ 1841 z = f( x ) + f( y ); §\C{// reference operator added, temporaries needed for call results}§ 1579 1842 \end{cfa} 1580 1843 Within routine ©f©, it is possible to change the argument by changing the corresponding parameter, and parameter ©r© can be locally reassigned within ©f©. 
… … 1603 1866 void f( int & r ); 1604 1867 void g( int * p ); 1605 f( 3 ); g( ®&®3 ); §\C{// compiler implicit generates temporaries}§1606 f( x + y ); g( ®&®(x + y) ); §\C{// compiler implicit generates temporaries}§1868 f( 3 ); g( ®&®3 ); §\C{// compiler implicit generates temporaries}§ 1869 f( x + y ); g( ®&®(x + y) ); §\C{// compiler implicit generates temporaries}§ 1607 1870 \end{cfa} 1608 1871 Essentially, there is an implicit \Index{rvalue} to \Index{lvalue} conversion in this case.\footnote{ … … 1615 1878 \begin{cfa} 1616 1879 void f( int i ); 1617 void (* fp)( int ); §\C{// routine pointer}§1618 fp = f; §\C{// reference initialization}§1619 fp = &f; §\C{// pointer initialization}§1620 fp = *f; §\C{// reference initialization}§1621 fp(3); §\C{// reference invocation}§1622 (*fp)(3); §\C{// pointer invocation}§1880 void (* fp)( int ); §\C{// routine pointer}§ 1881 fp = f; §\C{// reference initialization}§ 1882 fp = &f; §\C{// pointer initialization}§ 1883 fp = *f; §\C{// reference initialization}§ 1884 fp(3); §\C{// reference invocation}§ 1885 (*fp)(3); §\C{// pointer invocation}§ 1623 1886 \end{cfa} 1624 1887 While C's treatment of routine objects has similarity to inferring a reference type in initialization contexts, the examples are assignment not initialization, and all possible forms of assignment are possible (©f©, ©&f©, ©*f©) without regard for type. 1625 1888 Instead, a routine object should be referenced by a ©const© reference: 1626 1889 \begin{cfa} 1627 ®const® void (®&® fr)( int ) = f; §\C{// routine reference}§1628 fr = ... §\C{// error, cannot change code}§1629 &fr = ...; §\C{// changing routine reference}§1630 fr( 3 ); §\C{// reference call to f}§1631 (*fr)(3); §\C{// error, incorrect type}§1890 ®const® void (®&® fr)( int ) = f; §\C{// routine reference}§ 1891 fr = ... 
§\C{// error, cannot change code}§ 1892 &fr = ...; §\C{// changing routine reference}§ 1893 fr( 3 ); §\C{// reference call to f}§ 1894 (*fr)(3); §\C{// error, incorrect type}§ 1632 1895 \end{cfa} 1633 1896 because the value of the routine object is a routine literal, \ie the routine code is normally immutable during execution.\footnote{ … … 1642 1905 \begin{itemize} 1643 1906 \item 1644 if ©R© is an \Index{rvalue} of type ©T & $_1$...&$_r$© where $r \ge 1$ references (©&© symbols) then ©&R© has type ©T ®*®&$_{\color{red}2}$...&$_{\color{red}r}$©, \ie ©T© pointer with $r-1$ references (©&© symbols).1907 if ©R© is an \Index{rvalue} of type ©T &©$_1\cdots$ ©&©$_r$, where $r \ge 1$ references (©&© symbols), then ©&R© has type ©T ®*®&©$_{\color{red}2}\cdots$ ©&©$_{\color{red}r}$, \ie ©T© pointer with $r-1$ references (©&© symbols). 1645 1908 1646 1909 \item 1647 if ©L© is an \Index{lvalue} of type ©T & $_1$...&$_l$© where $l \ge 0$ references (©&© symbols) then ©&L© has type ©T ®*®&$_{\color{red}1}$...&$_{\color{red}l}$©, \ie ©T© pointer with $l$ references (©&© symbols).1910 if ©L© is an \Index{lvalue} of type ©T &©$_1\cdots$ ©&©$_l$, where $l \ge 0$ references (©&© symbols), then ©&L© has type ©T ®*®&©$_{\color{red}1}\cdots$ ©&©$_{\color{red}l}$, \ie ©T© pointer with $l$ references (©&© symbols). 
1648 1911 \end{itemize} 1649 1912 The following example shows the first rule applied to different \Index{rvalue} contexts: … … 1651 1914 int x, * px, ** ppx, *** pppx, **** ppppx; 1652 1915 int & rx = x, && rrx = rx, &&& rrrx = rrx ; 1653 x = rrrx; // rrrx is an lvalue with type int &&& (equivalent to x)1654 px = &rrrx; // starting from rrrx, &rrrx is an rvalue with type int *&&& (&x)1655 ppx = &&rrrx; // starting from &rrrx, &&rrrx is an rvalue with type int **&& (&rx)1656 pppx = &&&rrrx; // starting from &&rrrx, &&&rrrx is an rvalue with type int ***& (&rrx)1657 ppppx = &&&&rrrx; // starting from &&&rrrx, &&&&rrrx is an rvalue with type int **** (&rrrx)1916 x = rrrx; §\C[2.0in]{// rrrx is an lvalue with type int \&\&\& (equivalent to x)}§ 1917 px = &rrrx; §\C{// starting from rrrx, \&rrrx is an rvalue with type int *\&\&\& (\&x)}§ 1918 ppx = &&rrrx; §\C{// starting from \&rrrx, \&\&rrrx is an rvalue with type int **\&\& (\&rx)}§ 1919 pppx = &&&rrrx; §\C{// starting from \&\&rrrx, \&\&\&rrrx is an rvalue with type int ***\& (\&rrx)}§ 1920 ppppx = &&&&rrrx; §\C{// starting from \&\&\&rrrx, \&\&\&\&rrrx is an rvalue with type int **** (\&rrrx)}§ 1658 1921 \end{cfa} 1659 1922 The following example shows the second rule applied to different \Index{lvalue} contexts: … … 1661 1924 int x, * px, ** ppx, *** pppx; 1662 1925 int & rx = x, && rrx = rx, &&& rrrx = rrx ; 1663 rrrx = 2; // rrrx is an lvalue with type int &&& (equivalent to x)1664 &rrrx = px; // starting from rrrx, &rrrx is an rvalue with type int *&&& (rx)1665 &&rrrx = ppx; // starting from &rrrx, &&rrrx is an rvalue with type int **&& (rrx)1666 &&&rrrx = pppx; // starting from &&rrrx, &&&rrrx is an rvalue with type int ***& (rrrx)1926 rrrx = 2; §\C{// rrrx is an lvalue with type int \&\&\& (equivalent to x)}§ 1927 &rrrx = px; §\C{// starting from rrrx, \&rrrx is an rvalue with type int *\&\&\& (rx)}§ 1928 &&rrrx = ppx; §\C{// starting from \&rrrx, \&\&rrrx is an rvalue with type int **\&\& (rrx)}§ 1929 &&&rrrx 
= pppx; §\C{// starting from \&\&rrrx, \&\&\&rrrx is an rvalue with type int ***\& (rrrx)}\CRT§ 1667 1930 \end{cfa} 1668 1931 … … 1677 1940 \begin{cfa} 1678 1941 int x; 1679 x + 1; // lvalue variable (int) converts to rvalue for expression1942 x + 1; §\C[2.0in]{// lvalue variable (int) converts to rvalue for expression}§ 1680 1943 \end{cfa} 1681 1944 An rvalue has no type qualifiers (©cv©), so the lvalue qualifiers are dropped. … … 1687 1950 \begin{cfa} 1688 1951 int x, &r = x, f( int p ); 1689 x = ®r® + f( ®r® ); // lvalue reference converts to rvalue1952 x = ®r® + f( ®r® ); §\C{// lvalue reference converts to rvalue}§ 1690 1953 \end{cfa} 1691 1954 An rvalue has no type qualifiers (©cv©), so the reference qualifiers are dropped. … … 1694 1957 lvalue to reference conversion: \lstinline[deletekeywords=lvalue]@lvalue-type cv1 T@ converts to ©cv2 T &©, which allows implicitly converting variables to references. 1695 1958 \begin{cfa} 1696 int x, &r = ®x®, f( int & p ); // lvalue variable (int) convert to reference (int &)1697 f( ®x® ); // lvalue variable (int) convert to reference (int &)1959 int x, &r = ®x®, f( int & p ); §\C{// lvalue variable (int) convert to reference (int \&)}§ 1960 f( ®x® ); §\C{// lvalue variable (int) convert to reference (int \&)}§ 1698 1961 \end{cfa} 1699 1962 Conversion can restrict a type, where ©cv1© $\le$ ©cv2©, \eg passing an ©int© to a ©const volatile int &©, which has low cost. … … 1705 1968 \begin{cfa} 1706 1969 int x, & f( int & p ); 1707 f( ®x + 3® ); // rvalue parameter (int) implicitly converts to lvalue temporary reference (int &)1708 ®&f®(...) = &x; // rvalue result (int &) implicitly converts to lvalue temporary reference (int &)1970 f( ®x + 3® ); §\C[1.5in]{// rvalue parameter (int) implicitly converts to lvalue temporary reference (int \&)}§ 1971 ®&f®(...) 
= &x; §\C{// rvalue result (int \&) implicitly converts to lvalue temporary reference (int \&)}\CRT§ 1709 1972 \end{cfa} 1710 1973 In both case, modifications to the temporary are inaccessible (\Index{warning}). … … 1895 2158 in both cases the type is assumed to be void as opposed to old style C defaults of int return type and unknown parameter types, respectively, as in: 1896 2159 \begin{cfa} 1897 [§\,§] g(); §\C{// no input or output parameters}§1898 [ void ] g( void ); §\C{// no input or output parameters}§2160 [§\,§] g(); §\C{// no input or output parameters}§ 2161 [ void ] g( void ); §\C{// no input or output parameters}§ 1899 2162 \end{cfa} 1900 2163 … … 1914 2177 \begin{cfa} 1915 2178 typedef int foo; 1916 int f( int (* foo) ); §\C{// foo is redefined as a parameter name}§2179 int f( int (* foo) ); §\C{// foo is redefined as a parameter name}§ 1917 2180 \end{cfa} 1918 2181 The string ``©int (* foo)©'' declares a C-style named-parameter of type pointer to an integer (the parenthesis are superfluous), while the same string declares a \CFA style unnamed parameter of type routine returning integer with unnamed parameter of type pointer to foo. … … 1922 2185 C-style declarations can be used to declare parameters for \CFA style routine definitions, \eg: 1923 2186 \begin{cfa} 1924 [ int ] f( * int, int * ); §\C{// returns an integer, accepts 2 pointers to integers}§1925 [ * int, int * ] f( int ); §\C{// returns 2 pointers to integers, accepts an integer}§2187 [ int ] f( * int, int * ); §\C{// returns an integer, accepts 2 pointers to integers}§ 2188 [ * int, int * ] f( int ); §\C{// returns 2 pointers to integers, accepts an integer}§ 1926 2189 \end{cfa} 1927 2190 The reason for allowing both declaration styles in the new context is for backwards compatibility with existing preprocessor macros that generate C-style declaration-syntax, as in: 1928 2191 \begin{cfa} 1929 2192 #define ptoa( n, d ) int (*n)[ d ] 1930 int f( ptoa( p, 5 ) ) ... 
§\C{// expands to int f( int (*p)[ 5 ] )}§1931 [ int ] f( ptoa( p, 5 ) ) ... §\C{// expands to [ int ] f( int (*p)[ 5 ] )}§2193 int f( ptoa( p, 5 ) ) ... §\C{// expands to int f( int (*p)[ 5 ] )}§ 2194 [ int ] f( ptoa( p, 5 ) ) ... §\C{// expands to [ int ] f( int (*p)[ 5 ] )}§ 1932 2195 \end{cfa} 1933 2196 Again, programmers are highly encouraged to use one declaration form or the other, rather than mixing the forms. … … 1951 2214 int z; 1952 2215 ... x = 0; ... y = z; ... 1953 ®return;® §\C{// implicitly return x, y}§2216 ®return;® §\C{// implicitly return x, y}§ 1954 2217 } 1955 2218 \end{cfa} … … 1961 2224 [ int x, int y ] f() { 1962 2225 ... 1963 } §\C{// implicitly return x, y}§2226 } §\C{// implicitly return x, y}§ 1964 2227 \end{cfa} 1965 2228 In this case, the current values of ©x© and ©y© are returned to the calling routine just as if a ©return© had been encountered. … … 1970 2233 [ int x, int y ] f( int, x, int y ) { 1971 2234 ... 1972 } §\C{// implicitly return x, y}§2235 } §\C{// implicitly return x, y}§ 1973 2236 \end{cfa} 1974 2237 This notation allows the compiler to eliminate temporary variables in nested routine calls. 
1975 2238 \begin{cfa} 1976 [ int x, int y ] f( int, x, int y ); §\C{// prototype declaration}§2239 [ int x, int y ] f( int, x, int y ); §\C{// prototype declaration}§ 1977 2240 int a, b; 1978 2241 [a, b] = f( f( f( a, b ) ) ); … … 1988 2251 as well, parameter names are optional, \eg: 1989 2252 \begin{cfa} 1990 [ int x ] f (); §\C{// returning int with no parameters}§1991 [ * int ] g (int y); §\C{// returning pointer to int with int parameter}§1992 [ ] h ( int, char ); §\C{// returning no result with int and char parameters}§1993 [ * int, int ] j ( int ); §\C{// returning pointer to int and int, with int parameter}§2253 [ int x ] f (); §\C{// returning int with no parameters}§ 2254 [ * int ] g (int y); §\C{// returning pointer to int with int parameter}§ 2255 [ ] h ( int, char ); §\C{// returning no result with int and char parameters}§ 2256 [ * int, int ] j ( int ); §\C{// returning pointer to int and int, with int parameter}§ 1994 2257 \end{cfa} 1995 2258 This syntax allows a prototype declaration to be created by cutting and pasting source text from the routine definition header (or vice versa). 
… … 2012 2275 The syntax for pointers to \CFA routines specifies the pointer name on the right, \eg: 2013 2276 \begin{cfa} 2014 * [ int x ] () fp; §\C{// pointer to routine returning int with no parameters}§2015 * [ * int ] (int y) gp; §\C{// pointer to routine returning pointer to int with int parameter}§2016 * [ ] (int,char) hp; §\C{// pointer to routine returning no result with int and char parameters}§2017 * [ * int,int ] ( int ) jp; §\C{// pointer to routine returning pointer to int and int, with int parameter}§2277 * [ int x ] () fp; §\C{// pointer to routine returning int with no parameters}§ 2278 * [ * int ] (int y) gp; §\C{// pointer to routine returning pointer to int with int parameter}§ 2279 * [ ] (int,char) hp; §\C{// pointer to routine returning no result with int and char parameters}§ 2280 * [ * int,int ] ( int ) jp; §\C{// pointer to routine returning pointer to int and int, with int parameter}§ 2018 2281 \end{cfa} 2019 2282 While parameter names are optional, \emph{a routine name cannot be specified}; 2020 2283 for example, the following is incorrect: 2021 2284 \begin{cfa} 2022 * [ int x ] f () fp; §\C{// routine name "f" is not allowed}§2285 * [ int x ] f () fp; §\C{// routine name "f" is not allowed}§ 2023 2286 \end{cfa} 2024 2287 … … 2043 2306 whereas a named (keyword) call may be: 2044 2307 \begin{cfa} 2045 p( z : 3, x : 4, y : 7 ); §\C{// rewrite $\Rightarrow$ p( 4, 7, 3 )}§2308 p( z : 3, x : 4, y : 7 ); §\C{// rewrite $\Rightarrow$ p( 4, 7, 3 )}§ 2046 2309 \end{cfa} 2047 2310 Here the order of the arguments is unimportant, and the names of the parameters are used to associate argument values with the corresponding parameters. … … 2060 2323 For example, the following routine prototypes and definition are all valid. 
2061 2324 \begin{cfa} 2062 void p( int, int, int ); §\C{// equivalent prototypes}§2325 void p( int, int, int ); §\C{// equivalent prototypes}§ 2063 2326 void p( int x, int y, int z ); 2064 2327 void p( int y, int x, int z ); 2065 2328 void p( int z, int y, int x ); 2066 void p( int q, int r, int s ) {} §\C{// match with this definition}§2329 void p( int q, int r, int s ) {} §\C{// match with this definition}§ 2067 2330 \end{cfa} 2068 2331 Forcing matching parameter names in routine prototypes with corresponding routine definitions is possible, but goes against a strong tradition in C programming. … … 2076 2339 int f( int x, double y ); 2077 2340 2078 f( j : 3, i : 4 ); §\C{// 1st f}§2079 f( x : 7, y : 8.1 ); §\C{// 2nd f}§2080 f( 4, 5 ); §\C{// ambiguous call}§2341 f( j : 3, i : 4 ); §\C{// 1st f}§ 2342 f( x : 7, y : 8.1 ); §\C{// 2nd f}§ 2343 f( 4, 5 ); §\C{// ambiguous call}§ 2081 2344 \end{cfa} 2082 2345 However, named arguments compound routine resolution in conjunction with conversions: 2083 2346 \begin{cfa} 2084 f( i : 3, 5.7 ); §\C{// ambiguous call ?}§2347 f( i : 3, 5.7 ); §\C{// ambiguous call ?}§ 2085 2348 \end{cfa} 2086 2349 Depending on the cost associated with named arguments, this call could be resolvable or ambiguous. 
… … 2096 2359 the allowable positional calls are: 2097 2360 \begin{cfa} 2098 p(); §\C{// rewrite $\Rightarrow$ p( 1, 2, 3 )}§2099 p( 4 ); §\C{// rewrite $\Rightarrow$ p( 4, 2, 3 )}§2100 p( 4, 4 ); §\C{// rewrite $\Rightarrow$ p( 4, 4, 3 )}§2101 p( 4, 4, 4 ); §\C{// rewrite $\Rightarrow$ p( 4, 4, 4 )}§2361 p(); §\C{// rewrite $\Rightarrow$ p( 1, 2, 3 )}§ 2362 p( 4 ); §\C{// rewrite $\Rightarrow$ p( 4, 2, 3 )}§ 2363 p( 4, 4 ); §\C{// rewrite $\Rightarrow$ p( 4, 4, 3 )}§ 2364 p( 4, 4, 4 ); §\C{// rewrite $\Rightarrow$ p( 4, 4, 4 )}§ 2102 2365 // empty arguments 2103 p( , 4, 4 ); §\C{// rewrite $\Rightarrow$ p( 1, 4, 4 )}§2104 p( 4, , 4 ); §\C{// rewrite $\Rightarrow$ p( 4, 2, 4 )}§2105 p( 4, 4, ); §\C{// rewrite $\Rightarrow$ p( 4, 4, 3 )}§2106 p( 4, , ); §\C{// rewrite $\Rightarrow$ p( 4, 2, 3 )}§2107 p( , 4, ); §\C{// rewrite $\Rightarrow$ p( 1, 4, 3 )}§2108 p( , , 4 ); §\C{// rewrite $\Rightarrow$ p( 1, 2, 4 )}§2109 p( , , ); §\C{// rewrite $\Rightarrow$ p( 1, 2, 3 )}§2366 p( , 4, 4 ); §\C{// rewrite $\Rightarrow$ p( 1, 4, 4 )}§ 2367 p( 4, , 4 ); §\C{// rewrite $\Rightarrow$ p( 4, 2, 4 )}§ 2368 p( 4, 4, ); §\C{// rewrite $\Rightarrow$ p( 4, 4, 3 )}§ 2369 p( 4, , ); §\C{// rewrite $\Rightarrow$ p( 4, 2, 3 )}§ 2370 p( , 4, ); §\C{// rewrite $\Rightarrow$ p( 1, 4, 3 )}§ 2371 p( , , 4 ); §\C{// rewrite $\Rightarrow$ p( 1, 2, 4 )}§ 2372 p( , , ); §\C{// rewrite $\Rightarrow$ p( 1, 2, 3 )}§ 2110 2373 \end{cfa} 2111 2374 Here the missing arguments are inserted from the default values in the parameter list. 
… … 2131 2394 Default values may only appear in a prototype versus definition context: 2132 2395 \begin{cfa} 2133 void p( int x, int y = 2, int z = 3 ); §\C{// prototype: allowed}§2134 void p( int, int = 2, int = 3 ); §\C{// prototype: allowed}§2135 void p( int x, int y = 2, int z = 3 ) {} §\C{// definition: not allowed}§2396 void p( int x, int y = 2, int z = 3 ); §\C{// prototype: allowed}§ 2397 void p( int, int = 2, int = 3 ); §\C{// prototype: allowed}§ 2398 void p( int x, int y = 2, int z = 3 ) {} §\C{// definition: not allowed}§ 2136 2399 \end{cfa} 2137 2400 The reason for this restriction is to allow separate compilation. … … 2158 2421 \begin{cfa} 2159 2422 void p( int x, int y = 2, int z = 3... ); 2160 p( 1, 4, 5, 6, z : 3 ); §\C{// assume p( /* positional */, ... , /* named */ );}§2161 p( 1, z : 3, 4, 5, 6 ); §\C{// assume p( /* positional */, /* named */, ... );}§2423 p( 1, 4, 5, 6, z : 3 ); §\C{// assume p( /* positional */, ... , /* named */ );}§ 2424 p( 1, z : 3, 4, 5, 6 ); §\C{// assume p( /* positional */, /* named */, ... 
);}§ 2162 2425 \end{cfa} 2163 2426 The first call is an error because arguments 4 and 5 are actually positional not ellipse arguments; … … 2189 2452 Furthermore, overloading cannot handle accessing default arguments in the middle of a positional list, via a missing argument, such as: 2190 2453 \begin{cfa} 2191 p( 1, /* default */, 5 ); §\C{// rewrite $\Rightarrow$ p( 1, 2, 5 )}§2454 p( 1, /* default */, 5 ); §\C{// rewrite $\Rightarrow$ p( 1, 2, 5 )}§ 2192 2455 \end{cfa} 2193 2456 … … 2202 2465 \begin{cfa} 2203 2466 struct { 2204 int f1; §\C{// named field}§2205 int f2 : 4; §\C{// named field with bit field size}§2206 int : 3; §\C{// unnamed field for basic type with bit field size}§2207 int ; §\C{// disallowed, unnamed field}§2208 int *; §\C{// disallowed, unnamed field}§2209 int (*)( int ); §\C{// disallowed, unnamed field}§2467 int f1; §\C{// named field}§ 2468 int f2 : 4; §\C{// named field with bit field size}§ 2469 int : 3; §\C{// unnamed field for basic type with bit field size}§ 2470 int ; §\C{// disallowed, unnamed field}§ 2471 int *; §\C{// disallowed, unnamed field}§ 2472 int (*)( int ); §\C{// disallowed, unnamed field}§ 2210 2473 }; 2211 2474 \end{cfa} … … 2215 2478 \begin{cfa} 2216 2479 struct { 2217 int , , ; §\C{// 3 unnamed fields}§2480 int , , ; §\C{// 3 unnamed fields}§ 2218 2481 } 2219 2482 \end{cfa} … … 2262 2525 struct T t; 2263 2526 } s; 2264 2527 2265 2528 2266 2529 … … 2309 2572 const unsigned int size = 5; 2310 2573 int ia[size]; 2311 ... §\C{// assign values to array ia}§2312 qsort( ia, size ); §\C{// sort ascending order using builtin ?<?}§2574 ... 
§\C{// assign values to array ia}§ 2575 qsort( ia, size ); §\C{// sort ascending order using builtin ?<?}§ 2313 2576 { 2314 2577 ®int ?<?( int x, int y ) { return x > y; }® §\C{// nested routine}§ 2315 qsort( ia, size ); §\C{// sort descending order by local redefinition}§ 2578 qsort( ia, size ); §\C{// sort descending order by local redefinition}§ 2316 2579 } 2317 2580 \end{cfa} … … 2321 2584 The following program is undefined in \CFA (and \Indexc{gcc}) 2322 2585 \begin{cfa} 2323 [* [int]( int )] foo() { §\C{// int (* foo())( int )}§ 2586 [* [int]( int )] foo() { §\C{// int (* foo())( int )}§ 2324 2587 int ®i® = 7; 2325 2588 int bar( int p ) { 2326 ®i® += 1; §\C{// dependent on local variable}§ 2589 ®i® += 1; §\C{// dependent on local variable}§ 2327 sout | ®i® | endl; 2590 sout | ®i®; 2328 2591 } 2329 return bar; §\C{// undefined because of local dependence}§ 2592 return bar; §\C{// undefined because of local dependence}§ 2330 2593 } 2331 2594 int main() { 2332 * [int]( int ) fp = foo(); §\C{// int (* fp)( int )}§ 2333 sout | fp( 3 ) | endl; 2334 } 2335 \end{cfa} 2336 because 2595 * [int]( int ) fp = foo(); §\C{// int (* fp)( int )}§ 2596 sout | fp( 3 ); 2597 } 2598 \end{cfa} 2599 because 2337 2600 2338 2601 Currently, there are no \Index{lambda} expressions, \ie unnamed routines because routine names are very important to properly select the correct routine. … … 2343 2606 In C and \CFA, lists of elements appear in several contexts, such as the parameter list of a routine call. 2344 2607 \begin{cfa} 2345 f( ®2, x, 3 + i® ); §\C{// element list}§ 2608 f( ®2, x, 3 + i® ); §\C{// element list}§ 2346 2609 \end{cfa} 2347 2610 A list of elements is called a \newterm{tuple}, and is different from a \Index{comma expression}. 
… … 2360 2623 typedef struct { int quot, rem; } div_t; §\C[7cm]{// from include stdlib.h}§ 2361 2624 div_t div( int num, int den ); 2362 div_t qr = div( 13, 5 ); §\C{// return quotient/remainder aggregate}§2363 printf( "%d %d\n", qr.quot, qr.rem ); §\C{// print quotient/remainder}§2625 div_t qr = div( 13, 5 ); §\C{// return quotient/remainder aggregate}§ 2626 printf( "%d %d\n", qr.quot, qr.rem ); §\C{// print quotient/remainder}§ 2364 2627 \end{cfa} 2365 2628 This approach requires a name for the return type and fields, where \Index{naming} is a common programming-language issue. … … 2371 2634 For example, consider C's \Indexc{modf} function, which returns the integral and fractional part of a floating value. 2372 2635 \begin{cfa} 2373 double modf( double x, double * i ); §\C{// from include math.h}§2374 double intp, frac = modf( 13.5, &intp ); §\C{// return integral and fractional components}§2375 printf( "%g %g\n", intp, frac ); §\C{// print integral/fractional components}§2636 double modf( double x, double * i ); §\C{// from include math.h}§ 2637 double intp, frac = modf( 13.5, &intp ); §\C{// return integral and fractional components}§ 2638 printf( "%g %g\n", intp, frac ); §\C{// print integral/fractional components}§ 2376 2639 \end{cfa} 2377 2640 This approach requires allocating storage for the return values, which complicates the call site with a sequence of variable declarations leading to the call. … … 2400 2663 When a function call is passed as an argument to another call, the best match of actual arguments to formal parameters is evaluated given all possible expression interpretations in the current scope. 
2401 2664 \begin{cfa} 2402 void g( int, int ); §\C{// 1}§2403 void g( double, double ); §\C{// 2}§2404 g( div( 13, 5 ) ); §\C{// select 1}§2405 g( modf( 13.5 ) ); §\C{// select 2}§2665 void g( int, int ); §\C{// 1}§ 2666 void g( double, double ); §\C{// 2}§ 2667 g( div( 13, 5 ) ); §\C{// select 1}§ 2668 g( modf( 13.5 ) ); §\C{// select 2}§ 2406 2669 \end{cfa} 2407 2670 In this case, there are two overloaded ©g© routines. … … 2412 2675 The previous examples can be rewritten passing the multiple returned-values directly to the ©printf© function call. 2413 2676 \begin{cfa} 2414 [ int, int ] div( int x, int y ); §\C{// from include stdlib}§2415 printf( "%d %d\n", div( 13, 5 ) ); §\C{// print quotient/remainder}§2416 2417 [ double, double ] modf( double x ); §\C{// from include math}§2418 printf( "%g %g\n", modf( 13.5 ) ); §\C{// print integral/fractional components}§2677 [ int, int ] div( int x, int y ); §\C{// from include stdlib}§ 2678 printf( "%d %d\n", div( 13, 5 ) ); §\C{// print quotient/remainder}§ 2679 2680 [ double, double ] modf( double x ); §\C{// from include math}§ 2681 printf( "%g %g\n", modf( 13.5 ) ); §\C{// print integral/fractional components}§ 2419 2682 \end{cfa} 2420 2683 This approach provides the benefits of compile-time checking for appropriate return statements as in aggregation, but without the required verbosity of declaring a new named type. … … 2426 2689 \begin{cfa} 2427 2690 int quot, rem; 2428 [ quot, rem ] = div( 13, 5 ); §\C{// assign multiple variables}§2429 printf( "%d %d\n", quot, rem ); §\C{// print quotient/remainder}\CRT§2691 [ quot, rem ] = div( 13, 5 ); §\C{// assign multiple variables}§ 2692 printf( "%d %d\n", quot, rem ); §\C{// print quotient/remainder}\CRT§ 2430 2693 \end{cfa} 2431 2694 Here, the multiple return-values are matched in much the same way as passing multiple return-values to multiple parameters in a call. … … 2433 2696 2434 2697 \subsection{Expressions} 2698 2699 % Change order of expression evaluation. 
2700 % http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0145r2.pdf 2435 2701 2436 2702 Multiple-return-value functions provide \CFA with a new syntax for expressing a combination of expressions in the return statement and a combination of types in a function signature. … … 2453 2719 In \CFA, it is possible to overcome this restriction by declaring a \newterm{tuple variable}. 2454 2720 \begin{cfa} 2455 [int, int] ®qr® = div( 13, 5 ); §\C{// initialize tuple variable}§2456 printf( "%d %d\n", ®qr® ); §\C{// print quotient/remainder}§2721 [int, int] ®qr® = div( 13, 5 ); §\C{// initialize tuple variable}§ 2722 printf( "%d %d\n", ®qr® ); §\C{// print quotient/remainder}§ 2457 2723 \end{cfa} 2458 2724 It is now possible to match the multiple return-values to a single variable, in much the same way as \Index{aggregation}. … … 2460 2726 One way to access the individual components of a tuple variable is with assignment. 2461 2727 \begin{cfa} 2462 [ quot, rem ] = qr; §\C{// assign multiple variables}§2728 [ quot, rem ] = qr; §\C{// assign multiple variables}§ 2463 2729 \end{cfa} 2464 2730 … … 2483 2749 [int, double] * p; 2484 2750 2485 int y = x.0; §\C{// access int component of x}§2486 y = f().1; §\C{// access int component of f}§2487 p->0 = 5; §\C{// access int component of tuple pointed-to by p}§2488 g( x.1, x.0 ); §\C{// rearrange x to pass to g}§2489 double z = [ x, f() ].0.1; §\C{// access second component of first component of tuple expression}§2751 int y = x.0; §\C{// access int component of x}§ 2752 y = f().1; §\C{// access int component of f}§ 2753 p->0 = 5; §\C{// access int component of tuple pointed-to by p}§ 2754 g( x.1, x.0 ); §\C{// rearrange x to pass to g}§ 2755 double z = [ x, f() ].0.1; §\C{// access second component of first component of tuple expression}§ 2490 2756 \end{cfa} 2491 2757 Tuple-index expressions can occur on any tuple-typed expression, including tuple-returning functions, square-bracketed tuple expressions, and other tuple-index 
expressions, provided the retrieved component is also a tuple. … … 2554 2820 double y; 2555 2821 [int, double] z; 2556 [y, x] = 3.14; §\C{// mass assignment}§2822 [y, x] = 3.14; §\C{// mass assignment}§ 2557 2823 [x, y] = z; §\C{// multiple assignment}§ 2558 2824 z = 10; §\C{// mass assignment}§ 2559 z = [x, y]; §\C{// multiple assignment}§2825 z = [x, y]; §\C{// multiple assignment}§ 2560 2826 \end{cfa} 2561 2827 Let $L_i$ for $i$ in $[0, n)$ represent each component of the flattened left side, $R_i$ represent each component of the flattened right side of a multiple assignment, and $R$ represent the right side of a mass assignment. … … 2601 2867 double c, d; 2602 2868 [ void ] f( [ int, int ] ); 2603 f( [ c, a ] = [ b, d ] = 1.5 ); // assignments in parameter list2869 f( [ c, a ] = [ b, d ] = 1.5 ); §\C{// assignments in parameter list}§ 2604 2870 \end{cfa} 2605 2871 The tuple expression begins with a mass assignment of ©1.5© into ©[b, d]©, which assigns ©1.5© into ©b©, which is truncated to ©1©, and ©1.5© into ©d©, producing the tuple ©[1, 1.5]© as a result. … … 2614 2880 \begin{cfa} 2615 2881 struct S; 2616 void ?{}(S *); // (1)2617 void ?{}(S *, int); // (2)2618 void ?{}(S * double); // (3)2619 void ?{}(S *, S); // (4)2620 2621 [S, S] x = [3, 6.28]; // uses (2), (3), specialized constructors2622 [S, S] y; // uses (1), (1), default constructor2623 [S, S] z = x.0; // uses (4), (4), copy constructor2882 void ?{}(S *); §\C{// (1)}§ 2883 void ?{}(S *, int); §\C{// (2)}§ 2884 void ?{}(S * double); §\C{// (3)}§ 2885 void ?{}(S *, S); §\C{// (4)}§ 2886 2887 [S, S] x = [3, 6.28]; §\C{// uses (2), (3), specialized constructors}§ 2888 [S, S] y; §\C{// uses (1), (1), default constructor}§ 2889 [S, S] z = x.0; §\C{// uses (4), (4), copy constructor}§ 2624 2890 \end{cfa} 2625 2891 In this example, ©x© is initialized by the multiple constructor calls ©?{}(&x.0, 3)© and ©?{}(&x.1, 6.28)©, while ©y© is initialized by two default constructor calls ©?{}(&y.0)© and ©?{}(&y.1)©. 
… … 2662 2928 A member-access tuple may be used anywhere a tuple can be used, \eg: 2663 2929 \begin{cfa} 2664 s.[ y, z, x ] = [ 3, 3.2, 'x' ]; §\C{// equivalent to s.x = 'x', s.y = 3, s.z = 3.2}§2665 f( s.[ y, z ] ); §\C{// equivalent to f( s.y, s.z )}§2930 s.[ y, z, x ] = [ 3, 3.2, 'x' ]; §\C{// equivalent to s.x = 'x', s.y = 3, s.z = 3.2}§ 2931 f( s.[ y, z ] ); §\C{// equivalent to f( s.y, s.z )}§ 2666 2932 \end{cfa} 2667 2933 Note, the fields appearing in a record-field tuple may be specified in any order; … … 2673 2939 void f( double, long ); 2674 2940 2675 f( x.[ 0, 3 ] ); §\C{// f( x.0, x.3 )}§2676 x.[ 0, 1 ] = x.[ 1, 0 ]; §\C{// [ x.0, x.1 ] = [ x.1, x.0 ]}§2941 f( x.[ 0, 3 ] ); §\C{// f( x.0, x.3 )}§ 2942 x.[ 0, 1 ] = x.[ 1, 0 ]; §\C{// [ x.0, x.1 ] = [ x.1, x.0 ]}§ 2677 2943 [ long, int, long ] y = x.[ 2, 0, 2 ]; 2678 2944 \end{cfa} … … 2691 2957 \begin{cfa} 2692 2958 [ int, float, double ] f(); 2693 [ double, float ] x = f().[ 2, 1 ]; §\C{// f() called once}§2959 [ double, float ] x = f().[ 2, 1 ]; §\C{// f() called once}§ 2694 2960 \end{cfa} 2695 2961 … … 2704 2970 That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function. 2705 2971 \begin{cfa} 2706 int f(); // (1)2707 double f(); // (2)2708 2709 f(); // ambiguous - (1),(2) both equally viable2710 (int)f(); // choose (2)2972 int f(); §\C{// (1)}§ 2973 double f(); §\C{// (2)}§ 2974 2975 f(); §\C{// ambiguous - (1),(2) both equally viable}§ 2976 (int)f(); §\C{// choose (2)}§ 2711 2977 \end{cfa} 2712 2978 Since casting is a fundamental operation in \CFA, casts need to be given a meaningful interpretation in the context of tuples. 
… … 2716 2982 void g(); 2717 2983 2718 (void)f(); // valid, ignore results2719 (int)g(); // invalid, void cannot be converted to int2984 (void)f(); §\C{// valid, ignore results}§ 2985 (int)g(); §\C{// invalid, void cannot be converted to int}§ 2720 2986 2721 2987 struct A { int x; }; 2722 (struct A)f(); // invalid, int cannot be converted to A2988 (struct A)f(); §\C{// invalid, int cannot be converted to A}§ 2723 2989 \end{cfa} 2724 2990 In C, line 4 is a valid cast, which calls ©f© and discards its result. … … 2736 3002 [int, [int, int], int] g(); 2737 3003 2738 ([int, double])f(); // (1) valid2739 ([int, int, int])g(); // (2) valid2740 ([void, [int, int]])g(); // (3) valid2741 ([int, int, int, int])g(); // (4) invalid2742 ([int, [int, int, int]])g(); // (5) invalid3004 ([int, double])f(); §\C{// (1) valid}§ 3005 ([int, int, int])g(); §\C{// (2) valid}§ 3006 ([void, [int, int]])g(); §\C{// (3) valid}§ 3007 ([int, int, int, int])g(); §\C{// (4) invalid}§ 3008 ([int, [int, int, int]])g(); §\C{// (5) invalid}§ 2743 3009 \end{cfa} 2744 3010 … … 2800 3066 void f([int, int], int, int); 2801 3067 2802 f([0, 0], 0, 0); // no cost2803 f(0, 0, 0, 0); // cost for structuring2804 f([0, 0,], [0, 0]); // cost for flattening2805 f([0, 0, 0], 0); // cost for flattening and structuring3068 f([0, 0], 0, 0); §\C{// no cost}§ 3069 f(0, 0, 0, 0); §\C{// cost for structuring}§ 3070 f([0, 0,], [0, 0]); §\C{// cost for flattening}§ 3071 f([0, 0, 0], 0); §\C{// cost for flattening and structuring}§ 2806 3072 \end{cfa} 2807 3073 … … 2866 3132 [ unsigned int, char ] 2867 3133 [ double, double, double ] 2868 [ * int, int * ] §\C{// mix of CFA and ANSI}§3134 [ * int, int * ] §\C{// mix of CFA and ANSI}§ 2869 3135 [ * [ 5 ] int, * * char, * [ [ int, int ] ] (int, int) ] 2870 3136 \end{cfa} … … 2873 3139 Examples of declarations using tuple types are: 2874 3140 \begin{cfa} 2875 [ int, int ] x; §\C{// 2 element tuple, each element of type int}§2876 * [ char, char ] y; §\C{// pointer to a 2 
element tuple}§3141 [ int, int ] x; §\C{// 2 element tuple, each element of type int}§ 3142 * [ char, char ] y; §\C{// pointer to a 2 element tuple}§ 2877 3143 [ [ int, int ] ] z ([ int, int ]); 2878 3144 \end{cfa} … … 2891 3157 [ int, int ] w1; 2892 3158 [ int, int, int ] w2; 2893 [ void ] f (int, int, int); /* three input parameters of type int */2894 [ void ] g ([ int, int, int ]); /* 3 element tuple as input */3159 [ void ] f (int, int, int); §\C{// three input parameters of type int}§ 3160 [ void ] g ([ int, int, int ]); §\C{3 element tuple as input}§ 2895 3161 f( [ 1, 2, 3 ] ); 2896 3162 f( w1, 3 ); … … 2972 3238 [ int, int, int, int ] w = [ 1, 2, 3, 4 ]; 2973 3239 int x = 5; 2974 [ x, w ] = [ w, x ]; §\C{// all four tuple coercions}§3240 [ x, w ] = [ w, x ]; §\C{// all four tuple coercions}§ 2975 3241 \end{cfa} 2976 3242 Starting on the right-hand tuple in the last assignment statement, w is opened, producing a tuple of four values; … … 3060 3326 both these examples produce indeterminate results: 3061 3327 \begin{cfa} 3062 f( x++, x++ ); §\C{// C routine call with side effects in arguments}§3063 [ v1, v2 ] = [ x++, x++ ]; §\C{// side effects in righthand side of multiple assignment}§3328 f( x++, x++ ); §\C{// C routine call with side effects in arguments}§ 3329 [ v1, v2 ] = [ x++, x++ ]; §\C{// side effects in righthand side of multiple assignment}§ 3064 3330 \end{cfa} 3065 3331 … … 3083 3349 3084 3350 3085 \section{I/O Library} 3086 \label{s:IOLibrary} 3087 \index{input/output library} 3088 3089 The goal of \CFA I/O is to simplify the common cases\index{I/O!common case}, while fully supporting polymorphism and user defined types in a consistent way. 3090 The approach combines ideas from \CC and Python. 3091 The \CFA header file for the I/O library is \Indexc{fstream}. 3092 3093 The common case is printing out a sequence of variables separated by whitespace. 
3351 \section{Stream I/O Library} 3352 \label{s:StreamIOLibrary} 3353 \index{input/output stream library} 3354 \index{stream library} 3355 3356 The goal of \CFA stream input/output (I/O) is to simplify the common cases\index{I/O!common case}, while fully supporting polymorphism and user defined types in a consistent way. 3357 Stream I/O can be implicitly or explicitly formatted. 3358 Implicit formatting means \CFA selects the output or input format for values that match with the type of a variable. 3359 Explicit formatting means additional information is specified to augment how an output or input of value is interpreted. 3360 \CFA formatting is a cross between C ©printf© and \CC ©cout© manipulators, and Python implicit spacing and newline. 3361 Specifically: 3362 \begin{itemize} 3363 \item 3364 ©printf©/Python format codes are dense, making them difficult to read and remember. 3365 \CFA/\CC format manipulators are named, making them easier to read and remember. 3366 \item 3367 ©printf©/Python separates format codes from associated variables, making it difficult to match codes with variables. 3368 \CFA/\CC co-locate codes with associated variables, where \CFA has the tighter binding. 3369 \item 3370 Format manipulators in \CFA have local effect, whereas \CC have global effect, except ©setw©. 3371 Hence, it is common programming practice to toggle manipulators on and then back to the default to prevent downstream side-effects. 3372 Without this programming style, errors occur when moving prints, as manipulator effects incorrectly flow into the new location. 3373 (To guarantee no side-effects, manipulator values must be saved and restored across function calls.) 3374 \item 3375 \CFA has more sophisticated implicit spacing between values than Python, plus implicit newline at the end of a print. 3376 \end{itemize} 3377 The \CFA header file for the I/O library is \Indexc{fstream.hfa}. 
3378 3379 For implicit formatted output, the common case is printing a series of variables separated by whitespace. 3094 3380 \begin{cquote} 3095 \begin{tabular}{@{}l@{\hspace{ 3em}}l@{}}3096 \multicolumn{1}{c@{\hspace{ 3em}}}{\textbf{\CFA}} & \multicolumn{1}{c}{\textbf{\CC}} \\3381 \begin{tabular}{@{}l@{\hspace{2em}}l@{\hspace{2em}}l@{}} 3382 \multicolumn{1}{c@{\hspace{2em}}}{\textbf{\CFA}} & \multicolumn{1}{c@{\hspace{2em}}}{\textbf{\CC}} & \multicolumn{1}{c}{\textbf{Python}} \\ 3097 3383 \begin{cfa} 3098 3384 int x = 1, y = 2, z = 3; 3099 sout | x ®|® y ®|® z | endl;3385 sout | x ®|® y ®|® z; 3100 3386 \end{cfa} 3101 3387 & … … 3103 3389 3104 3390 cout << x ®<< " "® << y ®<< " "® << z << endl; 3391 \end{cfa} 3392 & 3393 \begin{cfa} 3394 x = 1; y = 2; z = 3 3395 print( x, y, z ) 3105 3396 \end{cfa} 3106 3397 \\ … … 3110 3401 & 3111 3402 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3112 1 2 3 3403 1® ®2® ®3 3404 \end{cfa} 3405 & 3406 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3407 1® ®2® ®3 3113 3408 \end{cfa} 3114 3409 \end{tabular} 3115 3410 \end{cquote} 3116 The \CFA form has half the characters of the \CC form, and is similar to \Index*{Python} I/O with respect to implicit separators .3117 Similar simplification occurs for \Index{tuple} I/O, which prints all tuple valuesseparated by ``\lstinline[showspaces=true]@, @''.3411 The \CFA form has half the characters of the \CC form, and is similar to \Index*{Python} I/O with respect to implicit separators and newline. 3412 Similar simplification occurs for \Index{tuple} I/O, which flattens the tuple and prints each value separated by ``\lstinline[showspaces=true]@, @''. 
3118 3413 \begin{cfa} 3119 3414 [int, [ int, int ] ] t1 = [ 1, [ 2, 3 ] ], t2 = [ 4, [ 5, 6 ] ]; 3120 sout | t1 | t2 | endl;§\C{// print tuples}§3415 sout | t1 | t2; §\C{// print tuples}§ 3121 3416 \end{cfa} 3122 3417 \begin{cfa}[showspaces=true,aboveskip=0pt] 3123 3418 1®, ®2®, ®3 4®, ®5®, ®6 3124 3419 \end{cfa} 3125 Finally, \CFA uses the logical-or operator for I/O as it is the lowest-priority overloadableoperator, other than assignment.3420 Finally, \CFA uses the logical-or operator for I/O as it is the lowest-priority \emph{overloadable} operator, other than assignment. 3126 3421 Therefore, fewer output expressions require parenthesis. 3127 3422 \begin{cquote} … … 3130 3425 & 3131 3426 \begin{cfa} 3132 sout | x * 3 | y + 1 | z << 2 | x == y | (x | y) | (x || y) | (x > z ? 1 : 2) | endl;3427 sout | x * 3 | y + 1 | z << 2 | x == y | ®(®x | y®)® | ®(®x || y®)® | ®(®x > z ? 1 : 2®)®; 3133 3428 \end{cfa} 3134 3429 \\ … … 3136 3431 & 3137 3432 \begin{cfa} 3138 cout << x * 3 << y + 1 << ®(®z << 2®)® << ®(®x == y®)® << (x | y) << (x || y) << (x > z ? 1 : 2)<< endl;3433 cout << x * 3 << y + 1 << ®(®z << 2®)® << ®(®x == y®)® << ®(®x | y®)® << ®(®x || y®)® << ®(®x > z ? 1 : 2®)® << endl; 3139 3434 \end{cfa} 3140 3435 \\ … … 3145 3440 \end{tabular} 3146 3441 \end{cquote} 3147 There is a weak similarity between the \CFA logical-or operator and the Shell pipe-operator for moving data, where data flows in the correct direction for input but the opposite direction for output. 3442 Input and output use a uniform operator, ©|©, rather than separate operators, as in ©>>© and ©<<© for \CC. 3443 There is a weak similarity between the \CFA logical-or operator and the \Index{Shell pipe-operator} for moving data, where data flows in the correct direction for input but the opposite direction for output. 
3444 3445 For implicit formatted input, the common case is reading a sequence of values separated by whitespace, where the type of an input constant must match with the type of the input variable. 3446 \begin{cquote} 3447 \begin{lrbox}{\LstBox} 3448 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 3449 int x; double y char z; 3450 \end{cfa} 3451 \end{lrbox} 3452 \begin{tabular}{@{}l@{\hspace{3em}}l@{\hspace{3em}}l@{}} 3453 \multicolumn{1}{@{}l@{}}{\usebox\LstBox} \\ 3454 \multicolumn{1}{c@{\hspace{2em}}}{\textbf{\CFA}} & \multicolumn{1}{c@{\hspace{2em}}}{\textbf{\CC}} & \multicolumn{1}{c}{\textbf{Python}} \\ 3455 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 3456 sin | x | y | z; 3457 \end{cfa} 3458 & 3459 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 3460 cin >> x >> y >> z; 3461 \end{cfa} 3462 & 3463 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 3464 x = int(input()); y = float(input()); z = input(); 3465 \end{cfa} 3466 \\ 3467 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3468 ®1® ®2.5® ®A® 3469 3470 3471 \end{cfa} 3472 & 3473 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3474 ®1® ®2.5® ®A® 3475 3476 3477 \end{cfa} 3478 & 3479 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3480 ®1® 3481 ®2.5® 3482 ®A® 3483 \end{cfa} 3484 \end{tabular} 3485 \end{cquote} 3486 3148 3487 3149 3488 3150 3489 \subsection{Implicit Separator} 3151 3490 3152 The \Index{implicit separator}\index{I/O!separator} character (space/blank) is a separator not a terminator .3491 The \Index{implicit separator}\index{I/O!separator} character (space/blank) is a separator not a terminator for output. 3153 3492 The rules for implicitly adding the separator are: 3154 3493 \begin{enumerate} … … 3156 3495 A separator does not appear at the start or end of a line. 
3157 3496 \begin{cfa}[belowskip=0pt] 3158 sout | 1 | 2 | 3 | endl;3497 sout | 1 | 2 | 3; 3159 3498 \end{cfa} 3160 3499 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] … … 3165 3504 A separator does not appear before or after a character literal or variable. 3166 3505 \begin{cfa} 3167 sout | '1' | '2' | '3' | endl;3506 sout | '1' | '2' | '3'; 3168 3507 123 3169 3508 \end{cfa} 3170 3509 3171 3510 \item 3172 A separator does not appear before or after a null (empty) C string .3173 \begin{cfa} 3174 sout | 1 | "" | 2 | "" | 3 | endl;3511 A separator does not appear before or after a null (empty) C string, which is a local mechanism to disable insertion of the separator character. 3512 \begin{cfa} 3513 sout | 1 | "" | 2 | "" | 3; 3175 3514 123 3176 3515 \end{cfa} 3177 which is a local mechanism to disable insertion of the separator character. 3178 3179 \item 3180 A separator does not appear before a C string starting with the (extended) \Index*{ASCII}\index{ASCII!extended} characters: \lstinline[mathescape=off,basicstyle=\tt]@([{=$£¥¡¿«@ 3516 3517 \item 3518 {\lstset{language=CFA,deletedelim=**[is][]{¢}{¢}} 3519 A separator does not appear before a C string starting with the (extended) \Index*{ASCII}\index{ASCII!extended} characters: \lstinline[basicstyle=\tt]@,.;!?)]}%¢»@, where \lstinline[basicstyle=\tt]@»@ is a closing citation mark. 3520 \begin{cfa}[belowskip=0pt] 3521 sout | 1 | ", x" | 2 | ". x" | 3 | "; x" | 4 | "! x" | 5 | "? 
x" | 6 | "% x" 3522 | 7 | "¢ x" | 8 | "» x" | 9 | ") x" | 10 | "] x" | 11 | "} x"; 3523 \end{cfa} 3524 \begin{cfa}[basicstyle=\tt,showspaces=true,aboveskip=0pt,belowskip=0pt] 3525 1®,® x 2®.® x 3®;® x 4®!® x 5®?® x 6®%® x 7§\color{red}\textcent§ x 8®»® x 9®)® x 10®]® x 11®}® x 3526 \end{cfa}}% 3527 3528 \item 3529 A separator does not appear after a C string ending with the (extended) \Index*{ASCII}\index{ASCII!extended} characters: \lstinline[mathescape=off,basicstyle=\tt]@([{=$£¥¡¿«@, where \lstinline[basicstyle=\tt]@¡¿@ are inverted opening exclamation and question marks, and \lstinline[basicstyle=\tt]@«@ is an opening citation mark. 3181 3530 %$ 3182 3531 \begin{cfa}[mathescape=off] 3183 3532 sout | "x (" | 1 | "x [" | 2 | "x {" | 3 | "x =" | 4 | "x $" | 5 | "x £" | 6 | "x ¥" 3184 | 7 | "x ¡" | 8 | "x ¿" | 9 | "x «" | 10 | endl;3533 | 7 | "x ¡" | 8 | "x ¿" | 9 | "x «" | 10; 3185 3534 \end{cfa} 3186 3535 %$ … … 3189 3538 \end{cfa} 3190 3539 %$ 3191 where \lstinline[basicstyle=\tt]@¡¿@ are inverted opening exclamation and question marks, and \lstinline[basicstyle=\tt]@«@ is an opening citation mark. 3192 3193 \item 3194 {\lstset{language=CFA,deletedelim=**[is][]{¢}{¢}} 3195 A seperator does not appear after a C string ending with the (extended) \Index*{ASCII}\index{ASCII!extended} characters: \lstinline[basicstyle=\tt]@,.;!?)]}%¢»@ 3540 3541 \item 3542 A seperator does not appear before/after a C string starting/ending with the \Index*{ASCII} quote or whitespace characters: \lstinline[basicstyle=\tt,showspaces=true]@`'": \t\v\f\r\n@ 3196 3543 \begin{cfa}[belowskip=0pt] 3197 sout | 1 | ", x" | 2 | ". x" | 3 | "; x" | 4 | "! x" | 5 | "? 
x" | 6 | "% x" 3198 | 7 | "¢ x" | 8 | "» x" | 9 | ") x" | 10 | "] x" | 11 | "} x" | endl; 3199 \end{cfa} 3200 \begin{cfa}[basicstyle=\tt,showspaces=true,aboveskip=0pt,belowskip=0pt] 3201 1®,® x 2®.® x 3®;® x 4®!® x 5®?® x 6®%® x 7§\color{red}\textcent§ x 8®»® x 9®)® x 10®]® x 11®}® x 3202 \end{cfa}}% 3203 where \lstinline[basicstyle=\tt]@»@ is a closing citation mark. 3204 3205 \item 3206 A seperator does not appear before or after a C string begining/ending with the \Index*{ASCII} quote or whitespace characters: \lstinline[basicstyle=\tt,showspaces=true]@`'": \t\v\f\r\n@ 3207 \begin{cfa}[belowskip=0pt] 3208 sout | "x`" | 1 | "`x'" | 2 | "'x\"" | 3 | "\"x:" | 4 | ":x " | 5 | " x\t" | 6 | "\tx" | endl; 3544 sout | "x`" | 1 | "`x'" | 2 | "'x\"" | 3 | "\"x:" | 4 | ":x " | 5 | " x\t" | 6 | "\tx"; 3209 3545 \end{cfa} 3210 3546 \begin{cfa}[basicstyle=\tt,showspaces=true,showtabs=true,aboveskip=0pt,belowskip=0pt] … … 3215 3551 If a space is desired before or after one of the special string start/end characters, simply insert a space. 3216 3552 \begin{cfa}[belowskip=0pt] 3217 sout | "x (§\color{red}\texttt{\textvisiblespace}§" | 1 | "§\color{red}\texttt{\textvisiblespace}§) x" | 2 | "§\color{red}\texttt{\textvisiblespace}§, x" | 3 | "§\color{red}\texttt{\textvisiblespace}§:x:§\color{red}\texttt{\textvisiblespace}§" | 4 | endl;3553 sout | "x (§\color{red}\texttt{\textvisiblespace}§" | 1 | "§\color{red}\texttt{\textvisiblespace}§) x" | 2 | "§\color{red}\texttt{\textvisiblespace}§, x" | 3 | "§\color{red}\texttt{\textvisiblespace}§:x:§\color{red}\texttt{\textvisiblespace}§" | 4; 3218 3554 \end{cfa} 3219 3555 \begin{cfa}[basicstyle=\tt,showspaces=true,showtabs=true,aboveskip=0pt,belowskip=0pt] … … 3223 3559 3224 3560 3225 \subsection{Manipulator} 3226 3227 The following \CC-style \Index{manipulator}s and routines control implicit seperation. 3561 \subsection{Separation Manipulators} 3562 3563 The following \Index{manipulator}s control \Index{implicit output separation}. 
3564 The effect of these manipulators is global for an output stream (except ©sepOn© and ©sepOff©). 3228 3565 \begin{enumerate} 3229 3566 \item 3230 Routines\Indexc{sepSet}\index{manipulator!sepSet@©sepSet©} and \Indexc{sep}\index{manipulator!sep@©sep©}/\Indexc{sepGet}\index{manipulator!sepGet@©sepGet©} set and get the separator string.3567 \Indexc{sepSet}\index{manipulator!sepSet@©sepSet©} and \Indexc{sep}\index{manipulator!sep@©sep©}/\Indexc{sepGet}\index{manipulator!sepGet@©sepGet©} set and get the separator string. 3231 3568 The separator string can be at most 16 characters including the ©'\0'© string terminator (15 printable characters). 3232 3569 \begin{cfa}[mathescape=off,belowskip=0pt] 3233 sepSet( sout, ", $" ); §\C{// set separator from " " to ", \$"}§3234 sout | 1 | 2 | 3 | " \"" | ®sep® | "\"" | endl;3570 sepSet( sout, ", $" ); §\C{// set separator from " " to ", \$"}§ 3571 sout | 1 | 2 | 3 | " \"" | ®sep® | "\""; 3235 3572 \end{cfa} 3236 3573 %$ … … 3240 3577 %$ 3241 3578 \begin{cfa}[belowskip=0pt] 3242 sepSet( sout, " " ); §\C{// reset separator to " "}§3243 sout | 1 | 2 | 3 | " \"" | ®sepGet( sout )® | "\"" | endl;3579 sepSet( sout, " " ); §\C{// reset separator to " "}§ 3580 sout | 1 | 2 | 3 | " \"" | ®sepGet( sout )® | "\""; 3244 3581 \end{cfa} 3245 3582 \begin{cfa}[showspaces=true,aboveskip=0pt] … … 3248 3585 ©sepGet© can be used to store a separator and then restore it: 3249 3586 \begin{cfa}[belowskip=0pt] 3250 char store[®sepSize®]; §\C{// sepSize is the maximum separator size}§3251 strcpy( store, sepGet( sout ) ); §\C{// copy current separator}§3252 sepSet( sout, "_" ); §\C{// change separator to underscore}§3253 sout | 1 | 2 | 3 | endl;3587 char store[®sepSize®]; §\C{// sepSize is the maximum separator size}§ 3588 strcpy( store, sepGet( sout ) ); §\C{// copy current separator}§ 3589 sepSet( sout, "_" ); §\C{// change separator to underscore}§ 3590 sout | 1 | 2 | 3; 3254 3591 \end{cfa} 3255 3592 
\begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] … … 3257 3594 \end{cfa} 3258 3595 \begin{cfa}[belowskip=0pt] 3259 sepSet( sout, store ); §\C{// change separator back to original}§3260 sout | 1 | 2 | 3 | endl;3596 sepSet( sout, store ); §\C{// change separator back to original}§ 3597 sout | 1 | 2 | 3; 3261 3598 \end{cfa} 3262 3599 \begin{cfa}[showspaces=true,aboveskip=0pt] … … 3265 3602 3266 3603 \item 3267 Routine\Indexc{sepSetTuple}\index{manipulator!sepSetTuple@©sepSetTuple©} and \Indexc{sepTuple}\index{manipulator!sepTuple@©sepTuple©}/\Indexc{sepGetTuple}\index{manipulator!sepGetTuple@©sepGetTuple©} get and set the tuple separator-string.3604 \Indexc{sepSetTuple}\index{manipulator!sepSetTuple@©sepSetTuple©} and \Indexc{sepTuple}\index{manipulator!sepTuple@©sepTuple©}/\Indexc{sepGetTuple}\index{manipulator!sepGetTuple@©sepGetTuple©} get and set the tuple separator-string. 3268 3605 The tuple separator-string can be at most 16 characters including the ©'\0'© string terminator (15 printable characters). 
3269 3606 \begin{cfa}[belowskip=0pt] 3270 sepSetTuple( sout, " " ); §\C{// set tuple separator from ", " to " "}§3271 sout | t1 | t2 | " \"" | ®sepTuple® | "\"" | endl;3607 sepSetTuple( sout, " " ); §\C{// set tuple separator from ", " to " "}§ 3608 sout | t1 | t2 | " \"" | ®sepTuple® | "\""; 3272 3609 \end{cfa} 3273 3610 \begin{cfa}[showspaces=true,aboveskip=0pt] … … 3275 3612 \end{cfa} 3276 3613 \begin{cfa}[belowskip=0pt] 3277 sepSetTuple( sout, ", " ); §\C{// reset tuple separator to ", "}§3278 sout | t1 | t2 | " \"" | ®sepGetTuple( sout )® | "\"" | endl;3614 sepSetTuple( sout, ", " ); §\C{// reset tuple separator to ", "}§ 3615 sout | t1 | t2 | " \"" | ®sepGetTuple( sout )® | "\""; 3279 3616 \end{cfa} 3280 3617 \begin{cfa}[showspaces=true,aboveskip=0pt] … … 3284 3621 3285 3622 \item 3286 Manipulators \Indexc{sepDisable}\index{manipulator!sepDisable@©sepDisable©} and \Indexc{sepEnable}\index{manipulator!sepEnable@©sepEnable©} \emph{globally} toggle printing the separator, \ie the seperator is adjusted with respect to all subsequent printed items.3623 \Indexc{sepDisable}\index{manipulator!sepDisable@©sepDisable©} and \Indexc{sepEnable}\index{manipulator!sepEnable@©sepEnable©} toggle printing the separator. 
\begin{cfa}[belowskip=0pt]
3288 sout | sepDisable | 1 | 2 | 3 | endl; §\C{// globally turn off implicit separator}§3625 sout | sepDisable | 1 | 2 | 3; §\C{// turn off implicit separator}§
3289 3626 \end{cfa}
3290 3627 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] … … 3292 3629 \end{cfa}
3293 3630 \begin{cfa}[belowskip=0pt]
3294 sout | sepEnable | 1 | 2 | 3 | endl; §\C{// globally turn on implicit separator}§3631 sout | sepEnable | 1 | 2 | 3; §\C{// turn on implicit separator}§
3295 3632 \end{cfa}
3296 3633 \begin{cfa}[mathescape=off,showspaces=true,aboveskip=0pt,belowskip=0pt] … … 3299 3636 \end{cfa}
3300 3637
3301 3638 \item
3302 Manipulators \Indexc{sepOn}\index{manipulator!sepOn@©sepOn©} and \Indexc{sepOff}\index{manipulator!sepOff@©sepOff©} \emph{locally} toggle printing the separator, \ie the separator is adjusted only with respect to the next printed item.3639 \Indexc{sepOn}\index{manipulator!sepOn@©sepOn©} and \Indexc{sepOff}\index{manipulator!sepOff@©sepOff©} toggle printing the separator with respect to the next printed item, and then return to the global separator setting.
3303 3640 \begin{cfa}[belowskip=0pt]
3304 sout | 1 | sepOff | 2 | 3 | endl; §\C{// locally turn off implicit separator}§3641 sout | 1 | sepOff | 2 | 3; §\C{// turn off implicit separator for the next item}§
3305 3642 \end{cfa}
3306 3643 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] … … 3307 3644 \end{cfa}
3308 3645 \begin{cfa}[belowskip=0pt]
3309 sout | sepDisable | 1 | sepOn | 2 | 3 | endl; §\C{// locally turn on implicit separator}§3646 sout | sepDisable | 1 | sepOn | 2 | 3; §\C{// turn on implicit separator for the next item}§
3310 3647 \end{cfa}
3311 3648 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] … … 3314 3651 The tuple separator also responds to being turned on and off. 
3315 3652 \begin{cfa}[belowskip=0pt] 3316 sout | t1 | sepOff | t2 | endl; §\C{// locally turn on/off implicit separator}§3653 sout | t1 | sepOff | t2; §\C{// turn off implicit separator for the next item}§ 3317 3654 \end{cfa} 3318 3655 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] … … 3322 3659 use ©sep© to accomplish this functionality. 3323 3660 \begin{cfa}[belowskip=0pt] 3324 sout | sepOn | 1 | 2 | 3 | sepOn | endl ;§\C{// sepOn does nothing at start/end of line}§3661 sout | sepOn | 1 | 2 | 3 | sepOn; §\C{// sepOn does nothing at start/end of line}§ 3325 3662 \end{cfa} 3326 3663 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] … … 3328 3665 \end{cfa} 3329 3666 \begin{cfa}[belowskip=0pt] 3330 sout | sep | 1 | 2 | 3 | sep | endl ;§\C{// use sep to print separator at start/end of line}§3667 sout | sep | 1 | 2 | 3 | sep ; §\C{// use sep to print separator at start/end of line}§ 3331 3668 \end{cfa} 3332 3669 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3333 3670 ® ®1 2 3® ® 3671 \end{cfa} 3672 \end{enumerate} 3673 3674 3675 \subsection{Newline Manipulators} 3676 3677 The following \Index{manipulators} control \Index{newline separation} for input and output. 3678 3679 For input: 3680 \begin{enumerate}[parsep=0pt] 3681 \item 3682 \Indexc{nl}\index{manipulator!nl@©nl©} scans characters until the next newline character, i.e., ignore the remaining characters in the line. 3683 \item 3684 \Indexc{nlOn}\index{manipulator!nlOn@©nlOn©} reads the newline character, when reading single characters. 3685 \item 3686 \Indexc{nlOff}\index{manipulator!nlOff@©nlOff©} does \emph{not} read the newline character, when reading single characters. 3687 \end{enumerate} 3688 For example, in: 3689 \begin{cfa} 3690 sin | i | ®nl® | j; 3691 1 ®2® 3692 3 3693 \end{cfa} 3694 variable ©i© is assigned 1, the 2 is skipped, and variable ©j© is assigned 3. 
3695
3696 For output:
3697 \begin{enumerate}[parsep=0pt]
3698 \item
3699 \Indexc{nl}\index{manipulator!nl@©nl©} inserts a newline.
3700 \begin{cfa}
3701 sout | nl; §\C{// only print newline}§
3702 sout | 2; §\C{// implicit newline}§
3703 sout | 3 | nl | 4 | nl; §\C{// terminating nl merged with implicit newline}§
3704 sout | 5 | nl | nl; §\C{// again terminating nl merged with implicit newline}§
3705 sout | 6; §\C{// implicit newline}§
3706
3707 2
3708 3
3709 4
3710 5
3711
3712 6
3713 \end{cfa}
3714 Note, a terminating ©nl© is merged (overrides) with the implicit newline at the end of the ©sout© expression, otherwise it is impossible to print a single newline.
3715 \item
3716 \Indexc{nlOn}\index{manipulator!nlOn@©nlOn©} implicitly prints a newline at the end of each output expression.
3717 \item
3718 \Indexc{nlOff}\index{manipulator!nlOff@©nlOff©} does \emph{not} implicitly print a newline at the end of each output expression.
3719 \end{enumerate}
3720
3721
3722 \subsection{Output Value Manipulators}
3723
3724 The following \Index{manipulator}s control formatting of output values (printing), and only affect the format of the argument.
3725 \begin{enumerate}
3726 \item
3727 \Indexc{bin}( integer )\index{manipulator!bin@©bin©} print value in base 2 preceded by ©0b©/©0B©.
3728 \begin{cfa}[belowskip=0pt]
3729 sout | bin( 0 ) | bin( 27HH ) | bin( 27H ) | bin( 27 ) | bin( 27L );
3730 0b0 0b11011 0b11011 0b11011 0b11011
3731 sout | bin( -27HH ) | bin( -27H ) | bin( -27 ) | bin( -27L );
3732 0b11100101 0b1111111111100101 0b11111111111111111111111111100101 0b®(58 1s)®100101
3733 \end{cfa}
3734
3735 \item
3736 \Indexc{oct}( integer )\index{manipulator!oct@©oct©} print value in base 8 preceded by ©0©. 
3737 \begin{cfa}[belowskip=0pt] 3738 sout | oct( 0 ) | oct( 27HH ) | oct( 27H ) | oct( 27 ) | oct( 27L ); 3739 0 033 033 033 033 3740 sout | oct( -27HH ) | oct( -27H ) | oct( -27 ) | oct( -27L ); 3741 0345 0177745 037777777745 01777777777777777777745 3742 \end{cfa} 3743 Note, octal 0 is \emph{not} preceded by ©0© to prevent confusion. 3744 3745 \item 3746 \Indexc{hex}( integer / floating-point )\index{manipulator!hex@©hex©} print value in base 16 preceded by ©0x©/©0X©. 3747 \begin{cfa}[belowskip=0pt] 3748 sout | hex( 0 ) | hex( 27HH ) | hex( 27H ) | hex( 27 ) | hex( 27L ); 3749 0 0x1b 0x1b 0x1b 0x1b 3750 sout | hex( -27HH ) | hex( -27H ) | hex( -27 ) | hex( -27L ); 3751 0xe5 0xffe5 0xffffffe5 0xffffffffffffffe5 3752 3753 sout | hex( 0.0 ) | hex( 27.5F ) | hex( 27.5 ) | hex( 27.5L ); 3754 0x0.p+0 0x1.b8p+4 0x1.b8p+4 0xd.cp+1 3755 sout | hex( -27.5F ) | hex( -27.5 ) | hex( -27.5L ); 3756 -0x1.b8p+4 -0x1.b8p+4 -0xd.cp+1 3757 \end{cfa} 3758 3759 \item 3760 \Indexc{sci}( floating-point )\index{manipulator!sci@©sci©} print value in scientific notation with exponent. 3761 Default is 6 digits of precision. 3762 \begin{cfa}[belowskip=0pt] 3763 sout | sci( 0.0 ) | sci( 27.5 ) | sci( -27.5 ); 3764 0.000000e+00 2.750000e+01 -2.750000e+01 3765 \end{cfa} 3766 3767 \item 3768 \Indexc{upcase}( bin / hex / floating-point )\index{manipulator!upcase@©upcase©} print letters in a value in upper case. Lower case is the default. 3769 \begin{cfa}[belowskip=0pt] 3770 sout | upcase( bin( 27 ) ) | upcase( hex( 27 ) ) | upcase( 27.5e-10 ) | upcase( hex( 27.5 ) ); 3771 0®B®11011 0®X®1®B® 2.75®E®-09 0®X®1.®B®8®P®+4 3772 \end{cfa} 3773 3774 \item 3775 \Indexc{nobase}( integer )\index{manipulator!nobase@©nobase©} do not precede ©bin©, ©oct©, ©hex© with ©0b©/©0B©, ©0©, or ©0x©/©0X©. 3776 Printing the base is the default. 
3777 \begin{cfa}[belowskip=0pt] 3778 sout | nobase( bin( 27 ) ) | nobase( oct( 27 ) ) | nobase( hex( 27 ) ); 3779 11011 33 1b 3780 \end{cfa} 3781 3782 \item 3783 \Indexc{nodp}( floating-point )\index{manipulator!nodp@©nodp©} do not print a decimal point if there are no fractional digits. 3784 Printing a decimal point is the default, if there are no fractional digits. 3785 \begin{cfa}[belowskip=0pt] 3786 sout | 0. | nodp( 0. ) | 27.0 | nodp( 27.0 ) | nodp( 27.5 ); 3787 0.0 ®0® 27.0 ®27® 27.5 3788 \end{cfa} 3789 3790 \item 3791 \Indexc{sign}( integer / floating-point )\index{manipulator!sign@©sign©} prefix with plus or minus sign (©+© or ©-©). Only printing the minus sign is the default. 3792 \begin{cfa}[belowskip=0pt] 3793 sout | sign( 27 ) | sign( -27 ) | sign( 27. ) | sign( -27. ) | sign( 27.5 ) | sign( -27.5 ); 3794 ®+®27 -27 ®+®27.0 -27.0 ®+®27.5 -27.5 3795 \end{cfa} 3796 3797 \item 3798 \Indexc{wd}©( unsigned char minimum, T val )©\index{manipulator!wd@©wd©}, ©wd( unsigned char minimum, unsigned char precision, T val )© 3799 For all types, ©minimum© is the minimum number of printed characters. 3800 If the value is shorter than the minimum, it is padded on the right with spaces. 3801 \begin{cfa}[belowskip=0pt] 3802 sout | wd( 4, 34) | wd( 3, 34 ) | wd( 2, 34 ); 3803 sout | wd( 10, 4.) | wd( 9, 4. ) | wd( 8, 4. ); 3804 sout | wd( 4, "ab" ) | wd( 3, "ab" ) | wd( 2, "ab" ); 3805 \end{cfa} 3806 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3807 ® ®34 ® ®34 34 3808 ® ®4.000000 ® ®4.000000 4.000000 3809 ® ®ab ® ®ab ab 3810 \end{cfa} 3811 If the value is larger, it is printed without truncation, ignoring the ©minimum©. 3812 \begin{cfa}[belowskip=0pt] 3813 sout | wd( 4, 34567 ) | wd( 3, 34567 ) | wd( 2, 34567 ); 3814 sout | wd( 4, 3456. ) | wd( 3, 3456. ) | wd( 2, 3456. 
); 3815 sout | wd( 4, "abcde" ) | wd( 3, "abcde" ) | wd( 2,"abcde" );
3816 \end{cfa}
3817 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt]
3818 3456®7® 345®67® 34®567®
3819 3456®.® 345®6.® 34®56.®
3820 abcd®e® abc®de® ab®cde®
3821 \end{cfa}
3822
3823 For integer types, ©precision© is the minimum number of printed digits.
3824 If the value is shorter, it is padded on the left with leading zeros.
3825 \begin{cfa}[belowskip=0pt]
3826 sout | wd( 4,3, 34 ) | wd( 8,4, 34 ) | wd( 10,10, 34 );
3827 \end{cfa}
3828 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt]
3829 ®0®34 ®00®34 ®00000000®34
3830 \end{cfa}
3831 If the value is larger, it is printed without truncation, ignoring the ©precision©.
3832 \begin{cfa}[belowskip=0pt]
3833 sout | wd( 4,1, 3456 ) | wd( 8,2, 3456 ) | wd( 10,3, 3456 );
3834 \end{cfa}
3835 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt]
3836 3456 3456 3456
3837 \end{cfa}
3838 If ©precision© is 0, nothing is printed for zero.
3839 If ©precision© is greater than the minimum, it becomes the minimum.
3840 \begin{cfa}[belowskip=0pt]
3841 sout | wd( 4,0, 0 ) | wd( 3,10, 34 );
3842 \end{cfa}
3843 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt]
3844 ® ® ®00000000®34
3845 \end{cfa}
3846 For floating-point types, ©precision© is the minimum number of digits after the decimal point.
3847 \begin{cfa}[belowskip=0pt]
3848 sout | wd( 6,3, 27.5 ) | wd( 8,1, 27.5 ) | wd( 8,0, 27.5 ) | wd( 3,8, 27.5 );
3849 \end{cfa}
3850 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt]
3851 27.®500® 27.®5® 28. 27.®50000000®
3852 \end{cfa}
3853 For the C-string type, ©precision© is the maximum number of printed characters, so the string is truncated if it exceeds the maximum. 
3854 \begin{cfa}[belowskip=0pt] 3855 sout | wd( 6,8, "abcd" ) | wd( 6,8, "abcdefghijk" ) | wd( 6,3, "abcd" ); 3856 \end{cfa} 3857 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3858 abcd abcdefgh abc 3859 \end{cfa} 3860 3861 \item 3862 \Indexc{ws( unsigned char minimum, unsigned char significant, floating-point )}\index{manipulator!ws@©ws©} 3863 For floating-point type, ©minimum© is the same as for manipulator ©wd©, but ©significant© is the maximum number of significant digits to be printed for both the integer and fractions (versus only the fraction for ©wd©). 3864 If a value's significant digits is greater than ©significant©, the last significant digit is rounded up. 3865 \begin{cfa}[belowskip=0pt] 3866 sout | ws(6,6, 234.567) | ws(6,5, 234.567) | ws(6,4, 234.567) | ws(6,3, 234.567); 3867 \end{cfa} 3868 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3869 234.567 234.5®7® 234.®6® 23®5® 3870 \end{cfa} 3871 If a value's magnitude is greater than ©significant©, the value is printed in scientific notation with the specified number of significant digits. 3872 \begin{cfa}[belowskip=0pt] 3873 sout | ws(6,6, 234567.) | ws(6,5, 234567.) | ws(6,4, 234567.) | ws(6,3, 234567.); 3874 \end{cfa} 3875 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3876 234567. 2.3457®e+05® 2.346®e+05® 2.35®e+05® 3877 \end{cfa} 3878 If ©significant© is greater than ©minimum©, it defines the number of printed characters. 3879 \begin{cfa}[belowskip=0pt] 3880 sout | ws(3,6, 234567.) | ws(4,6, 234567.) | ws(5,6, 234567.) | ws(6,6, 234567.); 3881 \end{cfa} 3882 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3883 234567. 234567. 234567. 234567. 3884 \end{cfa} 3885 3886 \item 3887 \Indexc{left}( field-width )\index{manipulator!left@©left©} left justify within the given field. 
3888 \begin{cfa}[belowskip=0pt] 3889 sout | left(wd(4, 27)) | left(wd(10, 27.)) | left(wd(10, 27.5)) | left(wd(4,3, 27)) | left(wd(10,3, 27.5)); 3890 \end{cfa} 3891 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3892 27® ® 27.000000 27.500000 027 27.500® ® 3893 \end{cfa} 3894 3895 \item 3896 \Indexc{pad0}( field-width )\index{manipulator!pad0@©pad0©} left pad with zeroes (0). 3897 \begin{cfa}[belowskip=0pt] 3898 sout | pad0( wd( 4, 27 ) ) | pad0( wd( 4,3, 27 ) ) | pad0( wd( 8,3, 27.5 ) ); 3899 ®00®27 ®0®27 ®00®27.500 3334 3900 \end{cfa} 3335 3901 \end{enumerate} … … 3341 3907 int main( void ) { 3342 3908 int x = 1, y = 2, z = 3; 3343 sout | x | y | z | endl;3909 sout | x | y | z; 3344 3910 [int, [ int, int ] ] t1 = [ 1, [ 2, 3 ] ], t2 = [ 4, [ 5, 6 ] ]; 3345 sout | t1 | t2 | endl; // print tuples3346 sout | x * 3 | y + 1 | z << 2 | x == y | (x | y) | (x || y) | (x > z ? 1 : 2) | endl;3347 sout | 1 | 2 | 3 | endl;3348 sout | '1' | '2' | '3' | endl;3349 sout | 1 | "" | 2 | "" | 3 | endl;3911 sout | t1 | t2; // print tuples 3912 sout | x * 3 | y + 1 | z << 2 | x == y | (x | y) | (x || y) | (x > z ? 1 : 2); 3913 sout | 1 | 2 | 3; 3914 sout | '1' | '2' | '3'; 3915 sout | 1 | "" | 2 | "" | 3; 3350 3916 sout | "x (" | 1 | "x [" | 2 | "x {" | 3 | "x =" | 4 | "x $" | 5 | "x £" | 6 | "x ¥" 3351 | 7 | "x ¡" | 8 | "x ¿" | 9 | "x «" | 10 | endl;3917 | 7 | "x ¡" | 8 | "x ¿" | 9 | "x «" | 10; 3352 3918 sout | 1 | ", x" | 2 | ". x" | 3 | "; x" | 4 | "! x" | 5 | "? 
x" | 6 | "% x" 3353 | 7 | "¢ x" | 8 | "» x" | 9 | ") x" | 10 | "] x" | 11 | "} x" | endl;3354 sout | "x`" | 1 | "`x'" | 2 | "'x\"" | 3 | "\"x:" | 4 | ":x " | 5 | " x\t" | 6 | "\tx" | endl;3355 sout | "x ( " | 1 | " ) x" | 2 | " , x" | 3 | " :x: " | 4 | endl;3919 | 7 | "¢ x" | 8 | "» x" | 9 | ") x" | 10 | "] x" | 11 | "} x"; 3920 sout | "x`" | 1 | "`x'" | 2 | "'x\"" | 3 | "\"x:" | 4 | ":x " | 5 | " x\t" | 6 | "\tx"; 3921 sout | "x ( " | 1 | " ) x" | 2 | " , x" | 3 | " :x: " | 4; 3356 3922 3357 3923 sepSet( sout, ", $" ); // set separator from " " to ", $" 3358 sout | 1 | 2 | 3 | " \"" | sep | "\"" | endl;3924 sout | 1 | 2 | 3 | " \"" | sep | "\""; 3359 3925 sepSet( sout, " " ); // reset separator to " " 3360 sout | 1 | 2 | 3 | " \"" | sepGet( sout ) | "\"" | endl;3926 sout | 1 | 2 | 3 | " \"" | sepGet( sout ) | "\""; 3361 3927 3362 3928 char store[sepSize]; 3363 3929 strcpy( store, sepGet( sout ) ); 3364 3930 sepSet( sout, "_" ); 3365 sout | 1 | 2 | 3 | endl;3931 sout | 1 | 2 | 3; 3366 3932 sepSet( sout, store ); 3367 sout | 1 | 2 | 3 | endl;3933 sout | 1 | 2 | 3; 3368 3934 3369 3935 sepSetTuple( sout, " " ); // set tuple separator from ", " to " " 3370 sout | t1 | t2 | " \"" | sepTuple | "\"" | endl;3936 sout | t1 | t2 | " \"" | sepTuple | "\""; 3371 3937 sepSetTuple( sout, ", " ); // reset tuple separator to ", " 3372 sout | t1 | t2 | " \"" | sepGetTuple( sout ) | "\"" | endl;3373 3374 sout | sepDisable | 1 | 2 | 3 | endl; // globally turn off implicit separator3375 sout | sepEnable | 1 | 2 | 3 | endl; // globally turn on implicit separator3376 3377 sout | 1 | sepOff | 2 | 3 | endl; // locally turn on implicit separator3378 sout | sepDisable | 1 | sepOn | 2 | 3 | endl; // globally turn off implicit separator3938 sout | t1 | t2 | " \"" | sepGetTuple( sout ) | "\""; 3939 3940 sout | sepDisable | 1 | 2 | 3; // globally turn off implicit separator 3941 sout | sepEnable | 1 | 2 | 3; // globally turn on implicit separator 3942 3943 sout | 1 | sepOff | 2 | 3; // locally 
turn on implicit separator 3944 sout | sepDisable | 1 | sepOn | 2 | 3; // globally turn off implicit separator 3379 3945 sout | sepEnable; 3380 sout | t1 | sepOff | t2 | endl; // locally turn on/off implicit separator3381 3382 sout | sepOn | 1 | 2 | 3 | sepOn | endl; // sepOn does nothing at start/end of line3383 sout | sep | 1 | 2 | 3 | sep | endl; // use sep to print separator at start/end of line3946 sout | t1 | sepOff | t2; // locally turn on/off implicit separator 3947 3948 sout | sepOn | 1 | 2 | 3 | sepOn ; // sepOn does nothing at start/end of line 3949 sout | sep | 1 | 2 | 3 | sep ; // use sep to print separator at start/end of line 3384 3950 } 3385 3951 … … 3390 3956 \end{comment} 3391 3957 %$ 3958 3959 3960 \subsection{Input Value Manipulators} 3961 3962 The format of numeric input values in the same as C constants without a trailing type suffix, as the input value-type is denoted by the input variable. 3963 For ©_Bool© type, the constants are ©true© and ©false©. 3964 For integral types, any number of digits, optionally preceded by a sign (©+© or ©-©), where a 3965 \begin{itemize} 3966 \item 3967 ©1©-©9© prefix introduces a decimal value (©0©-©9©), 3968 \item 3969 ©0© prefix introduces an octal value (©0©-©7©), and 3970 \item 3971 ©0x© or ©0X© prefix introduces a hexadecimal value (©0©-©f©) with lower or upper case letters. 3972 \end{itemize} 3973 For floating-point types, any number of decimal digits, optionally preceded by a sign (©+© or ©-©), optionally containing a decimal point, and optionally followed by an exponent, ©e© or ©E©, with signed (optional) decimal digits. 3974 Floating-point values can also be written in hexadecimal format preceded by ©0x© or ©0X© with hexadecimal digits and exponent denoted by ©p© or ©P©. 3975 3976 For the C-string type, the input values are \emph{not} the same as C-string constants, \ie double quotes bracketing arbitrary text with escape sequences. 
3977 Instead, the next sequence of non-whitespace characters are read, and the input sequence is terminated with delimiter ©'\0'©. 3978 The string variable \emph{must} be large enough to contain the input sequence. 3979 3980 The following \Index{manipulator}s control formatting of input values (reading), and only affect the format of the argument. 3981 3982 \begin{enumerate} 3983 \item 3984 \Indexc{skip( const char * pattern )}\index{manipulator!skip@©skip©} / ©skip( unsigned int length )© / ©const char * pattern© 3985 The argument defines a ©pattern© or ©length©. 3986 The ©pattern© is composed of white-space and non-white-space characters, where \emph{any} white-space character matches 0 or more input white-space characters (hence, consecutive white-space characters in the pattern are combined), and each non-white-space character matches exactly with an input character. 3987 The ©length© is composed of the next $N$ characters, including the newline character. 3988 If the match successes, the input characters are discarded, and input continues with the next character. 3989 If the match fails, the input characters are left unread. 3990 \begin{cfa}[belowskip=0pt] 3991 char sk[$\,$] = "abc"; 3992 sin | "abc " | skip( sk ) | skip( 5 ); // match input sequence 3993 \end{cfa} 3994 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 3995 ®abc ® 3996 ®abc ® 3997 ®xx® 3998 \end{cfa} 3999 4000 \item 4001 \Indexc{wdi}©( unsigned int maximum, T & val )©\index{manipulator!wdi@©wdi©} 4002 For all types except ©char©, ©maximum© is the maximum number of characters read for the current operation. 
4003 \begin{cfa}[belowskip=0pt] 4004 char s[10]; int i; double d; 4005 sin | wdi( 4, s ) | wdi( 3, i ) | wdi( 8, d ); // c == "abcd", i == 123, d == 3.456E+2 4006 \end{cfa} 4007 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 4008 ®abcd1233.456E+2® 4009 \end{cfa} 4010 Note, input ©wdi© cannot be overloaded with output ©wd© because both have the same parameters but return different types. 4011 Currently, \CFA cannot distinguish between these two manipulators in the middle of an ©sout©/©sin© expression based on return type. 4012 4013 \item 4014 \Indexc{ignore( T & val )}\index{manipulator!ignore@©ignore©} 4015 For all types, the data is read from the stream depending on the argument type but ignored, \ie it is not stored in the argument. 4016 \begin{cfa}[belowskip=0pt] 4017 double d; 4018 sin | ignore( d ); // d is unchanged 4019 \end{cfa} 4020 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 4021 ® -75.35e-4® 25 4022 \end{cfa} 4023 4024 \item 4025 \Indexc{incl( const char * scanset, char * s )}\index{manipulator!incl@©incl©} 4026 For the C-string type, the argument defines a ©scanset© that matches any number of characters \emph{in} the set. 4027 Matching characters are read into the C string and null terminated. 4028 \begin{cfa}[belowskip=0pt] 4029 char s[10]; 4030 sin | incl( "abc", s ); 4031 \end{cfa} 4032 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 4033 ®bca®xyz 4034 \end{cfa} 4035 4036 \item 4037 \Indexc{excl( const char * scanset, char * s )}\index{manipulator!excl@©excl©} 4038 For the C-string type, the argument defines a ©scanset© that matches any number of characters \emph{not in} the set. 4039 Non-matching characters are read into the C string and null terminated. 
4040 \begin{cfa}[belowskip=0pt] 4041 char s[10]; 4042 sin | excl( "abc", s ); 4043 \end{cfa} 4044 \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 4045 ®xyz®bca 4046 \end{cfa} 4047 \end{enumerate} 3392 4048 3393 4049 … … 3759 4415 \begin{itemize} 3760 4416 \item 3761 preventing having to determine or writelong generic types,3762 \item 3763 ensur esecondary variables, related to a primary variable, always have the same type.4417 not determining or writing long generic types, 4418 \item 4419 ensuring secondary variables, related to a primary variable, always have the same type. 3764 4420 \end{itemize} 3765 4421 … … 3783 4439 There is also the conundrum in type inferencing of when to \emph{\Index{brand}} a type. 3784 4440 That is, when is the type of the variable more important than the type of its initialization expression. 3785 For example, if a change is made in an initialization expression, it can cause significantcascading type changes and/or errors.4441 For example, if a change is made in an initialization expression, it can cause cascading type changes and/or errors. 3786 4442 At some point, a variable type needs to remain constant and the expression to be in error when it changes. 
3787 4443 … … 4016 4672 4017 4673 coroutine Fibonacci { 4018 int fn; §\C{// used for communication}§4674 int fn; §\C{// used for communication}§ 4019 4675 }; 4020 4676 void ?{}( Fibonacci * this ) { … … 4022 4678 } 4023 4679 void main( Fibonacci * this ) { 4024 int fn1, fn2; §\C{// retained between resumes}§4025 this->fn = 0; §\C{// case 0}§4680 int fn1, fn2; §\C{// retained between resumes}§ 4681 this->fn = 0; §\C{// case 0}§ 4026 4682 fn1 = this->fn; 4027 suspend(); §\C{// return to last resume}§4028 4029 this->fn = 1; §\C{// case 1}§4683 suspend(); §\C{// return to last resume}§ 4684 4685 this->fn = 1; §\C{// case 1}§ 4030 4686 fn2 = fn1; 4031 4687 fn1 = this->fn; 4032 suspend(); §\C{// return to last resume}§4033 4034 for ( ;; ) { §\C{// general case}§4688 suspend(); §\C{// return to last resume}§ 4689 4690 for ( ;; ) { §\C{// general case}§ 4035 4691 this->fn = fn1 + fn2; 4036 4692 fn2 = fn1; 4037 4693 fn1 = this->fn; 4038 suspend(); §\C{// return to last resume}§4694 suspend(); §\C{// return to last resume}§ 4039 4695 } // for 4040 4696 } 4041 4697 int next( Fibonacci * this ) { 4042 resume( this ); §\C{// transfer to last suspend}§4698 resume( this ); §\C{// transfer to last suspend}§ 4043 4699 return this->fn; 4044 4700 } … … 4046 4702 Fibonacci f1, f2; 4047 4703 for ( int i = 1; i <= 10; i += 1 ) { 4048 sout | next( &f1 ) | ' ' | next( &f2 ) | endl;4704 sout | next( &f1 ) | ' ' | next( &f2 ); 4049 4705 } // for 4050 4706 } … … 4112 4768 MyThread f[4]; 4113 4769 } 4114 sout | global.value | endl;4770 sout | global.value; 4115 4771 } 4116 4772 \end{cfa} … … 4190 4846 void main( First * this ) { 4191 4847 for ( int i = 0; i < 10; i += 1 ) { 4192 sout | "First : Suspend No." | i + 1 | endl;4848 sout | "First : Suspend No." | i + 1; 4193 4849 yield(); 4194 4850 } … … 4199 4855 wait( this->lock ); 4200 4856 for ( int i = 0; i < 10; i += 1 ) { 4201 sout | "Second : Suspend No." | i + 1 | endl;4857 sout | "Second : Suspend No." 
| i + 1; 4202 4858 yield(); 4203 4859 } … … 4206 4862 int main( void ) { 4207 4863 signal_once lock; 4208 sout | "User main begin" | endl;4864 sout | "User main begin"; 4209 4865 { 4210 4866 processor p; … … 4214 4870 } 4215 4871 } 4216 sout | "User main end" | endl;4872 sout | "User main end"; 4217 4873 } 4218 4874 \end{cfa} … … 4911 5567 void ?{}( Line * l ) { 4912 5568 l->lnth = 0.0; 4913 sout | "default" | endl;5569 sout | "default"; 4914 5570 } 4915 5571 … … 4918 5574 void ?{}( Line * l, float lnth ) { 4919 5575 l->lnth = lnth; 4920 sout | "lnth" | l->lnth | endl;5576 sout | "lnth" | l->lnth; 4921 5577 4922 5578 } … … 4924 5580 // destructor 4925 5581 void ^?() { 4926 sout | "destroyed" | endl;5582 sout | "destroyed"; 4927 5583 l.lnth = 0.0; 4928 5584 } … … 5585 6241 In \CFA, there are ambiguous cases with dereference and operator identifiers, \eg ©int *?*?()©, where the string ©*?*?© can be interpreted as: 5586 6242 \begin{cfa} 5587 *?§\color{red}\textvisiblespace§*? §\C{// dereference operator, dereference operator}§5588 *§\color{red}\textvisiblespace§?*? §\C{// dereference, multiplication operator}§6243 *?§\color{red}\textvisiblespace§*? §\C{// dereference operator, dereference operator}§ 6244 *§\color{red}\textvisiblespace§?*? §\C{// dereference, multiplication operator}§ 5589 6245 \end{cfa} 5590 6246 By default, the first interpretation is selected, which does not yield a meaningful parse. 
… … 5638 6294 \eg: 5639 6295 \begin{cfa} 5640 x; §\C{// int x}§5641 *y; §\C{// int *y}§5642 f( p1, p2 ); §\C{// int f( int p1, int p2 );}§5643 g( p1, p2 ) int p1, p2; §\C{// int g( int p1, int p2 );}§6296 x; §\C{// int x}§ 6297 *y; §\C{// int *y}§ 6298 f( p1, p2 ); §\C{// int f( int p1, int p2 );}§ 6299 g( p1, p2 ) int p1, p2; §\C{// int g( int p1, int p2 );}§ 5644 6300 \end{cfa} 5645 6301 \CFA continues to support K\&R routine definitions: 5646 6302 \begin{cfa} 5647 f( a, b, c ) §\C{// default int return}§5648 int a, b; char c §\C{// K\&R parameter declarations}§6303 f( a, b, c ) §\C{// default int return}§ 6304 int a, b; char c §\C{// K\&R parameter declarations}§ 5649 6305 { 5650 6306 ... … … 5665 6321 int rtn( int i ); 5666 6322 int rtn( char c ); 5667 rtn( 'x' ); §\C{// programmer expects 2nd rtn to be called}§6323 rtn( 'x' ); §\C{// programmer expects 2nd rtn to be called}§ 5668 6324 \end{cfa} 5669 6325 \item[Rationale:] it is more intuitive for the call to ©rtn© to match the second version of definition of ©rtn© rather than the first. 5670 6326 In particular, output of ©char© variable now print a character rather than the decimal ASCII value of the character. 5671 6327 \begin{cfa} 5672 sout | 'x' | " " | (int)'x' | endl;6328 sout | 'x' | " " | (int)'x'; 5673 6329 x 120 5674 6330 \end{cfa} … … 5687 6343 \item[Change:] make string literals ©const©: 5688 6344 \begin{cfa} 5689 char * p = "abc"; §\C{// valid in C, deprecated in \CFA}§5690 char * q = expr ? "abc" : "de"; §\C{// valid in C, invalid in \CFA}§6345 char * p = "abc"; §\C{// valid in C, deprecated in \CFA}§ 6346 char * q = expr ? "abc" : "de"; §\C{// valid in C, invalid in \CFA}§ 5691 6347 \end{cfa} 5692 6348 The type of a string literal is changed from ©[] char© to ©const [] char©. 
… … 5695 6351 \begin{cfa} 5696 6352 char * p = "abc"; 5697 p[0] = 'w'; §\C{// segment fault or change constant literal}§6353 p[0] = 'w'; §\C{// segment fault or change constant literal}§ 5698 6354 \end{cfa} 5699 6355 The same problem occurs when passing a string literal to a routine that changes its argument. … … 5707 6363 \item[Change:] remove \newterm{tentative definitions}, which only occurs at file scope: 5708 6364 \begin{cfa} 5709 int i; §\C{// forward definition}§5710 int *j = ®&i®; §\C{// forward reference, valid in C, invalid in \CFA}§5711 int i = 0; §\C{// definition}§6365 int i; §\C{// forward definition}§ 6366 int *j = ®&i®; §\C{// forward reference, valid in C, invalid in \CFA}§ 6367 int i = 0; §\C{// definition}§ 5712 6368 \end{cfa} 5713 6369 is valid in C, and invalid in \CFA because duplicate overloaded object definitions at the same scope level are disallowed. … … 5715 6371 \begin{cfa} 5716 6372 struct X { int i; struct X *next; }; 5717 static struct X a; §\C{// forward definition}§6373 static struct X a; §\C{// forward definition}§ 5718 6374 static struct X b = { 0, ®&a® };§\C{// forward reference, valid in C, invalid in \CFA}§ 5719 static struct X a = { 1, &b }; §\C{// definition}§6375 static struct X a = { 1, &b }; §\C{// definition}§ 5720 6376 \end{cfa} 5721 6377 \item[Rationale:] avoids having different initialization rules for builtin types and user-defined types. 
… … 5732 6388 struct Person { 5733 6389 enum ®Colour® { R, G, B }; §\C[7cm]{// nested type}§ 5734 struct Face { §\C{// nested type}§5735 ®Colour® Eyes, Hair; §\C{// type defined outside (1 level)}§6390 struct Face { §\C{// nested type}§ 6391 ®Colour® Eyes, Hair; §\C{// type defined outside (1 level)}§ 5736 6392 }; 5737 ®.Colour® shirt; §\C{// type defined outside (top level)}§5738 ®Colour® pants; §\C{// type defined same level}§5739 Face looks[10]; §\C{// type defined same level}§6393 ®.Colour® shirt; §\C{// type defined outside (top level)}§ 6394 ®Colour® pants; §\C{// type defined same level}§ 6395 Face looks[10]; §\C{// type defined same level}§ 5740 6396 }; 5741 ®Colour® c = R; §\C{// type/enum defined same level}§6397 ®Colour® c = R; §\C{// type/enum defined same level}§ 5742 6398 Person®.Colour® pc = Person®.®R;§\C{// type/enum defined inside}§ 5743 Person®.®Face pretty; §\C{// type defined inside}\CRT§6399 Person®.®Face pretty; §\C{// type defined inside}\CRT§ 5744 6400 \end{cfa} 5745 6401 In C, the name of the nested types belongs to the same scope as the name of the outermost enclosing structure, \ie the nested types are hoisted to the scope of the outer-most type, which is not useful and confusing. … … 5758 6414 \item[Difficulty of converting:] Semantic transformation. To make the struct type name visible in the scope of the enclosing struct, the struct tag could be declared in the scope of the enclosing struct, before the enclosing struct is defined. Example: 5759 6415 \begin{cfa} 5760 struct Y; §\C{// struct Y and struct X are at the same scope}§6416 struct Y; §\C{// struct Y and struct X are at the same scope}§ 5761 6417 struct X { 5762 6418 struct Y { /* ... 
*/ } y; … … 5773 6429 \begin{cfa} 5774 6430 void foo() { 5775 int * b = malloc( sizeof(int) ); §\C{// implicitly convert void * to int *}§5776 char * c = b; §\C{// implicitly convert int * to void *, and then void * to char *}§6431 int * b = malloc( sizeof(int) ); §\C{// implicitly convert void * to int *}§ 6432 char * c = b; §\C{// implicitly convert int * to void *, and then void * to char *}§ 5777 6433 } 5778 6434 \end{cfa} … … 5947 6603 void * memalign( size_t align, size_t size );§\indexc{memalign}§ 5948 6604 int posix_memalign( void ** ptr, size_t align, size_t size );§\indexc{posix_memalign}§ 5949 }5950 5951 // §\CFA§ safe equivalents, i.e., implicit size specification5952 forall( dtype T | sized(T) ) T * malloc( void );5953 forall( dtype T | sized(T) ) T * calloc( size_t dim );5954 forall( dtype T | sized(T) ) T * realloc( T * ptr, size_t size );5955 forall( dtype T | sized(T) ) T * memalign( size_t align );5956 forall( dtype T | sized(T) ) T * aligned_alloc( size_t align );5957 forall( dtype T | sized(T) ) int posix_memalign( T ** ptr, size_t align );5958 5959 // §\CFA§ safe general allocation, fill, resize, array5960 forall( dtype T | sized(T) ) T * alloc( void );§\indexc{alloc}§5961 forall( dtype T | sized(T) ) T * alloc( char fill );5962 forall( dtype T | sized(T) ) T * alloc( size_t dim );5963 forall( dtype T | sized(T) ) T * alloc( size_t dim, char fill );5964 forall( dtype T | sized(T) ) T * alloc( T ptr[], size_t dim );5965 forall( dtype T | sized(T) ) T * alloc( T ptr[], size_t dim, char fill );5966 5967 // §\CFA§ safe general allocation, align, fill, array5968 forall( dtype T | sized(T) ) T * align_alloc( size_t align );5969 forall( dtype T | sized(T) ) T * align_alloc( size_t align, char fill );5970 forall( dtype T | sized(T) ) T * align_alloc( size_t align, size_t dim );5971 forall( dtype T | sized(T) ) T * align_alloc( size_t align, size_t dim, char fill );5972 6605 5973 6606 // C unsafe initialization/copy 5974 extern "C" {5975 6607 void * 
memset( void * dest, int c, size_t size ); 5976 6608 void * memcpy( void * dest, const void * src, size_t size ); 5977 6609 } 5978 6610 6611 forall( dtype T | sized(T) ) { 6612 // §\CFA§ safe equivalents, i.e., implicit size specification 6613 T * malloc( void ); 6614 T * calloc( size_t dim ); 6615 T * realloc( T * ptr, size_t size ); 6616 T * memalign( size_t align ); 6617 T * aligned_alloc( size_t align ); 6618 int posix_memalign( T ** ptr, size_t align ); 6619 6620 // §\CFA§ safe general allocation, fill, resize, array 6621 T * alloc( void );§\indexc{alloc}§ 6622 T * alloc( char fill ); 6623 T * alloc( size_t dim ); 6624 T * alloc( size_t dim, char fill ); 6625 T * alloc( T ptr[], size_t dim ); 6626 T * alloc( T ptr[], size_t dim, char fill ); 6627 6628 // §\CFA§ safe general allocation, align, fill, array 6629 T * align_alloc( size_t align ); 6630 T * align_alloc( size_t align, char fill ); 6631 T * align_alloc( size_t align, size_t dim ); 6632 T * align_alloc( size_t align, size_t dim, char fill ); 6633 5979 6634 // §\CFA§ safe initialization/copy, i.e., implicit size specification 5980 forall( dtype T | sized(T) )T * memset( T * dest, char c );§\indexc{memset}§5981 forall( dtype T | sized(T) )T * memcpy( T * dest, const T * src );§\indexc{memcpy}§6635 T * memset( T * dest, char c );§\indexc{memset}§ 6636 T * memcpy( T * dest, const T * src );§\indexc{memcpy}§ 5982 6637 5983 6638 // §\CFA§ safe initialization/copy array 5984 forall( dtype T | sized(T) ) T * memset( T dest[], size_t dim, char c ); 5985 forall( dtype T | sized(T) ) T * memcpy( T dest[], const T src[], size_t dim ); 6639 T * amemset( T dest[], char c, size_t dim ); 6640 T * amemcpy( T dest[], const T src[], size_t dim ); 6641 } 5986 6642 5987 6643 // §\CFA§ allocation/deallocation and constructor/destructor … … 5999 6655 6000 6656 6001 \subsection{ Conversion}6657 \subsection{String to Value Conversion} 6002 6658 6003 6659 \leavevmode … … 6035 6691 \leavevmode 6036 6692 
\begin{cfa}[aboveskip=0pt,belowskip=0pt] 6037 forall( otype T | { int ?<?( T, T ); } ) §\C{// location}§6693 forall( otype T | { int ?<?( T, T ); } ) §\C{// location}§ 6038 6694 T * bsearch( T key, const T * arr, size_t dim );§\indexc{bsearch}§ 6039 6695 6040 forall( otype T | { int ?<?( T, T ); } ) §\C{// position}§6696 forall( otype T | { int ?<?( T, T ); } ) §\C{// position}§ 6041 6697 unsigned int bsearch( T key, const T * arr, size_t dim ); 6042 6698 6043 6699 forall( otype T | { int ?<?( T, T ); } ) 6044 6700 void qsort( const T * arr, size_t dim );§\indexc{qsort}§ 6701 6702 forall( otype E | { int ?<?( E, E ); } ) { 6703 E * bsearch( E key, const E * vals, size_t dim );§\indexc{bsearch}§ §\C{// location}§ 6704 size_t bsearch( E key, const E * vals, size_t dim );§\C{// position}§ 6705 E * bsearchl( E key, const E * vals, size_t dim );§\indexc{bsearchl}§ 6706 size_t bsearchl( E key, const E * vals, size_t dim ); 6707 E * bsearchu( E key, const E * vals, size_t dim );§\indexc{bsearchu}§ 6708 size_t bsearchu( E key, const E * vals, size_t dim ); 6709 } 6710 6711 forall( otype K, otype E | { int ?<?( K, K ); K getKey( const E & ); } ) { 6712 E * bsearch( K key, const E * vals, size_t dim ); 6713 size_t bsearch( K key, const E * vals, size_t dim ); 6714 E * bsearchl( K key, const E * vals, size_t dim ); 6715 size_t bsearchl( K key, const E * vals, size_t dim ); 6716 E * bsearchu( K key, const E * vals, size_t dim ); 6717 size_t bsearchu( K key, const E * vals, size_t dim ); 6718 } 6719 6720 forall( otype E | { int ?<?( E, E ); } ) { 6721 void qsort( E * vals, size_t dim );§\indexc{qsort}§ 6722 } 6045 6723 \end{cfa} 6046 6724 … … 6069 6747 \leavevmode 6070 6748 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 6071 void rand48seed( long int s );§\indexc{rand48seed}§ 6072 char rand48();§\indexc{rand48}§ 6073 int rand48(); 6074 unsigned int rand48(); 6075 long int rand48(); 6076 unsigned long int rand48(); 6077 float rand48(); 6078 double rand48(); 6079 float _Complex 
rand48(); 6080 double _Complex rand48(); 6081 long double _Complex rand48(); 6749 void srandom( unsigned int seed );§\indexc{srandom}§ 6750 char random( void );§\indexc{random}§ 6751 char random( char u ); §\C{// [0,u)}§ 6752 char random( char l, char u ); §\C{// [l,u)}§ 6753 int random( void ); 6754 int random( int u ); §\C{// [0,u)}§ 6755 int random( int l, int u ); §\C{// [l,u)}§ 6756 unsigned int random( void ); 6757 unsigned int random( unsigned int u ); §\C{// [0,u)}§ 6758 unsigned int random( unsigned int l, unsigned int u ); §\C{// [l,u)}§ 6759 long int random( void ); 6760 long int random( long int u ); §\C{// [0,u)}§ 6761 long int random( long int l, long int u ); §\C{// [l,u)}§ 6762 unsigned long int random( void ); 6763 unsigned long int random( unsigned long int u ); §\C{// [0,u)}§ 6764 unsigned long int random( unsigned long int l, unsigned long int u ); §\C{// [l,u)}§ 6765 float random( void ); §\C{// [0.0, 1.0)}§ 6766 double random( void ); §\C{// [0.0, 1.0)}§ 6767 float _Complex random( void ); §\C{// [0.0, 1.0)+[0.0, 1.0)i}§ 6768 double _Complex random( void ); §\C{// [0.0, 1.0)+[0.0, 1.0)i}§ 6769 long double _Complex random( void ); §\C{// [0.0, 1.0)+[0.0, 1.0)i}§ 6082 6770 \end{cfa} 6083 6771 … … 6122 6810 [ int, long double ] remquo( long double, long double ); 6123 6811 6124 float div( float, float, int * );§\indexc{div}§ §\C{// alternative name for remquo}§6812 float div( float, float, int * );§\indexc{div}§ §\C{// alternative name for remquo}§ 6125 6813 double div( double, double, int * ); 6126 6814 long double div( long double, long double, int * ); … … 6278 6966 long double atan2( long double, long double ); 6279 6967 6280 float atan( float, float ); §\C{// alternative name for atan2}§6968 float atan( float, float ); §\C{// alternative name for atan2}§ 6281 6969 double atan( double, double );§\indexc{atan}§ 6282 6970 long double atan( long double, long double ); … … 6458 7146 6459 7147 6460 \section{Time }6461 \label{s:Time Lib}7148 
\section{Time Keeping} 7149 \label{s:TimeKeeping} 6462 7150 6463 7151 6464 7152 %\subsection{\texorpdfstring{\protect\lstinline@Duration@}{Duration}} 6465 \subsection{\texorpdfstring{\Lst KeywordStyle{\textmd{Duration}}}{Duration}}7153 \subsection{\texorpdfstring{\LstBasicStyle{Duration}}{Duration}} 6466 7154 \label{s:Duration} 6467 7155 … … 6469 7157 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 6470 7158 struct Duration { 6471 int64_t tv; §\C{// nanoseconds}§7159 int64_t tv; §\C{// nanoseconds}§ 6472 7160 }; 6473 7161 … … 6512 7200 6513 7201 Duration abs( Duration rhs ); 6514 6515 forall( dtype ostype | ostream( ostype ) ) ostype & ?|?( ostype & os, Duration dur );6516 7202 6517 7203 Duration ?`ns( int64_t nsec ); … … 6537 7223 int64_t ?`d( Duration dur ); 6538 7224 int64_t ?`w( Duration dur ); 7225 7226 Duration max( Duration lhs, Duration rhs ); 7227 Duration min( Duration lhs, Duration rhs ); 6539 7228 \end{cfa} 6540 7229 6541 7230 6542 7231 %\subsection{\texorpdfstring{\protect\lstinline@\timeval@}{timeval}} 6543 \subsection{\texorpdfstring{\Lst KeywordStyle{\textmd{timeval}}}{timeval}}7232 \subsection{\texorpdfstring{\LstBasicStyle{timeval}}{timeval}} 6544 7233 \label{s:timeval} 6545 7234 … … 6560 7249 6561 7250 6562 \subsection{\texorpdfstring{\protect\lstinline@timespec@}{timespec}} 7251 %\subsection{\texorpdfstring{\protect\lstinline@timespec@}{timespec}} 7252 \subsection{\texorpdfstring{\LstBasicStyle{timespec}}{timespec}} 6563 7253 \label{s:timespec} 6564 7254 … … 6579 7269 6580 7270 6581 \subsection{\texorpdfstring{\protect\lstinline@itimerval@}{itimerval}} 7271 %\subsection{\texorpdfstring{\protect\lstinline@itimerval@}{itimerval}} 7272 \subsection{\texorpdfstring{\LstBasicStyle{itimerval}}{itimerval}} 6582 7273 \label{s:itimerval} 6583 7274 … … 6589 7280 6590 7281 6591 \subsection{\texorpdfstring{\protect\lstinline@Time@}{Time}} 7282 %\subsection{\texorpdfstring{\protect\lstinline@Time@}{Time}} 7283 \subsection{\texorpdfstring{\LstBasicStyle{Time}}{Time}} 
6592 7284 \label{s:Time} 6593 7285 … … 6595 7287 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 6596 7288 struct Time { 6597 uint64_t tv; §\C{// nanoseconds since UNIX epoch}§7289 uint64_t tv; §\C{// nanoseconds since UNIX epoch}§ 6598 7290 }; 6599 7291 6600 7292 void ?{}( Time & time ); 6601 7293 void ?{}( Time & time, zero_t ); 6602 void ?{}( Time & time, int year, int month = 0, int day = 0, int hour = 0, int min = 0, int sec = 0, int nsec = 0 ); 7294 6603 7295 Time ?=?( Time & time, zero_t ); 6604 7296 … … 6609 7301 Time ?=?( Time & time, timespec t ); 6610 7302 6611 Time ?+?( Time & lhs, Duration rhs ) { return (Time)@{ lhs.tv + rhs.tv }; } 6612 Time ?+?( Duration lhs, Time rhs ) { return rhs + lhs; } 6613 Time ?+=?( Time & lhs, Duration rhs ) { lhs = lhs + rhs; return lhs; } 6614 6615 Duration ?-?( Time lhs, Time rhs ) { return (Duration)@{ lhs.tv - rhs.tv }; } 6616 Time ?-?( Time lhs, Duration rhs ) { return (Time)@{ lhs.tv - rhs.tv }; } 6617 Time ?-=?( Time & lhs, Duration rhs ) { lhs = lhs - rhs; return lhs; } 6618 _Bool ?==?( Time lhs, Time rhs ) { return lhs.tv == rhs.tv; } 6619 _Bool ?!=?( Time lhs, Time rhs ) { return lhs.tv != rhs.tv; } 6620 _Bool ?<?( Time lhs, Time rhs ) { return lhs.tv < rhs.tv; } 6621 _Bool ?<=?( Time lhs, Time rhs ) { return lhs.tv <= rhs.tv; } 6622 _Bool ?>?( Time lhs, Time rhs ) { return lhs.tv > rhs.tv; } 6623 _Bool ?>=?( Time lhs, Time rhs ) { return lhs.tv >= rhs.tv; } 6624 6625 forall( dtype ostype | ostream( ostype ) ) ostype & ?|?( ostype & os, Time time ); 7303 Time ?+?( Time & lhs, Duration rhs ); 7304 Time ?+?( Duration lhs, Time rhs ); 7305 Time ?+=?( Time & lhs, Duration rhs ); 7306 7307 Duration ?-?( Time lhs, Time rhs ); 7308 Time ?-?( Time lhs, Duration rhs ); 7309 Time ?-=?( Time & lhs, Duration rhs ); 7310 _Bool ?==?( Time lhs, Time rhs ); 7311 _Bool ?!=?( Time lhs, Time rhs ); 7312 _Bool ?<?( Time lhs, Time rhs ); 7313 _Bool ?<=?( Time lhs, Time rhs ); 7314 _Bool ?>?( Time lhs, Time rhs ); 7315 _Bool ?>=?( Time lhs, 
Time rhs ); 6626 7316 6627 7317 char * yy_mm_dd( Time time, char * buf ); … … 6641 7331 6642 7332 size_t strftime( char * buf, size_t size, const char * fmt, Time time ); 7333 forall( dtype ostype | ostream( ostype ) ) ostype & ?|?( ostype & os, Time time ); 6643 7334 \end{cfa} 6644 7335 … … 6661 7352 6662 7353 %\subsection{\texorpdfstring{\protect\lstinline@Clock@}{Clock}} 6663 \subsection{\texorpdfstring{\Lst KeywordStyle{\textmd{Clock}}}{Clock}}7354 \subsection{\texorpdfstring{\LstBasicStyle{Clock}}{Clock}} 6664 7355 \label{s:Clock} 6665 7356 … … 6667 7358 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 6668 7359 struct Clock { 6669 Duration offset; §\C{// for virtual clock: contains offset from real-time}§6670 int clocktype; §\C{// implementation only -1 (virtual), CLOCK\_REALTIME}§7360 Duration offset; §\C{// for virtual clock: contains offset from real-time}§ 7361 int clocktype; §\C{// implementation only -1 (virtual), CLOCK\_REALTIME}§ 6671 7362 }; 6672 7363 … … 6675 7366 void ?{}( Clock & clk ); 6676 7367 void ?{}( Clock & clk, Duration adj ); 6677 Duration getRes(); 6678 Time getTimeNsec(); §\C{// with nanoseconds}§ 6679 Time getTime(); §\C{// without nanoseconds}§ 7368 7369 Duration getResNsec(); §\C{// with nanoseconds}§ 7370 Duration getRes(); §\C{// without nanoseconds}§ 7371 7372 Time getTimeNsec(); §\C{// with nanoseconds}§ 7373 Time getTime(); §\C{// without nanoseconds}§ 6680 7374 Time getTime( Clock & clk ); 6681 7375 Time ?()( Clock & clk ); … … 6693 7387 6694 7388 \begin{cfa} 6695 void ?{}( Int * this ); §\C{// constructor/destructor}§7389 void ?{}( Int * this ); §\C{// constructor/destructor}§ 6696 7390 void ?{}( Int * this, Int init ); 6697 7391 void ?{}( Int * this, zero_t ); … … 6702 7396 void ^?{}( Int * this ); 6703 7397 6704 Int ?=?( Int * lhs, Int rhs ); §\C{// assignment}§7398 Int ?=?( Int * lhs, Int rhs ); §\C{// assignment}§ 6705 7399 Int ?=?( Int * lhs, long int rhs ); 6706 7400 Int ?=?( Int * lhs, unsigned long int rhs ); … … 6719 7413 
unsigned long int narrow( Int val ); 6720 7414 6721 int ?==?( Int oper1, Int oper2 ); §\C{// comparison}§7415 int ?==?( Int oper1, Int oper2 ); §\C{// comparison}§ 6722 7416 int ?==?( Int oper1, long int oper2 ); 6723 7417 int ?==?( long int oper2, Int oper1 ); … … 6755 7449 int ?>=?( unsigned long int oper1, Int oper2 ); 6756 7450 6757 Int +?( Int oper ); §\C{// arithmetic}§7451 Int +?( Int oper ); §\C{// arithmetic}§ 6758 7452 Int -?( Int oper ); 6759 7453 Int ~?( Int oper ); … … 6837 7531 Int ?>>=?( Int * lhs, mp_bitcnt_t shift ); 6838 7532 6839 Int abs( Int oper ); §\C{// number functions}§7533 Int abs( Int oper ); §\C{// number functions}§ 6840 7534 Int fact( unsigned long int N ); 6841 7535 Int gcd( Int oper1, Int oper2 ); … … 6862 7556 #include <gmp>§\indexc{gmp}§ 6863 7557 int main( void ) { 6864 sout | "Factorial Numbers" | endl;7558 sout | "Factorial Numbers"; 6865 7559 Int fact = 1; 6866 7560 6867 sout | 0 | fact | endl;7561 sout | 0 | fact; 6868 7562 for ( unsigned int i = 1; i <= 40; i += 1 ) { 6869 7563 fact *= i; 6870 sout | i | fact | endl;7564 sout | i | fact; 6871 7565 } 6872 7566 } … … 6948 7642 // implementation 6949 7643 struct Rational {§\indexc{Rational}§ 6950 long int numerator, denominator; §\C{// invariant: denominator > 0}§7644 long int numerator, denominator; §\C{// invariant: denominator > 0}§ 6951 7645 }; // Rational 6952 7646 6953 Rational rational(); §\C{// constructors}§7647 Rational rational(); §\C{// constructors}§ 6954 7648 Rational rational( long int n ); 6955 7649 Rational rational( long int n, long int d ); … … 6957 7651 void ?{}( Rational * r, one_t ); 6958 7652 6959 long int numerator( Rational r ); §\C{// numerator/denominator getter/setter}§7653 long int numerator( Rational r ); §\C{// numerator/denominator getter/setter}§ 6960 7654 long int numerator( Rational r, long int n ); 6961 7655 long int denominator( Rational r ); 6962 7656 long int denominator( Rational r, long int d ); 6963 7657 6964 int ?==?( Rational l, 
Rational r ); §\C{// comparison}§7658 int ?==?( Rational l, Rational r ); §\C{// comparison}§ 6965 7659 int ?!=?( Rational l, Rational r ); 6966 7660 int ?<?( Rational l, Rational r ); … … 6969 7663 int ?>=?( Rational l, Rational r ); 6970 7664 6971 Rational -?( Rational r ); §\C{// arithmetic}§7665 Rational -?( Rational r ); §\C{// arithmetic}§ 6972 7666 Rational ?+?( Rational l, Rational r ); 6973 7667 Rational ?-?( Rational l, Rational r ); … … 6975 7669 Rational ?/?( Rational l, Rational r ); 6976 7670 6977 double widen( Rational r ); §\C{// conversion}§7671 double widen( Rational r ); §\C{// conversion}§ 6978 7672 Rational narrow( double f, long int md ); 6979 7673 -
doc/working/exception/impl/exception.c
r7951100 rb067d9b 243 243 244 244 // Get a function pointer from the relative offset and call it 245 // _Unwind_Reason_Code (*matcher)() = (_Unwind_Reason_Code (*)())lsd_info.LPStart + imatcher; 245 // _Unwind_Reason_Code (*matcher)() = (_Unwind_Reason_Code (*)())lsd_info.LPStart + imatcher; 246 246 247 247 _Unwind_Reason_Code (*matcher)() = … … 320 320 // on how the assembly works. 321 321 // Setup the personality routine 322 #if defined(__PIC__) 323 asm volatile (".cfi_personality 0x9b,CFA.ref.__gcfa_personality_v0"); 324 // Setup the exception table 325 asm volatile (".cfi_lsda 0x1b, .LLSDACFA2"); 326 #else 322 327 asm volatile (".cfi_personality 0x3,__gcfa_personality_v0"); 323 328 // Setup the exception table 324 329 asm volatile (".cfi_lsda 0x3, .LLSDACFA2"); 330 #endif 325 331 326 332 // Label which defines the start of the area for which the handler is setup … … 356 362 // Some more works need to be done if we want to have a single 357 363 // call to the try routine 364 #if defined(__PIC__) 365 asm ( 366 //HEADER 367 ".LFECFA1:\n" 368 " .globl __gcfa_personality_v0\n" 369 " .section .gcc_except_table,\"a\",@progbits\n" 370 ".LLSDACFA2:\n" //TABLE header 371 " .byte 0xff\n" 372 " .byte 0xff\n" 373 " .byte 0x1\n" 374 " .uleb128 .LLSDACSECFA2-.LLSDACSBCFA2\n" // BODY length 375 // Body uses language specific data and therefore could be modified arbitrarily 376 ".LLSDACSBCFA2:\n" // BODY start 377 " .uleb128 .TRYSTART-__try_terminate\n" // Handled area start (relative to start of function) 378 " .uleb128 .TRYEND-.TRYSTART\n" // Handled area length 379 " .uleb128 .CATCH-__try_terminate\n" // Handler landing pad adress (relative to start of function) 380 " .uleb128 1\n" // Action code, gcc seems to use always 0 381 ".LLSDACSECFA2:\n" // BODY end 382 " .text\n" // TABLE footer 383 " .size __try_terminate, .-__try_terminate\n" 384 ); 385 386 // Somehow this piece of helps with the resolution of debug symbols. 
387 __attribute__((unused)) static const int dummy = 0; 388 asm ( 389 " .hidden CFA.ref.__gcfa_personality_v0\n" // Declare an new hidden symbol 390 " .weak CFA.ref.__gcfa_personality_v0\n" 391 " .section .data.rel.local.CFA.ref.__gcfa_personality_v0,\"awG\",@progbits,CFA.ref.__gcfa_personality_v0,comdat\n" // No clue what this does specifically 392 " .align 8\n" 393 " .type CFA.ref.__gcfa_personality_v0, @object\n" // Type of our hidden symbol (it's not actually the function itself) 394 " .size CFA.ref.__gcfa_personality_v0, 8\n" // Size of our hidden symbol 395 "CFA.ref.__gcfa_personality_v0:\n" 396 " .quad __gcfa_personality_v0\n" 397 ); 398 #else 358 399 asm ( 359 400 //HEADER … … 375 416 " .text\n" // TABLE footer 376 417 " .size __try_terminate, .-__try_terminate\n" 377 " .ident \"GCC: (Ubuntu 6.2.0-3ubuntu11~16.04) 6.2.0 20160901\"\n"378 // " .section .note.GNU-stack,\"x\",@progbits\n"379 418 ); 419 #endif
Note:
See TracChangeset
for help on using the changeset viewer.