Changeset 8a930c03
- Timestamp: Jun 12, 2023, 12:05:58 PM (3 years ago)
- Branches: master
- Children: fec8bd1
- Parents: 2b78949 (diff), 38e266ca (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Files:
- 33 added
- 4 deleted
- 97 edited
- 198 moved
-
Jenkinsfile (modified) (2 diffs)
-
benchmark/Makefile.am (modified) (5 diffs)
-
doc/bibliography/pl.bib (modified) (8 diffs)
-
doc/papers/llheap/Paper.tex (modified) (47 diffs)
-
doc/papers/llheap/figures/AllocatorComponents.fig (modified) (3 diffs)
-
doc/papers/llheap/figures/AllocatorComponents.fig.bak (deleted)
-
doc/theses/colby_parsons_MMAth/Makefile (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/balance.cfa (modified) (3 diffs)
-
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/dynamic.cfa (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/executor.cfa (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/matrix.cfa (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/repeat.cfa (modified) (4 diffs)
-
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/static.cfa (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/benchmarks/actors/plotData.py (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/benchmarks/channels/plotData.py (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/benchmarks/mutex_stmt/plotData.py (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/cfa/contend.cfa (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/cfa/future.cfa (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/cfa/sidechan.cfa (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/cfa/spin.cfa (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/contend/contend.go (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/contend/go.mod (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/contend2/contend.go (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/contend2/go.mod (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/contend4/contend.go (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/contend4/go.mod (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/contend8/contend.go (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/contend8/go.mod (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/sidechan/go.mod (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/sidechan/sidechan.go (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/spin/go.mod (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/spin/spin.go (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/spin2/go.mod (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/spin2/spin.go (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/spin4/go.mod (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/spin4/spin.go (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/spin8/go.mod (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/go/spin8/spin.go (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/run (added)
-
doc/theses/colby_parsons_MMAth/benchmarks/waituntil/ucpp/future.cc (added)
-
doc/theses/colby_parsons_MMAth/code/basic_actor_example.cfa (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/glossary.tex (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/local.bib (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/style/style.tex (modified) (1 diff)
-
doc/theses/colby_parsons_MMAth/text/channels.tex (modified) (8 diffs)
-
doc/theses/colby_parsons_MMAth/text/waituntil.tex (modified) (5 diffs)
-
doc/theses/colby_parsons_MMAth/thesis.tex (modified) (2 diffs)
-
doc/user/figures/EHMHierarchy.fig (modified) (1 diff)
-
doc/user/user.tex (modified) (15 diffs)
-
driver/cc1.cc (modified) (2 diffs)
-
driver/cfa.cc (modified) (2 diffs)
-
libcfa/src/Makefile.am (modified) (4 diffs)
-
libcfa/src/bits/weakso_locks.cfa (modified) (1 diff)
-
libcfa/src/bits/weakso_locks.hfa (modified) (2 diffs)
-
libcfa/src/concurrency/actor.hfa (modified) (9 diffs)
-
libcfa/src/concurrency/atomic.hfa (added)
-
libcfa/src/concurrency/channel.hfa (modified) (8 diffs)
-
libcfa/src/concurrency/future.hfa (modified) (2 diffs)
-
libcfa/src/concurrency/locks.cfa (modified) (1 diff)
-
libcfa/src/concurrency/locks.hfa (modified) (4 diffs)
-
libcfa/src/concurrency/select.cfa (modified) (1 diff)
-
libcfa/src/concurrency/select.hfa (modified) (4 diffs)
-
libcfa/src/containers/lockfree.hfa (modified) (4 diffs)
-
libcfa/src/fstream.cfa (modified) (11 diffs)
-
libcfa/src/fstream.hfa (modified) (6 diffs)
-
libcfa/src/math.trait.hfa (modified) (4 diffs)
-
libcfa/src/parseconfig.cfa (modified) (5 diffs)
-
libcfa/src/parseconfig.hfa (modified) (1 diff)
-
libcfa/src/rational.cfa (modified) (12 diffs)
-
libcfa/src/rational.hfa (modified) (2 diffs)
-
src/AST/DeclReplacer.hpp (modified) (1 diff)
-
src/AST/Pass.hpp (modified) (1 diff)
-
src/AST/Pass.impl.hpp (modified) (8 diffs)
-
src/AST/Pass.proto.hpp (modified) (1 diff)
-
src/AST/Print.cpp (modified) (1 diff)
-
src/AST/SymbolTable.cpp (modified) (8 diffs)
-
src/AST/SymbolTable.hpp (modified) (5 diffs)
-
src/AST/TypeEnvironment.cpp (modified) (10 diffs)
-
src/AST/TypeEnvironment.hpp (modified) (4 diffs)
-
src/AST/TypeSubstitution.cpp (modified) (3 diffs)
-
src/AST/TypeSubstitution.hpp (modified) (4 diffs)
-
src/AST/Util.cpp (modified) (2 diffs)
-
src/Concurrency/Actors.cpp (modified) (7 diffs)
-
src/Concurrency/Waituntil.cpp (modified) (11 diffs)
-
src/ControlStruct/ExceptDeclNew.cpp (modified) (1 diff)
-
src/GenPoly/InstantiateGenericNew.cpp (modified) (1 diff)
-
src/GenPoly/LvalueNew.cpp (modified) (4 diffs)
-
src/GenPoly/SpecializeNew.cpp (modified) (1 diff)
-
src/InitTweak/InitTweak.cc (modified) (1 diff)
-
src/MakeLibCfaNew.cpp (modified) (1 diff)
-
src/Parser/lex.ll (modified) (2 diffs)
-
src/Parser/parser.yy (modified) (31 diffs)
-
src/ResolvExpr/CandidateFinder.cpp (modified) (9 diffs)
-
src/ResolvExpr/CastCost.cc (modified) (2 diffs)
-
src/ResolvExpr/CommonType.cc (modified) (21 diffs)
-
src/ResolvExpr/CommonType.hpp (modified) (1 diff)
-
src/ResolvExpr/ConversionCost.cc (modified) (4 diffs)
-
src/ResolvExpr/PolyCost.cc (modified) (1 diff)
-
src/ResolvExpr/Resolver.cc (modified) (5 diffs)
-
src/ResolvExpr/SatisfyAssertions.cpp (modified) (3 diffs)
-
src/ResolvExpr/Unify.cc (modified) (30 diffs)
-
src/ResolvExpr/Unify.h (modified) (1 diff)
-
src/SymTab/Autogen.h (modified) (2 diffs)
-
src/SymTab/GenImplicitCall.cpp (modified) (2 diffs)
-
src/SymTab/GenImplicitCall.hpp (modified) (2 diffs)
-
src/Tuples/Explode.cc (modified) (1 diff)
-
src/Validate/Autogen.cpp (modified) (5 diffs)
-
src/Validate/FixQualifiedTypes.cpp (modified) (1 diff)
-
src/Validate/GenericParameter.cpp (modified) (1 diff)
-
src/Validate/HoistStruct.cpp (modified) (6 diffs)
-
src/Validate/ReplaceTypedef.cpp (modified) (2 diffs)
-
src/Virtual/ExpandCasts.cc (modified) (1 diff)
-
src/main.cc (modified) (1 diff)
-
tests/.expect/array-ERR1.txt (added)
-
tests/.expect/array-ERR2.txt (added)
-
tests/.expect/array-ERR3.txt (added)
-
tests/.expect/array.txt (modified) (1 diff)
-
tests/.expect/copyfile.txt (modified) (2 diffs)
-
tests/.in/copyfile.txt (modified) (2 diffs)
-
tests/Makefile.am (modified) (8 diffs)
-
tests/PRNG.cfa (modified) (1 diff)
-
tests/array.cfa (modified) (1 diff)
-
tests/concurrency/.expect/clib.txt (moved) (moved from tests/concurrent/.expect/clib.txt )
-
tests/concurrency/.expect/clib_tls.txt (moved) (moved from tests/concurrent/.expect/clib_tls.txt )
-
tests/concurrency/.expect/cluster.txt (moved) (moved from tests/concurrent/.expect/cluster.txt )
-
tests/concurrency/.expect/coroutineYield.txt (moved) (moved from tests/concurrent/.expect/coroutineYield.txt )
-
tests/concurrency/.expect/ctor-check.txt (moved) (moved from tests/concurrent/.expect/ctor-check.txt ) (1 diff)
-
tests/concurrency/.expect/join.txt (moved) (moved from tests/concurrent/.expect/join.txt )
-
tests/concurrency/.expect/joinerror.sed (moved) (moved from tests/concurrent/.expect/joinerror.sed )
-
tests/concurrency/.expect/keywordErrors.txt (added)
-
tests/concurrency/.expect/lockfree_stack.txt (moved) (moved from tests/concurrent/.expect/migrate.txt )
-
tests/concurrency/.expect/mainError.txt (added)
-
tests/concurrency/.expect/migrate.txt (moved) (moved from tests/concurrent/barrier/.expect/last.txt )
-
tests/concurrency/.expect/monitor.txt (moved) (moved from tests/concurrent/.expect/monitor.txt )
-
tests/concurrency/.expect/multi-monitor.txt (moved) (moved from tests/concurrent/.expect/multi-monitor.txt )
-
tests/concurrency/.expect/once.txt (moved) (moved from tests/concurrent/.expect/once.txt )
-
tests/concurrency/.expect/preempt.txt (moved) (moved from tests/concurrent/.expect/preempt.txt )
-
tests/concurrency/.expect/preempt2.txt (moved) (moved from tests/concurrent/.expect/preempt2.txt )
-
tests/concurrency/.expect/sleep.txt (moved) (moved from tests/concurrent/.expect/sleep.txt )
-
tests/concurrency/.expect/suspend_then.txt (moved) (moved from tests/concurrent/.expect/suspend_then.txt )
-
tests/concurrency/.expect/thread.txt (moved) (moved from tests/concurrent/.expect/thread.txt )
-
tests/concurrency/actors/.expect/dynamic.txt (moved) (moved from tests/concurrent/actors/.expect/dynamic.txt )
-
tests/concurrency/actors/.expect/executor.txt (moved) (moved from tests/concurrent/actors/.expect/executor.txt )
-
tests/concurrency/actors/.expect/inherit.txt (moved) (moved from tests/concurrent/actors/.expect/inherit.txt )
-
tests/concurrency/actors/.expect/matrix.txt (moved) (moved from tests/concurrent/actors/.expect/matrix.txt )
-
tests/concurrency/actors/.expect/pingpong.txt (moved) (moved from tests/concurrent/actors/.expect/pingpong.txt )
-
tests/concurrency/actors/.expect/poison.txt (moved) (moved from tests/concurrent/actors/.expect/poison.txt )
-
tests/concurrency/actors/.expect/static.txt (moved) (moved from tests/concurrent/actors/.expect/static.txt )
-
tests/concurrency/actors/.expect/types.txt (moved) (moved from tests/concurrent/actors/.expect/types.txt )
-
tests/concurrency/actors/dynamic.cfa (moved) (moved from tests/concurrent/actors/dynamic.cfa ) (1 diff)
-
tests/concurrency/actors/executor.cfa (moved) (moved from tests/concurrent/actors/executor.cfa ) (1 diff)
-
tests/concurrency/actors/inherit.cfa (moved) (moved from tests/concurrent/actors/inherit.cfa ) (1 diff)
-
tests/concurrency/actors/matrix.cfa (moved) (moved from tests/concurrent/actors/matrix.cfa ) (1 diff)
-
tests/concurrency/actors/pingpong.cfa (moved) (moved from tests/concurrent/actors/pingpong.cfa ) (2 diffs)
-
tests/concurrency/actors/poison.cfa (moved) (moved from tests/concurrent/actors/poison.cfa ) (3 diffs)
-
tests/concurrency/actors/static.cfa (moved) (moved from tests/concurrent/actors/static.cfa ) (1 diff)
-
tests/concurrency/actors/types.cfa (moved) (moved from tests/concurrent/actors/types.cfa ) (5 diffs)
-
tests/concurrency/barrier/.expect/generation.txt (moved) (moved from tests/concurrent/barrier/.expect/generation.txt )
-
tests/concurrency/barrier/.expect/last.txt (moved) (moved from tests/concurrent/barrier/.expect/order.txt )
-
tests/concurrency/barrier/.expect/order.txt (moved) (moved from tests/concurrent/examples/.expect/datingService.txt )
-
tests/concurrency/barrier/gen_generation_expect.cfa (moved) (moved from tests/concurrent/barrier/gen_generation_expect.cfa ) (1 diff)
-
tests/concurrency/barrier/generation.cfa (moved) (moved from tests/concurrent/barrier/generation.cfa ) (1 diff)
-
tests/concurrency/barrier/last.cfa (moved) (moved from tests/concurrent/barrier/last.cfa ) (1 diff)
-
tests/concurrency/barrier/order.cfa (moved) (moved from tests/concurrent/barrier/order.cfa ) (1 diff)
-
tests/concurrency/channels/.expect/big_elems.txt (moved) (moved from tests/concurrent/channels/.expect/big_elems.txt )
-
tests/concurrency/channels/.expect/churn.txt (moved) (moved from tests/concurrent/channels/.expect/churn.txt )
-
tests/concurrency/channels/.expect/contend.txt (moved) (moved from tests/concurrent/channels/.expect/contend.txt )
-
tests/concurrency/channels/.expect/daisy_chain.txt (moved) (moved from tests/concurrent/channels/.expect/daisy_chain.txt )
-
tests/concurrency/channels/.expect/hot_potato.txt (moved) (moved from tests/concurrent/channels/.expect/hot_potato.txt )
-
tests/concurrency/channels/.expect/ping_pong.txt (moved) (moved from tests/concurrent/channels/.expect/ping_pong.txt )
-
tests/concurrency/channels/.expect/pub_sub.txt (moved) (moved from tests/concurrent/channels/.expect/pub_sub.txt )
-
tests/concurrency/channels/.expect/zero_size.txt (moved) (moved from tests/concurrent/channels/.expect/zero_size.txt )
-
tests/concurrency/channels/barrier.cfa (moved) (moved from tests/concurrent/channels/barrier.cfa )
-
tests/concurrency/channels/big_elems.cfa (moved) (moved from tests/concurrent/channels/big_elems.cfa )
-
tests/concurrency/channels/churn.cfa (moved) (moved from tests/concurrent/channels/churn.cfa )
-
tests/concurrency/channels/contend.cfa (moved) (moved from tests/concurrent/channels/contend.cfa )
-
tests/concurrency/channels/daisy_chain.cfa (moved) (moved from tests/concurrent/channels/daisy_chain.cfa )
-
tests/concurrency/channels/hot_potato.cfa (moved) (moved from tests/concurrent/channels/hot_potato.cfa )
-
tests/concurrency/channels/parallel_harness.hfa (moved) (moved from tests/concurrent/channels/parallel_harness.hfa )
-
tests/concurrency/channels/ping_pong.cfa (moved) (moved from tests/concurrent/channels/ping_pong.cfa )
-
tests/concurrency/channels/pub_sub.cfa (moved) (moved from tests/concurrent/channels/pub_sub.cfa )
-
tests/concurrency/channels/zero_size.cfa (moved) (moved from tests/concurrent/channels/zero_size.cfa )
-
tests/concurrency/clib.c (moved) (moved from tests/concurrent/clib.c )
-
tests/concurrency/clib_tls.c (moved) (moved from tests/concurrent/clib_tls.c )
-
tests/concurrency/cluster.cfa (moved) (moved from tests/concurrent/cluster.cfa )
-
tests/concurrency/coroutineYield.cfa (moved) (moved from tests/concurrent/coroutineYield.cfa )
-
tests/concurrency/ctor-check.cfa (moved) (moved from tests/concurrent/ctor-check.cfa )
-
tests/concurrency/examples/.expect/boundedBufferEXT.txt (moved) (moved from tests/concurrent/examples/.expect/boundedBufferEXT.txt )
-
tests/concurrency/examples/.expect/boundedBufferINT.txt (moved) (moved from tests/concurrent/examples/.expect/boundedBufferINT.txt )
-
tests/concurrency/examples/.expect/datingService.txt (moved) (moved from tests/concurrent/park/.expect/force_preempt.txt )
-
tests/concurrency/examples/.expect/gortn.txt (moved) (moved from tests/concurrent/examples/.expect/gortn.txt )
-
tests/concurrency/examples/.expect/matrixSum.txt (moved) (moved from tests/concurrent/examples/.expect/matrixSum.txt )
-
tests/concurrency/examples/.expect/quickSort.txt (moved) (moved from tests/concurrent/examples/.expect/quickSort.txt )
-
tests/concurrency/examples/.in/quickSort.txt (moved) (moved from tests/concurrent/examples/.in/quickSort.txt )
-
tests/concurrency/examples/boundedBufferEXT.cfa (moved) (moved from tests/concurrent/examples/boundedBufferEXT.cfa )
-
tests/concurrency/examples/boundedBufferINT.cfa (moved) (moved from tests/concurrent/examples/boundedBufferINT.cfa )
-
tests/concurrency/examples/boundedBufferTHREAD.cfa (moved) (moved from tests/concurrent/examples/boundedBufferTHREAD.cfa )
-
tests/concurrency/examples/datingService.cfa (moved) (moved from tests/concurrent/examples/datingService.cfa )
-
tests/concurrency/examples/gortn.cfa (moved) (moved from tests/concurrent/examples/gortn.cfa )
-
tests/concurrency/examples/matrixSum.cfa (moved) (moved from tests/concurrent/examples/matrixSum.cfa )
-
tests/concurrency/examples/multiSort.cfa (moved) (moved from tests/concurrent/examples/multiSort.cfa )
-
tests/concurrency/examples/quickSort.cfa (moved) (moved from tests/concurrent/examples/quickSort.cfa )
-
tests/concurrency/examples/quickSort.generic.cfa (moved) (moved from tests/concurrent/examples/quickSort.generic.cfa )
-
tests/concurrency/futures/.expect/abandon.txt (moved) (moved from tests/concurrent/futures/.expect/abandon.txt )
-
tests/concurrency/futures/.expect/basic.txt (moved) (moved from tests/concurrent/futures/.expect/basic.txt )
-
tests/concurrency/futures/.expect/multi.txt (moved) (moved from tests/concurrent/futures/.expect/multi.txt )
-
tests/concurrency/futures/.expect/select_future.txt (moved) (moved from tests/concurrent/futures/.expect/select_future.txt )
-
tests/concurrency/futures/.expect/typed.txt (moved) (moved from tests/concurrent/futures/.expect/typed.txt )
-
tests/concurrency/futures/.expect/wait_any.txt (moved) (moved from tests/concurrent/futures/.expect/wait_any.txt )
-
tests/concurrency/futures/abandon.cfa (moved) (moved from tests/concurrent/futures/abandon.cfa )
-
tests/concurrency/futures/basic.cfa (moved) (moved from tests/concurrent/futures/basic.cfa )
-
tests/concurrency/futures/multi.cfa (moved) (moved from tests/concurrent/futures/multi.cfa )
-
tests/concurrency/futures/select_future.cfa (moved) (moved from tests/concurrent/futures/select_future.cfa )
-
tests/concurrency/futures/typed.cfa (moved) (moved from tests/concurrent/futures/typed.cfa )
-
tests/concurrency/futures/wait_any.cfa (moved) (moved from tests/concurrent/futures/wait_any.cfa )
-
tests/concurrency/join.cfa (moved) (moved from tests/concurrent/join.cfa )
-
tests/concurrency/joinerror.cfa (moved) (moved from tests/concurrent/joinerror.cfa )
-
tests/concurrency/keywordErrors.cfa (moved) (moved from tests/concurrent/keywordErrors.cfa )
-
tests/concurrency/lockfree_stack.cfa (added)
-
tests/concurrency/mainError.cfa (moved) (moved from tests/concurrent/mainError.cfa )
-
tests/concurrency/migrate.cfa (moved) (moved from tests/concurrent/migrate.cfa )
-
tests/concurrency/monitor.cfa (moved) (moved from tests/concurrent/monitor.cfa )
-
tests/concurrency/multi-monitor.cfa (moved) (moved from tests/concurrent/multi-monitor.cfa )
-
tests/concurrency/mutexstmt/.expect/locks.txt (moved) (moved from tests/concurrent/mutexstmt/.expect/locks.txt )
-
tests/concurrency/mutexstmt/.expect/monitors.txt (moved) (moved from tests/concurrent/mutexstmt/.expect/monitors.txt )
-
tests/concurrency/mutexstmt/locks.cfa (moved) (moved from tests/concurrent/mutexstmt/locks.cfa )
-
tests/concurrency/mutexstmt/monitors.cfa (moved) (moved from tests/concurrent/mutexstmt/monitors.cfa )
-
tests/concurrency/once.cfa (moved) (moved from tests/concurrent/once.cfa )
-
tests/concurrency/park/.expect/contention.txt (moved) (moved from tests/concurrent/park/.expect/contention.txt )
-
tests/concurrency/park/.expect/force_preempt.txt (moved) (moved from tests/concurrent/park/.expect/start_parked.txt )
-
tests/concurrency/park/.expect/start_parked.txt (added)
-
tests/concurrency/park/contention.cfa (moved) (moved from tests/concurrent/park/contention.cfa )
-
tests/concurrency/park/force_preempt.cfa (moved) (moved from tests/concurrent/park/force_preempt.cfa )
-
tests/concurrency/park/start_parked.cfa (moved) (moved from tests/concurrent/park/start_parked.cfa )
-
tests/concurrency/preempt.cfa (moved) (moved from tests/concurrent/preempt.cfa )
-
tests/concurrency/preempt2.cfa (moved) (moved from tests/concurrent/preempt2.cfa )
-
tests/concurrency/pthread/.expect/bounded_buffer.x64.txt (moved) (moved from tests/concurrent/pthread/.expect/bounded_buffer.x64.txt )
-
tests/concurrency/pthread/.expect/bounded_buffer.x86.txt (moved) (moved from tests/concurrent/pthread/.expect/bounded_buffer.x86.txt )
-
tests/concurrency/pthread/.expect/pthread_attr_test.txt (moved) (moved from tests/concurrent/pthread/.expect/pthread_attr_test.txt )
-
tests/concurrency/pthread/.expect/pthread_cond_test.txt (moved) (moved from tests/concurrent/pthread/.expect/pthread_cond_test.txt )
-
tests/concurrency/pthread/.expect/pthread_demo_create_join.txt (moved) (moved from tests/concurrent/pthread/.expect/pthread_demo_create_join.txt )
-
tests/concurrency/pthread/.expect/pthread_demo_lock.txt (moved) (moved from tests/concurrent/pthread/.expect/pthread_demo_lock.txt )
-
tests/concurrency/pthread/.expect/pthread_key_test.txt (moved) (moved from tests/concurrent/pthread/.expect/pthread_key_test.txt )
-
tests/concurrency/pthread/.expect/pthread_once_test.txt (moved) (moved from tests/concurrent/pthread/.expect/pthread_once_test.txt )
-
tests/concurrency/pthread/bounded_buffer.cfa (moved) (moved from tests/concurrent/pthread/bounded_buffer.cfa )
-
tests/concurrency/pthread/pthread_attr_test.cfa (moved) (moved from tests/concurrent/pthread/pthread_attr_test.cfa )
-
tests/concurrency/pthread/pthread_cond_test.cfa (moved) (moved from tests/concurrent/pthread/pthread_cond_test.cfa )
-
tests/concurrency/pthread/pthread_demo_create_join.cfa (moved) (moved from tests/concurrent/pthread/pthread_demo_create_join.cfa )
-
tests/concurrency/pthread/pthread_demo_lock.cfa (moved) (moved from tests/concurrent/pthread/pthread_demo_lock.cfa )
-
tests/concurrency/pthread/pthread_key_test.cfa (moved) (moved from tests/concurrent/pthread/pthread_key_test.cfa )
-
tests/concurrency/pthread/pthread_once_test.cfa (moved) (moved from tests/concurrent/pthread/pthread_once_test.cfa )
-
tests/concurrency/readyQ/.expect/barrier_sleeper.txt (moved) (moved from tests/concurrent/readyQ/.expect/barrier_sleeper.txt )
-
tests/concurrency/readyQ/.expect/leader_spin.txt (moved) (moved from tests/concurrent/readyQ/.expect/leader_spin.txt )
-
tests/concurrency/readyQ/barrier_sleeper.cfa (moved) (moved from tests/concurrent/readyQ/barrier_sleeper.cfa ) (1 diff)
-
tests/concurrency/readyQ/leader_spin.cfa (moved) (moved from tests/concurrent/readyQ/leader_spin.cfa ) (1 diff)
-
tests/concurrency/signal/.expect/block.txt (moved) (moved from tests/concurrent/signal/.expect/block.txt )
-
tests/concurrency/signal/.expect/disjoint.txt (moved) (moved from tests/concurrent/signal/.expect/disjoint.txt )
-
tests/concurrency/signal/.expect/wait.txt (moved) (moved from tests/concurrent/signal/.expect/wait.txt )
-
tests/concurrency/signal/block.cfa (moved) (moved from tests/concurrent/signal/block.cfa )
-
tests/concurrency/signal/disjoint.cfa (moved) (moved from tests/concurrent/signal/disjoint.cfa )
-
tests/concurrency/signal/wait.cfa (moved) (moved from tests/concurrent/signal/wait.cfa )
-
tests/concurrency/sleep.cfa (moved) (moved from tests/concurrent/sleep.cfa )
-
tests/concurrency/suspend_then.cfa (moved) (moved from tests/concurrent/suspend_then.cfa )
-
tests/concurrency/thread.cfa (moved) (moved from tests/concurrent/thread.cfa )
-
tests/concurrency/unified_locking/.expect/block_spin_lock.txt (moved) (moved from tests/concurrent/unified_locking/.expect/block_spin_lock.txt )
-
tests/concurrency/unified_locking/.expect/exp_backoff.txt (moved) (moved from tests/concurrent/unified_locking/.expect/exp_backoff.txt )
-
tests/concurrency/unified_locking/.expect/fast_block_lock.txt (moved) (moved from tests/concurrent/unified_locking/.expect/fast_block_lock.txt )
-
tests/concurrency/unified_locking/.expect/futex_mutex.txt (moved) (moved from tests/concurrent/unified_locking/.expect/futex_mutex.txt )
-
tests/concurrency/unified_locking/.expect/locks.txt (moved) (moved from tests/concurrent/unified_locking/.expect/locks.txt )
-
tests/concurrency/unified_locking/.expect/mcs.txt (moved) (moved from tests/concurrent/unified_locking/.expect/mcs.txt )
-
tests/concurrency/unified_locking/.expect/mcs_block_spin_lock.txt (moved) (moved from tests/concurrent/unified_locking/.expect/mcs_block_spin_lock.txt )
-
tests/concurrency/unified_locking/.expect/mcs_spin.txt (moved) (moved from tests/concurrent/unified_locking/.expect/mcs_spin.txt )
-
tests/concurrency/unified_locking/.expect/pthread_locks.txt (moved) (moved from tests/concurrent/unified_locking/.expect/pthread_locks.txt )
-
tests/concurrency/unified_locking/.expect/simple_owner_lock.txt (moved) (moved from tests/concurrent/unified_locking/.expect/simple_owner_lock.txt )
-
tests/concurrency/unified_locking/.expect/spin_queue_lock.txt (moved) (moved from tests/concurrent/unified_locking/.expect/spin_queue_lock.txt )
-
tests/concurrency/unified_locking/.expect/timeout_lock.txt (moved) (moved from tests/concurrent/unified_locking/.expect/timeout_lock.txt )
-
tests/concurrency/unified_locking/block_spin_lock.cfa (moved) (moved from tests/concurrent/unified_locking/block_spin_lock.cfa )
-
tests/concurrency/unified_locking/exp_backoff.cfa (moved) (moved from tests/concurrent/unified_locking/exp_backoff.cfa )
-
tests/concurrency/unified_locking/fast_block_lock.cfa (moved) (moved from tests/concurrent/unified_locking/fast_block_lock.cfa )
-
tests/concurrency/unified_locking/futex_mutex.cfa (moved) (moved from tests/concurrent/unified_locking/futex_mutex.cfa )
-
tests/concurrency/unified_locking/locks.cfa (moved) (moved from tests/concurrent/unified_locking/locks.cfa )
-
tests/concurrency/unified_locking/mcs.cfa (moved) (moved from tests/concurrent/unified_locking/mcs.cfa )
-
tests/concurrency/unified_locking/mcs_block_spin_lock.cfa (moved) (moved from tests/concurrent/unified_locking/mcs_block_spin_lock.cfa )
-
tests/concurrency/unified_locking/mcs_spin.cfa (moved) (moved from tests/concurrent/unified_locking/mcs_spin.cfa )
-
tests/concurrency/unified_locking/mutex_test.hfa (moved) (moved from tests/concurrent/unified_locking/mutex_test.hfa )
-
tests/concurrency/unified_locking/pthread_locks.cfa (moved) (moved from tests/concurrent/unified_locking/pthread_locks.cfa )
-
tests/concurrency/unified_locking/simple_owner_lock.cfa (moved) (moved from tests/concurrent/unified_locking/simple_owner_lock.cfa )
-
tests/concurrency/unified_locking/spin_queue_lock.cfa (moved) (moved from tests/concurrent/unified_locking/spin_queue_lock.cfa )
-
tests/concurrency/unified_locking/spintest.cfa (moved) (moved from tests/concurrent/unified_locking/spintest.cfa )
-
tests/concurrency/unified_locking/test_debug.cfa (moved) (moved from tests/concurrent/unified_locking/test_debug.cfa )
-
tests/concurrency/unified_locking/thread_test.cfa (moved) (moved from tests/concurrent/unified_locking/thread_test.cfa )
-
tests/concurrency/unified_locking/timeout_lock.cfa (moved) (moved from tests/concurrent/unified_locking/timeout_lock.cfa )
-
tests/concurrency/waitfor/.expect/barge.txt (moved) (moved from tests/concurrent/waitfor/.expect/barge.txt )
-
tests/concurrency/waitfor/.expect/dtor.txt (moved) (moved from tests/concurrent/waitfor/.expect/dtor.txt )
-
tests/concurrency/waitfor/.expect/else.txt (moved) (moved from tests/concurrent/waitfor/.expect/else.txt )
-
tests/concurrency/waitfor/.expect/parse.txt (added)
-
tests/concurrency/waitfor/.expect/recurse.txt (moved) (moved from tests/concurrent/waitfor/.expect/recurse.txt )
-
tests/concurrency/waitfor/.expect/statment.txt (moved) (moved from tests/concurrent/waitfor/.expect/statment.txt )
-
tests/concurrency/waitfor/.expect/when.txt (moved) (moved from tests/concurrent/waitfor/.expect/when.txt )
-
tests/concurrency/waitfor/barge.cfa (moved) (moved from tests/concurrent/waitfor/barge.cfa )
-
tests/concurrency/waitfor/dtor.cfa (moved) (moved from tests/concurrent/waitfor/dtor.cfa )
-
tests/concurrency/waitfor/else.cfa (moved) (moved from tests/concurrent/waitfor/else.cfa )
-
tests/concurrency/waitfor/parse.cfa (moved) (moved from tests/concurrent/waitfor/parse.cfa )
-
tests/concurrency/waitfor/recurse.cfa (moved) (moved from tests/concurrent/waitfor/recurse.cfa )
-
tests/concurrency/waitfor/simple.cfa (moved) (moved from tests/concurrent/waitfor/simple.cfa )
-
tests/concurrency/waitfor/statment.cfa (moved) (moved from tests/concurrent/waitfor/statment.cfa )
-
tests/concurrency/waitfor/when.cfa (moved) (moved from tests/concurrent/waitfor/when.cfa )
-
tests/concurrency/waituntil/.expect/all_types.txt (moved) (moved from tests/concurrent/waituntil/.expect/all_types.txt )
-
tests/concurrency/waituntil/.expect/basic_else.txt (moved) (moved from tests/concurrent/waituntil/.expect/basic_else.txt )
-
tests/concurrency/waituntil/.expect/channel_close.txt (moved) (moved from tests/concurrent/waituntil/.expect/channel_close.txt )
-
tests/concurrency/waituntil/.expect/channel_zero_size.txt (moved) (moved from tests/concurrent/waituntil/.expect/channel_zero_size.txt )
-
tests/concurrency/waituntil/.expect/channels.txt (moved) (moved from tests/concurrent/waituntil/.expect/channels.txt )
-
tests/concurrency/waituntil/.expect/futures.txt (moved) (moved from tests/concurrent/waituntil/.expect/futures.txt )
-
tests/concurrency/waituntil/.expect/locks.txt (moved) (moved from tests/concurrent/waituntil/.expect/locks.txt )
-
tests/concurrency/waituntil/.expect/one_chan.txt (moved) (moved from tests/concurrent/waituntil/.expect/one_chan.txt )
-
tests/concurrency/waituntil/.expect/timeout.txt (moved) (moved from tests/concurrent/waituntil/.expect/timeout.txt )
-
tests/concurrency/waituntil/all_types.cfa (moved) (moved from tests/concurrent/waituntil/all_types.cfa )
-
tests/concurrency/waituntil/basic_else.cfa (moved) (moved from tests/concurrent/waituntil/basic_else.cfa )
-
tests/concurrency/waituntil/channel_close.cfa (moved) (moved from tests/concurrent/waituntil/channel_close.cfa )
-
tests/concurrency/waituntil/channel_zero_size.cfa (moved) (moved from tests/concurrent/waituntil/channel_zero_size.cfa )
-
tests/concurrency/waituntil/channels.cfa (moved) (moved from tests/concurrent/waituntil/channels.cfa )
-
tests/concurrency/waituntil/futures.cfa (moved) (moved from tests/concurrent/waituntil/futures.cfa )
-
tests/concurrency/waituntil/locks.cfa (moved) (moved from tests/concurrent/waituntil/locks.cfa ) (1 diff)
-
tests/concurrency/waituntil/one_chan.cfa (moved) (moved from tests/concurrent/waituntil/one_chan.cfa )
-
tests/concurrency/waituntil/timeout.cfa (moved) (moved from tests/concurrent/waituntil/timeout.cfa )
-
tests/concurrent/.expect/keywordErrors.txt (deleted)
-
tests/concurrent/.expect/mainError.txt (deleted)
-
tests/configs/.expect/parseconfig.txt (modified) (1 diff)
-
tests/configs/parseconfig.cfa (modified) (1 diff)
-
tests/copyfile.cfa (modified) (2 diffs)
-
tests/rational.cfa (modified) (6 diffs)
-
tests/zombies/simplePoly.c (deleted)
Legend:
- Unmodified
- Added
- Removed
-
Jenkinsfile
r2b78949 → r8a930c03

@@ lines 155-159 @@
 	dir (BuildDir) {
 		//Run the tests from the tests directory
-		sh """make ${jopt} --no-print-directory -C tests timeouts="--timeout=600 --global-timeout=14400" tests debug=yes archiveerrors=${BuildDir}/tests/crashes/full-debug"""
+		sh """make ${jopt} --no-print-directory -C tests timeout=600 global-timeout=14400 tests debug=yes archive-errors=${BuildDir}/tests/crashes/full-debug"""
 	}
 }
@@ lines 162-166 @@
 	dir (BuildDir) {
 		//Run the tests from the tests directory
-		sh """make ${jopt} --no-print-directory -C tests timeouts="--timeout=600 --global-timeout=14400" tests debug=no archiveerrors=${BuildDir}/tests/crashes/full-nodebug"""
+		sh """make ${jopt} --no-print-directory -C tests timeout=600 global-timeout=14400 tests debug=no archive-errors=${BuildDir}/tests/crashes/full-nodebug"""
 	}
 }
benchmark/Makefile.am
r2b78949 r8a930c03 11 11 ## Created On : Sun May 31 09:08:15 2015 12 12 ## Last Modified By : Peter A. Buhr 13 ## Last Modified On : Tue Mar 10 11:41:18 202014 ## Update Count : 2 5813 ## Last Modified On : Fri May 26 12:13:48 2023 14 ## Update Count : 260 15 15 ############################################################################### 16 16 … … 374 374 ## ========================================================================================================= 375 375 376 mutexStmt$(EXEEXT) : \377 mutexStmt-cpp1.run \378 mutexStmt-cpp2.run \379 mutexStmt-cpp4.run \380 mutexStmt-cpp8.run \381 mutexStmt-java.run \382 mutexStmt-lock1.run \383 mutexStmt-lock2.run \384 mutexStmt-lock4.run \385 mutexStmt-lock8.run \386 mutexStmt-no-stmt-lock1.run \387 mutexStmt-no-stmt-lock2.run \388 mutexStmt-no-stmt-lock4.run \389 mutexStmt-no-stmt-lock8.run \390 mutexStmt-monitor1.run \391 mutexStmt-monitor2.run \376 mutexStmt$(EXEEXT) : \ 377 mutexStmt-cpp1.run \ 378 mutexStmt-cpp2.run \ 379 mutexStmt-cpp4.run \ 380 mutexStmt-cpp8.run \ 381 mutexStmt-java.run \ 382 mutexStmt-lock1.run \ 383 mutexStmt-lock2.run \ 384 mutexStmt-lock4.run \ 385 mutexStmt-lock8.run \ 386 mutexStmt-no-stmt-lock1.run \ 387 mutexStmt-no-stmt-lock2.run \ 388 mutexStmt-no-stmt-lock4.run \ 389 mutexStmt-no-stmt-lock8.run \ 390 mutexStmt-monitor1.run \ 391 mutexStmt-monitor2.run \ 392 392 mutexStmt-monitor4.run 393 393 … … 567 567 compile-array.make \ 568 568 compile-attributes.make \ 569 compile-empty.make \569 compile-empty.make \ 570 570 compile-expression.make \ 571 571 compile-io.make \ … … 592 592 593 593 compile-monitor$(EXEEXT): 594 $(CFACOMPILE) -DNO_COMPILED_PRAGMA -fsyntax-only -w $(testdir)/concurren t/monitor.cfa594 $(CFACOMPILE) -DNO_COMPILED_PRAGMA -fsyntax-only -w $(testdir)/concurrency/monitor.cfa 595 595 596 596 compile-operators$(EXEEXT): … … 598 598 599 599 compile-thread$(EXEEXT): 600 $(CFACOMPILE) -DNO_COMPILED_PRAGMA -fsyntax-only -w $(testdir)/concurren t/thread.cfa600 $(CFACOMPILE) -DNO_COMPILED_PRAGMA -fsyntax-only -w $(testdir)/concurrency/thread.cfa 601 601 602 602 compile-typeof$(EXEEXT): -
doc/bibliography/pl.bib
r2b78949 r8a930c03 1209 1209 year = 2018, 1210 1210 pages = {2111-2146}, 1211 note = {\href{http://dx.doi.org/10.1002/spe.2624}{http://\-dx.doi.org/\-10.1002/\-spe.2624}},1211 optnote = {\href{http://dx.doi.org/10.1002/spe.2624}{http://\-dx.doi.org/\-10.1002/\-spe.2624}}, 1212 1212 } 1213 1213 … … 1870 1870 month = sep, 1871 1871 year = 2020, 1872 note = {\ href{https://plg.uwaterloo.ca/~usystem/pub/uSystem/uC++.pdf}{https://\-plg.uwaterloo.ca/\-$\sim$usystem/\-pub/\-uSystem/uC++.pdf}},1872 note = {\url{https://plg.uwaterloo.ca/~usystem/pub/uSystem/uC++.pdf}}, 1873 1873 } 1874 1874 … … 2004 2004 number = 5, 2005 2005 pages = {1005-1042}, 2006 note = {\href{https://onlinelibrary.wiley.com/doi/10.1002/spe.2925}{https://\-onlinelibrary.wiley.com/\-doi/\-10.1002/\-spe.2925}},2006 optnote = {\href{https://onlinelibrary.wiley.com/doi/10.1002/spe.2925}{https://\-onlinelibrary.wiley.com/\-doi/\-10.1002/\-spe.2925}}, 2007 2007 } 2008 2008 … … 4223 4223 title = {Implementing Lock-Free Queues}, 4224 4224 booktitle = {Seventh International Conference on Parallel and Distributed Computing Systems}, 4225 organization= {International Society for Computers and Their Applications}, 4225 4226 address = {Las Vegas, Nevada, U.S.A.}, 4226 4227 year = {1994}, … … 5086 5087 } 5087 5088 5088 @m anual{MMTk,5089 @misc{MMTk, 5089 5090 keywords = {Java memory management}, 5090 5091 contributer = {pabuhr@plg}, … … 5093 5094 month = sep, 5094 5095 year = 2006, 5095 note = {\href{http://cs.anu.edu.au/~Robin.Garner/mmtk-guide.pdf} 5096 {http://cs.anu.edu.au/\-$\sim$Robin.Garner/\-mmtk-guide.pdf}}, 5096 howpublished= {\url{http://cs.anu.edu.au/~Robin.Garner/mmtk-guide.pdf}}, 5097 5097 } 5098 5098 … … 7402 7402 } 7403 7403 7404 @misc{rpmalloc, 7405 author = {Mattias Jansson}, 7406 title = {rpmalloc version 1.4.1}, 7407 month = apr, 7408 year = 2022, 7409 howpublished= {\href{https://github.com/mjansson/rpmalloc}{https://\-github.com/\-mjansson/\-rpmalloc}}, 7410 } 7411 7404 7412 @manual{Rust, 7405 7413 keywords = {Rust programming language}, … … 7456 7464 booktitle = {PLDI '04: Proceedings of the ACM SIGPLAN 2004 Conference on Programming Language Design and Implementation}, 7457 7465 location = {Washington DC, USA}, 7458 publisher= {ACM},7466 organization= {ACM}, 7459 7467 address = {New York, NY, USA}, 7460 7468 volume = 39, -
doc/papers/llheap/Paper.tex
r2b78949 r8a930c03 252 252 Dynamic code/data memory is managed by the dynamic loader for libraries loaded at runtime, which is complex especially in a multi-threaded program~\cite{Huang06}. 253 253 However, changes to the dynamic code/data space are typically infrequent, many occurring at program startup, and are largely outside of a program's control. 254 Stack memory is managed by the program call/return-mechanism using a simpleLIFO technique, which works well for sequential programs.255 For stackful coroutines and user threads, a new stack is commonly created in dynamic-allocation memory.254 Stack memory is managed by the program call/return-mechanism using a LIFO technique, which works well for sequential programs. 255 For stackful coroutines and user threads, a new stack is commonly created in the dynamic-allocation memory. 256 256 This work focuses solely on management of the dynamic-allocation memory. 257 257 … … 293 293 \begin{enumerate}[leftmargin=*,itemsep=0pt] 294 294 \item 295 Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFAusing user-level threads running on multiple kernel threads (M:N threading).296 297 \item 298 Extend the standard C heap functionality by preserving with each allocation: its request size plus the amount allocated, whether an allocation is zero fill , andallocation alignment.295 Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC~\cite{uC++} and \CFA~\cite{Moss18,Delisle21} using user-level threads running on multiple kernel threads (M:N threading). 296 297 \item 298 Extend the standard C heap functionality by preserving with each allocation: its request size plus the amount allocated, whether an allocation is zero fill and/or allocation alignment. 299 299 300 300 \item … … 365 365 366 366 The following discussion is a quick overview of the moving-pieces that affect the design of a memory allocator and its performance. 367 It is assumed that dynamic allocates and deallocates acquire storage for a program variable, referred to asan \newterm{object}, through calls such as @malloc@ and @free@ in C, and @new@ and @delete@ in \CC.367 Dynamic acquires and releases obtain storage for a program variable, called an \newterm{object}, through calls such as @malloc@ and @free@ in C, and @new@ and @delete@ in \CC. 368 368 Space for each allocated object comes from the dynamic-allocation zone. 369 369 … … 378 378 379 379 Figure~\ref{f:AllocatorComponents} shows the two important data components for a memory allocator, management and storage, collectively called the \newterm{heap}. 380 The \newterm{management data} is a data structure located at a known memory address and contains all information necessary to manage the storage data. 381 The management data starts with fixed-sized information in the static-data memory that references components in the dynamic-allocation memory. 380 The \newterm{management data} is a data structure located at a known memory address and contains fixed-sized information in the static-data memory that references components in the dynamic-allocation memory. 
382 381 For multi-threaded programs, additional management data may exist in \newterm{thread-local storage} (TLS) for each kernel thread executing the program. 383 382 The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}. … … 385 384 \ie only the program knows the location of allocated storage not the memory allocator. 386 385 Freed objects (white) represent memory deallocated by the program, which are linked into one or more lists facilitating easy location of new allocations. 387 Reserved memory (dark grey) is one or more blocks of memory obtained from the operating systembut not yet allocated to the program;388 if there are multiple reserved blocks, they are also chained together , usually internally.386 Reserved memory (dark grey) is one or more blocks of memory obtained from the \newterm{operating system} (OS) but not yet allocated to the program; 387 if there are multiple reserved blocks, they are also chained together. 389 388 390 389 \begin{figure} … … 395 394 \end{figure} 396 395 397 In m ost allocator designs, allocated objects have management data embedded within them.396 In many allocator designs, allocated objects and reserved blocks have management data embedded within them (see also Section~\ref{s:ObjectContainers}). 398 397 Figure~\ref{f:AllocatedObject} shows an allocated object with a header, trailer, and optional spacing around the object. 399 398 The header contains information about the object, \eg size, type, etc. … … 404 403 When padding and spacing are necessary, neither can be used to satisfy a future allocation request while the current allocation exists. 405 404 406 A free object alsocontains management data, \eg size, pointers, etc.405 A free object often contains management data, \eg size, pointers, etc. 407 406 Often the free list is chained internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks. 408 407 For internal chaining, the amount of management data for a free node defines the minimum allocation size, \eg if 16 bytes are needed for a free-list node, allocation requests less than 16 bytes are rounded up. 409 The information in an allocated or freed object is overwritten when it transitions from allocated to freed and vice-versa by new management information and/or program data.408 The information in an allocated or freed object is overwritten when it transitions from allocated to freed and vice-versa by new program data and/or management information. 410 409 411 410 \begin{figure} … … 428 427 \label{s:Fragmentation} 429 428 430 Fragmentation is memory requested from the operating systembut not used by the program;429 Fragmentation is memory requested from the OS but not used by the program; 431 430 hence, allocated objects are not fragmentation. 432 431 Figure~\ref{f:InternalExternalFragmentation} shows fragmentation is divided into two forms: internal or external. … … 443 442 An allocator should strive to keep internal management information to a minimum. 444 443 445 \newterm{External fragmentation} is all memory space reserved from the operating systembut not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes all external management data, freed objects, and reserved memory.444 \newterm{External fragmentation} is all memory space reserved from the OS but not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes all external management data, freed objects, and reserved memory. 
446 445 This memory is problematic in two ways: heap blowup and highly fragmented memory. 447 446 \newterm{Heap blowup} occurs when freed memory cannot be reused for future allocations leading to potentially unbounded external fragmentation growth~\cite{Berger00}. 448 Memory can become \newterm{highly fragmented} after multiple allocations and deallocations of objects, resulting in a checkerboard of adjacent allocated and free areas, where the free blocks have become very small.447 Memory can become \newterm{highly fragmented} after multiple allocations and deallocations of objects, resulting in a checkerboard of adjacent allocated and free areas, where the free blocks have become to small to service requests. 449 448 % Figure~\ref{f:MemoryFragmentation} shows an example of how a small block of memory fragments as objects are allocated and deallocated over time. 450 449 Heap blowup can occur due to allocator policies that are too restrictive in reusing freed memory (the allocated size cannot use a larger free block) and/or no coalescing of free storage. … … 452 451 % Memory is highly fragmented when most free blocks are unusable because of their sizes. 453 452 % For example, Figure~\ref{f:Contiguous} and Figure~\ref{f:HighlyFragmented} have the same quantity of external fragmentation, but Figure~\ref{f:HighlyFragmented} is highly fragmented. 454 % If there is a request to allocate a large object, Figure~\ref{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while Figure~\ref{f:HighlyFragmented} likely has to request more memory from the operating system.453 % If there is a request to allocate a large object, Figure~\ref{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while Figure~\ref{f:HighlyFragmented} likely has to request more memory from the OS. 455 454 456 455 % \begin{figure} … … 475 474 The first approach is a \newterm{sequential-fit algorithm} with one list of free objects that is searched for a block large enough to fit a requested object size. 476 475 Different search policies determine the free object selected, \eg the first free object large enough or closest to the requested size. 477 Any storage larger than the request can become spacing after the object or besplit into a smaller free object.476 Any storage larger than the request can become spacing after the object or split into a smaller free object. 478 477 % The cost of the search depends on the shape and quality of the free list, \eg a linear versus a binary-tree free-list, a sorted versus unsorted free-list. 479 478 … … 489 488 490 489 The third approach is \newterm{splitting} and \newterm{coalescing algorithms}. 
491 When an object is allocated, if there are no free objects of the requested size, a larger free object may be split into two smaller objects to satisfy the allocation request without obtaining more memory from the operating system.492 For example, in the \newterm{buddy system}, a block of free memory is split into two equal chunks, one of those chunks is again split into two equal chunks, and so on until a block just large enough to fit the requested object is created.493 When an object is deallocated it is coalesced with the objects immediately before and after it in memory, if they are free, turning them into one larger object.490 When an object is allocated, if there are no free objects of the requested size, a larger free object is split into two smaller objects to satisfy the allocation request rather than obtaining more memory from the OS. 491 For example, in the \newterm{buddy system}, a block of free memory is split into equal chunks, one of those chunks is again split, and so on until a minimal block is created that fits the requested object. 492 When an object is deallocated, it is coalesced with the objects immediately before and after it in memory, if they are free, turning them into one larger block. 494 493 Coalescing can be done eagerly at each deallocation or lazily when an allocation cannot be fulfilled. 495 In all cases, coalescing increases allocation latency, hence some allocations can cause unbounded delays during coalescing.494 In all cases, coalescing increases allocation latency, hence some allocations can cause unbounded delays. 496 495 While coalescing does not reduce external fragmentation, the coalesced blocks improve fragmentation quality so future allocations are less likely to cause heap blowup. 497 496 % Splitting and coalescing can be used with other algorithms to avoid highly fragmented memory. … … 501 500 \label{s:Locality} 502 501 503 The principle of locality recognizes that programs tend to reference a small set of data, called a working set, for a certain period of time, where a working set iscomposed of temporal and spatial accesses~\cite{Denning05}.502 The principle of locality recognizes that programs tend to reference a small set of data, called a \newterm{working set}, for a certain period of time, composed of temporal and spatial accesses~\cite{Denning05}. 504 503 % Temporal clustering implies a group of objects are accessed repeatedly within a short time period, while spatial clustering implies a group of objects physically close together (nearby addresses) are accessed repeatedly within a short time period. 505 504 % Temporal locality commonly occurs during an iterative computation with a fixed set of disjoint variables, while spatial locality commonly occurs when traversing an array. 506 Hardware takes advantage of t emporal and spatial localitythrough multiple levels of caching, \ie memory hierarchy.505 Hardware takes advantage of the working set through multiple levels of caching, \ie memory hierarchy. 507 506 % When an object is accessed, the memory physically located around the object is also cached with the expectation that the current and nearby objects will be referenced within a short period of time. 508 For example, entire cache lines are transferred between memory and cache and entire virtual-memory pages are transferred between disk and memory.507 For example, entire cache lines are transferred between cache and memory, and entire virtual-memory pages are transferred between memory and disk. 
509 508 % A program exhibiting good locality has better performance due to fewer cache misses and page faults\footnote{With the advent of large RAM memory, paging is becoming less of an issue in modern programming.}. 510 509 … … 532 531 \label{s:MutualExclusion} 533 532 534 \newterm{Mutual exclusion} provides sequential access to the shared management data of the heap.533 \newterm{Mutual exclusion} provides sequential access to the shared-management data of the heap. 535 534 There are two performance issues for mutual exclusion. 536 535 First is the overhead necessary to perform (at least) a hardware atomic operation every time a shared resource is accessed. 537 536 Second is when multiple threads contend for a shared resource simultaneously, and hence, some threads must wait until the resource is released. 538 537 Contention can be reduced in a number of ways: 539 1) Using multiple fine-grained locks versus a single lock , spreadingthe contention across a number of locks.538 1) Using multiple fine-grained locks versus a single lock to spread the contention across a number of locks. 540 539 2) Using trylock and generating new storage if the lock is busy, yielding a classic space versus time tradeoff. 541 540 3) Using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Oyama99}. … … 551 550 a memory allocator can only affect the latter two. 552 551 553 Assume two objects, object$_1$ and object$_2$, share a cache line.554 \newterm{Program-induced false-sharing} occurs when thread$_1$ passes a reference to object$_2$ to thread$_2$, and then threads$_1$ modifies object$_1$ while thread$_2$ modifies object$_2$.552 Specifically, assume two objects, O$_1$ and O$_2$, share a cache line, with threads, T$_1$ and T$_2$. 553 \newterm{Program-induced false-sharing} occurs when T$_1$ passes a reference to O$_2$ to T$_2$, and then T$_1$ modifies O$_1$ while T$_2$ modifies O$_2$. 555 554 % Figure~\ref{f:ProgramInducedFalseSharing} shows when Thread$_1$ passes Object$_2$ to Thread$_2$, a false-sharing situation forms when Thread$_1$ modifies Object$_1$ and Thread$_2$ modifies Object$_2$. 556 555 % Changes to Object$_1$ invalidate CPU$_2$'s cache line, and changes to Object$_2$ invalidate CPU$_1$'s cache line. … … 574 573 % \label{f:FalseSharing} 575 574 % \end{figure} 576 \newterm{Allocator-induced active false-sharing}\label{s:AllocatorInducedActiveFalseSharing} occurs when object$_1$ and object$_2$ are heap allocated and their references are passed to thread$_1$ and thread$_2$, which modify the objects.575 \newterm{Allocator-induced active false-sharing}\label{s:AllocatorInducedActiveFalseSharing} occurs when O$_1$ and O$_2$ are heap allocated and their references are passed to T$_1$ and T$_2$, which modify the objects. 577 576 % For example, in Figure~\ref{f:AllocatorInducedActiveFalseSharing}, each thread allocates an object and loads a cache-line of memory into its associated cache. 578 577 % Again, changes to Object$_1$ invalidate CPU$_2$'s cache line, and changes to Object$_2$ invalidate CPU$_1$'s cache line. … … 580 579 % is another form of allocator-induced false-sharing caused by program-induced false-sharing. 581 580 % When an object in a program-induced false-sharing situation is deallocated, a future allocation of that object may cause passive false-sharing. 
582 when thread$_1$ passes object$_2$ to thread$_2$, and thread$_2$ subsequently deallocates object$_2$, and then object$_2$ is reallocated to thread$_2$ while thread$_1$ is still using object$_1$.581 when T$_1$ passes O$_2$ to T$_2$, and T$_2$ subsequently deallocates O$_2$, and then O$_2$ is reallocated to T$_2$ while T$_1$ is still using O$_1$. 583 582 584 583 … … 593 592 \label{s:MultiThreadedMemoryAllocatorFeatures} 594 593 595 The following features are used in the construction of multi-threaded memory-allocators: 596 \begin{enumerate}[itemsep=0pt] 597 \item multiple heaps: with or without a global heap, or with or without heap ownership. 598 \item object containers: with or without ownership, fixed or variable sized, global or local free-lists. 599 \item hybrid private/public heap 600 \item allocation buffer 601 \item lock-free operations 602 \end{enumerate} 594 The following features are used in the construction of multi-threaded memory-allocators: multiple heaps, user-level threading, ownership, object containers, allocation buffer, lock-free operations. 603 595 The first feature, multiple heaps, pertains to different kinds of heaps. 604 596 The second feature, object containers, pertains to the organization of objects within the storage area. … … 606 598 607 599 608 \subs ection{Multiple Heaps}600 \subsubsection{Multiple Heaps} 609 601 \label{s:MultipleHeaps} 610 602 611 603 A multi-threaded allocator has potentially multiple threads and heaps. 612 604 The multiple threads cause complexity, and multiple heaps are a mechanism for dealing with the complexity. 613 The spectrum ranges from multiple threads using a single heap, denoted as T:1 (see Figure~\ref{f:SingleHeap}), to multiple threads sharing multiple heaps, denoted as T:H (see Figure~\ref{f:SharedHeaps}), to one thread per heap, denoted as 1:1 (see Figure~\ref{f:PerThreadHeap}), which is almost back to a single-threaded allocator.605 The spectrum ranges from multiple threads using a single heap, denoted as T:1, to multiple threads sharing multiple heaps, denoted as T:H, to one thread per heap, denoted as 1:1, which is almost back to a single-threaded allocator. 614 606 615 607 \begin{figure} … … 635 627 \end{figure} 636 628 637 \paragraph{T:1 model } where all threads allocate and deallocate objects from one heap.638 Memory is obtained from the freed objects, or reserved memory in the heap, or from the operating system (OS);639 the heap may also return freed memory to the operating system.629 \paragraph{T:1 model (see Figure~\ref{f:SingleHeap})} where all threads allocate and deallocate objects from one heap. 630 Memory is obtained from the freed objects, or reserved memory in the heap, or from the OS; 631 the heap may also return freed memory to the OS. 640 632 The arrows indicate the direction memory conceptually moves for each kind of operation: allocation moves memory along the path from the heap/operating-system to the user application, while deallocation moves memory along the path from the application back to the heap/operating-system. 641 633 To safely handle concurrency, a single lock may be used for all heap operations or fine-grained locking for different operations. 642 634 Regardless, a single heap may be a significant source of contention for programs with a large amount of memory allocation. 
643 635 644 \paragraph{T:H model } where each thread allocates storage from several heaps depending on certain criteria, with the goal of reducing contention by spreading allocations/deallocations across the heaps.636 \paragraph{T:H model (see Figure~\ref{f:SharedHeaps})} where each thread allocates storage from several heaps depending on certain criteria, with the goal of reducing contention by spreading allocations/deallocations across the heaps. 645 637 The decision on when to create a new heap and which heap a thread allocates from depends on the allocator design. 646 638 To determine which heap to access, each thread must point to its associated heap in some way. … … 673 665 An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory. 674 666 Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur. 675 Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area, pushing part of the storage management complexity back to the operating system.667 Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area, pushing part of the storage management complexity back to the OS. 676 668 677 669 % \begin{figure} … … 684 676 Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup. 685 677 The external fragmentation experienced by a program with a single heap is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory. 686 Additionally, objects freed by one heap cannot be reused by other threads without increasing the cost of the memory operations, except indirectly by returning free memory to the operating system, which can be expensive.687 Depending on how the operating system provides dynamic storage to an application, returning storagemay be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.688 In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused.678 Additionally, objects freed by one heap cannot be reused by other threads without increasing the cost of the memory operations, except indirectly by returning free memory to the OS (see Section~\ref{s:Ownership}). 679 Returning storage to the OS may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix. 680 % In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused. 689 681 690 682 Adding a \newterm{global heap} (G) attempts to reduce the cost of obtaining/returning memory among heaps (sharing) by buffering storage within the application address-space. 691 Now, each heap obtains and returns storage to/from the global heap rather than the operating system.683 Now, each heap obtains and returns storage to/from the global heap rather than the OS. 692 684 Storage is obtained from the global heap only when a heap allocation cannot be fulfilled, and returned to the global heap when a heap's free memory exceeds some threshold. 
693 Similarly, the global heap buffers this memory, obtaining and returning storage to/from the operating systemas necessary.685 Similarly, the global heap buffers this memory, obtaining and returning storage to/from the OS as necessary. 694 686 The global heap does not have its own thread and makes no internal allocation requests; 695 687 instead, it uses the application thread, which called one of the multiple heaps and then the global heap, to perform operations. 696 688 Hence, the worst-case cost of a memory operation includes all these steps. 697 With respect to heap blowup, the global heap provides an indirect mechanism to move free memory among heaps, which usually has a much lower cost than interacting with the operating system to achieve the same goal and is independent of the mechanism used by the operating system to present dynamic memory to an address space. 698 689 With respect to heap blowup, the global heap provides an indirect mechanism to move free memory among heaps, which usually has a much lower cost than interacting with the OS to achieve the same goal and is independent of the mechanism used by the OS to present dynamic memory to an address space. 699 690 However, since any thread may indirectly perform a memory operation on the global heap, it is a shared resource that requires locking. 700 691 A single lock can be used to protect the global heap or fine-grained locking can be used to reduce contention. 701 692 In general, the cost is minimal since the majority of memory operations are completed without the use of the global heap. 702 693 703 704 \paragraph{1:1 model (thread heaps)} where each thread has its own heap eliminating most contention and locking because threads seldom access another thread's heap (see ownership in Section~\ref{s:Ownership}). 694 \paragraph{1:1 model (see Figure~\ref{f:PerThreadHeap})} where each thread has its own heap eliminating most contention and locking because threads seldom access another thread's heap (see Section~\ref{s:Ownership}). 705 695 An additional benefit of thread heaps is improved locality due to better memory layout. 706 696 As each thread only allocates from its heap, all objects are consolidated in the storage area for that heap, better utilizing each CPUs cache and accessing fewer pages. … … 708 698 Thread heaps can also eliminate allocator-induced active false-sharing, if memory is acquired so it does not overlap at crucial boundaries with memory for another thread's heap. 709 699 For example, assume page boundaries coincide with cache line boundaries, if a thread heap always acquires pages of memory then no two threads share a page or cache line unless pointers are passed among them. 710 Hence, allocator-induced active false-sharing cannot occur because the memory for thread heaps never overlaps.700 % Hence, allocator-induced active false-sharing cannot occur because the memory for thread heaps never overlaps. 711 701 712 702 When a thread terminates, there are two options for handling its thread heap. … … 720 710 721 711 It is possible to use any of the heap models with user-level (M:N) threading. 
722 However, an important goal of user-level threading is for fast operations (creation/termination/context-switching) by not interacting with the operating system, which allows the ability to create large numbers of high-performance interacting threads ($>$ 10,000).712 However, an important goal of user-level threading is for fast operations (creation/termination/context-switching) by not interacting with the OS, which allows the ability to create large numbers of high-performance interacting threads ($>$ 10,000). 723 713 It is difficult to retain this goal, if the user-threading model is directly involved with the heap model. 724 714 Figure~\ref{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model is provided by the language runtime. … … 732 722 \end{figure} 733 723 734 Adopting this modelresults in a subtle problem with shared heaps.735 With kernel threading, an operation that isstarted by a kernel thread is always completed by that thread.736 For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap even if preempted, \ie any locking correctness associated with the shared heap is preserved across preemption.724 Adopting user threading results in a subtle problem with shared heaps. 725 With kernel threading, an operation started by a kernel thread is always completed by that thread. 726 For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap, even if preempted, \ie any locking correctness associated with the shared heap is preserved across preemption. 737 727 However, this correctness property is not preserved for user-level threading. 738 728 A user thread can start an allocation/deallocation on one kernel thread, be preempted (time slice), and continue running on a different kernel thread to complete the operation~\cite{Dice02}. 739 729 When the user thread continues on the new kernel thread, it may have pointers into the previous kernel-thread's heap and hold locks associated with it. 740 730 To get the same kernel-thread safety, time slicing must be disabled/\-enabled around these operations, so the user thread cannot jump to another kernel thread. 741 However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption does not happen that frequently.731 However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is infrequent (milliseconds). 742 732 Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically. 743 Occasional ly ignoring a preemption should be benign, but a persistent lack of preemption can result in both short and long termstarvation;744 techniques like roll forward can be used to force an eventual preemption.733 Occasional ignoring of a preemption should be benign, but a persistent lack of preemption can result in starvation; 734 techniques like rolling forward the preemption to the next context switch can be used. 745 735 746 736 … … 800 790 % For example, in Figure~\ref{f:AllocatorInducedPassiveFalseSharing}, Object$_2$ may be deallocated to Thread$_2$'s heap initially. 801 791 % If Thread$_2$ reallocates Object$_2$ before it is returned to its owner heap, then passive false-sharing may occur. 
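As a rough illustration of the ownership decision on the deallocation path, and a preview of the private/public split described next, the following hedged C sketch frees an owned object onto the thread's private list without locking, but returns a foreign object to its owner's lock-protected public list. The per-object header, heap layout, and names are hypothetical, not llheap's actual data structures.

#include <pthread.h>

typedef struct node { struct node * next; } node_t;

typedef struct heap {
    node_t * private_list;                          // owner-only free-list, no locking needed
    node_t * public_list;                           // objects returned by other threads
    pthread_mutex_t public_lock;                    // protects only the public list
} heap_t;

typedef struct header { heap_t * owner; } header_t; // per-object header recording the owner heap

static __thread heap_t * my_heap;                   // 1:1 model: one heap per thread, set at thread start

void heap_free( void * p ) {
    node_t * obj = p;
    heap_t * owner = ((header_t *)p)[-1].owner;     // owner recorded in the header preceding the object
    if ( owner == my_heap ) {                       // owned object: lock-free fast path
        obj->next = owner->private_list;
        owner->private_list = obj;
    } else {                                        // foreign object: return to the owner's public list
        pthread_mutex_lock( &owner->public_lock );
        obj->next = owner->public_list;
        owner->public_list = obj;
        pthread_mutex_unlock( &owner->public_lock );
    }
}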
792 793 For thread heaps with ownership, it is possible to combine these approaches into a hybrid approach with both private and public heaps.% (see~Figure~\ref{f:HybridPrivatePublicHeap}). 794 The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup. 795 In the hybrid approach, a thread first allocates from its private heap and second from its public heap if no free memory exists in the private heap. 796 Similarly, a thread first deallocates an object to its private heap, and second to the public heap. 797 Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps. 798 % Note, deallocation from the private to the public (dashed line) is unlikely because there is no obvious advantages unless the public heap provides the only interface to the global heap. 799 Finally, when a thread frees an object it does not own, the object is either freed immediately to its owner's public heap or put in the freeing thread's private heap for delayed ownership, which does allows the freeing thread to temporarily reuse an object before returning it to its owner or batch objects for an owner heap into a single return. 800 801 % \begin{figure} 802 % \centering 803 % \input{PrivatePublicHeaps.pstex_t} 804 % \caption{Hybrid Private/Public Heap for Per-thread Heaps} 805 % \label{f:HybridPrivatePublicHeap} 806 % \vspace{10pt} 807 % \input{RemoteFreeList.pstex_t} 808 % \caption{Remote Free-List} 809 % \label{f:RemoteFreeList} 810 % \end{figure} 811 812 % As mentioned, an implementation may have only one heap interact with the global heap, so the other heap can be simplified. 813 % For example, if only the private heap interacts with the global heap, the public heap can be reduced to a lock-protected free-list of objects deallocated by other threads due to ownership, called a \newterm{remote free-list}. 814 % To avoid heap blowup, the private heap allocates from the remote free-list when it reaches some threshold or it has no free storage. 815 % Since the remote free-list is occasionally cleared during an allocation, this adds to that cost. 816 % Clearing the remote free-list is $O(1)$ if the list can simply be added to the end of the private-heap's free-list, or $O(N)$ if some action must be performed for each freed object. 817 818 % If only the public heap interacts with other threads and the global heap, the private heap can handle thread-local allocations and deallocations without locking. 819 % In this scenario, the private heap must deallocate storage after reaching a certain threshold to the public heap (and then eventually to the global heap from the public heap) or heap blowup can occur. 820 % If the public heap does the major management, the private heap can be simplified to provide high-performance thread-local allocations and deallocations. 821 822 % The main disadvantage of each thread having both a private and public heap is the complexity of managing two heaps and their interactions in an allocator. 823 % Interestingly, heap implementations often focus on either a private or public heap, giving the impression a single versus a hybrid approach is being used. 
824 % In many case, the hybrid approach is actually being used, but the simpler heap is just folded into the complex heap, even though the operations logically belong in separate heaps. 825 % For example, a remote free-list is actually a simple public-heap, but may be implemented as an integral component of the complex private-heap in an allocator, masking the presence of a hybrid approach. 802 826 803 827 … … 817 841 818 842 819 \subs ection{Object Containers}843 \subsubsection{Object Containers} 820 844 \label{s:ObjectContainers} 821 845 … … 827 851 \eg an object is accessed by the program after it is allocated, while the header is accessed by the allocator after it is free. 828 852 829 The alternative factors common header data to a separate location in memory and organizes associated free storage into blocks called \newterm{object containers} (\newterm{superblocks} in~\cite{Berger00}), as in Figure~\ref{f:ObjectContainer}.853 An alternative approach factors common header data to a separate location in memory and organizes associated free storage into blocks called \newterm{object containers} (\newterm{superblocks}~\cite{Berger00}), as in Figure~\ref{f:ObjectContainer}. 830 854 The header for the container holds information necessary for all objects in the container; 831 855 a trailer may also be used at the end of the container. … … 862 886 863 887 864 \ subsubsection{Container Ownership}888 \paragraph{Container Ownership} 865 889 \label{s:ContainerOwnership} 866 890 … … 894 918 895 919 Additional restrictions may be applied to the movement of containers to prevent active false-sharing. 896 For example, if a container changes ownership through the global heap, then when a thread allocates an object from the newly acquired container itis actively false-sharing even though no objects are passed among threads.920 For example, if a container changes ownership through the global heap, then a thread allocating from the newly acquired container is actively false-sharing even though no objects are passed among threads. 897 921 Note, once the thread frees the object, no more false sharing can occur until the container changes ownership again. 898 922 To prevent this form of false sharing, container movement may be restricted to when all objects in the container are free. 899 One implementation approach that increases the freedom to return a free container to the operating system involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area, again pushing storage management complexity back to the operating system.923 One implementation approach that increases the freedom to return a free container to the OS involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area, again pushing storage management complexity back to the OS. 900 924 901 925 % \begin{figure} … … 930 954 931 955 932 \ subsubsection{Container Size}956 \paragraph{Container Size} 933 957 \label{s:ContainerSize} 934 958 … … 941 965 However, with more objects in a container, there may be more objects that are unallocated, increasing external fragmentation. 942 966 With smaller containers, not only are there more containers, but a second new problem arises where objects are larger than the container. 
943 In general, large objects, \eg greater than 64\,KB, are allocated directly from the operating system and are returned immediately to the operating systemto reduce long-term external fragmentation.967 In general, large objects, \eg greater than 64\,KB, are allocated directly from the OS and are returned immediately to the OS to reduce long-term external fragmentation. 944 968 If the container size is small, \eg 1\,KB, then a 1.5\,KB object is treated as a large object, which is likely to be inappropriate. 945 969 Ideally, it is best to use smaller containers for smaller objects, and larger containers for medium objects, which leads to the issue of locating the container header. … … 970 994 971 995 972 \ subsubsection{Container Free-Lists}996 \paragraph{Container Free-Lists} 973 997 \label{s:containersfreelists} 974 998 … … 1005 1029 1006 1030 1007 \subsubsection{Hybrid Private/Public Heap} 1008 \label{s:HybridPrivatePublicHeap} 1009 1010 Section~\ref{s:Ownership} discusses advantages and disadvantages of public heaps (T:H model and with ownership) and private heaps (thread heaps with ownership). 1011 For thread heaps with ownership, it is possible to combine these approaches into a hybrid approach with both private and public heaps (see~Figure~\ref{f:HybridPrivatePublicHeap}). 1012 The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup. 1013 In the hybrid approach, a thread first allocates from its private heap and second from its public heap if no free memory exists in the private heap. 1014 Similarly, a thread first deallocates an object to its private heap, and second to the public heap. 1015 Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps. 1016 Note, deallocation from the private to the public (dashed line) is unlikely because there is no obvious advantages unless the public heap provides the only interface to the global heap. 1017 Finally, when a thread frees an object it does not own, the object is either freed immediately to its owner's public heap or put in the freeing thread's private heap for delayed ownership, which allows the freeing thread to temporarily reuse an object before returning it to its owner or batch objects for an owner heap into a single return. 1018 1019 \begin{figure} 1020 \centering 1021 \input{PrivatePublicHeaps.pstex_t} 1022 \caption{Hybrid Private/Public Heap for Per-thread Heaps} 1023 \label{f:HybridPrivatePublicHeap} 1024 % \vspace{10pt} 1025 % \input{RemoteFreeList.pstex_t} 1026 % \caption{Remote Free-List} 1027 % \label{f:RemoteFreeList} 1028 \end{figure} 1029 1030 As mentioned, an implementation may have only one heap interact with the global heap, so the other heap can be simplified. 1031 For example, if only the private heap interacts with the global heap, the public heap can be reduced to a lock-protected free-list of objects deallocated by other threads due to ownership, called a \newterm{remote free-list}. 1032 To avoid heap blowup, the private heap allocates from the remote free-list when it reaches some threshold or it has no free storage. 1033 Since the remote free-list is occasionally cleared during an allocation, this adds to that cost. 
1034 Clearing the remote free-list is $O(1)$ if the list can simply be added to the end of the private-heap's free-list, or $O(N)$ if some action must be performed for each freed object. 1035 1036 If only the public heap interacts with other threads and the global heap, the private heap can handle thread-local allocations and deallocations without locking. 1037 In this scenario, the private heap must deallocate storage after reaching a certain threshold to the public heap (and then eventually to the global heap from the public heap) or heap blowup can occur. 1038 If the public heap does the major management, the private heap can be simplified to provide high-performance thread-local allocations and deallocations. 1039 1040 The main disadvantage of each thread having both a private and public heap is the complexity of managing two heaps and their interactions in an allocator. 1041 Interestingly, heap implementations often focus on either a private or public heap, giving the impression a single versus a hybrid approach is being used. 1042 In many case, the hybrid approach is actually being used, but the simpler heap is just folded into the complex heap, even though the operations logically belong in separate heaps. 1043 For example, a remote free-list is actually a simple public-heap, but may be implemented as an integral component of the complex private-heap in an allocator, masking the presence of a hybrid approach. 1044 1045 1046 \subsection{Allocation Buffer} 1031 \subsubsection{Allocation Buffer} 1047 1032 \label{s:AllocationBuffer} 1048 1033 1049 1034 An allocation buffer is reserved memory (see Section~\ref{s:AllocatorComponents}) not yet allocated to the program, and is used for allocating objects when the free list is empty. 1050 1035 That is, rather than requesting new storage for a single object, an entire buffer is requested from which multiple objects are allocated later. 1051 Any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or operating system, respectively.1036 Any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or OS, respectively. 1052 1037 The allocation buffer reduces contention and the number of global/operating-system calls. 1053 1038 For coalescing, a buffer is split into smaller objects by allocations, and recomposed into larger buffer areas during deallocations. … … 1062 1047 1063 1048 Allocation buffers may increase external fragmentation, since some memory in the allocation buffer may never be allocated. 1064 A smaller allocation buffer reduces the amount of external fragmentation, but increases the number of calls to the global heap or operating system.1049 A smaller allocation buffer reduces the amount of external fragmentation, but increases the number of calls to the global heap or OS. 1065 1050 The allocation buffer also slightly increases internal fragmentation, since a pointer is necessary to locate the next free object in the buffer. 1066 1051 … … 1068 1053 For example, when a container is created, rather than placing all objects within the container on the free list, the objects form an allocation buffer and are allocated from the buffer as allocation requests are made. 1069 1054 This lazy method of constructing objects is beneficial in terms of paging and caching. 
1070 For example, although an entire container, possibly spanning several pages, is allocated from the operating system, only a small part of the container is used in the working set of the allocator, reducing the number of pages and cache lines that are brought into higher levels of cache.1071 1072 1073 \subs ection{Lock-Free Operations}1055 For example, although an entire container, possibly spanning several pages, is allocated from the OS, only a small part of the container is used in the working set of the allocator, reducing the number of pages and cache lines that are brought into higher levels of cache. 1056 1057 1058 \subsubsection{Lock-Free Operations} 1074 1059 \label{s:LockFreeOperations} 1075 1060 … … 1194 1179 % A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable} 1195 1180 % \end{quote} 1196 % If a KT is preempted during an allocation operation, the operating systemcan schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.1181 % If a KT is preempted during an allocation operation, the OS can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness. 1197 1182 % Note, the serially-reusable problem can occur in sequential programs with preemption, if the signal handler calls the preempted function, unless the function is serially reusable. 1198 % Essentially, the serially-reusable problem is a race condition on an unprotected critical subsection, where the operating systemis providing the second thread via the signal handler.1183 % Essentially, the serially-reusable problem is a race condition on an unprotected critical subsection, where the OS is providing the second thread via the signal handler. 1199 1184 % 1200 1185 % Library @librseq@~\cite{librseq} was used to perform a fast determination of the CPU and to ensure all memory operations complete on one CPU using @librseq@'s restartable sequences, which restart the critical subsection after undoing its writes, if the critical subsection is preempted. … … 1256 1241 A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable} 1257 1242 \end{quote} 1258 If a KT is preempted during an allocation operation, the operating systemcan schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.1243 If a KT is preempted during an allocation operation, the OS can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness. 1259 1244 Note, the serially-reusable problem can occur in sequential programs with preemption, if the signal handler calls the preempted function, unless the function is serially reusable. 
1260 Essentially, the serially-reusable problem is a race condition on an unprotected critical subsection, where the operating systemis providing the second thread via the signal handler.1245 Essentially, the serially-reusable problem is a race condition on an unprotected critical subsection, where the OS is providing the second thread via the signal handler. 1261 1246 1262 1247 Library @librseq@~\cite{librseq} was used to perform a fast determination of the CPU and to ensure all memory operations complete on one CPU using @librseq@'s restartable sequences, which restart the critical subsection after undoing its writes, if the critical subsection is preempted. … … 1273 1258 For the T:H=CPU and 1:1 models, locking is eliminated along the allocation fastpath. 1274 1259 However, T:H=CPU has poor operating-system support to determine the CPU id (heap id) and prevent the serially-reusable problem for KTs. 1275 More operating systemsupport is required to make this model viable, but there is still the serially-reusable problem with user-level threading.1260 More OS support is required to make this model viable, but there is still the serially-reusable problem with user-level threading. 1276 1261 So the 1:1 model had no atomic actions along the fastpath and no special operating-system support requirements. 1277 1262 The 1:1 model still has the serially-reusable problem with user-level threading, which is addressed in Section~\ref{s:UserlevelThreadingSupport}, and the greatest potential for heap blowup for certain allocation patterns. … … 1308 1293 A primary goal of llheap is low latency, hence the name low-latency heap (llheap). 1309 1294 Two forms of latency are internal and external. 1310 Internal latency is the time to perform an allocation, while external latency is time to obtain/return storage from/to the operating system.1295 Internal latency is the time to perform an allocation, while external latency is time to obtain/return storage from/to the OS. 1311 1296 Ideally latency is $O(1)$ with a small constant. 1312 1297 … … 1314 1299 The mitigating factor is that most programs have well behaved allocation patterns, where the majority of allocation operations can be $O(1)$, and heap blowup does not occur without coalescing (although the allocation footprint may be slightly larger). 1315 1300 1316 To obtain $O(1)$ external latency means obtaining one large storage area from the operating systemand subdividing it across all program allocations, which requires a good guess at the program storage high-watermark and potential large external fragmentation.1301 To obtain $O(1)$ external latency means obtaining one large storage area from the OS and subdividing it across all program allocations, which requires a good guess at the program storage high-watermark and potential large external fragmentation. 1317 1302 Excluding real-time operating-systems, operating-system operations are unbounded, and hence some external latency is unavoidable. 1318 1303 The mitigating factor is that operating-system calls can often be reduced if a programmer has a sense of the storage high-watermark and the allocator is capable of using this information (see @malloc_expansion@ \pageref{p:malloc_expansion}). 
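To illustrate the O(1) external-latency point above, here is a hedged C sketch that reserves one large region from the OS with a single mmap call and then bump-allocates from it, so the only unbounded OS interaction happens once at start-up. The 1 GB reservation is an arbitrary stand-in for a program's storage high-watermark, and the sketch is single-threaded with no reuse or coalescing.

#define _DEFAULT_SOURCE
#include <sys/mman.h>
#include <stddef.h>

#define RESERVE_SIZE (1UL << 30)                   // 1 GB guess at the storage high-watermark

static char * reserve_next, * reserve_end;

// One unbounded OS call up front; afterwards every allocation is a bounded pointer bump.
int reserve_init( void ) {
    char * base = mmap( NULL, RESERVE_SIZE, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
    if ( base == MAP_FAILED ) return -1;
    reserve_next = base;
    reserve_end = base + RESERVE_SIZE;
    return 0;
}

// O(1) internal and external latency, but no reuse or coalescing:
// if the high-watermark guess is too small, allocation simply fails.
void * reserve_alloc( size_t size ) {
    size = ( size + 15 ) & ~(size_t)15;            // keep 16-byte alignment
    if ( size > (size_t)( reserve_end - reserve_next ) ) return NULL;
    void * p = reserve_next;
    reserve_next += size;
    return p;
}

A real multi-threaded allocator would advance the bump pointer with an atomic operation or per-heap pointers; the point here is only that the expensive OS call is paid once.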
… … 1329 1314 headers per allocation versus containers, 1330 1315 no coalescing to minimize latency, 1331 global heap memory (pool) obtained from the operating systemusing @mmap@ to create and reuse heaps needed by threads,1316 global heap memory (pool) obtained from the OS using @mmap@ to create and reuse heaps needed by threads, 1332 1317 local reserved memory (pool) per heap obtained from global pool, 1333 global reserved memory (pool) obtained from the operating systemusing @sbrk@ call,1318 global reserved memory (pool) obtained from the OS using @sbrk@ call, 1334 1319 optional fast-lookup table for converting allocation requests into bucket sizes, 1335 1320 optional statistic-counters table for accumulating counts of allocation operations. … … 1358 1343 Each heap uses segregated free-buckets that have free objects distributed across 91 different sizes from 16 to 4M. 1359 1344 All objects in a bucket are of the same size. 1360 The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the operating system.1345 The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the OS. 1361 1346 Each free bucket of a specific size has two lists. 1362 1347 1) A free stack used solely by the KT heap-owner, so push/pop operations do not require locking. … … 1367 1352 Algorithm~\ref{alg:heapObjectAlloc} shows the allocation outline for an object of size $S$. 1368 1353 First, the allocation is divided into small (@sbrk@) or large (@mmap@). 1369 For large allocations, the storage is mapped directly from the operating system.1354 For large allocations, the storage is mapped directly from the OS. 1370 1355 For small allocations, $S$ is quantized into a bucket size. 1371 1356 Quantizing is performed using a binary search over the ordered bucket array. … … 1378 1363 heap's local pool, 1379 1364 global pool, 1380 operating system(@sbrk@).1365 OS (@sbrk@). 1381 1366 1382 1367 \begin{algorithm} … … 1443 1428 Algorithm~\ref{alg:heapObjectFreeOwn} shows the de-allocation (free) outline for an object at address $A$ with ownership. 1444 1429 First, the address is divided into small (@sbrk@) or large (@mmap@). 1445 For large allocations, the storage is unmapped back to the operating system.1430 For large allocations, the storage is unmapped back to the OS. 1446 1431 For small allocations, the bucket associated with the request size is retrieved. 1447 1432 If the bucket is local to the thread, the allocation is pushed onto the thread's associated bucket. … … 3044 3029 3045 3030 \textsf{pt3} is the only memory allocator where the total dynamic memory goes down in the second half of the program lifetime when the memory is freed by the benchmark program. 3046 It makes pt3 the only memory allocator that gives memory back to the operating systemas it is freed by the program.3031 It makes pt3 the only memory allocator that gives memory back to the OS as it is freed by the program. 3047 3032 3048 3033 % FOR 1 THREAD -
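Returning to the bucket quantization step in the allocation outline above, the idea can be pictured with a small C sketch: a sorted array of bucket sizes is binary-searched for the first bucket that fits the request, and anything above the largest bucket is treated as a large (mmap) allocation. The bucket values below are made-up placeholders, not llheap's actual table of 91 sizes.

#include <stddef.h>

// Illustrative bucket sizes (sorted); the real table has many more entries, from 16 bytes to 4M.
static const size_t bucket_sizes[] = {
    16, 32, 48, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096
};
enum { NBUCKETS = sizeof(bucket_sizes) / sizeof(bucket_sizes[0]) };

// Return the index of the smallest bucket >= size, or -1 if the request is "large"
// and should go directly to the OS (mmap) instead of a bucket.
int size_to_bucket( size_t size ) {
    if ( size > bucket_sizes[NBUCKETS - 1] ) return -1;
    int low = 0, high = NBUCKETS - 1;
    while ( low < high ) {                         // binary search for the first bucket that fits
        int mid = ( low + high ) / 2;
        if ( bucket_sizes[mid] < size ) low = mid + 1;
        else high = mid;
    }
    return low;                                    // bucket_sizes[low] >= size
}

For example, size_to_bucket( 70 ) returns the index of the 96-byte bucket in this toy table, so a 70-byte request is rounded up to 96 bytes.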
doc/papers/llheap/figures/AllocatorComponents.fig
r2b78949 r8a930c03 8 8 -2 9 9 1200 2 10 6 1275 2025 2700 262511 10 6 2400 2025 2700 2625 12 11 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 … … 14 13 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 15 14 2700 2025 2700 2325 2400 2325 2400 2025 2700 2025 16 -617 4 2 0 50 -1 2 11 0.0000 2 165 1005 2325 2400 Management\00118 15 -6 19 16 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 … … 61 58 2 2 0 1 0 7 60 -1 13 0.000 0 0 -1 0 0 5 62 59 3300 2700 6300 2700 6300 3000 3300 3000 3300 2700 63 4 0 0 50 -1 2 11 0.0000 2 165 585 3300 1725 Storage\00160 4 0 0 50 -1 2 11 0.0000 2 165 1005 3300 1725 Storage Data\001 64 61 4 2 0 50 -1 0 11 0.0000 2 165 810 3000 1875 free objects\001 65 62 4 2 0 50 -1 0 11 0.0000 2 135 1140 3000 2850 reserve memory\001 66 63 4 1 0 50 -1 0 11 0.0000 2 120 795 2325 1500 Static Zone\001 67 64 4 1 0 50 -1 0 11 0.0000 2 165 1845 4800 1500 Dynamic-Allocation Zone\001 65 4 2 0 50 -1 2 11 0.0000 2 165 1005 2325 2325 Management\001 66 4 2 0 50 -1 2 11 0.0000 2 135 375 2325 2525 Data\001 -
doc/theses/colby_parsons_MMAth/Makefile
r2b78949 r8a930c03
98 98
99 99 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${DATA} \
100 style/style.tex ${Macros}/common.tex ${Macros}/indexstyle local.bib ../../bibliography/pl.bib | ${Build}
100 glossary.tex style/style.tex ${Macros}/common.tex ${Macros}/indexstyle local.bib ../../bibliography/pl.bib | ${Build}
101 101 # Must have *.aux file containing citations for bibtex
102 102 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/balance.cfa
r2b78949 r8a930c03
31 31
32 32 d_actor ** actor_arr;
33 Allocation receive( d_actor & this, start_msg & msg ) with( this ) {
33 allocation receive( d_actor & this, start_msg & msg ) with( this ) {
34 34 for ( i; Set ) {
35 35 *actor_arr[i + gstart] << shared_msg;
… …
38 38 }
39 39
40 Allocation receive( d_actor & this, d_msg & msg ) with( this ) {
40 allocation receive( d_actor & this, d_msg & msg ) with( this ) {
41 41 if ( recs == rounds ) return Delete;
42 42 if ( recs % Batch == 0 ) {
… …
50 50 }
51 51
52 Allocation receive( filler & this, d_msg & msg ) { return Delete; }
52 allocation receive( filler & this, d_msg & msg ) { return Delete; }
53 53
54 54 int main( int argc, char * argv[] ) {
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/dynamic.cfa
r2b78949 r8a930c03
24 24
25 25 uint64_t start_time;
26 Allocation receive( derived_actor & receiver, derived_msg & msg ) {
26 allocation receive( derived_actor & receiver, derived_msg & msg ) {
27 27 if ( msg.cnt >= Times ) {
28 28 printf("%.2f\n", ((double)(bench_time() - start_time)) / ((double)Times) ); // ns
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/executor.cfa
r2b78949 r8a930c03
25 25 struct d_msg { inline message; } shared_msg;
26 26
27 Allocation receive( d_actor & this, d_msg & msg ) with( this ) {
27 allocation receive( d_actor & this, d_msg & msg ) with( this ) {
28 28 if ( recs == rounds ) return Finished;
29 29 if ( recs % Batch == 0 ) {
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/matrix.cfa
r2b78949 r8a930c03
24 24 }
25 25
26 Allocation receive( derived_actor & receiver, derived_msg & msg ) {
26 allocation receive( derived_actor & receiver, derived_msg & msg ) {
27 27 for ( unsigned int i = 0; i < yc; i += 1 ) { // multiply X_row by Y_col and sum products
28 28 msg.Z[i] = 0;
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/repeat.cfa
r2b78949 r8a930c03
46 46
47 47 Client * cl;
48 Allocation receive( Server & this, IntMsg & msg ) { msg.val = 7; *cl << msg; return Nodelete; }
49 Allocation receive( Server & this, CharMsg & msg ) { msg.val = 'x'; *cl << msg; return Nodelete; }
50 Allocation receive( Server & this, StateMsg & msg ) { return Finished; }
48 allocation receive( Server & this, IntMsg & msg ) { msg.val = 7; *cl << msg; return Nodelete; }
49 allocation receive( Server & this, CharMsg & msg ) { msg.val = 'x'; *cl << msg; return Nodelete; }
50 allocation receive( Server & this, StateMsg & msg ) { return Finished; }
51 51
52 52 void terminateServers( Client & this ) with(this) {
… …
56 56 }
57 57
58 Allocation reset( Client & this ) with(this) {
58 allocation reset( Client & this ) with(this) {
59 59 times += 1;
60 60 if ( times == Times ) { terminateServers( this ); return Finished; }
… …
64 64 }
65 65
66 Allocation process( Client & this ) with(this) {
66 allocation process( Client & this ) with(this) {
67 67 this.results++;
68 68 if ( results == 2 * Messages ) { return reset( this ); }
… …
70 70 }
71 71
72 Allocation receive( Client & this, IntMsg & msg ) { return process( this ); }
73 Allocation receive( Client & this, CharMsg & msg ) { return process( this ); }
74 Allocation receive( Client & this, StateMsg & msg ) with(this) {
72 allocation receive( Client & this, IntMsg & msg ) { return process( this ); }
73 allocation receive( Client & this, CharMsg & msg ) { return process( this ); }
74 allocation receive( Client & this, StateMsg & msg ) with(this) {
75 75 for ( i; Messages ) {
76 76 servers[i] << intmsg[i];
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/static.cfa
r2b78949 r8a930c03
23 23
24 24 uint64_t start_time;
25 Allocation receive( derived_actor & receiver, derived_msg & msg ) {
25 allocation receive( derived_actor & receiver, derived_msg & msg ) {
26 26 if ( msg.cnt >= Times ) {
27 27 printf("%.2f\n", ((double)(bench_time() - start_time)) / ((double)Times) ); // ns
doc/theses/colby_parsons_MMAth/benchmarks/actors/plotData.py
r2b78949 r8a930c03
160 160
161 161 if currVariant == numVariants:
162 fig, ax = plt.subplots( )
162 fig, ax = plt.subplots(layout='constrained')
163 163 plt.title(name + " Benchmark")
164 164 plt.ylabel("Runtime (seconds)")
doc/theses/colby_parsons_MMAth/benchmarks/channels/plotData.py
r2b78949 r8a930c03
124 124
125 125 if currVariant == numVariants:
126 fig, ax = plt.subplots( )
126 fig, ax = plt.subplots(layout='constrained')
127 127 plt.title(name + " Benchmark")
128 128 plt.ylabel("Throughput (channel operations)")
doc/theses/colby_parsons_MMAth/benchmarks/mutex_stmt/plotData.py
r2b78949 r8a930c03
97 97
98 98 if currVariant == numVariants:
99 fig, ax = plt.subplots( )
99 fig, ax = plt.subplots(layout='constrained')
100 100 plt.title(name + " Benchmark: " + str(currLocks) + " Locks")
101 101 plt.ylabel("Throughput (entries)")
doc/theses/colby_parsons_MMAth/code/basic_actor_example.cfa
r2b78949 r8a930c03
19 19 }
20 20
21 Allocation receive( derived_actor & receiver, derived_msg & msg ) {
21 allocation receive( derived_actor & receiver, derived_msg & msg ) {
22 22 printf("The message contained the string: %s\n", msg.word);
23 23 return Finished; // Return allocation status of Finished now that the actor is done work
doc/theses/colby_parsons_MMAth/glossary.tex
r2b78949 r8a930c03
32 32 % Examples from template above
33 33
34 \newabbreviation{raii}{RAII}{ Resource Acquisition Is Initialization}
35 \newabbreviation{rtti}{RTTI}{ Run-Time Type Information}
36 \newabbreviation{fcfs}{FCFS}{ First Come First Served}
37 \newabbreviation{toctou}{TOCTOU}{ time-of-check to time-of-use}
34 \newabbreviation{raii}{RAII}{\Newterm{resource acquisition is initialization}}
35 \newabbreviation{rtti}{RTTI}{\Newterm{run-time type information}}
36 \newabbreviation{fcfs}{FCFS}{\Newterm{first-come first-served}}
37 \newabbreviation{toctou}{TOCTOU}{\Newterm{time-of-check to time-of-use}}
38 38
39 39 \newglossaryentry{actor}
doc/theses/colby_parsons_MMAth/local.bib
r2b78949 r8a930c03 95 95 @misc{go:select, 96 96 author = "The Go Programming Language", 97 title = "src/runtime/ chan.go",97 title = "src/runtime/select.go", 98 98 howpublished = {\href{https://go.dev/src/runtime/select.go}}, 99 99 note = "[Online; accessed 23-May-2023]" 100 100 } 101 101 102 @misc{go:selectref, 103 author = "The Go Programming Language Specification", 104 title = "Select statements", 105 howpublished = {\href{https://go.dev/ref/spec#Select\_statements}}, 106 note = "[Online; accessed 23-May-2023]" 107 } 108 109 @misc{boost:channel, 110 author = "Boost C++ Libraries", 111 title = "experimental::basic\_concurrent\_channel", 112 howpublished = {\href{https://www.boost.org/doc/libs/master/doc/html/boost\_asio/reference/experimental\__basic\_concurrent\_channel.html}}, 113 note = "[Online; accessed 23-May-2023]" 114 } 115 116 @misc{rust:channel, 117 author = "The Rust Standard Library", 118 title = "std::sync::mpsc::sync\_channel", 119 howpublished = {\href{https://doc.rust-lang.org/std/sync/mpsc/fn.sync\_channel.html}}, 120 note = "[Online; accessed 23-May-2023]" 121 } 122 123 @misc{rust:select, 124 author = "The Rust Standard Library", 125 title = "Macro futures::select", 126 howpublished = {\href{https://docs.rs/futures/latest/futures/macro.select.html}}, 127 note = "[Online; accessed 23-May-2023]" 128 } 129 130 @misc{ocaml:channel, 131 author = "The OCaml Manual", 132 title = "OCaml library : Event", 133 howpublished = {\href{https://v2.ocaml.org/api/Event.html}}, 134 note = "[Online; accessed 23-May-2023]" 135 } 136 137 @misc{haskell:channel, 138 author = "The Haskell Package Repository", 139 title = "Control.Concurrent.Chan", 140 howpublished = {\href{https://hackage.haskell.org/package/base-4.18.0.0/docs/Control-Concurrent-Chan.html}}, 141 note = "[Online; accessed 23-May-2023]" 142 } 143 144 @misc{linux:select, 145 author = "Linux man pages", 146 title = "select(2) — Linux manual page", 147 howpublished = {\href{https://man7.org/linux/man-pages/man2/select.2.html}}, 148 note = "[Online; accessed 23-May-2023]" 149 } 150 151 @misc{linux:poll, 152 author = "Linux man pages", 153 title = "poll(2) — Linux manual page", 154 howpublished = {\href{https://man7.org/linux/man-pages/man2/poll.2.html}}, 155 note = "[Online; accessed 23-May-2023]" 156 } 157 158 @misc{linux:epoll, 159 author = "Linux man pages", 160 title = "epoll(7) — Linux manual page", 161 howpublished = {\href{https://man7.org/linux/man-pages/man7/epoll.7.html}}, 162 note = "[Online; accessed 23-May-2023]" 163 } 164 165 @article{Ichbiah79, 166 title={Preliminary Ada reference manual}, 167 author={Ichbiah, Jean D}, 168 journal={ACM Sigplan Notices}, 169 volume={14}, 170 number={6a}, 171 pages={1--145}, 172 year={1979}, 173 publisher={ACM New York, NY, USA} 174 } 175 176 @misc{cpp:whenany, 177 author = "C++ reference", 178 title = "std::experimental::when\_any", 179 howpublished = {\href{https://en.cppreference.com/w/cpp/experimental/when\_any}}, 180 note = "[Online; accessed 23-May-2023]" 181 } 182 183 184 -
doc/theses/colby_parsons_MMAth/style/style.tex
r2b78949 r8a930c03 15 15 \newsavebox{\myboxB} 16 16 17 \lstnewenvironment{Golang}[1][] 18 {\lstset{language=Go,literate={<-}{\makebox[2ex][c]{\textless\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}}2, 19 moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}} 20 {} 21 17 22 \lstnewenvironment{java}[1][] 18 23 {\lstset{language=java,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}} -
doc/theses/colby_parsons_MMAth/text/channels.tex
r2b78949 r8a930c03 17 17 Additionally all channel operations in CSP are synchronous (no buffering). 18 18 Advanced channels as a programming language feature has been popularized in recent years by the language Go~\cite{Go}, which encourages the use of channels as its fundamental concurrent feature. 19 It was the popularity of Go channels that lead to their implement ion in \CFA.19 It was the popularity of Go channels that lead to their implementation in \CFA. 20 20 Neither Go nor \CFA channels have the restrictions of the early channel-based concurrent systems. 21 22 Other popular languages and libraries that provide channels include C++ Boost~\cite{boost:channel}, Rust~\cite{rust:channel}, Haskell~\cite{haskell:channel}, and OCaml~\cite{ocaml:channel}. 23 Boost channels only support asynchronous (non-blocking) operations, and Rust channels are limited to only having one consumer per channel. 24 Haskell channels are unbounded in size, and OCaml channels are zero-size. 25 These restrictions in Haskell and OCaml are likely due to their functional approach, which results in them both using a list as the underlying data structure for their channel. 26 These languages and libraries are not discussed further, as their channel implementation is not comparable to the bounded-buffer style channels present in Go and \CFA. 21 27 22 28 \section{Producer-Consumer Problem} … … 61 67 \section{Channel Implementation} 62 68 Currently, only the Go programming language provides user-level threading where the primary communication mechanism is channels. 63 Experiments were conducted that varied the producer-consumer problemalgorithm and lock type used inside the channel.69 Experiments were conducted that varied the producer-consumer algorithm and lock type used inside the channel. 64 70 With the exception of non-\gls{fcfs} or non-FIFO algorithms, no algorithm or lock usage in the channel implementation was found to be consistently more performant that Go's choice of algorithm and lock implementation. 65 71 Performance of channels can be improved by sharding the underlying buffer \cite{Dice11}. 66 In doing so the FIFO property is lost, which is undesireable for user-facing channels.72 However, the FIFO property is lost, which is undesirable for user-facing channels. 67 73 Therefore, the low-level channel implementation in \CFA is largely copied from the Go implementation, but adapted to the \CFA type and runtime systems. 68 74 As such the research contributions added by \CFA's channel implementation lie in the realm of safety and productivity features. 69 75 70 The Go channel implementation utilitizes cooperation between threads to achieve good performance~\cite{go:chan}. 71 The cooperation between threads only occurs when producers or consumers need to block due to the buffer being full or empty. 72 In these cases the blocking thread stores their relevant data in a shared location and the signalling thread will complete their operation before waking them. 73 This helps improve performance in a few ways. 74 First, each thread interacting with the channel with only acquire and release the internal channel lock exactly once. 75 This decreases contention on the internal lock, as only entering threads will compete for the lock since signalled threads never reacquire the lock. 76 The other advantage of the cooperation approach is that it eliminates the potential bottleneck of waiting for signalled threads. 
77 The property of acquiring/releasing the lock only once can be achieved without cooperation by \Newterm{baton passing} the lock. 78 Baton passing is when one thread acquires a lock but does not release it, and instead signals a thread inside the critical section conceptually "passing" the mutual exclusion to the signalled thread. 79 While baton passing is useful in some algorithms, it results in worse performance than the cooperation approach in channel implementations since all entering threads then need to wait for the blocked thread to reach the front of the ready queue and run before other operations on the channel can proceed. 76 The Go channel implementation utilizes cooperation among threads to achieve good performance~\cite{go:chan}. 77 This cooperation only occurs when producers or consumers need to block due to the buffer being full or empty. 78 In these cases, a blocking thread stores their relevant data in a shared location and the signalling thread completes the blocking thread's operation before waking them; 79 \ie the blocking thread has no work to perform after it unblocks because the signalling threads has done this work. 80 This approach is similar to wait morphing for locks~\cite[p.~82]{Butenhof97} and improves performance in a few ways. 81 First, each thread interacting with the channel only acquires and releases the internal channel lock once. 82 As a result, contention on the internal lock is decreased, as only entering threads compete for the lock as unblocking threads do not reacquire the lock. 83 The other advantage of Go's wait-morphing approach is that it eliminates the bottleneck of waiting for signalled threads to run. 84 Note, the property of acquiring/releasing the lock only once can also be achieved with a different form of cooperation, called \Newterm{baton passing}. 85 Baton passing occurs when one thread acquires a lock but does not release it, and instead signals a thread inside the critical section, conceptually ``passing'' the mutual exclusion from the signalling thread to the signalled thread. 86 The baton-passing approach has threads cooperate to pass mutual exclusion without additional lock acquires or releases; 87 the wait-morphing approach has threads cooperate by completing the signalled thread's operation, thus removing a signalled thread's need for mutual exclusion after unblocking. 88 While baton passing is useful in some algorithms, it results in worse channel performance than the Go approach. 89 In the baton-passing approach, all threads need to wait for the signalled thread to reach the front of the ready queue, context switch, and run before other operations on the channel can proceed, since the signalled thread holds mutual exclusion; 90 in the wait-morphing approach, since the operation is completed before the signal, other threads can continue to operate on the channel without waiting for the signalled thread to run. 80 91 81 92 In this work, all channel sizes \see{Sections~\ref{s:ChannelSize}} are implemented with bounded buffers. … … 100 111 \subsection{Toggle-able Statistics} 101 112 As discussed, a channel is a concurrent layer over a bounded buffer. 
102 To achieve efficient buffering users should aim for as few blocking operations on a channel as possible.103 Often to achieve this users maychange the buffer size, shard a channel into multiple channels, or tweak the number of producer and consumer threads.104 Fo users to be able to make informed decisions when tuning channel usage, toggle-able channel statistics are provided.105 The statistics are toggled at compile time via the @CHAN_STATS@ macro to ensure that they are entirely elided when not used.106 When statistics are turned on, four counters are maintained per channel, two for producers and two for consumers.113 To achieve efficient buffering, users should aim for as few blocking operations on a channel as possible. 114 Mechanisms to reduce blocking are: change the buffer size, shard a channel into multiple channels, or tweak the number of producer and consumer threads. 115 For users to be able to make informed decisions when tuning channel usage, toggle-able channel statistics are provided. 116 The statistics are toggled on during the \CFA build by defining the @CHAN_STATS@ macro, which guarantees zero cost when not using this feature. 117 When statistics are turned on, four counters are maintained per channel, two for inserting (producers) and two for removing (consumers). 107 118 The two counters per type of operation track the number of blocking operations and total operations. 108 In the channel destructor the counters are printed out aggregated and also per type of operation. 109 An example use case of the counters follows. 110 A user is buffering information between producer and consumer threads and wants to analyze channel performance. 111 Via the statistics they see that producers block for a large percentage of their operations while consumers do not block often. 112 They then can use this information to adjust their number of producers/consumers or channel size to achieve a larger percentage of non-blocking producer operations, thus increasing their channel throughput. 119 In the channel destructor, the counters are printed out aggregated and also per type of operation. 120 An example use case is noting that producer inserts are blocking often while consumer removes do not block often. 121 This information can be used to increase the number of consumers to decrease the blocking producer operations, thus increasing the channel throughput. 122 Whereas, increasing the channel size in this scenario is unlikely to produce a benefit because the consumers can never keep up with the producers. 113 123 114 124 \subsection{Deadlock Detection} 115 The deadlock detection in the \CFA channels is fairly basic. 116 It only detects the case where threads are blocked on the channel during deallocation. 117 This case is guaranteed to deadlock since the list holding the blocked thread is internal to the channel and will be deallocated. 118 If a user maintained a separate reference to a thread and unparked it outside the channel they could avoid the deadlock, but would run into other runtime errors since the thread would access channel data after waking that is now deallocated. 119 More robust deadlock detection surrounding channel usage would have to be implemented separate from the channel implementation since it would require knowledge about the threading system and other channel/thread state. 125 The deadlock detection in the \CFA channels is fairly basic but detects a very common channel mistake during termination. 
126 That is, it detects the case where threads are blocked on the channel during channel deallocation. 127 This case is guaranteed to deadlock since there are no other threads to supply or consume values needed by the waiting threads. 128 Only if a user maintained a separate reference to the blocked threads and manually unblocks them outside the channel could the deadlock be avoid. 129 However, without special semantics, this unblocking would generate other runtime errors where the unblocked thread attempts to access non-existing channel data or even a deallocated channel. 130 More robust deadlock detection needs to be implemented separate from channels since it requires knowledge about the threading system and other channel/thread state. 120 131 121 132 \subsection{Program Shutdown} 122 133 Terminating concurrent programs is often one of the most difficult parts of writing concurrent code, particularly if graceful termination is needed. 123 The difficulty of graceful termination often arises from the usage ofsynchronization primitives that need to be handled carefully during shutdown.134 Graceful termination can be difficult to achieve with synchronization primitives that need to be handled carefully during shutdown. 124 135 It is easy to deadlock during termination if threads are left behind on synchronization primitives. 125 136 Additionally, most synchronization primitives are prone to \gls{toctou} issues where there is race between one thread checking the state of a concurrent object and another thread changing the state. 126 137 \gls{toctou} issues with synchronization primitives often involve a race between one thread checking the primitive for blocked threads and another thread blocking on it. 127 138 Channels are a particularly hard synchronization primitive to terminate since both sending and receiving to/from a channel can block. 128 Thus, improperly handled \gls{toctou} issues with channels often result in deadlocks as threads trying to perform the termination may end up unexpectedly blocking in their attempt to help other threads exit the system. 129 130 \paragraph{Go channels} provide a set of tools to help with concurrent shutdown~\cite{go:chan}. 131 Channels in Go have a @close@ operation and a \Go{select} statement that both can be used to help threads terminate. 139 Thus, improperly handled \gls{toctou} issues with channels often result in deadlocks as threads performing the termination may end up unexpectedly blocking in their attempt to help other threads exit the system. 140 141 \paragraph{Go channels} provide a set of tools to help with concurrent shutdown~\cite{go:chan} using a @close@ operation in conjunction with the \Go{select} statement. 132 142 The \Go{select} statement is discussed in \ref{s:waituntil}, where \CFA's @waituntil@ statement is compared with the Go \Go{select} statement. 133 143 … … 143 153 Note, panics in Go can be caught, but it is not the idiomatic way to write Go programs. 144 154 145 While Go's channel closing semantics are powerful enough to perform any concurrent termination needed by a program, their lack of ease of use leaves much to be desired.155 While Go's channel-closing semantics are powerful enough to perform any concurrent termination needed by a program, their lack of ease of use leaves much to be desired. 146 156 Since both closing and sending panic once a channel is closed, a user often has to synchronize the senders (producers) before the channel can be closed to avoid panics. 
147 157 However, in doing so it renders the @close@ operation nearly useless, as the only utilities it provides are the ability to ensure receivers no longer block on the channel and receive zero-valued elements. 148 158 This functionality is only useful if the zero-typed element is recognized as a sentinel value, but if another sentinel value is necessary, then @close@ only provides the non-blocking feature. 149 159 To avoid \gls{toctou} issues during shutdown, a busy wait with a \Go{select} statement is often used to add or remove elements from a channel. 150 Due to Go's asymmetric approach to channel shutdown, separate synchronization between producers and consumers of a channel has to occur during shutdown.160 Hence, due to Go's asymmetric approach to channel shutdown, separate synchronization between producers and consumers of a channel has to occur during shutdown. 151 161 152 162 \paragraph{\CFA channels} have access to an extensive exception handling mechanism~\cite{Beach21}. … … 161 171 When a channel in \CFA is closed, all subsequent calls to the channel raise a resumption exception at the caller. 162 172 If the resumption is handled, the caller attempts to complete the channel operation. 163 However, if channel operation would block, a termination exception is thrown.173 However, if the channel operation would block, a termination exception is thrown. 164 174 If the resumption is not handled, the exception is rethrown as a termination. 165 175 These termination exceptions allow for non-local transfer that is used to great effect to eagerly and gracefully shut down a thread. 166 176 When a channel is closed, if there are any blocked producers or consumers inside the channel, they are woken up and also have a resumption thrown at them. 167 The resumption exception, @channel_closed@, has a couple fields to aid in handling the exception. 168 The exception contains a pointer to the channel it was thrown from, and a pointer to an element. 169 In exceptions thrown from remove the element pointer will be null. 170 In the case of insert the element pointer points to the element that the thread attempted to insert. 177 The resumption exception, @channel_closed@, has internal fields to aid in handling the exception. 178 The exception contains a pointer to the channel it is thrown from and a pointer to a buffer element. 179 For exceptions thrown from @remove@, the buffer element pointer is null. 180 For exceptions thrown from @insert@, the element pointer points to the buffer element that the thread attempted to insert. 181 Utility routines @bool is_insert( channel_closed & e );@ and @bool is_remove( channel_closed & e );@ are provided for convenient checking of the element pointer. 171 182 This element pointer allows the handler to know which operation failed and also allows the element to not be lost on a failed insert since it can be moved elsewhere in the handler. 172 Furthermore, due to \CFA's powerful exception system, this data can be used to choose handlers based which channel and operation failed. 173 Exception handlers in \CFA have an optional predicate after the exception type which can be used to optionally trigger or skip handlers based on the content of an exception. 174 It is worth mentioning that the approach of exceptions for termination may incur a larger performance cost during termination that the approach used in Go. 
175 This should not be an issue, since termination is rarely an fast-path of an application and ensuring that termination can be implemented correctly with ease is the aim of the exception approach. 183 Furthermore, due to \CFA's powerful exception system, this data can be used to choose handlers based on which channel and operation failed. 184 For example, exception handlers in \CFA have an optional predicate which can be used to trigger or skip handlers based on the content of the matching exception. 185 It is worth mentioning that using exceptions for termination may incur a larger performance cost than the Go approach. 186 However, this should not be an issue, since termination is rarely on the fast-path of an application. 187 In contrast, ensuring termination can be easily implemented correctly is the aim of the exception approach. 176 188 177 189 \section{\CFA / Go channel Examples} 178 To highlight the differences between \CFA's and Go's close semantics, three examples will be presented.190 To highlight the differences between \CFA's and Go's close semantics, three examples are presented. 179 191 The first example is a simple shutdown case, where there are producer threads and consumer threads operating on a channel for a fixed duration. 180 Once the duration ends, producers and consumers terminate without worrying about any leftover values in the channel.181 The second example extends the first example by requiring the channel to be empty uponshutdown.192 Once the duration ends, producers and consumers terminate immediately leaving unprocessed elements in the channel. 193 The second example extends the first by requiring the channel to be empty after shutdown. 182 194 Both the first and second example are shown in Figure~\ref{f:ChannelTermination}. 
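To make the preceding discussion concrete, the following minimal sketch shows predicate-based handler selection over @channel_closed@; the exception field name @elem@ and its pointer type are assumed here purely for illustration, while @is_insert@, @is_remove@, and the optional handler predicate are as described above.
\begin{cfa}
channel( int ) in{ 64 };
channel( int ) out{ 64 };
try {
	for ()
		insert( out, remove( in ) );
} catch( channel_closed * e ; is_insert( *e ) ) {	// insert failed: channel closed
	int lost = *(int *)e->elem;	// element pointer (field name assumed) recovers the value
	// move the lost value elsewhere so it is not dropped
} catch( channel_closed * e ; is_remove( *e ) ) {	// remove failed: element pointer is null
	// nothing to recover
}
\end{cfa}
The first handler matches only failed inserts and moves the element that could not be inserted, so the value is not lost; the second matches failed removes, whose element pointer is null.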
183 184 185 First the Go solutions to these examples shown in Figure~\ref{l:go_chan_term} are discussed.186 Since some of the elements being passed through the channel are zero-valued, closing the channel in Go does not aid in communicating shutdown.187 Instead, a different mechanism to communicate with the consumers and producers needs to be used.188 This use of an additional flag or communication method is common in Go channel shutdown code, since to avoid panics on a channel, the shutdown of a channel often has to be communicated with threads before it occurs.189 In this example, a flag is used to communicate with producers and another flag is used for consumers.190 Producers and consumers need separate avenues of communication both so that producers terminate before the channel is closed to avoid panicking, and to avoid the case where all the consumers terminate first, which can result in a deadlock for producers if the channel is full.191 The producer flag is set first, then after producers terminate the consumer flag is set and the channel is closed.192 In the second example where all values need to be consumed, the main thread iterates over the closed channel to process any remaining values.193 194 195 In the \CFA solutions in Figure~\ref{l:cfa_chan_term}, shutdown is communicated directly to both producers and consumers via the @close@ call.196 In the first example where all values do not need to be consumed, both producers and consumers do not handle the resumption and finish once they receive the termination exception.197 The second \CFA example where all values must be consumed highlights how resumption is used with channel shutdown.198 The @Producer@ thread-main knows to stop producing when the @insert@ call on a closed channel raises exception @channel_closed@.199 The @Consumer@ thread-main knows to stop consuming after all elements of a closed channel are removed and the call to @remove@ would block.200 Hence, the consumer knows the moment the channel closes because a resumption exception is raised, caught, and ignored, and then control returns to @remove@ to return another item from the buffer.201 Only when the buffer is drained and the call to @remove@ would block, a termination exception is raised to stop consuming.202 The \CFA semantics allow users to communicate channel shutdown directly through the channel, without having to share extra state between threads.203 Additionally, when the channel needs to be drained, \CFA provides users with easy options for processing the leftover channel values in the main thread or in the consumer threads.204 If one wishes to consume the leftover values in the consumer threads in Go, extra synchronization between the main thread and the consumer threads is needed.205 195 206 196 \begin{figure} … … 208 198 209 199 \begin{lrbox}{\myboxA} 200 \begin{Golang}[aboveskip=0pt,belowskip=0pt] 201 var channel chan int = make( chan int, 128 ) 202 var prodJoin chan int = make( chan int, 4 ) 203 var consJoin chan int = make( chan int, 4 ) 204 var cons_done, prod_done bool = false, false; 205 func producer() { 206 for { 207 if prod_done { break } 208 channel <- 5 209 } 210 prodJoin <- 0 // synch with main thd 211 } 212 213 func consumer() { 214 for { 215 if cons_done { break } 216 <- channel 217 } 218 consJoin <- 0 // synch with main thd 219 } 220 221 222 func main() { 223 for j := 0; j < 4; j++ { go consumer() } 224 for j := 0; j < 4; j++ { go producer() } 225 time.Sleep( time.Second * 10 ) 226 prod_done = true 227 for j := 0; j < 4 ; j++ { <- 
prodJoin } 228 cons_done = true 229 close(channel) // ensure no cons deadlock 230 @for elem := range channel {@ 231 // process leftover values 232 @}@ 233 for j := 0; j < 4; j++ { <- consJoin } 234 } 235 \end{Golang} 236 \end{lrbox} 237 238 \begin{lrbox}{\myboxB} 210 239 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 211 channel( size_t ) Channel{ ChannelSize }; 212 240 channel( size_t ) chan{ 128 }; 213 241 thread Consumer {}; 242 thread Producer {}; 243 244 void main( Producer & this ) { 245 try { 246 for () 247 insert( chan, 5 ); 248 } catch( channel_closed * ) { 249 // unhandled resume or full 250 } 251 } 214 252 void main( Consumer & this ) { 215 try { 216 for ( ;; ) 217 remove( Channel ); 218 @} catchResume( channel_closed * ) { @ 219 // handled resume => consume from chan 220 } catch( channel_closed * ) { 221 // empty or unhandled resume 222 } 223 } 224 225 thread Producer {}; 226 void main( Producer & this ) { 227 size_t count = 0; 228 try { 229 for ( ;; ) 230 insert( Channel, count++ ); 231 } catch ( channel_closed * ) { 232 // unhandled resume or full 233 } 234 } 235 236 int main( int argc, char * argv[] ) { 237 Consumer c[Consumers]; 238 Producer p[Producers]; 239 sleep(Duration`s); 240 close( Channel ); 241 return 0; 242 } 253 try { 254 for () { int i = remove( chan ); } 255 @} catchResume( channel_closed * ) {@ 256 // handled resume => consume from chan 257 } catch( channel_closed * ) { 258 // empty or unhandled resume 259 } 260 } 261 int main() { 262 Consumer c[4]; 263 Producer p[4]; 264 sleep( 10`s ); 265 close( chan ); 266 } 267 268 269 270 271 272 273 243 274 \end{cfa} 244 275 \end{lrbox} 245 276 246 \begin{lrbox}{\myboxB} 247 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 248 var cons_done, prod_done bool = false, false; 249 var prodJoin chan int = make(chan int, Producers) 250 var consJoin chan int = make(chan int, Consumers) 251 252 func consumer( channel chan uint64 ) { 253 for { 254 if cons_done { break } 255 <-channel 256 } 257 consJoin <- 0 // synch with main thd 258 } 259 260 func producer( channel chan uint64 ) { 261 var count uint64 = 0 262 for { 263 if prod_done { break } 264 channel <- count++ 265 } 266 prodJoin <- 0 // synch with main thd 267 } 268 269 func main() { 270 channel = make(chan uint64, ChannelSize) 271 for j := 0; j < Consumers; j++ { 272 go consumer( channel ) 273 } 274 for j := 0; j < Producers; j++ { 275 go producer( channel ) 276 } 277 time.Sleep(time.Second * Duration) 278 prod_done = true 279 for j := 0; j < Producers ; j++ { 280 <-prodJoin // wait for prods 281 } 282 cons_done = true 283 close(channel) // ensure no cons deadlock 284 @for elem := range channel { @ 285 // process leftover values 286 @}@ 287 for j := 0; j < Consumers; j++{ 288 <-consJoin // wait for cons 289 } 290 } 291 \end{cfa} 292 \end{lrbox} 293 294 \subfloat[\CFA style]{\label{l:cfa_chan_term}\usebox\myboxA} 277 \subfloat[Go style]{\label{l:go_chan_term}\usebox\myboxA} 295 278 \hspace*{3pt} 296 279 \vrule 297 280 \hspace*{3pt} 298 \subfloat[ Go style]{\label{l:go_chan_term}\usebox\myboxB}281 \subfloat[\CFA style]{\label{l:cfa_chan_term}\usebox\myboxB} 299 282 \caption{Channel Termination Examples 1 and 2. Code specific to example 2 is highlighted.} 300 283 \label{f:ChannelTermination} 301 284 \end{figure} 302 285 303 The final shutdown example uses channels to implement a barrier. 304 It is shown in Figure~\ref{f:ChannelBarrierTermination}. 
305 The problem of implementing a barrier is chosen since threads are both producers and consumers on the barrier-internal channels, which removes the ability to easily synchronize producers before consumers during shutdown. 306 As such, while the shutdown details will be discussed with this problem in mind, they are also applicable to other problems taht have individual threads both producing and consuming from channels. 307 Both of these examples are implemented using \CFA syntax so that they can be easily compared. 308 Figure~\ref{l:cfa_chan_bar} uses \CFA-style channel close semantics and Figure~\ref{l:go_chan_bar} uses Go-style close semantics. 309 In this example it is infeasible to use the Go @close@ call since all threads are both potentially producers and consumers, causing panics on close to be unavoidable without complex synchronization. 310 As such in Figure~\ref{l:go_chan_bar} to implement a flush routine for the buffer, a sentinel value of @-1@ has to be used to indicate to threads that they need to leave the barrier. 311 This sentinel value has to be checked at two points. 286 Figure~\ref{l:go_chan_term} shows the Go solution. 287 Since some of the elements being passed through the channel are zero-valued, closing the channel in Go does not aid in communicating shutdown. 288 Instead, a different mechanism to communicate with the consumers and producers needs to be used. 289 Flag variables are common in Go-channel shutdown-code to avoid panics on a channel, meaning the channel shutdown has to be communicated with threads before it occurs. 290 Hence, the two flags @cons_done@ and @prod_done@ are used to communicate with the producers and consumers, respectively. 291 Furthermore, producers and consumers need to shutdown separately to ensure that producers terminate before the channel is closed to avoid panicking, and to avoid the case where all the consumers terminate first, which can result in a deadlock for producers if the channel is full. 292 The producer flag is set first; 293 then after all producers terminate, the consumer flag is set and the channel is closed leaving elements in the buffer. 294 To purge the buffer, a loop is added (red) that iterates over the closed channel to process any remaining values. 295 296 Figure~\ref{l:cfa_chan_term} shows the \CFA solution. 297 Here, shutdown is communicated directly to both producers and consumers via the @close@ call. 298 A @Producer@ thread knows to stop producing when the @insert@ call on a closed channel raises exception @channel_closed@. 299 If a @Consumer@ thread ignores the first resumption exception from the @close@, the exception is reraised as a termination exception and elements are left in the buffer. 300 If a @Consumer@ thread handles the resumptions exceptions (red), control returns to complete the remove. 301 A @Consumer@ thread knows to stop consuming after all elements of a closed channel are removed and the consumer would block, which causes a termination raise of @channel_closed@. 302 The \CFA semantics allow users to communicate channel shutdown directly through the channel, without having to share extra state between threads. 303 Additionally, when the channel needs to be drained, \CFA provides users with easy options for processing the leftover channel values in the main thread or in the consumer threads. 304 305 Figure~\ref{f:ChannelBarrierTermination} shows a final shutdown example using channels to implement a barrier. 
306 A Go and \CFA style solution are presented but both are implemented using \CFA syntax so they can be easily compared. 307 Implementing a barrier is interesting because threads are both producers and consumers on the barrier-internal channels, @entryWait@ and @barWait@. 308 The outline for the barrier implementation starts by initially filling the @entryWait@ channel with $N$ tickets in the barrier constructor, allowing $N$ arriving threads to remove these values and enter the barrier. 309 After @entryWait@ is empty, arriving threads block when removing. 310 However, the arriving threads that entered the barrier cannot leave the barrier until $N$ threads have arrived. 311 Hence, the entering threads block on the empty @barWait@ channel until the $N$th arriving thread inserts $N-1$ elements into @barWait@ to unblock the $N-1$ threads calling @remove@. 312 The race between these arriving threads blocking on @barWait@ and the $N$th thread inserting values into @barWait@ does not affect correctness; 313 \ie an arriving thread may or may not block on channel @barWait@ to get its value. 314 Finally, the last thread to remove from @barWait@ with ticket $N-2$, refills channel @entryWait@ with $N$ values to start the next group into the barrier. 315 316 Now, the two channels makes termination synchronization between producers and consumers difficult. 317 Interestingly, the shutdown details for this problem are also applicable to other problems with threads producing and consuming from the same channel. 318 The Go-style solution cannot use the Go @close@ call since all threads are both potentially producers and consumers, causing panics on close to be unavoidable without complex synchronization. 319 As such in Figure \ref{l:go_chan_bar}, a flush routine is needed to insert a sentinel value, @-1@, to inform threads waiting in the buffer they need to leave the barrier. 320 This sentinel value has to be checked at two points along the fast-path and sentinel values daisy-chained into the buffers. 312 321 Furthermore, an additional flag @done@ is needed to communicate to threads once they have left the barrier that they are done. 313 314 In the \CFA version~\ref{l:cfa_chan_bar}, the barrier shutdown results in an exception being thrown at threads operating on it, which informs the threads that they must terminate.322 Also note that in the Go version~\ref{l:go_chan_bar}, the size of the barrier channels has to be larger than in the \CFA version to ensure that the main thread does not block when attempting to clear the barrier. 323 For The \CFA solution~\ref{l:cfa_chan_bar}, the barrier shutdown results in an exception being thrown at threads operating on it, to inform waiting threads they must leave the barrier. 315 324 This avoids the need to use a separate communication method other than the barrier, and avoids extra conditional checks on the fast path of the barrier implementation. 
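As a minimal sketch of the \CFA-style shutdown (assuming termination is initiated by closing the barrier's internal channels directly, which may differ from the routine used in the figure), a worker leaves the barrier by catching the termination exception rather than checking a flag on the fast path.
\begin{cfa}
enum { Threads = 4 };
barrier b{ Threads };
thread Worker {};
void main( Worker & this ) {
	try {
		for ()
			wait( b );	// barrier-internal channel operations raise channel_closed on shutdown
	} catch( channel_closed * ) {}	// leave the barrier
}
int main() {
	Worker w[Threads];
	sleep( 10`s );
	close( b.entryWait );  close( b.barWait );	// assumed: shut down by closing both internal channels
}	// implicit join of worker threads
\end{cfa}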
316 Also note that in the Go version~\ref{l:go_chan_bar}, the size of the barrier channels has to be larger than in the \CFA version to ensure that the main thread does not block when attempting to clear the barrier.317 325 318 326 \begin{figure} … … 320 328 321 329 \begin{lrbox}{\myboxA} 330 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 331 struct barrier { 332 channel( int ) barWait, entryWait; 333 int size; 334 }; 335 void ?{}( barrier & this, int size ) with(this) { 336 barWait{size + 1}; entryWait{size + 1}; 337 this.size = size; 338 for ( i; size ) 339 insert( entryWait, i ); 340 } 341 void wait( barrier & this ) with(this) { 342 int ticket = remove( entryWait ); 343 @if ( ticket == -1 ) { insert( entryWait, -1 ); return; }@ 344 if ( ticket == size - 1 ) { 345 for ( i; size - 1 ) 346 insert( barWait, i ); 347 return; 348 } 349 ticket = remove( barWait ); 350 @if ( ticket == -1 ) { insert( barWait, -1 ); return; }@ 351 if ( size == 1 || ticket == size - 2 ) { // last ? 352 for ( i; size ) 353 insert( entryWait, i ); 354 } 355 } 356 void flush(barrier & this) with(this) { 357 @insert( entryWait, -1 ); insert( barWait, -1 );@ 358 } 359 enum { Threads = 4 }; 360 barrier b{Threads}; 361 @bool done = false;@ 362 thread Thread {}; 363 void main( Thread & this ) { 364 for () { 365 @if ( done ) break;@ 366 wait( b ); 367 } 368 } 369 int main() { 370 Thread t[Threads]; 371 sleep(10`s); 372 done = true; 373 flush( b ); 374 } // wait for threads to terminate 375 \end{cfa} 376 \end{lrbox} 377 378 \begin{lrbox}{\myboxB} 322 379 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 323 380 struct barrier { … … 368 425 \end{lrbox} 369 426 370 \begin{lrbox}{\myboxB} 371 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 372 struct barrier { 373 channel( int ) barWait, entryWait; 374 int size; 375 }; 376 void ?{}( barrier & this, int size ) with(this) { 377 barWait{size + 1}; entryWait{size + 1}; 378 this.size = size; 379 for ( i; size ) 380 insert( entryWait, i ); 381 } 382 void wait( barrier & this ) with(this) { 383 int ticket = remove( entryWait ); 384 @if ( ticket == -1 ) { insert( entryWait, -1 ); return; }@ 385 if ( ticket == size - 1 ) { 386 for ( i; size - 1 ) 387 insert( barWait, i ); 388 return; 389 } 390 ticket = remove( barWait ); 391 @if ( ticket == -1 ) { insert( barWait, -1 ); return; }@ 392 if ( size == 1 || ticket == size - 2 ) { // last ? 393 for ( i; size ) 394 insert( entryWait, i ); 395 } 396 } 397 void flush(barrier & this) with(this) { 398 @insert( entryWait, -1 ); insert( barWait, -1 );@ 399 } 400 enum { Threads = 4 }; 401 barrier b{Threads}; 402 @bool done = false;@ 403 thread Thread {}; 404 void main( Thread & this ) { 405 for () { 406 @if ( done ) break;@ 407 wait( b ); 408 } 409 } 410 int main() { 411 Thread t[Threads]; 412 sleep(10`s); 413 done = true; 414 flush( b ); 415 } // wait for threads to terminate 416 \end{cfa} 417 \end{lrbox} 418 419 \subfloat[\CFA style]{\label{l:cfa_chan_bar}\usebox\myboxA} 427 \subfloat[Go style]{\label{l:go_chan_bar}\usebox\myboxA} 420 428 \hspace*{3pt} 421 429 \vrule 422 430 \hspace*{3pt} 423 \subfloat[ Go style]{\label{l:go_chan_bar}\usebox\myboxB}431 \subfloat[\CFA style]{\label{l:cfa_chan_bar}\usebox\myboxB} 424 432 \caption{Channel Barrier Termination} 425 433 \label{f:ChannelBarrierTermination} -
doc/theses/colby_parsons_MMAth/text/waituntil.tex
r2b78949 r8a930c03 14 14 The ability to wait for the first stall available without spinning can be done with concurrent tools that provide \gls{synch_multiplex}, the ability to wait synchronously for a resource or set of resources. 15 15 16 % C_TODO: fill in citations in following section17 16 \section{History of Synchronous Multiplexing} 18 17 There is a history of tools that provide \gls{synch_multiplex}. 19 Some of the most well known include the set o r unix system utilities signal(2)\cite{}, poll(2)\cite{}, and epoll(7)\cite{}, and the select statement provided by Go\cite{}.18 Some of the most well known include the set of unix system utilities: select(2)\cite{linux:select}, poll(2)\cite{linux:poll}, and epoll(7)\cite{linux:epoll}, and the select statement provided by Go\cite{go:selectref}. 20 19 21 20 Before one can examine the history of \gls{synch_multiplex} implementations in detail, the preceding theory must be discussed. … … 27 26 If a guard is false then the resource it guards is considered to not be in the set of resources being waited on. 28 27 Guards can be simulated using if statements, but to do so requires \[2^N\] if cases, where @N@ is the number of guards. 29 This transformation from guards to if statements will be discussed further in Section~\ref{}. % C_TODO: fill ref when writing semantics section later 28 The equivalence between guards and exponential if statements comes from an Occam ALT statement rule~\cite{Roscoe88}, which is presented in \CFA syntax in Figure~\ref{f:wu_if}. 29 Providing guards allows for easy toggling of waituntil clauses without introducing repeated code. 30 31 \begin{figure} 32 \begin{cfa} 33 when( predicate ) waituntil( A ) {} 34 or waituntil( B ) {} 35 // === 36 if ( predicate ) { 37 waituntil( A ) {} 38 or waituntil( B ) {} 39 } else { 40 waituntil( B ) {} 41 } 42 \end{cfa} 43 \caption{Occam's guard to if statement equivalence shown in \CFA syntax.} 44 \label{f:wu_if} 45 \end{figure} 30 46 31 47 Switching to implementations, it is important to discuss the resources being multiplexed. … … 44 60 It is worth noting these \gls{synch_multiplex} tools mentioned so far interact directly with the operating system and are often used to communicate between processes. 45 61 Later \gls{synch_multiplex} started to appear in user-space to support fast multiplexed concurrent communication between threads. 46 An early example of \gls{synch_multiplex} is the select statement in Ada .62 An early example of \gls{synch_multiplex} is the select statement in Ada~\cite[\S~9.7]{Ichbiah79}. 47 63 The select statement in Ada allows a task to multiplex over some subset of its own methods that it would like to @accept@ calls to. 48 64 Tasks in Ada can be thought of as threads which are an object of a specific class, and as such have methods, fields, etc. … … 53 69 The @else@ changes the synchronous multiplexing to asynchronous multiplexing. 54 70 If an @else@ clause is in a select statement and no calls to the @accept@ed methods are immediately available the code block associated with the @else@ is run and the task does not block. 55 The most popular example of user-space \gls{synch_multiplex} is Go with their select statement. 71 72 A popular example of user-space \gls{synch_multiplex} is Go with their select statement~\cite{go:selectref}. 56 73 Go's select statement operates on channels and has the same exclusive-or semantics as the ALT primitive from Occam, and has associated code blocks for each clause like ALT and Ada. 
57 74 However, unlike Ada and ALT, Go does not provide any guards for their select statement cases. 58 75 Go provides a timeout utility and also provides a @default@ clause which has the same semantics as Ada's @else@ clause. 76 77 \uC provides \gls{synch_multiplex} over futures with their @_Select@ statement and Ada-style \gls{synch_multiplex} over monitor methods with their @_Accept@ statement~\cite{uC++}. 78 Their @_Accept@ statement builds upon the select statement offered by Ada, by offering both @and@ and @or@ semantics, which can be used together in the same statement. 79 These semantics are also supported for \uC's @_Select@ statement. 80 This enables fully expressive \gls{synch_multiplex} predicates. 81 82 There are many other languages that provide \gls{synch_multiplex}, including Rust's @select!@ over futures~\cite{rust:select}, OCaml's @select@ over channels~\cite{ocaml:channe}, and C++14's @when_any@ over futures~\cite{cpp:whenany}. 83 Note that while C++14 and Rust provide \gls{synch_multiplex}, their implemetations leave much to be desired as they both rely on busy-waiting polling to wait on multiple resources. 59 84 60 85 \section{Other Approaches to Synchronous Multiplexing} … … 69 94 If the requests for the other resources need to be retracted, the burden falls on the programmer to determine how to synchronize appropriately to ensure that only one resource is delivered. 70 95 71 72 96 \section{\CFA's Waituntil Statement} 73 74 75 97 The new \CFA \gls{synch_multiplex} utility introduced in this work is the @waituntil@ statement. 98 There is a @waitfor@ statement in \CFA that supports Ada-style \gls{synch_multiplex} over monitor methods, so this @waituntil@ focuses on synchronizing over other resources. 99 All of the \gls{synch_multiplex} features mentioned so far are monomorphic, only supporting one resource to wait on, select(2) supports file descriptors, Go's select supports channel operations, \uC's select supports futures, and Ada's select supports monitor method calls. 100 The waituntil statement in \CFA is polymorphic and provides \gls{synch_multiplex} over any objects that satisfy the trait in Figure~\ref{f:wu_trait}. 101 102 \begin{figure} 103 \begin{cfa} 104 forall(T & | sized(T)) 105 trait is_selectable { 106 // For registering a waituntil stmt on a selectable type 107 bool register_select( T &, select_node & ); 108 109 // For unregistering a waituntil stmt from a selectable type 110 bool unregister_select( T &, select_node & ); 111 112 // on_selected is run on the selecting thread prior to executing the statement associated with the select_node 113 void on_selected( T &, select_node & ); 114 }; 115 \end{cfa} 116 \caption{Trait for types that can be passed into \CFA's waituntil statement.} 117 \label{f:wu_trait} 118 \end{figure} 119 120 Currently locks, channels, futures and timeouts are supported by the waituntil statement, but this will be expanded as other use cases arise. 121 The waituntil statement supports guarded clauses, like Ada, and Occam, supports both @or@, and @and@ semantics, like \uC, and provides an @else@ for asynchronous multiplexing. An example of \CFA waituntil usage is shown in Figure~\ref{f:wu_example}. In Figure~\ref{f:wu_example} the waituntil statement is waiting for either @Lock@ to be available or for a value to be read from @Channel@ into @i@ and for @Future@ to be fulfilled. The semantics of the waituntil statement will be discussed in detail in the next section. 
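Before turning to those semantics, the following minimal sketch shows what satisfying the trait in Figure~\ref{f:wu_trait} could look like for a hypothetical always-ready resource; real resources, such as locks and channels, additionally enqueue the @select_node@ and synchronize with concurrent operations.
\begin{cfa}
struct always_ready {};	// hypothetical resource that never blocks
static inline bool register_select( always_ready & this, select_node & node ) { return true; }	// immediately available
static inline bool unregister_select( always_ready & this, select_node & node ) { return false; }	// no pending code block to run
static inline void on_selected( always_ready & this, select_node & node ) {}	// nothing to check before running the code block
\end{cfa}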
122 123 \begin{figure} 124 \begin{cfa} 125 future(int) Future; 126 channel(int) Channel; 127 owner_lock Lock; 128 int i = 0; 129 130 waituntil( Lock ) { ... } 131 or when( i == 0 ) waituntil( i << Channel ) { ... } 132 and waituntil( Future ) { ... } 133 \end{cfa} 134 \caption{Example of \CFA's waituntil statement} 135 \label{f:wu_example} 136 \end{figure} 137 138 \section{Waituntil Semantics} 139 There are two parts of the waituntil semantics to discuss, the semantics of the statement itself, \ie @and@, @or@, @when@ guards, and @else@ semantics, and the semantics of how the waituntil interacts with types like channels, locks and futures. 140 To start, the semantics of the statement itself will be discussed. 141 142 \subsection{Waituntil Statement Semantics} 143 The @or@ semantics are the most straightforward and nearly match those laid out in the ALT statement from Occam, the clauses have an exclusive-or relationship where the first one to be available will be run and only one clause is run. 144 \CFA's @or@ semantics differ from ALT semantics in one respect, instead of randomly picking a clause when multiple are available, the clause that appears first in the order of clauses will be picked. 145 \eg in the following example, if @foo@ and @bar@ are both available, @foo@ will always be selected since it comes first in the order of waituntil clauses. 146 \begin{cfa} 147 future(int) bar; 148 future(int) foo; 149 waituntil( foo ) { ... } 150 or waituntil( bar ) { ... } 151 \end{cfa} 152 153 The @and@ semantics match the @and@ semantics used by \uC. 154 When multiple clauses are joined by @and@, the waituntil will make a thread wait for all to be available, but will run the corresponding code blocks \emph{as they become available}. 155 As @and@ clauses are made available, the thread will be woken to run those clauses' code blocks and then the thread will wait again until all clauses have been run. 156 This allows work to be done in parallel while synchronizing over a set of resources, and furthermore gives a good reason to use the @and@ operator. 157 If the @and@ operator waited for all clauses to be available before running, it would not provide much more use that just acquiring those resources one by one in subsequent lines of code. 158 The @and@ operator binds more tightly than the @or@ operator. 159 To give an @or@ operator higher precedence brackets can be used. 160 \eg the following waituntil unconditionally waits for @C@ and one of either @A@ or @B@, since the @or@ is given higher precendence via brackets. 161 \begin{cfa} 162 (waituntil( A ) { ... } 163 or waituntil( B ) { ... } ) 164 and waituntil( C ) { ... } 165 \end{cfa} 166 167 The guards in the waituntil statement are called @when@ clauses. 168 The @when@ clause is passed a boolean expression. 169 All the @when@ boolean expressions are evaluated before the waituntil statement is run. 170 The guards in Occam's ALT effectively toggle clauses on and off, where a clause will only be evaluated and waited on if the corresponding guard is @true@. 171 The guards in the waituntil statement operate the same way, but require some nuance since both @and@ and @or@ operators are supported. 172 When a guard is false and a clause is removed, it can be thought of as removing that clause and its preceding operator from the statement. 173 \eg in the following example the two waituntil statements are semantically the same. 174 \begin{cfa} 175 when(true) waituntil( A ) { ... } 176 or when(false) waituntil( B ) { ... } 177 and waituntil( C ) { ... 
} 178 // === 179 waituntil( A ) { ... } 180 and waituntil( C ) { ... } 181 \end{cfa} 182 183 The @else@ clause on the waituntil has identical semantics to the @else@ clause in Ada. 184 If all resources are not immediately available and there is an @else@ clause, the @else@ clause is run and the thread will not block. 185 186 \subsection{Waituntil Type Semantics} 187 As described earlier, to support interaction with the waituntil statement, a type must support the trait shown in Figure~\ref{f:wu_trait}. 188 The waituntil statement expects types to register and unregister themselves via calls to @register_select@ and @unregister_select@ respectively. 189 When a resource becomes available, @on_selected@ is run. 190 Many types may not need @on_selected@, but it is provided since some types may need to check and set things before the resource can be accessed in the code block. 191 The register/unregister routines in the trait return booleans. 192 The return value of @register_select@ is @true@ if the resource is immediately available, and @false@ otherwise. 193 The return value of @unregister_select@ is @true@ if the corresponding code block should be run after unregistration and @false@ otherwise. 194 The routine @on_selected@ and the return value of @unregister_select@ are needed to support channels as a resource. 195 More detail on channels and their interaction with waituntil is discussed in Section~\ref{s:wu_chans}. 196 197 \section{Waituntil Implementation} 198 The waituntil statement is not inherently complex, and can be described in a few steps. 199 The complexity of the statement comes from the consideration of race conditions and synchronization needed when supporting various primitives. 200 The basic steps that the waituntil statement follows are as follows. 201 202 First, the waituntil statement creates a @select_node@ per resource that is being waited on. 203 The @select_node@ is an object that stores the waituntil data pertaining to one of the resources. 204 Then, each @select_node@ is registered with the corresponding resource. 205 The thread executing the waituntil then enters a loop that runs until the entire waituntil statement is satisfied. 206 In each iteration of the loop, the thread attempts to block. 207 If any clauses are satisfied, the block fails and the thread proceeds; otherwise, the block succeeds. 208 After proceeding past the block, all clauses are checked for completion and the completed clauses have their code blocks run. 209 Once the thread escapes the loop, the @select_nodes@ are unregistered from the resources. 210 In the case where the block succeeds, the thread is woken by the thread that marks one of the resources as available. 211 Pseudocode detailing these steps is presented in the following code block. 212 213 \begin{cfa} 214 select_nodes s[N]; // N select nodes 215 for ( node in s ) 216 register_select( resource, node ); 217 while( statement not satisfied ) { 218 // try to block 219 for ( resource in waituntil statement ) 220 if ( resource is avail ) run code block 221 } 222 for ( node in s ) 223 unregister_select( resource, node ); 224 \end{cfa} 225 226 These steps give a basic, but mildly inaccurate, overview of how the statement works. 227 Digging into some parts of the implementation sheds light on more of the specifics and provides some accuracy. 228 229 \subsection{Locks} 230 Locks are one of the resources supported in the waituntil statement.
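For example, a thread can wait for whichever of two locks becomes available first; consistent with the semantics described next, the acquired lock is held only for the duration of the corresponding code block (a minimal sketch):
\begin{cfa}
owner_lock A, B;
waituntil( A ) { ... }	// A held only within this code block
or waituntil( B ) { ... }	// B held only within this code block
\end{cfa}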
231 When a thread waits on multiple locks via a waituntil, it enqueues a @select_node@ in each of the lock's waiting queues. 232 When a @select_node@ reaches the front of the queue and gains ownership of a lock, the blocked thread is notified. 233 The lock is held until the node is unregistered. 234 To prevent the waiting thread from holding many locks at once and potentially introducing a deadlock, the node is unregistered right after the corresponding code block is executed. 235 This prevents deadlocks since the waiting thread never holds a lock while waiting on another resource. 236 As such, the only nodes unregistered at the end are the ones that have not run. 237 238 \subsection{Timeouts} 239 Timeouts in the waituntil take the form of a duration being passed to a @sleep@ or @timeout@ call. 240 An example is shown in the following code. 241 242 \begin{cfa} 243 waituntil( sleep( 1`ms ) ) {} 244 waituntil( timeout( 1`s ) ) {} or waituntil( timeout( 2`s ) ) {} 245 waituntil( timeout( 1`ns ) ) {} and waituntil( timeout( 2`s ) ) {} 246 \end{cfa} 247 248 The timeout implementation highlights a key part of the waituntil semantics: the expression is evaluated before the waituntil runs. 249 As such, calls to @sleep@ and @timeout@ do not block, but instead return a type that supports the @is_selectable@ trait. 250 This mechanism is needed for types that want to support multiple operations, such as channels that support reading and writing. 251 252 \subsection{Channels}\label{s:wu_chans} 253 To support waiting on both reading and writing to channels, the operators @?<<?@ and @?>>?@ are used to denote reading and writing to a channel respectively, where the lefthand operand is the value and the righthand operand is the channel. 254 Channels require significant complexity to wait on for a few reasons. 255 The first reason is that reading or writing to a channel is a mutating operation. 256 What this means is that if a read or write to a channel occurs, the state of the channel has changed. 257 In comparison, for standard locks and futures, if a lock is acquired then released or a future is ready but not accessed, the states of the lock and the future are not modified. 258 In this way, if a waituntil over locks or futures has some resources available that were not consumed, it is not an issue. 259 However, if a thread modifies a channel on behalf of a thread blocked on a waituntil statement, it is important that the corresponding waituntil code block is run, otherwise there is a potentially erroneous mismatch between the channel state and associated side effects. 260 As such, the @unregister_select@ routine has a boolean return that is used by channels to indicate when the operation was completed but the block was not run yet. 261 Hence, some channel code blocks may be run as part of the unregister. 262 Furthermore, if there are both @and@ and @or@ operators, the @or@ operators stop providing exclusive-or semantics since this race between operations and unregisters exists. 263 264 It was deemed important that exclusive-or semantics were maintained when only @or@ operators were used, so this situation has been special-cased, and is handled by having all clauses race to set a value \emph{before} operating on the channel. 265 This approach is infeasible in the case where @and@ and @or@ operators are used. 266 To show this, consider the following waituntil statement.
267 268 \begin{cfa} 269 waituntil( i >> A ) {} and waituntil( i >> B ) {} 270 or waituntil( i >> C ) {} and waituntil( i >> D ) {} 271 \end{cfa} 272 273 If exclusive-or semantics were followed, this waituntil would only run the code blocks for @A@ and @B@, or the code blocks for @C@ and @D@. 274 However, to race before operation completion in this case introduces a race whose complexity increases with the size of the waituntil statement. 275 In the example above, for @i@ to be inserted into @C@, to ensure the exclusive-or it must be ensured that @i@ can also be inserted into @D@. 276 Furthermore, the race for the @or@ would also need to be won. 277 However, due to TOCTOU issues, one cannot know that all resources are available without acquiring all the internal locks of channels in the subtree. 278 This is not a good solution for two reasons. 279 It is possible that once all the locks are acquired that the subtree is not satisfied and they must all be released. 280 This would incur high cost for signalling threads and also heavily increase contention on internal channel locks. 281 Furthermore, the waituntil statement is polymorphic and can support resources that do not have internal locks, which also makes this approach infeasible. 282 As such, the exclusive-or semantics are lost when using both @and@ and @or@ operators since they can not be supported without significant complexity and hits to waituntil statement performance. 283 284 The mechanism by which the predicate of the waituntil is checked is discussed in more detail in Section~\ref{s:wu_guards}. 285 286 Another consideration introduced by channels is that supporting both reading and writing to a channel in a waituntil means that one waituntil clause may be the notifier for another waituntil clause. 287 This becomes a problem when dealing with the special-cased @or@ where the clauses need to win a race to operate on a channel. 288 When you have both a special-case @or@ inserting on one thread and another special-case @or@ consuming is blocked on another thread there is not one but two races that need to be consolidated by the inserting thread. 289 (The race can occur in the opposite case with a blocked producer and signalling consumer too.) 290 For them to know that the insert succeeded, they need to win the race for their own waituntil and win the race for the other waituntil. 291 Go solves this problem in their select statement by acquiring the internal locks of all channels before registering the select on the channels. 292 This eliminates the race since no other threads can operate on the blocked channel since its lock will be held. 293 294 This approach is not used in \CFA since the waituntil is polymorphic. 295 Not all types in a waituntil have an internal lock, and when using non-channel types acquiring all the locks incurs extra uneeded overhead. 296 Instead this race is consolidated in \CFA in two phases by having an intermediate pending status value for the race. 297 This case is detectable, and if detected the thread attempting to signal will first race to set the race flag to be pending. 298 If it succeeds, it then attempts to set the consumer's race flag to its success value. 299 If the producer successfully sets the consumer race flag, then the operation can proceed, if not the signalling thread will set its own race flag back to the initial value. 
300 If any other threads attempt to set the producer's flag and see a pending value, they wait until the value changes before proceeding, to ensure that the signal is not lost in the case that the producer fails. 301 This protocol ensures that signals are not lost and that the two races can be resolved in a safe manner. 302 303 Channels in \CFA have exception-based shutdown mechanisms that the waituntil statement needs to support. 304 These exception mechanisms are what motivated the @on_selected@ routine. 305 This routine is needed by channels to detect if they are closed upon waking from a waituntil statement, to ensure that the appropriate behaviour is taken. 306 307 \subsection{Guards and Statement Predicate}\label{s:wu_guards} 308 Checking for when a synchronous multiplexing utility is done is trivial when it has an or/xor relationship, since any resource becoming available means that the blocked thread can proceed. 309 In \uC and \CFA, their \gls{synch_multiplex} utilities involve both an @and@ and @or@ operator, which makes the problem of checking for completion of the statement more difficult. 310 311 The \uC @_Select@ statement solves this problem by constructing a tree of the resources, where the internal nodes are operators and the leaves are the resources. 312 The internal nodes also store the status of each of the subtrees beneath them. 313 When resources become available, their status is modified and the status of the leaf nodes percolates into the internal nodes, updating the state of the statement. 314 Once the root of the tree has both subtrees marked as @true@, then the statement is complete. 315 As an optimization, when the internal nodes are updated, their subtrees marked as @true@ are effectively pruned and are not touched again. 316 To support \uC's @_Select@ statement guards, the tree prunes the branch if the guard is false. 317 318 The \CFA waituntil statement blocks a thread until a set of resources have become available that satisfy the underlying predicate. 319 The waiting condition of the waituntil statement can be represented as a predicate over the resources, joined by the waituntil operators, where a resource is @true@ if it is available, and @false@ otherwise. 320 In \CFA, this representation is used as the mechanism to check if a thread is done waiting on the waituntil. 321 Leveraging the compiler, a routine is generated per waituntil that is passed the statuses of the resources and returns a boolean that is @true@ when the waituntil is done, and @false@ otherwise. 322 To support guards on the \CFA waituntil statement, the status of a resource disabled by a guard is set to ensure that the predicate function behaves as if that resource is no longer part of the predicate. 323 324 \uC's @_Select@ supports operators both inside and outside the clauses of the statement. 325 \eg in the following example, the code blocks run once their corresponding predicate inside the round braces is satisfied. 326 327 % C_TODO put this in uC++ code style not cfa-style 328 \begin{cfa} 329 Future_ISM<int> A, B, C, D, E; 330 _Select( A || B && C ) { ... } 331 and _Select( D && E ) { ... } 332 \end{cfa} 333 334 This is more expressive than the waituntil statement in \CFA. 335 In \CFA, since the waituntil statement supports more resources than just futures, implementing operators inside clauses was avoided for a few reasons. 336 As an example, suppose \CFA supported operators inside clauses and consider the code snippet in Figure~\ref{f:wu_inside_op}.
337 338 \begin{figure} 339 \begin{cfa} 340 owner_lock A, B, C, D; 341 waituntil( A && B ) { ... } 342 or waituntil( C && D ) { ... } 343 \end{cfa} 344 \caption{Example of unsupported operators inside clauses in \CFA.} 345 \label{f:wu_inside_op} 346 \end{figure} 347 348 If the waituntil in Figure~\ref{f:wu_inside_op} works with the same semantics as described and acquires each lock as it becomes available, it opens itself up to possible deadlocks since it is now holding locks and waiting on other resources. 349 As such, other semantics would be needed to ensure that this operation is safe. 350 One possibility is to use \CC's @scoped_lock@ approach that was described in Section~\ref{s:DeadlockAvoidance}; however, the potential for livelock leaves much to be desired. 351 Another possibility would be to use resource ordering similar to \CFA's @mutex@ statement, but that alone is not sufficient if the resource ordering is not used everywhere. 352 Additionally, using resource ordering could conflict with other semantics of the waituntil statement. 353 To show this conflict, consider if the locks in Figure~\ref{f:wu_inside_op} were ordered @D@, @B@, @C@, @A@. 354 If all the locks are available, it becomes complex to both respect the ordering of the waituntil in Figure~\ref{f:wu_inside_op} when choosing which code block to run and also respect the lock ordering of @D@, @B@, @C@, @A@ at the same time. 355 One other way this could be implemented is to wait until all resources for a given clause are available before proceeding to acquire them, but this also quickly becomes a poor approach. 356 This approach does not work due to TOCTOU issues, as it is not possible to ensure that the full set of resources is available without holding them all first. 357 Operators inside clauses in \CFA could potentially be implemented with careful circumvention of the problems involved, but it was not deemed an important feature when taking into account the runtime cost that would need to be paid to handle these situations. 358 The problem of operators inside clauses also becomes a difficult issue to handle when supporting channels. 359 If internal operators were supported, it would require some way to ensure that channels with internal operators are modified if and only if the corresponding code block is run, but that is not feasible due to reasons described in the exclusive-or portion of Section~\ref{s:wu_chans}. 360 361 \section{Waituntil Performance} 362 The two \gls{synch_multiplex} utilities that are in the realm of comparability with the \CFA waituntil statement are the Go @select@ statement and the \uC @_Select@ statement. 363 As such, two microbenchmarks are presented, one for Go and one for \uC, to contrast the systems. 364 The similar utilities discussed at the start of this chapter in C, Ada, Rust, \CC, and OCaml are either not meaningful or not feasible to benchmark against. 365 The select(2) and related utilities in C are not comparable since they are system calls that go into the kernel and operate on file descriptors, whereas the waituntil exists solely in userspace. 366 Ada's @select@ only operates on methods, which is done in \CFA via the @waitfor@ utility, so it is not feasible to benchmark against the @waituntil@, which cannot wait on the same resource. 367 Rust and \CC only offer a busy-wait-based approach, which is not meaningfully comparable to a blocking approach.
368 OCaml's @select@ waits on channels that are not comparable with \CFA and Go channels, which makes it infeasible to compare the OCaml @select@ with Go's @select@ and \CFA's @waituntil@. 369 Given the differences in features, polymorphism, and expressibility between the waituntil, @select@, and @_Select@ statements, the aim of the microbenchmarking in this chapter is to show that these implementations lie in the same realm of performance, not to pick a winner. 370 371 \subsection{Channel Benchmark} 372 The channel microbenchmark compares \CFA's waituntil and Go's select, where the resource being waited on is a set of channels. 373 374 %C_TODO explain benchmark 375 376 %C_TODO show results 377 378 %C_TODO discuss results 379 380 \subsection{Future Benchmark} 381 The future benchmark compares \CFA's waituntil with \uC's @_Select@, with both utilities waiting on futures. 382 383 %C_TODO explain benchmark 384 385 %C_TODO show results 386 387 %C_TODO discuss results -
doc/theses/colby_parsons_MMAth/thesis.tex
r2b78949 r8a930c03 111 111 colorlinks=true, % false: boxed links; true: colored links 112 112 linkcolor=blue, % color of internal links 113 citecolor=blue, % color of links to bibliography113 citecolor=blue, % color of links to bibliography 114 114 filecolor=magenta, % color of file links 115 urlcolor=cyan % color of external links 115 urlcolor=cyan, % color of external links 116 breaklinks=true 116 117 } 117 118 \ifthenelse{\boolean{PrintVersion}}{ % for improved print quality, change some hyperref options … … 126 127 % \usepackage[acronym]{glossaries} 127 128 \usepackage[automake,toc,abbreviations]{glossaries-extra} % Exception to the rule of hyperref being the last add-on package 129 \renewcommand*{\glstextformat}[1]{\textcolor{black}{#1}} 128 130 % If glossaries-extra is not in your LaTeX distribution, get it from CTAN (http://ctan.org/pkg/glossaries-extra), 129 131 % although it's supposed to be in both the TeX Live and MikTeX distributions. There are also documentation and -
doc/user/figures/EHMHierarchy.fig
r2b78949 r8a930c03 29 29 1 1 1.00 60.00 90.00 30 30 4950 1950 4950 1725 31 4 1 0 50 -1 0 1 30.0000 2 135 225 1950 1650 IO\00132 4 1 0 50 -1 0 1 30.0000 2 135 915 4950 1650 Arithmetic\00133 4 1 0 50 -1 0 1 30.0000 2 150 330 1350 2100 File\00134 4 1 0 50 -1 0 1 30.0000 2 135 735 2550 2100 Network\00135 4 1 0 50 -1 0 1 30.0000 2 180 1215 3750 2100 DivideByZero\00136 4 1 0 50 -1 0 1 30.0000 2 150 810 4950 2100 Overflow\00137 4 1 0 50 -1 0 1 30.0000 2 150 915 6000 2100 Underflow\00138 4 1 0 50 -1 0 1 30.0000 2 180 855 3450 1200 Exception\00131 4 1 0 50 -1 0 12 0.0000 2 135 225 1950 1650 IO\001 32 4 1 0 50 -1 0 12 0.0000 2 135 915 4950 1650 Arithmetic\001 33 4 1 0 50 -1 0 12 0.0000 2 150 330 1350 2100 File\001 34 4 1 0 50 -1 0 12 0.0000 2 135 735 2550 2100 Network\001 35 4 1 0 50 -1 0 12 0.0000 2 180 1215 3750 2100 DivideByZero\001 36 4 1 0 50 -1 0 12 0.0000 2 150 810 4950 2100 Overflow\001 37 4 1 0 50 -1 0 12 0.0000 2 150 915 6000 2100 Underflow\001 38 4 1 0 50 -1 0 12 0.0000 2 180 855 3450 1200 Exception\001 -
doc/user/user.tex
r2b78949 r8a930c03 11 11 %% Created On : Wed Apr 6 14:53:29 2016 12 12 %% Last Modified By : Peter A. Buhr 13 %% Last Modified On : Mon Aug 22 23:43:30 202214 %% Update Count : 55 0313 %% Last Modified On : Mon Jun 5 21:18:29 2023 14 %% Update Count : 5521 15 15 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 16 16 … … 108 108 \huge \CFA Team (past and present) \medskip \\ 109 109 \Large Andrew Beach, Richard Bilson, Michael Brooks, Peter A. Buhr, Thierry Delisle, \smallskip \\ 110 \Large Glen Ditchfield, Rodolfo G. Esteves, Aaron Moss, Colby Parsons, Rob Schluntz,\smallskip \\111 \Large Fangren Yu, Mubeen Zulfiqar110 \Large Glen Ditchfield, Rodolfo G. Esteves, Jiada Liang, Aaron Moss, Colby Parsons \smallskip \\ 111 \Large Rob Schluntz, Fangren Yu, Mubeen Zulfiqar 112 112 }% author 113 113 … … 169 169 Like \Index*[C++]{\CC{}}, there may be both old and new ways to achieve the same effect. 170 170 For example, the following programs compare the C, \CFA, and \CC I/O mechanisms, where the programs output the same result. 171 \begin{ flushleft}172 \begin{tabular}{@{}l @{\hspace{1em}}l@{\hspace{1em}}l@{}}173 \multicolumn{1}{@{}c @{\hspace{1em}}}{\textbf{C}} & \multicolumn{1}{c}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{\CC}} \\171 \begin{center} 172 \begin{tabular}{@{}lll@{}} 173 \multicolumn{1}{@{}c}{\textbf{C}} & \multicolumn{1}{c}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{\CC}} \\ 174 174 \begin{cfa}[tabsize=3] 175 175 #include <stdio.h>$\indexc{stdio.h}$ … … 199 199 \end{cfa} 200 200 \end{tabular} 201 \end{ flushleft}201 \end{center} 202 202 While \CFA I/O \see{\VRef{s:StreamIOLibrary}} looks similar to \Index*[C++]{\CC{}}, there are important differences, such as automatic spacing between variables and an implicit newline at the end of the expression list, similar to \Index*{Python}~\cite{Python}. 203 203 … … 856 856 still works. 857 857 Nevertheless, reversing the default action would have a non-trivial effect on case actions that compound, such as the above example of processing shell arguments. 
858 Therefore, to preserve backwards compatibility, it is necessary to introduce a new kind of ©switch© statement, called \Indexc{choose}, with no implicit fall-through semantics and an explicit fall-through if the last statement of a case-clause ends with the new keyword \Indexc{fallthrough}/\ Indexc{fallthru}, \eg:858 Therefore, to preserve backwards compatibility, it is necessary to introduce a new kind of ©switch© statement, called \Indexc{choose}, with no implicit fall-through semantics and an explicit fall-through if the last statement of a case-clause ends with the new keyword \Indexc{fallthrough}/\-\Indexc{fallthru}, \eg: 859 859 \begin{cfa} 860 860 ®choose® ( i ) { … … 1167 1167 \end{cfa} 1168 1168 \end{itemize} 1169 \R{Warning}: specifying the down-to range maybe unex cepted because the loop control \emph{implicitly} switches the L and H values (and toggles the increment/decrement for I):1169 \R{Warning}: specifying the down-to range maybe unexpected because the loop control \emph{implicitly} switches the L and H values (and toggles the increment/decrement for I): 1170 1170 \begin{cfa} 1171 1171 for ( i; 1 ~ 10 ) ${\C[1.5in]{// up range}$ … … 1173 1173 for ( i; ®10 -~ 1® ) ${\C{// \R{WRONG down range!}}\CRT}$ 1174 1174 \end{cfa} 1175 The reason for this sema tics is that the range direction can be toggled by adding/removing the minus, ©'-'©, versus interchanging the L and H expressions, which has a greater chance of introducing errors.1175 The reason for this semantics is that the range direction can be toggled by adding/removing the minus, ©'-'©, versus interchanging the L and H expressions, which has a greater chance of introducing errors. 1176 1176 1177 1177 … … 2256 2256 Days days = Mon; // enumeration type declaration and initialization 2257 2257 \end{cfa} 2258 The set of enums are injected into the variable namespace at the definition scope. 2259 Hence, enums may be overloaded with enum/variable/function names. 2260 \begin{cfa} 2258 The set of enums is injected into the variable namespace at the definition scope. 2259 Hence, enums may be overloaded with variable, enum, and function names. 2260 \begin{cfa} 2261 int Foo; $\C{// type/variable separate namespaces}$ 2261 2262 enum Foo { Bar }; 2262 2263 enum Goo { Bar }; $\C[1.75in]{// overload Foo.Bar}$ 2263 int Foo; $\C{// type/variable separate namespace}$2264 2264 double Bar; $\C{// overload Foo.Bar, Goo.Bar}\CRT$ 2265 2265 \end{cfa} … … 2301 2301 Hence, the value of enum ©Mon© is 0, ©Tue© is 1, ...\,, ©Sun© is 6. 2302 2302 If an enum value is specified, numbering continues by one from that value for subsequent unnumbered enums. 2303 If an enum value is a nexpression, the compiler performs constant-folding to obtain a constant value.2303 If an enum value is a \emph{constant} expression, the compiler performs constant-folding to obtain a constant value. 2304 2304 2305 2305 \CFA allows other integral types with associated values. 
… … 2313 2313 \begin{cfa} 2314 2314 // non-integral numeric 2315 enum( ®double® ) Math { PI_2 = 1.570796, PI = 3.141597, E = 2.718282 }2315 enum( ®double® ) Math { PI_2 = 1.570796, PI = 3.141597, E = 2.718282 } 2316 2316 // pointer 2317 enum( ®char *® ) Name { Fred = "Fred", Mary = "Mary", Jane = "Jane" };2317 enum( ®char *® ) Name { Fred = "Fred", Mary = "Mary", Jane = "Jane" }; 2318 2318 int i, j, k; 2319 2319 enum( ®int *® ) ptr { I = &i, J = &j, K = &k }; 2320 enum( ®int &® ) ref { I = i, J = j,K = k };2320 enum( ®int &® ) ref { I = i, J = j, K = k }; 2321 2321 // tuple 2322 2322 enum( ®[int, int]® ) { T = [ 1, 2 ] }; … … 2361 2361 \begin{cfa} 2362 2362 enum( char * ) Name2 { ®inline Name®, Jack = "Jack", Jill = "Jill" }; 2363 enum ®/* inferred */® Name3 { ®inline Name2®, Sue = "Sue", Tom = "Tom" };2363 enum ®/* inferred */® Name3 { ®inline Name2®, Sue = "Sue", Tom = "Tom" }; 2364 2364 \end{cfa} 2365 2365 Enumeration ©Name2© inherits all the enums and their values from enumeration ©Name© by containment, and a ©Name© enumeration is a subtype of enumeration ©Name2©. … … 3818 3818 "[ output-file (default stdout) ] ]"; 3819 3819 } // choose 3820 } catch( ® Open_Failure® * ex; ex->istream == &in ) {3820 } catch( ®open_failure® * ex; ex->istream == &in ) { $\C{// input file errors}$ 3821 3821 ®exit® | "Unable to open input file" | argv[1]; 3822 } catch( ® Open_Failure® * ex; ex->ostream == &out ) {3822 } catch( ®open_failure® * ex; ex->ostream == &out ) { $\C{// output file errors}$ 3823 3823 ®close®( in ); $\C{// optional}$ 3824 3824 ®exit® | "Unable to open output file" | argv[2]; … … 4038 4038 4039 4039 \item 4040 \Indexc{sepDisable}\index{manipulator!sepDisable@©sepDisable©} and \Indexc{sepEnable}\index{manipulator!sepEnable@©sepEnable©} toggle printing the separator.4040 \Indexc{sepDisable}\index{manipulator!sepDisable@©sepDisable©} and \Indexc{sepEnable}\index{manipulator!sepEnable@©sepEnable©} globally toggle printing the separator. 4041 4041 \begin{cfa}[belowskip=0pt] 4042 4042 sout | sepDisable | 1 | 2 | 3; $\C{// turn off implicit separator}$ … … 4053 4053 4054 4054 \item 4055 \Indexc{sepOn}\index{manipulator!sepOn@©sepOn©} and \Indexc{sepOff}\index{manipulator!sepOff@©sepOff©} toggle printing the separator with respect to the next printed item, and then return to the global separator setting.4055 \Indexc{sepOn}\index{manipulator!sepOn@©sepOn©} and \Indexc{sepOff}\index{manipulator!sepOff@©sepOff©} locally toggle printing the separator with respect to the next printed item, and then return to the global separator setting. 4056 4056 \begin{cfa}[belowskip=0pt] 4057 4057 sout | 1 | sepOff | 2 | 3; $\C{// turn off implicit separator for the next item}$ … … 4129 4129 6 4130 4130 \end{cfa} 4131 Note, a terminating ©nl© is merged (overrides) with the implicit newline at the end of the ©sout© expression, otherwise it is impossible to toprint a single newline4131 Note, a terminating ©nl© is merged (overrides) with the implicit newline at the end of the ©sout© expression, otherwise it is impossible to print a single newline 4132 4132 \item 4133 4133 \Indexc{nlOn}\index{manipulator!nlOn@©nlOn©} implicitly prints a newline at the end of each output expression. -
driver/cc1.cc
r2b78949 r8a930c03 10 10 // Created On : Fri Aug 26 14:23:51 2005 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Feb 17 18:04:23 202213 // Update Count : 42 212 // Last Modified On : Fri Jun 9 11:36:44 2023 13 // Update Count : 423 14 14 // 15 15 … … 385 385 // strip inappropriate flags with an argument 386 386 387 } else if ( arg == "-auxbase" || arg == "-auxbase-strip" || arg == "-dumpbase" || arg == "-dumpdir" ) { 387 } else if ( arg == "-auxbase" || arg == "-auxbase-strip" || 388 arg == "-dumpbase" || arg == "-dumpbase-ext" || arg == "-dumpdir" ) { 388 389 i += 1; 389 390 #ifdef __DEBUG_H__ -
driver/cfa.cc
r2b78949 r8a930c03 10 10 // Created On : Tue Aug 20 13:44:49 2002 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue May 23 16:22:47202313 // Update Count : 47 712 // Last Modified On : Tue May 30 10:47:52 2023 13 // Update Count : 478 14 14 // 15 15 … … 329 329 #endif // __x86_64__ 330 330 331 // ARM -mno-outline-atomics => use LL/SC instead of calls to atomic routines: __aarch64_swp_acq_rel, __aarch64_cas8_acq_rel 332 // ARM -march=armv8.2-a+lse => generate Arm LSE extension instructions SWAP and CAS 333 // https://community.arm.com/developer/tools-software/tools/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10 331 334 #ifdef __ARM_ARCH 332 335 args[nargs++] = "-mno-outline-atomics"; // use ARM LL/SC instructions for atomics -
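A quick way to see what the two ARM options documented in the new comment change is to compile a tiny stand-alone probe to assembly (©gcc -S©) with and without ©-mno-outline-atomics©, or with ©-march=armv8.2-a+lse©, and compare the output. The probe below is an illustration added here, not part of the driver, and the helper name in its comment is only the usual libgcc outline-atomics symbol, mentioned as a hint.

/* probe.c: one atomic read-modify-write whose code generation the flags above control */
static unsigned long counter;

unsigned long bump( void ) {
    /* default aarch64 gcc (outline atomics): typically a call to an __aarch64_ldadd helper
       -mno-outline-atomics: an inline LL/SC (ldaxr/stlxr) loop
       -march=armv8.2-a+lse: a single LSE LDADD instruction */
    return __atomic_fetch_add( &counter, 1, __ATOMIC_ACQ_REL );
}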
libcfa/src/Makefile.am
r2b78949 r8a930c03 11 11 ## Created On : Sun May 31 08:54:01 2015 12 12 ## Last Modified By : Peter A. Buhr 13 ## Last Modified On : Fri Jul 16 16:00:40 202114 ## Update Count : 25 513 ## Last Modified On : Thu May 25 15:20:04 2023 14 ## Update Count : 259 15 15 ############################################################################### 16 16 … … 59 59 bits/queue.hfa \ 60 60 bits/sequence.hfa \ 61 concurrency/atomic.hfa \ 61 62 concurrency/iofwd.hfa \ 62 63 concurrency/barrier.hfa \ … … 115 116 concurrency/kernel/fwd.hfa \ 116 117 concurrency/mutex_stmt.hfa \ 117 concurrency/channel.hfa \118 concurrency/actor.hfa118 concurrency/channel.hfa \ 119 concurrency/actor.hfa 119 120 120 121 inst_thread_headers_src = \ … … 127 128 concurrency/monitor.hfa \ 128 129 concurrency/mutex.hfa \ 129 concurrency/select.hfa \130 concurrency/select.hfa \ 130 131 concurrency/thread.hfa 131 132 -
libcfa/src/bits/weakso_locks.cfa
r2b78949 r8a930c03 30 30 bool register_select( blocking_lock & this, select_node & node ) { return false; } 31 31 bool unregister_select( blocking_lock & this, select_node & node ) { return false; } 32 bool on_selected( blocking_lock & this, select_node & node ) { return true;}32 void on_selected( blocking_lock & this, select_node & node ) {} 33 33 -
libcfa/src/bits/weakso_locks.hfa
r2b78949 r8a930c03 62 62 bool register_select( blocking_lock & this, select_node & node ) OPTIONAL_THREAD; 63 63 bool unregister_select( blocking_lock & this, select_node & node ) OPTIONAL_THREAD; 64 boolon_selected( blocking_lock & this, select_node & node ) OPTIONAL_THREAD;64 void on_selected( blocking_lock & this, select_node & node ) OPTIONAL_THREAD; 65 65 66 66 //---------- … … 80 80 static inline bool register_select( multiple_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); } 81 81 static inline bool unregister_select( multiple_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); } 82 static inline bool on_selected( multiple_acquisition_lock & this, select_node & node ) { returnon_selected( (blocking_lock &)this, node ); }82 static inline void on_selected( multiple_acquisition_lock & this, select_node & node ) { on_selected( (blocking_lock &)this, node ); } -
libcfa/src/concurrency/actor.hfa
r2b78949 r8a930c03 13 13 #endif // CFA_DEBUG 14 14 15 #define DEBUG_ABORT( cond, string ) CFA_DEBUG( if ( cond ) abort( string ) ) 16 15 17 // Define the default number of processors created in the executor. Must be greater than 0. 16 18 #define __DEFAULT_EXECUTOR_PROCESSORS__ 2 … … 42 44 struct executor; 43 45 44 enum Allocation { Nodelete, Delete, Destroy, Finished }; // allocation status45 46 typedef Allocation (*__receive_fn)(actor &, message &);46 enum allocation { Nodelete, Delete, Destroy, Finished }; // allocation status 47 48 typedef allocation (*__receive_fn)(actor &, message &); 47 49 struct request { 48 50 actor * receiver; … … 393 395 struct actor { 394 396 size_t ticket; // executor-queue handle 395 Allocation allocation_; // allocation action397 allocation allocation_; // allocation action 396 398 inline virtual_dtor; 397 399 }; … … 400 402 // Once an actor is allocated it must be sent a message or the actor system cannot stop. Hence, its receive 401 403 // member must be called to end it 402 verifyf( __actor_executor_, "Creating actor before calling start_actor_system() can cause undefined behaviour.\n" );404 DEBUG_ABORT( __actor_executor_ == 0p, "Creating actor before calling start_actor_system() can cause undefined behaviour.\n" ); 403 405 allocation_ = Nodelete; 404 406 ticket = __get_next_ticket( *__actor_executor_ ); … … 430 432 431 433 struct message { 432 Allocation allocation_; // allocation action434 allocation allocation_; // allocation action 433 435 inline virtual_dtor; 434 436 }; … … 437 439 this.allocation_ = Nodelete; 438 440 } 439 static inline void ?{}( message & this, Allocation allocation) {440 memcpy( &this.allocation_, &alloc ation, sizeof(allocation) ); // optimization to elide ctor441 verifyf( this.allocation_ != Finished, "The Finished Allocation status is not supported for message types.\n");441 static inline void ?{}( message & this, allocation alloc ) { 442 memcpy( &this.allocation_, &alloc, sizeof(allocation) ); // optimization to elide ctor 443 DEBUG_ABORT( this.allocation_ == Finished, "The Finished allocation status is not supported for message types.\n" ); 442 444 } 443 445 static inline void ^?{}( message & this ) with(this) { … … 453 455 } // switch 454 456 } 455 static inline void set_allocation( message & this, Allocation state ) {457 static inline void set_allocation( message & this, allocation state ) { 456 458 this.allocation_ = state; 457 459 } 458 460 459 461 static inline void deliver_request( request & this ) { 462 DEBUG_ABORT( this.receiver->ticket == (unsigned long int)MAX, "Attempted to send message to deleted/dead actor\n" ); 460 463 this.receiver->allocation_ = this.fn( *this.receiver, *this.msg ); 461 464 check_message( *this.msg ); … … 631 634 632 635 static inline void send( actor & this, request & req ) { 633 verifyf( this.ticket != (unsigned long int)MAX, "Attempted to send message to deleted/dead actor\n" );636 DEBUG_ABORT( this.ticket == (unsigned long int)MAX, "Attempted to send message to deleted/dead actor\n" ); 634 637 send( *__actor_executor_, req, this.ticket ); 635 638 } … … 680 683 // assigned at creation to __base_msg_finished to avoid unused message warning 681 684 message __base_msg_finished @= { .allocation_ : Finished }; 682 struct __ DeleteMsg { inline message; } DeleteMsg = __base_msg_finished;683 struct __ DestroyMsg { inline message; } DestroyMsg = __base_msg_finished;684 struct __ FinishedMsg { inline message; } FinishedMsg = __base_msg_finished;685 686 Allocation receive( actor & this, __DeleteMsg& 
msg ) { return Delete; }687 Allocation receive( actor & this, __DestroyMsg& msg ) { return Destroy; }688 Allocation receive( actor & this, __FinishedMsg& msg ) { return Finished; }689 685 struct __delete_msg_t { inline message; } delete_msg = __base_msg_finished; 686 struct __destroy_msg_t { inline message; } destroy_msg = __base_msg_finished; 687 struct __finished_msg_t { inline message; } finished_msg = __base_msg_finished; 688 689 allocation receive( actor & this, __delete_msg_t & msg ) { return Delete; } 690 allocation receive( actor & this, __destroy_msg_t & msg ) { return Destroy; } 691 allocation receive( actor & this, __finished_msg_t & msg ) { return Finished; } 692 -
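The DEBUG_ABORT macro introduced above turns the former verifyf calls into checks that disappear entirely outside debug builds. A minimal plain-C sketch of the same pattern, with an invented DEBUG_CHECKS flag standing in for the CFA_DEBUG machinery (names illustrative, not the CFA runtime):

#include <stdio.h>
#include <stdlib.h>

#ifdef DEBUG_CHECKS                                 /* stand-in for the CFA_DEBUG build flag */
#define DEBUG_ABORT( cond, msg ) \
    do { if ( cond ) { fprintf( stderr, "%s", msg ); abort(); } } while (0)
#else
#define DEBUG_ABORT( cond, msg ) do {} while (0)    /* compiled out: no test, no message */
#endif

int main( void ) {
    int actor_system_started = 0;
    DEBUG_ABORT( ! actor_system_started,
        "Creating actor before calling start_actor_system() can cause undefined behaviour.\n" );
    printf( "reached only when the check is off or the condition is false (started=%d)\n",
        actor_system_started );
    return 0;
}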
libcfa/src/concurrency/channel.hfa
r2b78949 r8a930c03 51 51 vtable(channel_closed) channel_closed_vt; 52 52 53 static inline bool is_insert( channel_closed & e ) { return e.elem != 0p; } 54 static inline bool is_remove( channel_closed & e ) { return e.elem == 0p; } 55 53 56 // #define CHAN_STATS // define this to get channel stats printed in dtor 54 57 … … 341 344 } 342 345 346 // special case of __handle_waituntil_OR, that does some work to avoid starvation/deadlock case 347 static inline bool __handle_pending( dlist( select_node ) & queue, select_node & mine ) { 348 while ( !queue`isEmpty ) { 349 // if node not a special OR case or if we win the special OR case race break 350 if ( !queue`first.clause_status || queue`first.park_counter || __pending_set_other( queue`first, mine, ((unsigned long int)(&(queue`first))) ) ) 351 return true; 352 353 // our node lost the race when toggling in __pending_set_other 354 if ( *mine.clause_status != __SELECT_PENDING ) 355 return false; 356 357 // otherwise we lost the special OR race so discard node 358 try_pop_front( queue ); 359 } 360 return false; 361 } 362 343 363 // type used by select statement to capture a chan read as the selected operation 344 364 struct chan_read { … … 374 394 return false; 375 395 } 376 377 if ( __handle_ waituntil_OR( prods) ) {396 397 if ( __handle_pending( prods, node ) ) { 378 398 __prods_handoff( chan, ret ); 379 399 __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node … … 381 401 return true; 382 402 } 383 __make_select_node_unsat( node ); 403 if ( *node.clause_status == __SELECT_PENDING ) 404 __make_select_node_unsat( node ); 384 405 } 385 406 // check if we can complete operation. If so race to establish winner in special OR case … … 423 444 } 424 445 static inline bool unregister_select( chan_read(T) & this, select_node & node ) { return unregister_chan( this.chan, node ); } 425 static inline boolon_selected( chan_read(T) & this, select_node & node ) with(this) {446 static inline void on_selected( chan_read(T) & this, select_node & node ) with(this) { 426 447 if ( node.extra == 0p ) // check if woken up due to closed channel 427 448 __closed_remove( chan, ret ); 428 449 // This is only reachable if not closed or closed exception was handled 429 return true;430 450 } 431 451 … … 464 484 return false; 465 485 } 466 467 if ( __handle_ waituntil_OR( cons) ) {486 487 if ( __handle_pending( cons, node ) ) { 468 488 __cons_handoff( chan, elem ); 469 489 __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node … … 471 491 return true; 472 492 } 473 __make_select_node_unsat( node ); 493 if ( *node.clause_status == __SELECT_PENDING ) 494 __make_select_node_unsat( node ); 474 495 } 475 496 // check if we can complete operation. If so race to establish winner in special OR case … … 515 536 static inline bool unregister_select( chan_write(T) & this, select_node & node ) { return unregister_chan( this.chan, node ); } 516 537 517 static inline boolon_selected( chan_write(T) & this, select_node & node ) with(this) {538 static inline void on_selected( chan_write(T) & this, select_node & node ) with(this) { 518 539 if ( node.extra == 0p ) // check if woken up due to closed channel 519 540 __closed_insert( chan, elem ); 520 541 521 542 // This is only reachable if not closed or closed exception was handled 522 return true;523 543 } 524 544 -
libcfa/src/concurrency/future.hfa
r2b78949 r8a930c03 70 70 // check if the future is available 71 71 // currently no mutual exclusion because I can't see when you need this call to be synchronous or protected 72 bool available( future(T) & this ) { return this.state; }72 bool available( future(T) & this ) { return __atomic_load_n( &this.state, __ATOMIC_RELAXED ); } 73 73 74 74 … … 180 180 } 181 181 182 bool on_selected( future(T) & this, select_node & node ) { return true;}182 void on_selected( future(T) & this, select_node & node ) {} 183 183 } 184 184 } 185 185 186 186 //-------------------------------------------------------------------------------------------------------- 187 // These futures below do not support select statements so they may not be as usefulas 'future'187 // These futures below do not support select statements so they may not have as many features as 'future' 188 188 // however the 'single_future' is cheap and cheerful and is most likely more performant than 'future' 189 189 // since it uses raw atomics and no locks -
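The change to ©available© above replaces a plain read of ©state© with an explicit atomic load, because the flag is written by the fulfilling thread and polled by another. A small plain-C sketch of that polling pattern (illustrative only, not the CFA future implementation; compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static unsigned long state = 0;                     /* 0 = empty, 1 = fulfilled */

static void * fulfil( void * arg ) {
    (void)arg;
    usleep( 1000 );                                 /* pretend to compute the result */
    __atomic_store_n( &state, 1, __ATOMIC_RELEASE );
    return NULL;
}

static int available( void ) {                      /* mirrors the relaxed polling read */
    return __atomic_load_n( &state, __ATOMIC_RELAXED );
}

int main( void ) {
    pthread_t t;
    pthread_create( &t, NULL, fulfil, NULL );
    while ( ! available() ) {}                      /* spin until the flag flips */
    pthread_join( t, NULL );
    printf( "future available\n" );
    return 0;
}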
libcfa/src/concurrency/locks.cfa
r2b78949 r8a930c03 239 239 } 240 240 241 bool on_selected( blocking_lock & this, select_node & node ) { return true;}241 void on_selected( blocking_lock & this, select_node & node ) {} 242 242 243 243 //----------------------------------------------------------------------------- -
libcfa/src/concurrency/locks.hfa
r2b78949 r8a930c03 32 32 #include "select.hfa" 33 33 34 #include <fstream.hfa>35 36 34 // futex headers 37 35 #include <linux/futex.h> /* Definition of FUTEX_* constants */ … … 114 112 static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); } 115 113 static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); } 116 static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { returnon_selected( (blocking_lock &)this, node ); }114 static inline void on_selected( single_acquisition_lock & this, select_node & node ) { on_selected( (blocking_lock &)this, node ); } 117 115 118 116 //---------- … … 131 129 static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); } 132 130 static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); } 133 static inline bool on_selected( owner_lock & this, select_node & node ) { returnon_selected( (blocking_lock &)this, node ); }131 static inline void on_selected( owner_lock & this, select_node & node ) { on_selected( (blocking_lock &)this, node ); } 134 132 135 133 //----------------------------------------------------------------------------- … … 621 619 } 622 620 623 static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true;}621 static inline void on_selected( simple_owner_lock & this, select_node & node ) {} 624 622 625 623 -
libcfa/src/concurrency/select.cfa
r2b78949 r8a930c03 49 49 return false; 50 50 } 51 bool on_selected( select_timeout_node & this, select_node & node ) { return true;}51 void on_selected( select_timeout_node & this, select_node & node ) {} 52 52 53 53 // Gateway routine to wait on duration -
libcfa/src/concurrency/select.hfa
r2b78949 r8a930c03 91 91 // For unregistering a select stmt on a selectable concurrency primitive 92 92 // If true is returned then the corresponding code block is run (only in non-special OR case and only if node status is not RUN) 93 bool unregister_select( T &, select_node & );93 bool unregister_select( T &, select_node & ); 94 94 95 95 // This routine is run on the selecting thread prior to executing the statement corresponding to the select_node 96 96 // passed as an arg to this routine 97 97 // If on_selected returns false, the statement is not run, if it returns true it is run. 98 boolon_selected( T &, select_node & );98 void on_selected( T &, select_node & ); 99 99 }; 100 100 … … 102 102 // Waituntil Helpers 103 103 //============================================================================================= 104 105 static inline void __make_select_node_unsat( select_node & this ) with( this ) { 106 __atomic_store_n( clause_status, __SELECT_UNSAT, __ATOMIC_SEQ_CST ); 107 } 108 static inline void __make_select_node_sat( select_node & this ) with( this ) { 109 __atomic_store_n( clause_status, __SELECT_SAT, __ATOMIC_SEQ_CST ); 110 } 104 111 105 112 // used for the 2-stage avail needed by the special OR case … … 116 123 } 117 124 118 static inline void __make_select_node_unsat( select_node & this ) with( this ) { 119 __atomic_store_n( clause_status, __SELECT_UNSAT, __ATOMIC_SEQ_CST ); 120 } 121 static inline void __make_select_node_sat( select_node & this ) with( this ) { 122 __atomic_store_n( clause_status, __SELECT_SAT, __ATOMIC_SEQ_CST ); 125 // used for the 2-stage avail by the thread who owns a pending node 126 static inline bool __pending_set_other( select_node & other, select_node & mine, unsigned long int val ) with( other ) { 127 /* paranoid */ verify( park_counter == 0p ); 128 /* paranoid */ verify( clause_status != 0p ); 129 130 unsigned long int cmp_status = __SELECT_UNSAT; 131 while( !__atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) { 132 if ( cmp_status != __SELECT_PENDING ) 133 return false; 134 135 // toggle current status flag to avoid starvation/deadlock 136 __make_select_node_unsat( mine ); 137 cmp_status = __SELECT_UNSAT; 138 if ( !__atomic_compare_exchange_n( mine.clause_status, &cmp_status, __SELECT_PENDING, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) 139 return false; 140 cmp_status = __SELECT_UNSAT; 141 } 142 return true; 123 143 } 124 144 … … 188 208 bool register_select( select_timeout_node & this, select_node & node ); 189 209 bool unregister_select( select_timeout_node & this, select_node & node ); 190 boolon_selected( select_timeout_node & this, select_node & node );210 void on_selected( select_timeout_node & this, select_node & node ); 191 211 192 212 // Gateway routines to waituntil on duration -
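The new ©__pending_set_other© above is the heart of the two-stage handshake used by the special waituntil OR case: a clause tries to claim its peer with a compare-and-swap, and when both sides are PENDING the loser briefly drops its own claim so the pair cannot deadlock or starve. The plain-C sketch below traces the same control flow with placeholder status values (the real __SELECT_* constants and select_node layout live elsewhere in select.hfa); it is an illustration, not the library code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { UNSAT = 0, SAT = 1, PENDING = 2 };           /* placeholder encodings */

typedef struct { unsigned long status; } node_t;

/* try to claim `other` on behalf of `mine`; `val` marks the peer as claimed;
   returns true if this side wins the race */
static bool pending_set_other( node_t * other, node_t * mine, unsigned long val ) {
    unsigned long cmp = UNSAT;
    while ( ! __atomic_compare_exchange_n( &other->status, &cmp, val,
            false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
        if ( cmp != PENDING ) return false;         /* peer already satisfied elsewhere */
        /* back off: release our own claim so the peer can claim us instead */
        __atomic_store_n( &mine->status, UNSAT, __ATOMIC_SEQ_CST );
        cmp = UNSAT;
        if ( ! __atomic_compare_exchange_n( &mine->status, &cmp, PENDING,
                false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
            return false;                           /* peer claimed us while we backed off */
        cmp = UNSAT;                                /* retry claiming the peer */
    }
    return true;
}

int main( void ) {
    node_t mine = { PENDING }, other = { UNSAT };
    printf( "claim free peer: %d\n",                /* prints 1 */
        pending_set_other( &other, &mine, (unsigned long)(uintptr_t)&mine ) );
    other.status = SAT;
    printf( "claim satisfied peer: %d\n",           /* prints 0 */
        pending_set_other( &other, &mine, (unsigned long)(uintptr_t)&mine ) );
    return 0;
}

In the channel.hfa hunk earlier in this changeset, __handle_pending drives this handshake from the waiting queue, discarding waiters that lose the race.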
libcfa/src/containers/lockfree.hfa
r2b78949 r8a930c03 199 199 200 200 forall( T & ) 201 struct LinkData { 202 T * volatile top; // pointer to stack top 203 uintptr_t count; // count each push 204 }; 205 206 forall( T & ) 201 207 union Link { 202 struct { // 32/64-bit x 2 203 T * volatile top; // pointer to stack top 204 uintptr_t count; // count each push 205 }; 208 LinkData(T) data; 206 209 #if __SIZEOF_INT128__ == 16 207 210 __int128 // gcc, 128-bit integer … … 220 223 void ?{}( StackLF(T) & this ) with(this) { stack.atom = 0; } 221 224 222 T * top( StackLF(T) & this ) with(this) { return stack. top; }225 T * top( StackLF(T) & this ) with(this) { return stack.data.top; } 223 226 224 227 void push( StackLF(T) & this, T & n ) with(this) { 225 228 *( &n )`next = stack; // atomic assignment unnecessary, or use CAA 226 229 for () { // busy wait 227 if ( __atomic_compare_exchange_n( &stack.atom, &( &n )`next->atom, (Link(T))@{ {&n, ( &n )`next->count + 1} }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) break; // attempt to update top node230 if ( __atomic_compare_exchange_n( &stack.atom, &( &n )`next->atom, (Link(T))@{ (LinkData(T))@{ &n, ( &n )`next->data.count + 1} }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) break; // attempt to update top node 228 231 } // for 229 232 } // push … … 232 235 Link(T) t @= stack; // atomic assignment unnecessary, or use CAA 233 236 for () { // busy wait 234 if ( t.top == 0p ) return 0p; // empty stack ? 235 if ( __atomic_compare_exchange_n( &stack.atom, &t.atom, (Link(T))@{ {( t.top )`next->top, t.count} }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return t.top; // attempt to update top node 237 if ( t.data.top == 0p ) return 0p; // empty stack ? 238 Link(T) * next = ( t.data.top )`next; 239 if ( __atomic_compare_exchange_n( &stack.atom, &t.atom, (Link(T))@{ (LinkData(T))@{ next->data.top, t.data.count } }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return t.data.top; // attempt to update top node 236 240 } // for 237 241 } // pop … … 239 243 bool unsafe_remove( StackLF(T) & this, T * node ) with(this) { 240 244 Link(T) * link = &stack; 241 for() { 242 T * next = link->top; 243 if( next == node ) { 244 link->top = ( node )`next->top; 245 for () { 246 // TODO: Avoiding some problems with double fields access. 247 LinkData(T) * data = &link->data; 248 T * next = (T *)&(*data).top; 249 if ( next == node ) { 250 data->top = ( node )`next->data.top; 245 251 return true; 246 252 } 247 if ( next == 0p ) return false;253 if ( next == 0p ) return false; 248 254 link = ( next )`next; 249 255 } -
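The Link/LinkData split above exists so the top pointer and its push counter can be compare-and-swapped as one 128-bit word, the classic counted-pointer defence against ABA. A stand-alone plain-C sketch of the same structure follows; it is illustrative, not the CFA container, assumes a 64-bit target, and on x86-64 should be built with something like gcc -mcx16 stack.c -latomic.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef struct Node Node;

typedef struct {
    Node * top;                     /* current stack top */
    uintptr_t count;                /* bumped on every push to defeat ABA */
} LinkData;

typedef union {
    LinkData data;
    __int128 atom;                  /* the pair viewed as one 128-bit word for CAS */
} Link;

struct Node {
    int value;
    Link next;                      /* snapshot of the head when this node was pushed */
};

typedef struct { Link stack; } StackLF;

static void push( StackLF * s, Node * n ) {
    n->next = s->stack;                                     /* plain read; the CAS validates it */
    for ( ;; ) {
        Link want = { .data = { n, n->next.data.count + 1 } };
        if ( __atomic_compare_exchange_n( &s->stack.atom, &n->next.atom, want.atom,
                false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) break;
        /* on failure n->next now holds the freshly observed head; retry */
    }
}

static Node * pop( StackLF * s ) {
    Link t = s->stack;                                      /* plain read; the CAS validates it */
    for ( ;; ) {
        if ( t.data.top == NULL ) return NULL;              /* empty stack */
        Link want = { .data = { t.data.top->next.data.top, t.data.count } };
        if ( __atomic_compare_exchange_n( &s->stack.atom, &t.atom, want.atom,
                false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return t.data.top;
        /* on failure t holds the freshly observed head; retry */
    }
}

int main( void ) {
    StackLF s = { .stack = { .data = { NULL, 0 } } };
    Node a = { 1, { .data = { NULL, 0 } } }, b = { 2, { .data = { NULL, 0 } } };
    push( &s, &a );
    push( &s, &b );
    Node * first = pop( &s );
    Node * second = pop( &s );
    printf( "%d %d\n", first->value, second->value );       /* prints "2 1" */
    return 0;
}

As in the diff above, the counter only changes on push, which is enough to make a stale pointer-plus-count pair fail the CAS even when the same node address reappears at the top.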
libcfa/src/fstream.cfa
r2b78949 r8a930c03 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Apr 9 14:55:54 202213 // Update Count : 51 512 // Last Modified On : Mon Jun 5 22:00:23 2023 13 // Update Count : 518 14 14 // 15 15 … … 117 117 } // for 118 118 if ( file == 0p ) { 119 throw ( Open_Failure){ os };119 throw (open_failure){ os }; 120 120 // abort | IO_MSG "open output file \"" | name | "\"" | nl | strerror( errno ); 121 121 } // if … … 137 137 } // for 138 138 if ( ret == EOF ) { 139 throw ( Close_Failure){ os };139 throw (close_failure){ os }; 140 140 // abort | IO_MSG "close output" | nl | strerror( errno ); 141 141 } // if … … 145 145 ofstream & write( ofstream & os, const char data[], size_t size ) { 146 146 if ( fail( os ) ) { 147 throw ( Write_Failure){ os };147 throw (write_failure){ os }; 148 148 // abort | IO_MSG "attempt write I/O on failed stream"; 149 149 } // if 150 150 151 151 if ( fwrite( data, 1, size, (FILE *)(os.file$) ) != size ) { 152 throw ( Write_Failure){ os };152 throw (write_failure){ os }; 153 153 // abort | IO_MSG "write" | nl | strerror( errno ); 154 154 } // if … … 240 240 } // for 241 241 if ( file == 0p ) { 242 throw ( Open_Failure){ is };242 throw (open_failure){ is }; 243 243 // abort | IO_MSG "open input file \"" | name | "\"" | nl | strerror( errno ); 244 244 } // if … … 260 260 } // for 261 261 if ( ret == EOF ) { 262 throw ( Close_Failure){ is };262 throw (close_failure){ is }; 263 263 // abort | IO_MSG "close input" | nl | strerror( errno ); 264 264 } // if … … 268 268 ifstream & read( ifstream & is, char data[], size_t size ) { 269 269 if ( fail( is ) ) { 270 throw ( Read_Failure){ is };270 throw (read_failure){ is }; 271 271 // abort | IO_MSG "attempt read I/O on failed stream"; 272 272 } // if 273 273 274 274 if ( fread( data, size, 1, (FILE *)(is.file$) ) == 0 ) { 275 throw ( Read_Failure){ is };275 throw (read_failure){ is }; 276 276 // abort | IO_MSG "read" | nl | strerror( errno ); 277 277 } // if … … 318 318 319 319 320 static vtable( Open_Failure) Open_Failure_vt;320 static vtable(open_failure) open_failure_vt; 321 321 322 322 // exception I/O constructors 323 void ?{}( Open_Failure & ex, ofstream & ostream ) with(ex) {324 virtual_table = & Open_Failure_vt;323 void ?{}( open_failure & ex, ofstream & ostream ) with(ex) { 324 virtual_table = &open_failure_vt; 325 325 ostream = &ostream; 326 326 tag = 1; 327 327 } // ?{} 328 328 329 void ?{}( Open_Failure & ex, ifstream & istream ) with(ex) {330 virtual_table = & Open_Failure_vt;329 void ?{}( open_failure & ex, ifstream & istream ) with(ex) { 330 virtual_table = &open_failure_vt; 331 331 istream = &istream; 332 332 tag = 0; … … 334 334 335 335 336 static vtable( Close_Failure) Close_Failure_vt;336 static vtable(close_failure) close_failure_vt; 337 337 338 338 // exception I/O constructors 339 void ?{}( Close_Failure & ex, ofstream & ostream ) with(ex) {340 virtual_table = & Close_Failure_vt;339 void ?{}( close_failure & ex, ofstream & ostream ) with(ex) { 340 virtual_table = &close_failure_vt; 341 341 ostream = &ostream; 342 342 tag = 1; 343 343 } // ?{} 344 344 345 void ?{}( Close_Failure & ex, ifstream & istream ) with(ex) {346 virtual_table = & Close_Failure_vt;345 void ?{}( close_failure & ex, ifstream & istream ) with(ex) { 346 virtual_table = &close_failure_vt; 347 347 istream = &istream; 348 348 tag = 0; … … 350 350 351 351 352 static vtable( Write_Failure) Write_Failure_vt;352 static vtable(write_failure) write_failure_vt; 353 353 354 354 
// exception I/O constructors 355 void ?{}( Write_Failure & ex, ofstream & ostream ) with(ex) {356 virtual_table = & Write_Failure_vt;355 void ?{}( write_failure & ex, ofstream & ostream ) with(ex) { 356 virtual_table = &write_failure_vt; 357 357 ostream = &ostream; 358 358 tag = 1; 359 359 } // ?{} 360 360 361 void ?{}( Write_Failure & ex, ifstream & istream ) with(ex) {362 virtual_table = & Write_Failure_vt;361 void ?{}( write_failure & ex, ifstream & istream ) with(ex) { 362 virtual_table = &write_failure_vt; 363 363 istream = &istream; 364 364 tag = 0; … … 366 366 367 367 368 static vtable( Read_Failure) Read_Failure_vt;368 static vtable(read_failure) read_failure_vt; 369 369 370 370 // exception I/O constructors 371 void ?{}( Read_Failure & ex, ofstream & ostream ) with(ex) {372 virtual_table = & Read_Failure_vt;371 void ?{}( read_failure & ex, ofstream & ostream ) with(ex) { 372 virtual_table = &read_failure_vt; 373 373 ostream = &ostream; 374 374 tag = 1; 375 375 } // ?{} 376 376 377 void ?{}( Read_Failure & ex, ifstream & istream ) with(ex) {378 virtual_table = & Read_Failure_vt;377 void ?{}( read_failure & ex, ifstream & istream ) with(ex) { 378 virtual_table = &read_failure_vt; 379 379 istream = &istream; 380 380 tag = 0; 381 381 } // ?{} 382 382 383 // void throw Open_Failure( ofstream & ostream ) {384 // Open_Failure exc = { ostream };383 // void throwopen_failure( ofstream & ostream ) { 384 // open_failure exc = { ostream }; 385 385 // } 386 386 387 // void throw Open_Failure( ifstream & istream ) {388 // Open_Failure exc = { istream };387 // void throwopen_failure( ifstream & istream ) { 388 // open_failure exc = { istream }; 389 389 // } 390 390 -
libcfa/src/fstream.hfa
r2b78949 r8a930c03 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Oct 10 09:37:32 202113 // Update Count : 24 312 // Last Modified On : Mon Jun 5 22:00:20 2023 13 // Update Count : 246 14 14 // 15 15 … … 137 137 138 138 139 exception Open_Failure {139 exception open_failure { 140 140 union { 141 141 ofstream * ostream; … … 146 146 }; 147 147 148 void ?{}( Open_Failure & this, ofstream & );149 void ?{}( Open_Failure & this, ifstream & );148 void ?{}( open_failure & this, ofstream & ); 149 void ?{}( open_failure & this, ifstream & ); 150 150 151 exception Close_Failure {151 exception close_failure { 152 152 union { 153 153 ofstream * ostream; … … 158 158 }; 159 159 160 void ?{}( Close_Failure & this, ofstream & );161 void ?{}( Close_Failure & this, ifstream & );160 void ?{}( close_failure & this, ofstream & ); 161 void ?{}( close_failure & this, ifstream & ); 162 162 163 exception Write_Failure {163 exception write_failure { 164 164 union { 165 165 ofstream * ostream; … … 170 170 }; 171 171 172 void ?{}( Write_Failure & this, ofstream & );173 void ?{}( Write_Failure & this, ifstream & );172 void ?{}( write_failure & this, ofstream & ); 173 void ?{}( write_failure & this, ifstream & ); 174 174 175 exception Read_Failure {175 exception read_failure { 176 176 union { 177 177 ofstream * ostream; … … 182 182 }; 183 183 184 void ?{}( Read_Failure & this, ofstream & );185 void ?{}( Read_Failure & this, ifstream & );184 void ?{}( read_failure & this, ofstream & ); 185 void ?{}( read_failure & this, ifstream & ); 186 186 187 187 // Local Variables: // -
libcfa/src/math.trait.hfa
r2b78949 r8a930c03 10 10 // Created On : Fri Jul 16 15:40:52 2021 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T hu Feb 2 11:36:56202313 // Update Count : 2 012 // Last Modified On : Tue Jun 6 07:59:17 2023 13 // Update Count : 24 14 14 // 15 15 … … 17 17 18 18 forall( U ) 19 trait Not {19 trait not { 20 20 void ?{}( U &, zero_t ); 21 21 int !?( U ); 22 }; // Not22 }; // not 23 23 24 forall( T | Not( T ) )25 trait Equality {24 forall( T | not( T ) ) 25 trait equality { 26 26 int ?==?( T, T ); 27 27 int ?!=?( T, T ); 28 }; // Equality28 }; // equality 29 29 30 forall( U | Equality( U ) )31 trait Relational {30 forall( U | equality( U ) ) 31 trait relational { 32 32 int ?<?( U, U ); 33 33 int ?<=?( U, U ); 34 34 int ?>?( U, U ); 35 35 int ?>=?( U, U ); 36 }; // Relational36 }; // relational 37 37 38 38 forall ( T ) 39 trait Signed { 39 trait Signed { // must be capitalized, conflict with keyword signed 40 40 T +?( T ); 41 41 T -?( T ); … … 44 44 45 45 forall( U | Signed( U ) ) 46 trait Additive {46 trait additive { 47 47 U ?+?( U, U ); 48 48 U ?-?( U, U ); 49 49 U ?+=?( U &, U ); 50 50 U ?-=?( U &, U ); 51 }; // Additive51 }; // additive 52 52 53 forall( T | Additive( T ) )54 trait Incdec {53 forall( T | additive( T ) ) 54 trait inc_dec { 55 55 void ?{}( T &, one_t ); 56 56 // T ?++( T & ); … … 58 58 // T ?--( T & ); 59 59 // T --?( T & ); 60 }; // Incdec60 }; // inc_dec 61 61 62 forall( U | Incdec( U ) )63 trait Multiplicative {62 forall( U | inc_dec( U ) ) 63 trait multiplicative { 64 64 U ?*?( U, U ); 65 65 U ?/?( U, U ); 66 66 U ?%?( U, U ); 67 67 U ?/=?( U &, U ); 68 }; // Multiplicative68 }; // multiplicative 69 69 70 forall( T | Relational( T ) | Multiplicative( T ) )71 trait Arithmetic {72 }; // Arithmetic70 forall( T | relational( T ) | multiplicative( T ) ) 71 trait arithmetic { 72 }; // arithmetic 73 73 74 74 // Local Variables: // -
libcfa/src/parseconfig.cfa
r2b78949 r8a930c03 144 144 in | nl; // ignore remainder of line 145 145 } // for 146 } catch( Open_Failure * ex; ex->istream == &in ) {146 } catch( open_failure * ex; ex->istream == &in ) { 147 147 delete( kv_pairs ); 148 148 throw *ex; … … 203 203 204 204 205 forall(T | Relational( T ))205 forall(T | relational( T )) 206 206 [ bool ] is_nonnegative( & T value ) { 207 207 T zero_val = 0; … … 209 209 } 210 210 211 forall(T | Relational( T ))211 forall(T | relational( T )) 212 212 [ bool ] is_positive( & T value ) { 213 213 T zero_val = 0; … … 215 215 } 216 216 217 forall(T | Relational( T ))217 forall(T | relational( T )) 218 218 [ bool ] is_nonpositive( & T value ) { 219 219 T zero_val = 0; … … 221 221 } 222 222 223 forall(T | Relational( T ))223 forall(T | relational( T )) 224 224 [ bool ] is_negative( & T value ) { 225 225 T zero_val = 0; -
libcfa/src/parseconfig.hfa
r2b78949 r8a930c03 107 107 108 108 109 forall(T | Relational( T ))109 forall(T | relational( T )) 110 110 [ bool ] is_nonnegative( & T ); 111 111 112 forall(T | Relational( T ))112 forall(T | relational( T )) 113 113 [ bool ] is_positive( & T ); 114 114 115 forall(T | Relational( T ))115 forall(T | relational( T )) 116 116 [ bool ] is_nonpositive( & T ); 117 117 118 forall(T | Relational( T ))118 forall(T | relational( T )) 119 119 [ bool ] is_negative( & T ); 120 120 -
libcfa/src/rational.cfa
r2b78949 r8a930c03 10 10 // Created On : Wed Apr 6 17:54:28 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Aug 25 18:09:58 202213 // Update Count : 19 412 // Last Modified On : Mon Jun 5 22:49:06 2023 13 // Update Count : 196 14 14 // 15 15 … … 20 20 #pragma GCC visibility push(default) 21 21 22 forall( T | Arithmetic( T ) ) {22 forall( T | arithmetic( T ) ) { 23 23 // helper routines 24 24 … … 39 39 abort | "Invalid rational number construction: denominator cannot be equal to 0."; 40 40 } // exit 41 if ( d < (T){0} ) { d = -d; n = -n; } // move sign to numerator41 if ( d < (T){0} ) { d = -d; n = -n; } // move sign to numerator 42 42 return gcd( abs( n ), d ); // simplify 43 } // Rationalnumber::simplify43 } // simplify 44 44 45 45 // constructors 46 46 47 void ?{}( Rational(T) & r, zero_t ) {47 void ?{}( rational(T) & r, zero_t ) { 48 48 r{ (T){0}, (T){1} }; 49 49 } // rational 50 50 51 void ?{}( Rational(T) & r, one_t ) {51 void ?{}( rational(T) & r, one_t ) { 52 52 r{ (T){1}, (T){1} }; 53 53 } // rational 54 54 55 void ?{}( Rational(T) & r ) {55 void ?{}( rational(T) & r ) { 56 56 r{ (T){0}, (T){1} }; 57 57 } // rational 58 58 59 void ?{}( Rational(T) & r, T n ) {59 void ?{}( rational(T) & r, T n ) { 60 60 r{ n, (T){1} }; 61 61 } // rational 62 62 63 void ?{}( Rational(T) & r, T n, T d ) {64 T t = simplify( n, d ); // simplify63 void ?{}( rational(T) & r, T n, T d ) { 64 T t = simplify( n, d ); // simplify 65 65 r.[numerator, denominator] = [n / t, d / t]; 66 66 } // rational … … 68 68 // getter for numerator/denominator 69 69 70 T numerator( Rational(T) r ) {70 T numerator( rational(T) r ) { 71 71 return r.numerator; 72 72 } // numerator 73 73 74 T denominator( Rational(T) r ) {74 T denominator( rational(T) r ) { 75 75 return r.denominator; 76 76 } // denominator 77 77 78 [ T, T ] ?=?( & [ T, T ] dest, Rational(T) src ) {78 [ T, T ] ?=?( & [ T, T ] dest, rational(T) src ) { 79 79 return dest = src.[ numerator, denominator ]; 80 80 } // ?=? … … 82 82 // setter for numerator/denominator 83 83 84 T numerator( Rational(T) r, T n ) {84 T numerator( rational(T) r, T n ) { 85 85 T prev = r.numerator; 86 T t = gcd( abs( n ), r.denominator ); // simplify86 T t = gcd( abs( n ), r.denominator ); // simplify 87 87 r.[numerator, denominator] = [n / t, r.denominator / t]; 88 88 return prev; 89 89 } // numerator 90 90 91 T denominator( Rational(T) r, T d ) {91 T denominator( rational(T) r, T d ) { 92 92 T prev = r.denominator; 93 T t = simplify( r.numerator, d ); // simplify93 T t = simplify( r.numerator, d ); // simplify 94 94 r.[numerator, denominator] = [r.numerator / t, d / t]; 95 95 return prev; … … 98 98 // comparison 99 99 100 int ?==?( Rational(T) l, Rational(T) r ) {100 int ?==?( rational(T) l, rational(T) r ) { 101 101 return l.numerator * r.denominator == l.denominator * r.numerator; 102 102 } // ?==? 103 103 104 int ?!=?( Rational(T) l, Rational(T) r ) {104 int ?!=?( rational(T) l, rational(T) r ) { 105 105 return ! ( l == r ); 106 106 } // ?!=? 107 107 108 int ?!=?( Rational(T) l, zero_t ) {109 return ! ( l == ( Rational(T)){ 0 } );108 int ?!=?( rational(T) l, zero_t ) { 109 return ! ( l == (rational(T)){ 0 } ); 110 110 } // ?!=? 111 111 112 int ?<?( Rational(T) l, Rational(T) r ) {112 int ?<?( rational(T) l, rational(T) r ) { 113 113 return l.numerator * r.denominator < l.denominator * r.numerator; 114 114 } // ?<? 
115 115 116 int ?<=?( Rational(T) l, Rational(T) r ) {116 int ?<=?( rational(T) l, rational(T) r ) { 117 117 return l.numerator * r.denominator <= l.denominator * r.numerator; 118 118 } // ?<=? 119 119 120 int ?>?( Rational(T) l, Rational(T) r ) {120 int ?>?( rational(T) l, rational(T) r ) { 121 121 return ! ( l <= r ); 122 122 } // ?>? 123 123 124 int ?>=?( Rational(T) l, Rational(T) r ) {124 int ?>=?( rational(T) l, rational(T) r ) { 125 125 return ! ( l < r ); 126 126 } // ?>=? … … 128 128 // arithmetic 129 129 130 Rational(T) +?( Rational(T) r ) {131 return ( Rational(T)){ r.numerator, r.denominator };130 rational(T) +?( rational(T) r ) { 131 return (rational(T)){ r.numerator, r.denominator }; 132 132 } // +? 133 133 134 Rational(T) -?( Rational(T) r ) {135 return ( Rational(T)){ -r.numerator, r.denominator };134 rational(T) -?( rational(T) r ) { 135 return (rational(T)){ -r.numerator, r.denominator }; 136 136 } // -? 137 137 138 Rational(T) ?+?( Rational(T) l, Rational(T) r ) {138 rational(T) ?+?( rational(T) l, rational(T) r ) { 139 139 if ( l.denominator == r.denominator ) { // special case 140 return ( Rational(T)){ l.numerator + r.numerator, l.denominator };140 return (rational(T)){ l.numerator + r.numerator, l.denominator }; 141 141 } else { 142 return ( Rational(T)){ l.numerator * r.denominator + l.denominator * r.numerator, l.denominator * r.denominator };142 return (rational(T)){ l.numerator * r.denominator + l.denominator * r.numerator, l.denominator * r.denominator }; 143 143 } // if 144 144 } // ?+? 145 145 146 Rational(T) ?+=?( Rational(T) & l, Rational(T) r ) {146 rational(T) ?+=?( rational(T) & l, rational(T) r ) { 147 147 l = l + r; 148 148 return l; 149 149 } // ?+? 150 150 151 Rational(T) ?+=?( Rational(T) & l, one_t ) {152 l = l + ( Rational(T)){ 1 };151 rational(T) ?+=?( rational(T) & l, one_t ) { 152 l = l + (rational(T)){ 1 }; 153 153 return l; 154 154 } // ?+? 155 155 156 Rational(T) ?-?( Rational(T) l, Rational(T) r ) {156 rational(T) ?-?( rational(T) l, rational(T) r ) { 157 157 if ( l.denominator == r.denominator ) { // special case 158 return ( Rational(T)){ l.numerator - r.numerator, l.denominator };158 return (rational(T)){ l.numerator - r.numerator, l.denominator }; 159 159 } else { 160 return ( Rational(T)){ l.numerator * r.denominator - l.denominator * r.numerator, l.denominator * r.denominator };160 return (rational(T)){ l.numerator * r.denominator - l.denominator * r.numerator, l.denominator * r.denominator }; 161 161 } // if 162 162 } // ?-? 163 163 164 Rational(T) ?-=?( Rational(T) & l, Rational(T) r ) {164 rational(T) ?-=?( rational(T) & l, rational(T) r ) { 165 165 l = l - r; 166 166 return l; 167 167 } // ?-? 168 168 169 Rational(T) ?-=?( Rational(T) & l, one_t ) {170 l = l - ( Rational(T)){ 1 };169 rational(T) ?-=?( rational(T) & l, one_t ) { 170 l = l - (rational(T)){ 1 }; 171 171 return l; 172 172 } // ?-? 173 173 174 Rational(T) ?*?( Rational(T) l, Rational(T) r ) {175 return ( Rational(T)){ l.numerator * r.numerator, l.denominator * r.denominator };174 rational(T) ?*?( rational(T) l, rational(T) r ) { 175 return (rational(T)){ l.numerator * r.numerator, l.denominator * r.denominator }; 176 176 } // ?*? 177 177 178 Rational(T) ?*=?( Rational(T) & l, Rational(T) r ) {178 rational(T) ?*=?( rational(T) & l, rational(T) r ) { 179 179 return l = l * r; 180 180 } // ?*? 
181 181 182 Rational(T) ?/?( Rational(T) l, Rational(T) r ) {182 rational(T) ?/?( rational(T) l, rational(T) r ) { 183 183 if ( r.numerator < (T){0} ) { 184 184 r.[numerator, denominator] = [-r.numerator, -r.denominator]; 185 185 } // if 186 return ( Rational(T)){ l.numerator * r.denominator, l.denominator * r.numerator };186 return (rational(T)){ l.numerator * r.denominator, l.denominator * r.numerator }; 187 187 } // ?/? 188 188 189 Rational(T) ?/=?( Rational(T) & l, Rational(T) r ) {189 rational(T) ?/=?( rational(T) & l, rational(T) r ) { 190 190 return l = l / r; 191 191 } // ?/? … … 194 194 195 195 forall( istype & | istream( istype ) | { istype & ?|?( istype &, T & ); } ) 196 istype & ?|?( istype & is, Rational(T) & r ) {196 istype & ?|?( istype & is, rational(T) & r ) { 197 197 is | r.numerator | r.denominator; 198 198 T t = simplify( r.numerator, r.denominator ); … … 203 203 204 204 forall( ostype & | ostream( ostype ) | { ostype & ?|?( ostype &, T ); } ) { 205 ostype & ?|?( ostype & os, Rational(T) r ) {205 ostype & ?|?( ostype & os, rational(T) r ) { 206 206 return os | r.numerator | '/' | r.denominator; 207 207 } // ?|? 208 208 209 void ?|?( ostype & os, Rational(T) r ) {209 void ?|?( ostype & os, rational(T) r ) { 210 210 (ostype &)(os | r); ends( os ); 211 211 } // ?|? … … 213 213 } // distribution 214 214 215 forall( T | Arithmetic( T ) | { T ?\?( T, unsigned long ); } ) {216 Rational(T) ?\?( Rational(T) x, long int y ) {215 forall( T | arithmetic( T ) | { T ?\?( T, unsigned long ); } ) { 216 rational(T) ?\?( rational(T) x, long int y ) { 217 217 if ( y < 0 ) { 218 return ( Rational(T)){ x.denominator \ -y, x.numerator \ -y };218 return (rational(T)){ x.denominator \ -y, x.numerator \ -y }; 219 219 } else { 220 return ( Rational(T)){ x.numerator \ y, x.denominator \ y };220 return (rational(T)){ x.numerator \ y, x.denominator \ y }; 221 221 } // if 222 222 } // ?\? 223 223 224 Rational(T) ?\=?( Rational(T) & x, long int y ) {224 rational(T) ?\=?( rational(T) & x, long int y ) { 225 225 return x = x \ y; 226 226 } // ?\? … … 229 229 // conversion 230 230 231 forall( T | Arithmetic( T ) | { double convert( T ); } )232 double widen( Rational(T) r ) {231 forall( T | arithmetic( T ) | { double convert( T ); } ) 232 double widen( rational(T) r ) { 233 233 return convert( r.numerator ) / convert( r.denominator ); 234 234 } // widen 235 235 236 forall( T | Arithmetic( T ) | { double convert( T ); T convert( double ); } )237 Rational(T) narrow( double f, T md ) {236 forall( T | arithmetic( T ) | { double convert( T ); T convert( double ); } ) 237 rational(T) narrow( double f, T md ) { 238 238 // http://www.ics.uci.edu/~eppstein/numth/frap.c 239 if ( md <= (T){1} ) { // maximum fractional digits too small?240 return ( Rational(T)){ convert( f ), (T){1}};// truncate fraction239 if ( md <= (T){1} ) { // maximum fractional digits too small? 240 return (rational(T)){ convert( f ), (T){1}}; // truncate fraction 241 241 } // if 242 242 … … 260 260 if ( f > (double)0x7FFFFFFF ) break; // representation failure 261 261 } // for 262 return ( Rational(T)){ m00, m10 };262 return (rational(T)){ m00, m10 }; 263 263 } // narrow 264 264 -
libcfa/src/rational.hfa
r2b78949 r8a930c03 12 12 // Created On : Wed Apr 6 17:56:25 2016 13 13 // Last Modified By : Peter A. Buhr 14 // Last Modified On : Tue Jul 20 17:45:29 202115 // Update Count : 11 814 // Last Modified On : Mon Jun 5 22:49:05 2023 15 // Update Count : 119 16 16 // 17 17 … … 19 19 20 20 #include "iostream.hfa" 21 #include "math.trait.hfa" // Arithmetic21 #include "math.trait.hfa" // arithmetic 22 22 23 23 // implementation 24 24 25 forall( T | Arithmetic( T ) ) {26 struct Rational {25 forall( T | arithmetic( T ) ) { 26 struct rational { 27 27 T numerator, denominator; // invariant: denominator > 0 28 }; // Rational28 }; // rational 29 29 30 30 // constructors 31 31 32 void ?{}( Rational(T) & r );33 void ?{}( Rational(T) & r, zero_t );34 void ?{}( Rational(T) & r, one_t );35 void ?{}( Rational(T) & r, T n );36 void ?{}( Rational(T) & r, T n, T d );32 void ?{}( rational(T) & r ); 33 void ?{}( rational(T) & r, zero_t ); 34 void ?{}( rational(T) & r, one_t ); 35 void ?{}( rational(T) & r, T n ); 36 void ?{}( rational(T) & r, T n, T d ); 37 37 38 38 // numerator/denominator getter 39 39 40 T numerator( Rational(T) r );41 T denominator( Rational(T) r );42 [ T, T ] ?=?( & [ T, T ] dest, Rational(T) src );40 T numerator( rational(T) r ); 41 T denominator( rational(T) r ); 42 [ T, T ] ?=?( & [ T, T ] dest, rational(T) src ); 43 43 44 44 // numerator/denominator setter 45 45 46 T numerator( Rational(T) r, T n );47 T denominator( Rational(T) r, T d );46 T numerator( rational(T) r, T n ); 47 T denominator( rational(T) r, T d ); 48 48 49 49 // comparison 50 50 51 int ?==?( Rational(T) l, Rational(T) r );52 int ?!=?( Rational(T) l, Rational(T) r );53 int ?!=?( Rational(T) l, zero_t ); // => !54 int ?<?( Rational(T) l, Rational(T) r );55 int ?<=?( Rational(T) l, Rational(T) r );56 int ?>?( Rational(T) l, Rational(T) r );57 int ?>=?( Rational(T) l, Rational(T) r );51 int ?==?( rational(T) l, rational(T) r ); 52 int ?!=?( rational(T) l, rational(T) r ); 53 int ?!=?( rational(T) l, zero_t ); // => ! 
54 int ?<?( rational(T) l, rational(T) r ); 55 int ?<=?( rational(T) l, rational(T) r ); 56 int ?>?( rational(T) l, rational(T) r ); 57 int ?>=?( rational(T) l, rational(T) r ); 58 58 59 59 // arithmetic 60 60 61 Rational(T) +?( Rational(T) r );62 Rational(T) -?( Rational(T) r );63 Rational(T) ?+?( Rational(T) l, Rational(T) r );64 Rational(T) ?+=?( Rational(T) & l, Rational(T) r );65 Rational(T) ?+=?( Rational(T) & l, one_t ); // => ++?, ?++66 Rational(T) ?-?( Rational(T) l, Rational(T) r );67 Rational(T) ?-=?( Rational(T) & l, Rational(T) r );68 Rational(T) ?-=?( Rational(T) & l, one_t ); // => --?, ?--69 Rational(T) ?*?( Rational(T) l, Rational(T) r );70 Rational(T) ?*=?( Rational(T) & l, Rational(T) r );71 Rational(T) ?/?( Rational(T) l, Rational(T) r );72 Rational(T) ?/=?( Rational(T) & l, Rational(T) r );61 rational(T) +?( rational(T) r ); 62 rational(T) -?( rational(T) r ); 63 rational(T) ?+?( rational(T) l, rational(T) r ); 64 rational(T) ?+=?( rational(T) & l, rational(T) r ); 65 rational(T) ?+=?( rational(T) & l, one_t ); // => ++?, ?++ 66 rational(T) ?-?( rational(T) l, rational(T) r ); 67 rational(T) ?-=?( rational(T) & l, rational(T) r ); 68 rational(T) ?-=?( rational(T) & l, one_t ); // => --?, ?-- 69 rational(T) ?*?( rational(T) l, rational(T) r ); 70 rational(T) ?*=?( rational(T) & l, rational(T) r ); 71 rational(T) ?/?( rational(T) l, rational(T) r ); 72 rational(T) ?/=?( rational(T) & l, rational(T) r ); 73 73 74 74 // I/O 75 75 forall( istype & | istream( istype ) | { istype & ?|?( istype &, T & ); } ) 76 istype & ?|?( istype &, Rational(T) & );76 istype & ?|?( istype &, rational(T) & ); 77 77 78 78 forall( ostype & | ostream( ostype ) | { ostype & ?|?( ostype &, T ); } ) { 79 ostype & ?|?( ostype &, Rational(T) );80 void ?|?( ostype &, Rational(T) );79 ostype & ?|?( ostype &, rational(T) ); 80 void ?|?( ostype &, rational(T) ); 81 81 } // distribution 82 82 } // distribution 83 83 84 forall( T | Arithmetic( T ) | { T ?\?( T, unsigned long ); } ) {85 Rational(T) ?\?( Rational(T) x, long int y );86 Rational(T) ?\=?( Rational(T) & x, long int y );84 forall( T | arithmetic( T ) | { T ?\?( T, unsigned long ); } ) { 85 rational(T) ?\?( rational(T) x, long int y ); 86 rational(T) ?\=?( rational(T) & x, long int y ); 87 87 } // distribution 88 88 89 89 // conversion 90 forall( T | Arithmetic( T ) | { double convert( T ); } )91 double widen( Rational(T) r );92 forall( T | Arithmetic( T ) | { double convert( T ); T convert( double );} )93 Rational(T) narrow( double f, T md );90 forall( T | arithmetic( T ) | { double convert( T ); } ) 91 double widen( rational(T) r ); 92 forall( T | arithmetic( T ) | { double convert( T ); T convert( double );} ) 93 rational(T) narrow( double f, T md ); 94 94 95 95 // Local Variables: // -
src/AST/DeclReplacer.hpp
r2b78949 r8a930c03 18 18 #include <unordered_map> 19 19 20 #include "Node.hpp" 20 namespace ast { 21 class DeclWithType; 22 class Expr; 23 class Node; 24 class TypeDecl; 25 } 21 26 22 27 namespace ast { 23 class DeclWithType;24 class TypeDecl;25 class Expr;26 28 27 namespace DeclReplacer { 28 using DeclMap = std::unordered_map< const DeclWithType *, const DeclWithType * >; 29 using TypeMap = std::unordered_map< const TypeDecl *, const TypeDecl * >; 30 using ExprMap = std::unordered_map< const DeclWithType *, const Expr * >; 29 namespace DeclReplacer { 31 30 32 const Node * replace( const Node * node, const DeclMap & declMap, bool debug = false ); 33 const Node * replace( const Node * node, const TypeMap & typeMap, bool debug = false ); 34 const Node * replace( const Node * node, const DeclMap & declMap, const TypeMap & typeMap, bool debug = false ); 35 const Node * replace( const Node * node, const ExprMap & exprMap); 36 } 31 using DeclMap = std::unordered_map< const DeclWithType *, const DeclWithType * >; 32 using TypeMap = std::unordered_map< const TypeDecl *, const TypeDecl * >; 33 using ExprMap = std::unordered_map< const DeclWithType *, const Expr * >; 34 35 const Node * replace( const Node * node, const DeclMap & declMap, bool debug = false ); 36 const Node * replace( const Node * node, const TypeMap & typeMap, bool debug = false ); 37 const Node * replace( const Node * node, const DeclMap & declMap, const TypeMap & typeMap, bool debug = false ); 38 const Node * replace( const Node * node, const ExprMap & exprMap); 39 40 } 41 37 42 } 38 43 -
src/AST/Pass.hpp
r2b78949 r8a930c03 414 414 }; 415 415 416 /// Use when the templated visitor should update the symbol table 416 /// Use when the templated visitor should update the symbol table, 417 /// that is, when your pass core needs to query the symbol table. 418 /// Expected setups: 419 /// - For master passes that kick off at the compilation unit 420 /// - before resolver: extend WithSymbolTableX<IgnoreErrors> 421 /// - after resolver: extend WithSymbolTable and use defaults 422 /// - (FYI, for completeness, the resolver's main pass uses ValidateOnAdd when it kicks off) 423 /// - For helper passes that kick off at arbitrary points in the AST: 424 /// - take an existing symbol table as a parameter, extend WithSymbolTable, 425 /// and construct with WithSymbolTable(const SymbolTable &) 417 426 struct WithSymbolTable { 418 SymbolTable symtab; 427 WithSymbolTable(const ast::SymbolTable & from) : symtab(from) {} 428 WithSymbolTable(ast::SymbolTable::ErrorDetection errorMode = ast::SymbolTable::ErrorDetection::AssertClean) : symtab(errorMode) {} 429 ast::SymbolTable symtab; 430 }; 431 template <ast::SymbolTable::ErrorDetection errorMode> 432 struct WithSymbolTableX : WithSymbolTable { 433 WithSymbolTableX() : WithSymbolTable(errorMode) {} 419 434 }; 420 435 -
src/AST/Pass.impl.hpp
r2b78949 r8a930c03 20 20 #include <unordered_map> 21 21 22 #include "AST/Copy.hpp" 22 23 #include "AST/TranslationUnit.hpp" 23 24 #include "AST/TypeSubstitution.hpp" … … 45 46 46 47 #ifdef PEDANTIC_PASS_ASSERT 47 #define __pedantic_pass_assert(...) assert (__VA_ARGS__)48 #define __pedantic_pass_assert(...) assert(__VA_ARGS__) 48 49 #define __pedantic_pass_assertf(...) assertf(__VA_ARGS__) 49 50 #else … … 71 72 template<typename it_t, template <class...> class container_t> 72 73 static inline void take_all( it_t it, container_t<ast::ptr<ast::Decl>> * decls, bool * mutated = nullptr ) { 73 if (empty(decls)) return;74 if ( empty( decls ) ) return; 74 75 75 76 std::transform(decls->begin(), decls->end(), it, [](const ast::Decl * decl) -> auto { … … 77 78 }); 78 79 decls->clear(); 79 if (mutated) *mutated = true;80 if ( mutated ) *mutated = true; 80 81 } 81 82 82 83 template<typename it_t, template <class...> class container_t> 83 84 static inline void take_all( it_t it, container_t<ast::ptr<ast::Stmt>> * stmts, bool * mutated = nullptr ) { 84 if (empty(stmts)) return;85 if ( empty( stmts ) ) return; 85 86 86 87 std::move(stmts->begin(), stmts->end(), it); 87 88 stmts->clear(); 88 if (mutated) *mutated = true;89 if ( mutated ) *mutated = true; 89 90 } 90 91 … … 92 93 /// Check if should be skipped, different for pointers and containers 93 94 template<typename node_t> 94 bool skip( const ast::ptr<node_t> & val ) {95 bool skip( const ast::ptr<node_t> & val ) { 95 96 return !val; 96 97 } … … 109 110 110 111 template<typename node_t> 111 const node_t & get( const node_t & val, long ) {112 const node_t & get( const node_t & val, long ) { 112 113 return val; 113 114 } … … 125 126 } 126 127 } 127 128 template< typename core_t > 129 template< typename node_t > 130 auto ast::Pass< core_t >::call_accept( const node_t * node ) 131 -> typename ast::Pass< core_t >::template generic_call_accept_result<node_t>::type 132 { 133 __pedantic_pass_assert( __visit_children() ); 134 __pedantic_pass_assert( node ); 135 136 static_assert( !std::is_base_of<ast::Expr, node_t>::value, "ERROR"); 137 static_assert( !std::is_base_of<ast::Stmt, node_t>::value, "ERROR"); 138 139 auto nval = node->accept( *this ); 140 __pass::result1< 141 typename std::remove_pointer< decltype( node->accept(*this) ) >::type 142 > res; 143 res.differs = nval != node; 144 res.value = nval; 145 return res; 146 } 147 148 template< typename core_t > 149 __pass::template result1<ast::Expr> ast::Pass< core_t >::call_accept( const ast::Expr * expr ) { 150 __pedantic_pass_assert( __visit_children() ); 151 __pedantic_pass_assert( expr ); 152 153 auto nval = expr->accept( *this ); 154 return { nval != expr, nval }; 155 } 156 157 template< typename core_t > 158 __pass::template result1<ast::Stmt> ast::Pass< core_t >::call_accept( const ast::Stmt * stmt ) { 159 __pedantic_pass_assert( __visit_children() ); 160 __pedantic_pass_assert( stmt ); 161 162 const ast::Stmt * nval = stmt->accept( *this ); 163 return { nval != stmt, nval }; 164 } 165 166 template< typename core_t > 167 __pass::template result1<ast::Expr> ast::Pass< core_t >::call_accept_top( const ast::Expr * expr ) { 168 __pedantic_pass_assert( __visit_children() ); 169 __pedantic_pass_assert( expr ); 170 171 const ast::TypeSubstitution ** typeSubs_ptr = __pass::typeSubs( core, 0 ); 172 if ( typeSubs_ptr && expr->env ) { 173 *typeSubs_ptr = expr->env; 174 } 175 176 auto nval = expr->accept( *this ); 177 return { nval != expr, nval }; 178 } 179 180 template< typename core_t > 181 __pass::template 
result1<ast::Stmt> ast::Pass< core_t >::call_accept_as_compound( const ast::Stmt * stmt ) { 182 __pedantic_pass_assert( __visit_children() ); 183 __pedantic_pass_assert( stmt ); 184 185 // add a few useful symbols to the scope 186 using __pass::empty; 187 188 // get the stmts/decls that will need to be spliced in 189 auto stmts_before = __pass::stmtsToAddBefore( core, 0 ); 190 auto stmts_after = __pass::stmtsToAddAfter ( core, 0 ); 191 auto decls_before = __pass::declsToAddBefore( core, 0 ); 192 auto decls_after = __pass::declsToAddAfter ( core, 0 ); 193 194 // These may be modified by subnode but most be restored once we exit this statemnet. 195 ValueGuardPtr< const ast::TypeSubstitution * > __old_env ( __pass::typeSubs( core, 0 ) ); 196 ValueGuardPtr< typename std::remove_pointer< decltype(stmts_before) >::type > __old_decls_before( stmts_before ); 197 ValueGuardPtr< typename std::remove_pointer< decltype(stmts_after ) >::type > __old_decls_after ( stmts_after ); 198 ValueGuardPtr< typename std::remove_pointer< decltype(decls_before) >::type > __old_stmts_before( decls_before ); 199 ValueGuardPtr< typename std::remove_pointer< decltype(decls_after ) >::type > __old_stmts_after ( decls_after ); 200 201 // Now is the time to actually visit the node 202 const ast::Stmt * nstmt = stmt->accept( *this ); 203 204 // If the pass doesn't want to add anything then we are done 205 if( empty(stmts_before) && empty(stmts_after) && empty(decls_before) && empty(decls_after) ) { 206 return { nstmt != stmt, nstmt }; 207 } 208 209 // Make sure that it is either adding statements or declartions but not both 210 // this is because otherwise the order would be awkward to predict 211 assert(( empty( stmts_before ) && empty( stmts_after )) 212 || ( empty( decls_before ) && empty( decls_after )) ); 213 214 // Create a new Compound Statement to hold the new decls/stmts 215 ast::CompoundStmt * compound = new ast::CompoundStmt( stmt->location ); 216 217 // Take all the declarations that go before 218 __pass::take_all( std::back_inserter( compound->kids ), decls_before ); 219 __pass::take_all( std::back_inserter( compound->kids ), stmts_before ); 220 221 // Insert the original declaration 222 compound->kids.emplace_back( nstmt ); 223 224 // Insert all the declarations that go before 225 __pass::take_all( std::back_inserter( compound->kids ), decls_after ); 226 __pass::take_all( std::back_inserter( compound->kids ), stmts_after ); 227 228 return {true, compound}; 229 } 230 231 template< typename core_t > 232 template< template <class...> class container_t > 233 __pass::template resultNstmt<container_t> ast::Pass< core_t >::call_accept( const container_t< ptr<Stmt> > & statements ) { 234 __pedantic_pass_assert( __visit_children() ); 235 if( statements.empty() ) return {}; 236 237 // We are going to aggregate errors for all these statements 238 SemanticErrorException errors; 239 240 // add a few useful symbols to the scope 241 using __pass::empty; 242 243 // get the stmts/decls that will need to be spliced in 244 auto stmts_before = __pass::stmtsToAddBefore( core, 0 ); 245 auto stmts_after = __pass::stmtsToAddAfter ( core, 0 ); 246 auto decls_before = __pass::declsToAddBefore( core, 0 ); 247 auto decls_after = __pass::declsToAddAfter ( core, 0 ); 248 249 // These may be modified by subnode but most be restored once we exit this statemnet. 
250 ValueGuardPtr< typename std::remove_pointer< decltype(stmts_before) >::type > __old_decls_before( stmts_before ); 251 ValueGuardPtr< typename std::remove_pointer< decltype(stmts_after ) >::type > __old_decls_after ( stmts_after ); 252 ValueGuardPtr< typename std::remove_pointer< decltype(decls_before) >::type > __old_stmts_before( decls_before ); 253 ValueGuardPtr< typename std::remove_pointer< decltype(decls_after ) >::type > __old_stmts_after ( decls_after ); 254 255 // update pass statitistics 256 pass_visitor_stats.depth++; 257 pass_visitor_stats.max->push(pass_visitor_stats.depth); 258 pass_visitor_stats.avg->push(pass_visitor_stats.depth); 259 260 __pass::resultNstmt<container_t> new_kids; 261 for( auto value : enumerate( statements ) ) { 262 try { 263 size_t i = value.idx; 264 const Stmt * stmt = value.val; 265 __pedantic_pass_assert( stmt ); 266 const ast::Stmt * new_stmt = stmt->accept( *this ); 267 assert( new_stmt ); 268 if(new_stmt != stmt ) { new_kids.differs = true; } 269 270 // Make sure that it is either adding statements or declartions but not both 271 // this is because otherwise the order would be awkward to predict 272 assert(( empty( stmts_before ) && empty( stmts_after )) 273 || ( empty( decls_before ) && empty( decls_after )) ); 274 275 // Take all the statements which should have gone after, N/A for first iteration 276 new_kids.take_all( decls_before ); 277 new_kids.take_all( stmts_before ); 278 279 // Now add the statement if there is one 280 if(new_stmt != stmt) { 281 new_kids.values.emplace_back( new_stmt, i, false ); 282 } else { 283 new_kids.values.emplace_back( nullptr, i, true ); 284 } 285 286 // Take all the declarations that go before 287 new_kids.take_all( decls_after ); 288 new_kids.take_all( stmts_after ); 128 } 129 130 template< typename core_t > 131 template< typename node_t > 132 auto ast::Pass< core_t >::call_accept( const node_t * node ) -> 133 typename ast::Pass< core_t >::template generic_call_accept_result<node_t>::type 134 { 135 __pedantic_pass_assert( __visit_children() ); 136 __pedantic_pass_assert( node ); 137 138 static_assert( !std::is_base_of<ast::Expr, node_t>::value, "ERROR" ); 139 static_assert( !std::is_base_of<ast::Stmt, node_t>::value, "ERROR" ); 140 141 auto nval = node->accept( *this ); 142 __pass::result1< 143 typename std::remove_pointer< decltype( node->accept(*this) ) >::type 144 > res; 145 res.differs = nval != node; 146 res.value = nval; 147 return res; 148 } 149 150 template< typename core_t > 151 ast::__pass::template result1<ast::Expr> ast::Pass< core_t >::call_accept( const ast::Expr * expr ) { 152 __pedantic_pass_assert( __visit_children() ); 153 __pedantic_pass_assert( expr ); 154 155 auto nval = expr->accept( *this ); 156 return { nval != expr, nval }; 157 } 158 159 template< typename core_t > 160 ast::__pass::template result1<ast::Stmt> ast::Pass< core_t >::call_accept( const ast::Stmt * stmt ) { 161 __pedantic_pass_assert( __visit_children() ); 162 __pedantic_pass_assert( stmt ); 163 164 const ast::Stmt * nval = stmt->accept( *this ); 165 return { nval != stmt, nval }; 166 } 167 168 template< typename core_t > 169 ast::__pass::template result1<ast::Expr> ast::Pass< core_t >::call_accept_top( const ast::Expr * expr ) { 170 __pedantic_pass_assert( __visit_children() ); 171 __pedantic_pass_assert( expr ); 172 173 const ast::TypeSubstitution ** typeSubs_ptr = __pass::typeSubs( core, 0 ); 174 if ( typeSubs_ptr && expr->env ) { 175 *typeSubs_ptr = expr->env; 176 } 177 178 auto nval = expr->accept( *this ); 179 return { 
nval != expr, nval }; 180 } 181 182 template< typename core_t > 183 ast::__pass::template result1<ast::Stmt> ast::Pass< core_t >::call_accept_as_compound( const ast::Stmt * stmt ) { 184 __pedantic_pass_assert( __visit_children() ); 185 __pedantic_pass_assert( stmt ); 186 187 // add a few useful symbols to the scope 188 using __pass::empty; 189 190 // get the stmts/decls that will need to be spliced in 191 auto stmts_before = __pass::stmtsToAddBefore( core, 0 ); 192 auto stmts_after = __pass::stmtsToAddAfter ( core, 0 ); 193 auto decls_before = __pass::declsToAddBefore( core, 0 ); 194 auto decls_after = __pass::declsToAddAfter ( core, 0 ); 195 196 // These may be modified by subnode but most be restored once we exit this statemnet. 197 ValueGuardPtr< const ast::TypeSubstitution * > __old_env ( __pass::typeSubs( core, 0 ) ); 198 ValueGuardPtr< typename std::remove_pointer< decltype(stmts_before) >::type > __old_decls_before( stmts_before ); 199 ValueGuardPtr< typename std::remove_pointer< decltype(stmts_after ) >::type > __old_decls_after ( stmts_after ); 200 ValueGuardPtr< typename std::remove_pointer< decltype(decls_before) >::type > __old_stmts_before( decls_before ); 201 ValueGuardPtr< typename std::remove_pointer< decltype(decls_after ) >::type > __old_stmts_after ( decls_after ); 202 203 // Now is the time to actually visit the node 204 const ast::Stmt * nstmt = stmt->accept( *this ); 205 206 // If the pass doesn't want to add anything then we are done 207 if ( empty(stmts_before) && empty(stmts_after) && empty(decls_before) && empty(decls_after) ) { 208 return { nstmt != stmt, nstmt }; 209 } 210 211 // Make sure that it is either adding statements or declartions but not both 212 // this is because otherwise the order would be awkward to predict 213 assert(( empty( stmts_before ) && empty( stmts_after )) 214 || ( empty( decls_before ) && empty( decls_after )) ); 215 216 // Create a new Compound Statement to hold the new decls/stmts 217 ast::CompoundStmt * compound = new ast::CompoundStmt( stmt->location ); 218 219 // Take all the declarations that go before 220 __pass::take_all( std::back_inserter( compound->kids ), decls_before ); 221 __pass::take_all( std::back_inserter( compound->kids ), stmts_before ); 222 223 // Insert the original declaration 224 compound->kids.emplace_back( nstmt ); 225 226 // Insert all the declarations that go before 227 __pass::take_all( std::back_inserter( compound->kids ), decls_after ); 228 __pass::take_all( std::back_inserter( compound->kids ), stmts_after ); 229 230 return { true, compound }; 231 } 232 233 template< typename core_t > 234 template< template <class...> class container_t > 235 ast::__pass::template resultNstmt<container_t> ast::Pass< core_t >::call_accept( const container_t< ptr<Stmt> > & statements ) { 236 __pedantic_pass_assert( __visit_children() ); 237 if ( statements.empty() ) return {}; 238 239 // We are going to aggregate errors for all these statements 240 SemanticErrorException errors; 241 242 // add a few useful symbols to the scope 243 using __pass::empty; 244 245 // get the stmts/decls that will need to be spliced in 246 auto stmts_before = __pass::stmtsToAddBefore( core, 0 ); 247 auto stmts_after = __pass::stmtsToAddAfter ( core, 0 ); 248 auto decls_before = __pass::declsToAddBefore( core, 0 ); 249 auto decls_after = __pass::declsToAddAfter ( core, 0 ); 250 251 // These may be modified by subnode but most be restored once we exit this statemnet. 
252 ValueGuardPtr< typename std::remove_pointer< decltype(stmts_before) >::type > __old_decls_before( stmts_before ); 253 ValueGuardPtr< typename std::remove_pointer< decltype(stmts_after ) >::type > __old_decls_after ( stmts_after ); 254 ValueGuardPtr< typename std::remove_pointer< decltype(decls_before) >::type > __old_stmts_before( decls_before ); 255 ValueGuardPtr< typename std::remove_pointer< decltype(decls_after ) >::type > __old_stmts_after ( decls_after ); 256 257 // update pass statitistics 258 pass_visitor_stats.depth++; 259 pass_visitor_stats.max->push(pass_visitor_stats.depth); 260 pass_visitor_stats.avg->push(pass_visitor_stats.depth); 261 262 __pass::resultNstmt<container_t> new_kids; 263 for ( auto value : enumerate( statements ) ) { 264 try { 265 size_t i = value.idx; 266 const Stmt * stmt = value.val; 267 __pedantic_pass_assert( stmt ); 268 const ast::Stmt * new_stmt = stmt->accept( *this ); 269 assert( new_stmt ); 270 if ( new_stmt != stmt ) { new_kids.differs = true; } 271 272 // Make sure that it is either adding statements or declartions but not both 273 // this is because otherwise the order would be awkward to predict 274 assert(( empty( stmts_before ) && empty( stmts_after )) 275 || ( empty( decls_before ) && empty( decls_after )) ); 276 277 // Take all the statements which should have gone after, N/A for first iteration 278 new_kids.take_all( decls_before ); 279 new_kids.take_all( stmts_before ); 280 281 // Now add the statement if there is one 282 if ( new_stmt != stmt ) { 283 new_kids.values.emplace_back( new_stmt, i, false ); 284 } else { 285 new_kids.values.emplace_back( nullptr, i, true ); 289 286 } 290 catch ( SemanticErrorException &e ) { 291 errors.append( e ); 287 288 // Take all the declarations that go before 289 new_kids.take_all( decls_after ); 290 new_kids.take_all( stmts_after ); 291 } catch ( SemanticErrorException &e ) { 292 errors.append( e ); 293 } 294 } 295 pass_visitor_stats.depth--; 296 if ( !errors.isEmpty() ) { throw errors; } 297 298 return new_kids; 299 } 300 301 template< typename core_t > 302 template< template <class...> class container_t, typename node_t > 303 ast::__pass::template resultN<container_t, node_t> ast::Pass< core_t >::call_accept( const container_t< ast::ptr<node_t> > & container ) { 304 __pedantic_pass_assert( __visit_children() ); 305 if ( container.empty() ) return {}; 306 SemanticErrorException errors; 307 308 pass_visitor_stats.depth++; 309 pass_visitor_stats.max->push(pass_visitor_stats.depth); 310 pass_visitor_stats.avg->push(pass_visitor_stats.depth); 311 312 bool mutated = false; 313 container_t<ptr<node_t>> new_kids; 314 for ( const node_t * node : container ) { 315 try { 316 __pedantic_pass_assert( node ); 317 const node_t * new_stmt = strict_dynamic_cast< const node_t * >( node->accept( *this ) ); 318 if ( new_stmt != node ) { 319 mutated = true; 320 new_kids.emplace_back( new_stmt ); 321 } else { 322 new_kids.emplace_back( nullptr ); 292 323 } 293 } 294 pass_visitor_stats.depth--; 295 if ( !errors.isEmpty() ) { throw errors; } 296 297 return new_kids; 298 } 299 300 template< typename core_t > 301 template< template <class...> class container_t, typename node_t > 302 __pass::template resultN<container_t, node_t> ast::Pass< core_t >::call_accept( const container_t< ast::ptr<node_t> > & container ) { 303 __pedantic_pass_assert( __visit_children() ); 304 if( container.empty() ) return {}; 305 SemanticErrorException errors; 306 307 pass_visitor_stats.depth++; 308 
pass_visitor_stats.max->push(pass_visitor_stats.depth); 309 pass_visitor_stats.avg->push(pass_visitor_stats.depth); 310 311 bool mutated = false; 312 container_t<ptr<node_t>> new_kids; 313 for ( const node_t * node : container ) { 314 try { 315 __pedantic_pass_assert( node ); 316 const node_t * new_stmt = strict_dynamic_cast< const node_t * >( node->accept( *this ) ); 317 if(new_stmt != node ) { 318 mutated = true; 319 new_kids.emplace_back( new_stmt ); 320 } else { 321 new_kids.emplace_back( nullptr ); 322 } 323 324 } 325 catch( SemanticErrorException &e ) { 326 errors.append( e ); 327 } 328 } 329 330 __pedantic_pass_assert( new_kids.size() == container.size() ); 331 pass_visitor_stats.depth--; 332 if ( ! errors.isEmpty() ) { throw errors; } 333 334 return ast::__pass::resultN<container_t, node_t>{ mutated, new_kids }; 335 } 336 337 template< typename core_t > 338 template<typename node_t, typename super_t, typename field_t> 339 void ast::Pass< core_t >::maybe_accept( 340 const node_t * & parent, 341 field_t super_t::*field 342 ) { 343 static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" ); 344 345 if(__pass::skip(parent->*field)) return; 346 const auto & old_val = __pass::get(parent->*field, 0); 347 348 static_assert( !std::is_same<const ast::Node * &, decltype(old_val)>::value, "ERROR"); 349 350 auto new_val = call_accept( old_val ); 351 352 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value /* || std::is_same<int, decltype(old_val)>::value */, "ERROR"); 353 354 if( new_val.differs ) { 355 auto new_parent = __pass::mutate<core_t>(parent); 356 new_val.apply(new_parent, field); 357 parent = new_parent; 358 } 359 } 360 361 template< typename core_t > 362 template<typename node_t, typename super_t, typename field_t> 363 void ast::Pass< core_t >::maybe_accept_top( 364 const node_t * & parent, 365 field_t super_t::*field 366 ) { 367 static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" ); 368 369 if(__pass::skip(parent->*field)) return; 370 const auto & old_val = __pass::get(parent->*field, 0); 371 372 static_assert( !std::is_same<const ast::Node * &, decltype(old_val)>::value, "ERROR"); 373 374 auto new_val = call_accept_top( old_val ); 375 376 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value /* || std::is_same<int, decltype(old_val)>::value */, "ERROR"); 377 378 if( new_val.differs ) { 379 auto new_parent = __pass::mutate<core_t>(parent); 380 new_val.apply(new_parent, field); 381 parent = new_parent; 382 } 383 } 384 385 template< typename core_t > 386 template<typename node_t, typename super_t, typename field_t> 387 void ast::Pass< core_t >::maybe_accept_as_compound( 388 const node_t * & parent, 389 field_t super_t::*child 390 ) { 391 static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" ); 392 393 if(__pass::skip(parent->*child)) return; 394 const auto & old_val = __pass::get(parent->*child, 0); 395 396 static_assert( !std::is_same<const ast::Node * &, decltype(old_val)>::value, "ERROR"); 397 398 auto new_val = call_accept_as_compound( old_val ); 399 400 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value || std::is_same<int, decltype(old_val)>::value, "ERROR"); 401 402 if( new_val.differs ) { 403 auto new_parent = __pass::mutate<core_t>(parent); 404 new_val.apply( new_parent, child ); 405 parent = new_parent; 406 } 407 } 408 324 } catch ( SemanticErrorException &e ) { 325 errors.append( e ); 326 } 327 } 328 329 
__pedantic_pass_assert( new_kids.size() == container.size() ); 330 pass_visitor_stats.depth--; 331 if ( !errors.isEmpty() ) { throw errors; } 332 333 return ast::__pass::resultN<container_t, node_t>{ mutated, new_kids }; 334 } 335 336 template< typename core_t > 337 template<typename node_t, typename super_t, typename field_t> 338 void ast::Pass< core_t >::maybe_accept( 339 const node_t * & parent, 340 field_t super_t::*field 341 ) { 342 static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" ); 343 344 if ( __pass::skip( parent->*field ) ) return; 345 const auto & old_val = __pass::get(parent->*field, 0); 346 347 static_assert( !std::is_same<const ast::Node * &, decltype(old_val)>::value, "ERROR" ); 348 349 auto new_val = call_accept( old_val ); 350 351 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value /* || std::is_same<int, decltype(old_val)>::value */, "ERROR" ); 352 353 if ( new_val.differs ) { 354 auto new_parent = __pass::mutate<core_t>(parent); 355 new_val.apply(new_parent, field); 356 parent = new_parent; 357 } 358 } 359 360 template< typename core_t > 361 template<typename node_t, typename super_t, typename field_t> 362 void ast::Pass< core_t >::maybe_accept_top( 363 const node_t * & parent, 364 field_t super_t::*field 365 ) { 366 static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" ); 367 368 if ( __pass::skip( parent->*field ) ) return; 369 const auto & old_val = __pass::get(parent->*field, 0); 370 371 static_assert( !std::is_same<const ast::Node * &, decltype(old_val)>::value, "ERROR" ); 372 373 auto new_val = call_accept_top( old_val ); 374 375 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value /* || std::is_same<int, decltype(old_val)>::value */, "ERROR" ); 376 377 if ( new_val.differs ) { 378 auto new_parent = __pass::mutate<core_t>(parent); 379 new_val.apply(new_parent, field); 380 parent = new_parent; 381 } 382 } 383 384 template< typename core_t > 385 template<typename node_t, typename super_t, typename field_t> 386 void ast::Pass< core_t >::maybe_accept_as_compound( 387 const node_t * & parent, 388 field_t super_t::*child 389 ) { 390 static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" ); 391 392 if ( __pass::skip( parent->*child ) ) return; 393 const auto & old_val = __pass::get(parent->*child, 0); 394 395 static_assert( !std::is_same<const ast::Node * &, decltype(old_val)>::value, "ERROR" ); 396 397 auto new_val = call_accept_as_compound( old_val ); 398 399 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value || std::is_same<int, decltype(old_val)>::value, "ERROR" ); 400 401 if ( new_val.differs ) { 402 auto new_parent = __pass::mutate<core_t>(parent); 403 new_val.apply( new_parent, child ); 404 parent = new_parent; 405 } 409 406 } 410 407 … … 760 757 761 758 if ( __visit_children() ) { 762 // Do not enter (or leave) a new scope if atFunctionTop. Remember to save the result. 763 auto guard1 = makeFuncGuard( [this, enterScope = !this->atFunctionTop]() { 764 if ( enterScope ) { 765 __pass::symtab::enter(core, 0); 766 } 767 }, [this, leaveScope = !this->atFunctionTop]() { 768 if ( leaveScope ) { 769 __pass::symtab::leave(core, 0); 770 } 771 }); 772 ValueGuard< bool > guard2( atFunctionTop ); 773 atFunctionTop = false; 774 guard_scope guard3 { *this }; 775 maybe_accept( node, &CompoundStmt::kids ); 759 // Do not enter (or leave) a new symbol table scope if atFunctionTop. 
760 // But always enter (and leave) a new general scope. 761 if ( atFunctionTop ) { 762 ValueGuard< bool > guard1( atFunctionTop ); 763 atFunctionTop = false; 764 guard_scope guard2( *this ); 765 maybe_accept( node, &CompoundStmt::kids ); 766 } else { 767 guard_symtab guard1( *this ); 768 guard_scope guard2( *this ); 769 maybe_accept( node, &CompoundStmt::kids ); 770 } 776 771 } 777 772 -
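Note on the splicing hooks used throughout this hunk: call_accept and call_accept_as_compound pick up whatever the pass core exposes under the names located by __pass::stmtsToAddBefore/After and __pass::declsToAddBefore/After, then splice those nodes around the visited statement (wrapping a lone statement in a new CompoundStmt when additions exist). A minimal sketch of a core using this hook follows; the container type and everything not visible in this hunk are assumptions for illustration, not part of this changeset.

    #include <list>
    #include "AST/Copy.hpp"
    #include "AST/Pass.hpp"
    #include "AST/Stmt.hpp"

    // sketch only: ask the visitor to splice a copy of each ExprStmt in front of itself
    struct PrependCore {
        std::list< ast::ptr< ast::Stmt > > stmtsToAddBefore;   // found by __pass::stmtsToAddBefore( core, 0 )
        std::list< ast::ptr< ast::Stmt > > stmtsToAddAfter;    // found by __pass::stmtsToAddAfter ( core, 0 )
        void previsit( const ast::ExprStmt * stmt ) {
            // per the assert in this hunk, one visit may add statements or declarations, but not both
            stmtsToAddBefore.emplace_back( ast::deepCopy( stmt ) );  // illustrative payload only
        }
    };
    // driven the same way as the passes in this changeset: ast::Pass< PrependCore >::run( translationUnit );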
src/AST/Pass.proto.hpp
r2b78949 r8a930c03 27 27 28 28 #ifdef PEDANTIC_PASS_ASSERT 29 #define __pedantic_pass_assert(...) assert (__VA_ARGS__)29 #define __pedantic_pass_assert(...) assert(__VA_ARGS__) 30 30 #define __pedantic_pass_assertf(...) assertf(__VA_ARGS__) 31 31 #else -
src/AST/Print.cpp
r2b78949 r8a930c03 16 16 #include "Print.hpp" 17 17 18 #include "Attribute.hpp" 18 19 #include "Decl.hpp" 19 20 #include "Expr.hpp" 21 #include "Init.hpp" 20 22 #include "Stmt.hpp" 21 23 #include "Type.hpp" 22 24 #include "TypeSubstitution.hpp" 23 25 #include "CompilationState.h" 24 25 #include "Common/utility.h" // for group_iterate 26 #include "Common/Iterate.hpp" 26 27 27 28 using namespace std; -
src/AST/SymbolTable.cpp
r2b78949 r8a930c03 18 18 #include <cassert> 19 19 20 #include "Copy.hpp" 20 21 #include "Decl.hpp" 21 22 #include "Expr.hpp" … … 87 88 } 88 89 89 SymbolTable::SymbolTable( )90 SymbolTable::SymbolTable( ErrorDetection errorMode ) 90 91 : idTable(), typeTable(), structTable(), enumTable(), unionTable(), traitTable(), 91 prevScope(), scope( 0 ), repScope( 0 ) { ++*stats().count; }92 prevScope(), scope( 0 ), repScope( 0 ), errorMode(errorMode) { ++*stats().count; } 92 93 93 94 SymbolTable::~SymbolTable() { stats().size->push( idTable ? idTable->size() : 0 ); } 95 96 void SymbolTable::OnFindError( CodeLocation location, std::string error ) const { 97 assertf( errorMode != AssertClean, "Name collision/redefinition, found during a compilation phase where none should be possible. Detail: %s", error.c_str() ); 98 if (errorMode == ValidateOnAdd) { 99 SemanticError(location, error); 100 } 101 assertf( errorMode == IgnoreErrors, "Unrecognized symbol-table error mode %d", errorMode ); 102 } 94 103 95 104 void SymbolTable::enterScope() { … … 268 277 } 269 278 270 namespace { 271 /// true if redeclaration conflict between two types 272 bool addedTypeConflicts( const NamedTypeDecl * existing, const NamedTypeDecl * added ) { 273 if ( existing->base == nullptr ) { 274 return false; 275 } else if ( added->base == nullptr ) { 276 return true; 277 } else { 278 // typedef redeclarations are errors only if types are different 279 if ( ! ResolvExpr::typesCompatible( existing->base, added->base, SymbolTable{} ) ) { 280 SemanticError( added->location, "redeclaration of " + added->name ); 281 } 282 } 283 // does not need to be added to the table if both existing and added have a base that are 284 // the same 279 bool SymbolTable::addedTypeConflicts( 280 const NamedTypeDecl * existing, const NamedTypeDecl * added ) const { 281 if ( existing->base == nullptr ) { 282 return false; 283 } else if ( added->base == nullptr ) { 285 284 return true; 286 } 287 288 /// true if redeclaration conflict between two aggregate declarations 289 bool addedDeclConflicts( const AggregateDecl * existing, const AggregateDecl * added ) { 290 if ( ! existing->body ) { 291 return false; 292 } else if ( added->body ) { 293 SemanticError( added, "redeclaration of " ); 294 } 295 return true; 296 } 285 } else { 286 // typedef redeclarations are errors only if types are different 287 if ( ! ResolvExpr::typesCompatible( existing->base, added->base ) ) { 288 OnFindError( added->location, "redeclaration of " + added->name ); 289 } 290 } 291 // does not need to be added to the table if both existing and added have a base that are 292 // the same 293 return true; 294 } 295 296 bool SymbolTable::addedDeclConflicts( 297 const AggregateDecl * existing, const AggregateDecl * added ) const { 298 if ( ! existing->body ) { 299 return false; 300 } else if ( added->body ) { 301 OnFindError( added, "redeclaration of " ); 302 } 303 return true; 297 304 } 298 305 … … 642 649 } else if ( existing.id->linkage.is_mangled 643 650 || ResolvExpr::typesCompatible( 644 added->get_type(), existing.id->get_type() , SymbolTable{}) ) {651 added->get_type(), existing.id->get_type() ) ) { 645 652 646 653 // it is a conflict if one declaration is deleted and the other is not 647 654 if ( deleter && ! existing.deleter ) { 648 655 if ( handleConflicts.mode == OnConflict::Error ) { 649 SemanticError( added, "deletion of defined identifier " );656 OnFindError( added, "deletion of defined identifier " ); 650 657 } 651 658 return true; 652 659 } else if ( ! 
deleter && existing.deleter ) { 653 660 if ( handleConflicts.mode == OnConflict::Error ) { 654 SemanticError( added, "definition of deleted identifier " );661 OnFindError( added, "definition of deleted identifier " ); 655 662 } 656 663 return true; … … 660 667 if ( isDefinition( added ) && isDefinition( existing.id ) ) { 661 668 if ( handleConflicts.mode == OnConflict::Error ) { 662 SemanticError( added,669 OnFindError( added, 663 670 isFunction( added ) ? 664 671 "duplicate function definition for " : … … 669 676 } else { 670 677 if ( handleConflicts.mode == OnConflict::Error ) { 671 SemanticError( added, "duplicate definition for " );678 OnFindError( added, "duplicate definition for " ); 672 679 } 673 680 return true; … … 721 728 // Check that a Cforall declaration doesn't override any C declaration 722 729 if ( hasCompatibleCDecl( name, mangleName ) ) { 723 SemanticError( decl, "Cforall declaration hides C function " );730 OnFindError( decl, "Cforall declaration hides C function " ); 724 731 } 725 732 } else { … … 727 734 // type-compatibility, which it may not be. 728 735 if ( hasIncompatibleCDecl( name, mangleName ) ) { 729 SemanticError( decl, "conflicting overload of C function " );736 OnFindError( decl, "conflicting overload of C function " ); 730 737 } 731 738 } -
src/AST/SymbolTable.hpp
r2b78949 r8a930c03 93 93 94 94 public: 95 SymbolTable(); 95 96 /// Mode to control when (during which pass) user-caused name-declaration errors get reported. 97 /// The default setting `AssertClean` supports, "I expect all user-caused errors to have been 98 /// reported by now," or, "I wouldn't know what to do with an error; are there even any here?" 99 enum ErrorDetection { 100 AssertClean, ///< invalid user decls => assert fails during addFoo (default) 101 ValidateOnAdd, ///< invalid user decls => calls SemanticError during addFoo 102 IgnoreErrors ///< acts as if unspecified decls were removed, forcing validity 103 }; 104 105 explicit SymbolTable( 106 ErrorDetection ///< mode for the lifetime of the symbol table (whole pass) 107 ); 108 SymbolTable() : SymbolTable(AssertClean) {} 96 109 ~SymbolTable(); 110 111 ErrorDetection getErrorMode() const { 112 return errorMode; 113 } 97 114 98 115 // when using an indexer manually (e.g., within a mutator traversal), it is necessary to … … 158 175 159 176 private: 177 void OnFindError( CodeLocation location, std::string error ) const; 178 179 template< typename T > 180 void OnFindError( const T * obj, const std::string & error ) const { 181 OnFindError( obj->location, toString( error, obj ) ); 182 } 183 184 template< typename T > 185 void OnFindError( CodeLocation location, const T * obj, const std::string & error ) const { 186 OnFindError( location, toString( error, obj ) ); 187 } 188 160 189 /// Ensures that a proper backtracking scope exists before a mutation 161 190 void lazyInitScope(); … … 168 197 bool removeSpecialOverrides( IdData & decl, MangleTable::Ptr & mangleTable ); 169 198 170 /// Options for handling identifier conflicts 199 /// Error detection mode given at construction (pass-specific). 200 /// Logically const, except that the symbol table's push-pop is achieved by autogenerated 201 /// assignment onto self. The feield is left motuable to keep this code-gen simple. 202 /// Conceptual constness is preserved by all SymbolTable in a stack sharing the same mode. 203 ErrorDetection errorMode; 204 205 /// Options for handling identifier conflicts. 206 /// Varies according to AST location during traversal: captures semantics of the construct 207 /// being visited as "would shadow" vs "must not collide." 208 /// At a given AST location, is the same for every pass. 171 209 struct OnConflict { 172 210 enum { 173 Error, ///< Throw a semantic error211 Error, ///< Follow the current pass's ErrorDetection mode (may throw a semantic error) 174 212 Delete ///< Delete the earlier version with the delete statement 175 213 } mode; … … 191 229 const Decl * deleter ); 192 230 231 /// true if redeclaration conflict between two types 232 bool addedTypeConflicts( const NamedTypeDecl * existing, const NamedTypeDecl * added ) const; 233 234 /// true if redeclaration conflict between two aggregate declarations 235 bool addedDeclConflicts( const AggregateDecl * existing, const AggregateDecl * added ) const; 236 193 237 /// common code for addId, addDeletedId, etc. 194 238 void addIdCommon( … … 213 257 } 214 258 259 215 260 // Local Variables: // 216 261 // tab-width: 4 // -
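A minimal usage sketch based only on the declarations above (the names of the local variables are illustrative):

    #include <cassert>
    #include "AST/SymbolTable.hpp"

    // a pass that wants user redeclaration errors reported while it populates the table
    ast::SymbolTable checked( ast::SymbolTable::ValidateOnAdd );
    // default construction keeps the old behaviour: a user-caused collision trips the assert in OnFindError
    ast::SymbolTable strict;     // equivalent to SymbolTable( ast::SymbolTable::AssertClean )
    assert( strict.getErrorMode() == ast::SymbolTable::AssertClean );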
src/AST/TypeEnvironment.cpp
r2b78949 r8a930c03 178 178 179 179 bool TypeEnvironment::combine( 180 const TypeEnvironment & o, OpenVarSet & open , const SymbolTable & symtab) {180 const TypeEnvironment & o, OpenVarSet & open ) { 181 181 // short-circuit easy cases 182 182 if ( o.empty() ) return true; … … 201 201 EqvClass & r = *rt; 202 202 // merge bindings 203 if ( ! mergeBound( r, c, open , symtab) ) return false;203 if ( ! mergeBound( r, c, open ) ) return false; 204 204 // merge previous unbound variables into this class, checking occurs if needed 205 205 if ( r.bound ) for ( const auto & u : c.vars ) { … … 216 216 } else if ( st != rt ) { 217 217 // bound, but not to the same class 218 if ( ! mergeClasses( rt, st, open , symtab) ) return false;218 if ( ! mergeClasses( rt, st, open ) ) return false; 219 219 } // ignore bound into the same class 220 220 } … … 280 280 bool TypeEnvironment::bindVar( 281 281 const TypeInstType * typeInst, const Type * bindTo, const TypeData & data, 282 AssertionSet & need, AssertionSet & have, const OpenVarSet & open, WidenMode widen, 283 const SymbolTable & symtab 282 AssertionSet & need, AssertionSet & have, const OpenVarSet & open, WidenMode widen 284 283 ) { 285 284 // remove references from bound type, so that type variables can only bind to value types … … 300 299 if ( unifyInexact( 301 300 newType, target, *this, need, have, open, 302 widen & WidenMode{ it->allowWidening, true }, symtab,common ) ) {301 widen & WidenMode{ it->allowWidening, true }, common ) ) { 303 302 if ( common ) { 304 303 it->bound = std::move(common); … … 321 320 const TypeInstType * var1, const TypeInstType * var2, TypeData && data, 322 321 AssertionSet & need, AssertionSet & have, const OpenVarSet & open, 323 WidenMode widen , const SymbolTable & symtab322 WidenMode widen 324 323 ) { 325 324 auto c1 = internal_lookup( *var1 ); … … 358 357 359 358 if ( unifyInexact( 360 newType1, newType2, *this, need, have, open, newWidenMode, symtab,common ) ) {359 newType1, newType2, *this, need, have, open, newWidenMode, common ) ) { 361 360 c1->vars.insert( c2->vars.begin(), c2->vars.end() ); 362 361 c1->allowWidening = widen1 && widen2; … … 409 408 410 409 bool TypeEnvironment::mergeBound( 411 EqvClass & to, const EqvClass & from, OpenVarSet & open , const SymbolTable & symtab) {410 EqvClass & to, const EqvClass & from, OpenVarSet & open ) { 412 411 if ( from.bound ) { 413 412 if ( to.bound ) { … … 419 418 420 419 if ( unifyInexact( 421 toType, fromType, *this, need, have, open, widen, symtab,common ) ) {420 toType, fromType, *this, need, have, open, widen, common ) ) { 422 421 // unifies, set common type if necessary 423 422 if ( common ) { … … 437 436 438 437 bool TypeEnvironment::mergeClasses( 439 ClassList::iterator to, ClassList::iterator from, OpenVarSet & open , const SymbolTable & symtab438 ClassList::iterator to, ClassList::iterator from, OpenVarSet & open 440 439 ) { 441 440 EqvClass & r = *to, & s = *from; 442 441 443 442 // ensure bounds match 444 if ( ! mergeBound( r, s, open , symtab) ) return false;443 if ( ! mergeBound( r, s, open ) ) return false; 445 444 446 445 // check safely bindable -
src/AST/TypeEnvironment.hpp
r2b78949 r8a930c03 169 169 /// Merge environment with this one, checking compatibility. 170 170 /// Returns false if fails, but does NOT roll back partial changes. 171 bool combine( const TypeEnvironment & o, OpenVarSet & openVars , const SymbolTable & symtab);171 bool combine( const TypeEnvironment & o, OpenVarSet & openVars ); 172 172 173 173 /// Add all type variables in environment to open var list … … 183 183 const TypeInstType * typeInst, const Type * bindTo, const TypeData & data, 184 184 AssertionSet & need, AssertionSet & have, const OpenVarSet & openVars, 185 ResolvExpr::WidenMode widen , const SymbolTable & symtab);185 ResolvExpr::WidenMode widen ); 186 186 187 187 /// Binds the type classes represented by `var1` and `var2` together; will add one or both … … 190 190 const TypeInstType * var1, const TypeInstType * var2, TypeData && data, 191 191 AssertionSet & need, AssertionSet & have, const OpenVarSet & openVars, 192 ResolvExpr::WidenMode widen , const SymbolTable & symtab);192 ResolvExpr::WidenMode widen ); 193 193 194 194 /// Disallows widening for all bindings in the environment … … 205 205 /// Unifies the type bound of `to` with the type bound of `from`, returning false if fails 206 206 bool mergeBound( 207 EqvClass & to, const EqvClass & from, OpenVarSet & openVars , const SymbolTable & symtab);207 EqvClass & to, const EqvClass & from, OpenVarSet & openVars ); 208 208 209 209 /// Merges two type classes from local environment, returning false if fails 210 210 bool mergeClasses( 211 ClassList::iterator to, ClassList::iterator from, OpenVarSet & openVars, 212 const SymbolTable & symtab ); 211 ClassList::iterator to, ClassList::iterator from, OpenVarSet & openVars); 213 212 214 213 /// Private lookup API; returns array index of string, or env.size() for not found -
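With the SymbolTable parameter removed from combine, bindVar, bindVarToVar, mergeBound, and mergeClasses, callers now pass only the environment state. A sketch of the post-change call shape for combine, matching the declaration above (the surrounding setup is illustrative):

    #include "AST/TypeEnvironment.hpp"

    ast::TypeEnvironment env, other;
    ast::OpenVarSet open;
    if ( ! env.combine( other, open ) ) {
        // incompatible bindings; as the comment above warns, partial changes are NOT rolled back
    }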
src/AST/TypeSubstitution.cpp
r2b78949 r8a930c03 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Jun 3 13:26:00 2017 13 // Update Count : 5 14 // 12 // Last Modified On : Thr May 25 11:24:00 2023 13 // Update Count : 6 14 // 15 16 #include "TypeSubstitution.hpp" 15 17 16 18 #include "Type.hpp" // for TypeInstType, Type, StructInstType, UnionInstType 17 #include " TypeSubstitution.hpp"19 #include "Pass.hpp" // for Pass, PureVisitor, WithGuards, WithVisitorRef 18 20 19 21 namespace ast { 20 21 22 // size_t TypeSubstitution::Substituter::traceId = Stats::Heap::new_stacktrace_id("TypeSubstitution");23 22 24 23 TypeSubstitution::TypeSubstitution() { … … 119 118 } 120 119 120 // definitition must happen after PassVisitor is included so that WithGuards can be used 121 struct TypeSubstitution::Substituter : public WithGuards, public WithVisitorRef<Substituter>, public PureVisitor { 122 //static size_t traceId; 123 124 Substituter( const TypeSubstitution & sub, bool freeOnly ) : sub( sub ), freeOnly( freeOnly ) {} 125 126 const Type * postvisit( const TypeInstType * aggregateUseType ); 127 128 /// Records type variable bindings from forall-statements 129 void previsit( const FunctionType * type ); 130 /// Records type variable bindings from forall-statements and instantiations of generic types 131 // void handleAggregateType( const BaseInstType * type ); 132 133 // void previsit( const StructInstType * aggregateUseType ); 134 // void previsit( const UnionInstType * aggregateUseType ); 135 136 const TypeSubstitution & sub; 137 int subCount = 0; 138 bool freeOnly; 139 typedef std::unordered_set< TypeEnvKey > BoundVarsType; 140 BoundVarsType boundVars; 141 }; 142 143 // size_t TypeSubstitution::Substituter::traceId = Stats::Heap::new_stacktrace_id("TypeSubstitution"); 144 121 145 void TypeSubstitution::normalize() { 122 146 Pass<Substituter> sub( *this, true ); … … 128 152 } 129 153 } while ( sub.core.subCount ); 154 } 155 156 TypeSubstitution::ApplyResult<Node> TypeSubstitution::applyBase( 157 const Node * input, bool isFree ) const { 158 assert( input ); 159 Pass<Substituter> sub( *this, isFree ); 160 const Node * output = input->accept( sub ); 161 return { output, sub.core.subCount }; 130 162 } 131 163 -
src/AST/TypeSubstitution.hpp
r2b78949 r8a930c03 9 9 // Author : Richard C. Bilson 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : T ue Apr 30 22:52:47 201913 // Update Count : 911 // Last Modified By : Andrew Beach 12 // Last Modified On : Thr May 25 12:31:00 2023 13 // Update Count : 10 14 14 // 15 15 … … 46 46 TypeSubstitution &operator=( const TypeSubstitution &other ); 47 47 48 template< typename SynTreeClass>48 template< typename node_t > 49 49 struct ApplyResult { 50 ast::ptr< SynTreeClass> node;50 ast::ptr<node_t> node; 51 51 int count; 52 52 }; 53 53 54 template< typename SynTreeClass > ApplyResult<SynTreeClass> apply( const SynTreeClass * input ) const; 55 template< typename SynTreeClass > ApplyResult<SynTreeClass> applyFree( const SynTreeClass * input ) const; 54 template< typename node_t > 55 ApplyResult<node_t> apply( const node_t * input ) const { 56 ApplyResult<Node> ret = applyBase( input, false ); 57 return { ret.node.strict_as<node_t>(), ret.count }; 58 } 56 59 57 60 template< typename node_t, enum Node::ref_type ref_t > 58 61 int apply( ptr_base< node_t, ref_t > & input ) const { 59 const node_t * p = input.get(); 60 auto ret = apply(p); 61 input = ret.node; 62 ApplyResult<Node> ret = applyBase( input.get(), false ); 63 input = ret.node.strict_as<node_t>(); 62 64 return ret.count; 65 } 66 67 template< typename node_t > 68 ApplyResult<node_t> applyFree( const node_t * input ) const { 69 ApplyResult<Node> ret = applyBase( input, true ); 70 return { ret.node.strict_as<node_t>(), ret.count }; 63 71 } 64 72 65 73 template< typename node_t, enum Node::ref_type ref_t > 66 74 int applyFree( ptr_base< node_t, ref_t > & input ) const { 67 const node_t * p = input.get(); 68 auto ret = applyFree(p); 69 input = ret.node; 75 ApplyResult<Node> ret = applyBase( input.get(), true ); 76 input = ret.node.strict_as<node_t>(); 70 77 return ret.count; 71 78 } … … 97 104 // Mutator that performs the substitution 98 105 struct Substituter; 106 ApplyResult<Node> applyBase( const Node * input, bool isFree ) const; 99 107 100 108 // TODO: worry about traversing into a forall-qualified function type or type decl with assertions … … 158 166 } // namespace ast 159 167 160 // include needs to happen after TypeSubstitution is defined so that both TypeSubstitution and161 // PassVisitor are defined before PassVisitor implementation accesses TypeSubstitution internals.162 #include "Pass.hpp"163 #include "Copy.hpp"164 165 namespace ast {166 167 // definitition must happen after PassVisitor is included so that WithGuards can be used168 struct TypeSubstitution::Substituter : public WithGuards, public WithVisitorRef<Substituter>, public PureVisitor {169 static size_t traceId;170 171 Substituter( const TypeSubstitution & sub, bool freeOnly ) : sub( sub ), freeOnly( freeOnly ) {}172 173 const Type * postvisit( const TypeInstType * aggregateUseType );174 175 /// Records type variable bindings from forall-statements176 void previsit( const FunctionType * type );177 /// Records type variable bindings from forall-statements and instantiations of generic types178 // void handleAggregateType( const BaseInstType * type );179 180 // void previsit( const StructInstType * aggregateUseType );181 // void previsit( const UnionInstType * aggregateUseType );182 183 const TypeSubstitution & sub;184 int subCount = 0;185 bool freeOnly;186 typedef std::unordered_set< TypeEnvKey > BoundVarsType;187 BoundVarsType boundVars;188 189 };190 191 template< typename SynTreeClass >192 
TypeSubstitution::ApplyResult<SynTreeClass> TypeSubstitution::apply( const SynTreeClass * input ) const {193 assert( input );194 Pass<Substituter> sub( *this, false );195 input = strict_dynamic_cast< const SynTreeClass * >( input->accept( sub ) );196 return { input, sub.core.subCount };197 }198 199 template< typename SynTreeClass >200 TypeSubstitution::ApplyResult<SynTreeClass> TypeSubstitution::applyFree( const SynTreeClass * input ) const {201 assert( input );202 Pass<Substituter> sub( *this, true );203 input = strict_dynamic_cast< const SynTreeClass * >( input->accept( sub ) );204 return { input, sub.core.subCount };205 }206 207 } // namespace ast208 209 168 // Local Variables: // 210 169 // tab-width: 4 // -
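A usage sketch of the reworked interface, using names from this changeset; the concrete type constructed here is only an example:

    #include "AST/Type.hpp"
    #include "AST/TypeSubstitution.hpp"

    ast::TypeSubstitution sub;                               // bindings populated elsewhere
    ast::ptr< ast::Type > ty = new ast::BasicType( ast::BasicType::Kind::Bool );
    int count = sub.apply( ty );                             // ptr overload: substitutes in place, returns the substitution count
    auto res = sub.applyFree( ty.get() );                    // ApplyResult< ast::Type >{ node, count }, free variables only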
src/AST/Util.cpp
r2b78949 r8a930c03 83 83 } 84 84 85 /// Check that the MemberExpr has an aggregate type and matching member. 86 void memberMatchesAggregate( const MemberExpr * expr ) { 87 const Type * aggrType = expr->aggregate->result->stripReferences(); 88 const AggregateDecl * decl = nullptr; 89 if ( auto inst = dynamic_cast<const StructInstType *>( aggrType ) ) { 90 decl = inst->base; 91 } else if ( auto inst = dynamic_cast<const UnionInstType *>( aggrType ) ) { 92 decl = inst->base; 93 } 94 assertf( decl, "Aggregate of member not correct type." ); 95 96 for ( auto aggrMember : decl->members ) { 97 if ( expr->member == aggrMember ) { 98 return; 99 } 100 } 101 assertf( false, "Member not found." ); 102 } 103 85 104 struct InvariantCore { 86 105 // To save on the number of visits: this is a kind of composed core. … … 108 127 } 109 128 129 void previsit( const MemberExpr * node ) { 130 previsit( (const ParseNode *)node ); 131 memberMatchesAggregate( node ); 132 } 133 110 134 void postvisit( const Node * node ) { 111 135 no_strong_cycles.postvisit( node ); -
src/Concurrency/Actors.cpp
r2b78949 r8a930c03 38 38 bool namedDecl = false; 39 39 40 // finds and sets a ptr to the Allocation enum, which is needed in the next pass40 // finds and sets a ptr to the allocation enum, which is needed in the next pass 41 41 void previsit( const EnumDecl * decl ) { 42 if( decl->name == " Allocation" ) *allocationDecl = decl;42 if( decl->name == "allocation" ) *allocationDecl = decl; 43 43 } 44 44 … … 227 227 static inline derived_actor & ?|?( derived_actor & receiver, derived_msg & msg ) { 228 228 request new_req; 229 Allocation (*my_work_fn)( derived_actor &, derived_msg & ) = receive;229 allocation (*my_work_fn)( derived_actor &, derived_msg & ) = receive; 230 230 __receive_fn fn = (__receive_fn)my_work_fn; 231 231 new_req{ &receiver, &msg, fn }; … … 246 246 )); 247 247 248 // Function type is: Allocation (*)( derived_actor &, derived_msg & )248 // Function type is: allocation (*)( derived_actor &, derived_msg & ) 249 249 FunctionType * derivedReceive = new FunctionType(); 250 250 derivedReceive->params.push_back( ast::deepCopy( derivedActorRef ) ); … … 252 252 derivedReceive->returns.push_back( new EnumInstType( *allocationDecl ) ); 253 253 254 // Generates: Allocation (*my_work_fn)( derived_actor &, derived_msg & ) = receive;254 // Generates: allocation (*my_work_fn)( derived_actor &, derived_msg & ) = receive; 255 255 sendBody->push_back( new DeclStmt( 256 256 decl->location, … … 263 263 )); 264 264 265 // Function type is: Allocation (*)( actor &, message & )265 // Function type is: allocation (*)( actor &, message & ) 266 266 FunctionType * genericReceive = new FunctionType(); 267 267 genericReceive->params.push_back( new ReferenceType( new StructInstType( *actorDecl ) ) ); … … 269 269 genericReceive->returns.push_back( new EnumInstType( *allocationDecl ) ); 270 270 271 // Generates: Allocation (*fn)( actor &, message & ) = (Allocation (*)( actor &, message & ))my_work_fn;271 // Generates: allocation (*fn)( actor &, message & ) = (allocation (*)( actor &, message & ))my_work_fn; 272 272 // More readable synonymous code: 273 // typedef Allocation (*__receive_fn)(actor &, message &);273 // typedef allocation (*__receive_fn)(actor &, message &); 274 274 // __receive_fn fn = (__receive_fn)my_work_fn; 275 275 sendBody->push_back( new DeclStmt( … … 422 422 const StructDecl ** msgDecl = &msgDeclPtr; 423 423 424 // first pass collects ptrs to Allocation enum, request type, and generic receive fn typedef424 // first pass collects ptrs to allocation enum, request type, and generic receive fn typedef 425 425 // also populates maps of all derived actors and messages 426 426 Pass<CollectactorStructDecls>::run( translationUnit, actorStructDecls, messageStructDecls, requestDecl, -
src/Concurrency/Waituntil.cpp
r2b78949 r8a930c03 14 14 // 15 15 16 #include "Waituntil.hpp" 17 16 18 #include <string> 17 19 18 #include " Waituntil.hpp"20 #include "AST/Copy.hpp" 19 21 #include "AST/Expr.hpp" 20 22 #include "AST/Pass.hpp" … … 93 95 case 0: 94 96 try { 95 if (on_selected( A, clause1 ))97 on_selected( A, clause1 ); 96 98 doA(); 97 99 } … … 120 122 // the unregister and on_selected calls are needed to support primitives where the acquire has side effects 121 123 // so the corresponding block MUST be run for those primitives to not lose state (example is channels) 122 if ( ! has_run(clause_statuses[0]) && whenA && unregister_select(A, clause1) && on_selected( A, clause1 ) ) 124 if ( !has_run(clause_statuses[0]) && whenA && unregister_select(A, clause1) ) 125 on_selected( A, clause1 ) 123 126 doA(); 124 127 ... repeat if above for B and C ... … … 617 620 618 621 // Generates: 619 /* if ( on_selected( target_1, node_1 ))... corresponding body of target_1 ...622 /* on_selected( target_1, node_1 ); ... corresponding body of target_1 ... 620 623 */ 621 624 CompoundStmt * GenerateWaitUntilCore::genStmtBlock( const WhenClause * clause, const ClauseData * data ) { … … 623 626 return new CompoundStmt( cLoc, 624 627 { 625 new IfStmt( cLoc, 626 genSelectTraitCall( clause, data, "on_selected" ), 627 new CompoundStmt( cLoc, 628 { 629 ast::deepCopy( clause->stmt ) 630 } 631 ) 632 ) 628 new ExprStmt( cLoc, 629 genSelectTraitCall( clause, data, "on_selected" ) 630 ), 631 ast::deepCopy( clause->stmt ) 633 632 } 634 633 ); … … 642 641 case 0: 643 642 try { 644 if (on_selected( target1, clause1 ))645 dotarget1stmt();643 on_selected( target1, clause1 ); 644 dotarget1stmt(); 646 645 } 647 646 finally { clause_statuses[i] = __SELECT_RUN; unregister_select(target1, clause1); } … … 662 661 case 0: 663 662 try { 664 if (on_selected( target1, clause1 ))665 dotarget1stmt();663 on_selected( target1, clause1 ); 664 dotarget1stmt(); 666 665 } 667 666 finally { clause_statuses[i] = __SELECT_RUN; unregister_select(target1, clause1); } … … 938 937 } 939 938 940 // C_TODO: will remove this commented code later. 
Currently it isn't needed but may switch to a modified version of this later if it has better performance941 // std::vector<ptr<CaseClause>> switchCases;942 943 // int idx = 0;944 // for ( const auto & clause: stmt->clauses ) {945 // const CodeLocation & cLoc = clause->location;946 // switchCases.push_back(947 // new CaseClause( cLoc,948 // new CastExpr( cLoc,949 // new AddressExpr( cLoc, new NameExpr( cLoc, data.at(idx)->targetName ) ),950 // new BasicType( BasicType::Kind::LongUnsignedInt ), GeneratedFlag::ExplicitCast951 // ),952 // {953 // new CompoundStmt( cLoc,954 // {955 // ast::deepCopy( clause->stmt ),956 // new BranchStmt( cLoc, BranchStmt::Kind::Break, Label( cLoc ) )957 // }958 // )959 // }960 // )961 // );962 // idx++;963 // }964 965 939 return new CompoundStmt( loc, 966 940 { 967 941 new ExprStmt( loc, new UntypedExpr( loc, new NameExpr( loc, "park" ) ) ), 968 942 outerIf 969 // new SwitchStmt( loc,970 // new NameExpr( loc, statusName ),971 // std::move( switchCases )972 // )973 943 } 974 944 ); … … 1013 983 const CodeLocation & cLoc = stmt->clauses.at(idx)->location; 1014 984 985 Expr * baseCond = genSelectTraitCall( stmt->clauses.at(idx), data.at(idx), "register_select" ); 1015 986 Expr * ifCond; 1016 987 … … 1023 994 ), 1024 995 new CastExpr( cLoc, 1025 genSelectTraitCall( stmt->clauses.at(idx), data.at(idx), "register_select" ),996 baseCond, 1026 997 new BasicType( BasicType::Kind::Bool ), GeneratedFlag::ExplicitCast 1027 998 ), 1028 999 LogicalFlag::AndExpr 1029 1000 ); 1030 } else ifCond = genSelectTraitCall( stmt->clauses.at(idx), data.at(idx), "register_select" );1001 } else ifCond = baseCond; 1031 1002 1032 1003 return new CompoundStmt( cLoc, … … 1046 1017 ifCond, 1047 1018 genStmtBlock( stmt->clauses.at(idx), data.at(idx) ), 1048 // ast::deepCopy( stmt->clauses.at(idx)->stmt ),1049 1019 recursiveOrIfGen( stmt, data, idx + 1, elseWhenName ) 1050 1020 ) -
src/ControlStruct/ExceptDeclNew.cpp
r2b78949 r8a930c03 18 18 #include <sstream> 19 19 20 #include "AST/Copy.hpp" 20 21 #include "AST/Decl.hpp" 21 22 #include "AST/Pass.hpp" -
src/GenPoly/InstantiateGenericNew.cpp
r2b78949 r8a930c03 362 362 ResolvExpr::typesCompatible( 363 363 memberExpr->result, 364 memberExpr->member->get_type() , ast::SymbolTable()) ) {364 memberExpr->member->get_type() ) ) { 365 365 return memberExpr; 366 366 } -
src/GenPoly/LvalueNew.cpp
r2b78949 r8a930c03 359 359 !ResolvExpr::typesCompatible( 360 360 srcType, 361 strict_dynamic_cast<ast::ReferenceType const *>( dstType )->base, 362 ast::SymbolTable() ) ) { 361 strict_dynamic_cast<ast::ReferenceType const *>( dstType )->base ) ) { 363 362 // Must keep cast if cast-to type is different from the actual type. 364 363 return ast::mutate_field( expr, &ast::CastExpr::arg, ret ); … … 377 376 if ( !ResolvExpr::typesCompatibleIgnoreQualifiers( 378 377 dstType->stripReferences(), 379 srcType->stripReferences(), 380 ast::SymbolTable() ) ) { 378 srcType->stripReferences() ) ) { 381 379 return ast::mutate_field( expr, &ast::CastExpr::arg, ret ); 382 380 } … … 393 391 ResolvExpr::typesCompatible( 394 392 expr->result, 395 expr->arg->result , ast::SymbolTable()) ) {393 expr->arg->result ) ) { 396 394 PRINT( 397 395 std::cerr << "types are compatible, removing cast: " << expr << '\n'; … … 590 588 ast::OpenVarSet openVars; 591 589 ResolvExpr::unify( ret->arg2->result, ret->arg3->result, newEnv, 592 needAssertions, haveAssertions, openVars, 593 ast::SymbolTable(), common ); 590 needAssertions, haveAssertions, openVars, common ); 594 591 ret->result = common ? common : ast::deepCopy( ret->arg2->result ); 595 592 return ret; -
src/GenPoly/SpecializeNew.cpp
r2b78949 r8a930c03 16 16 #include "Specialize.h" 17 17 18 #include "AST/Copy.hpp" // for deepCopy 18 19 #include "AST/Inspect.hpp" // for isIntrinsicCallExpr 19 20 #include "AST/Pass.hpp" // for Pass -
src/InitTweak/InitTweak.cc
r2b78949 r8a930c03 1066 1066 const ast::Type * t2 = ftype->params.back(); 1067 1067 1068 return ResolvExpr::typesCompatibleIgnoreQualifiers( t1, t2 , ast::SymbolTable());1068 return ResolvExpr::typesCompatibleIgnoreQualifiers( t1, t2 ); 1069 1069 } 1070 1070 -
src/MakeLibCfaNew.cpp
r2b78949 r8a930c03 16 16 #include "MakeLibCfa.h" 17 17 18 #include "AST/Copy.hpp" 18 19 #include "AST/Fwd.hpp" 19 20 #include "AST/Pass.hpp" -
src/Parser/lex.ll
r2b78949 r8a930c03 10 10 * Created On : Sat Sep 22 08:58:10 2001 11 11 * Last Modified By : Peter A. Buhr 12 * Last Modified On : Tue May 2 08:45:21202313 * Update Count : 7 6912 * Last Modified On : Fri Jun 9 10:04:00 2023 13 * Update Count : 770 14 14 */ 15 15 … … 319 319 static { KEYWORD_RETURN(STATIC); } 320 320 _Static_assert { KEYWORD_RETURN(STATICASSERT); } // C11 321 _static_assert { KEYWORD_RETURN(STATICASSERT); } // C23 321 322 struct { KEYWORD_RETURN(STRUCT); } 322 323 suspend { KEYWORD_RETURN(SUSPEND); } // CFA -
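Since the new keyword is simply routed to the same STATICASSERT token as _Static_assert, it is usable anywhere the existing static assertion already parses. A one-line sketch of source that this change newly accepts (the asserted condition is arbitrary):

    _static_assert( sizeof(long) >= 4, "long is unexpectedly small" );  // lexes identically to _Static_assert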
src/Parser/parser.yy
r2b78949 r8a930c03 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Apr 26 16:45:37202313 // Update Count : 63 3012 // Last Modified On : Wed Jun 7 14:32:28 2023 13 // Update Count : 6341 14 14 // 15 15 … … 108 108 assert( declList ); 109 109 // printf( "distAttr1 typeSpec %p\n", typeSpec ); typeSpec->print( std::cout ); 110 DeclarationNode * c ur = declList, * cl = (new DeclarationNode)->addType( typeSpec );110 DeclarationNode * cl = (new DeclarationNode)->addType( typeSpec ); 111 111 // printf( "distAttr2 cl %p\n", cl ); cl->type->print( std::cout ); 112 112 // cl->type->aggregate.name = cl->type->aggInst.aggregate->aggregate.name; 113 113 114 for ( cur = dynamic_cast<DeclarationNode *>( cur->get_next() ); cur != nullptr; cur = dynamic_cast<DeclarationNode *>( cur->get_next() ) ) {114 for ( DeclarationNode * cur = dynamic_cast<DeclarationNode *>( declList->get_next() ); cur != nullptr; cur = dynamic_cast<DeclarationNode *>( cur->get_next() ) ) { 115 115 cl->cloneBaseType( cur ); 116 116 } // for … … 206 206 #define NEW_ONE new ExpressionNode( build_constantInteger( yylloc, *new string( "1" ) ) ) 207 207 #define UPDOWN( compop, left, right ) (compop == OperKinds::LThan || compop == OperKinds::LEThan ? left : right) 208 #define MISSING_ANON_FIELD " Missing loop fields with an anonymous loop index is meaningless as loop index is unavailable in loop body."209 #define MISSING_LOW " Missing low value for up-to range so index is uninitialized."210 #define MISSING_HIGH " Missing high value for down-to range so index is uninitialized."208 #define MISSING_ANON_FIELD "syntax error, missing loop fields with an anonymous loop index is meaningless as loop index is unavailable in loop body." 209 #define MISSING_LOW "syntax error, missing low value for up-to range so index is uninitialized." 210 #define MISSING_HIGH "syntax error, missing high value for down-to range so index is uninitialized." 211 211 212 212 static ForCtrl * makeForCtrl( … … 232 232 ForCtrl * forCtrl( const CodeLocation & location, DeclarationNode * index, ExpressionNode * start, enum OperKinds compop, ExpressionNode * comp, ExpressionNode * inc ) { 233 233 if ( index->initializer ) { 234 SemanticError( yylloc, " Direct initialization disallowed. Use instead: type var; initialization ~ comparison ~ increment." );234 SemanticError( yylloc, "syntax error, direct initialization disallowed. Use instead: type var; initialization ~ comparison ~ increment." ); 235 235 } // if 236 236 if ( index->next ) { 237 SemanticError( yylloc, " Multiple loop indexes disallowed in for-loop declaration." );237 SemanticError( yylloc, "syntax error, multiple loop indexes disallowed in for-loop declaration." ); 238 238 } // if 239 239 DeclarationNode * initDecl = index->addInitializer( new InitializerNode( start ) ); … … 260 260 return forCtrl( location, type, new string( identifier->name ), start, compop, comp, inc ); 261 261 } else { 262 SemanticError( yylloc, " Expression disallowed. Only loop-index nameallowed." ); return nullptr;262 SemanticError( yylloc, "syntax error, loop-index name missing. Expression disallowed." ); return nullptr; 263 263 } // if 264 264 } else { 265 SemanticError( yylloc, " Expression disallowed. Only loop-index name allowed." ); return nullptr;265 SemanticError( yylloc, "syntax error, loop-index name missing. Expression disallowed. ." 
); return nullptr; 266 266 } // if 267 267 } // forCtrl 268 268 269 269 static void IdentifierBeforeIdentifier( string & identifier1, string & identifier2, const char * kind ) { 270 SemanticError( yylloc, ::toString( " Adjacent identifiers \"", identifier1, "\" and \"", identifier2, "\" are not meaningful in a", kind, ".\n"270 SemanticError( yylloc, ::toString( "syntax error, adjacent identifiers \"", identifier1, "\" and \"", identifier2, "\" are not meaningful in a", kind, ".\n" 271 271 "Possible cause is misspelled type name or missing generic parameter." ) ); 272 272 } // IdentifierBeforeIdentifier 273 273 274 274 static void IdentifierBeforeType( string & identifier, const char * kind ) { 275 SemanticError( yylloc, ::toString( " Identifier \"", identifier, "\" cannot appear before a ", kind, ".\n"275 SemanticError( yylloc, ::toString( "syntax error, identifier \"", identifier, "\" cannot appear before a ", kind, ".\n" 276 276 "Possible cause is misspelled storage/CV qualifier, misspelled typename, or missing generic parameter." ) ); 277 277 } // IdentifierBeforeType … … 689 689 // | RESUME '(' comma_expression ')' compound_statement 690 690 // { SemanticError( yylloc, "Resume expression is currently unimplemented." ); $$ = nullptr; } 691 | IDENTIFIER IDENTIFIER // syntax error691 | IDENTIFIER IDENTIFIER // invalid syntax rules 692 692 { IdentifierBeforeIdentifier( *$1.str, *$2.str, "n expression" ); $$ = nullptr; } 693 | IDENTIFIER type_qualifier // syntax error693 | IDENTIFIER type_qualifier // invalid syntax rules 694 694 { IdentifierBeforeType( *$1.str, "type qualifier" ); $$ = nullptr; } 695 | IDENTIFIER storage_class // syntax error695 | IDENTIFIER storage_class // invalid syntax rules 696 696 { IdentifierBeforeType( *$1.str, "storage class" ); $$ = nullptr; } 697 | IDENTIFIER basic_type_name // syntax error697 | IDENTIFIER basic_type_name // invalid syntax rules 698 698 { IdentifierBeforeType( *$1.str, "type" ); $$ = nullptr; } 699 | IDENTIFIER TYPEDEFname // syntax error699 | IDENTIFIER TYPEDEFname // invalid syntax rules 700 700 { IdentifierBeforeType( *$1.str, "type" ); $$ = nullptr; } 701 | IDENTIFIER TYPEGENname // syntax error701 | IDENTIFIER TYPEGENname // invalid syntax rules 702 702 { IdentifierBeforeType( *$1.str, "type" ); $$ = nullptr; } 703 703 ; … … 1152 1152 identifier_or_type_name ':' attribute_list_opt statement 1153 1153 { $$ = $4->add_label( yylloc, $1, $3 ); } 1154 | identifier_or_type_name ':' attribute_list_opt error // syntax error1155 { 1156 SemanticError( yylloc, ::toString( " Label \"", *$1.str, "\" must be associated with a statement, "1154 | identifier_or_type_name ':' attribute_list_opt error // invalid syntax rule 1155 { 1156 SemanticError( yylloc, ::toString( "syntx error, label \"", *$1.str, "\" must be associated with a statement, " 1157 1157 "where a declaration, case, or default is not a statement. " 1158 1158 "Move the label or terminate with a semi-colon." ) ); … … 1193 1193 | statement_list_nodecl statement 1194 1194 { assert( $1 ); $1->set_last( $2 ); $$ = $1; } 1195 | statement_list_nodecl error // syntax error1196 { SemanticError( yylloc, " Declarations only allowed at the start of the switch body, i.e., after the '{'." ); $$ = nullptr; }1195 | statement_list_nodecl error // invalid syntax rule 1196 { SemanticError( yylloc, "syntax error, declarations only allowed at the start of the switch body, i.e., after the '{'." ); $$ = nullptr; } 1197 1197 ; 1198 1198 … … 1219 1219 $$ = $7 ? 
new StatementNode( build_compound( yylloc, (StatementNode *)((new StatementNode( $7 ))->set_last( sw )) ) ) : sw; 1220 1220 } 1221 | SWITCH '(' comma_expression ')' '{' error '}' // CFA, syntaxerror1222 { SemanticError( yylloc, " Only declarations canappear before the list of case clauses." ); $$ = nullptr; }1221 | SWITCH '(' comma_expression ')' '{' error '}' // CFA, invalid syntax rule error 1222 { SemanticError( yylloc, "synatx error, declarations can only appear before the list of case clauses." ); $$ = nullptr; } 1223 1223 | CHOOSE '(' comma_expression ')' case_clause // CFA 1224 1224 { $$ = new StatementNode( build_switch( yylloc, false, $3, $5 ) ); } … … 1228 1228 $$ = $7 ? new StatementNode( build_compound( yylloc, (StatementNode *)((new StatementNode( $7 ))->set_last( sw )) ) ) : sw; 1229 1229 } 1230 | CHOOSE '(' comma_expression ')' '{' error '}' // CFA, syntax error1231 { SemanticError( yylloc, " Only declarations canappear before the list of case clauses." ); $$ = nullptr; }1230 | CHOOSE '(' comma_expression ')' '{' error '}' // CFA, invalid syntax rule 1231 { SemanticError( yylloc, "syntax error, declarations can only appear before the list of case clauses." ); $$ = nullptr; } 1232 1232 ; 1233 1233 … … 1268 1268 1269 1269 case_label: // CFA 1270 CASE error // syntax error1271 { SemanticError( yylloc, " Missing case listafter case." ); $$ = nullptr; }1270 CASE error // invalid syntax rule 1271 { SemanticError( yylloc, "syntax error, case list missing after case." ); $$ = nullptr; } 1272 1272 | CASE case_value_list ':' { $$ = $2; } 1273 | CASE case_value_list error // syntax error1274 { SemanticError( yylloc, " Missing colonafter case list." ); $$ = nullptr; }1273 | CASE case_value_list error // invalid syntax rule 1274 { SemanticError( yylloc, "syntax error, colon missing after case list." ); $$ = nullptr; } 1275 1275 | DEFAULT ':' { $$ = new ClauseNode( build_default( yylloc ) ); } 1276 1276 // A semantic check is required to ensure only one default clause per switch/choose statement. 1277 | DEFAULT error // syntax error1278 { SemanticError( yylloc, " Missing colonafter default." ); $$ = nullptr; }1277 | DEFAULT error // invalid syntax rules 1278 { SemanticError( yylloc, "syntax error, colon missing after default." 
); $$ = nullptr; } 1279 1279 ; 1280 1280 … … 1405 1405 else { SemanticError( yylloc, MISSING_HIGH ); $$ = nullptr; } 1406 1406 } 1407 | comma_expression updowneq comma_expression '~' '@' // CFA, error1407 | comma_expression updowneq comma_expression '~' '@' // CFA, invalid syntax rules 1408 1408 { SemanticError( yylloc, MISSING_ANON_FIELD ); $$ = nullptr; } 1409 | '@' updowneq '@' // CFA, error1409 | '@' updowneq '@' // CFA, invalid syntax rules 1410 1410 { SemanticError( yylloc, MISSING_ANON_FIELD ); $$ = nullptr; } 1411 | '@' updowneq comma_expression '~' '@' // CFA, error1411 | '@' updowneq comma_expression '~' '@' // CFA, invalid syntax rules 1412 1412 { SemanticError( yylloc, MISSING_ANON_FIELD ); $$ = nullptr; } 1413 | comma_expression updowneq '@' '~' '@' // CFA, error1413 | comma_expression updowneq '@' '~' '@' // CFA, invalid syntax rules 1414 1414 { SemanticError( yylloc, MISSING_ANON_FIELD ); $$ = nullptr; } 1415 | '@' updowneq '@' '~' '@' // CFA, error1415 | '@' updowneq '@' '~' '@' // CFA, invalid syntax rules 1416 1416 { SemanticError( yylloc, MISSING_ANON_FIELD ); $$ = nullptr; } 1417 1417 … … 1431 1431 { 1432 1432 if ( $4 == OperKinds::GThan || $4 == OperKinds::GEThan ) { SemanticError( yylloc, MISSING_HIGH ); $$ = nullptr; } 1433 else if ( $4 == OperKinds::LEThan ) { SemanticError( yylloc, " Equality with missing high value is meaningless. Use \"~\"." ); $$ = nullptr; }1433 else if ( $4 == OperKinds::LEThan ) { SemanticError( yylloc, "syntax error, equality with missing high value is meaningless. Use \"~\"." ); $$ = nullptr; } 1434 1434 else $$ = forCtrl( yylloc, $3, $1, $3->clone(), $4, nullptr, NEW_ONE ); 1435 1435 } 1436 | comma_expression ';' '@' updowneq '@' // CFA, error1437 { SemanticError( yylloc, " Missing low/high value for up/down-to range so index is uninitialized." ); $$ = nullptr; }1436 | comma_expression ';' '@' updowneq '@' // CFA, invalid syntax rules 1437 { SemanticError( yylloc, "syntax error, missing low/high value for up/down-to range so index is uninitialized." ); $$ = nullptr; } 1438 1438 1439 1439 | comma_expression ';' comma_expression updowneq comma_expression '~' comma_expression // CFA 1440 1440 { $$ = forCtrl( yylloc, $3, $1, UPDOWN( $4, $3->clone(), $5 ), $4, UPDOWN( $4, $5->clone(), $3->clone() ), $7 ); } 1441 | comma_expression ';' '@' updowneq comma_expression '~' comma_expression // CFA, error1441 | comma_expression ';' '@' updowneq comma_expression '~' comma_expression // CFA, invalid syntax rules 1442 1442 { 1443 1443 if ( $4 == OperKinds::LThan || $4 == OperKinds::LEThan ) { SemanticError( yylloc, MISSING_LOW ); $$ = nullptr; } … … 1447 1447 { 1448 1448 if ( $4 == OperKinds::GThan || $4 == OperKinds::GEThan ) { SemanticError( yylloc, MISSING_HIGH ); $$ = nullptr; } 1449 else if ( $4 == OperKinds::LEThan ) { SemanticError( yylloc, " Equality with missing high value is meaningless. Use \"~\"." ); $$ = nullptr; }1449 else if ( $4 == OperKinds::LEThan ) { SemanticError( yylloc, "syntax error, equality with missing high value is meaningless. Use \"~\"." 
); $$ = nullptr; } 1450 1450 else $$ = forCtrl( yylloc, $3, $1, $3->clone(), $4, nullptr, $7 ); 1451 1451 } 1452 1452 | comma_expression ';' comma_expression updowneq comma_expression '~' '@' // CFA 1453 1453 { $$ = forCtrl( yylloc, $3, $1, UPDOWN( $4, $3->clone(), $5 ), $4, UPDOWN( $4, $5->clone(), $3->clone() ), nullptr ); } 1454 | comma_expression ';' '@' updowneq comma_expression '~' '@' // CFA, error1454 | comma_expression ';' '@' updowneq comma_expression '~' '@' // CFA, invalid syntax rules 1455 1455 { 1456 1456 if ( $4 == OperKinds::LThan || $4 == OperKinds::LEThan ) { SemanticError( yylloc, MISSING_LOW ); $$ = nullptr; } … … 1460 1460 { 1461 1461 if ( $4 == OperKinds::GThan || $4 == OperKinds::GEThan ) { SemanticError( yylloc, MISSING_HIGH ); $$ = nullptr; } 1462 else if ( $4 == OperKinds::LEThan ) { SemanticError( yylloc, " Equality with missing high value is meaningless. Use \"~\"." ); $$ = nullptr; }1462 else if ( $4 == OperKinds::LEThan ) { SemanticError( yylloc, "syntax error, equality with missing high value is meaningless. Use \"~\"." ); $$ = nullptr; } 1463 1463 else $$ = forCtrl( yylloc, $3, $1, $3->clone(), $4, nullptr, nullptr ); 1464 1464 } 1465 1465 | comma_expression ';' '@' updowneq '@' '~' '@' // CFA 1466 { SemanticError( yylloc, " Missing low/high value for up/down-to range so index is uninitialized." ); $$ = nullptr; }1466 { SemanticError( yylloc, "syntax error, missing low/high value for up/down-to range so index is uninitialized." ); $$ = nullptr; } 1467 1467 1468 1468 | declaration comma_expression // CFA … … 1481 1481 { 1482 1482 if ( $3 == OperKinds::GThan || $3 == OperKinds::GEThan ) { SemanticError( yylloc, MISSING_HIGH ); $$ = nullptr; } 1483 else if ( $3 == OperKinds::LEThan ) { SemanticError( yylloc, " Equality with missing high value is meaningless. Use \"~\"." ); $$ = nullptr; }1483 else if ( $3 == OperKinds::LEThan ) { SemanticError( yylloc, "syntax error, equality with missing high value is meaningless. Use \"~\"." ); $$ = nullptr; } 1484 1484 else $$ = forCtrl( yylloc, $1, $2, $3, nullptr, NEW_ONE ); 1485 1485 } … … 1495 1495 { 1496 1496 if ( $3 == OperKinds::GThan || $3 == OperKinds::GEThan ) { SemanticError( yylloc, MISSING_HIGH ); $$ = nullptr; } 1497 else if ( $3 == OperKinds::LEThan ) { SemanticError( yylloc, " Equality with missing high value is meaningless. Use \"~\"." ); $$ = nullptr; }1497 else if ( $3 == OperKinds::LEThan ) { SemanticError( yylloc, "syntax error, equality with missing high value is meaningless. Use \"~\"." ); $$ = nullptr; } 1498 1498 else $$ = forCtrl( yylloc, $1, $2, $3, nullptr, $6 ); 1499 1499 } … … 1508 1508 { 1509 1509 if ( $3 == OperKinds::GThan || $3 == OperKinds::GEThan ) { SemanticError( yylloc, MISSING_HIGH ); $$ = nullptr; } 1510 else if ( $3 == OperKinds::LEThan ) { SemanticError( yylloc, " Equality with missing high value is meaningless. Use \"~\"." ); $$ = nullptr; }1510 else if ( $3 == OperKinds::LEThan ) { SemanticError( yylloc, "syntax error, equality with missing high value is meaningless. Use \"~\"." ); $$ = nullptr; } 1511 1511 else $$ = forCtrl( yylloc, $1, $2, $3, nullptr, nullptr ); 1512 1512 } 1513 | declaration '@' updowneq '@' '~' '@' // CFA, error1514 { SemanticError( yylloc, " Missing low/high value for up/down-to range so index is uninitialized." ); $$ = nullptr; }1513 | declaration '@' updowneq '@' '~' '@' // CFA, invalid syntax rules 1514 { SemanticError( yylloc, "syntax error, missing low/high value for up/down-to range so index is uninitialized." 
); $$ = nullptr; } 1515 1515 1516 1516 | comma_expression ';' TYPEDEFname // CFA, array type … … 1521 1521 | comma_expression ';' downupdowneq TYPEDEFname // CFA, array type 1522 1522 { 1523 if ( $3 == OperKinds::LEThan || $3 == OperKinds::GEThan ) { SemanticError( yylloc, "All enumation ranges are equal (all values). Remove \"=~\"." ); $$ = nullptr; } 1523 if ( $3 == OperKinds::LEThan || $3 == OperKinds::GEThan ) { 1524 SemanticError( yylloc, "syntax error, all enumeration ranges are equal (all values). Remove \"=~\"." ); $$ = nullptr; 1525 } 1524 1526 SemanticError( yylloc, "Type iterator is currently unimplemented." ); $$ = nullptr; 1525 1527 } … … 1616 1618 MUTEX '(' argument_expression_list_opt ')' statement 1617 1619 { 1618 if ( ! $3 ) { SemanticError( yylloc, " mutex argument list cannot be empty." ); $$ = nullptr; }1620 if ( ! $3 ) { SemanticError( yylloc, "syntax error, mutex argument list cannot be empty." ); $$ = nullptr; } 1619 1621 $$ = new StatementNode( build_mutex( yylloc, $3, $5 ) ); 1620 1622 } … … 1664 1666 { $$ = build_waitfor_timeout( yylloc, $1, $3, $4, maybe_build_compound( yylloc, $5 ) ); } 1665 1667 // "else" must be conditional after timeout or timeout is never triggered (i.e., it is meaningless) 1666 | wor_waitfor_clause wor when_clause_opt timeout statement wor ELSE statement // syntax error1667 { SemanticError( yylloc, " else clause must be conditional after timeout or timeout never triggered." ); $$ = nullptr; }1668 | wor_waitfor_clause wor when_clause_opt timeout statement wor ELSE statement // invalid syntax rules 1669 { SemanticError( yylloc, "syntax error, else clause must be conditional after timeout or timeout never triggered." ); $$ = nullptr; } 1668 1670 | wor_waitfor_clause wor when_clause_opt timeout statement wor when_clause ELSE statement 1669 1671 { $$ = build_waitfor_else( yylloc, build_waitfor_timeout( yylloc, $1, $3, $4, maybe_build_compound( yylloc, $5 ) ), $7, maybe_build_compound( yylloc, $9 ) ); } … … 1709 1711 { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::LEFT_OR, $1, build_waituntil_timeout( yylloc, $3, $4, maybe_build_compound( yylloc, $5 ) ) ); } 1710 1712 // "else" must be conditional after timeout or timeout is never triggered (i.e., it is meaningless) 1711 | wor_waituntil_clause wor when_clause_opt timeout statement wor ELSE statement // syntax error1712 { SemanticError( yylloc, " else clause must be conditional after timeout or timeout never triggered." ); $$ = nullptr; }1713 | wor_waituntil_clause wor when_clause_opt timeout statement wor ELSE statement // invalid syntax rules 1714 { SemanticError( yylloc, "syntax error, else clause must be conditional after timeout or timeout never triggered." ); $$ = nullptr; } 1713 1715 | wor_waituntil_clause wor when_clause_opt timeout statement wor when_clause ELSE statement 1714 1716 { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::LEFT_OR, $1, … … 2065 2067 assert( $1->type ); 2066 2068 if ( $1->type->qualifiers.any() ) { // CV qualifiers ? 2067 SemanticError( yylloc, " Useless type qualifier(s) in empty declaration." ); $$ = nullptr;2069 SemanticError( yylloc, "syntax error, useless type qualifier(s) in empty declaration." ); $$ = nullptr; 2068 2070 } 2069 2071 // enums are never empty declarations because there must have at least one enumeration. 2070 2072 if ( $1->type->kind == TypeData::AggregateInst && $1->storageClasses.any() ) { // storage class ? 
2071 SemanticError( yylloc, " Useless storage qualifier(s) in empty aggregate declaration." ); $$ = nullptr;2073 SemanticError( yylloc, "syntax error, useless storage qualifier(s) in empty aggregate declaration." ); $$ = nullptr; 2072 2074 } 2073 2075 } … … 2100 2102 | type_declaration_specifier 2101 2103 | sue_declaration_specifier 2102 | sue_declaration_specifier invalid_types 2103 { 2104 SemanticError( yylloc, ::toString( " Missing ';' afterend of ",2104 | sue_declaration_specifier invalid_types // invalid syntax rule 2105 { 2106 SemanticError( yylloc, ::toString( "syntax error, expecting ';' at end of ", 2105 2107 $1->type->enumeration.name ? "enum" : ast::AggregateDecl::aggrString( $1->type->aggregate.kind ), 2106 " declaration " ) );2108 " declaration." ) ); 2107 2109 $$ = nullptr; 2108 2110 } … … 2584 2586 // } // for 2585 2587 } 2588 | type_specifier field_declaring_list_opt '}' // invalid syntax rule 2589 { 2590 SemanticError( yylloc, ::toString( "syntax error, expecting ';' at end of previous declaration." ) ); 2591 $$ = nullptr; 2592 } 2586 2593 | EXTENSION type_specifier field_declaring_list_opt ';' // GCC 2587 2594 { $$ = fieldDecl( $2, $3 ); distExt( $$ ); } … … 2682 2689 | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt '{' enumerator_list comma_opt '}' 2683 2690 { 2684 if ( $3->storageClasses.val != 0 || $3->type->qualifiers.any() ) 2685 { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); }2686 2691 if ( $3->storageClasses.val != 0 || $3->type->qualifiers.any() ) { 2692 SemanticError( yylloc, "syntax error, storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); 2693 } 2687 2694 $$ = DeclarationNode::newEnum( nullptr, $7, true, true, $3 )->addQualifiers( $5 ); 2688 2695 } 2689 2696 | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt identifier attribute_list_opt 2690 2697 { 2691 if ( $3->storageClasses.any() || $3->type->qualifiers.val != 0 ) { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); } 2698 if ( $3->storageClasses.any() || $3->type->qualifiers.val != 0 ) { 2699 SemanticError( yylloc, "syntax error, storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." 
); 2700 } 2692 2701 typedefTable.makeTypedef( *$6 ); 2693 2702 } … … 3154 3163 | IDENTIFIER IDENTIFIER 3155 3164 { IdentifierBeforeIdentifier( *$1.str, *$2.str, " declaration" ); $$ = nullptr; } 3156 | IDENTIFIER type_qualifier // syntax error3165 | IDENTIFIER type_qualifier // invalid syntax rules 3157 3166 { IdentifierBeforeType( *$1.str, "type qualifier" ); $$ = nullptr; } 3158 | IDENTIFIER storage_class // syntax error3167 | IDENTIFIER storage_class // invalid syntax rules 3159 3168 { IdentifierBeforeType( *$1.str, "storage class" ); $$ = nullptr; } 3160 | IDENTIFIER basic_type_name // syntax error3169 | IDENTIFIER basic_type_name // invalid syntax rules 3161 3170 { IdentifierBeforeType( *$1.str, "type" ); $$ = nullptr; } 3162 | IDENTIFIER TYPEDEFname // syntax error3171 | IDENTIFIER TYPEDEFname // invalid syntax rules 3163 3172 { IdentifierBeforeType( *$1.str, "type" ); $$ = nullptr; } 3164 | IDENTIFIER TYPEGENname // syntax error3173 | IDENTIFIER TYPEGENname // invalid syntax rules 3165 3174 { IdentifierBeforeType( *$1.str, "type" ); $$ = nullptr; } 3166 3175 | external_function_definition … … 3197 3206 | type_qualifier_list 3198 3207 { 3199 if ( $1->type->qualifiers.any() ) { SemanticError( yylloc, "CV qualifiers cannot be distributed; only storage-class and forall qualifiers." ); } 3208 if ( $1->type->qualifiers.any() ) { 3209 SemanticError( yylloc, "syntax error, CV qualifiers cannot be distributed; only storage-class and forall qualifiers." ); 3210 } 3200 3211 if ( $1->type->forall ) forall = true; // remember generic type 3201 3212 } … … 3208 3219 | declaration_qualifier_list 3209 3220 { 3210 if ( $1->type && $1->type->qualifiers.any() ) { SemanticError( yylloc, "CV qualifiers cannot be distributed; only storage-class and forall qualifiers." ); } 3221 if ( $1->type && $1->type->qualifiers.any() ) { 3222 SemanticError( yylloc, "syntax error, CV qualifiers cannot be distributed; only storage-class and forall qualifiers." ); 3223 } 3211 3224 if ( $1->type && $1->type->forall ) forall = true; // remember generic type 3212 3225 } … … 3219 3232 | declaration_qualifier_list type_qualifier_list 3220 3233 { 3221 if ( ($1->type && $1->type->qualifiers.any()) || ($2->type && $2->type->qualifiers.any()) ) { SemanticError( yylloc, "CV qualifiers cannot be distributed; only storage-class and forall qualifiers." ); } 3234 if ( ($1->type && $1->type->qualifiers.any()) || ($2->type && $2->type->qualifiers.any()) ) { 3235 SemanticError( yylloc, "syntax error, CV qualifiers cannot be distributed; only storage-class and forall qualifiers." ); 3236 } 3222 3237 if ( ($1->type && $1->type->forall) || ($2->type && $2->type->forall) ) forall = true; // remember generic type 3223 3238 } … … 3250 3265 $$ = $3; forall = false; 3251 3266 if ( $5 ) { 3252 SemanticError( yylloc, " Attributes cannot be associated with function body. Move attribute(s) before \"with\" clause." );3267 SemanticError( yylloc, "syntax error, attributes cannot be associated with function body. Move attribute(s) before \"with\" clause." ); 3253 3268 $$ = nullptr; 3254 3269 } // if -
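The parser.yy hunks above mostly standardize the hand-written diagnostics so each message begins with "syntax error, ..." and retag the guarding grammar rules as "invalid syntax rule". Below is a small Cforall sketch of the switch/choose forms those rules police; the commented-out lines and the messages beside them are assumptions about user input that would trigger the reworded diagnostics, not code from this changeset.

#include <fstream.hfa>

int main() {
	int x = 2;
	choose ( x ) {						// CFA switch with implicit break between cases
		int twice;						// declarations are allowed only BEFORE the case clauses
	  case 1, 2, 3:						// CFA case list
		twice = x * 2;
		sout | twice;
		// case :						=> "syntax error, case list missing after case."
		// case 4						=> "syntax error, colon missing after case list."
		// default						=> "syntax error, colon missing after default."
	  default:
		sout | "other";
	} // choose
}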
src/ResolvExpr/CandidateFinder.cpp
r2b78949 r8a930c03 373 373 unify( 374 374 ttype, argType, newResult.env, newResult.need, newResult.have, 375 newResult.open , symtab)375 newResult.open ) 376 376 ) { 377 377 finalResults.emplace_back( std::move( newResult ) ); … … 444 444 ) 445 445 446 if ( unify( paramType, argType, env, need, have, open , symtab) ) {446 if ( unify( paramType, argType, env, need, have, open ) ) { 447 447 unsigned nextExpl = results[i].nextExpl + 1; 448 448 if ( nextExpl == expl.exprs.size() ) { nextExpl = 0; } … … 463 463 ast::OpenVarSet open = results[i].open; 464 464 465 if ( unify( paramType, cnst->result, env, need, have, open , symtab) ) {465 if ( unify( paramType, cnst->result, env, need, have, open ) ) { 466 466 results.emplace_back( 467 467 i, new ast::DefaultArgExpr{ cnst->location, cnst }, std::move( env ), … … 506 506 507 507 // attempt to unify types 508 if ( unify( paramType, argType, env, need, have, open , symtab) ) {508 if ( unify( paramType, argType, env, need, have, open ) ) { 509 509 // add new result 510 510 results.emplace_back( … … 750 750 const ast::Type * returnType = funcType->returns.front(); 751 751 if ( ! unify( 752 returnType, targetType, funcEnv, funcNeed, funcHave, funcOpen , symtab)752 returnType, targetType, funcEnv, funcNeed, funcHave, funcOpen ) 753 753 ) { 754 754 // unification failed, do not pursue this candidate … … 1159 1159 1160 1160 // unification run for side-effects 1161 unify( toType, cand->expr->result, cand->env, need, have, open , symtab);1161 unify( toType, cand->expr->result, cand->env, need, have, open ); 1162 1162 Cost thisCost = 1163 1163 (castExpr->isGenerated == ast::GeneratedFlag::GeneratedCast) … … 1483 1483 if ( 1484 1484 unify( 1485 r2->expr->result, r3->expr->result, env, need, have, open, symtab,1485 r2->expr->result, r3->expr->result, env, need, have, open, 1486 1486 common ) 1487 1487 ) { … … 1556 1556 if ( 1557 1557 unify( 1558 r1->expr->result, r2->expr->result, env, need, have, open, symtab,1558 r1->expr->result, r2->expr->result, env, need, have, open, 1559 1559 common ) 1560 1560 ) { … … 1659 1659 1660 1660 // unification run for side-effects 1661 bool canUnify = unify( toType, cand->expr->result, env, need, have, open , symtab);1661 bool canUnify = unify( toType, cand->expr->result, env, need, have, open ); 1662 1662 (void) canUnify; 1663 1663 Cost thisCost = computeConversionCost( cand->expr->result, toType, cand->expr->get_lvalue(), -
src/ResolvExpr/CastCost.cc
r2b78949 r8a930c03 165 165 if ( 166 166 pointerType->qualifiers <= ptr->qualifiers 167 && typesCompatibleIgnoreQualifiers( pointerType->base, ptr->base, symtab,env )167 && typesCompatibleIgnoreQualifiers( pointerType->base, ptr->base, env ) 168 168 ) { 169 169 cost = Cost::safe; … … 232 232 ) 233 233 234 if ( typesCompatibleIgnoreQualifiers( src, dst, symtab,env ) ) {234 if ( typesCompatibleIgnoreQualifiers( src, dst, env ) ) { 235 235 PRINT( std::cerr << "compatible!" << std::endl; ) 236 236 return Cost::zero; -
src/ResolvExpr/CommonType.cc
r2b78949 r8a930c03 21 21 22 22 #include "AST/Decl.hpp" 23 #include "AST/Pass.hpp" 23 24 #include "AST/Type.hpp" 24 25 #include "Common/PassVisitor.h" … … 675 676 const ast::Type * type2; 676 677 WidenMode widen; 677 const ast::SymbolTable & symtab;678 678 ast::TypeEnvironment & tenv; 679 679 const ast::OpenVarSet & open; … … 685 685 686 686 CommonType_new( 687 const ast::Type * t2, WidenMode w, const ast::SymbolTable & st,687 const ast::Type * t2, WidenMode w, 688 688 ast::TypeEnvironment & env, const ast::OpenVarSet & o, 689 689 ast::AssertionSet & need, ast::AssertionSet & have ) 690 : type2( t2 ), widen( w ), symtab( st ),tenv( env ), open( o ), need (need), have (have) ,result() {}690 : type2( t2 ), widen( w ), tenv( env ), open( o ), need (need), have (have) ,result() {} 691 691 692 692 void previsit( const ast::Node * ) { visit_children = false; } … … 748 748 ast::AssertionSet need, have; 749 749 if ( ! tenv.bindVar( 750 var, voidPtr->base, entry->second, need, have, open, widen , symtab)750 var, voidPtr->base, entry->second, need, have, open, widen ) 751 751 ) return; 752 752 } … … 761 761 ast::OpenVarSet newOpen{ open }; 762 762 if (enumInst->base->base 763 && unifyExact(type1, enumInst->base->base, tenv, need, have, newOpen, widen , symtab)) {763 && unifyExact(type1, enumInst->base->base, tenv, need, have, newOpen, widen)) { 764 764 result = type1; 765 765 return true; … … 798 798 799 799 ast::OpenVarSet newOpen{ open }; 800 if ( unifyExact( t1, t2, tenv, have, need, newOpen, noWiden() , symtab) ) {800 if ( unifyExact( t1, t2, tenv, have, need, newOpen, noWiden() ) ) { 801 801 result = pointer; 802 802 if ( q1.val != q2.val ) { … … 841 841 if (unifyExact( 842 842 arg1, tupleFromTypes( crnt2, end2 ), tenv, need, have, open, 843 noWiden() , symtab)) {843 noWiden() )) { 844 844 break; 845 845 … … 850 850 if (unifyExact( 851 851 tupleFromTypes( crnt1, end1 ), arg2, tenv, need, have, open, 852 noWiden() , symtab)) {852 noWiden() )) { 853 853 break; 854 854 … … 874 874 875 875 if ( ! unifyExact( 876 base1, base2, tenv, need, have, open, noWiden() , symtab)876 base1, base2, tenv, need, have, open, noWiden() ) 877 877 ) return; 878 878 } … … 894 894 895 895 if ( ! unifyExact( 896 base1, base2, tenv, need, have, open, noWiden() , symtab)896 base1, base2, tenv, need, have, open, noWiden() ) 897 897 ) return; 898 898 } … … 902 902 } 903 903 else if (! unifyExact( 904 arg1, arg2, tenv, need, have, open, noWiden() , symtab)) return;904 arg1, arg2, tenv, need, have, open, noWiden() )) return; 905 905 906 906 ++crnt1; ++crnt2; … … 912 912 if (! unifyExact( 913 913 t1, tupleFromTypes( crnt2, end2 ), tenv, need, have, open, 914 noWiden() , symtab)) return;914 noWiden() )) return; 915 915 } else if ( crnt2 != end2 ) { 916 916 // try unifying empty tuple with ttype … … 919 919 if (! 
unifyExact( 920 920 tupleFromTypes( crnt1, end1 ), t2, tenv, need, have, open, 921 noWiden() , symtab)) return;921 noWiden() )) return; 922 922 } 923 923 if ((f1->returns.size() == 0 && f2->returns.size() == 0) 924 || (f1->returns.size() == 1 && f2->returns.size() == 1 && unifyExact(f1->returns[0], f2->returns[0], tenv, need, have, open, noWiden() , symtab))) {924 || (f1->returns.size() == 1 && f2->returns.size() == 1 && unifyExact(f1->returns[0], f2->returns[0], tenv, need, have, open, noWiden()))) { 925 925 result = pointer; 926 926 … … 979 979 980 980 ast::OpenVarSet newOpen{ open }; 981 if ( unifyExact( t1, t2, tenv, have, need, newOpen, noWiden() , symtab) ) {981 if ( unifyExact( t1, t2, tenv, have, need, newOpen, noWiden() ) ) { 982 982 result = ref; 983 983 if ( q1.val != q2.val ) { … … 994 994 } else { 995 995 if (!dynamic_cast<const ast::EnumInstType *>(type2)) 996 result = commonType( type2, ref, tenv, need, have, open, widen , symtab);996 result = commonType( type2, ref, tenv, need, have, open, widen ); 997 997 } 998 998 } … … 1012 1012 void postvisit( const ast::EnumInstType * enumInst ) { 1013 1013 if (!dynamic_cast<const ast::EnumInstType *>(type2)) 1014 result = commonType( type2, enumInst, tenv, need, have, open, widen , symtab);1014 result = commonType( type2, enumInst, tenv, need, have, open, widen); 1015 1015 } 1016 1016 1017 1017 void postvisit( const ast::TraitInstType * ) {} 1018 1018 1019 void postvisit( const ast::TypeInstType * inst ) { 1020 if ( ! widen.first ) return; 1021 if ( const ast::NamedTypeDecl * nt = symtab.lookupType( inst->name ) ) { 1022 if ( const ast::Type * base = 1023 strict_dynamic_cast< const ast::TypeDecl * >( nt )->base 1024 ) { 1025 ast::CV::Qualifiers q1 = inst->qualifiers, q2 = type2->qualifiers; 1026 1027 // force t{1,2} to be cloned if their qualifiers must be mutated 1028 ast::ptr< ast::Type > t1{ base }, t2{ type2 }; 1029 reset_qualifiers( t1, q1 ); 1030 reset_qualifiers( t2 ); 1031 1032 ast::OpenVarSet newOpen{ open }; 1033 if ( unifyExact( t1, t2, tenv, have, need, newOpen, noWiden(), symtab ) ) { 1034 result = type2; 1035 reset_qualifiers( result, q1 | q2 ); 1036 } else { 1037 tryResolveWithTypedEnum( t1 ); 1038 } 1039 } 1040 } 1041 } 1042 1043 void postvisit( const ast::TupleType * tuple) { 1019 void postvisit( const ast::TypeInstType * ) {} 1020 1021 void postvisit( const ast::TupleType * tuple ) { 1044 1022 tryResolveWithTypedEnum( tuple ); 1045 1023 } … … 1102 1080 ast::ptr< ast::Type > handleReference( 1103 1081 const ast::ptr< ast::Type > & t1, const ast::ptr< ast::Type > & t2, WidenMode widen, 1104 const ast::SymbolTable & symtab,ast::TypeEnvironment & env,1082 ast::TypeEnvironment & env, 1105 1083 const ast::OpenVarSet & open 1106 1084 ) { … … 1110 1088 1111 1089 // need unify to bind type variables 1112 if ( unify( t1, t2, env, have, need, newOpen, symtab,common ) ) {1090 if ( unify( t1, t2, env, have, need, newOpen, common ) ) { 1113 1091 ast::CV::Qualifiers q1 = t1->qualifiers, q2 = t2->qualifiers; 1114 1092 PRINT( … … 1134 1112 const ast::ptr< ast::Type > & type1, const ast::ptr< ast::Type > & type2, 1135 1113 ast::TypeEnvironment & env, ast::AssertionSet & need, ast::AssertionSet & have, 1136 const ast::OpenVarSet & open, WidenMode widen , const ast::SymbolTable & symtab1114 const ast::OpenVarSet & open, WidenMode widen 1137 1115 ) { 1138 1116 unsigned depth1 = type1->referenceDepth(); … … 1149 1127 if ( depth1 > depth2 ) { 1150 1128 assert( ref1 ); 1151 result = handleReference( ref1->base, type2, widen, symtab,env, 
open );1129 result = handleReference( ref1->base, type2, widen, env, open ); 1152 1130 } else { // implies depth1 < depth2 1153 1131 assert( ref2 ); 1154 result = handleReference( type1, ref2->base, widen, symtab,env, open );1132 result = handleReference( type1, ref2->base, widen, env, open ); 1155 1133 } 1156 1134 … … 1170 1148 } 1171 1149 // otherwise both are reference types of the same depth and this is handled by the visitor 1172 ast::Pass<CommonType_new> visitor{ type2, widen, symtab,env, open, need, have };1150 ast::Pass<CommonType_new> visitor{ type2, widen, env, open, need, have }; 1173 1151 type1->accept( visitor ); 1174 ast::ptr< ast::Type > result = visitor.core.result; 1175 1176 // handling for opaque type declarations (?) 1177 if ( ! result && widen.second ) { 1178 if ( const ast::TypeInstType * inst = type2.as< ast::TypeInstType >() ) { 1179 if ( const ast::NamedTypeDecl * nt = symtab.lookupType( inst->name ) ) { 1180 auto type = strict_dynamic_cast< const ast::TypeDecl * >( nt ); 1181 if ( type->base ) { 1182 ast::CV::Qualifiers q1 = type1->qualifiers, q2 = type2->qualifiers; 1183 ast::OpenVarSet newOpen{ open }; 1184 1185 // force t{1,2} to be cloned if its qualifiers must be stripped, so that 1186 // type1 and type->base are left unchanged; calling convention forces 1187 // {type1,type->base}->strong_ref >= 1 1188 ast::ptr<ast::Type> t1{ type1 }, t2{ type->base }; 1189 reset_qualifiers( t1 ); 1190 reset_qualifiers( t2, q1 ); 1191 1192 if ( unifyExact( t1, t2, env, have, need, newOpen, noWiden(), symtab ) ) { 1193 result = t1; 1194 reset_qualifiers( result, q1 | q2 ); 1195 } 1196 } 1197 } 1198 } 1199 } 1200 1201 return result; 1152 // ast::ptr< ast::Type > result = visitor.core.result; 1153 1154 return visitor.core.result; 1202 1155 } 1203 1156 -
src/ResolvExpr/CommonType.hpp
r2b78949 r8a930c03 36 36 ast::TypeEnvironment & env, 37 37 ast::AssertionSet & need, ast::AssertionSet & have, 38 const ast::OpenVarSet & open, WidenMode widen, 39 const ast::SymbolTable & symtab ); 38 const ast::OpenVarSet & open, WidenMode widen); 40 39 41 40 } -
src/ResolvExpr/ConversionCost.cc
r2b78949 r8a930c03 532 532 } 533 533 } 534 if ( typesCompatibleIgnoreQualifiers( src, dst, symtab,env ) ) {534 if ( typesCompatibleIgnoreQualifiers( src, dst, env ) ) { 535 535 return Cost::zero; 536 536 } else if ( dynamic_cast< const ast::VoidType * >( dst ) ) { … … 566 566 ast::CV::Qualifiers tq2 = dstAsRef->base->qualifiers; 567 567 if ( tq1 <= tq2 && typesCompatibleIgnoreQualifiers( 568 srcAsRef->base, dstAsRef->base, symtab,env ) ) {568 srcAsRef->base, dstAsRef->base, env ) ) { 569 569 if ( tq1 == tq2 ) { 570 570 return Cost::zero; … … 587 587 const ast::ReferenceType * dstAsRef = dynamic_cast< const ast::ReferenceType * >( dst ); 588 588 assert( dstAsRef ); 589 if ( typesCompatibleIgnoreQualifiers( src, dstAsRef->base, symtab,env ) ) {589 if ( typesCompatibleIgnoreQualifiers( src, dstAsRef->base, env ) ) { 590 590 if ( srcIsLvalue ) { 591 591 if ( src->qualifiers == dstAsRef->base->qualifiers ) { … … 653 653 ast::CV::Qualifiers tq2 = dstAsPtr->base->qualifiers; 654 654 if ( tq1 <= tq2 && typesCompatibleIgnoreQualifiers( 655 pointerType->base, dstAsPtr->base, symtab,env ) ) {655 pointerType->base, dstAsPtr->base, env ) ) { 656 656 if ( tq1 == tq2 ) { 657 657 cost = Cost::zero; -
src/ResolvExpr/PolyCost.cc
r2b78949 r8a930c03 15 15 16 16 #include "AST/SymbolTable.hpp" 17 #include "AST/Pass.hpp" 17 18 #include "AST/Type.hpp" 18 19 #include "AST/TypeEnvironment.hpp" -
src/ResolvExpr/Resolver.cc
r2b78949 r8a930c03 1106 1106 1107 1107 /// Removes cast to type of argument (unlike StripCasts, also handles non-generated casts) 1108 void removeExtraneousCast( ast::ptr<ast::Expr> & expr , const ast::SymbolTable & symtab) {1108 void removeExtraneousCast( ast::ptr<ast::Expr> & expr ) { 1109 1109 if ( const ast::CastExpr * castExpr = expr.as< ast::CastExpr >() ) { 1110 if ( typesCompatible( castExpr->arg->result, castExpr->result , symtab) ) {1110 if ( typesCompatible( castExpr->arg->result, castExpr->result ) ) { 1111 1111 // cast is to the same type as its argument, remove it 1112 1112 swap_and_save_env( expr, castExpr->arg ); … … 1196 1196 ast::ptr< ast::Expr > castExpr = new ast::CastExpr{ untyped, type }; 1197 1197 ast::ptr< ast::Expr > newExpr = findSingleExpression( castExpr, context ); 1198 removeExtraneousCast( newExpr , context.symtab);1198 removeExtraneousCast( newExpr ); 1199 1199 return newExpr; 1200 1200 } … … 1261 1261 static size_t traceId; 1262 1262 Resolver_new( const ast::TranslationGlobal & global ) : 1263 ast::WithSymbolTable(ast::SymbolTable::ErrorDetection::ValidateOnAdd), 1263 1264 context{ symtab, global } {} 1264 1265 Resolver_new( const ResolveContext & context ) : … … 1834 1835 if ( 1835 1836 ! unify( 1836 arg->expr->result, *param, resultEnv, need, have, open, 1837 symtab ) 1837 arg->expr->result, *param, resultEnv, need, have, open ) 1838 1838 ) { 1839 1839 // Type doesn't match … … 2041 2041 const ast::Type * initContext = currentObject.getCurrentType(); 2042 2042 2043 removeExtraneousCast( newExpr , symtab);2043 removeExtraneousCast( newExpr ); 2044 2044 2045 2045 // check if actual object's type is char[] -
src/ResolvExpr/SatisfyAssertions.cpp
r2b78949 r8a930c03 215 215 findOpenVars( adjType, newOpen, closed, newNeed, have, FirstOpen ); 216 216 if ( allowConversion ) { 217 if ( auto c = commonType( toType, adjType, newEnv, newNeed, have, newOpen, WidenMode {true, true} , sat.symtab) ) {217 if ( auto c = commonType( toType, adjType, newEnv, newNeed, have, newOpen, WidenMode {true, true} ) ) { 218 218 // set up binding slot for recursive assertions 219 219 ast::UniqueId crntResnSlot = 0; … … 229 229 } 230 230 else { 231 if ( unifyExact( toType, adjType, newEnv, newNeed, have, newOpen, WidenMode {true, true} , sat.symtab) ) {231 if ( unifyExact( toType, adjType, newEnv, newNeed, have, newOpen, WidenMode {true, true} ) ) { 232 232 // set up binding slot for recursive assertions 233 233 ast::UniqueId crntResnSlot = 0; … … 392 392 mergeOpenVars( open, i.match.open ); 393 393 394 if ( ! env.combine( i.match.env, open , symtab) ) return false;394 if ( ! env.combine( i.match.env, open ) ) return false; 395 395 396 396 crnt.emplace_back( i ); -
src/ResolvExpr/Unify.cc
r2b78949 r8a930c03 128 128 const ast::Type * type1, const ast::Type * type2, ast::TypeEnvironment & env, 129 129 ast::AssertionSet & need, ast::AssertionSet & have, const ast::OpenVarSet & open, 130 WidenMode widen , const ast::SymbolTable & symtab);130 WidenMode widen ); 131 131 132 132 bool typesCompatible( const Type * first, const Type * second, const SymTab::Indexer & indexer, const TypeEnvironment & env ) { … … 150 150 151 151 bool typesCompatible( 152 const ast::Type * first, const ast::Type * second, const ast::SymbolTable & symtab,152 const ast::Type * first, const ast::Type * second, 153 153 const ast::TypeEnvironment & env ) { 154 154 ast::TypeEnvironment newEnv; … … 163 163 findOpenVars( newSecond, open, closed, need, have, FirstOpen ); 164 164 165 return unifyExact(newFirst, newSecond, newEnv, need, have, open, noWiden() , symtab);165 return unifyExact(newFirst, newSecond, newEnv, need, have, open, noWiden() ); 166 166 } 167 167 … … 183 183 184 184 bool typesCompatibleIgnoreQualifiers( 185 const ast::Type * first, const ast::Type * second, const ast::SymbolTable & symtab,185 const ast::Type * first, const ast::Type * second, 186 186 const ast::TypeEnvironment & env ) { 187 187 ast::TypeEnvironment newEnv; … … 216 216 subFirst, 217 217 subSecond, 218 newEnv, need, have, open, noWiden() , symtab);218 newEnv, need, have, open, noWiden() ); 219 219 } 220 220 … … 786 786 const ast::OpenVarSet & open; 787 787 WidenMode widen; 788 const ast::SymbolTable & symtab;789 788 public: 790 789 static size_t traceId; … … 793 792 Unify_new( 794 793 const ast::Type * type2, ast::TypeEnvironment & env, ast::AssertionSet & need, 795 ast::AssertionSet & have, const ast::OpenVarSet & open, WidenMode widen, 796 const ast::SymbolTable & symtab ) 794 ast::AssertionSet & have, const ast::OpenVarSet & open, WidenMode widen ) 797 795 : type2(type2), tenv(env), need(need), have(have), open(open), widen(widen), 798 symtab(symtab),result(false) {}796 result(false) {} 799 797 800 798 void previsit( const ast::Node * ) { visit_children = false; } … … 814 812 result = unifyExact( 815 813 pointer->base, pointer2->base, tenv, need, have, open, 816 noWiden() , symtab);814 noWiden()); 817 815 } 818 816 } … … 837 835 838 836 result = unifyExact( 839 array->base, array2->base, tenv, need, have, open, noWiden(), 840 symtab ); 837 array->base, array2->base, tenv, need, have, open, noWiden()); 841 838 } 842 839 … … 844 841 if ( auto ref2 = dynamic_cast< const ast::ReferenceType * >( type2 ) ) { 845 842 result = unifyExact( 846 ref->base, ref2->base, tenv, need, have, open, noWiden(), 847 symtab ); 843 ref->base, ref2->base, tenv, need, have, open, noWiden()); 848 844 } 849 845 } … … 854 850 static bool unifyTypeList( 855 851 Iter crnt1, Iter end1, Iter crnt2, Iter end2, ast::TypeEnvironment & env, 856 ast::AssertionSet & need, ast::AssertionSet & have, const ast::OpenVarSet & open, 857 const ast::SymbolTable & symtab 852 ast::AssertionSet & need, ast::AssertionSet & have, const ast::OpenVarSet & open 858 853 ) { 859 854 while ( crnt1 != end1 && crnt2 != end2 ) { … … 868 863 return unifyExact( 869 864 t1, tupleFromTypes( crnt2, end2 ), env, need, have, open, 870 noWiden() , symtab);865 noWiden() ); 871 866 } else if ( ! isTuple1 && isTuple2 ) { 872 867 // combine remainder of list1, then unify 873 868 return unifyExact( 874 869 tupleFromTypes( crnt1, end1 ), t2, env, need, have, open, 875 noWiden() , symtab);870 noWiden() ); 876 871 } 877 872 878 873 if ( ! 
unifyExact( 879 t1, t2, env, need, have, open, noWiden() , symtab)874 t1, t2, env, need, have, open, noWiden() ) 880 875 ) return false; 881 876 … … 891 886 return unifyExact( 892 887 t1, tupleFromTypes( crnt2, end2 ), env, need, have, open, 893 noWiden() , symtab);888 noWiden() ); 894 889 } else if ( crnt2 != end2 ) { 895 890 // try unifying empty tuple with ttype … … 898 893 return unifyExact( 899 894 tupleFromTypes( crnt1, end1 ), t2, env, need, have, open, 900 noWiden() , symtab);895 noWiden() ); 901 896 } 902 897 … … 908 903 const std::vector< ast::ptr< ast::Type > > & list2, 909 904 ast::TypeEnvironment & env, ast::AssertionSet & need, ast::AssertionSet & have, 910 const ast::OpenVarSet & open , const ast::SymbolTable & symtab905 const ast::OpenVarSet & open 911 906 ) { 912 907 return unifyTypeList( 913 list1.begin(), list1.end(), list2.begin(), list2.end(), env, need, have, open, 914 symtab ); 908 list1.begin(), list1.end(), list2.begin(), list2.end(), env, need, have, open); 915 909 } 916 910 … … 953 947 ) return; 954 948 955 if ( ! unifyTypeList( params, params2, tenv, need, have, open , symtab) ) return;949 if ( ! unifyTypeList( params, params2, tenv, need, have, open ) ) return; 956 950 if ( ! unifyTypeList( 957 func->returns, func2->returns, tenv, need, have, open , symtab) ) return;951 func->returns, func2->returns, tenv, need, have, open ) ) return; 958 952 959 953 markAssertions( have, need, func ); … … 1026 1020 1027 1021 if ( ! unifyExact( 1028 pty, pty2, tenv, need, have, open, noWiden() , symtab) ) {1022 pty, pty2, tenv, need, have, open, noWiden() ) ) { 1029 1023 result = false; 1030 1024 return; … … 1065 1059 const std::vector< ast::ptr< ast::Type > > & list1, 1066 1060 const std::vector< ast::ptr< ast::Type > > & list2, ast::TypeEnvironment & env, 1067 ast::AssertionSet & need, ast::AssertionSet & have, const ast::OpenVarSet & open, 1068 const ast::SymbolTable & symtab 1061 ast::AssertionSet & need, ast::AssertionSet & have, const ast::OpenVarSet & open 1069 1062 ) { 1070 1063 auto crnt1 = list1.begin(); … … 1081 1074 return unifyExact( 1082 1075 t1, tupleFromTypes( list2 ), env, need, have, open, 1083 noWiden() , symtab);1076 noWiden() ); 1084 1077 } else if ( ! isTuple1 && isTuple2 ) { 1085 1078 // combine entirety of list1, then unify 1086 1079 return unifyExact( 1087 1080 tupleFromTypes( list1 ), t2, env, need, have, open, 1088 noWiden() , symtab);1081 noWiden() ); 1089 1082 } 1090 1083 1091 1084 if ( ! 
unifyExact( 1092 t1, t2, env, need, have, open, noWiden() , symtab)1085 t1, t2, env, need, have, open, noWiden() ) 1093 1086 ) return false; 1094 1087 … … 1104 1097 return unifyExact( 1105 1098 t1, tupleFromTypes( list2 ), env, need, have, open, 1106 noWiden() , symtab);1099 noWiden() ); 1107 1100 } else if ( crnt2 != list2.end() ) { 1108 1101 // try unifying empty tuple with ttype … … 1113 1106 return unifyExact( 1114 1107 tupleFromTypes( list1 ), t2, env, need, have, open, 1115 noWiden() , symtab);1108 noWiden() ); 1116 1109 } 1117 1110 … … 1132 1125 auto types2 = flatten( flat2 ); 1133 1126 1134 result = unifyList( types, types2, tenv, need, have, open , symtab);1127 result = unifyList( types, types2, tenv, need, have, open ); 1135 1128 } 1136 1129 … … 1156 1149 const ast::ptr<ast::Type> & type1, const ast::ptr<ast::Type> & type2, 1157 1150 ast::TypeEnvironment & env, ast::AssertionSet & need, ast::AssertionSet & have, 1158 ast::OpenVarSet & open , const ast::SymbolTable & symtab1151 ast::OpenVarSet & open 1159 1152 ) { 1160 1153 ast::ptr<ast::Type> common; 1161 return unify( type1, type2, env, need, have, open, symtab,common );1154 return unify( type1, type2, env, need, have, open, common ); 1162 1155 } 1163 1156 … … 1165 1158 const ast::ptr<ast::Type> & type1, const ast::ptr<ast::Type> & type2, 1166 1159 ast::TypeEnvironment & env, ast::AssertionSet & need, ast::AssertionSet & have, 1167 ast::OpenVarSet & open, const ast::SymbolTable & symtab,ast::ptr<ast::Type> & common1160 ast::OpenVarSet & open, ast::ptr<ast::Type> & common 1168 1161 ) { 1169 1162 ast::OpenVarSet closed; … … 1171 1164 findOpenVars( type2, open, closed, need, have, FirstOpen ); 1172 1165 return unifyInexact( 1173 type1, type2, env, need, have, open, WidenMode{ true, true }, symtab,common );1166 type1, type2, env, need, have, open, WidenMode{ true, true }, common ); 1174 1167 } 1175 1168 … … 1177 1170 const ast::Type * type1, const ast::Type * type2, ast::TypeEnvironment & env, 1178 1171 ast::AssertionSet & need, ast::AssertionSet & have, const ast::OpenVarSet & open, 1179 WidenMode widen , const ast::SymbolTable & symtab1172 WidenMode widen 1180 1173 ) { 1181 1174 if ( type1->qualifiers != type2->qualifiers ) return false; … … 1193 1186 return env.bindVarToVar( 1194 1187 var1, var2, ast::TypeData{ entry1->second, entry2->second }, need, have, 1195 open, widen , symtab);1188 open, widen ); 1196 1189 } else if ( isopen1 ) { 1197 return env.bindVar( var1, type2, entry1->second, need, have, open, widen , symtab);1190 return env.bindVar( var1, type2, entry1->second, need, have, open, widen ); 1198 1191 } else if ( isopen2 ) { 1199 return env.bindVar( var2, type1, entry2->second, need, have, open, widen , symtab);1192 return env.bindVar( var2, type1, entry2->second, need, have, open, widen ); 1200 1193 } else { 1201 1194 return ast::Pass<Unify_new>::read( 1202 type1, type2, env, need, have, open, widen , symtab);1195 type1, type2, env, need, have, open, widen ); 1203 1196 } 1204 1197 } … … 1207 1200 const ast::ptr<ast::Type> & type1, const ast::ptr<ast::Type> & type2, 1208 1201 ast::TypeEnvironment & env, ast::AssertionSet & need, ast::AssertionSet & have, 1209 const ast::OpenVarSet & open, WidenMode widen, const ast::SymbolTable & symtab,1202 const ast::OpenVarSet & open, WidenMode widen, 1210 1203 ast::ptr<ast::Type> & common 1211 1204 ) { … … 1221 1214 ast::ptr< ast::Type > t2_(t2); 1222 1215 1223 if ( unifyExact( t1, t2, env, need, have, open, widen , symtab) ) {1216 if ( unifyExact( t1, t2, env, need, have, open, 
widen ) ) { 1224 1217 // if exact unification on unqualified types, try to merge qualifiers 1225 1218 if ( q1 == q2 || ( ( q1 > q2 || widen.first ) && ( q2 > q1 || widen.second ) ) ) { … … 1231 1224 } 1232 1225 1233 } else if (( common = commonType( t1, t2, env, need, have, open, widen , symtab))) {1226 } else if (( common = commonType( t1, t2, env, need, have, open, widen ))) { 1234 1227 // no exact unification, but common type 1235 1228 auto c = shallowCopy(common.get()); -
src/ResolvExpr/Unify.h
r2b78949 r8a930c03 59 59 const ast::ptr<ast::Type> & type1, const ast::ptr<ast::Type> & type2, 60 60 ast::TypeEnvironment & env, ast::AssertionSet & need, ast::AssertionSet & have, 61 ast::OpenVarSet & open , const ast::SymbolTable & symtab);61 ast::OpenVarSet & open ); 62 62 63 63 bool unify( 64 64 const ast::ptr<ast::Type> & type1, const ast::ptr<ast::Type> & type2, 65 65 ast::TypeEnvironment & env, ast::AssertionSet & need, ast::AssertionSet & have, 66 ast::OpenVarSet & open, const ast::SymbolTable & symtab,ast::ptr<ast::Type> & common );66 ast::OpenVarSet & open, ast::ptr<ast::Type> & common ); 67 67 68 68 bool unifyExact( 69 69 const ast::Type * type1, const ast::Type * type2, ast::TypeEnvironment & env, 70 70 ast::AssertionSet & need, ast::AssertionSet & have, const ast::OpenVarSet & open, 71 WidenMode widen , const ast::SymbolTable & symtab);71 WidenMode widen ); 72 72 73 73 bool unifyInexact( 74 74 const ast::ptr<ast::Type> & type1, const ast::ptr<ast::Type> & type2, 75 75 ast::TypeEnvironment & env, ast::AssertionSet & need, ast::AssertionSet & have, 76 const ast::OpenVarSet & open, WidenMode widen, const ast::SymbolTable & symtab,76 const ast::OpenVarSet & open, WidenMode widen, 77 77 ast::ptr<ast::Type> & common ); 78 78 79 79 bool typesCompatible( 80 const ast::Type *, const ast::Type *, const ast::SymbolTable & symtab = {},80 const ast::Type *, const ast::Type *, 81 81 const ast::TypeEnvironment & env = {} ); 82 82 83 83 bool typesCompatibleIgnoreQualifiers( 84 const ast::Type *, const ast::Type *, const ast::SymbolTable & symtab = {},84 const ast::Type *, const ast::Type *, 85 85 const ast::TypeEnvironment & env = {} ); 86 86 -
src/SymTab/Autogen.h
r2b78949 r8a930c03 20 20 #include <string> // for string 21 21 22 #include "AST/Decl.hpp"23 #include "AST/Expr.hpp"24 #include "AST/Init.hpp"25 #include "AST/Node.hpp"26 #include "AST/Stmt.hpp"27 #include "AST/Type.hpp"28 22 #include "CodeGen/OperatorTable.h" 29 23 #include "Common/UniqueName.h" // for UniqueName … … 57 51 /// maybePolymorphic is true if the resulting FunctionType is allowed to be polymorphic 58 52 FunctionType * genCopyType( Type * paramType, bool maybePolymorphic = true ); 59 60 /// Enum for loop direction61 enum LoopDirection { LoopBackward, LoopForward };62 53 63 54 /// inserts into out a generated call expression to function fname with arguments dstParam and srcParam. Intended to be used with generated ?=?, ?{}, and ^?{} calls. -
src/SymTab/GenImplicitCall.cpp
r2b78949 r8a930c03 16 16 #include "GenImplicitCall.hpp" 17 17 18 #include "AST/Decl.hpp" // for ObjectDecl 19 #include "AST/Expr.hpp" // for ConstantExpr, UntypedExpr,... 20 #include "AST/Init.hpp" // for SingleInit 18 21 #include "AST/Inspect.hpp" // for isUnnamedBitfield 22 #include "AST/Stmt.hpp" // for ExprStmt 23 #include "AST/Type.hpp" // for ArrayType, BasicType, ... 19 24 #include "CodeGen/OperatorTable.h" // for isCtorDtor 20 25 #include "Common/UniqueName.h" // for UniqueName 21 26 22 27 namespace SymTab { 28 29 namespace { 23 30 24 31 template< typename OutIter > … … 173 180 } 174 181 182 } // namespace 183 175 184 ast::ptr< ast::Stmt > genImplicitCall( 176 185 InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam, -
src/SymTab/GenImplicitCall.hpp
r2b78949 r8a930c03 17 17 18 18 #include "InitTweak/InitTweak.h" // for InitExpander 19 #include "SymTab/Autogen.h" // for LoopDirection20 19 21 20 namespace SymTab { 22 21 22 /// Enum for loop direction 23 enum LoopDirection { LoopBackward, LoopForward }; 24 25 /// Returns a generated call expression to function fname with srcParam and 26 /// dstParam. Intended to be used with generated ?=?, ?{}, and ^?{} calls. 23 27 ast::ptr<ast::Stmt> genImplicitCall( 24 28 InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam, … … 34 38 // compile-command: "make install" // 35 39 // End: // 36 -
src/Tuples/Explode.cc
r2b78949 r8a930c03 17 17 #include <list> // for list 18 18 19 #include "AST/Pass.hpp" // for Pass 19 20 #include "SynTree/Mutator.h" // for Mutator 20 21 #include "Common/PassVisitor.h" // for PassVisitor -
src/Validate/Autogen.cpp
r2b78949 r8a930c03 25 25 26 26 #include "AST/Attribute.hpp" 27 #include "AST/Copy.hpp" 27 28 #include "AST/Create.hpp" 28 29 #include "AST/Decl.hpp" … … 43 44 #include "CompilationState.h" 44 45 45 // TODO: The other new ast function should be moved over to this file.46 #include "SymTab/Autogen.h"47 48 46 namespace Validate { 49 47 … … 95 93 96 94 const CodeLocation& getLocation() const { return getDecl()->location; } 97 ast::FunctionDecl * genProto( const std::string& name,95 ast::FunctionDecl * genProto( std::string&& name, 98 96 std::vector<ast::ptr<ast::DeclWithType>>&& params, 99 97 std::vector<ast::ptr<ast::DeclWithType>>&& returns ) const; … … 336 334 } 337 335 336 void replaceAll( std::vector<ast::ptr<ast::DeclWithType>> & dwts, 337 const ast::DeclReplacer::TypeMap & map ) { 338 for ( auto & dwt : dwts ) { 339 dwt = strict_dynamic_cast<const ast::DeclWithType *>( 340 ast::DeclReplacer::replace( dwt, map ) ); 341 } 342 } 343 338 344 /// Generates a basic prototype function declaration. 339 ast::FunctionDecl * FuncGenerator::genProto( const std::string& name,345 ast::FunctionDecl * FuncGenerator::genProto( std::string&& name, 340 346 std::vector<ast::ptr<ast::DeclWithType>>&& params, 341 347 std::vector<ast::ptr<ast::DeclWithType>>&& returns ) const { … … 343 349 // Handle generic prameters and assertions, if any. 344 350 auto const & old_type_params = getGenericParams( type ); 351 ast::DeclReplacer::TypeMap oldToNew; 345 352 std::vector<ast::ptr<ast::TypeDecl>> type_params; 346 353 std::vector<ast::ptr<ast::DeclWithType>> assertions; 347 354 for ( auto & old_param : old_type_params ) { 348 355 ast::TypeDecl * decl = ast::deepCopy( old_param ); 349 for ( auto assertion : decl->assertions ) { 350 assertions.push_back( assertion ); 351 } 352 decl->assertions.clear(); 356 decl->init = nullptr; 357 splice( assertions, decl->assertions ); 358 oldToNew.emplace( std::make_pair( old_param, decl ) ); 353 359 type_params.push_back( decl ); 354 360 } 355 // TODO: The values in params and returns still may point at the old356 // generic params, that does not appear to be an issue but perhaps it357 // should be addressed.361 replaceAll( params, oldToNew ); 362 replaceAll( returns, oldToNew ); 363 replaceAll( assertions, oldToNew ); 358 364 359 365 ast::FunctionDecl * decl = new ast::FunctionDecl( 360 366 // Auto-generated routines use the type declaration's location. 361 367 getLocation(), 362 name,368 std::move( name ), 363 369 std::move( type_params ), 364 370 std::move( assertions ), -
src/Validate/FixQualifiedTypes.cpp
r2b78949 r8a930c03 16 16 #include "Validate/FixQualifiedTypes.hpp" 17 17 18 #include "AST/Copy.hpp" 18 19 #include "AST/LinkageSpec.hpp" // for Linkage 19 20 #include "AST/Pass.hpp" -
src/Validate/GenericParameter.cpp
r2b78949 r8a930c03 16 16 #include "GenericParameter.hpp" 17 17 18 #include "AST/Copy.hpp" 18 19 #include "AST/Decl.hpp" 19 20 #include "AST/Expr.hpp" -
src/Validate/HoistStruct.cpp
r2b78949 r8a930c03 18 18 #include <sstream> 19 19 20 #include "AST/DeclReplacer.hpp" 20 21 #include "AST/Pass.hpp" 21 22 #include "AST/TranslationUnit.hpp" 23 #include "AST/Vector.hpp" 22 24 23 25 namespace Validate { … … 51 53 template<typename AggrDecl> 52 54 AggrDecl const * postAggregate( AggrDecl const * ); 55 template<typename InstType> 56 InstType const * preCollectionInstType( InstType const * type ); 53 57 54 58 ast::AggregateDecl const * parent = nullptr; … … 66 70 qualifiedName( decl, ss ); 67 71 return ss.str(); 72 } 73 74 void extendParams( ast::vector<ast::TypeDecl> & dstParams, 75 ast::vector<ast::TypeDecl> const & srcParams ) { 76 if ( srcParams.empty() ) return; 77 78 ast::DeclReplacer::TypeMap newToOld; 79 ast::vector<ast::TypeDecl> params; 80 for ( ast::ptr<ast::TypeDecl> const & srcParam : srcParams ) { 81 ast::TypeDecl * dstParam = ast::deepCopy( srcParam.get() ); 82 dstParam->init = nullptr; 83 newToOld.emplace( srcParam, dstParam ); 84 for ( auto assertion : dstParam->assertions ) { 85 assertion = ast::DeclReplacer::replace( assertion, newToOld ); 86 } 87 params.emplace_back( dstParam ); 88 } 89 spliceBegin( dstParams, params ); 68 90 } 69 91 … … 74 96 mut->parent = parent; 75 97 mut->name = qualifiedName( mut ); 76 return mut;77 } else {78 GuardValue( parent ) = decl;79 returndecl;80 }98 extendParams( mut->params, parent->params ); 99 decl = mut; 100 } 101 GuardValue( parent ) = decl; 102 return decl; 81 103 } 82 104 … … 112 134 } 113 135 136 ast::AggregateDecl const * commonParent( 137 ast::AggregateDecl const * lhs, ast::AggregateDecl const * rhs ) { 138 for ( auto outer = lhs ; outer ; outer = outer->parent ) { 139 for ( auto inner = rhs ; inner ; inner = inner->parent ) { 140 if ( outer == inner ) { 141 return outer; 142 } 143 } 144 } 145 return nullptr; 146 } 147 148 template<typename InstType> 149 InstType const * HoistStructCore::preCollectionInstType( InstType const * type ) { 150 if ( !type->base->parent ) return type; 151 if ( type->base->params.empty() ) return type; 152 153 InstType * mut = ast::mutate( type ); 154 ast::AggregateDecl const * parent = 155 commonParent( this->parent, mut->base->parent ); 156 assert( parent ); 157 158 std::vector<ast::ptr<ast::Expr>> args; 159 for ( const ast::ptr<ast::TypeDecl> & param : parent->params ) { 160 args.emplace_back( new ast::TypeExpr( param->location, 161 new ast::TypeInstType( param ) 162 ) ); 163 } 164 spliceBegin( mut->params, args ); 165 return mut; 166 } 167 114 168 template<typename InstType> 115 169 InstType const * preInstType( InstType const * type ) { … … 121 175 122 176 ast::StructInstType const * HoistStructCore::previsit( ast::StructInstType const * type ) { 123 return preInstType( type);177 return preInstType( preCollectionInstType( type ) ); 124 178 } 125 179 126 180 ast::UnionInstType const * HoistStructCore::previsit( ast::UnionInstType const * type ) { 127 return preInstType( type);181 return preInstType( preCollectionInstType( type ) ); 128 182 } 129 183 -
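The HoistStruct changes above make a hoisted nested aggregate inherit its parent's type parameters (extendParams) and prepend the matching type arguments at each use site (preCollectionInstType). The Cforall sketch below shows the kind of nesting this targets; the hoisted names in the trailing comment follow qualifiedName's "__" scheme and are an assumption about the pass's output, not verified compiler behaviour.

forall( T )
struct Outer {
	struct Inner { T v; };				// nested aggregate that needs the enclosing parameter T
	struct Inner field;
};

// Conceptually, after hoisting (assumed result):
//   forall( T ) struct Outer__Inner { T v; };
//   forall( T ) struct Outer { Outer__Inner( T ) field; };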
src/Validate/ReplaceTypedef.cpp
r2b78949 r8a930c03 16 16 #include "ReplaceTypedef.hpp" 17 17 18 #include "AST/Copy.hpp" 18 19 #include "AST/Pass.hpp" 19 20 #include "Common/ScopedMap.h" … … 149 150 // constant/enumerator. The effort required to fix this corner case 150 151 // likely outweighs the utility of allowing it. 151 if ( !ResolvExpr::typesCompatible( t0, t1 , ast::SymbolTable())152 if ( !ResolvExpr::typesCompatible( t0, t1 ) 152 153 || ast::Pass<VarLenChecker>::read( t0 ) 153 154 || ast::Pass<VarLenChecker>::read( t1 ) ) { -
src/Virtual/ExpandCasts.cc
r2b78949 r8a930c03 20 20 #include <string> // for string, allocator, operator==, ope... 21 21 22 #include "AST/Copy.hpp" 22 23 #include "AST/Decl.hpp" 23 24 #include "AST/Expr.hpp" -
src/main.cc
r2b78949 r8a930c03 32 32 33 33 #include "AST/Convert.hpp" 34 #include "AST/Pass.hpp" // for pass_visitor_stats 35 #include "AST/TranslationUnit.hpp" // for TranslationUnit 34 36 #include "AST/Util.hpp" // for checkInvariants 35 37 #include "CompilationState.h" -
tests/.expect/array.txt
r2b78949 r8a930c03 1 array.cfa:52:25: warning: Compiled 1 array.cfa:119:25: warning: Preprocessor started -
tests/.expect/copyfile.txt
r2b78949 r8a930c03 10 10 // Created On : Fri Jun 19 13:44:05 2020 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jun 19 17:58:03 202013 // Update Count : 412 // Last Modified On : Mon Jun 5 21:20:07 2023 13 // Update Count : 5 14 14 // 15 15 … … 30 30 exit | "Usage" | argv[0] | "[ input-file (default stdin) [ output-file (default stdout) ] ]"; 31 31 } // choose 32 } catch( Open_Failure * ex ; ex->istream == &in ) {32 } catch( open_failure * ex ; ex->istream == &in ) { 33 33 exit | "Unable to open input file" | argv[1]; 34 } catch( Open_Failure * ex ; ex->ostream == &out ) {34 } catch( open_failure * ex ; ex->ostream == &out ) { 35 35 close( in ); // optional 36 36 exit | "Unable to open output file" | argv[2]; -
tests/.in/copyfile.txt
r2b78949 r8a930c03 10 10 // Created On : Fri Jun 19 13:44:05 2020 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jun 19 17:58:03 202013 // Update Count : 412 // Last Modified On : Mon Jun 5 21:20:07 2023 13 // Update Count : 5 14 14 // 15 15 … … 30 30 exit | "Usage" | argv[0] | "[ input-file (default stdin) [ output-file (default stdout) ] ]"; 31 31 } // choose 32 } catch( Open_Failure * ex ; ex->istream == &in ) {32 } catch( open_failure * ex ; ex->istream == &in ) { 33 33 exit | "Unable to open input file" | argv[1]; 34 } catch( Open_Failure * ex ; ex->ostream == &out ) {34 } catch( open_failure * ex ; ex->ostream == &out ) { 35 35 close( in ); // optional 36 36 exit | "Unable to open output file" | argv[2]; -
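The copyfile input/expect updates above track the I/O exception rename from Open_Failure to open_failure. A minimal stand-alone use of the renamed exception, assuming the usual fstream.hfa open/close routines behave as in this test, might look like:

#include <fstream.hfa>

int main() {
	ifstream in;										// unopened input stream
	try {
		open( in, "no-such-file.txt" );					// raises open_failure if the open fails
		close( in );
	} catch( open_failure * ex ; ex->istream == &in ) {	// lower-case exception name after the rename
		exit | "Unable to open input file";
	} // try
}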
tests/Makefile.am
r2b78949 r8a930c03 11 11 ## Created On : Sun May 31 09:08:15 2015 12 12 ## Last Modified By : Peter A. Buhr 13 ## Last Modified On : Tue May 16 09:27:48202314 ## Update Count : 1 7813 ## Last Modified On : Sun May 28 08:15:43 2023 14 ## Update Count : 196 15 15 ############################################################################### 16 16 … … 26 26 ARCH = ${if ${arch},"--arch=${arch}"} 27 27 arch_support = "x86/x64/arm" 28 TIMEOUT = ${if ${timeout},"--timeout=${timeout}"} 29 GLOBAL_TIMEOUT = ${if ${global-timeout},"--global-timeout=${global-timeout}"} 30 ARCHIVE_ERRORS = ${if ${archive-errors},"--archive-errors=${archive-errors}"} 31 28 32 DEBUG_FLAGS = -debug -g -O0 29 33 30 34 quick_test = avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes meta/dumpable 31 32 archiveerrors=33 concurrent=34 timeouts=35 35 36 36 TEST_PY = python3 ${builddir}/test.py … … 67 67 PRETTY_PATH = mkdir -p ${dir ${abspath ${@}}} && cd ${srcdir} && 68 68 69 .PHONY : list .validate .test_makeflags69 .PHONY : concurrency list .validate .test_makeflags 70 70 .INTERMEDIATE : .validate .validate.cfa .test_makeflags 71 71 EXTRA_PROGRAMS = avl_test linkonce linking/mangling/anon .dummy_hack # build but do not install … … 79 79 avltree/avl-private.h \ 80 80 avltree/avl.h \ 81 concurrent/clib_tls.c \82 concurrent/clib.c \83 81 configs/.in/parseconfig-all.txt \ 84 82 configs/.in/parseconfig-errors.txt \ … … 89 87 io/.in/many_read.data \ 90 88 meta/fork+exec.hfa \ 91 concurrent/unified_locking/mutex_test.hfa \ 92 concurrent/channels/parallel_harness.hfa 89 concurrency/clib_tls.c \ 90 concurrency/clib.c \ 91 concurrency/unified_locking/mutex_test.hfa \ 92 concurrency/channels/parallel_harness.hfa 93 93 94 94 dist-hook: … … 109 109 #---------------------------------------------------------------------------------------------------------------- 110 110 111 # '@' => do not echo command (SILENT), '+' => allows recursive make from within python program 111 112 all-local : # This name is important to automake and implies the default build target. 112 @+${TEST_PY} --debug=${debug} --install=${installed} --invariant --archive-errors=${archiveerrors} ${concurrent} ${timeouts} ${ARCH} --all # '@' => do not echo command (SILENT), '+' => allows recursive make from within python program113 114 install : all-local # PAB only 115 116 tests : all-local 113 @+${TEST_PY} --debug=${debug} --install=${installed} --invariant ${ARCHIVE_ERRORS} ${TIMEOUT} ${GLOBAL_TIMEOUT} ${ARCH} --all 114 115 tests : all-local # synonym 116 117 install : all-local # synonym, PAB only 117 118 118 119 quick : 119 @+${TEST_PY} --debug=${debug} --install=${installed} --archive-errors=${archiveerrors} ${concurrent} ${timeouts} ${ARCH} ${quick_test}120 @+${TEST_PY} --debug=${debug} --install=${installed} ${ARCHIVE_ERRORS} ${ARCH} ${quick_test} 120 121 121 122 concurrency : 122 @+${TEST_PY} --debug=${debug} --install=${installed} ${ARCH } -Iconcurrent123 @+${TEST_PY} --debug=${debug} --install=${installed} ${ARCHIVE_ERRORS} ${TIMEOUT} ${GLOBAL_TIMEOUT} ${ARCH} -Iconcurrency 123 124 124 125 list : 125 @+${TEST_PY} --list ${concurrent}126 @+${TEST_PY} --list 126 127 127 128 help : 128 129 @echo "user targets:" 129 130 @echo " Run the complete test suite." 
130 @echo " $$ make (null) / tests [debug=yes/no] [installed=yes/no] [arch =${arch_support}]"131 @echo " $$ make (null) / tests [debug=yes/no] [installed=yes/no] [archive-errors=dump-dir] [timeout=seconds] [global-timeout=seconds] [arch=${arch_support}]" 131 132 @echo "" 132 133 @echo " Run the short (quick) test suite." 133 @echo " $$ make quick [debug=yes/no] [installed=yes/no] [arch =${arch_support}]"134 @echo " $$ make quick [debug=yes/no] [installed=yes/no] [archive-errors=dump-dir] [arch=${arch_support}]" 134 135 @echo "" 135 @echo " Run the concurren ttest suite."136 @echo " $$ make concurrency [debug=yes/no] [installed=yes/no] [arch =${arch_support}]"136 @echo " Run the concurrency test suite." 137 @echo " $$ make concurrency [debug=yes/no] [installed=yes/no] [archive-errors=dump-dir] [timeout=seconds] [global-timeout=seconds] [arch=${arch_support}]" 137 138 @echo "" 138 139 @echo " List all tests in the test suite." … … 204 205 205 206 SYNTAX_ONLY_CODE = expression typedefRedef variableDeclarator switch numericConstants identFuncDeclarator \ 206 init1 limits nested-types cast labelledExit array quasiKeyword include/stdincludes include/includes builtins/sync warnings/self-assignment concurren t/waitfor/parse207 init1 limits nested-types cast labelledExit array quasiKeyword include/stdincludes include/includes builtins/sync warnings/self-assignment concurrency/waitfor/parse 207 208 ${SYNTAX_ONLY_CODE} : % : %.cfa ${CFACCBIN} 208 209 ${CFACOMPILE_SYNTAX} … … 211 212 # expected failures 212 213 # use custom target since they require a custom define *and* have a name that doesn't match the file 214 215 array-ERR1 : array.cfa ${CFACCBIN} 216 ${CFACOMPILE_SYNTAX} -DERR1 217 -cp ${test} ${abspath ${@}} 218 219 array-ERR2 : array.cfa ${CFACCBIN} 220 ${CFACOMPILE_SYNTAX} -DERR2 221 -cp ${test} ${abspath ${@}} 222 223 array-ERR3 : array.cfa ${CFACCBIN} 224 ${CFACOMPILE_SYNTAX} -DERR3 225 -cp ${test} ${abspath ${@}} 226 213 227 alloc-ERROR : alloc.cfa ${CFACCBIN} 214 228 ${CFACOMPILE_SYNTAX} -DERR1 -
tests/PRNG.cfa
r2b78949 r8a930c03 1 // -*- Mode: C -*- 2 // 1 // 3 2 // Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo 4 // 5 // PRNG.c -- 6 // 3 // 4 // PRNG.c -- high-performance pseudo-random numbers 5 // 6 // The contents of this file are covered under the licence agreement in the 7 // file "LICENCE" distributed with Cforall. 8 // 7 9 // Author : Peter A. Buhr 8 10 // Created On : Wed Dec 29 09:38:12 2021 9 11 // Last Modified By : Peter A. Buhr 10 // Last Modified On : Sun Apr 23 22:02:09202311 // Update Count : 42 012 // Last Modified On : Thu May 25 15:39:52 2023 13 // Update Count : 422 12 14 // 13 15 -
tests/array.cfa
r2b78949 r8a930c03 15 15 // 16 16 17 int a1[0]; 18 // int a2[*];19 // double a4[3.0];17 // Tests syntax. Comments explain semantics. Test does not show semantics. 18 // Mostly illustrates facts about C (with which CFA is being tested to agree). 19 // Is a test oracle under `gcc -x c`. 20 20 21 int m1[0][3]; 22 //int m2[*][*]; 23 int m4[3][3]; 21 #ifdef ERR1 22 #define E1(...) __VA_ARGS__ 23 #else 24 #define E1(...) 25 #endif 24 26 25 typedef int T; 27 #ifdef ERR2 28 #define E2(...) __VA_ARGS__ 29 #else 30 #define E2(...) 31 #endif 26 32 27 int fred() { 28 // int a1[]; 29 // int a2[*]; 30 int a4[3]; 31 int T[3]; 32 } 33 #ifdef ERR3 34 #define E3(...) __VA_ARGS__ 35 #else 36 #define E3(...) 37 #endif 33 38 34 int mary( int T[3], 35 int p1[const 3], 36 int p2[static 3], 37 int p3[static const 3] 38 ) { 39 } 39 int a1[0]; 40 E1( int a2[*]; ) 41 #ifndef __cforall 42 E1( double a4[3.0]; ) // BUG 275: CFA accepts but should reject 43 #endif 40 44 41 int (*tom())[3] { 42 } 45 int m1[0][3]; 46 E1( int m2[*][*]; ) 47 int m4[3][3]; 43 48 44 int (*(jane)())( int T[3], 45 int p1[const 3], 46 int p2[static 3], 47 int p3[static const 3] 48 ) { 49 } 49 typedef int T; 50 51 int fred(int n) { 52 E1( int a1[]; ) 53 E1( int a2[*]; ) 54 int a4[3]; 55 int T[3]; 56 int a5[n]; 57 } 58 59 int mary( int T[3], // same as: int *T 60 int p1[const 3], // same as: int const *p1 61 int p2[static 3], // same as T, but length >=3 checked 62 int p3[static const 3] // both above: 3 is static, p3 is const 63 ) { 64 } 65 66 // function taking (), returning pointer to array of ints 67 int (*tom())[3] { 68 } 69 70 // function taking (), returning pointer to function of same type as mary 71 int (*(jane)())( int T[3], 72 int p1[const 3], 73 int p2[static 3], 74 int p3[static const 3] 75 ) { 76 } 77 78 // functions returning same exotic pointers, in CFA's non-onion syntax 79 #ifdef __cforall 80 [ * [3] int ] toms_twin(...) { 81 } 82 [ * [int]( [3] int T, 83 [const 3] int p1, 84 [static 3] int p2, 85 [static const 3] int p3 86 ) 87 ] janes_twin(...) { 88 } 89 #endif 90 91 // GCC 11+ gives a false warning (-Wvla-parameter) on the valid (C11 ARM p134-135) combination: 92 // declare with type int[*], define with type int[n]. 93 // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=100420 suggests the internal representation of 94 // of a[*] is the same as a[0]. 95 // https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wno-vla-parameter explains 96 // the purpose of -Wvla-parameter is to report conflicts between int[] and int[n], which would 97 // understandably also include those between int[42] and int[n]. 98 // https://stackoverflow.com/questions/17371645/why-use-an-asterisk-instead-of-an-integer-for-a-vla-array-parameter-of-a-f 99 // explains the declare-*, define-n pattern. 100 101 // To work around the false warning, and keep to this test's purpose of exercising CFA's 102 // handling of exotic C array syntax, what would ideally be demonstrated as a declaration of 103 // fm1, followed by its definition, is instead split into fm1x and fm1y. And similarly for 104 // fm5. 
105 106 int fm1x( int, int, int[][*] ); 107 int fm1y( int r, int c, int m[][c] ) {} 108 int fm2( int r, int c, int (*m)[c] ) {} // same as fm1 109 E2( int fm3( int r, int c, int m[][static c] ) {} ) // that's not static 110 E3( int fm4( int r, int c, int m[][] ); ) // m's immediate element type is incomplete 111 int fm5x( int, int, int[*][*] ); // same as fm1 decl 112 #ifndef __cforall 113 int fm5y( int r, int c, int m[r][c] ) {} // BUG 276: CFA chokes but should accept 114 // C: same as fm1 defn 115 #endif 116 50 117 51 118 int main() { 52 #pragma GCC warning " Compiled"// force non-empty .expect file, NO TABS!!!119 #pragma GCC warning "Preprocessor started" // force non-empty .expect file, NO TABS!!! 53 120 } 54 121 -
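As a companion to the fm1x/fm1y and fm5x/fm5y pairs above, here is a minimal standalone C sketch (hypothetical names sum2d and a throwaway main) of the declare-with-[*], define-with-[n] parameter pattern; it should build as plain C11 (for example with gcc -x c -std=c11), though GCC 11+ may still raise the false -Wvla-parameter warning the test comments describe.

#include <stdio.h>

int sum2d( int, int, int [][*] );			// declaration: column count left unspecified

int sum2d( int r, int c, int m[r][c] ) {	// definition: bounds taken from the earlier parameters
	int total = 0;
	for ( int i = 0; i < r; i += 1 )
		for ( int j = 0; j < c; j += 1 )
			total += m[i][j];
	return total;
}

int main( void ) {
	int m2x3[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };
	printf( "%d\n", sum2d( 2, 3, m2x3 ) );	// prints 21
	return 0;
}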
tests/concurrency/.expect/ctor-check.txt
r2b78949 r8a930c03 1 concurren t/ctor-check.cfa:11:1 error: constructors cannot have mutex parameters1 concurrency/ctor-check.cfa:11:1 error: constructors cannot have mutex parameters 2 2 ?{}: function 3 3 ... with parameters -
tests/concurrency/actors/dynamic.cfa
r2b78949 r8a930c03 19 19 void ?{}( derived_msg & this ) { ((derived_msg &)this){ 0 }; } 20 20 21 Allocation receive( derived_actor & receiver, derived_msg & msg ) {21 allocation receive( derived_actor & receiver, derived_msg & msg ) { 22 22 if ( msg.cnt >= Times ) { 23 23 sout | "Done"; -
tests/concurrency/actors/executor.cfa
r2b78949 r8a930c03 24 24 struct d_msg { inline message; } shared_msg; 25 25 26 Allocation receive( d_actor & this, d_msg & msg ) with( this ) {26 allocation receive( d_actor & this, d_msg & msg ) with( this ) { 27 27 if ( recs == rounds ) return Finished; 28 28 if ( recs % Batch == 0 ) { -
tests/concurrency/actors/inherit.cfa
r2b78949 r8a930c03 15 15 void ^?{}( D_msg & this ) { mutex(sout) sout | 'A'; } 16 16 17 Allocation handle() {17 allocation handle() { 18 18 return Finished; 19 19 } 20 20 21 Allocation receive( Server & receiver, D_msg & msg ) { return handle(); }22 Allocation receive( Server & receiver, D_msg2 & msg ) { return handle(); }23 Allocation receive( Server2 & receiver, D_msg & msg ) { return Delete; }24 Allocation receive( Server2 & receiver, D_msg2 & msg ) { return Delete; }21 allocation receive( Server & receiver, D_msg & msg ) { return handle(); } 22 allocation receive( Server & receiver, D_msg2 & msg ) { return handle(); } 23 allocation receive( Server2 & receiver, D_msg & msg ) { return Delete; } 24 allocation receive( Server2 & receiver, D_msg2 & msg ) { return Delete; } 25 25 26 26 int main() { -
tests/concurrency/actors/matrix.cfa
r2b78949 r8a930c03 24 24 } 25 25 26 Allocation receive( derived_actor & receiver, derived_msg & msg ) {26 allocation receive( derived_actor & receiver, derived_msg & msg ) { 27 27 for ( unsigned int i = 0; i < yc; i += 1 ) { // multiply X_row by Y_col and sum products 28 28 msg.Z[i] = 0; -
tests/concurrency/actors/pingpong.cfa
r2b78949 r8a930c03 19 19 size_t times = 100000; 20 20 21 Allocation receive( ping & receiver, p_msg & msg ) {21 allocation receive( ping & receiver, p_msg & msg ) { 22 22 msg.count++; 23 23 if ( msg.count > times ) return Finished; 24 24 25 Allocation retval = Nodelete;25 allocation retval = Nodelete; 26 26 if ( msg.count == times ) retval = Finished; 27 27 *po << msg; … … 29 29 } 30 30 31 Allocation receive( pong & receiver, p_msg & msg ) {31 allocation receive( pong & receiver, p_msg & msg ) { 32 32 msg.count++; 33 33 if ( msg.count > times ) return Finished; 34 34 35 Allocation retval = Nodelete;35 allocation retval = Nodelete; 36 36 if ( msg.count == times ) retval = Finished; 37 37 *pi << msg; -
tests/concurrency/actors/poison.cfa
r2b78949 r8a930c03 18 18 Server s[10]; 19 19 for ( i; 10 ) { 20 s[i] << FinishedMsg;20 s[i] << finished_msg; 21 21 } 22 22 stop_actor_system(); … … 29 29 Server * s = alloc(); 30 30 (*s){}; 31 (*s) << DeleteMsg;31 (*s) << delete_msg; 32 32 } 33 33 stop_actor_system(); … … 39 39 Server s[10]; 40 40 for ( i; 10 ) 41 s[i] << DestroyMsg;41 s[i] << destroy_msg; 42 42 stop_actor_system(); 43 43 for ( i; 10 ) -
tests/concurrency/actors/static.cfa
r2b78949 r8a930c03 19 19 void ?{}( derived_msg & this ) { ((derived_msg &)this){ 0 }; } 20 20 21 Allocation receive( derived_actor & receiver, derived_msg & msg ) {21 allocation receive( derived_actor & receiver, derived_msg & msg ) { 22 22 if ( msg.cnt >= Times ) { 23 23 sout | "Done"; -
tests/concurrency/actors/types.cfa
r2b78949 r8a930c03 20 20 21 21 // this isn't a valid receive routine since int is not a message type 22 Allocation receive( derived_actor & receiver, int i ) with( receiver ) {22 allocation receive( derived_actor & receiver, int i ) with( receiver ) { 23 23 mutex(sout) sout | i; 24 24 counter++; … … 27 27 } 28 28 29 Allocation receive( derived_actor & receiver, d_msg & msg ) {29 allocation receive( derived_actor & receiver, d_msg & msg ) { 30 30 return receive( receiver, msg.num ); 31 31 } … … 36 36 }; 37 37 38 Allocation receive( derived_actor2 & receiver, d_msg & msg ) {38 allocation receive( derived_actor2 & receiver, d_msg & msg ) { 39 39 mutex(sout) sout | msg.num; 40 40 return Finished; … … 48 48 }; 49 49 50 Allocation receive( derived_actor3 & receiver, d_msg & msg ) {50 allocation receive( derived_actor3 & receiver, d_msg & msg ) { 51 51 mutex(sout) sout | msg.num; 52 52 if ( msg.num == -1 ) return Nodelete; … … 54 54 } 55 55 56 Allocation receive( derived_actor3 & receiver, d_msg2 & msg ) {56 allocation receive( derived_actor3 & receiver, d_msg2 & msg ) { 57 57 mutex(sout) sout | msg.num; 58 58 return Finished; -
tests/concurrency/barrier/gen_generation_expect.cfa
r2b78949 r8a930c03 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // gen_generation_expect.cfa -- simple 'script' generates the expect file for concurren t/barrier/generation7 // gen_generation_expect.cfa -- simple 'script' generates the expect file for concurrency/barrier/generation 8 8 // 9 9 // Author : Thierry Delisle -
tests/concurrency/barrier/generation.cfa
r2b78949 r8a930c03 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // concurrent/barrier/generation.cfa -- simple test that validates barriers by printing 8 // alphabetical generations 7 // generation.cfa -- simple test that validates barriers by printing alphabetical generations 9 8 // 10 9 // Author : Thierry Delisle -
tests/concurrency/barrier/last.cfa
r2b78949 r8a930c03 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // concurrent/barrier/last.cfa -- validates barrier's last hook functionality7 // last.cfa -- validates barrier's last hook functionality 8 8 // 9 9 // Author : Thierry Delisle -
tests/concurrency/barrier/order.cfa
r2b78949 r8a930c03 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // concurrent/barrier/order.cfa -- validates barriers the return value of7 // order.cfa -- validates barriers via the return value of 8 8 // barrier block 9 9 //
tests/concurrency/readyQ/barrier_sleeper.cfa
r2b78949 r8a930c03 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // concurrent/readyQ/barrier_sleeper.cfa -- testing the ready-queue7 // barrier_sleeper.cfa -- testing the ready-queue 8 8 // 9 9 // Author : Thierry Delisle -
tests/concurrency/readyQ/leader_spin.cfa
r2b78949 r8a930c03 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // concurrent/readyQ/leader_spin.cfa -- validates ready queue fairness7 // leader_spin.cfa -- validates ready queue fairness 8 8 // 9 9 // Author : Thierry Delisle -
tests/concurrency/waituntil/locks.cfa
r2b78949 r8a930c03 2 2 #include <thread.hfa> 3 3 #include <locks.hfa> 4 #include <fstream.hfa> 4 5 #include <mutex_stmt.hfa> 5 6 -
tests/configs/.expect/parseconfig.txt
r2b78949 r8a930c03 12 12 Maximum student trips: 3 13 13 14 Open_Failure thrown when config file does not exist14 open_failure thrown when config file does not exist 15 15 Failed to open the config file 16 16 -
tests/configs/parseconfig.cfa
r2b78949 r8a930c03 66 66 67 67 68 sout | " Open_Failure thrown when config file does not exist";68 sout | "open_failure thrown when config file does not exist"; 69 69 try { 70 70 parse_config( xstr(IN_DIR) "doesnt-exist.txt", entries, NUM_ENTRIES, parse_tabular_config_format ); 71 } catch( Open_Failure * ex ) {71 } catch( open_failure * ex ) { 72 72 sout | "Failed to open the config file"; 73 73 } -
tests/copyfile.cfa
r2b78949 r8a930c03 10 10 // Created On : Fri Jun 19 13:44:05 2020 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Aug 15 15:00:48 202013 // Update Count : 612 // Last Modified On : Mon Jun 5 21:20:19 2023 13 // Update Count : 7 14 14 // 15 15 … … 30 30 exit | "Usage" | argv[0] | "[ input-file (default stdin) [ output-file (default stdout) ] ]"; 31 31 } // choose 32 } catch( Open_Failure * ex ; ex->istream == &in ) {32 } catch( open_failure * ex ; ex->istream == &in ) { 33 33 exit | "Unable to open input file" | argv[1]; 34 } catch( Open_Failure * ex ; ex->ostream == &out ) {34 } catch( open_failure * ex ; ex->ostream == &out ) { 35 35 close( in ); // optional 36 36 exit | "Unable to open output file" | argv[2]; -
tests/rational.cfa
r2b78949 r8a930c03 10 10 // Created On : Mon Mar 28 08:43:12 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Jul 20 18:13:40 202113 // Update Count : 10 712 // Last Modified On : Mon Jun 5 22:58:09 2023 13 // Update Count : 108 14 14 // 15 15 … … 19 19 #include <fstream.hfa> 20 20 21 typedef Rational(int) RatInt;21 typedef rational(int) rat_int; 22 22 double convert( int i ) { return (double)i; } // used by narrow/widen 23 23 int convert( double d ) { return (int)d; } … … 25 25 int main() { 26 26 sout | "constructor"; 27 RatInt a = { 3 }, b = { 4 }, c, d = 0, e = 1;27 rat_int a = { 3 }, b = { 4 }, c, d = 0, e = 1; 28 28 sout | "a : " | a | "b : " | b | "c : " | c | "d : " | d | "e : " | e; 29 29 30 a = ( RatInt){ 4, 8 };31 b = ( RatInt){ 5, 7 };30 a = (rat_int){ 4, 8 }; 31 b = (rat_int){ 5, 7 }; 32 32 sout | "a : " | a | "b : " | b; 33 a = ( RatInt){ -2, -3 };34 b = ( RatInt){ 3, -2 };33 a = (rat_int){ -2, -3 }; 34 b = (rat_int){ 3, -2 }; 35 35 sout | "a : " | a | "b : " | b; 36 a = ( RatInt){ -2, 3 };37 b = ( RatInt){ 3, 2 };36 a = (rat_int){ -2, 3 }; 37 b = (rat_int){ 3, 2 }; 38 38 sout | "a : " | a | "b : " | b; 39 39 sout | nl; 40 40 41 41 sout | "comparison"; 42 a = ( RatInt){ -2 };43 b = ( RatInt){ -3, 2 };42 a = (rat_int){ -2 }; 43 b = (rat_int){ -3, 2 }; 44 44 sout | "a : " | a | "b : " | b; 45 sout | "a == 0 : " | a == ( Rational(int)){0}; // FIX ME46 sout | "a == 1 : " | a == ( Rational(int)){1}; // FIX ME45 sout | "a == 0 : " | a == (rational(int)){0}; // FIX ME 46 sout | "a == 1 : " | a == (rational(int)){1}; // FIX ME 47 47 sout | "a != 0 : " | a != 0; 48 48 sout | "! a : " | ! a; … … 73 73 74 74 sout | "conversion"; 75 a = ( RatInt){ 3, 4 };75 a = (rat_int){ 3, 4 }; 76 76 sout | widen( a ); 77 a = ( RatInt){ 1, 7 };77 a = (rat_int){ 1, 7 }; 78 78 sout | widen( a ); 79 a = ( RatInt){ 355, 113 };79 a = (rat_int){ 355, 113 }; 80 80 sout | widen( a ); 81 81 sout | narrow( 0.75, 4 ); … … 90 90 91 91 sout | "more tests"; 92 RatInt x = { 1, 2 }, y = { 2 };92 rat_int x = { 1, 2 }, y = { 2 }; 93 93 sout | x - y; 94 94 sout | x > y; … … 96 96 sout | y | denominator( y, -2 ) | y; 97 97 98 RatInt z = { 0, 5 };98 rat_int z = { 0, 5 }; 99 99 sout | z; 100 100 101 101 sout | x | numerator( x, 0 ) | x; 102 102 103 x = ( RatInt){ 1, MAX } + (RatInt){ 1, MAX };103 x = (rat_int){ 1, MAX } + (rat_int){ 1, MAX }; 104 104 sout | x; 105 x = ( RatInt){ 3, MAX } + (RatInt){ 2, MAX };105 x = (rat_int){ 3, MAX } + (rat_int){ 2, MAX }; 106 106 sout | x; 107 107