Index: benchmark/plot.py
===================================================================
--- benchmark/plot.py	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ benchmark/plot.py	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -22,8 +22,9 @@
 
 class Field:
-	def __init__(self, unit, _min, _log):
+	def __init__(self, unit, _min, _log, _name=None):
 		self.unit = unit
 		self.min  = _min
 		self.log  = _log
+		self.name = _name
 
 field_names = {
@@ -32,10 +33,10 @@
 	"Ops per procs"         : Field('Ops'   , 0, False),
 	"Ops per threads"       : Field('Ops'   , 0, False),
-	"ns per ops/procs"      : Field('ns'    , 0, False),
+	"ns per ops/procs"      : Field(''    , 0, False, _name = "Latency (ns $/$ (Processor $\\times$ Operation))" ),
 	"Number of threads"     : Field(''      , 1, False),
 	"Total Operations(ops)" : Field('Ops'   , 0, False),
 	"Ops/sec/procs"         : Field('Ops'   , 0, False),
 	"Total blocks"          : Field('Blocks', 0, False),
-	"Ops per second"        : Field('Ops'   , 0, False),
+	"Ops per second"        : Field(''   , 0, False),
 	"Cycle size (# thrds)"  : Field('thrd'  , 1, False),
 	"Duration (ms)"         : Field('ms'    , 0, False),
@@ -51,5 +52,5 @@
 }
 
-def plot(in_data, x, y, out):
+def plot(in_data, x, y, options):
 	fig, ax = plt.subplots()
 	colors = itertools.cycle(['#0095e3','#006cb4','#69df00','#0aa000','#fb0300','#e30002','#fd8f00','#ff7f00','#8f00d6','#4b009a','#ffff00','#b13f00'])
@@ -109,10 +110,12 @@
 	print("Finishing Plots")
 
-	plt.ylabel(y)
+	plt.ylabel(field_names[y].name if field_names[y].name else y)
 	# plt.xticks(range(1, math.ceil(mx) + 1))
-	plt.xlabel(x)
+	plt.xlabel(field_names[x].name if field_names[x].name else x)
 	plt.grid(b = True)
 	ax.xaxis.set_major_formatter( EngFormatter(unit=field_names[x].unit) )
-	if field_names[x].log:
+	if options.logx:
+		ax.set_xscale('log')
+	elif field_names[x].log:
 		ax.set_xscale('log')
 	else:
@@ -120,14 +123,16 @@
 
 	ax.yaxis.set_major_formatter( EngFormatter(unit=field_names[y].unit) )
-	if field_names[y].log:
+	if options.logy:
+		ax.set_yscale('log')
+	elif field_names[y].log:
 		ax.set_yscale('log')
 	else:
-		plt.ylim(field_names[y].min, my*1.2)
+		plt.ylim(field_names[y].min, options.MaxY if options.MaxY else my*1.2)
 
 	plt.legend(loc='upper left')
 
 	print("Results Ready")
-	if out:
-		plt.savefig(out)
+	if options.out:
+		plt.savefig(options.out, bbox_inches='tight')
 	else:
 		plt.show()
@@ -142,4 +147,7 @@
 	parser.add_argument('-y', nargs='?', type=str, default="", help="Which field to use as the Y axis")
 	parser.add_argument('-x', nargs='?', type=str, default="", help="Which field to use as the X axis")
+	parser.add_argument('--logx', action='store_true', help="if set, makes the x-axis logscale")
+	parser.add_argument('--logy', action='store_true', help="if set, makes the y-axis logscale")
+	parser.add_argument('--MaxY', nargs='?', type=int, help="maximum value of the y-axis")
 
 	options =  parser.parse_args()
@@ -185,3 +193,3 @@
 
 
-	plot(data, wantx, wanty, options.out)
+	plot(data, wantx, wanty, options)
Index: doc/theses/mubeen_zulfiqar_MMath/allocator.tex
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/allocator.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/allocator.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -29,5 +29,5 @@
 llheap's design was reviewed and changed multiple times throughout the thesis.
 Some of the rejected designs are discussed because they show the path to the final design (see discussion in \VRef{s:MultipleHeaps}).
-Note, a few simples tests for a design choice were compared with the current best allocators to determine the viability of a design.
+Note, a few simple tests for a design choice were compared with the current best allocators to determine the viability of a design.
 
 
@@ -37,6 +37,6 @@
 These designs look at the allocation/free \newterm{fastpath}, \ie when an allocation can immediately return free storage or returned storage is not coalesced.
 \paragraph{T:1 model}
-\VRef[Figure]{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are N-shared across KTs.
-This design leverages the fact that 95\% of allocation requests are less than 1024 bytes and there are only 3--5 different request sizes.
+\VRef[Figure]{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are shared among N KTs.
+This design leverages the fact that usually the allocation requests are less than 1024 bytes and there are only a few different request sizes.
 When KTs $\le$ N, the common bucket sizes are uncontented;
 when KTs $>$ N, the free buckets are contented and latency increases significantly.
@@ -64,10 +64,10 @@
 
 \paragraph{T:H model}
-\VRef[Figure]{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded across the KTs.
+\VRef[Figure]{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded (distributed) across the KTs.
 A KT can point directly to its assigned heap or indirectly through the corresponding heap bucket.
-When KT $\le$ N, the heaps are uncontented;
+When KT $\le$ N, the heaps might be uncontented;
 when KTs $>$ N, the heaps are contented.
 In all cases, a KT must acquire/release a lock, contented or uncontented along the fast allocation path because a heap is shared.
-By adjusting N upwards, this approach reduces contention but increases storage (time versus space);
+By increasing N, this approach reduces contention but increases storage (time versus space);
 however, picking N is workload specific.
 
@@ -109,5 +109,5 @@
 Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}.
 \begin{quote}
-A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}
+A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable}
 \end{quote}
 If a KT is preempted during an allocation operation, the operating system can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.
@@ -138,10 +138,10 @@
 (See \VRef[Figure]{f:THSharedHeaps} but with a heap bucket per KT and no bucket or local-pool lock.)
 Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted.
-Heaps are uncontended for a KTs memory operations to its heap (modulo operations on the global pool and ownership).
+Heaps are uncontended for a KT's memory operations as every KT has its own thread-local heap, modulo operations on the global pool and ownership.
 
 Problems:
 \begin{itemize}
 \item
-Need to know when a KT is starts/terminates to create/delete its heap.
+Need to know when a KT starts/terminates to create/delete its heap.
 
 \noindent
@@ -161,5 +161,5 @@
 \noindent
 In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs.
-Since the number of CPUs is relatively small, >~1024, and a heap relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
+Since the number of CPUs is relatively small, and a heap is also relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
 \item
 There is the same serially-reusable problem with UTs migrating across KTs.
@@ -171,9 +171,9 @@
 \noindent
 The conclusion from this design exercise is: any atomic fence, atomic instruction (lock free), or lock along the allocation fastpath produces significant slowdown.
-For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps maybe shared by multiple threads, even when KTs $\le$ N.
+For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps might be shared by multiple threads, even when KTs $\le$ N.
 For the T:H=CPU and 1:1 models, locking is eliminated along the allocation fastpath.
 However, T:H=CPU has poor operating-system support to determine the CPU id (heap id) and prevent the serially-reusable problem for KTs.
 More operating system support is required to make this model viable, but there is still the serially-reusable problem with user-level threading.
-Leaving the 1:1 model with no atomic actions along the fastpath and no special operating-system support required.
+So the 1:1 model had no atomic actions along the fastpath and no special operating-system support requirements.
 The 1:1 model still has the serially-reusable problem with user-level threading, which is addressed in \VRef{s:UserlevelThreadingSupport}, and the greatest potential for heap blowup for certain allocation patterns.
 
@@ -212,5 +212,5 @@
 Ideally latency is $O(1)$ with a small constant.
 
-To obtain $O(1)$ internal latency means no searching on the allocation fastpath, largely prohibits coalescing, which leads to external fragmentation.
+To obtain $O(1)$ internal latency means no searching on the allocation fastpath and largely prohibits coalescing, which leads to external fragmentation.
 The mitigating factor is that most programs have well behaved allocation patterns, where the majority of allocation operations can be $O(1)$, and heap blowup does not occur without coalescing (although the allocation footprint may be slightly larger).
 
@@ -257,16 +257,17 @@
 llheap starts by creating an array of $N$ global heaps from storage obtained using @mmap@, where $N$ is the number of computer cores, that persists for program duration.
 There is a global bump-pointer to the next free heap in the array.
-When this array is exhausted, another array is allocated.
-There is a global top pointer for a heap intrusive link to chain free heaps from terminated threads.
-When statistics are turned on, there is a global top pointer for a heap intrusive link to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.
+When this array is exhausted, another array of heaps is allocated.
+There is a global top pointer for an intrusive linked-list to chain free heaps from terminated threads.
+When statistics are turned on, there is a global top pointer for an intrusive linked-list to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.
 
 When a KT starts, a heap is allocated from the current array for exclusive use by the KT.
-When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of heaps.
-The free heaps is a stack so hot storage is reused first.
-Preserving all heaps created during the program lifetime, solves the storage lifetime problem, when ownership is used.
+When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of number of heaps.
+The free heaps are stored on a stack so hot storage is reused first.
+Preserving all heaps, created during the program lifetime, solves the storage lifetime problem when ownership is used.
 This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially.
 llheap can be configured with object ownership, where an object is freed to the heap from which it is allocated, or object no-ownership, where an object is freed to the KT's current heap.
 
 Each heap uses segregated free-buckets that have free objects distributed across 91 different sizes from 16 to 4M.
+All objects in a bucket are of the same size.
 The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the operating system.
 Each free bucket of a specific size has the following two lists:
@@ -286,5 +287,5 @@
 Quantizing is performed using a binary search over the ordered bucket array.
 An optional optimization is fast lookup $O(1)$ for sizes < 64K from a 64K array of type @char@, where each element has an index to the corresponding bucket.
-(Type @char@ restricts the number of bucket sizes to 256.)
+The @char@ type restricts the number of bucket sizes to 256.
 For $S$ > 64K, a binary search is used.
 Then, the allocation storage is obtained from the following locations (in order), with increasing latency.
@@ -381,5 +382,5 @@
 Then the corresponding bucket of the owner thread is computed for the deallocating thread, and the allocation is pushed onto the deallocating thread's bucket.
 
-Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through routines @malloc@/@free@, which are the only routines to directly access and manage the internal data structures of the heap.
+Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through the @malloc@ and @free@ routines, which are the only routines to directly access and manage the internal data structures of the heap.
 Other allocation operations, \eg @calloc@, @memalign@, and @realloc@, are composed of calls to @malloc@ and possibly @free@, and may manipulate header information after storage is allocated.
 This design simplifies heap-management code during development and maintenance.
@@ -388,9 +389,9 @@
 \subsection{Alignment}
 
-All dynamic memory allocations must have a minimum storage alignment for the contained object(s).
+Most dynamic memory allocations have a minimum storage alignment for the contained object(s).
 Often the minimum memory alignment, M, is the bus width (32 or 64-bit) or the largest register (double, long double) or largest atomic instruction (DCAS) or vector data (MMMX).
 In general, the minimum storage alignment is 8/16-byte boundary on 32/64-bit computers.
 For consistency, the object header is normally aligned at this same boundary.
-Larger alignments must be a power of 2, such page alignment (4/8K).
+Larger alignments must be a power of 2, such as page alignment (4/8K).
 Any alignment request, N, $\le$ the minimum alignment is handled as a normal allocation with minimal alignment.
 
@@ -400,5 +401,5 @@
 \end{center}
 The storage between @E@ and @H@ is chained onto the appropriate free list for future allocations.
-This approach is also valid within any sufficiently large free block, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage.
+The same approach is used for sufficiently large free blocks, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage.
 In this approach, the aligned address @A@ is the same as the allocated storage address @P@, \ie @P@ $=$ @A@ for all allocation routines, which simplifies deallocation.
 However, if there are a large number of aligned requests, this approach leads to memory fragmentation from the small free areas around the aligned object.
@@ -407,5 +408,5 @@
 Finally, this approach is incompatible with allocator designs that funnel allocation requests through @malloc@ as it directly manipulates management information within the allocator to optimize the space/time of a request.
 
-Instead, llheap alignment is accomplished by making a \emph{pessimistically} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg:
+Instead, llheap alignment is accomplished by making a \emph{pessimistic} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg:
 \begin{center}
 \input{Alignment2}
@@ -424,5 +425,5 @@
 \input{Alignment2Impl}
 \end{center}
-Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments of @M@ or greater.
+Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments greater than @M@.
 When @P@ $\neq$ @A@, the minimum distance between @P@ and @A@ is @M@ bytes, due to the pessimistic storage-allocation.
 Therefore, there is always room for an @M@-byte fake header before @A@.
@@ -439,5 +440,5 @@
 \label{s:ReallocStickyProperties}
 
-Allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually.
+The allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually.
 \begin{flushleft}
 \begin{tabular}{ll}
@@ -460,10 +461,10 @@
 The realloc pattern leverages available storage at the end of an allocation due to bucket sizes, possibly eliminating a new allocation and copying.
 This pattern is not used enough to reduce storage management costs.
-In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the pattern.
+In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the allocation pattern.
 
 The hidden problem for this pattern is the effect of zero fill and alignment with respect to reallocation.
 Are these properties transient or persistent (``sticky'')?
-For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size.
-That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are initial allocation properties preserve?
+For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size?
+That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are initial allocation properties preserved?
 Currently, allocation properties are not preserved, so subsequent use of @realloc@ storage may cause inefficient execution or errors due to lack of zero fill or alignment.
 This silent problem is unintuitive to programmers and difficult to locate because it is transient.
@@ -475,5 +476,5 @@
 
 To preserve allocation properties requires storing additional information with an allocation,
-The only available location is the header, where \VRef[Figure]{f:llheapNormalHeader} shows the llheap storage layout.
+The best available option is the header, where \VRef[Figure]{f:llheapNormalHeader} shows the llheap storage layout.
 The header has two data field sized appropriately for 32/64-bit alignment requirements.
 The first field is a union of three values:
@@ -487,5 +488,5 @@
 \end{description}
 The second field remembers the request size versus the allocation (bucket) size, \eg request 42 bytes which is rounded up to 64 bytes.
-Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors.
+Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors and also helps in memory management.
 
 \begin{figure}
@@ -496,5 +497,5 @@
 \end{figure}
 
-The low-order 3-bits of the first field are \emph{unused} for any stored values, whereas the second field may use all of its bits.
+The low-order 3-bits of the first field are \emph{unused} for any stored values as these values are 16-byte aligned by default, whereas the second field may use all of its bits.
 The 3 unused bits are used to represent mapped allocation, zero filled, and alignment, respectively.
 Note, the alignment bit is not used in the normal header and the zero-filled/mapped bits are not used in the fake header.
@@ -502,5 +503,5 @@
 If no bits are on, it implies a basic allocation, which is handled quickly;
 otherwise, the bits are analysed and appropriate actions are taken for the complex cases.
-Since most allocations are basic, this implementation results in a significant performance gain along the allocation and free fastpath.
+Since most allocations are basic, they will take significantly less time as the memory operations will be done along the allocation and free fastpath.
 
 
@@ -514,8 +515,8 @@
 To locate all statistic counters, heaps are linked together in statistics mode, and this list is locked and traversed to sum all counters across heaps.
 Note, the list is locked to prevent errors traversing an active list;
-the statistics counters are not locked and can flicker during accumulation, which is not an issue with atomic read/write.
+the statistics counters are not locked and can flicker during accumulation.
 \VRef[Figure]{f:StatiticsOutput} shows an example of statistics output, which covers all allocation operations and information about deallocating storage not owned by a thread.
 No other memory allocator studied provides as comprehensive statistical information.
-Finally, these statistics were invaluable during the development of this thesis for debugging and verifying correctness, and hence, should be equally valuable to application developers.
+Finally, these statistics were invaluable during the development of this thesis for debugging and verifying correctness and should be equally valuable to application developers.
 
 \begin{figure}
@@ -547,9 +548,9 @@
 Nevertheless, the checks detect many allocation problems.
 There is an unfortunate problem in detecting unfreed storage because some library routines assume their allocations have life-time duration, and hence, do not free their storage.
-For example, @printf@ allocates a 1024 buffer on first call and never deletes this buffer.
+For example, @printf@ allocates a 1024-byte buffer on the first call and never deletes this buffer.
 To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ \VPageref{p:malloc_unfreed}), and it is subtracted from the total allocate/free difference.
 Determining the amount of never-freed storage is annoying, but once done, any warnings of unfreed storage are application related.
 
-Tests indicate only a 30\% performance increase when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistic is mitigated by limited calls, often only one at the end of the program.
+Tests indicate only a 30\% performance decrease when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistic is mitigated by limited calls, often only one at the end of the program.
 
 
@@ -557,19 +558,19 @@
 \label{s:UserlevelThreadingSupport}
 
-The serially-reusable problem (see \VRef{s:AllocationFastpath}) occurs for kernel threads in the ``T:H model, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
-The solution is to prevent interrupts that can result in CPU or KT change during operations that are logically critical sections.
+The serially-reusable problem (see \VPageref{p:SeriallyReusable}) occurs for kernel threads in the ``T:H model, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
+The solution is to prevent interrupts that can result in a CPU or KT change during operations that are logically critical sections such as starting a memory operation on one KT and completing it on another.
 Locking these critical sections negates any attempt for a quick fastpath and results in high contention.
 For user-level threading, the serially-reusable problem appears with time slicing for preemptable scheduling, as the signal handler context switches to another user-level thread.
-Without time slicing, a user thread performing a long computation can prevent execution (starve) other threads.
-To prevent starvation for an allocation-active thread, \ie the time slice always triggers in an allocation critical-section for one thread, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
+Without time slicing, a user thread performing a long computation can prevent the execution of (starve) other threads.
+To prevent starvation for a memory-allocation-intensive thread, \ie the time slice always triggers in an allocation critical-section for one thread so the thread never gets time sliced, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
 The rollforward flag is tested at the end of each allocation funnel routine (see \VPageref{p:FunnelRoutine}), and if set, it is reset and a volunteer yield (context switch) is performed to allow other threads to execute.
 
-llheap uses two techniques to detect when execution is in a allocation operation or routine called from allocation operation, to abort any time slice during this period.
-On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting thread-local flags so the signal handler aborts immediately.
-On the fastpath, disabling/enabling interrupts is too expensive as accessing thread-local storage can be expensive and not thread-safe.
+llheap uses two techniques to detect when execution is in an allocation operation or a routine called from an allocation operation, to abort any time slice during this period.
+On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting kernel-thread-local flags so the signal handler aborts immediately.
+On the fastpath, disabling/enabling interrupts is too expensive as accessing kernel-thread-local storage can be expensive and not user-thread-safe.
 For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing.
-Hence, there is a window between loading the thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.
-
-The fast technique defines a special code section and places all non-interruptible routines in this section.
+Hence, there is a window between loading the kernel-thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.
+
+The fast technique (with lower run time cost) is to define a special code section and place all non-interruptible routines in this section.
 The linker places all code in this section into a contiguous block of memory, but the order of routines within the block is unspecified.
 Then, the signal handler compares the program counter at the point of interrupt with the the start and end address of the non-interruptible section, and aborts if executing within this section and sets the rollforward flag.
@@ -577,5 +578,5 @@
 Hence, for correctness, this approach requires inspection of generated assembler code for routines placed in the non-interruptible section.
 This issue is mitigated by the llheap funnel design so only funnel routines and a few statistics routines are placed in the non-interruptible section and their assembler code examined.
-These techniques are used in both the \uC and \CFA versions of llheap, where both of these systems have user-level threading.
+These techniques are used in both the \uC and \CFA versions of llheap as both of these systems have user-level threading.
 
 
@@ -587,5 +588,5 @@
 Programs can be statically or dynamically linked.
 \item
-The order the linker schedules startup code is poorly supported.
+The order in which the linker schedules startup code is poorly supported so it cannot be controlled entirely.
 \item
 Knowing a KT's start and end independently from the KT code is difficult.
@@ -600,9 +601,11 @@
 Hence, some part of the @sbrk@ area may be used by the default allocator and statistics about allocation operations cannot be correct.
 Furthermore, dynamic linking goes through trampolines, so there is an additional cost along the allocator fastpath for all allocation operations.
-Testing showed up to a 5\% performance increase for dynamic linking over static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.
+Testing showed up to a 5\% performance decrease with dynamic linking as compared to static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.
 
 All allocator libraries need to perform startup code to initialize data structures, such as the heap array for llheap.
-The problem is getting initialized done before the first allocator call.
+The problem is getting initialization done before the first allocator call.
 However, there does not seem to be mechanism to tell either the static or dynamic loader to first perform initialization code before any calls to a loaded library.
+Also, initialization code of other libraries and the run-time environment may call memory allocation routines such as \lstinline{malloc}.
+This compounds the situation as there is no mechanism to tell either the static or dynamic loader to first perform the initialization code of the memory allocator before any other initialization that may involve a dynamic memory allocation call.
 As a result, calls to allocation routines occur without initialization.
 To deal with this problem, it is necessary to put a conditional initialization check along the allocation fastpath to trigger initialization (singleton pattern).
@@ -641,5 +644,5 @@
 Therefore, the constructor is useless for knowing when a KT starts because the KT must reference it, and the allocator does not control the application KT.
 Fortunately, the singleton pattern needed for initializing the program KT also triggers KT allocator initialization, which can then reference @pgm_thread@ to call @threadManager@'s constructor, otherwise its destructor is not called.
-Now when a KT terminates, @~ThreadManager@ is called to chained it onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
+Now when a KT terminates, @~ThreadManager@ is called to chain it onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
 The conditional destructor call prevents closing down the program heap, which must remain available because epilogue code may free more storage.
 
@@ -660,5 +663,5 @@
 bool traceHeapOff();			$\C{// stop printing allocation/free calls}$
 \end{lstlisting}
-This kind of API is necessary to allow concurrent runtime systems to interact with difference memory allocators in a consistent way.
+This kind of API is necessary to allow concurrent runtime systems to interact with different memory allocators in a consistent way.
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -712,5 +715,5 @@
 Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory;
 hence the need to return an alternate value for a zero-sized allocation.
-A different approach allowed by the C API is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.
+A different approach allowed by the @C API@ is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.
 In theory, notifying the programmer of memory failure allows recovery;
 in practice, it is almost impossible to gracefully recover when out of memory.
@@ -736,5 +739,5 @@
 \paragraph{\lstinline{void * aalloc( size_t dim, size_t elemSize )}}
 extends @calloc@ for allocating a dynamic array of objects without calculating the total size of array explicitly but \emph{without} zero-filling the memory.
-@aalloc@ is significantly faster than @calloc@, which is the only alternative.
+@aalloc@ is significantly faster than @calloc@, which is the only alternative given by the standard memory-allocation routines.
 
 \noindent\textbf{Usage}
@@ -825,5 +828,5 @@
 \begin{itemize}
 \item
-@fd@: files description.
+@fd@: file descriptor.
 \end{itemize}
 It returns the previous file descriptor.
@@ -832,5 +835,5 @@
 \label{p:malloc_expansion}
 set the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation request.
-It returns the heap extension size used throughout a program, \ie called once at heap initialization.
+It returns the heap extension size used throughout a program when requesting more memory from the system using the @sbrk@ system call, \ie called once at heap initialization.
 
 \paragraph{\lstinline{size_t malloc_mmap_start()}}
@@ -915,9 +918,9 @@
 \begin{itemize}
 \item
-naming: \CFA regular and @ttype@ polymorphism is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
-\item
-named arguments: individual allocation properties are specified using postfix function call, so programmers do have to remember parameter positions in allocation calls.
-\item
-object size: like the \CFA C-style interface, programmers do not have to specify object size or cast allocation results.
+naming: \CFA regular and @ttype@ polymorphism (@ttype@ polymorphism in \CFA is similar to \CC variadic templates) is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
+\item
+named arguments: individual allocation properties are specified using postfix function call, so programmers do not have to remember parameter positions in allocation calls.
+\item
+object size: like \CFA's C-style interface, programmers do not have to specify object size or cast allocation results.
 \end{itemize}
 Note, postfix function call is an alternative call syntax, using backtick @`@, where the argument appears before the function name, \eg
@@ -928,8 +931,8 @@
 duration dur = 3@`@h + 42@`@m + 17@`@s;
 \end{cfa}
-@ttype@ polymorphism is similar to \CC variadic templates.
 
 \paragraph{\lstinline{T * alloc( ... )} or \lstinline{T * alloc( size_t dim, ... )}}
-is overloaded with a variable number of specific allocation routines, or an integer dimension parameter followed by a variable number specific allocation routines.
+is overloaded with a variable number of specific allocation operations, or an integer dimension parameter followed by a variable number of specific allocation operations.
+These allocation operations can be passed as named arguments when calling the \lstinline{alloc} routine.
 A call without parameters returns a dynamically allocated object of type @T@ (@malloc@).
 A call with only the dimension (dim) parameter returns a dynamically allocated array of objects of type @T@ (@aalloc@).
@@ -980,6 +983,6 @@
 5 5 5 -555819298 -555819298  // two undefined values
 \end{lstlisting}
-Examples 1 to 3, fill an object with a value or characters.
-Examples 4 to 7, fill an array of objects with values, another array, or part of an array.
+Examples 1 to 3 fill an object with a value or characters.
+Examples 4 to 7 fill an array of objects with values, another array, or part of an array.
 
 \subparagraph{\lstinline{S_resize(T) ?`resize( void * oaddr )}}
@@ -1015,5 +1018,5 @@
 \subparagraph{\lstinline{S_realloc(T) ?`realloc( T * a ))}}
 used to resize, realign, and fill, where the old object data is copied to the new object.
-The old object type must be the same as the new object type, since the values used.
+The old object type must be the same as the new object type, since the value is used.
 Note, for @fill@, only the extra space after copying the data from the old object is filled with the given parameter.
 For example:
@@ -1029,5 +1032,5 @@
 \end{lstlisting}
 Examples 2 to 3 change the alignment for the initial storage of @i@.
-The @13`fill@ for example 3 does nothing because no extra space is added.
+The @13`fill@ in example 3 does nothing because no extra space is added.
 
 \begin{cfa}[numbers=left]
@@ -1044,5 +1047,5 @@
 \end{lstlisting}
 Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
-The @13`fill@ for example 3 does nothing because no extra space is added.
+The @13`fill@ in example 3 does nothing because no extra space is added.
 
 These \CFA allocation features are used extensively in the development of the \CFA runtime.
Index: doc/theses/mubeen_zulfiqar_MMath/background.tex
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/background.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/background.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -36,6 +36,6 @@
 The management data starts with fixed-sized information in the static-data memory that references components in the dynamic-allocation memory.
 The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}.
-Allocated objects (light grey) are variable sized, and allocated and maintained by the program;
-\ie only the program knows the location of allocated storage, not the memory allocator.
+Allocated objects (light grey) are variable sized, and are allocated and maintained by the program;
+\ie only the program knows the location of allocated storage, not the memory allocator.
 \begin{figure}[h]
 \centering
@@ -49,5 +49,5 @@
 if there are multiple reserved blocks, they are also chained together, usually internally.
 
-Allocated and freed objects typically have additional management data embedded within them.
+In some allocator designs, allocated and freed objects have additional management data embedded within them.
 \VRef[Figure]{f:AllocatedObject} shows an allocated object with a header, trailer, and alignment padding and spacing around the object.
 The header contains information about the object, \eg size, type, etc.
@@ -104,5 +104,5 @@
 \VRef[Figure]{f:MemoryFragmentation} shows an example of how a small block of memory fragments as objects are allocated and deallocated over time.
 Blocks of free memory become smaller and non-contiguous making them less useful in serving allocation requests.
-Memory is highly fragmented when the sizes of most free blocks are unusable.
+Memory is highly fragmented when most free blocks are unusable because of their sizes.
 For example, \VRef[Figure]{f:Contiguous} and \VRef[Figure]{f:HighlyFragmented} have the same quantity of external fragmentation, but \VRef[Figure]{f:HighlyFragmented} is highly fragmented.
 If there is a request to allocate a large object, \VRef[Figure]{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while \VRef[Figure]{f:HighlyFragmented} likely has to request more memory from the operating system.
@@ -137,5 +137,5 @@
 The fewer bin-sizes, the fewer lists need to be searched and maintained;
 however, the bin sizes are less likely to closely fit the requested object size, leading to more internal fragmentation.
-The more bin-sizes, the longer the search and the less likely free objects are to be reused, leading to more external fragmentation and potentially heap blowup.
+The more bin sizes, the longer the search and the less likely free objects are to be reused, leading to more external fragmentation and potentially heap blowup.
 A variation of the binning algorithm allows objects to be allocated to the requested size, but when an object is freed, it is placed on the free list of the next smallest or equal bin-size.
 For example, with bin sizes of 8 and 16 bytes, a request for 12 bytes allocates only 12 bytes, but when the object is freed, it is placed on the 8-byte bin-list.
@@ -157,5 +157,5 @@
 The principle of locality recognizes that programs tend to reference a small set of data, called a working set, for a certain period of time, where a working set is composed of temporal and spatial accesses~\cite{Denning05}.
 Temporal clustering implies a group of objects are accessed repeatedly within a short time period, while spatial clustering implies a group of objects physically close together (nearby addresses) are accessed repeatedly within a short time period.
-Temporal locality commonly occurs during an iterative computation with a fix set of disjoint variables, while spatial locality commonly occurs when traversing an array.
+Temporal locality commonly occurs during an iterative computation with a fixed set of disjoint variables, while spatial locality commonly occurs when traversing an array.
 
 Hardware takes advantage of temporal and spatial locality through multiple levels of caching, \ie memory hierarchy.
@@ -328,5 +328,5 @@
 For example, multiple heaps are managed in a pool, starting with a single or a fixed number of heaps that increase\-/decrease depending on contention\-/space issues.
 At creation, a thread is associated with a heap from the pool.
-When the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.
+In some implementations of this model, when the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.
 If an unlocked heap is found, the thread changes its association and uses that heap.
 If all heaps are locked, the thread may create a new heap, use it, and then place the new heap into the pool;
@@ -347,5 +347,5 @@
 The management information in the static zone must be able to locate all heaps in the dynamic zone.
 The management information for the heaps must reside in the dynamic-allocation zone if there are a variable number.
-Each heap in the dynamic zone is composed of a list of a free objects and a pointer to its reserved memory.
+Each heap in the dynamic zone is composed of a list of free objects and a pointer to its reserved memory.
 An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory.
 Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur.
@@ -361,6 +361,6 @@
 Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup.
 The external fragmentation experienced by a program with a single heap is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory.
-Additionally, objects freed by one heap cannot be reused by other threads, except indirectly by returning free memory to the operating system, which can be expensive.
-(Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.)
+Additionally, objects freed by one heap cannot be reused by other threads without increasing the cost of the memory operations, except indirectly by returning free memory to the operating system, which can be expensive.
+Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.
 In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused.
 
@@ -384,12 +384,12 @@
 In contrast, the T:H model spreads each thread's objects over a larger area in different heaps.
 Thread heaps can also eliminate allocator-induced active false-sharing, if memory is acquired so it does not overlap at crucial boundaries with memory for another thread's heap.
-For example, assume page boundaries coincide with cache line boundaries, then if a thread heap always acquires pages of memory, no two threads share a page or cache line unless pointers are passed among them.
+For example, assume page boundaries coincide with cache line boundaries; if a thread heap always acquires pages of memory, then no two threads share a page or cache line unless pointers are passed among them.
 Hence, allocator-induced active false-sharing in \VRef[Figure]{f:AllocatorInducedActiveFalseSharing} cannot occur because the memory for thread heaps never overlaps.
 
-When a thread terminates, there are two options for handling its heap.
-First is to free all objects in the heap to the global heap and destroy the thread heap.
+When a thread terminates, there are two options for handling its thread heap.
+First is to free all objects in the thread heap to the global heap and destroy the thread heap.
 Second is to place the thread heap on a list of available heaps and reuse it for a new thread in the future.
 Destroying the thread heap immediately may reduce external fragmentation sooner, since all free objects are freed to the global heap and may be reused by other threads.
-Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible..
+Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible.
 
 
@@ -417,21 +417,7 @@
 When the user thread continues on the new kernel thread, it may have pointers into the previous kernel-thread's heap and hold locks associated with it.
 To get the same kernel-thread safety, time slicing must be disabled/\-enabled around these operations, so the user thread cannot jump to another kernel thread.
-However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is rare (10--100 milliseconds).
+However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption does not happen that frequently.
 Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically.
 Occasionally ignoring a preemption should be benign, but a persistent lack of preemption can result in both short and long term starvation.
-
-
-\begin{figure}
-\centering
-\subfigure[Ownership]{
-	\input{MultipleHeapsOwnership}
-} % subfigure
-\hspace{0.25in}
-\subfigure[No Ownership]{
-	\input{MultipleHeapsNoOwnership}
-} % subfigure
-\caption{Heap Ownership}
-\label{f:HeapsOwnership}
-\end{figure}
 
 
@@ -447,6 +433,19 @@
 For the T:1/T:H models with or without ownership or the 1:1 model with ownership, a thread may free objects to different heaps, which makes each heap publicly accessible to all threads, called a \newterm{public heap}.
 
+\begin{figure}
+\centering
+\subfigure[Ownership]{
+	\input{MultipleHeapsOwnership}
+} % subfigure
+\hspace{0.25in}
+\subfigure[No Ownership]{
+	\input{MultipleHeapsNoOwnership}
+} % subfigure
+\caption{Heap Ownership}
+\label{f:HeapsOwnership}
+\end{figure}
+
 \VRef[Figure]{f:MultipleHeapStorageOwnership} shows the effect of ownership on storage layout.
-(For simplicity assume the heaps all use the same size of reserves storage.)
+(For simplicity, assume the heaps all use the same size of reserved storage.)
 In contrast to \VRef[Figure]{f:MultipleHeapStorage}, each reserved area used by a heap only contains free storage for that particular heap because threads must return free objects back to the owner heap.
 Again, because multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
@@ -473,7 +472,7 @@
 While the returning thread can batch objects, batching across multiple heaps is complex and there is no obvious time when to push back to the owner heap.
 It is better for returning threads to immediately return to the receiving thread's batch list as the receiving thread has better knowledge when to incorporate the batch list into its free pool.
-Batching leverages the fact that most allocation patterns use the contention-free fast-path so locking on the batch list is rare for both the returning and receiving threads.
-
-It is possible for heaps to steal objects rather than return them and reallocating these objects when storage runs out on a heap.
+Batching leverages the fact that most allocation patterns use the contention-free fast-path, so locking on the batch list is rare for both the returning and receiving threads.
+
+It is possible for heaps to steal objects rather than return them and then reallocate these objects again when storage runs out on a heap.
 However, stealing can result in passive false-sharing.
 For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, Object$_2$ may be deallocated to Thread$_2$'s heap initially.
@@ -485,5 +484,5 @@
 
 Bracketing every allocation with headers/trailers can result in significant internal fragmentation, as shown in \VRef[Figure]{f:ObjectHeaders}.
-Especially if the headers contain redundant management information, \eg object size may be the same for many objects because programs only allocate a small set of object sizes.
+Especially if the headers contain redundant management information, storing that information is a waste of storage, \eg object size may be the same for many objects because programs only allocate a small set of object sizes.
 As well, it can result in poor cache usage, since only a portion of the cache line is holding useful information from the program's perspective.
 Spatial locality can also be negatively affected leading to poor cache locality~\cite{Feng05}:
@@ -660,5 +659,5 @@
 With local free-lists in containers, as in \VRef[Figure]{f:LocalFreeListWithinContainers}, the container is simply removed from one heap's free list and placed on the new heap's free list.
 Thus, when using local free-lists, the operation of moving containers is reduced from $O(N)$ to $O(1)$.
-The cost is adding information to a header, which increases the header size, and therefore internal fragmentation.
+However, there is the additional storage cost in the header, which increases the header size, and therefore internal fragmentation.
 
 \begin{figure}
@@ -689,5 +688,5 @@
 The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup.
 In the hybrid approach, a thread first allocates from its private heap and second from its public heap if no free memory exists in the private heap.
-Similarly, a thread first deallocates an object its private heap, and second to the public heap.
+Similarly, a thread first deallocates an object to its private heap, and second to the public heap.
 Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps.
 Note, deallocation from the private to the public (dashed line) is unlikely because there is no obvious advantages unless the public heap provides the only interface to the global heap.
Index: doc/theses/mubeen_zulfiqar_MMath/benchmarks.tex
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/benchmarks.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/benchmarks.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -12,5 +12,5 @@
 \item[Benchmarks]
 are a suite of application programs (SPEC CPU/WEB) that are exercised in a common way (inputs) to find differences among underlying software implementations associated with an application (compiler, memory allocator, web server, \etc).
-The applications are suppose to represent common execution patterns that need to perform well with respect to an underlying software implementation.
+The applications are supposed to represent common execution patterns that need to perform well with respect to an underlying software implementation.
 Benchmarks are often criticized for having overlapping patterns, insufficient patterns, or extraneous code that masks patterns.
 \item[Micro-Benchmarks]
@@ -26,11 +26,11 @@
 
 This thesis designs and examines a new set of micro-benchmarks for memory allocators that test a variety of allocation patterns, each with multiple tuning parameters.
-The aim of the micro-benchmark suite is to create a set of programs that can evaluate a memory allocator based on the key performance matrices such as speed, memory overhead, and cache performance.
+The aim of the micro-benchmark suite is to create a set of programs that can evaluate a memory allocator based on the key performance metrics such as speed, memory overhead, and cache performance.
 % These programs can be taken as a standard to benchmark an allocator's basic goals.
 These programs give details of an allocator's memory overhead and speed under certain allocation patterns.
-The allocation patterns are configurable (adjustment knobs) to observe an allocator's performance across a spectrum of events for a desired allocation pattern, which is seldom possible with benchmark programs.
+The allocation patterns are configurable (adjustment knobs) to observe an allocator's performance across a spectrum of allocation patterns, which is seldom possible with benchmark programs.
 Each micro-benchmark program has multiple control knobs specified by command-line arguments.
 
-The new micro-benchmark suite measures performance by allocating dynamic objects and measuring specific matrices.
+The new micro-benchmark suite measures performance by allocating dynamic objects and measuring specific metrics.
 An allocator's speed is benchmarked in different ways, as are issues like false sharing.
 
@@ -40,5 +40,5 @@
 Modern memory allocators, such as llheap, must handle multi-threaded programs at the KT and UT level.
 The following multi-threaded micro-benchmarks are presented to give a sense of prior work~\cite{Berger00} at the KT level.
-None of the prior work address multi-threading at the UT level.
+None of the prior work addresses multi-threading at the UT level.
 
 
@@ -47,6 +47,6 @@
 This benchmark stresses the ability of the allocator to handle different threads allocating and deallocating independently.
 There is no interaction among threads, \ie no object sharing.
-Each thread repeatedly allocate 100,000 \emph{8-byte} objects then deallocates them in the order they were allocated.
-Runtime of the benchmark evaluates its efficiency.
+Each thread repeatedly allocates 100,000 \emph{8-byte} objects then deallocates them in the order they were allocated.
+The execution time of the benchmark evaluates its efficiency.
 
 
@@ -63,5 +63,5 @@
 Before the thread terminates, it passes its array of 10,000 objects to a new child thread to continue the process.
 The number of thread generations varies depending on the thread speed.
-It calculates memory operations per second as an indicator of memory allocator's performance.
+It calculates memory operations per second as an indicator of the memory allocator's performance.
 
 
@@ -75,7 +75,7 @@
 \label{s:ChurnBenchmark}
 
-The churn benchmark measures the runtime speed of an allocator in a multi-threaded scenerio, where each thread extensively allocates and frees dynamic memory.
+The churn benchmark measures the runtime speed of an allocator in a multi-threaded scenario, where each thread extensively allocates and frees dynamic memory.
 Only @malloc@ and @free@ are used to eliminate any extra cost, such as @memcpy@ in @calloc@ or @realloc@.
-Churn simulates a memory intensive program that can be tuned to create different scenarios.
+Churn simulates a memory intensive program and can be tuned to create different scenarios.
 
 \VRef[Figure]{fig:ChurnBenchFig} shows the pseudo code for the churn micro-benchmark.
@@ -133,5 +133,5 @@
 When threads share a cache line, frequent reads/writes to their cache-line object causes cache misses, which cause escalating delays as cache distance increases.
 
-Cache thrash tries to create a scenerio that leads to false sharing, if the underlying memory allocator is allocating dynamic memory to multiple threads on the same cache lines.
+Cache thrash tries to create a scenario that leads to false sharing, if the underlying memory allocator is allocating dynamic memory to multiple threads on the same cache lines.
 Ideally, a memory allocator should distance the dynamic memory region of one thread from another.
 Having multiple threads allocating small objects simultaneously can cause a memory allocator to allocate objects on the same cache line, if its not distancing the memory among different threads.
@@ -141,6 +141,6 @@
 Each worker thread allocates an object and intensively reads/writes it for M times to possible invalidate cache lines that may interfere with other threads sharing the same cache line.
 Each thread repeats this for N times.
-The main thread measures the total time taken to for all worker threads to complete.
-Worker threads sharing cache lines with each other will take longer.
+The main thread measures the total time taken for all worker threads to complete.
+Worker threads sharing cache lines with each other are expected to take longer.
 
 \begin{figure}
@@ -156,13 +156,12 @@
 	signal workers to free
 	...
-	print addresses from each $thread$
 Worker Thread$\(_1\)$
-	allocate, write, read, free
-	warmup memory in chunkc of 16 bytes
-	...
-	malloc N objects
-	...
-	free objects
-	return object address to Main Thread
+	warm up memory in chunks of 16 bytes
+	...
+	For N
+		malloc an object
+		read/write the object M times
+		free the object
+	...
 Worker Thread$\(_2\)$
 	// same as Worker Thread$\(_1\)$
@@ -191,5 +190,5 @@
 
 The cache-scratch micro-benchmark measures allocator-induced passive false-sharing as illustrated in \VRef{s:AllocatorInducedPassiveFalseSharing}.
-As for cache thrash, if memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
+As with cache thrash, if memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
 In this scenario, the false sharing is being caused by the memory allocator although it is started by the program sharing an object.
 
@@ -202,5 +201,5 @@
 Cache scratch tries to create a scenario that leads to false sharing and should make the memory allocator preserve the program-induced false sharing, if it does not return a freed object to its owner thread and, instead, re-uses it instantly.
 An allocator using object ownership, as described in section \VRef{s:Ownership}, is less susceptible to allocator-induced passive false-sharing.
-If the object is returned to the thread who owns it, then the thread that gets a new object is less likely to be on the same cache line.
+If the object is returned to the thread that owns it, then the new object that the thread gets is less likely to be on the same cache line.
 
 \VRef[Figure]{fig:benchScratchFig} shows the pseudo code for the cache-scratch micro-benchmark.
@@ -224,15 +223,13 @@
 	signal workers to free
 	...
-	print addresses from each $thread$
 Worker Thread$\(_1\)$
-	allocate, write, read, free
-	warmup memory in chunkc of 16 bytes
-	...
-	for ( N )
-		free an object passed by Main Thread
+	warm up memory in chunks of 16 bytes
+	...
+	free the object passed by the Main Thread
+	For N
 		malloc new object
-	...
-	free objects
-	return new object addresses to Main Thread
+		read/write the object M times
+		free the object
+	...
 Worker Thread$\(_2\)$
 	// same as Worker Thread$\(_1\)$
@@ -248,5 +245,5 @@
 
 Similar to benchmark cache thrash in section \VRef{sec:benchThrashSec}, different cache access scenarios can be created using the following command-line arguments.
-\begin{description}[itemsep=0pt,parsep=0pt]
+\begin{description}[topsep=0pt,itemsep=0pt,parsep=0pt]
 \item[threads:]
 number of threads (K).
@@ -262,7 +259,8 @@
 \subsection{Speed Micro-Benchmark}
 \label{s:SpeedMicroBenchmark}
+\vspace*{-4pt}
 
 The speed benchmark measures the runtime speed of individual and sequences of memory allocation routines:
-\begin{enumerate}[itemsep=0pt,parsep=0pt]
+\begin{enumerate}[topsep=-5pt,itemsep=0pt,parsep=0pt]
 \item malloc
 \item realloc
@@ -332,5 +330,5 @@
 \VRef[Figure]{fig:MemoryBenchFig} shows the pseudo code for the memory micro-benchmark.
 It creates a producer-consumer scenario with K producer threads and each producer has M consumer threads.
-A producer has a separate buffer for each consumer and allocates N objects of random sizes following a settable distribution for each consumer.
+A producer has a separate buffer for each consumer and allocates N objects of random sizes following a configurable distribution for each consumer.
 A consumer frees these objects.
 After every memory operation, program memory usage is recorded throughout the runtime.
Index: doc/theses/mubeen_zulfiqar_MMath/conclusion.tex
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/conclusion.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/conclusion.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -17,17 +17,17 @@
 % ====================
 
-The goal of this thesis was to build a low-latency memory allocator for both KT and UT multi-threads systems, which is competitive with the best current memory allocators, while extending the feature set of existing and new allocator routines.
+The goal of this thesis was to build a low-latency (or high bandwidth) memory allocator for both KT and UT multi-threading systems that is competitive with the best current memory allocators while extending the feature set of existing and new allocator routines.
 The new llheap memory-allocator achieves all of these goals, while maintaining and managing sticky allocation information without a performance loss.
 Hence, it becomes possible to use @realloc@ frequently as a safe operation, rather than just occasionally.
 Furthermore, the ability to query sticky properties and information allows programmers to write safer programs, as it is possible to dynamically match allocation styles from unknown library routines that return allocations.
 
-Extending the C allocation API with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ means programmers do not make mistakes writing theses useful allocation operations.
+Extending the C allocation API with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ means programmers do not have to do these useful allocation operations themselves.
 The ability to use \CFA's advanced type-system (and possibly \CC's too) to have one allocation routine with completely orthogonal sticky properties shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation.
 
 Providing comprehensive statistics for all allocation operations is invaluable in understanding and debugging a program's dynamic behaviour.
-No other memory allocator provides comprehensive statistics gathering.
+No other memory allocator provides such comprehensive statistics gathering.
 This capability was used extensively during the development of llheap to verify its behaviour.
 As well, providing a debugging mode where allocations are checked, along with internal pre/post conditions and invariants, is extremely useful, especially for students.
-While not as powerful as the @valgrind@ interpreter, a large number of allocations mistakes are detected.
+While not as powerful as the @valgrind@ interpreter, a large number of allocation mistakes are detected.
 Finally, contention-free statistics gathering and debugging have a low enough cost to be used in production code.
 
@@ -36,5 +36,5 @@
 
 Starting a micro-benchmark test-suite for comparing allocators, rather than relying on a suite of arbitrary programs, has been an interesting challenge.
-The current micro-benchmarks allow some understand of allocator implementation properties without actually looking at the implementation.
+The current micro-benchmarks allow some understanding of allocator implementation properties without actually looking at the implementation.
 For example, the memory micro-benchmark quickly identified how several of the allocators work at the global level.
 It was not possible to show how the micro-benchmarks adjustment knobs were used to tune to an interesting test point.
@@ -45,10 +45,10 @@
 
 A careful walk-though of the allocator fastpath should yield additional optimizations for a slight performance gain.
-In particular, looking at the implementation of rpmalloc, which is often the fastest allocator,
+In particular, analysing the implementation of rpmalloc, which is often the fastest allocator,
 
-The micro-benchmarks project requires more testing and analysis.
-Additional allocations patterns are needed to extract meaningful information about allocators, and within allocation patterns, what are the best tuning knobs.
+The micro-benchmark project requires more testing and analysis.
+Additional allocation patterns are needed to extract meaningful information about allocators, and within allocation patterns, what are the most useful tuning knobs.
 Also, identifying ways to visualize the results of the micro-benchmarks is a work in progress.
 
-After llheap is made available on gitHub, interacting with its users to locate problems and improvements, will make llbench a more robust memory allocator.
-As well, feedback from the \uC and \CFA projects, which have adopted llheap for their memory allocator, will provide additional feedback.
+After llheap is made available on GitHub, interacting with its users to locate problems and improvements will make llheap a more robust memory allocator.
+As well, feedback from the \uC and \CFA projects, which have adopted llheap for their memory allocator, will provide additional information.
Index: doc/theses/mubeen_zulfiqar_MMath/figures/Header.fig
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/figures/Header.fig	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/figures/Header.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -20,21 +20,26 @@
 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
 	 3300 1500 3300 2400
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4200 1800 6600 1800 6600 2100 4200 2100 4200 1800
 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
 	1 1 1.00 45.00 90.00
-	 4050 2625 3750 2625 3750 2400
+	 4200 2775 3750 2775 3750 1725
 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
 	1 1 1.00 45.00 90.00
-	 4050 2850 3450 2850 3450 2400
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 4200 1800 6600 1800 6600 2100 4200 2100 4200 1800
+	 4200 2550 4050 2550 4050 1725
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
+	1 1 1.00 45.00 90.00
+	 4200 3000 3450 3000 3450 2025
 4 0 0 50 -1 0 12 0.0000 2 180 1185 1875 1725 bucket pointer\001
 4 0 0 50 -1 0 12 0.0000 2 180 1005 1875 2025 mapped size\001
 4 0 0 50 -1 0 12 0.0000 2 135 1215 1875 2325 next free block\001
 4 2 0 50 -1 0 12 0.0000 2 135 480 1725 2025 union\001
-4 1 0 50 -1 0 12 0.0000 2 135 270 3775 2325 0/1\001
-4 1 0 50 -1 0 12 0.0000 2 135 270 3475 2325 0/1\001
 4 1 0 50 -1 0 12 0.0000 2 180 945 5400 2025 request size\001
 4 1 0 50 -1 0 12 0.0000 2 180 765 5400 1425 4/8-bytes\001
 4 1 0 50 -1 0 12 0.0000 2 180 765 3000 1425 4/8-bytes\001
-4 0 0 50 -1 0 12 0.0000 2 135 825 4125 2700 zero filled\001
-4 0 0 50 -1 0 12 0.0000 2 180 1515 4125 2925 mapped allocation\001
+4 1 0 50 -1 0 12 0.0000 2 135 270 3475 2025 0/1\001
+4 1 0 50 -1 0 12 0.0000 2 135 270 3775 1725 0/1\001
+4 1 0 50 -1 0 12 0.0000 2 135 270 4075 1725 0/1\001
+4 0 0 50 -1 0 12 0.0000 2 180 1515 4275 3075 mapped allocation\001
+4 0 0 50 -1 0 12 0.0000 2 135 825 4275 2850 zero filled\001
+4 0 0 50 -1 0 12 0.0000 2 180 1920 4275 2625 alignment (fake header)\001
Index: doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsNoOwnership.fig
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsNoOwnership.fig	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsNoOwnership.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1,7 +1,7 @@
-#FIG 3.2  Produced by xfig version 3.2.5
+#FIG 3.2  Produced by xfig version 3.2.7b
 Landscape
 Center
 Inches
-Letter  
+Letter
 100.00
 Single
@@ -11,33 +11,33 @@
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 1200 2100 1500 2100 1500 1800 1200 1800 1200 2100
-4 1 0 50 -1 0 11 0.0000 2 195 495 1350 2025 H$_1$\001
+4 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001
 -6
 6 1950 1800 2550 2100
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 2100 2100 2400 2100 2400 1800 2100 1800 2100 2100
-4 1 0 50 -1 0 11 0.0000 2 195 495 2250 2025 H$_2$\001
+4 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_2$\001
 -6
 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 1350 1350 150 150 1350 1350 1500 1350
 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 2250 1350 150 150 2250 1350 2400 1350
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 1275 1800 1275 1500
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	0 0 1.00 45.00 90.00
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 45.00 90.00
 	 1425 1500 1425 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 1425 1500 2175 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 2175 1500 1425 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 2175 1500 2175 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 2325 1800 2325 1500
-4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001
-4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_2$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_2$\001
Index: doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsOwnership.fig
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsOwnership.fig	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsOwnership.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1,7 +1,7 @@
-#FIG 3.2  Produced by xfig version 3.2.5
+#FIG 3.2  Produced by xfig version 3.2.7b
 Landscape
 Center
 Inches
-Letter  
+Letter
 100.00
 Single
@@ -11,29 +11,29 @@
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 1200 2100 1500 2100 1500 1800 1200 1800 1200 2100
-4 1 0 50 -1 0 11 0.0000 2 195 495 1350 2025 H$_1$\001
+4 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001
 -6
 6 1950 1800 2550 2100
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 2100 2100 2400 2100 2400 1800 2100 1800 2100 2100
-4 1 0 50 -1 0 11 0.0000 2 195 495 2250 2025 H$_2$\001
+4 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_2$\001
 -6
 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 1350 1350 150 150 1350 1350 1500 1350
 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 2250 1350 150 150 2250 1350 2400 1350
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 2175 1500 1425 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
-	 1425 1500 2175 1800
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 1275 1800 1275 1500
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 2325 1800 2325 1500
-4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_2$\001
-4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	 1425 1500 2175 1800
+4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_2$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001
Index: doc/theses/mubeen_zulfiqar_MMath/figures/PerThreadHeap.fig
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/figures/PerThreadHeap.fig	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/figures/PerThreadHeap.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1,7 +1,7 @@
-#FIG 3.2  Produced by xfig version 3.2.5
+#FIG 3.2  Produced by xfig version 3.2.7b
 Landscape
 Center
 Inches
-Letter  
+Letter
 100.00
 Single
@@ -11,5 +11,5 @@
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 2700 1800 3000 1800 3000 2100 2700 2100 2700 1800
-4 1 0 50 -1 0 11 0.0000 2 135 135 2850 2025 G\001
+4 1 0 50 -1 0 11 0.0000 2 120 135 2850 2025 G\001
 -6
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
@@ -17,6 +17,6 @@
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 1350 1500 1350 1800
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
@@ -27,18 +27,18 @@
 	 2100 1800 2400 1800 2400 2100 2100 2100 2100 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 1800 1500 1800 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 2250 1500 2250 1800
-4 1 0 50 -1 0 11 0.0000 2 195 1320 2550 2025 $\\Leftrightarrow$\001
-4 1 0 50 -1 0 11 0.0000 2 195 1320 3150 2025 $\\Leftrightarrow$\001
-4 0 0 50 -1 0 11 0.0000 2 135 240 3300 2025 OS\001
-4 1 0 50 -1 0 11 0.0000 2 195 495 1350 2025 H$_1$\001
-4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001
-4 1 0 50 -1 0 11 0.0000 2 195 495 1800 2025 H$_2$\001
-4 1 0 50 -1 0 11 0.0000 2 195 465 1800 1425 T$_2$\001
-4 1 0 50 -1 0 11 0.0000 2 195 495 2250 2025 H$_3$\001
-4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_3$\001
+4 1 0 50 -1 0 11 0.0000 2 180 1260 2550 2025 $\\Leftrightarrow$\001
+4 1 0 50 -1 0 11 0.0000 2 180 1260 3150 2025 $\\Leftrightarrow$\001
+4 0 0 50 -1 0 11 0.0000 2 120 240 3300 2025 OS\001
+4 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001
+4 1 0 50 -1 0 11 0.0000 2 165 495 1800 2025 H$_2$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001
+4 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_3$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001
Index: doc/theses/mubeen_zulfiqar_MMath/figures/SharedHeaps.fig
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/figures/SharedHeaps.fig	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/figures/SharedHeaps.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1,7 +1,7 @@
-#FIG 3.2  Produced by xfig version 3.2.5
+#FIG 3.2  Produced by xfig version 3.2.7b
 Landscape
 Center
 Inches
-Letter  
+Letter
 100.00
 Single
@@ -10,50 +10,50 @@
 6 1500 1200 2100 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350
-4 1 0 50 -1 0 11 0.0000 2 195 465 1800 1425 T$_2$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001
 -6
 6 1050 1200 1650 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
-4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001
 -6
 6 1950 1200 2550 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350
-4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_3$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001
 -6
 6 1275 1800 1875 2100
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 1425 1800 1725 1800 1725 2100 1425 2100 1425 1800
-4 1 0 50 -1 0 11 0.0000 2 195 495 1575 2025 H$_1$\001
+4 1 0 50 -1 0 11 0.0000 2 165 495 1575 2025 H$_1$\001
 -6
 6 1725 1800 2325 2100
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 1875 1800 2175 1800 2175 2100 1875 2100 1875 1800
-4 1 0 50 -1 0 11 0.0000 2 195 495 2025 2025 H$_2$\001
+4 1 0 50 -1 0 11 0.0000 2 165 495 2025 2025 H$_2$\001
 -6
 6 2475 1800 2775 2100
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 2475 1800 2775 1800 2775 2100 2475 2100 2475 1800
-4 1 0 50 -1 0 11 0.0000 2 135 135 2625 2025 G\001
+4 1 0 50 -1 0 11 0.0000 2 120 135 2625 2025 G\001
 -6
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 1275 1500 1500 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 1425 1500 1950 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 1725 1500 1650 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 1875 1500 2025 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 2250 1500 2100 1800
-4 0 0 50 -1 0 11 0.0000 2 135 240 3075 2025 OS\001
-4 1 0 50 -1 0 11 0.0000 2 195 1320 2325 2025 $\\Leftrightarrow$\001
-4 1 0 50 -1 0 11 0.0000 2 195 1320 2925 2025 $\\Leftrightarrow$\001
+4 0 0 50 -1 0 11 0.0000 2 120 240 3075 2025 OS\001
+4 1 0 50 -1 0 11 0.0000 2 180 1260 2325 2025 $\\Leftrightarrow$\001
+4 1 0 50 -1 0 11 0.0000 2 180 1260 2925 2025 $\\Leftrightarrow$\001
Index: doc/theses/mubeen_zulfiqar_MMath/figures/SingleHeap.fig
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/figures/SingleHeap.fig	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/figures/SingleHeap.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1,7 +1,7 @@
-#FIG 3.2  Produced by xfig version 3.2.5
+#FIG 3.2  Produced by xfig version 3.2.7b
 Landscape
 Center
 Inches
-Letter  
+Letter
 100.00
 Single
@@ -10,29 +10,29 @@
 6 1500 1200 2100 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350
-4 1 0 50 -1 0 11 0.0000 2 195 465 1800 1425 T$_2$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001
 -6
 6 1050 1200 1650 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
-4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001
 -6
 6 1950 1200 2550 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350
-4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_3$\001
+4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001
 -6
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 1350 1500 1725 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 2250 1500 1875 1800
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 1650 1800 1950 1800 1950 2100 1650 2100 1650 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 1800 1500 1800 1800
-4 1 0 50 -1 0 11 0.0000 2 195 495 1800 2025 H$_1$\001
-4 1 0 50 -1 0 11 0.0000 2 195 1320 2100 2025 $\\Leftrightarrow$\001
-4 0 0 50 -1 0 11 0.0000 2 135 240 2250 2025 OS\001
+4 1 0 50 -1 0 11 0.0000 2 165 495 1800 2025 H$_1$\001
+4 1 0 50 -1 0 11 0.0000 2 180 1260 2100 2025 $\\Leftrightarrow$\001
+4 0 0 50 -1 0 11 0.0000 2 120 240 2250 2025 OS\001
Index: doc/theses/mubeen_zulfiqar_MMath/figures/UserKernelHeaps.fig
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/figures/UserKernelHeaps.fig	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/figures/UserKernelHeaps.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -45,14 +45,14 @@
 -6
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 2025 2100 2025 2400
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 2475 2100 2475 2400
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	0 0 1.00 45.00 90.00
-	0 0 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
+	1 1 1.00 45.00 90.00
 	 2925 2100 2925 2400
 4 1 0 50 -1 0 11 0.0000 2 135 2235 2475 1725 scheduled across kernel threads\001
Index: doc/theses/mubeen_zulfiqar_MMath/intro.tex
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/intro.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/intro.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -53,5 +53,5 @@
 When this allocator proves inadequate, programmers often write specialize allocators for specific needs.
 C and \CC allow easy replacement of the default memory allocator with an alternative specialized or general-purpose memory-allocator.
-(Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.)
+Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.
 However, high-performance memory-allocators for kernel and user multi-threaded programs are still being designed and improved.
 For this reason, several alternative general-purpose allocators have been written for C/\CC with the goal of scaling in a multi-threaded program~\cite{Berger00,mtmalloc,streamflow,tcmalloc}.
@@ -65,8 +65,5 @@
 \begin{enumerate}[leftmargin=*]
 \item
-Implementation of a new stand-lone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
-
-\item
-Adopt @nullptr@ return for a zero-sized allocation, rather than an actual memory address, which can be passed to @free@.
+Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
 
 \item
@@ -104,5 +101,5 @@
 
 \item
-Provide additional heap wrapper functions in \CFA creating an orthogonal set of allocation operations and properties.
+Provide additional heap wrapper functions in \CFA creating a more usable set of allocation operations and properties.
 
 \item
@@ -111,5 +108,5 @@
 \item
 @malloc_alignment( addr )@ returns the alignment of the allocation pointed-to by @addr@.
-If the allocation is not aligned or @addr@ is the @nulladdr@, the minimal alignment is returned.
+If the allocation is not aligned or @addr@ is @NULL@, the minimal alignment is returned.
 \item
 @malloc_zero_fill( addr )@ returns a boolean result indicating if the memory pointed-to by @addr@ is allocated with zero fill, e.g., by @calloc@/@cmemalign@.
@@ -119,7 +116,4 @@
 @malloc_usable_size( addr )@ returns the usable (total) size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.
 \end{itemize}
-
-\item
-Provide mostly contention-free allocation and free operations via a heap-per-kernel-thread implementation.
 
 \item
@@ -136,5 +130,5 @@
 
 \item
-Provide extensive runtime checks to valid allocation operations and identify the amount of unfreed storage at program termination.
+Provide extensive runtime checks to validate allocation operations and identify the amount of unfreed storage at program termination.
 
 \item
Index: doc/theses/mubeen_zulfiqar_MMath/performance.tex
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/performance.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/performance.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -3,5 +3,5 @@
 
 This chapter uses the micro-benchmarks from \VRef[Chapter]{s:Benchmarks} to test a number of current memory allocators, including llheap.
-The goal is to see if llheap is competitive with the current best memory allocators.
+The goal is to see if llheap is competitive with the currently popular memory allocators.
 
 
@@ -11,7 +11,7 @@
 \begin{itemize}
 \item
+\textbf{Algol} Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz, GCC version 9.4.0
+\item
 \textbf{Nasus} AMD EPYC 7662, 64-core socket $\times$ 2, 2.0 GHz, GCC version 9.3.0
-\item
-\textbf{Algol} Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz, GCC version 9.4.0
 \end{itemize}
 
@@ -31,5 +31,5 @@
 
 \paragraph{glibc (\textsf{glc})}
-\cite{glibc} is the default gcc thread-safe allocator.
+\cite{glibc} is the default glibc thread-safe allocator.
 \\
 \textbf{Version:} Ubuntu GLIBC 2.31-0ubuntu9.7 2.31\\
@@ -46,5 +46,5 @@
 
 \paragraph{hoard (\textsf{hrd})}
-\cite{hoard} is a thread-safe allocator that is multi-threaded and using a heap layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap.
+\cite{hoard} is a thread-safe allocator that is multi-threaded and uses a heap layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap.
 \\
 \textbf{Version:} 3.13\\
@@ -78,5 +78,5 @@
 
 \paragraph{tbb malloc (\textsf{tbb})}
-\cite{tbbmalloc} is a thread-safe allocator that is multi-threaded and uses private heap for each thread.
+\cite{tbbmalloc} is a thread-safe allocator that is multi-threaded and uses a private heap for each thread.
 Each private-heap has multiple bins of different sizes. Each bin contains free regions of the same size.
 \\
@@ -90,6 +90,6 @@
 \section{Experiments}
 
-The each micro-benchmark is configured and run with each of the allocators,
-The less time an allocator takes to complete a benchmark the better, so lower in the graphs is better.
+Each micro-benchmark is configured and run with each of the allocators.
+The less time an allocator takes to complete a benchmark the better, so lower in the graphs is better, except for the Memory micro-benchmark graphs.
 All graphs use log scale on the Y-axis, except for the Memory micro-benchmark (see \VRef{s:MemoryMicroBenchmark}).
 
@@ -231,9 +231,8 @@
 Second is the low-performer group, which includes the rest of the memory allocators.
 These memory allocators have significant program-induced passive false-sharing, where \textsf{hrd}'s is the worst performing allocator.
-All of the allocator's in this group are sharing heaps among threads at some level.
-
-Interestingly, allocators such as \textsf{hrd} and \textsf{glc} performed well in micro-benchmark cache thrash (see \VRef{sec:cache-thrash-perf}).
-But, these allocators are among the low performers in the cache scratch.
-It suggests these allocators do not actively produce false-sharing but preserve program-induced passive false sharing.
+All of the allocators in this group are sharing heaps among threads at some level.
+
+Interestingly, allocators such as \textsf{hrd} and \textsf{glc} performed well in micro-benchmark cache thrash (see \VRef{sec:cache-thrash-perf}), but these allocators are among the low performers in the cache scratch.
+It suggests these allocators do not actively produce false-sharing, but preserve program-induced passive false sharing.
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Index: doc/theses/mubeen_zulfiqar_MMath/uw-ethesis-frontpgs.tex
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/uw-ethesis-frontpgs.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/uw-ethesis-frontpgs.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -13,5 +13,5 @@
         \vspace*{1.0cm}
 
-        {\Huge\bf \CFA Memory Allocation}
+        {\Huge\bf High-Performance Concurrent Memory Allocation}
 
         \vspace*{1.0cm}
@@ -108,5 +108,5 @@
 % D E C L A R A T I O N   P A G E
 % -------------------------------
-  % The following is a sample Delaration Page as provided by the GSO
+  % The following is a sample Declaration Page as provided by the GSO
   % December 13th, 2006.  It is designed for an electronic thesis.
  \begin{center}\textbf{Author's Declaration}\end{center}
@@ -136,20 +136,20 @@
 
 The goal of this thesis is to build a low-latency memory allocator for both kernel and user multi-threaded systems, which is competitive with the best current memory allocators, while extending the feature set of existing and new allocator routines.
-A new llheap memory-allocator is created that achieves all of these goals, while maintaining and managing sticky allocation properties for zero-fill and alignment allocations without a performance loss.
+A new llheap memory-allocator is created that achieves all of these goals, while maintaining and managing sticky allocation properties for zero-filled and aligned allocations without a performance loss.
 Hence, it becomes possible to use @realloc@ frequently as a safe operation, rather than just occasionally, because it preserves sticky properties when enlarging storage requests.
 Furthermore, the ability to query sticky properties and information allows programmers to write safer programs, as it is possible to dynamically match allocation styles from unknown library routines that return allocations.
 The C allocation API is also extended with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ so programmers do not make mistakes writing theses useful allocation operations.
 llheap is embedded into the \uC and \CFA runtime systems, both of which have user-level threading.
-The ability to use \CFA's advanced type-system (and possibly \CC's too) to have one allocation routine with completely orthogonal sticky properties shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation.
+The ability to use \CFA's advanced type-system (and possibly \CC's too) to combine advanced memory operations into one allocation routine using named arguments shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation.
 
 The llheap allocator also provides comprehensive statistics for all allocation operations, which are invaluable in understanding and debugging a program's dynamic behaviour.
-No other memory allocator examined in the thesis provides comprehensive statistics gathering.
-As well, llheap provides a debugging mode where allocations are checked, along with internal pre/post conditions and invariants, is extremely useful, especially for students.
+No other memory allocator examined in the thesis provides such comprehensive statistics gathering.
+As well, llheap provides a debugging mode where allocations are checked with internal pre/post conditions and invariants. It is extremely useful, especially for students.
 While not as powerful as the @valgrind@ interpreter, a large number of allocations mistakes are detected.
 Finally, contention-free statistics gathering and debugging have a low enough cost to be used in production code.
 
-A micro-benchmark test-suite is started for comparing allocators, rather than relying on a suite of arbitrary programs, has been an interesting challenge.
+A micro-benchmark test-suite is started for comparing allocators, rather than relying on a suite of arbitrary programs. It has been an interesting challenge.
 These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs.
-Existing memory allocators, glibc, dlmalloc, hoard, jemalloc, ptmalloc3, rpmalloc, tbmalloc and the new allocator llheap are all compared using the new micro-benchmark test-suite.
+Existing memory allocators, glibc, dlmalloc, hoard, jemalloc, ptmalloc3, rpmalloc, tbmalloc, and the new allocator llheap are all compared using the new micro-benchmark test-suite.
 \cleardoublepage
 
@@ -162,8 +162,12 @@
 I would like to thank all the people who made this thesis possible.
 
-I would like to acknowledge Peter A. Buhr for his assistance and support throughtout the process.
+I would like to acknowledge Peter A. Buhr for his assistance and support throughout the process.
 It would have been impossible without him.
 
+I would like to acknowledge Gregor Richards and Trevor Brown for reading my thesis quickly and giving me great feedback on my work.
+
 Also, I would say thanks to my team members at PLG especially Thierry, Michael, and Andrew for their input.
+
+Finally, a special thank you to Huawei Canada for funding this work.
 \end{center}
 \cleardoublepage
@@ -195,8 +199,8 @@
 % L I S T   O F   T A B L E S
 % ---------------------------
-\addcontentsline{toc}{chapter}{List of Tables}
-\listoftables
-\cleardoublepage
-\phantomsection		% allows hyperref to link to the correct page
+% \addcontentsline{toc}{chapter}{List of Tables}
+% \listoftables
+% \cleardoublepage
+% \phantomsection		% allows hyperref to link to the correct page
 
 % Change page numbering back to Arabic numerals
Index: doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex
===================================================================
--- doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -106,5 +106,5 @@
     pdffitwindow=false,     % window fit to page when opened
     pdfstartview={FitH},    % fits the width of the page to the window
-    pdftitle={Cforall Memory Allocation}, % title: CHANGE THIS TEXT!
+    pdftitle={High-Performance Concurrent Memory Allocation}, % title: CHANGE THIS TEXT!
     pdfauthor={Mubeen Zulfiqar},    % author: CHANGE THIS TEXT! and uncomment this line
     pdfsubject={Cforall},  % subject: CHANGE THIS TEXT! and uncomment this line
Index: doc/theses/thierry_delisle_PhD/thesis/Makefile
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/Makefile	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/thierry_delisle_PhD/thesis/Makefile	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -3,6 +3,10 @@
 Build = build
 Figures = img
-Macros = ../../../LaTeXmacros
-TeXLIB = .:${Macros}:${Build}:../../../bibliography:
+
+LaTMac = ../../../LaTeXmacros
+BibRep = ../../../bibliography
+
+Macros = ${LaTMac}
+TeXLIB = .:${Macros}:${Build}:${BibRep}:
 LaTeX  = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
 BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex
@@ -37,4 +41,8 @@
 	emptytree \
 	fairness \
+	idle \
+	idle1 \
+	idle2 \
+	idle_state \
 	io_uring \
 	pivot_ring \
@@ -42,4 +50,19 @@
 	cycle \
 	result.cycle.jax.ops \
+	result.yield.jax.ops \
+	result.churn.jax.ops \
+	result.cycle.jax.ns \
+	result.yield.jax.ns \
+	result.churn.jax.ns \
+	result.cycle.low.jax.ops \
+	result.yield.low.jax.ops \
+	result.churn.low.jax.ops \
+	result.cycle.low.jax.ns \
+	result.yield.low.jax.ns \
+	result.churn.low.jax.ns \
+	result.memcd.updt.qps \
+	result.memcd.updt.lat \
+	result.memcd.rate.qps \
+	result.memcd.rate.99th \
 }
 
@@ -52,5 +75,5 @@
 ## Define the documents that need to be made.
 all: thesis.pdf
-thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib ../../../LaTeXmacros/common.tex ../../../LaTeXmacros/common.sty
+thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib ${LaTMac}/common.tex ${LaTMac}/common.sty ${BibRep}/pl.bib
 
 DOCUMENT = thesis.pdf
@@ -116,9 +139,31 @@
 	python3 $< $@
 
-build/result.%.ns.svg : data/% | ${Build}
-	../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops"
+cycle_jax_ops_FLAGS = --MaxY=120000000
+cycle_low_jax_ops_FLAGS = --MaxY=120000000
+cycle_jax_ns_FLAGS = --MaxY=2000
+cycle_low_jax_ns_FLAGS = --MaxY=2000
 
-build/result.%.ops.svg : data/% | ${Build}
-	../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second"
+yield_jax_ops_FLAGS = --MaxY=150000000
+yield_low_jax_ops_FLAGS = --MaxY=150000000
+yield_jax_ns_FLAGS = --MaxY=1500
+yield_low_jax_ns_FLAGS = --MaxY=1500
+
+build/result.%.ns.svg : data/% Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops/procs" $($(subst .,_,$*)_ns_FLAGS)
+
+build/result.%.ops.svg : data/% Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second" $($(subst .,_,$*)_ops_FLAGS)
+
+build/result.memcd.updt.qps.svg : data/memcd.updt Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "Actual QPS" -x "Update Ratio"
+
+build/result.memcd.updt.lat.svg : data/memcd.updt Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "Average Read Latency" -x "Update Ratio"
+
+build/result.memcd.rate.qps.svg : data/memcd.rate Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "Actual QPS" -x "Target QPS"
+
+build/result.memcd.rate.99th.svg : data/memcd.rate Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "Tail Read Latency" -x "Target QPS"
 
 ## pstex with inverted colors
Index: doc/theses/thierry_delisle_PhD/thesis/data/churn.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/churn.jax	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/data/churn.jax	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,1 @@
+[["rdq-churn-cfa", "./rdq-churn-cfa -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10016.628354, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 103371393.0, "Total blocks": 42643001.0, "Ops per second": 10319978.87, "ns per ops": 96.9, "Ops per threads": 1033713.0, "Ops per procs": 103371393.0, "Ops/sec/procs": 10319978.87, "ns per ops/procs": 96.9}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 6.0, "Total Operations(ops)": 256473373.0, "Ops per second": 25647337.0, "ns per ops": 39.0, "Ops per threads": 320591.0, "Ops per procs": 32059171.0, "Ops/sec/procs": 3205917.0, "ns per ops/procs": 315.0}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10017.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 76683227.0, "Total blocks": 27590624.0, "Ops per second": 7655096.57, "ns per ops": 130.63, "Ops per threads": 47927.0, "Ops per procs": 4792701.0, "Ops/sec/procs": 478443.54, "ns per ops/procs": 2090.11}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 190480943.0, "Ops per second": 19045032.48, "ns per ops": 52.51, "Ops per threads": 79367.0, "Ops per procs": 7936705.0, "Ops/sec/procs": 793543.02, "ns per ops/procs": 1260.17}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10016.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 76380433.0, "Total blocks": 27484490.0, "Ops per second": 7625307.92, "ns per ops": 131.14, "Ops per threads": 47737.0, "Ops per procs": 4773777.0, "Ops/sec/procs": 476581.75, "ns per ops/procs": 
2098.28}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 6.0, "Total Operations(ops)": 73903563.0, "Ops per second": 7390356.0, "ns per ops": 136.0, "Ops per threads": 739035.0, "Ops per procs": 73903563.0, "Ops/sec/procs": 7390356.0, "ns per ops/procs": 136.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 6.0, "Total Operations(ops)": 469714305.0, "Ops per second": 46971430.0, "ns per ops": 21.0, "Ops per threads": 293571.0, "Ops per procs": 29357144.0, "Ops/sec/procs": 2935714.0, "ns per ops/procs": 340.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 6.0, "Total Operations(ops)": 653514397.0, "Ops per second": 65351439.0, "ns per ops": 15.0, "Ops per threads": 272297.0, "Ops per procs": 27229766.0, "Ops/sec/procs": 2722976.0, "ns per ops/procs": 367.0}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 213476978.0, "Ops per second": 21344951.67, "ns per ops": 46.85, "Ops per threads": 133423.0, "Ops per procs": 13342311.0, "Ops/sec/procs": 1334059.48, "ns per ops/procs": 749.59}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 214624132.0, "Ops per second": 21458252.32, "ns per ops": 46.6, "Ops per threads": 134140.0, "Ops per procs": 13414008.0, "Ops/sec/procs": 1341140.77, "ns per ops/procs": 745.63}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 
10025.783632, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 550743553.0, "Total blocks": 240452132.0, "Ops per second": 54932718.8, "ns per ops": 18.2, "Ops per threads": 344214.0, "Ops per procs": 34421472.0, "Ops/sec/procs": 3433294.92, "ns per ops/procs": 291.27}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10026.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 75322787.0, "Total blocks": 28388443.0, "Ops per second": 7512321.69, "ns per ops": 133.11, "Ops per threads": 31384.0, "Ops per procs": 3138449.0, "Ops/sec/procs": 313013.4, "ns per ops/procs": 3194.75}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10008.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 79715530.0, "Total blocks": 24912815.0, "Ops per second": 7964937.17, "ns per ops": 125.55, "Ops per threads": 797155.0, "Ops per procs": 79715530.0, "Ops/sec/procs": 7964937.17, "ns per ops/procs": 125.55}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 79751618.0, "Total blocks": 24924094.0, "Ops per second": 7968946.78, "ns per ops": 125.49, "Ops per threads": 797516.0, "Ops per procs": 79751618.0, "Ops/sec/procs": 7968946.78, "ns per ops/procs": 125.49}],["rdq-churn-cfa", "./rdq-churn-cfa -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10016.627702, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 100444522.0, "Total blocks": 42678977.0, "Ops per second": 10027778.31, "ns per ops": 99.72, "Ops per threads": 1004445.0, "Ops per procs": 100444522.0, "Ops/sec/procs": 10027778.31, "ns per ops/procs": 99.72}],["rdq-churn-cfa", 
"./rdq-churn-cfa -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10006.863438, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 761548918.0, "Total blocks": 327474630.0, "Ops per second": 76102659.21, "ns per ops": 13.14, "Ops per threads": 317312.0, "Ops per procs": 31731204.0, "Ops/sec/procs": 3170944.13, "ns per ops/procs": 315.36}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10024.630415, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 549456394.0, "Total blocks": 238577198.0, "Ops per second": 54810638.52, "ns per ops": 18.24, "Ops per threads": 343410.0, "Ops per procs": 34341024.0, "Ops/sec/procs": 3425664.91, "ns per ops/procs": 291.91}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10000.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 6.0, "Total Operations(ops)": 653669226.0, "Ops per second": 65366922.0, "ns per ops": 15.0, "Ops per threads": 272362.0, "Ops per procs": 27236217.0, "Ops/sec/procs": 2723621.0, "ns per ops/procs": 367.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 6.0, "Total Operations(ops)": 478747005.0, "Ops per second": 47874700.0, "ns per ops": 20.0, "Ops per threads": 299216.0, "Ops per procs": 29921687.0, "Ops/sec/procs": 2992168.0, "ns per ops/procs": 334.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 6.0, "Total Operations(ops)": 259926863.0, "Ops per second": 25992686.0, "ns per ops": 38.0, "Ops per threads": 324908.0, "Ops per procs": 32490857.0, "Ops/sec/procs": 3249085.0, "ns per ops/procs": 310.0}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 
10 -t 2400 -s 1920", {"Duration (ms)": 10002.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 186159297.0, "Ops per second": 18611771.03, "ns per ops": 53.73, "Ops per threads": 77566.0, "Ops per procs": 7756637.0, "Ops/sec/procs": 775490.46, "ns per ops/procs": 1289.51}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 79811057.0, "Total blocks": 24942609.0, "Ops per second": 7974829.0, "ns per ops": 125.39, "Ops per threads": 798110.0, "Ops per procs": 79811057.0, "Ops/sec/procs": 7974829.0, "ns per ops/procs": 125.39}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10041.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 75138224.0, "Total blocks": 28316320.0, "Ops per second": 7483121.08, "ns per ops": 133.63, "Ops per threads": 31307.0, "Ops per procs": 3130759.0, "Ops/sec/procs": 311796.71, "ns per ops/procs": 3207.22}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10024.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 76515053.0, "Total blocks": 27532672.0, "Ops per second": 7632511.21, "ns per ops": 131.02, "Ops per threads": 47821.0, "Ops per procs": 4782190.0, "Ops/sec/procs": 477031.95, "ns per ops/procs": 2096.3}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10026.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 75360901.0, "Total blocks": 28401609.0, "Ops per second": 7515905.66, "ns per ops": 133.05, "Ops per threads": 31400.0, "Ops per procs": 3140037.0, "Ops/sec/procs": 313162.74, "ns per ops/procs": 3193.23}],["rdq-churn-go", 
"./rdq-churn-go -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 151314962.0, "Ops per second": 15129984.81, "ns per ops": 66.09, "Ops per threads": 1513149.0, "Ops per procs": 151314962.0, "Ops/sec/procs": 15129984.81, "ns per ops/procs": 66.09}],["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10011.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 79141078.0, "Total blocks": 29863613.0, "Ops per second": 7904875.43, "ns per ops": 126.5, "Ops per threads": 98926.0, "Ops per procs": 9892634.0, "Ops/sec/procs": 988109.43, "ns per ops/procs": 1012.03}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 188556624.0, "Ops per second": 18852533.83, "ns per ops": 53.04, "Ops per threads": 78565.0, "Ops per procs": 7856526.0, "Ops/sec/procs": 785522.24, "ns per ops/procs": 1273.04}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10001.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 228229792.0, "Ops per second": 22820542.67, "ns per ops": 43.82, "Ops per threads": 285287.0, "Ops per procs": 28528724.0, "Ops/sec/procs": 2852567.83, "ns per ops/procs": 350.56}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10020.121849, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 302307497.0, "Total blocks": 128429554.0, "Ops per second": 30170042.0, "ns per ops": 33.15, "Ops per threads": 377884.0, "Ops per procs": 37788437.0, "Ops/sec/procs": 3771255.25, "ns per ops/procs": 265.16}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 800 -s 640", 
{"Duration (ms)": 10001.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 226161389.0, "Ops per second": 22613570.5, "ns per ops": 44.22, "Ops per threads": 282701.0, "Ops per procs": 28270173.0, "Ops/sec/procs": 2826696.31, "ns per ops/procs": 353.77}],["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10017.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 78799007.0, "Total blocks": 29733108.0, "Ops per second": 7865960.45, "ns per ops": 127.13, "Ops per threads": 98498.0, "Ops per procs": 9849875.0, "Ops/sec/procs": 983245.06, "ns per ops/procs": 1017.04}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10005.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 226114608.0, "Ops per second": 22598888.16, "ns per ops": 44.25, "Ops per threads": 282643.0, "Ops per procs": 28264326.0, "Ops/sec/procs": 2824861.02, "ns per ops/procs": 354.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10019.343306, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 291933237.0, "Total blocks": 129498687.0, "Ops per second": 29136963.18, "ns per ops": 34.32, "Ops per threads": 364916.0, "Ops per procs": 36491654.0, "Ops/sec/procs": 3642120.4, "ns per ops/procs": 274.57}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 6.0, "Total Operations(ops)": 260279998.0, "Ops per second": 26027999.0, "ns per ops": 38.0, "Ops per threads": 325349.0, "Ops per procs": 32534999.0, "Ops/sec/procs": 3253499.0, "ns per ops/procs": 310.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10007.059222, "Number of 
processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 765662737.0, "Total blocks": 325286764.0, "Ops per second": 76512262.0, "ns per ops": 13.07, "Ops per threads": 319026.0, "Ops per procs": 31902614.0, "Ops/sec/procs": 3188010.92, "ns per ops/procs": 313.68}],["rdq-churn-cfa", "./rdq-churn-cfa -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10016.849943, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 96124027.0, "Total blocks": 44464161.0, "Ops per second": 9596233.1, "ns per ops": 104.21, "Ops per threads": 961240.0, "Ops per procs": 96124027.0, "Ops/sec/procs": 9596233.1, "ns per ops/procs": 104.21}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 6.0, "Total Operations(ops)": 74842673.0, "Ops per second": 7484267.0, "ns per ops": 134.0, "Ops per threads": 748426.0, "Ops per procs": 74842673.0, "Ops/sec/procs": 7484267.0, "ns per ops/procs": 134.0}],["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10016.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 80963737.0, "Total blocks": 30553569.0, "Ops per second": 8082674.95, "ns per ops": 123.72, "Ops per threads": 101204.0, "Ops per procs": 10120467.0, "Ops/sec/procs": 1010334.37, "ns per ops/procs": 989.77}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 6.0, "Total Operations(ops)": 73702204.0, "Ops per second": 7370220.0, "ns per ops": 137.0, "Ops per threads": 737022.0, "Ops per procs": 73702204.0, "Ops/sec/procs": 7370220.0, "ns per ops/procs": 137.0}],["rdq-churn-go", "./rdq-churn-go -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10001.0, "Number of processors": 1.0, "Number of 
threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 151011982.0, "Ops per second": 15099599.79, "ns per ops": 66.23, "Ops per threads": 1510119.0, "Ops per procs": 151011982.0, "Ops/sec/procs": 15099599.79, "ns per ops/procs": 66.23}],["rdq-churn-go", "./rdq-churn-go -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10001.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 151419332.0, "Ops per second": 15140359.92, "ns per ops": 66.05, "Ops per threads": 1514193.0, "Ops per procs": 151419332.0, "Ops/sec/procs": 15140359.92, "ns per ops/procs": 66.05}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 6.0, "Total Operations(ops)": 648186982.0, "Ops per second": 64818698.0, "ns per ops": 15.0, "Ops per threads": 270077.0, "Ops per procs": 27007790.0, "Ops/sec/procs": 2700779.0, "ns per ops/procs": 370.0}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 213886424.0, "Ops per second": 21385699.62, "ns per ops": 46.76, "Ops per threads": 133679.0, "Ops per procs": 13367901.0, "Ops/sec/procs": 1336606.23, "ns per ops/procs": 748.16}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10025.525505, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 552853400.0, "Total blocks": 239647709.0, "Ops per second": 55144580.67, "ns per ops": 18.13, "Ops per threads": 345533.0, "Ops per procs": 34553337.0, "Ops/sec/procs": 3446536.29, "ns per ops/procs": 290.15}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10020.252098, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total 
Operations(ops)": 295438407.0, "Total blocks": 128292778.0, "Ops per second": 29484129.15, "ns per ops": 33.92, "Ops per threads": 369298.0, "Ops per procs": 36929800.0, "Ops/sec/procs": 3685516.14, "ns per ops/procs": 271.33}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 6.0, "Total Operations(ops)": 476585040.0, "Ops per second": 47658504.0, "ns per ops": 20.0, "Ops per threads": 297865.0, "Ops per procs": 29786565.0, "Ops/sec/procs": 2978656.0, "ns per ops/procs": 335.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10007.127025, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 777392421.0, "Total blocks": 323387255.0, "Ops per second": 77683876.61, "ns per ops": 12.87, "Ops per threads": 323913.0, "Ops per procs": 32391350.0, "Ops/sec/procs": 3236828.19, "ns per ops/procs": 308.94}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/churn.low.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/churn.low.jax	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/data/churn.low.jax	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,1 @@
+[["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 45886505.0, "Total blocks": 23581519.0, "Ops per second": 4584996.42, "ns per ops": 218.1, "Ops per threads": 2867906.0, "Ops per procs": 5735813.0, "Ops/sec/procs": 573124.55, "ns per ops/procs": 1744.82}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10031.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 52948646.0, "Ops per second": 5278451.52, "ns per ops": 189.45, "Ops per threads": 1654645.0, "Ops per procs": 3309290.0, "Ops/sec/procs": 329903.22, "ns per ops/procs": 3031.19}],["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10008.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 45571308.0, "Total blocks": 23418912.0, "Ops per second": 4553347.51, "ns per ops": 219.62, "Ops per threads": 2848206.0, "Ops per procs": 5696413.0, "Ops/sec/procs": 569168.44, "ns per ops/procs": 1756.95}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10020.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 57549768.0, "Ops per second": 5743275.64, "ns per ops": 174.12, "Ops per threads": 3596860.0, "Ops per procs": 7193721.0, "Ops/sec/procs": 717909.45, "ns per ops/procs": 1392.93}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10025.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 52431553.0, "Ops per second": 5229696.44, "ns per ops": 191.22, "Ops per threads": 1638486.0, "Ops per procs": 3276972.0, "Ops/sec/procs": 326856.03, "ns per ops/procs": 3059.45}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 32 -s 16", {"Duration 
(ms)": 10009.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 67810265.0, "Total blocks": 32862605.0, "Ops per second": 6774910.95, "ns per ops": 147.6, "Ops per threads": 2119070.0, "Ops per procs": 4238141.0, "Ops/sec/procs": 423431.93, "ns per ops/procs": 2361.65}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10013.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 57847054.0, "Ops per second": 5777194.35, "ns per ops": 173.09, "Ops per threads": 3615440.0, "Ops per procs": 7230881.0, "Ops/sec/procs": 722149.29, "ns per ops/procs": 1384.76}],["rdq-churn-go", "./rdq-churn-go -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10065.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 75010542.0, "Ops per second": 7452234.24, "ns per ops": 134.19, "Ops per threads": 37505271.0, "Ops per procs": 75010542.0, "Ops/sec/procs": 7452234.24, "ns per ops/procs": 134.19}],["rdq-churn-go", "./rdq-churn-go -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10018.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 75353303.0, "Ops per second": 7521765.77, "ns per ops": 132.95, "Ops per threads": 37676651.0, "Ops per procs": 75353303.0, "Ops/sec/procs": 7521765.77, "ns per ops/procs": 132.95}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10038.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 77580756.0, "Ops per second": 7728213.1, "ns per ops": 129.4, "Ops per threads": 1616265.0, "Ops per procs": 3232531.0, "Ops/sec/procs": 322008.88, "ns per ops/procs": 3105.5}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 6.0, "Total 
Operations(ops)": 55704847.0, "Ops per second": 5570484.0, "ns per ops": 181.0, "Ops per threads": 1740776.0, "Ops per procs": 3481552.0, "Ops/sec/procs": 348155.0, "ns per ops/procs": 2901.0}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 75768750.0, "Total blocks": 39198331.0, "Ops per second": 7575582.46, "ns per ops": 132.0, "Ops per threads": 1578515.0, "Ops per procs": 3157031.0, "Ops/sec/procs": 315649.27, "ns per ops/procs": 3168.07}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10009.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 68082972.0, "Total blocks": 32947596.0, "Ops per second": 6801956.28, "ns per ops": 147.02, "Ops per threads": 2127592.0, "Ops per procs": 4255185.0, "Ops/sec/procs": 425122.27, "ns per ops/procs": 2352.26}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 6.0, "Total Operations(ops)": 53235539.0, "Ops per second": 5323553.0, "ns per ops": 189.0, "Ops per threads": 26617769.0, "Ops per procs": 53235539.0, "Ops/sec/procs": 5323553.0, "ns per ops/procs": 189.0}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10029.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 51995117.0, "Ops per second": 5184034.28, "ns per ops": 192.9, "Ops per threads": 1624847.0, "Ops per procs": 3249694.0, "Ops/sec/procs": 324002.14, "ns per ops/procs": 3086.4}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 6.0, "Total Operations(ops)": 75855247.0, "Ops per second": 7585524.0, "ns per 
ops": 133.0, "Ops per threads": 1580317.0, "Ops per procs": 3160635.0, "Ops/sec/procs": 316063.0, "ns per ops/procs": 3195.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10016.677107, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 82722736.0, "Total blocks": 51097615.0, "Ops per second": 8258500.81, "ns per ops": 121.09, "Ops per threads": 41361368.0, "Ops per procs": 82722736.0, "Ops/sec/procs": 8258500.81, "ns per ops/procs": 121.09}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 6.0, "Total Operations(ops)": 82039759.0, "Ops per second": 8203975.0, "ns per ops": 123.0, "Ops per threads": 1709161.0, "Ops per procs": 3418323.0, "Ops/sec/procs": 341832.0, "ns per ops/procs": 2954.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10019.859008, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 60823669.0, "Total blocks": 20963529.0, "Ops per second": 6070311.86, "ns per ops": 164.74, "Ops per threads": 3801479.0, "Ops per procs": 7602958.0, "Ops/sec/procs": 758788.98, "ns per ops/procs": 1317.89}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10026.064514, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 84419895.0, "Total blocks": 31896077.0, "Ops per second": 8420043.07, "ns per ops": 118.76, "Ops per threads": 2638121.0, "Ops per procs": 5276243.0, "Ops/sec/procs": 526252.69, "ns per ops/procs": 1900.23}],["rdq-churn-go", "./rdq-churn-go -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10066.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 86259431.0, "Ops per second": 8569319.99, "ns per ops": 116.7, "Ops per threads": 43129715.0, 
"Ops per procs": 86259431.0, "Ops/sec/procs": 8569319.99, "ns per ops/procs": 116.7}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10020.476753, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 61723297.0, "Total blocks": 27893419.0, "Ops per second": 6159716.6, "ns per ops": 162.35, "Ops per threads": 3857706.0, "Ops per procs": 7715412.0, "Ops/sec/procs": 769964.58, "ns per ops/procs": 1298.76}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 6.0, "Total Operations(ops)": 39895438.0, "Ops per second": 3989543.0, "ns per ops": 253.0, "Ops per threads": 2493464.0, "Ops per procs": 4986929.0, "Ops/sec/procs": 498692.0, "ns per ops/procs": 2025.0}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10044.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 78307639.0, "Ops per second": 7795946.42, "ns per ops": 128.27, "Ops per threads": 1631409.0, "Ops per procs": 3262818.0, "Ops/sec/procs": 324831.1, "ns per ops/procs": 3078.52}],["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10008.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 45589496.0, "Total blocks": 23479172.0, "Ops per second": 4555270.66, "ns per ops": 219.53, "Ops per threads": 2849343.0, "Ops per procs": 5698687.0, "Ops/sec/procs": 569408.83, "ns per ops/procs": 1756.21}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 6.0, "Total Operations(ops)": 52936021.0, "Ops per second": 5293602.0, "ns per ops": 190.0, "Ops per threads": 26468010.0, "Ops per procs": 52936021.0, "Ops/sec/procs": 5293602.0, "ns per ops/procs": 
190.0}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 74937686.0, "Total blocks": 38772422.0, "Ops per second": 7492506.92, "ns per ops": 133.47, "Ops per threads": 1561201.0, "Ops per procs": 3122403.0, "Ops/sec/procs": 312187.79, "ns per ops/procs": 3203.2}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10019.966204, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 66694419.0, "Total blocks": 28038485.0, "Ops per second": 6656152.09, "ns per ops": 150.24, "Ops per threads": 4168401.0, "Ops per procs": 8336802.0, "Ops/sec/procs": 832019.01, "ns per ops/procs": 1201.9}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 6.0, "Total Operations(ops)": 55692916.0, "Ops per second": 5569291.0, "ns per ops": 181.0, "Ops per threads": 1740403.0, "Ops per procs": 3480807.0, "Ops/sec/procs": 348080.0, "ns per ops/procs": 2901.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10007.064432, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 112046278.0, "Total blocks": 39631405.0, "Ops per second": 11196717.95, "ns per ops": 89.31, "Ops per threads": 2334297.0, "Ops per procs": 4668594.0, "Ops/sec/procs": 466529.91, "ns per ops/procs": 2143.49}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 6.0, "Total Operations(ops)": 34267306.0, "Ops per second": 3426730.0, "ns per ops": 294.0, "Ops per threads": 2141706.0, "Ops per procs": 4283413.0, "Ops/sec/procs": 428341.0, "ns per ops/procs": 2357.0}],["rdq-churn-cfa", 
"./rdq-churn-cfa -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10016.937779, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 77026352.0, "Total blocks": 52134150.0, "Ops per second": 7689610.71, "ns per ops": 130.05, "Ops per threads": 38513176.0, "Ops per procs": 77026352.0, "Ops/sec/procs": 7689610.71, "ns per ops/procs": 130.05}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 59271050.0, "Total blocks": 0.0, "Ops per second": 5922868.5, "ns per ops": 168.84, "Ops per threads": 29635525.0, "Ops per procs": 59271050.0, "Ops/sec/procs": 5922868.5, "ns per ops/procs": 168.84}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 59229442.0, "Total blocks": 0.0, "Ops per second": 5918658.48, "ns per ops": 168.96, "Ops per threads": 29614721.0, "Ops per procs": 59229442.0, "Ops/sec/procs": 5918658.48, "ns per ops/procs": 168.96}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 6.0, "Total Operations(ops)": 76525587.0, "Ops per second": 7652558.0, "ns per ops": 131.0, "Ops per threads": 1594283.0, "Ops per procs": 3188566.0, "Ops/sec/procs": 318856.0, "ns per ops/procs": 3167.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 6.0, "Total Operations(ops)": 35399271.0, "Ops per second": 3539927.0, "ns per ops": 285.0, "Ops per threads": 2212454.0, "Ops per procs": 4424908.0, "Ops/sec/procs": 442490.0, "ns per ops/procs": 2282.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 2 -s 1", {"Duration 
(ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 6.0, "Total Operations(ops)": 52944602.0, "Ops per second": 5294460.0, "ns per ops": 190.0, "Ops per threads": 26472301.0, "Ops per procs": 52944602.0, "Ops/sec/procs": 5294460.0, "ns per ops/procs": 190.0}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 59246475.0, "Total blocks": 0.0, "Ops per second": 5920233.28, "ns per ops": 168.91, "Ops per threads": 29623237.0, "Ops per procs": 59246475.0, "Ops/sec/procs": 5920233.28, "ns per ops/procs": 168.91}],["rdq-churn-cfa", "./rdq-churn-cfa -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10017.056033, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 78139970.0, "Total blocks": 50626382.0, "Ops per second": 7800692.11, "ns per ops": 128.19, "Ops per threads": 39069985.0, "Ops per procs": 78139970.0, "Ops/sec/procs": 7800692.11, "ns per ops/procs": 128.19}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10024.66772, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 91752654.0, "Total blocks": 25309024.0, "Ops per second": 9152687.81, "ns per ops": 109.26, "Ops per threads": 2867270.0, "Ops per procs": 5734540.0, "Ops/sec/procs": 572042.99, "ns per ops/procs": 1748.12}],["rdq-churn-cfa", "./rdq-churn-cfa -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10007.111246, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 103630541.0, "Total blocks": 37194166.0, "Ops per second": 10355689.91, "ns per ops": 96.57, "Ops per threads": 2158969.0, "Ops per procs": 4317939.0, "Ops/sec/procs": 431487.08, "ns per ops/procs": 2317.57}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 
10001.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 75844225.0, "Total blocks": 39206982.0, "Ops per second": 7583184.77, "ns per ops": 131.87, "Ops per threads": 1580088.0, "Ops per procs": 3160176.0, "Ops/sec/procs": 315966.03, "ns per ops/procs": 3164.9}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10039.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 78988905.0, "Ops per second": 7867923.65, "ns per ops": 127.1, "Ops per threads": 1645602.0, "Ops per procs": 3291204.0, "Ops/sec/procs": 327830.15, "ns per ops/procs": 3050.36}],["rdq-churn-cfa", "./rdq-churn-cfa -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10007.049884, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 103737120.0, "Total blocks": 40651067.0, "Ops per second": 10366403.81, "ns per ops": 96.47, "Ops per threads": 2161190.0, "Ops per procs": 4322380.0, "Ops/sec/procs": 431933.49, "ns per ops/procs": 2315.17}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 6.0, "Total Operations(ops)": 46882118.0, "Ops per second": 4688211.0, "ns per ops": 215.0, "Ops per threads": 1465066.0, "Ops per procs": 2930132.0, "Ops/sec/procs": 293013.0, "ns per ops/procs": 3446.0}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10018.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 57617091.0, "Ops per second": 5751024.83, "ns per ops": 173.88, "Ops per threads": 3601068.0, "Ops per procs": 7202136.0, "Ops/sec/procs": 718878.1, "ns per ops/procs": 1391.06}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10025.5689, "Number of processors": 16.0, "Number of threads": 32.0, 
"Number of spots": 16.0, "Total Operations(ops)": 93174864.0, "Total blocks": 36631659.0, "Ops per second": 9293723.37, "ns per ops": 107.6, "Ops per threads": 2911714.0, "Ops per procs": 5823429.0, "Ops/sec/procs": 580857.71, "ns per ops/procs": 1721.59}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10008.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 68440910.0, "Total blocks": 33142661.0, "Ops per second": 6838025.5, "ns per ops": 146.24, "Ops per threads": 2138778.0, "Ops per procs": 4277556.0, "Ops/sec/procs": 427376.59, "ns per ops/procs": 2339.86}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/cycle.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/cycle.jax	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/thierry_delisle_PhD/thesis/data/cycle.jax	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1,1 +1,1 @@
-[["rdq-cycle-go", "./rdq-cycle-go -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 43606897.0, "Ops per second": 8720908.73, "ns per ops": 114.67, "Ops per threads": 2180344.0, "Ops per procs": 10901724.0, "Ops/sec/procs": 2180227.18, "ns per ops/procs": 458.67}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5010.922033, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 93993568.0, "Total blocks": 93993209.0, "Ops per second": 18757739.07, "ns per ops": 53.31, "Ops per threads": 1174919.0, "Ops per procs": 5874598.0, "Ops/sec/procs": 1172358.69, "ns per ops/procs": 852.98}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 136763517.0, "Ops per second": 27351079.35, "ns per ops": 36.56, "Ops per threads": 1709543.0, "Ops per procs": 8547719.0, "Ops/sec/procs": 1709442.46, "ns per ops/procs": 584.99}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 27778961.0, "Ops per second": 5555545.09, "ns per ops": 180.0, "Ops per threads": 5555792.0, "Ops per procs": 27778961.0, "Ops/sec/procs": 5555545.09, "ns per ops/procs": 180.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5009.290878, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 43976310.0, "Total blocks": 43976217.0, "Ops per second": 8778949.17, "ns per ops": 113.91, "Ops per threads": 2198815.0, "Ops per procs": 10994077.0, "Ops/sec/procs": 2194737.29, "ns per ops/procs": 455.64}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", 
{"Duration (ms)": 5009.151542, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44132300.0, "Total blocks": 44132201.0, "Ops per second": 8810334.37, "ns per ops": 113.5, "Ops per threads": 2206615.0, "Ops per procs": 11033075.0, "Ops/sec/procs": 2202583.59, "ns per ops/procs": 454.01}],["rdq-cycle-go", "./rdq-cycle-go -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 46353896.0, "Ops per second": 9270294.11, "ns per ops": 107.87, "Ops per threads": 2317694.0, "Ops per procs": 11588474.0, "Ops/sec/procs": 2317573.53, "ns per ops/procs": 431.49}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 27894379.0, "Ops per second": 5578591.58, "ns per ops": 179.26, "Ops per threads": 5578875.0, "Ops per procs": 27894379.0, "Ops/sec/procs": 5578591.58, "ns per ops/procs": 179.26}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.743463, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32825528.0, "Total blocks": 32825527.0, "Ops per second": 6553645.29, "ns per ops": 152.59, "Ops per threads": 6565105.0, "Ops per procs": 32825528.0, "Ops/sec/procs": 6553645.29, "ns per ops/procs": 152.59}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 138213098.0, "Ops per second": 27640977.5, "ns per ops": 36.18, "Ops per threads": 1727663.0, "Ops per procs": 8638318.0, "Ops/sec/procs": 1727561.09, "ns per ops/procs": 578.85}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5007.914168, "Number of processors": 4.0, "Number of 
threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44109513.0, "Total blocks": 44109419.0, "Ops per second": 8807961.06, "ns per ops": 113.53, "Ops per threads": 2205475.0, "Ops per procs": 11027378.0, "Ops/sec/procs": 2201990.27, "ns per ops/procs": 454.13}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5012.121876, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 94130673.0, "Total blocks": 94130291.0, "Ops per second": 18780603.37, "ns per ops": 53.25, "Ops per threads": 1176633.0, "Ops per procs": 5883167.0, "Ops/sec/procs": 1173787.71, "ns per ops/procs": 851.94}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 140936367.0, "Ops per second": 28185668.38, "ns per ops": 35.48, "Ops per threads": 1761704.0, "Ops per procs": 8808522.0, "Ops/sec/procs": 1761604.27, "ns per ops/procs": 567.66}],["rdq-cycle-go", "./rdq-cycle-go -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44279585.0, "Ops per second": 8855475.01, "ns per ops": 112.92, "Ops per threads": 2213979.0, "Ops per procs": 11069896.0, "Ops/sec/procs": 2213868.75, "ns per ops/procs": 451.7}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.37392, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32227534.0, "Total blocks": 32227533.0, "Ops per second": 6434730.02, "ns per ops": 155.41, "Ops per threads": 6445506.0, "Ops per procs": 32227534.0, "Ops/sec/procs": 6434730.02, "ns per ops/procs": 155.41}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5011.019789, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size 
(# thrds)": 5.0, "Total Operations(ops)": 90600569.0, "Total blocks": 90600173.0, "Ops per second": 18080265.66, "ns per ops": 55.31, "Ops per threads": 1132507.0, "Ops per procs": 5662535.0, "Ops/sec/procs": 1130016.6, "ns per ops/procs": 884.94}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.52474, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32861776.0, "Total blocks": 32861775.0, "Ops per second": 6561168.75, "ns per ops": 152.41, "Ops per threads": 6572355.0, "Ops per procs": 32861776.0, "Ops/sec/procs": 6561168.75, "ns per ops/procs": 152.41}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 28097680.0, "Ops per second": 5619274.9, "ns per ops": 177.96, "Ops per threads": 5619536.0, "Ops per procs": 28097680.0, "Ops/sec/procs": 5619274.9, "ns per ops/procs": 177.96}]]
+[["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1138076440.0, "Ops per second": 113792094.48, "ns per ops": 8.79, "Ops per threads": 94839.0, "Ops per procs": 47419851.0, "Ops/sec/procs": 4741337.27, "ns per ops/procs": 210.91}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 200285.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 17638575791.0, "Ops per second": 88067238.72, "ns per ops": 11.35, "Ops per threads": 2204821.0, "Ops per procs": 1102410986.0, "Ops/sec/procs": 5504202.42, "ns per ops/procs": 181.68}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54856916.0, "Ops per second": 5485691.0, "ns per ops": 184.0, "Ops per threads": 109713.0, "Ops per procs": 54856916.0, "Ops/sec/procs": 5485691.0, "ns per ops/procs": 184.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.449006, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 558836360.0, "Total blocks": 558836360.0, "Ops per second": 55741778.71, "ns per ops": 17.94, "Ops per threads": 69854.0, "Ops per procs": 34927272.0, "Ops/sec/procs": 3483861.17, "ns per ops/procs": 287.04}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10038.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 58647049.0, "Total blocks": 58647049.0, "Ops per second": 5842287.68, "ns per ops": 171.17, "Ops per threads": 7330.0, "Ops per procs": 3665440.0, "Ops/sec/procs": 365142.98, "ns per ops/procs": 2738.65}],["rdq-cycle-cfa", 
"./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10003.489711, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 728096996.0, "Total blocks": 728096996.0, "Ops per second": 72784299.98, "ns per ops": 13.74, "Ops per threads": 60674.0, "Ops per procs": 30337374.0, "Ops/sec/procs": 3032679.17, "ns per ops/procs": 329.74}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10021.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 63157049.0, "Total blocks": 63157049.0, "Ops per second": 6302255.13, "ns per ops": 158.67, "Ops per threads": 15789.0, "Ops per procs": 7894631.0, "Ops/sec/procs": 787781.89, "ns per ops/procs": 1269.39}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10009.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62412200.0, "Total blocks": 62411700.0, "Ops per second": 6235572.31, "ns per ops": 160.37, "Ops per threads": 124824.0, "Ops per procs": 62412200.0, "Ops/sec/procs": 6235572.31, "ns per ops/procs": 160.37}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 464608617.0, "Ops per second": 46457191.42, "ns per ops": 21.53, "Ops per threads": 116152.0, "Ops per procs": 58076077.0, "Ops/sec/procs": 5807148.93, "ns per ops/procs": 172.2}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 391521066.0, "Ops per second": 39152106.0, "ns per ops": 25.0, "Ops per threads": 97880.0, "Ops per procs": 48940133.0, "Ops/sec/procs": 4894013.0, "ns per ops/procs": 206.0}],["rdq-cycle-tokio", 
"./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 963549550.0, "Ops per second": 96354955.0, "ns per ops": 10.0, "Ops per threads": 80295.0, "Ops per procs": 40147897.0, "Ops/sec/procs": 4014789.0, "ns per ops/procs": 251.0}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 867718190.0, "Ops per second": 86761170.55, "ns per ops": 11.53, "Ops per threads": 108464.0, "Ops per procs": 54232386.0, "Ops/sec/procs": 5422573.16, "ns per ops/procs": 184.41}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10100.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 962016289.0, "Ops per second": 96201628.0, "ns per ops": 10.0, "Ops per threads": 80168.0, "Ops per procs": 40084012.0, "Ops/sec/procs": 4008401.0, "ns per ops/procs": 251.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.837824, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54738237.0, "Total blocks": 54737741.0, "Ops per second": 5464622.46, "ns per ops": 183.0, "Ops per threads": 109476.0, "Ops per procs": 54738237.0, "Ops/sec/procs": 5464622.46, "ns per ops/procs": 183.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 731309408.0, "Ops per second": 73130940.0, "ns per ops": 13.0, "Ops per threads": 91413.0, "Ops per procs": 45706838.0, "Ops/sec/procs": 4570683.0, "ns per ops/procs": 220.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration 
(ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 739772688.0, "Ops per second": 73977268.0, "ns per ops": 13.0, "Ops per threads": 92471.0, "Ops per procs": 46235793.0, "Ops/sec/procs": 4623579.0, "ns per ops/procs": 218.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 391449785.0, "Ops per second": 39144978.0, "ns per ops": 25.0, "Ops per threads": 97862.0, "Ops per procs": 48931223.0, "Ops/sec/procs": 4893122.0, "ns per ops/procs": 206.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10048.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 57239183.0, "Total blocks": 57239183.0, "Ops per second": 5696211.13, "ns per ops": 175.56, "Ops per threads": 4769.0, "Ops per procs": 2384965.0, "Ops/sec/procs": 237342.13, "ns per ops/procs": 4213.33}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55248375.0, "Ops per second": 5524562.87, "ns per ops": 181.01, "Ops per threads": 110496.0, "Ops per procs": 55248375.0, "Ops/sec/procs": 5524562.87, "ns per ops/procs": 181.01}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10021.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 61553053.0, "Total blocks": 61553053.0, "Ops per second": 6142186.88, "ns per ops": 162.81, "Ops per threads": 15388.0, "Ops per procs": 7694131.0, "Ops/sec/procs": 767773.36, "ns per ops/procs": 1302.47}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10008.0, "Number of processors": 
1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62811642.0, "Total blocks": 62811142.0, "Ops per second": 6275517.47, "ns per ops": 159.35, "Ops per threads": 125623.0, "Ops per procs": 62811642.0, "Ops/sec/procs": 6275517.47, "ns per ops/procs": 159.35}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10018.820873, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 260866706.0, "Total blocks": 260862710.0, "Ops per second": 26037665.44, "ns per ops": 38.41, "Ops per threads": 65216.0, "Ops per procs": 32608338.0, "Ops/sec/procs": 3254708.18, "ns per ops/procs": 307.25}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 874581175.0, "Ops per second": 87449851.2, "ns per ops": 11.44, "Ops per threads": 109322.0, "Ops per procs": 54661323.0, "Ops/sec/procs": 5465615.7, "ns per ops/procs": 182.96}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55228782.0, "Ops per second": 5522878.0, "ns per ops": 182.0, "Ops per threads": 110457.0, "Ops per procs": 55228782.0, "Ops/sec/procs": 5522878.0, "ns per ops/procs": 182.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10009.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62564955.0, "Total blocks": 62564455.0, "Ops per second": 6250797.96, "ns per ops": 159.98, "Ops per threads": 125129.0, "Ops per procs": 62564955.0, "Ops/sec/procs": 6250797.96, "ns per ops/procs": 159.98}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10100.0, "Number of processors": 16.0, 
"Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 738848909.0, "Ops per second": 73884890.0, "ns per ops": 13.0, "Ops per threads": 92356.0, "Ops per procs": 46178056.0, "Ops/sec/procs": 4617805.0, "ns per ops/procs": 218.0}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1131221613.0, "Ops per second": 113108175.94, "ns per ops": 8.84, "Ops per threads": 94268.0, "Ops per procs": 47134233.0, "Ops/sec/procs": 4712840.66, "ns per ops/procs": 212.19}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10008.209159, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 729328104.0, "Total blocks": 729328099.0, "Ops per second": 72872987.81, "ns per ops": 13.72, "Ops per threads": 60777.0, "Ops per procs": 30388671.0, "Ops/sec/procs": 3036374.49, "ns per ops/procs": 329.34}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 961002611.0, "Ops per second": 96100261.0, "ns per ops": 10.0, "Ops per threads": 80083.0, "Ops per procs": 40041775.0, "Ops/sec/procs": 4004177.0, "ns per ops/procs": 252.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 390098231.0, "Ops per second": 39009823.0, "ns per ops": 25.0, "Ops per threads": 97524.0, "Ops per procs": 48762278.0, "Ops/sec/procs": 4876227.0, "ns per ops/procs": 207.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, 
"Total Operations(ops)": 55237591.0, "Ops per second": 5523759.0, "ns per ops": 182.0, "Ops per threads": 110475.0, "Ops per procs": 55237591.0, "Ops/sec/procs": 5523759.0, "ns per ops/procs": 182.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.576699, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54510321.0, "Total blocks": 54509820.0, "Ops per second": 5442011.04, "ns per ops": 183.76, "Ops per threads": 109020.0, "Ops per procs": 54510321.0, "Ops/sec/procs": 5442011.04, "ns per ops/procs": 183.76}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1135730371.0, "Ops per second": 113558509.97, "ns per ops": 8.81, "Ops per threads": 94644.0, "Ops per procs": 47322098.0, "Ops/sec/procs": 4731604.58, "ns per ops/procs": 211.34}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10039.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 61004037.0, "Total blocks": 61004037.0, "Ops per second": 6076255.04, "ns per ops": 164.58, "Ops per threads": 7625.0, "Ops per procs": 3812752.0, "Ops/sec/procs": 379765.94, "ns per ops/procs": 2633.2}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10004.891999, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 747946345.0, "Total blocks": 747934349.0, "Ops per second": 74758062.86, "ns per ops": 13.38, "Ops per threads": 62328.0, "Ops per procs": 31164431.0, "Ops/sec/procs": 3114919.29, "ns per ops/procs": 321.04}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total 
Operations(ops)": 466424792.0, "Ops per second": 46638931.23, "ns per ops": 21.44, "Ops per threads": 116606.0, "Ops per procs": 58303099.0, "Ops/sec/procs": 5829866.4, "ns per ops/procs": 171.53}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10086.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 57343570.0, "Total blocks": 57343570.0, "Ops per second": 5685308.81, "ns per ops": 175.89, "Ops per threads": 4778.0, "Ops per procs": 2389315.0, "Ops/sec/procs": 236887.87, "ns per ops/procs": 4221.41}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10020.39533, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 263517289.0, "Total blocks": 263513293.0, "Ops per second": 26298093.07, "ns per ops": 38.03, "Ops per threads": 65879.0, "Ops per procs": 32939661.0, "Ops/sec/procs": 3287261.63, "ns per ops/procs": 304.2}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.357431, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 551670395.0, "Total blocks": 551662399.0, "Ops per second": 55027503.89, "ns per ops": 18.17, "Ops per threads": 68958.0, "Ops per procs": 34479399.0, "Ops/sec/procs": 3439218.99, "ns per ops/procs": 290.76}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10050.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56162695.0, "Total blocks": 56162695.0, "Ops per second": 5588033.65, "ns per ops": 178.95, "Ops per threads": 4680.0, "Ops per procs": 2340112.0, "Ops/sec/procs": 232834.74, "ns per ops/procs": 4294.89}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10019.690183, "Number of processors": 8.0, "Number of threads": 4000.0, 
"Cycle size (# thrds)": 5.0, "Total Operations(ops)": 271866976.0, "Total blocks": 271862980.0, "Ops per second": 27133271.69, "ns per ops": 36.86, "Ops per threads": 67966.0, "Ops per procs": 33983372.0, "Ops/sec/procs": 3391658.96, "ns per ops/procs": 294.84}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10057.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62105022.0, "Total blocks": 62105022.0, "Ops per second": 6175186.04, "ns per ops": 161.94, "Ops per threads": 15526.0, "Ops per procs": 7763127.0, "Ops/sec/procs": 771898.25, "ns per ops/procs": 1295.51}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.81217, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 537080117.0, "Total blocks": 537072121.0, "Ops per second": 53569736.59, "ns per ops": 18.67, "Ops per threads": 67135.0, "Ops per procs": 33567507.0, "Ops/sec/procs": 3348108.54, "ns per ops/procs": 298.68}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55967030.0, "Ops per second": 5596438.25, "ns per ops": 178.69, "Ops per threads": 111934.0, "Ops per procs": 55967030.0, "Ops/sec/procs": 5596438.25, "ns per ops/procs": 178.69}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55703320.0, "Ops per second": 5570084.72, "ns per ops": 179.53, "Ops per threads": 111406.0, "Ops per procs": 55703320.0, "Ops/sec/procs": 5570084.72, "ns per ops/procs": 179.53}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# 
thrds)": 5.0, "Total Operations(ops)": 469211793.0, "Ops per second": 46918327.16, "ns per ops": 21.31, "Ops per threads": 117302.0, "Ops per procs": 58651474.0, "Ops/sec/procs": 5864790.9, "ns per ops/procs": 170.51}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.545208, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54925472.0, "Total blocks": 54924976.0, "Ops per second": 5483474.68, "ns per ops": 182.37, "Ops per threads": 109850.0, "Ops per procs": 54925472.0, "Ops/sec/procs": 5483474.68, "ns per ops/procs": 182.37}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10037.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 60770550.0, "Total blocks": 60770550.0, "Ops per second": 6054474.7, "ns per ops": 165.17, "Ops per threads": 7596.0, "Ops per procs": 3798159.0, "Ops/sec/procs": 378404.67, "ns per ops/procs": 2642.67}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/cycle.low.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/cycle.low.jax	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/data/cycle.low.jax	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,1 @@
+[["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10012.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 76310077.0, "Ops per second": 7631007.0, "ns per ops": 131.0, "Ops per threads": 1907751.0, "Ops per procs": 9538759.0, "Ops/sec/procs": 953875.0, "ns per ops/procs": 1049.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10010.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 60971759.0, "Total blocks": 60971758.0, "Ops per second": 6090773.1, "ns per ops": 164.18, "Ops per threads": 762146.0, "Ops per procs": 3810734.0, "Ops/sec/procs": 380673.32, "ns per ops/procs": 2626.92}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10025.310277, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 181018643.0, "Total blocks": 181017892.0, "Ops per second": 18056163.65, "ns per ops": 55.38, "Ops per threads": 2262733.0, "Ops per procs": 11313665.0, "Ops/sec/procs": 1128510.23, "ns per ops/procs": 886.12}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10026.598882, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 194131436.0, "Total blocks": 194130800.0, "Ops per second": 19361643.79, "ns per ops": 51.65, "Ops per threads": 2426642.0, "Ops per procs": 12133214.0, "Ops/sec/procs": 1210102.74, "ns per ops/procs": 826.38}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10006.334698, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 227505184.0, "Total blocks": 227503833.0, "Ops per second": 22736115.76, "ns per ops": 43.98, "Ops per threads": 1895876.0, "Ops per procs": 9479382.0, "Ops/sec/procs": 947338.16, 
"ns per ops/procs": 1055.59}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10016.990169, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 66752749.0, "Total blocks": 66752748.0, "Ops per second": 6663952.73, "ns per ops": 150.06, "Ops per threads": 13350549.0, "Ops per procs": 66752749.0, "Ops/sec/procs": 6663952.73, "ns per ops/procs": 150.06}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 279630079.0, "Ops per second": 27961432.66, "ns per ops": 35.76, "Ops per threads": 3495375.0, "Ops per procs": 17476879.0, "Ops/sec/procs": 1747589.54, "ns per ops/procs": 572.22}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 59771852.0, "Total blocks": 59771852.0, "Ops per second": 5972444.0, "ns per ops": 167.44, "Ops per threads": 1494296.0, "Ops per procs": 7471481.0, "Ops/sec/procs": 746555.5, "ns per ops/procs": 1339.49}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 232398538.0, "Ops per second": 23239853.0, "ns per ops": 43.0, "Ops per threads": 1936654.0, "Ops per procs": 9683272.0, "Ops/sec/procs": 968327.0, "ns per ops/procs": 1043.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10020.460683, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 123722971.0, "Total blocks": 123722720.0, "Ops per second": 12347034.22, "ns per ops": 80.99, "Ops per threads": 3093074.0, "Ops per procs": 15465371.0, "Ops/sec/procs": 1543379.28, "ns per 
ops/procs": 647.93}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56469367.0, "Ops per second": 5646680.63, "ns per ops": 177.1, "Ops per threads": 11293873.0, "Ops per procs": 56469367.0, "Ops/sec/procs": 5646680.63, "ns per ops/procs": 177.1}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10016.913984, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 66648794.0, "Total blocks": 66648790.0, "Ops per second": 6653625.47, "ns per ops": 150.29, "Ops per threads": 13329758.0, "Ops per procs": 66648794.0, "Ops/sec/procs": 6653625.47, "ns per ops/procs": 150.29}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 178174877.0, "Ops per second": 17816504.68, "ns per ops": 56.13, "Ops per threads": 4454371.0, "Ops per procs": 22271859.0, "Ops/sec/procs": 2227063.08, "ns per ops/procs": 449.02}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10004.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 59200307.0, "Total blocks": 59200307.0, "Ops per second": 5917304.82, "ns per ops": 169.0, "Ops per threads": 493335.0, "Ops per procs": 2466679.0, "Ops/sec/procs": 246554.37, "ns per ops/procs": 4055.9}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10000.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 489892922.0, "Ops per second": 48986097.69, "ns per ops": 20.41, "Ops per threads": 4082441.0, "Ops per procs": 20412205.0, "Ops/sec/procs": 2041087.4, "ns per ops/procs": 489.93}],["rdq-cycle-tokio", 
"./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10100.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 267579722.0, "Ops per second": 26757972.0, "ns per ops": 37.0, "Ops per threads": 2229831.0, "Ops per procs": 11149155.0, "Ops/sec/procs": 1114915.0, "ns per ops/procs": 905.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10002.567137, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 221454282.0, "Total blocks": 221452348.0, "Ops per second": 22139744.62, "ns per ops": 45.17, "Ops per threads": 1845452.0, "Ops per procs": 9227261.0, "Ops/sec/procs": 922489.36, "ns per ops/procs": 1084.02}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10020.640204, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 119212534.0, "Total blocks": 119212291.0, "Ops per second": 11896698.37, "ns per ops": 84.06, "Ops per threads": 2980313.0, "Ops per procs": 14901566.0, "Ops/sec/procs": 1487087.3, "ns per ops/procs": 672.46}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10015.706272, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 66731723.0, "Total blocks": 66731719.0, "Ops per second": 6662707.67, "ns per ops": 150.09, "Ops per threads": 13346344.0, "Ops per procs": 66731723.0, "Ops/sec/procs": 6662707.67, "ns per ops/procs": 150.09}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 152568234.0, "Ops per second": 15255771.25, "ns per ops": 65.55, "Ops per threads": 3814205.0, "Ops per procs": 19071029.0, "Ops/sec/procs": 1906971.41, "ns per ops/procs": 524.39}],["rdq-cycle-go", 
"./rdq-cycle-go -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 289322235.0, "Ops per second": 28930586.92, "ns per ops": 34.57, "Ops per threads": 3616527.0, "Ops per procs": 18082639.0, "Ops/sec/procs": 1808161.68, "ns per ops/procs": 553.05}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10000.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 487856302.0, "Ops per second": 48781608.3, "ns per ops": 20.5, "Ops per threads": 4065469.0, "Ops per procs": 20327345.0, "Ops/sec/procs": 2032567.01, "ns per ops/procs": 491.99}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10009.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 76846283.0, "Ops per second": 7684628.0, "ns per ops": 130.0, "Ops per threads": 1921157.0, "Ops per procs": 9605785.0, "Ops/sec/procs": 960578.0, "ns per ops/procs": 1042.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10002.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 57552105.0, "Total blocks": 57552103.0, "Ops per second": 5753926.0, "ns per ops": 173.79, "Ops per threads": 479600.0, "Ops per procs": 2398004.0, "Ops/sec/procs": 239746.92, "ns per ops/procs": 4171.07}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10016.759028, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 123970604.0, "Total blocks": 123970351.0, "Ops per second": 12376318.89, "ns per ops": 80.8, "Ops per threads": 3099265.0, "Ops per procs": 15496325.0, "Ops/sec/procs": 1547039.86, "ns per ops/procs": 646.4}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 8", 
{"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 61716938.0, "Total blocks": 61716938.0, "Ops per second": 6166945.93, "ns per ops": 162.15, "Ops per threads": 1542923.0, "Ops per procs": 7714617.0, "Ops/sec/procs": 770868.24, "ns per ops/procs": 1297.24}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 70872904.0, "Total blocks": 70872899.0, "Ops per second": 7081970.19, "ns per ops": 141.2, "Ops per threads": 14174580.0, "Ops per procs": 70872904.0, "Ops/sec/procs": 7081970.19, "ns per ops/procs": 141.2}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 156397978.0, "Ops per second": 15639797.0, "ns per ops": 63.0, "Ops per threads": 1954974.0, "Ops per procs": 9774873.0, "Ops/sec/procs": 977487.0, "ns per ops/procs": 1023.0}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 150292371.0, "Ops per second": 15028431.57, "ns per ops": 66.54, "Ops per threads": 3757309.0, "Ops per procs": 18786546.0, "Ops/sec/procs": 1878553.95, "ns per ops/procs": 532.32}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10009.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 59322364.0, "Total blocks": 59322364.0, "Ops per second": 5926827.38, "ns per ops": 168.72, "Ops per threads": 741529.0, "Ops per procs": 3707647.0, "Ops/sec/procs": 370426.71, "ns per ops/procs": 2699.59}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10000.0, 
"Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 484392787.0, "Ops per second": 48436133.63, "ns per ops": 20.65, "Ops per threads": 4036606.0, "Ops per procs": 20183032.0, "Ops/sec/procs": 2018172.23, "ns per ops/procs": 495.5}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 71150883.0, "Total blocks": 71150878.0, "Ops per second": 7109813.4, "ns per ops": 140.65, "Ops per threads": 14230176.0, "Ops per procs": 71150883.0, "Ops/sec/procs": 7109813.4, "ns per ops/procs": 140.65}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10008.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 80740751.0, "Ops per second": 8074075.0, "ns per ops": 123.0, "Ops per threads": 2018518.0, "Ops per procs": 10092593.0, "Ops/sec/procs": 1009259.0, "ns per ops/procs": 991.0}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 288121315.0, "Ops per second": 28809957.03, "ns per ops": 34.71, "Ops per threads": 3601516.0, "Ops per procs": 18007582.0, "Ops/sec/procs": 1800622.31, "ns per ops/procs": 555.36}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 153491548.0, "Ops per second": 15349154.0, "ns per ops": 65.0, "Ops per threads": 1918644.0, "Ops per procs": 9593221.0, "Ops/sec/procs": 959322.0, "ns per ops/procs": 1052.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10100.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# 
thrds)": 5.0, "Total Operations(ops)": 265150851.0, "Ops per second": 26515085.0, "ns per ops": 38.0, "Ops per threads": 2209590.0, "Ops per procs": 11047952.0, "Ops/sec/procs": 1104795.0, "ns per ops/procs": 914.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54873960.0, "Ops per second": 5487396.0, "ns per ops": 184.0, "Ops per threads": 10974792.0, "Ops per procs": 54873960.0, "Ops/sec/procs": 5487396.0, "ns per ops/procs": 184.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 164735691.0, "Ops per second": 16473569.0, "ns per ops": 60.0, "Ops per threads": 2059196.0, "Ops per procs": 10295980.0, "Ops/sec/procs": 1029598.0, "ns per ops/procs": 971.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 71038106.0, "Total blocks": 71038101.0, "Ops per second": 7098555.38, "ns per ops": 140.87, "Ops per threads": 14207621.0, "Ops per procs": 71038106.0, "Ops/sec/procs": 7098555.38, "ns per ops/procs": 140.87}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10007.037227, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 233110848.0, "Total blocks": 233109436.0, "Ops per second": 23294691.8, "ns per ops": 42.93, "Ops per threads": 1942590.0, "Ops per procs": 9712952.0, "Ops/sec/procs": 970612.16, "ns per ops/procs": 1030.28}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10012.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 
60897008.0, "Total blocks": 60897007.0, "Ops per second": 6082228.89, "ns per ops": 164.41, "Ops per threads": 761212.0, "Ops per procs": 3806063.0, "Ops/sec/procs": 380139.31, "ns per ops/procs": 2630.61}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56664861.0, "Ops per second": 5666229.66, "ns per ops": 176.48, "Ops per threads": 11332972.0, "Ops per procs": 56664861.0, "Ops/sec/procs": 5666229.66, "ns per ops/procs": 176.48}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54918505.0, "Ops per second": 5491850.0, "ns per ops": 183.0, "Ops per threads": 10983701.0, "Ops per procs": 54918505.0, "Ops/sec/procs": 5491850.0, "ns per ops/procs": 183.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54930921.0, "Ops per second": 5493092.0, "ns per ops": 183.0, "Ops per threads": 10986184.0, "Ops per procs": 54930921.0, "Ops/sec/procs": 5493092.0, "ns per ops/procs": 183.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10003.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56683908.0, "Total blocks": 56683908.0, "Ops per second": 5666230.51, "ns per ops": 176.48, "Ops per threads": 472365.0, "Ops per procs": 2361829.0, "Ops/sec/procs": 236092.94, "ns per ops/procs": 4235.62}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 63241275.0, "Total blocks": 63241275.0, "Ops per second": 
6319199.64, "ns per ops": 158.25, "Ops per threads": 1581031.0, "Ops per procs": 7905159.0, "Ops/sec/procs": 789899.95, "ns per ops/procs": 1265.98}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56846729.0, "Ops per second": 5684418.62, "ns per ops": 175.92, "Ops per threads": 11369345.0, "Ops per procs": 56846729.0, "Ops/sec/procs": 5684418.62, "ns per ops/procs": 175.92}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10024.602154, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 189976405.0, "Total blocks": 189975754.0, "Ops per second": 18951016.92, "ns per ops": 52.77, "Ops per threads": 2374705.0, "Ops per procs": 11873525.0, "Ops/sec/procs": 1184438.56, "ns per ops/procs": 844.28}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/memcd.rate
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/memcd.rate	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/data/memcd.rate	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,1 @@
+[["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 498755.2, "Median Read Latency": 3190.7, "Tail Read Latency": 225397.5, "Median Update Latency": 2830.3, "Tail Update Latency": 226163.8}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 199980.9, "Median Read Latency": 95.5, "Tail Read Latency": 170.9, "Median Update Latency": 100.7, "Tail Update Latency": 176.2}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 399978.4, "Median Read Latency": 117.8, "Tail Read Latency": 1207.1, "Median Update Latency": 121.9, "Tail Update Latency": 1336.8}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 612437.4, "Median Read Latency": 15884.0, "Tail Read Latency": 248252.4, "Median Update Latency": 15749.0, "Tail Update Latency": 247485.6}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 399937.0, "Median Read Latency": 111.9, "Tail Read Latency": 869.0, "Median Update Latency": 117.5, "Tail Update Latency": 880.8}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100019.3, "Median Read Latency": 85.8, "Tail Read Latency": 161.8, "Median Update Latency": 92.2, "Tail Update Latency": 169.1}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499962.9, "Median Read Latency": 185.6, "Tail Read Latency": 12305.1, "Median Update Latency": 250.1, "Tail Update Latency": 12317.4}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100007.8, "Median Read Latency": 85.9, "Tail Read Latency": 161.0, "Median Update Latency": 92.3, "Tail Update Latency": 168.9}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 557475.3, "Median Read Latency": 18115.8, "Tail Read Latency": 251927.5, "Median Update Latency": 18299.3, "Tail Update Latency": 250306.8}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200004.6, "Median Read Latency": 95.3, "Tail Read Latency": 173.3, "Median Update Latency": 99.9, "Tail 
Update Latency": 180.6}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 597071.5, "Median Read Latency": 13212.4, "Tail Read Latency": 62539.9, "Median Update Latency": 13231.2, "Tail Update Latency": 69653.7}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 199978.5, "Median Read Latency": 95.5, "Tail Read Latency": 177.6, "Median Update Latency": 100.3, "Tail Update Latency": 186.7}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 614246.9, "Median Read Latency": 16225.4, "Tail Read Latency": 250527.9, "Median Update Latency": 16499.8, "Tail Update Latency": 256449.6}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300008.6, "Median Read Latency": 87.5, "Tail Read Latency": 183.5, "Median Update Latency": 93.1, "Tail Update Latency": 190.9}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100006.9, "Median Read Latency": 85.4, "Tail Read Latency": 177.6, "Median Update Latency": 92.0, "Tail Update Latency": 194.1}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200005.2, "Median Read Latency": 95.3, "Tail Read Latency": 173.8, "Median Update Latency": 99.8, "Tail Update Latency": 183.7}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 572663.5, "Median Read Latency": 371.4, "Tail Read Latency": 227972.8, "Median Update Latency": 1399.1, "Tail Update Latency": 226684.5}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 622639.2, "Median Read Latency": 5780.5, "Tail Read Latency": 230039.3, "Median Update Latency": 7841.8, "Tail Update Latency": 229186.6}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 592141.1, "Median Read Latency": 13408.6, "Tail Read Latency": 48231.9, "Median Update Latency": 13507.0, "Tail Update Latency": 49970.4}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 299997.1, "Median Read Latency": 91.3, "Tail Read Latency": 346.2, 
"Median Update Latency": 97.8, "Tail Update Latency": 359.3}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 299972.9, "Median Read Latency": 100.3, "Tail Read Latency": 454.5, "Median Update Latency": 106.7, "Tail Update Latency": 436.9}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 598258.7, "Median Read Latency": 13440.2, "Tail Read Latency": 50537.1, "Median Update Latency": 13527.6, "Tail Update Latency": 47965.6}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 299962.2, "Median Read Latency": 88.0, "Tail Read Latency": 181.4, "Median Update Latency": 93.3, "Tail Update Latency": 187.2}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 602219.4, "Median Read Latency": 25297.0, "Tail Read Latency": 250896.6, "Median Update Latency": 25038.5, "Tail Update Latency": 251507.9}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 564998.0, "Median Read Latency": 20010.7, "Tail Read Latency": 250571.5, "Median Update Latency": 20091.0, "Tail Update Latency": 250161.2}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499891.1, "Median Read Latency": 7175.0, "Tail Read Latency": 220926.7, "Median Update Latency": 7105.9, "Tail Update Latency": 221994.0}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499974.2, "Median Read Latency": 211.4, "Tail Read Latency": 11680.1, "Median Update Latency": 829.2, "Tail Update Latency": 11990.8}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300012.7, "Median Read Latency": 97.1, "Tail Read Latency": 222.8, "Median Update Latency": 101.7, "Tail Update Latency": 238.1}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300070.5, "Median Read Latency": 96.8, "Tail Read Latency": 220.3, "Median Update Latency": 101.7, "Tail Update Latency": 237.8}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100008.7, "Median Read Latency": 85.6, 
"Tail Read Latency": 163.0, "Median Update Latency": 92.3, "Tail Update Latency": 175.3}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400028.9, "Median Read Latency": 114.8, "Tail Read Latency": 956.1, "Median Update Latency": 120.3, "Tail Update Latency": 1350.3}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300055.6, "Median Read Latency": 86.4, "Tail Read Latency": 175.9, "Median Update Latency": 91.2, "Tail Update Latency": 180.7}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 673707.6, "Median Read Latency": 6827.8, "Tail Read Latency": 231197.0, "Median Update Latency": 7140.8, "Tail Update Latency": 231287.8}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 601228.5, "Median Read Latency": 12871.0, "Tail Read Latency": 219146.6, "Median Update Latency": 12852.8, "Tail Update Latency": 56501.0}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300015.0, "Median Read Latency": 100.1, "Tail Read Latency": 481.1, "Median Update Latency": 105.7, "Tail Update Latency": 488.7}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 99997.9, "Median Read Latency": 85.4, "Tail Read Latency": 167.7, "Median Update Latency": 92.4, "Tail Update Latency": 173.4}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200021.1, "Median Read Latency": 88.1, "Tail Read Latency": 244.9, "Median Update Latency": 95.0, "Tail Update Latency": 253.9}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400017.0, "Median Read Latency": 113.1, "Tail Read Latency": 849.1, "Median Update Latency": 119.1, "Tail Update Latency": 882.8}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 562604.7, "Median Read Latency": 19819.4, "Tail Read Latency": 249845.8, "Median Update Latency": 19871.1, "Tail Update Latency": 249555.6}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 572312.9, "Median 
Read Latency": 6524.3, "Tail Read Latency": 228864.5, "Median Update Latency": 2602.5, "Tail Update Latency": 228616.6}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 621196.7, "Median Read Latency": 2295.2, "Tail Read Latency": 231836.0, "Median Update Latency": 1441.1, "Tail Update Latency": 230955.6}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 595177.6, "Median Read Latency": 13372.4, "Tail Read Latency": 46883.0, "Median Update Latency": 13426.8, "Tail Update Latency": 64667.1}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400026.7, "Median Read Latency": 115.7, "Tail Read Latency": 881.2, "Median Update Latency": 121.1, "Tail Update Latency": 929.1}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 498000.8, "Median Read Latency": 4060.3, "Tail Read Latency": 226676.1, "Median Update Latency": 2978.6, "Tail Update Latency": 225731.4}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 579656.7, "Median Read Latency": 12225.7, "Tail Read Latency": 219760.8, "Median Update Latency": 12238.9, "Tail Update Latency": 203698.4}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200001.8, "Median Read Latency": 88.1, "Tail Read Latency": 240.2, "Median Update Latency": 94.6, "Tail Update Latency": 251.4}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 594850.0, "Median Read Latency": 13333.6, "Tail Read Latency": 52682.0, "Median Update Latency": 13351.9, "Tail Update Latency": 56204.4}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499921.3, "Median Read Latency": 6662.5, "Tail Read Latency": 219673.4, "Median Update Latency": 6837.7, "Tail Update Latency": 211568.1}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 572487.8, "Median Read Latency": 10378.4, "Tail Read Latency": 226976.4, "Median Update Latency": 10403.8, "Tail Update Latency": 227094.8}], ["forall", "memcached 
forall", {"Target QPS": 800000, "Actual QPS": 594974.2, "Median Read Latency": 13359.6, "Tail Read Latency": 41942.6, "Median Update Latency": 13334.3, "Tail Update Latency": 44809.8}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 199997.8, "Median Read Latency": 78.3, "Tail Read Latency": 128.0, "Median Update Latency": 82.8, "Tail Update Latency": 134.8}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400037.0, "Median Read Latency": 115.5, "Tail Read Latency": 996.4, "Median Update Latency": 120.1, "Tail Update Latency": 1664.7}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500010.9, "Median Read Latency": 165.0, "Tail Read Latency": 12206.4, "Median Update Latency": 179.4, "Tail Update Latency": 12184.2}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200000.1, "Median Read Latency": 95.5, "Tail Read Latency": 180.0, "Median Update Latency": 99.7, "Tail Update Latency": 182.7}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 99978.3, "Median Read Latency": 77.9, "Tail Read Latency": 118.1, "Median Update Latency": 83.2, "Tail Update Latency": 128.9}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399987.9, "Median Read Latency": 108.4, "Tail Read Latency": 539.7, "Median Update Latency": 113.9, "Tail Update Latency": 539.2}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499820.4, "Median Read Latency": 5658.7, "Tail Read Latency": 212053.1, "Median Update Latency": 5741.1, "Tail Update Latency": 50321.8}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400070.7, "Median Read Latency": 111.9, "Tail Read Latency": 890.4, "Median Update Latency": 117.0, "Tail Update Latency": 828.8}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 601775.8, "Median Read Latency": 23117.3, "Tail Read Latency": 250232.9, "Median Update Latency": 23116.7, "Tail Update Latency": 
246551.8}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100001.3, "Median Read Latency": 92.8, "Tail Read Latency": 140.9, "Median Update Latency": 98.0, "Tail Update Latency": 152.9}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 574754.4, "Median Read Latency": 357.0, "Tail Read Latency": 231151.6, "Median Update Latency": 557.4, "Tail Update Latency": 229444.1}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 573363.3, "Median Read Latency": 9889.6, "Tail Read Latency": 229927.1, "Median Update Latency": 9906.8, "Tail Update Latency": 231207.8}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 679908.4, "Median Read Latency": 5451.8, "Tail Read Latency": 230889.9, "Median Update Latency": 5681.1, "Tail Update Latency": 232091.7}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 609366.0, "Median Read Latency": 16768.6, "Tail Read Latency": 247631.3, "Median Update Latency": 17033.4, "Tail Update Latency": 253910.6}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200039.8, "Median Read Latency": 95.6, "Tail Read Latency": 174.6, "Median Update Latency": 100.5, "Tail Update Latency": 180.7}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 596702.7, "Median Read Latency": 13428.9, "Tail Read Latency": 51857.9, "Median Update Latency": 13435.2, "Tail Update Latency": 83668.0}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 576584.3, "Median Read Latency": 10177.2, "Tail Read Latency": 228562.7, "Median Update Latency": 10194.1, "Tail Update Latency": 227658.5}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499992.8, "Median Read Latency": 162.7, "Tail Read Latency": 11374.3, "Median Update Latency": 167.4, "Tail Update Latency": 11372.9}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 595017.0, "Median Read Latency": 17998.2, "Tail Read Latency": 
243922.3, "Median Update Latency": 18344.5, "Tail Update Latency": 239502.7}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199994.0, "Median Read Latency": 88.5, "Tail Read Latency": 239.3, "Median Update Latency": 95.4, "Tail Update Latency": 248.8}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 299957.9, "Median Read Latency": 105.1, "Tail Read Latency": 274.5, "Median Update Latency": 109.5, "Tail Update Latency": 287.3}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200018.7, "Median Read Latency": 95.2, "Tail Read Latency": 172.9, "Median Update Latency": 100.4, "Tail Update Latency": 174.6}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500038.3, "Median Read Latency": 211.6, "Tail Read Latency": 11382.7, "Median Update Latency": 206.1, "Tail Update Latency": 11382.7}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 597722.5, "Median Read Latency": 13397.7, "Tail Read Latency": 58411.2, "Median Update Latency": 13387.9, "Tail Update Latency": 68941.3}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100030.7, "Median Read Latency": 85.7, "Tail Read Latency": 163.9, "Median Update Latency": 92.0, "Tail Update Latency": 171.4}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 558818.6, "Median Read Latency": 13363.8, "Tail Read Latency": 248229.0, "Median Update Latency": 13350.2, "Tail Update Latency": 249960.2}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 621757.3, "Median Read Latency": 5239.7, "Tail Read Latency": 234406.2, "Median Update Latency": 6894.0, "Tail Update Latency": 234114.4}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 679182.4, "Median Read Latency": 5967.0, "Tail Read Latency": 228719.6, "Median Update Latency": 6772.4, "Tail Update Latency": 228625.2}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200045.5, "Median 
Read Latency": 79.2, "Tail Read Latency": 128.4, "Median Update Latency": 84.2, "Tail Update Latency": 137.4}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 99987.1, "Median Read Latency": 93.3, "Tail Read Latency": 141.6, "Median Update Latency": 98.5, "Tail Update Latency": 152.4}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300010.7, "Median Read Latency": 92.5, "Tail Read Latency": 349.6, "Median Update Latency": 99.4, "Tail Update Latency": 379.9}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400028.0, "Median Read Latency": 113.6, "Tail Read Latency": 841.4, "Median Update Latency": 120.0, "Tail Update Latency": 882.9}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 593569.4, "Median Read Latency": 13403.1, "Tail Read Latency": 39646.1, "Median Update Latency": 13399.0, "Tail Update Latency": 37874.8}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400020.1, "Median Read Latency": 107.8, "Tail Read Latency": 564.8, "Median Update Latency": 112.1, "Tail Update Latency": 551.0}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 597114.9, "Median Read Latency": 18109.7, "Tail Read Latency": 246701.4, "Median Update Latency": 17982.1, "Tail Update Latency": 247698.7}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 592504.7, "Median Read Latency": 13426.0, "Tail Read Latency": 44154.2, "Median Update Latency": 13401.8, "Tail Update Latency": 46925.7}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300004.6, "Median Read Latency": 87.4, "Tail Read Latency": 181.7, "Median Update Latency": 92.3, "Tail Update Latency": 184.6}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199991.6, "Median Read Latency": 87.7, "Tail Read Latency": 238.7, "Median Update Latency": 94.3, "Tail Update Latency": 241.1}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 
300039.2, "Median Read Latency": 100.0, "Tail Read Latency": 465.5, "Median Update Latency": 105.5, "Tail Update Latency": 437.3}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 555859.2, "Median Read Latency": 14324.7, "Tail Read Latency": 252224.7, "Median Update Latency": 14183.0, "Tail Update Latency": 253064.7}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 591884.8, "Median Read Latency": 18852.4, "Tail Read Latency": 245546.0, "Median Update Latency": 18781.5, "Tail Update Latency": 251330.1}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 560411.7, "Median Read Latency": 16212.0, "Tail Read Latency": 249414.2, "Median Update Latency": 16315.3, "Tail Update Latency": 252118.3}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499875.4, "Median Read Latency": 7148.7, "Tail Read Latency": 221728.4, "Median Update Latency": 6854.7, "Tail Update Latency": 223478.5}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 603884.3, "Median Read Latency": 12495.1, "Tail Read Latency": 220449.1, "Median Update Latency": 12489.2, "Tail Update Latency": 220650.4}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 686509.4, "Median Read Latency": 6617.1, "Tail Read Latency": 231746.5, "Median Update Latency": 6934.3, "Tail Update Latency": 232363.0}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 623145.4, "Median Read Latency": 5661.8, "Tail Read Latency": 229372.2, "Median Update Latency": 6085.7, "Tail Update Latency": 230691.9}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100019.7, "Median Read Latency": 93.3, "Tail Read Latency": 142.6, "Median Update Latency": 98.3, "Tail Update Latency": 152.2}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499889.0, "Median Read Latency": 155.9, "Tail Read Latency": 12059.8, "Median Update Latency": 169.9, "Tail Update Latency": 12072.0}], 
["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100018.5, "Median Read Latency": 86.6, "Tail Read Latency": 164.3, "Median Update Latency": 92.9, "Tail Update Latency": 179.2}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100008.8, "Median Read Latency": 85.5, "Tail Read Latency": 162.2, "Median Update Latency": 92.2, "Tail Update Latency": 169.4}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 559071.8, "Median Read Latency": 13053.0, "Tail Read Latency": 247054.3, "Median Update Latency": 13181.4, "Tail Update Latency": 246805.2}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100016.0, "Median Read Latency": 78.2, "Tail Read Latency": 119.3, "Median Update Latency": 84.1, "Tail Update Latency": 130.7}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400025.2, "Median Read Latency": 104.6, "Tail Read Latency": 513.5, "Median Update Latency": 108.7, "Tail Update Latency": 505.0}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100009.2, "Median Read Latency": 93.0, "Tail Read Latency": 143.2, "Median Update Latency": 98.0, "Tail Update Latency": 154.2}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399978.2, "Median Read Latency": 104.6, "Tail Read Latency": 482.3, "Median Update Latency": 109.5, "Tail Update Latency": 481.6}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 399992.4, "Median Read Latency": 115.3, "Tail Read Latency": 1018.2, "Median Update Latency": 121.2, "Tail Update Latency": 1420.5}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200039.6, "Median Read Latency": 88.3, "Tail Read Latency": 235.4, "Median Update Latency": 94.9, "Tail Update Latency": 242.1}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 498725.8, "Median Read Latency": 3893.4, "Tail Read Latency": 227347.3, "Median Update Latency": 3733.9, "Tail Update Latency": 
227619.0}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200028.7, "Median Read Latency": 79.2, "Tail Read Latency": 129.6, "Median Update Latency": 84.3, "Tail Update Latency": 138.4}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 620433.1, "Median Read Latency": 21839.7, "Tail Read Latency": 249643.8, "Median Update Latency": 22022.9, "Tail Update Latency": 249993.9}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 569787.5, "Median Read Latency": 3043.0, "Tail Read Latency": 226727.4, "Median Update Latency": 6870.7, "Tail Update Latency": 227823.6}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 597059.4, "Median Read Latency": 13321.6, "Tail Read Latency": 45190.1, "Median Update Latency": 13316.7, "Tail Update Latency": 44028.0}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300053.7, "Median Read Latency": 86.8, "Tail Read Latency": 184.7, "Median Update Latency": 92.1, "Tail Update Latency": 192.5}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100014.4, "Median Read Latency": 93.1, "Tail Read Latency": 141.1, "Median Update Latency": 98.9, "Tail Update Latency": 150.9}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 618846.7, "Median Read Latency": 902.7, "Tail Read Latency": 227141.2, "Median Update Latency": 637.8, "Tail Update Latency": 224313.9}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 595532.7, "Median Read Latency": 16978.5, "Tail Read Latency": 248593.5, "Median Update Latency": 16895.7, "Tail Update Latency": 250405.5}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 620493.4, "Median Read Latency": 19597.9, "Tail Read Latency": 247174.2, "Median Update Latency": 19579.3, "Tail Update Latency": 250788.1}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399994.3, "Median Read Latency": 103.5, "Tail Read Latency": 456.3, 
"Median Update Latency": 108.6, "Tail Update Latency": 464.3}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 572443.4, "Median Read Latency": 5819.9, "Tail Read Latency": 229576.2, "Median Update Latency": 5419.3, "Tail Update Latency": 230185.9}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200006.2, "Median Read Latency": 95.2, "Tail Read Latency": 174.1, "Median Update Latency": 99.8, "Tail Update Latency": 183.5}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 675884.4, "Median Read Latency": 5168.9, "Tail Read Latency": 232226.3, "Median Update Latency": 5221.0, "Tail Update Latency": 234665.5}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300048.0, "Median Read Latency": 104.7, "Tail Read Latency": 274.3, "Median Update Latency": 110.1, "Tail Update Latency": 270.9}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 593973.5, "Median Read Latency": 13469.9, "Tail Read Latency": 48997.9, "Median Update Latency": 13481.7, "Tail Update Latency": 43736.0}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400014.9, "Median Read Latency": 114.7, "Tail Read Latency": 985.2, "Median Update Latency": 117.9, "Tail Update Latency": 836.7}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100023.3, "Median Read Latency": 92.3, "Tail Read Latency": 140.0, "Median Update Latency": 97.5, "Tail Update Latency": 150.4}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200024.8, "Median Read Latency": 78.3, "Tail Read Latency": 127.6, "Median Update Latency": 82.7, "Tail Update Latency": 133.8}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 299955.1, "Median Read Latency": 92.6, "Tail Read Latency": 352.0, "Median Update Latency": 99.0, "Tail Update Latency": 340.3}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499993.2, "Median Read Latency": 496.8, "Tail Read 
Latency": 11875.1, "Median Update Latency": 347.0, "Tail Update Latency": 11798.5}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400080.4, "Median Read Latency": 105.7, "Tail Read Latency": 515.6, "Median Update Latency": 110.8, "Tail Update Latency": 518.0}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 621476.0, "Median Read Latency": 4652.6, "Tail Read Latency": 228156.0, "Median Update Latency": 4843.4, "Tail Update Latency": 226596.6}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 618279.2, "Median Read Latency": 4429.6, "Tail Read Latency": 228611.1, "Median Update Latency": 4362.4, "Tail Update Latency": 226378.2}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 593594.2, "Median Read Latency": 13393.0, "Tail Read Latency": 57221.3, "Median Update Latency": 13422.7, "Tail Update Latency": 80690.8}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200022.4, "Median Read Latency": 95.8, "Tail Read Latency": 172.8, "Median Update Latency": 100.5, "Tail Update Latency": 185.4}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 560918.1, "Median Read Latency": 16358.1, "Tail Read Latency": 253540.6, "Median Update Latency": 16212.4, "Tail Update Latency": 251232.8}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 669430.7, "Median Read Latency": 5583.0, "Tail Read Latency": 231084.5, "Median Update Latency": 6019.7, "Tail Update Latency": 232056.7}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399991.3, "Median Read Latency": 106.0, "Tail Read Latency": 530.9, "Median Update Latency": 111.6, "Tail Update Latency": 541.9}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 399954.1, "Median Read Latency": 115.9, "Tail Read Latency": 1160.1, "Median Update Latency": 121.1, "Tail Update Latency": 1101.6}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 
606298.3, "Median Read Latency": 12500.1, "Tail Read Latency": 221340.8, "Median Update Latency": 12479.4, "Tail Update Latency": 220126.4}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 677365.7, "Median Read Latency": 5572.2, "Tail Read Latency": 232186.6, "Median Update Latency": 5377.3, "Tail Update Latency": 234817.9}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 560278.3, "Median Read Latency": 15552.2, "Tail Read Latency": 251034.1, "Median Update Latency": 15592.0, "Tail Update Latency": 246436.6}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 595753.0, "Median Read Latency": 23720.5, "Tail Read Latency": 249422.3, "Median Update Latency": 23512.1, "Tail Update Latency": 249583.9}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400063.9, "Median Read Latency": 113.4, "Tail Read Latency": 862.6, "Median Update Latency": 120.0, "Tail Update Latency": 817.9}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499823.1, "Median Read Latency": 7675.7, "Tail Read Latency": 221134.1, "Median Update Latency": 7669.5, "Tail Update Latency": 221961.7}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 399924.2, "Median Read Latency": 113.2, "Tail Read Latency": 860.6, "Median Update Latency": 119.8, "Tail Update Latency": 955.3}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300058.2, "Median Read Latency": 87.1, "Tail Read Latency": 181.8, "Median Update Latency": 91.9, "Tail Update Latency": 186.4}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100027.5, "Median Read Latency": 79.1, "Tail Read Latency": 123.2, "Median Update Latency": 84.7, "Tail Update Latency": 132.5}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 598396.1, "Median Read Latency": 15300.7, "Tail Read Latency": 244675.0, "Median Update Latency": 15021.1, "Tail Update Latency": 241147.1}], ["vanilla", 
"memcached vanilla", {"Target QPS": 600000, "Actual QPS": 572232.9, "Median Read Latency": 5540.0, "Tail Read Latency": 226307.6, "Median Update Latency": 6565.7, "Tail Update Latency": 228217.0}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100026.0, "Median Read Latency": 79.4, "Tail Read Latency": 119.9, "Median Update Latency": 84.8, "Tail Update Latency": 127.7}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 612789.2, "Median Read Latency": 20433.4, "Tail Read Latency": 248317.0, "Median Update Latency": 19920.1, "Tail Update Latency": 248162.6}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 633366.9, "Median Read Latency": 27763.5, "Tail Read Latency": 261832.4, "Median Update Latency": 27589.3, "Tail Update Latency": 260902.8}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300076.6, "Median Read Latency": 91.9, "Tail Read Latency": 355.3, "Median Update Latency": 97.9, "Tail Update Latency": 346.9}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400067.4, "Median Read Latency": 104.2, "Tail Read Latency": 490.8, "Median Update Latency": 109.1, "Tail Update Latency": 489.3}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100018.2, "Median Read Latency": 79.5, "Tail Read Latency": 124.0, "Median Update Latency": 85.6, "Tail Update Latency": 130.7}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 673606.9, "Median Read Latency": 6399.4, "Tail Read Latency": 232519.9, "Median Update Latency": 7309.5, "Tail Update Latency": 233818.8}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 618746.2, "Median Read Latency": 5440.4, "Tail Read Latency": 229943.5, "Median Update Latency": 6759.6, "Tail Update Latency": 229563.8}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300022.9, "Median Read Latency": 87.6, "Tail Read Latency": 182.6, "Median Update Latency": 
92.8, "Tail Update Latency": 187.9}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400008.0, "Median Read Latency": 111.5, "Tail Read Latency": 841.2, "Median Update Latency": 118.5, "Tail Update Latency": 843.9}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300049.4, "Median Read Latency": 88.0, "Tail Read Latency": 183.8, "Median Update Latency": 94.3, "Tail Update Latency": 187.9}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 299973.6, "Median Read Latency": 103.9, "Tail Read Latency": 264.6, "Median Update Latency": 108.6, "Tail Update Latency": 264.8}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 634051.9, "Median Read Latency": 29679.0, "Tail Read Latency": 276428.2, "Median Update Latency": 29553.8, "Tail Update Latency": 275365.1}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 595601.7, "Median Read Latency": 16592.4, "Tail Read Latency": 248858.5, "Median Update Latency": 16643.3, "Tail Update Latency": 249676.4}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 299996.7, "Median Read Latency": 105.2, "Tail Read Latency": 265.0, "Median Update Latency": 110.0, "Tail Update Latency": 291.7}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 601384.7, "Median Read Latency": 21991.8, "Tail Read Latency": 251918.5, "Median Update Latency": 22250.9, "Tail Update Latency": 254757.7}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 672509.3, "Median Read Latency": 6597.4, "Tail Read Latency": 231210.5, "Median Update Latency": 6618.7, "Tail Update Latency": 230082.2}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 618813.0, "Median Read Latency": 2470.6, "Tail Read Latency": 229954.7, "Median Update Latency": 448.3, "Tail Update Latency": 230106.4}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399964.2, "Median Read Latency": 104.2, "Tail Read 
Latency": 500.7, "Median Update Latency": 109.3, "Tail Update Latency": 512.1}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200027.6, "Median Read Latency": 88.4, "Tail Read Latency": 242.7, "Median Update Latency": 95.4, "Tail Update Latency": 259.1}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200074.5, "Median Read Latency": 95.6, "Tail Read Latency": 174.1, "Median Update Latency": 100.6, "Tail Update Latency": 180.8}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 590638.3, "Median Read Latency": 20591.5, "Tail Read Latency": 245328.4, "Median Update Latency": 20026.6, "Tail Update Latency": 247076.2}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499880.6, "Median Read Latency": 6449.0, "Tail Read Latency": 220474.9, "Median Update Latency": 6244.1, "Tail Update Latency": 221755.8}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499651.8, "Median Read Latency": 3100.6, "Tail Read Latency": 224805.6, "Median Update Latency": 3231.5, "Tail Update Latency": 220308.2}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 679918.1, "Median Read Latency": 6117.7, "Tail Read Latency": 235370.8, "Median Update Latency": 5461.9, "Tail Update Latency": 235881.3}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 562603.6, "Median Read Latency": 17687.5, "Tail Read Latency": 249811.6, "Median Update Latency": 17850.1, "Tail Update Latency": 248409.0}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 498800.0, "Median Read Latency": 4824.4, "Tail Read Latency": 226001.1, "Median Update Latency": 3904.7, "Tail Update Latency": 225754.6}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300001.7, "Median Read Latency": 87.0, "Tail Read Latency": 177.7, "Median Update Latency": 91.8, "Tail Update Latency": 186.5}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 
200017.1, "Median Read Latency": 79.3, "Tail Read Latency": 128.7, "Median Update Latency": 84.2, "Tail Update Latency": 141.4}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400017.2, "Median Read Latency": 115.5, "Tail Read Latency": 1159.7, "Median Update Latency": 120.1, "Tail Update Latency": 989.5}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 99991.2, "Median Read Latency": 78.9, "Tail Read Latency": 121.9, "Median Update Latency": 84.3, "Tail Update Latency": 129.4}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200019.4, "Median Read Latency": 78.7, "Tail Read Latency": 127.4, "Median Update Latency": 83.4, "Tail Update Latency": 138.3}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 574746.3, "Median Read Latency": 10094.5, "Tail Read Latency": 229286.1, "Median Update Latency": 10163.4, "Tail Update Latency": 228392.7}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 666919.6, "Median Read Latency": 5602.6, "Tail Read Latency": 231151.9, "Median Update Latency": 7137.9, "Tail Update Latency": 230738.8}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300001.0, "Median Read Latency": 87.0, "Tail Read Latency": 178.4, "Median Update Latency": 92.1, "Tail Update Latency": 184.3}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400016.4, "Median Read Latency": 112.3, "Tail Read Latency": 861.1, "Median Update Latency": 119.0, "Tail Update Latency": 826.2}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 554192.0, "Median Read Latency": 14565.8, "Tail Read Latency": 251281.6, "Median Update Latency": 14621.9, "Tail Update Latency": 248503.4}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400004.9, "Median Read Latency": 112.6, "Tail Read Latency": 847.6, "Median Update Latency": 119.8, "Tail Update Latency": 780.5}], ["vanilla", "memcached vanilla", {"Target 
QPS": 500000, "Actual QPS": 499926.2, "Median Read Latency": 202.8, "Tail Read Latency": 11364.8, "Median Update Latency": 225.1, "Tail Update Latency": 11373.2}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499827.3, "Median Read Latency": 6236.1, "Tail Read Latency": 219172.4, "Median Update Latency": 6311.9, "Tail Update Latency": 218648.6}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 299983.9, "Median Read Latency": 105.0, "Tail Read Latency": 266.1, "Median Update Latency": 110.0, "Tail Update Latency": 270.9}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 572297.0, "Median Read Latency": 7121.5, "Tail Read Latency": 229699.2, "Median Update Latency": 6446.6, "Tail Update Latency": 229921.5}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300049.7, "Median Read Latency": 100.0, "Tail Read Latency": 462.1, "Median Update Latency": 106.0, "Tail Update Latency": 495.3}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300036.2, "Median Read Latency": 97.4, "Tail Read Latency": 215.2, "Median Update Latency": 101.7, "Tail Update Latency": 222.3}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 592848.3, "Median Read Latency": 13499.3, "Tail Read Latency": 54109.4, "Median Update Latency": 13537.0, "Tail Update Latency": 67041.7}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 599841.2, "Median Read Latency": 13013.2, "Tail Read Latency": 219173.4, "Median Update Latency": 12913.4, "Tail Update Latency": 78396.6}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499623.8, "Median Read Latency": 3350.4, "Tail Read Latency": 224439.8, "Median Update Latency": 2713.4, "Tail Update Latency": 227667.6}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400044.7, "Median Read Latency": 114.6, "Tail Read Latency": 904.6, "Median Update Latency": 121.3, "Tail Update Latency": 889.9}], 
["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 99961.3, "Median Read Latency": 78.9, "Tail Read Latency": 121.4, "Median Update Latency": 84.1, "Tail Update Latency": 129.1}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 399956.7, "Median Read Latency": 111.9, "Tail Read Latency": 913.7, "Median Update Latency": 117.9, "Tail Update Latency": 812.8}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199987.8, "Median Read Latency": 88.7, "Tail Read Latency": 240.9, "Median Update Latency": 94.8, "Tail Update Latency": 263.6}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 578039.8, "Median Read Latency": 12007.4, "Tail Read Latency": 222983.8, "Median Update Latency": 12072.4, "Tail Update Latency": 223117.5}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 599235.9, "Median Read Latency": 12980.0, "Tail Read Latency": 89029.3, "Median Update Latency": 13117.6, "Tail Update Latency": 84983.3}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400003.6, "Median Read Latency": 104.1, "Tail Read Latency": 506.4, "Median Update Latency": 108.0, "Tail Update Latency": 499.3}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 667063.6, "Median Read Latency": 7072.8, "Tail Read Latency": 228047.4, "Median Update Latency": 6478.2, "Tail Update Latency": 226000.1}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 591587.5, "Median Read Latency": 13359.7, "Tail Read Latency": 50039.7, "Median Update Latency": 13361.1, "Tail Update Latency": 49096.8}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100046.8, "Median Read Latency": 93.6, "Tail Read Latency": 141.1, "Median Update Latency": 98.7, "Tail Update Latency": 149.6}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100035.3, "Median Read Latency": 93.2, "Tail Read Latency": 140.9, "Median Update Latency": 98.0, 
"Tail Update Latency": 149.2}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 579557.0, "Median Read Latency": 11146.3, "Tail Read Latency": 225592.5, "Median Update Latency": 11213.4, "Tail Update Latency": 227281.7}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499854.0, "Median Read Latency": 6874.0, "Tail Read Latency": 221957.8, "Median Update Latency": 6881.0, "Tail Update Latency": 223218.6}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199990.1, "Median Read Latency": 88.2, "Tail Read Latency": 238.6, "Median Update Latency": 95.8, "Tail Update Latency": 247.6}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 617227.7, "Median Read Latency": 17171.0, "Tail Read Latency": 249354.5, "Median Update Latency": 16888.9, "Tail Update Latency": 253622.6}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 616791.3, "Median Read Latency": 7783.5, "Tail Read Latency": 231012.6, "Median Update Latency": 7816.4, "Tail Update Latency": 229123.0}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100038.9, "Median Read Latency": 78.3, "Tail Read Latency": 119.7, "Median Update Latency": 83.5, "Tail Update Latency": 130.2}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 600648.1, "Median Read Latency": 19930.5, "Tail Read Latency": 252949.2, "Median Update Latency": 19774.3, "Tail Update Latency": 254769.5}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400077.3, "Median Read Latency": 115.5, "Tail Read Latency": 1407.8, "Median Update Latency": 119.4, "Tail Update Latency": 1102.4}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100003.5, "Median Read Latency": 85.9, "Tail Read Latency": 164.6, "Median Update Latency": 92.1, "Tail Update Latency": 171.8}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 99984.3, "Median Read Latency": 78.3, "Tail Read Latency": 
120.5, "Median Update Latency": 83.7, "Tail Update Latency": 128.5}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 596507.7, "Median Read Latency": 13251.2, "Tail Read Latency": 73339.7, "Median Update Latency": 13352.3, "Tail Update Latency": 112626.7}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200030.3, "Median Read Latency": 94.9, "Tail Read Latency": 171.9, "Median Update Latency": 99.8, "Tail Update Latency": 185.1}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499890.3, "Median Read Latency": 6051.4, "Tail Read Latency": 220881.9, "Median Update Latency": 5816.4, "Tail Update Latency": 220025.4}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 617601.3, "Median Read Latency": 6576.4, "Tail Read Latency": 228343.0, "Median Update Latency": 6669.8, "Tail Update Latency": 229107.3}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 614289.3, "Median Read Latency": 27798.8, "Tail Read Latency": 249319.1, "Median Update Latency": 28052.1, "Tail Update Latency": 255922.6}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400085.5, "Median Read Latency": 108.2, "Tail Read Latency": 596.3, "Median Update Latency": 112.3, "Tail Update Latency": 580.3}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499937.2, "Median Read Latency": 246.8, "Tail Read Latency": 12044.7, "Median Update Latency": 370.5, "Tail Update Latency": 11969.0}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300032.5, "Median Read Latency": 99.9, "Tail Read Latency": 450.5, "Median Update Latency": 106.5, "Tail Update Latency": 506.6}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200065.4, "Median Read Latency": 78.6, "Tail Read Latency": 127.6, "Median Update Latency": 83.6, "Tail Update Latency": 136.7}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200023.6, "Median Read 
Latency": 95.8, "Tail Read Latency": 175.4, "Median Update Latency": 100.8, "Tail Update Latency": 181.3}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200018.8, "Median Read Latency": 88.0, "Tail Read Latency": 237.2, "Median Update Latency": 95.0, "Tail Update Latency": 241.3}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100003.9, "Median Read Latency": 92.9, "Tail Read Latency": 141.7, "Median Update Latency": 98.0, "Tail Update Latency": 149.9}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200027.7, "Median Read Latency": 78.8, "Tail Read Latency": 128.1, "Median Update Latency": 84.1, "Tail Update Latency": 138.5}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 573103.5, "Median Read Latency": 6813.3, "Tail Read Latency": 228646.3, "Median Update Latency": 6506.7, "Tail Update Latency": 228655.2}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500002.4, "Median Read Latency": 165.8, "Tail Read Latency": 12247.6, "Median Update Latency": 153.3, "Tail Update Latency": 12160.1}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499939.4, "Median Read Latency": 236.9, "Tail Read Latency": 11686.2, "Median Update Latency": 1735.2, "Tail Update Latency": 11889.0}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100010.1, "Median Read Latency": 92.7, "Tail Read Latency": 140.3, "Median Update Latency": 98.4, "Tail Update Latency": 150.4}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200049.3, "Median Read Latency": 79.0, "Tail Read Latency": 128.4, "Median Update Latency": 83.8, "Tail Update Latency": 135.6}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499781.9, "Median Read Latency": 6814.0, "Tail Read Latency": 221736.9, "Median Update Latency": 6911.9, "Tail Update Latency": 208931.6}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 
570152.2, "Median Read Latency": 5421.5, "Tail Read Latency": 227907.2, "Median Update Latency": 370.5, "Tail Update Latency": 226747.8}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 577102.5, "Median Read Latency": 10328.5, "Tail Read Latency": 229337.1, "Median Update Latency": 10259.6, "Tail Update Latency": 228080.1}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300011.4, "Median Read Latency": 104.6, "Tail Read Latency": 269.3, "Median Update Latency": 108.7, "Tail Update Latency": 273.6}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 679919.6, "Median Read Latency": 6524.6, "Tail Read Latency": 232330.6, "Median Update Latency": 6061.6, "Tail Update Latency": 230619.7}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 573394.8, "Median Read Latency": 847.7, "Tail Read Latency": 229024.9, "Median Update Latency": 383.2, "Tail Update Latency": 229457.9}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200085.1, "Median Read Latency": 88.4, "Tail Read Latency": 241.4, "Median Update Latency": 95.2, "Tail Update Latency": 234.6}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200047.0, "Median Read Latency": 94.9, "Tail Read Latency": 171.3, "Median Update Latency": 99.5, "Tail Update Latency": 172.3}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 559301.4, "Median Read Latency": 14825.1, "Tail Read Latency": 254187.0, "Median Update Latency": 14664.1, "Tail Update Latency": 254937.9}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100010.6, "Median Read Latency": 85.3, "Tail Read Latency": 162.3, "Median Update Latency": 91.4, "Tail Update Latency": 173.7}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 578074.7, "Median Read Latency": 11791.1, "Tail Read Latency": 223980.5, "Median Update Latency": 11864.7, "Tail Update Latency": 226155.9}], ["forall", "memcached 
forall", {"Target QPS": 100000, "Actual QPS": 100001.6, "Median Read Latency": 85.8, "Tail Read Latency": 162.2, "Median Update Latency": 92.7, "Tail Update Latency": 171.3}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400002.4, "Median Read Latency": 116.0, "Tail Read Latency": 1275.3, "Median Update Latency": 120.4, "Tail Update Latency": 1950.4}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 99993.8, "Median Read Latency": 85.5, "Tail Read Latency": 165.2, "Median Update Latency": 91.9, "Tail Update Latency": 174.4}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300001.0, "Median Read Latency": 104.6, "Tail Read Latency": 264.9, "Median Update Latency": 109.1, "Tail Update Latency": 267.0}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300047.2, "Median Read Latency": 104.8, "Tail Read Latency": 270.7, "Median Update Latency": 108.8, "Tail Update Latency": 291.1}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 99984.4, "Median Read Latency": 92.5, "Tail Read Latency": 140.9, "Median Update Latency": 97.6, "Tail Update Latency": 151.5}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499211.4, "Median Read Latency": 4829.0, "Tail Read Latency": 227460.6, "Median Update Latency": 5820.2, "Tail Update Latency": 228629.5}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 399973.0, "Median Read Latency": 116.6, "Tail Read Latency": 1517.8, "Median Update Latency": 122.0, "Tail Update Latency": 2040.5}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 604492.9, "Median Read Latency": 22608.3, "Tail Read Latency": 250393.4, "Median Update Latency": 23071.2, "Tail Update Latency": 256668.7}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 611693.3, "Median Read Latency": 14027.6, "Tail Read Latency": 243356.0, "Median Update Latency": 14022.5, "Tail Update Latency": 250001.8}], ["forall", 
"memcached forall", {"Target QPS": 200000, "Actual QPS": 200042.2, "Median Read Latency": 88.5, "Tail Read Latency": 243.2, "Median Update Latency": 94.9, "Tail Update Latency": 236.6}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 559704.6, "Median Read Latency": 14430.7, "Tail Read Latency": 250332.9, "Median Update Latency": 14418.1, "Tail Update Latency": 253148.7}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499223.6, "Median Read Latency": 4367.8, "Tail Read Latency": 226883.4, "Median Update Latency": 5244.0, "Tail Update Latency": 221016.3}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 568544.1, "Median Read Latency": 2365.9, "Tail Read Latency": 228962.5, "Median Update Latency": 4500.7, "Tail Update Latency": 230563.3}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100000.3, "Median Read Latency": 78.8, "Tail Read Latency": 119.8, "Median Update Latency": 84.1, "Tail Update Latency": 127.7}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499122.9, "Median Read Latency": 3607.2, "Tail Read Latency": 224649.4, "Median Update Latency": 3444.9, "Tail Update Latency": 225614.4}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400014.4, "Median Read Latency": 116.2, "Tail Read Latency": 1195.9, "Median Update Latency": 120.5, "Tail Update Latency": 1364.5}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199998.7, "Median Read Latency": 88.4, "Tail Read Latency": 243.1, "Median Update Latency": 94.8, "Tail Update Latency": 250.8}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 99995.7, "Median Read Latency": 86.0, "Tail Read Latency": 169.2, "Median Update Latency": 92.9, "Tail Update Latency": 184.7}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100033.6, "Median Read Latency": 92.2, "Tail Read Latency": 140.4, "Median Update Latency": 97.9, "Tail Update 
Latency": 150.8}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400016.4, "Median Read Latency": 107.1, "Tail Read Latency": 600.3, "Median Update Latency": 113.1, "Tail Update Latency": 614.8}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 596136.4, "Median Read Latency": 13397.2, "Tail Read Latency": 56097.2, "Median Update Latency": 13382.5, "Tail Update Latency": 52032.8}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 99991.6, "Median Read Latency": 78.8, "Tail Read Latency": 123.0, "Median Update Latency": 84.3, "Tail Update Latency": 129.0}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 577576.0, "Median Read Latency": 11304.9, "Tail Read Latency": 223373.9, "Median Update Latency": 11351.8, "Tail Update Latency": 222493.1}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 621059.2, "Median Read Latency": 836.3, "Tail Read Latency": 229758.4, "Median Update Latency": 5313.0, "Tail Update Latency": 229136.4}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200039.4, "Median Read Latency": 79.0, "Tail Read Latency": 128.4, "Median Update Latency": 83.6, "Tail Update Latency": 137.9}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 563457.7, "Median Read Latency": 16701.6, "Tail Read Latency": 249200.5, "Median Update Latency": 16861.1, "Tail Update Latency": 248788.2}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 399982.2, "Median Read Latency": 113.1, "Tail Read Latency": 881.3, "Median Update Latency": 119.4, "Tail Update Latency": 900.6}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 595465.4, "Median Read Latency": 13157.2, "Tail Read Latency": 75250.3, "Median Update Latency": 13247.2, "Tail Update Latency": 80003.1}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499959.8, "Median Read Latency": 262.9, "Tail Read Latency": 
12395.7, "Median Update Latency": 295.8, "Tail Update Latency": 12350.6}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 589612.6, "Median Read Latency": 13433.9, "Tail Read Latency": 47674.3, "Median Update Latency": 13410.5, "Tail Update Latency": 57342.6}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 199957.2, "Median Read Latency": 80.1, "Tail Read Latency": 138.1, "Median Update Latency": 85.2, "Tail Update Latency": 143.2}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200034.1, "Median Read Latency": 94.9, "Tail Read Latency": 171.1, "Median Update Latency": 99.7, "Tail Update Latency": 180.0}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500007.9, "Median Read Latency": 178.2, "Tail Read Latency": 12033.8, "Median Update Latency": 168.2, "Tail Update Latency": 12001.3}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 497856.1, "Median Read Latency": 4145.0, "Tail Read Latency": 226546.5, "Median Update Latency": 3441.9, "Tail Update Latency": 227252.1}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300033.4, "Median Read Latency": 104.2, "Tail Read Latency": 268.6, "Median Update Latency": 109.9, "Tail Update Latency": 279.2}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 568855.7, "Median Read Latency": 9605.0, "Tail Read Latency": 232225.5, "Median Update Latency": 9612.0, "Tail Update Latency": 232598.3}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200009.7, "Median Read Latency": 88.0, "Tail Read Latency": 239.6, "Median Update Latency": 94.3, "Tail Update Latency": 241.1}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 570228.2, "Median Read Latency": 6570.1, "Tail Read Latency": 225938.3, "Median Update Latency": 6319.2, "Tail Update Latency": 221840.9}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 299969.9, "Median Read 
Latency": 90.6, "Tail Read Latency": 349.4, "Median Update Latency": 96.7, "Tail Update Latency": 363.3}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 620657.5, "Median Read Latency": 5477.7, "Tail Read Latency": 231316.9, "Median Update Latency": 3529.9, "Tail Update Latency": 232065.4}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 99976.6, "Median Read Latency": 92.6, "Tail Read Latency": 140.5, "Median Update Latency": 97.5, "Tail Update Latency": 149.4}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400042.8, "Median Read Latency": 117.3, "Tail Read Latency": 1777.2, "Median Update Latency": 123.8, "Tail Update Latency": 1476.7}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100038.9, "Median Read Latency": 79.2, "Tail Read Latency": 122.3, "Median Update Latency": 85.3, "Tail Update Latency": 130.3}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 680848.2, "Median Read Latency": 6171.2, "Tail Read Latency": 227595.7, "Median Update Latency": 6211.2, "Tail Update Latency": 226364.4}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400014.0, "Median Read Latency": 113.7, "Tail Read Latency": 861.3, "Median Update Latency": 120.8, "Tail Update Latency": 910.6}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 299995.0, "Median Read Latency": 99.8, "Tail Read Latency": 460.1, "Median Update Latency": 105.9, "Tail Update Latency": 467.5}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499364.1, "Median Read Latency": 2224.1, "Tail Read Latency": 226096.6, "Median Update Latency": 3068.3, "Tail Update Latency": 225018.1}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500025.6, "Median Read Latency": 313.0, "Tail Read Latency": 11828.0, "Median Update Latency": 321.6, "Tail Update Latency": 11853.5}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual 
QPS": 620927.4, "Median Read Latency": 24535.1, "Tail Read Latency": 257784.9, "Median Update Latency": 24537.7, "Tail Update Latency": 255864.2}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 602632.4, "Median Read Latency": 13072.1, "Tail Read Latency": 61232.2, "Median Update Latency": 13168.5, "Tail Update Latency": 92245.5}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199927.4, "Median Read Latency": 89.4, "Tail Read Latency": 245.8, "Median Update Latency": 95.9, "Tail Update Latency": 254.7}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 595090.7, "Median Read Latency": 16183.6, "Tail Read Latency": 247979.2, "Median Update Latency": 16252.1, "Tail Update Latency": 250173.3}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 618956.2, "Median Read Latency": 6020.9, "Tail Read Latency": 229495.0, "Median Update Latency": 415.4, "Tail Update Latency": 230616.9}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399956.4, "Median Read Latency": 104.6, "Tail Read Latency": 502.8, "Median Update Latency": 109.0, "Tail Update Latency": 496.5}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 299923.6, "Median Read Latency": 88.0, "Tail Read Latency": 180.5, "Median Update Latency": 93.2, "Tail Update Latency": 186.6}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300010.4, "Median Read Latency": 99.9, "Tail Read Latency": 441.2, "Median Update Latency": 105.4, "Tail Update Latency": 423.4}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300014.0, "Median Read Latency": 97.4, "Tail Read Latency": 219.0, "Median Update Latency": 101.9, "Tail Update Latency": 236.6}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499459.5, "Median Read Latency": 4253.1, "Tail Read Latency": 224248.7, "Median Update Latency": 3402.1, "Tail Update Latency": 224262.8}], ["forall", "memcached 
forall", {"Target QPS": 300000, "Actual QPS": 300062.5, "Median Read Latency": 91.6, "Tail Read Latency": 357.4, "Median Update Latency": 98.9, "Tail Update Latency": 364.4}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 678611.1, "Median Read Latency": 6160.8, "Tail Read Latency": 229544.6, "Median Update Latency": 5851.0, "Tail Update Latency": 229411.7}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 578256.9, "Median Read Latency": 11180.9, "Tail Read Latency": 225596.4, "Median Update Latency": 11147.2, "Tail Update Latency": 222763.9}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400003.0, "Median Read Latency": 116.8, "Tail Read Latency": 1089.4, "Median Update Latency": 122.7, "Tail Update Latency": 831.8}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300032.2, "Median Read Latency": 90.3, "Tail Read Latency": 348.7, "Median Update Latency": 96.4, "Tail Update Latency": 350.9}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200064.6, "Median Read Latency": 78.7, "Tail Read Latency": 128.0, "Median Update Latency": 84.5, "Tail Update Latency": 138.0}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 399974.1, "Median Read Latency": 115.0, "Tail Read Latency": 1410.0, "Median Update Latency": 120.0, "Tail Update Latency": 1261.5}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100010.9, "Median Read Latency": 85.4, "Tail Read Latency": 160.3, "Median Update Latency": 91.7, "Tail Update Latency": 173.0}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400024.5, "Median Read Latency": 106.4, "Tail Read Latency": 505.2, "Median Update Latency": 111.2, "Tail Update Latency": 491.0}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300011.7, "Median Read Latency": 87.3, "Tail Read Latency": 178.7, "Median Update Latency": 92.3, "Tail Update Latency": 184.7}], 
["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 99988.9, "Median Read Latency": 93.3, "Tail Read Latency": 140.6, "Median Update Latency": 98.0, "Tail Update Latency": 150.8}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 299993.4, "Median Read Latency": 87.5, "Tail Read Latency": 181.0, "Median Update Latency": 93.2, "Tail Update Latency": 182.8}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 617912.5, "Median Read Latency": 25810.6, "Tail Read Latency": 252464.1, "Median Update Latency": 25674.3, "Tail Update Latency": 251736.1}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 625701.7, "Median Read Latency": 22503.9, "Tail Read Latency": 259542.7, "Median Update Latency": 22468.4, "Tail Update Latency": 267154.9}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499785.2, "Median Read Latency": 6496.3, "Tail Read Latency": 222659.0, "Median Update Latency": 6234.7, "Tail Update Latency": 222262.0}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 572021.8, "Median Read Latency": 9548.6, "Tail Read Latency": 233766.8, "Median Update Latency": 9524.3, "Tail Update Latency": 236551.7}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200009.0, "Median Read Latency": 95.4, "Tail Read Latency": 175.8, "Median Update Latency": 100.7, "Tail Update Latency": 190.9}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200027.1, "Median Read Latency": 78.6, "Tail Read Latency": 126.6, "Median Update Latency": 83.1, "Tail Update Latency": 129.1}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 557369.8, "Median Read Latency": 16483.1, "Tail Read Latency": 253742.7, "Median Update Latency": 16400.4, "Tail Update Latency": 255241.9}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499805.4, "Median Read Latency": 6508.7, "Tail Read Latency": 221536.4, "Median Update Latency": 
6509.6, "Tail Update Latency": 223234.9}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300035.4, "Median Read Latency": 100.2, "Tail Read Latency": 457.3, "Median Update Latency": 106.8, "Tail Update Latency": 525.7}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300050.2, "Median Read Latency": 96.4, "Tail Read Latency": 213.9, "Median Update Latency": 100.7, "Tail Update Latency": 220.0}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499885.9, "Median Read Latency": 7063.8, "Tail Read Latency": 214747.3, "Median Update Latency": 6995.4, "Tail Update Latency": 219631.2}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 609226.7, "Median Read Latency": 14494.9, "Tail Read Latency": 237210.7, "Median Update Latency": 14428.8, "Tail Update Latency": 235805.5}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 569466.3, "Median Read Latency": 511.4, "Tail Read Latency": 226153.5, "Median Update Latency": 364.6, "Tail Update Latency": 224980.5}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499908.5, "Median Read Latency": 5780.3, "Tail Read Latency": 221614.4, "Median Update Latency": 5583.2, "Tail Update Latency": 223730.2}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 564939.5, "Median Read Latency": 9668.2, "Tail Read Latency": 232075.5, "Median Update Latency": 9684.1, "Tail Update Latency": 233548.1}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100060.1, "Median Read Latency": 78.8, "Tail Read Latency": 121.0, "Median Update Latency": 84.6, "Tail Update Latency": 130.7}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199988.8, "Median Read Latency": 88.0, "Tail Read Latency": 242.2, "Median Update Latency": 95.1, "Tail Update Latency": 263.1}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 619177.1, "Median Read Latency": 5783.8, "Tail Read 
Latency": 230993.8, "Median Update Latency": 5783.0, "Tail Update Latency": 228857.0}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 591662.2, "Median Read Latency": 13386.9, "Tail Read Latency": 50085.4, "Median Update Latency": 13368.7, "Tail Update Latency": 44908.7}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100031.6, "Median Read Latency": 78.6, "Tail Read Latency": 120.5, "Median Update Latency": 84.0, "Tail Update Latency": 128.7}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 578665.8, "Median Read Latency": 12495.4, "Tail Read Latency": 71938.0, "Median Update Latency": 12460.8, "Tail Update Latency": 134007.8}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 593825.5, "Median Read Latency": 13381.8, "Tail Read Latency": 42648.3, "Median Update Latency": 13428.1, "Tail Update Latency": 52034.5}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400058.1, "Median Read Latency": 107.0, "Tail Read Latency": 542.8, "Median Update Latency": 112.4, "Tail Update Latency": 534.6}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100023.5, "Median Read Latency": 86.3, "Tail Read Latency": 167.1, "Median Update Latency": 93.4, "Tail Update Latency": 177.1}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500013.8, "Median Read Latency": 427.0, "Tail Read Latency": 11978.9, "Median Update Latency": 1271.9, "Tail Update Latency": 12021.7}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200028.8, "Median Read Latency": 79.0, "Tail Read Latency": 128.9, "Median Update Latency": 84.0, "Tail Update Latency": 135.6}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 622974.3, "Median Read Latency": 25949.6, "Tail Read Latency": 253985.8, "Median Update Latency": 26016.2, "Tail Update Latency": 256999.9}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 
498679.6, "Median Read Latency": 2696.6, "Tail Read Latency": 224780.0, "Median Update Latency": 3725.6, "Tail Update Latency": 225606.8}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300005.0, "Median Read Latency": 87.2, "Tail Read Latency": 181.0, "Median Update Latency": 91.9, "Tail Update Latency": 190.0}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100054.4, "Median Read Latency": 78.9, "Tail Read Latency": 121.5, "Median Update Latency": 84.5, "Tail Update Latency": 128.5}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 594765.9, "Median Read Latency": 13457.0, "Tail Read Latency": 57014.3, "Median Update Latency": 13535.6, "Tail Update Latency": 62271.0}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100045.0, "Median Read Latency": 94.0, "Tail Read Latency": 147.8, "Median Update Latency": 98.6, "Tail Update Latency": 155.2}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 570755.1, "Median Read Latency": 1747.1, "Tail Read Latency": 229720.5, "Median Update Latency": 376.1, "Tail Update Latency": 230143.6}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400028.9, "Median Read Latency": 113.5, "Tail Read Latency": 883.0, "Median Update Latency": 119.6, "Tail Update Latency": 930.9}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499161.2, "Median Read Latency": 4035.8, "Tail Read Latency": 226041.1, "Median Update Latency": 4145.7, "Tail Update Latency": 227144.8}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200004.9, "Median Read Latency": 79.6, "Tail Read Latency": 128.8, "Median Update Latency": 84.8, "Tail Update Latency": 138.1}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499892.3, "Median Read Latency": 6412.0, "Tail Read Latency": 220894.5, "Median Update Latency": 6356.1, "Tail Update Latency": 222058.1}], ["vanilla", "memcached vanilla", 
{"Target QPS": 600000, "Actual QPS": 569172.7, "Median Read Latency": 1014.0, "Tail Read Latency": 227912.8, "Median Update Latency": 299.3, "Tail Update Latency": 226796.7}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 592497.5, "Median Read Latency": 13366.1, "Tail Read Latency": 50141.0, "Median Update Latency": 13458.6, "Tail Update Latency": 64470.4}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400035.6, "Median Read Latency": 114.4, "Tail Read Latency": 884.8, "Median Update Latency": 121.0, "Tail Update Latency": 883.7}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499062.3, "Median Read Latency": 2907.3, "Tail Read Latency": 226259.2, "Median Update Latency": 3329.0, "Tail Update Latency": 226354.8}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300127.5, "Median Read Latency": 104.7, "Tail Read Latency": 266.2, "Median Update Latency": 109.1, "Tail Update Latency": 269.6}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300046.9, "Median Read Latency": 88.4, "Tail Read Latency": 182.9, "Median Update Latency": 93.4, "Tail Update Latency": 188.3}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 585332.5, "Median Read Latency": 13581.7, "Tail Read Latency": 41485.9, "Median Update Latency": 13562.4, "Tail Update Latency": 42725.2}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/memcd.updt
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/memcd.updt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/data/memcd.updt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,1 @@
+[["fibre", "memcached fibre", {"Update Ratio": 30, "Actual QPS": 629656.2, "Average Read Latency": 27319.8, "Median Read Latency": 22952.7, "Tail Read Latency": 246885.9, "Average Update Latency": 27991.9, "Median Update Latency": 23033.7, "Tail Update Latency": 252283.9}], ["fibre", "memcached fibre", {"Update Ratio": 20, "Actual QPS": 648887.0, "Average Read Latency": 30926.5, "Median Read Latency": 26547.5, "Tail Read Latency": 245170.2, "Average Update Latency": 31078.3, "Median Update Latency": 26466.1, "Tail Update Latency": 245702.3}], ["fibre", "memcached fibre", {"Update Ratio": 20, "Actual QPS": 659773.8, "Average Read Latency": 25337.6, "Median Read Latency": 20743.6, "Tail Read Latency": 250895.8, "Average Update Latency": 25691.3, "Median Update Latency": 20786.0, "Tail Update Latency": 252716.3}], ["forall", "memcached forall", {"Update Ratio": 20, "Actual QPS": 593242.6, "Average Read Latency": 16959.6, "Median Read Latency": 13191.5, "Tail Read Latency": 35809.5, "Average Update Latency": 17359.4, "Median Update Latency": 13176.7, "Tail Update Latency": 39772.7}], ["fibre", "memcached fibre", {"Update Ratio": 30, "Actual QPS": 632062.8, "Average Read Latency": 32964.0, "Median Read Latency": 28637.6, "Tail Read Latency": 239904.1, "Average Update Latency": 33330.3, "Median Update Latency": 28653.8, "Tail Update Latency": 245537.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 30, "Actual QPS": 739300.3, "Average Read Latency": 24611.8, "Median Read Latency": 15550.2, "Tail Read Latency": 250448.4, "Average Update Latency": 24234.4, "Median Update Latency": 15504.0, "Tail Update Latency": 249174.1}], ["fibre", "memcached fibre", {"Update Ratio": 30, "Actual QPS": 635540.7, "Average Read Latency": 26437.0, "Median Read Latency": 21871.7, "Tail Read Latency": 249677.1, "Average Update Latency": 27007.6, "Median Update Latency": 21919.5, "Tail Update Latency": 254080.7}], ["fibre", "memcached fibre", {"Update Ratio": 20, "Actual QPS": 662431.5, 
"Average Read Latency": 28397.9, "Median Read Latency": 22980.7, "Tail Read Latency": 252279.7, "Average Update Latency": 28848.1, "Median Update Latency": 22954.0, "Tail Update Latency": 256070.8}], ["vanilla", "memcached vanilla", {"Update Ratio": 5, "Actual QPS": 771468.6, "Average Read Latency": 21535.3, "Median Read Latency": 11447.2, "Tail Read Latency": 239536.3, "Average Update Latency": 21420.0, "Median Update Latency": 11457.2, "Tail Update Latency": 239594.8}], ["fibre", "memcached fibre", {"Update Ratio": 10, "Actual QPS": 671082.4, "Average Read Latency": 30220.6, "Median Read Latency": 24900.7, "Tail Read Latency": 254034.5, "Average Update Latency": 30377.6, "Median Update Latency": 24913.7, "Tail Update Latency": 255033.5}], ["fibre", "memcached fibre", {"Update Ratio": 10, "Actual QPS": 672847.0, "Average Read Latency": 31129.7, "Median Read Latency": 27301.0, "Tail Read Latency": 92314.4, "Average Update Latency": 31493.5, "Median Update Latency": 27378.8, "Tail Update Latency": 222712.9}], ["fibre", "memcached fibre", {"Update Ratio": 5, "Actual QPS": 684419.5, "Average Read Latency": 34114.6, "Median Read Latency": 29765.8, "Tail Read Latency": 257535.6, "Average Update Latency": 34848.1, "Median Update Latency": 29566.2, "Tail Update Latency": 264376.6}], ["vanilla", "memcached vanilla", {"Update Ratio": 10, "Actual QPS": 764493.6, "Average Read Latency": 22398.2, "Median Read Latency": 12207.4, "Tail Read Latency": 242402.3, "Average Update Latency": 22038.2, "Median Update Latency": 12234.9, "Tail Update Latency": 240284.4}], ["fibre", "memcached fibre", {"Update Ratio": 40, "Actual QPS": 608626.8, "Average Read Latency": 30286.6, "Median Read Latency": 25633.0, "Tail Read Latency": 240653.7, "Average Update Latency": 30652.8, "Median Update Latency": 25603.9, "Tail Update Latency": 246478.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 50, "Actual QPS": 686252.6, "Average Read Latency": 31374.4, "Median Read Latency": 30501.6, "Tail 
Read Latency": 63738.2, "Average Update Latency": 31347.0, "Median Update Latency": 30428.1, "Tail Update Latency": 63812.1}], ["forall", "memcached forall", {"Update Ratio": 10, "Actual QPS": 595821.8, "Average Read Latency": 16941.1, "Median Read Latency": 13280.7, "Tail Read Latency": 36355.4, "Average Update Latency": 17445.3, "Median Update Latency": 13307.1, "Tail Update Latency": 39244.2}], ["forall", "memcached forall", {"Update Ratio": 15, "Actual QPS": 590278.9, "Average Read Latency": 16989.3, "Median Read Latency": 13312.9, "Tail Read Latency": 37996.6, "Average Update Latency": 17547.9, "Median Update Latency": 13344.8, "Tail Update Latency": 39578.1}], ["fibre", "memcached fibre", {"Update Ratio": 40, "Actual QPS": 619433.4, "Average Read Latency": 30159.8, "Median Read Latency": 25357.0, "Tail Read Latency": 251306.7, "Average Update Latency": 30411.0, "Median Update Latency": 25361.7, "Tail Update Latency": 254805.8}], ["forall", "memcached forall", {"Update Ratio": 5, "Actual QPS": 593761.3, "Average Read Latency": 16959.6, "Median Read Latency": 13400.4, "Tail Read Latency": 47335.2, "Average Update Latency": 17053.7, "Median Update Latency": 13376.7, "Tail Update Latency": 42239.3}], ["forall", "memcached forall", {"Update Ratio": 10, "Actual QPS": 597040.0, "Average Read Latency": 17352.0, "Median Read Latency": 12940.7, "Tail Read Latency": 65486.0, "Average Update Latency": 17545.6, "Median Update Latency": 12969.6, "Tail Update Latency": 92147.2}], ["forall", "memcached forall", {"Update Ratio": 40, "Actual QPS": 587327.3, "Average Read Latency": 14420.7, "Median Read Latency": 13053.8, "Tail Read Latency": 28889.7, "Average Update Latency": 14766.7, "Median Update Latency": 13061.8, "Tail Update Latency": 29130.7}], ["fibre", "memcached fibre", {"Update Ratio": 50, "Actual QPS": 619688.5, "Average Read Latency": 26716.2, "Median Read Latency": 22403.5, "Tail Read Latency": 245740.0, "Average Update Latency": 27090.3, "Median Update Latency": 
22366.9, "Tail Update Latency": 248135.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 5, "Actual QPS": 765840.6, "Average Read Latency": 22396.6, "Median Read Latency": 12223.6, "Tail Read Latency": 242722.6, "Average Update Latency": 23164.3, "Median Update Latency": 12122.3, "Tail Update Latency": 246732.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 15, "Actual QPS": 754242.6, "Average Read Latency": 22345.6, "Median Read Latency": 13000.6, "Tail Read Latency": 242559.2, "Average Update Latency": 21803.8, "Median Update Latency": 13027.6, "Tail Update Latency": 240360.2}], ["vanilla", "memcached vanilla", {"Update Ratio": 20, "Actual QPS": 750259.1, "Average Read Latency": 22937.1, "Median Read Latency": 13719.7, "Tail Read Latency": 246024.2, "Average Update Latency": 23195.7, "Median Update Latency": 13716.5, "Tail Update Latency": 247084.9}], ["vanilla", "memcached vanilla", {"Update Ratio": 10, "Actual QPS": 761986.1, "Average Read Latency": 21767.5, "Median Read Latency": 12522.9, "Tail Read Latency": 240214.6, "Average Update Latency": 21621.3, "Median Update Latency": 12541.2, "Tail Update Latency": 239749.6}], ["forall", "memcached forall", {"Update Ratio": 50, "Actual QPS": 566851.8, "Average Read Latency": 14265.5, "Median Read Latency": 13544.1, "Tail Read Latency": 27746.8, "Average Update Latency": 14507.3, "Median Update Latency": 13553.5, "Tail Update Latency": 28471.6}], ["vanilla", "memcached vanilla", {"Update Ratio": 50, "Actual QPS": 708028.0, "Average Read Latency": 29145.3, "Median Read Latency": 23983.1, "Tail Read Latency": 237401.9, "Average Update Latency": 29124.0, "Median Update Latency": 24027.6, "Tail Update Latency": 235432.4}], ["forall", "memcached forall", {"Update Ratio": 5, "Actual QPS": 597227.5, "Average Read Latency": 17511.6, "Median Read Latency": 13228.9, "Tail Read Latency": 58767.4, "Average Update Latency": 18215.1, "Median Update Latency": 13249.5, "Tail Update Latency": 81599.3}], ["fibre", 
"memcached fibre", {"Update Ratio": 50, "Actual QPS": 599839.9, "Average Read Latency": 26555.1, "Median Read Latency": 21691.3, "Tail Read Latency": 245066.2, "Average Update Latency": 27096.6, "Median Update Latency": 21758.1, "Tail Update Latency": 248899.9}], ["forall", "memcached forall", {"Update Ratio": 15, "Actual QPS": 594852.2, "Average Read Latency": 17337.0, "Median Read Latency": 13137.9, "Tail Read Latency": 37979.0, "Average Update Latency": 17635.4, "Median Update Latency": 13176.4, "Tail Update Latency": 48130.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 40, "Actual QPS": 718833.1, "Average Read Latency": 27863.0, "Median Read Latency": 22526.4, "Tail Read Latency": 245434.6, "Average Update Latency": 27699.5, "Median Update Latency": 22539.8, "Tail Update Latency": 243109.8}], ["fibre", "memcached fibre", {"Update Ratio": 40, "Actual QPS": 628507.6, "Average Read Latency": 28670.7, "Median Read Latency": 24369.4, "Tail Read Latency": 249240.9, "Average Update Latency": 28858.2, "Median Update Latency": 24429.6, "Tail Update Latency": 249891.7}], ["fibre", "memcached fibre", {"Update Ratio": 10, "Actual QPS": 666251.9, "Average Read Latency": 28297.4, "Median Read Latency": 24552.7, "Tail Read Latency": 76372.4, "Average Update Latency": 28566.0, "Median Update Latency": 24466.9, "Tail Update Latency": 90321.1}], ["vanilla", "memcached vanilla", {"Update Ratio": 20, "Actual QPS": 752354.4, "Average Read Latency": 23536.8, "Median Read Latency": 13936.7, "Tail Read Latency": 248639.8, "Average Update Latency": 23798.2, "Median Update Latency": 14040.6, "Tail Update Latency": 247961.7}], ["forall", "memcached forall", {"Update Ratio": 50, "Actual QPS": 565950.3, "Average Read Latency": 14232.6, "Median Read Latency": 13373.6, "Tail Read Latency": 26848.2, "Average Update Latency": 14466.4, "Median Update Latency": 13384.9, "Tail Update Latency": 27958.6}], ["fibre", "memcached fibre", {"Update Ratio": 15, "Actual QPS": 674044.3, "Average 
Read Latency": 29547.1, "Median Read Latency": 23376.7, "Tail Read Latency": 257169.3, "Average Update Latency": 30137.2, "Median Update Latency": 23423.1, "Tail Update Latency": 262729.5}], ["forall", "memcached forall", {"Update Ratio": 10, "Actual QPS": 589565.9, "Average Read Latency": 16941.2, "Median Read Latency": 13278.3, "Tail Read Latency": 40040.0, "Average Update Latency": 17308.7, "Median Update Latency": 13285.2, "Tail Update Latency": 44209.1}], ["fibre", "memcached fibre", {"Update Ratio": 50, "Actual QPS": 609521.3, "Average Read Latency": 31260.8, "Median Read Latency": 26017.5, "Tail Read Latency": 255078.0, "Average Update Latency": 31635.9, "Median Update Latency": 26086.2, "Tail Update Latency": 256862.8}], ["vanilla", "memcached vanilla", {"Update Ratio": 5, "Actual QPS": 767111.6, "Average Read Latency": 21590.9, "Median Read Latency": 11566.7, "Tail Read Latency": 239628.7, "Average Update Latency": 21399.7, "Median Update Latency": 11585.0, "Tail Update Latency": 239823.6}], ["forall", "memcached forall", {"Update Ratio": 5, "Actual QPS": 599244.3, "Average Read Latency": 17045.5, "Median Read Latency": 13243.6, "Tail Read Latency": 48650.1, "Average Update Latency": 16998.4, "Median Update Latency": 13280.5, "Tail Update Latency": 49466.4}], ["forall", "memcached forall", {"Update Ratio": 15, "Actual QPS": 592856.6, "Average Read Latency": 16172.7, "Median Read Latency": 13278.0, "Tail Read Latency": 31872.0, "Average Update Latency": 16571.3, "Median Update Latency": 13296.0, "Tail Update Latency": 33983.7}], ["fibre", "memcached fibre", {"Update Ratio": 15, "Actual QPS": 662998.4, "Average Read Latency": 26400.5, "Median Read Latency": 22183.5, "Tail Read Latency": 247443.9, "Average Update Latency": 26822.0, "Median Update Latency": 22267.6, "Tail Update Latency": 250068.2}], ["vanilla", "memcached vanilla", {"Update Ratio": 20, "Actual QPS": 748345.8, "Average Read Latency": 23985.2, "Median Read Latency": 14548.6, "Tail Read 
Latency": 249111.8, "Average Update Latency": 24053.4, "Median Update Latency": 14609.1, "Tail Update Latency": 249310.7}], ["fibre", "memcached fibre", {"Update Ratio": 5, "Actual QPS": 685611.1, "Average Read Latency": 33820.8, "Median Read Latency": 29843.9, "Tail Read Latency": 249003.5, "Average Update Latency": 34612.2, "Median Update Latency": 29937.2, "Tail Update Latency": 265505.6}], ["fibre", "memcached fibre", {"Update Ratio": 5, "Actual QPS": 685539.2, "Average Read Latency": 37016.3, "Median Read Latency": 32225.9, "Tail Read Latency": 263388.6, "Average Update Latency": 37284.7, "Median Update Latency": 32289.9, "Tail Update Latency": 262337.2}], ["forall", "memcached forall", {"Update Ratio": 30, "Actual QPS": 591380.5, "Average Read Latency": 17979.5, "Median Read Latency": 13499.7, "Tail Read Latency": 33487.1, "Average Update Latency": 18303.9, "Median Update Latency": 13523.5, "Tail Update Latency": 38051.2}], ["forall", "memcached forall", {"Update Ratio": 20, "Actual QPS": 593734.3, "Average Read Latency": 16688.8, "Median Read Latency": 13365.9, "Tail Read Latency": 30994.1, "Average Update Latency": 16892.2, "Median Update Latency": 13374.8, "Tail Update Latency": 32244.0}], ["vanilla", "memcached vanilla", {"Update Ratio": 10, "Actual QPS": 761087.3, "Average Read Latency": 22618.8, "Median Read Latency": 12952.0, "Tail Read Latency": 244425.0, "Average Update Latency": 22239.2, "Median Update Latency": 12910.4, "Tail Update Latency": 241797.4}], ["vanilla", "memcached vanilla", {"Update Ratio": 50, "Actual QPS": 689123.1, "Average Read Latency": 31148.1, "Median Read Latency": 29977.4, "Tail Read Latency": 65004.9, "Average Update Latency": 31122.1, "Median Update Latency": 30043.8, "Tail Update Latency": 64488.0}], ["forall", "memcached forall", {"Update Ratio": 20, "Actual QPS": 586549.0, "Average Read Latency": 15286.4, "Median Read Latency": 13246.3, "Tail Read Latency": 30564.7, "Average Update Latency": 16016.0, "Median Update 
Latency": 13256.5, "Tail Update Latency": 32001.1}], ["vanilla", "memcached vanilla", {"Update Ratio": 15, "Actual QPS": 759348.9, "Average Read Latency": 22876.0, "Median Read Latency": 12931.3, "Tail Read Latency": 244584.8, "Average Update Latency": 22772.0, "Median Update Latency": 12963.1, "Tail Update Latency": 244540.3}], ["forall", "memcached forall", {"Update Ratio": 30, "Actual QPS": 590171.0, "Average Read Latency": 17542.8, "Median Read Latency": 13058.2, "Tail Read Latency": 37919.8, "Average Update Latency": 17986.1, "Median Update Latency": 13075.7, "Tail Update Latency": 47181.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 30, "Actual QPS": 736755.8, "Average Read Latency": 25724.9, "Median Read Latency": 17214.0, "Tail Read Latency": 251561.3, "Average Update Latency": 25647.4, "Median Update Latency": 17123.3, "Tail Update Latency": 251594.7}], ["forall", "memcached forall", {"Update Ratio": 40, "Actual QPS": 594613.0, "Average Read Latency": 15627.5, "Median Read Latency": 13012.1, "Tail Read Latency": 29321.6, "Average Update Latency": 15866.2, "Median Update Latency": 13034.3, "Tail Update Latency": 29997.2}], ["vanilla", "memcached vanilla", {"Update Ratio": 15, "Actual QPS": 753799.1, "Average Read Latency": 22983.2, "Median Read Latency": 13361.2, "Tail Read Latency": 245494.2, "Average Update Latency": 23014.0, "Median Update Latency": 13407.2, "Tail Update Latency": 245242.0}], ["fibre", "memcached fibre", {"Update Ratio": 15, "Actual QPS": 659243.2, "Average Read Latency": 27600.4, "Median Read Latency": 23642.4, "Tail Read Latency": 75626.1, "Average Update Latency": 28038.3, "Median Update Latency": 23443.4, "Tail Update Latency": 135871.1}], ["forall", "memcached forall", {"Update Ratio": 50, "Actual QPS": 583513.8, "Average Read Latency": 15354.0, "Median Read Latency": 13050.8, "Tail Read Latency": 29195.2, "Average Update Latency": 15693.2, "Median Update Latency": 13051.2, "Tail Update Latency": 29477.5}], ["forall", 
"memcached forall", {"Update Ratio": 40, "Actual QPS": 588410.8, "Average Read Latency": 17002.9, "Median Read Latency": 13316.4, "Tail Read Latency": 29604.6, "Average Update Latency": 17357.7, "Median Update Latency": 13320.0, "Tail Update Latency": 31441.1}], ["vanilla", "memcached vanilla", {"Update Ratio": 40, "Actual QPS": 716262.2, "Average Read Latency": 28975.9, "Median Read Latency": 25327.0, "Tail Read Latency": 228397.2, "Average Update Latency": 29063.3, "Median Update Latency": 25431.2, "Tail Update Latency": 228666.6}], ["vanilla", "memcached vanilla", {"Update Ratio": 30, "Actual QPS": 742447.9, "Average Read Latency": 24479.8, "Median Read Latency": 14438.4, "Tail Read Latency": 251570.6, "Average Update Latency": 24354.2, "Median Update Latency": 14496.9, "Tail Update Latency": 250835.7}], ["forall", "memcached forall", {"Update Ratio": 30, "Actual QPS": 583473.1, "Average Read Latency": 15821.5, "Median Read Latency": 13189.8, "Tail Read Latency": 30907.0, "Average Update Latency": 16668.2, "Median Update Latency": 13208.3, "Tail Update Latency": 33617.6}], ["vanilla", "memcached vanilla", {"Update Ratio": 40, "Actual QPS": 735416.8, "Average Read Latency": 24244.3, "Median Read Latency": 13281.7, "Tail Read Latency": 254833.8, "Average Update Latency": 24481.9, "Median Update Latency": 13318.5, "Tail Update Latency": 254867.7}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/yield.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/yield.jax	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/data/yield.jax	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,1 @@
+[["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 2400", {"Duration (ms)": 10030.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 415071979.0, "Ops per second": 41507197.0, "ns per ops": 24.0, "Ops per threads": 172946.0, "Ops per procs": 17294665.0, "Ops/sec/procs": 1729466.0, "ns per ops/procs": 579.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 2400", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 868181095.0, "Ops per second": 86818109.5, "ns per ops": 11.53, "Ops per threads": 361742.0, "Ops per procs": 36174212.0, "Ops/sec/procs": 3617421.23, "ns per ops/procs": 276.64}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 100", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 90502272.0, "Ops per second": 9050227.0, "ns per ops": 111.0, "Ops per threads": 905022.0, "Ops per procs": 90502272.0, "Ops/sec/procs": 9050227.0, "ns per ops/procs": 111.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 100", {"Duration (ms)": 10008.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 100521509.0, "Ops per second": 10043710.22, "ns per ops": 99.56, "Ops per threads": 1005215.0, "Ops per procs": 100521509.0, "Ops/sec/procs": 10043710.22, "ns per ops/procs": 99.56}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 800", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 335861578.0, "Ops per second": 33586157.8, "ns per ops": 29.84, "Ops per threads": 419826.0, "Ops per procs": 41982697.0, "Ops/sec/procs": 4198269.72, "ns per ops/procs": 238.69}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 800", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 336458246.0, "Ops per second": 33645824.0, "ns per ops": 30.0, "Ops per threads": 420572.0, "Ops per 
procs": 42057280.0, "Ops/sec/procs": 4205728.0, "ns per ops/procs": 240.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 2400", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 863132609.0, "Ops per second": 86313260.9, "ns per ops": 11.59, "Ops per threads": 359638.0, "Ops per procs": 35963858.0, "Ops/sec/procs": 3596385.87, "ns per ops/procs": 278.27}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 1600", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 644981830.0, "Ops per second": 64498183.0, "ns per ops": 15.54, "Ops per threads": 403113.0, "Ops per procs": 40311364.0, "Ops/sec/procs": 4031136.44, "ns per ops/procs": 248.71}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 1600", {"Duration (ms)": 10022.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 1001843527.0, "Ops per second": 99961538.2, "ns per ops": 10.0, "Ops per threads": 626152.0, "Ops per procs": 62615220.0, "Ops/sec/procs": 6247596.14, "ns per ops/procs": 160.06}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 2400", {"Duration (ms)": 10021.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 1448950383.0, "Ops per second": 144584592.02, "ns per ops": 6.92, "Ops per threads": 603729.0, "Ops per procs": 60372932.0, "Ops/sec/procs": 6024358.0, "ns per ops/procs": 165.99}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 100", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 67367876.0, "Ops per second": 6736787.6, "ns per ops": 148.69, "Ops per threads": 673678.0, "Ops per procs": 67367876.0, "Ops/sec/procs": 6736787.6, "ns per ops/procs": 148.69}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 1600", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 635515991.0, "Ops per 
second": 63551599.1, "ns per ops": 15.77, "Ops per threads": 397197.0, "Ops per procs": 39719749.0, "Ops/sec/procs": 3971974.94, "ns per ops/procs": 252.34}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 1600", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 647292406.0, "Ops per second": 64729240.0, "ns per ops": 15.0, "Ops per threads": 404557.0, "Ops per procs": 40455775.0, "Ops/sec/procs": 4045577.0, "ns per ops/procs": 247.0}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 800", {"Duration (ms)": 10002.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 32040808.0, "Ops per second": 3203363.64, "ns per ops": 312.17, "Ops per threads": 40051.0, "Ops per procs": 4005101.0, "Ops/sec/procs": 400420.45, "ns per ops/procs": 2497.37}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 2400", {"Duration (ms)": 10003.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 23453819.0, "Ops per second": 2344620.16, "ns per ops": 426.51, "Ops per threads": 9772.0, "Ops per procs": 977242.0, "Ops/sec/procs": 97692.51, "ns per ops/procs": 10236.2}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 100", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 67463939.0, "Ops per second": 6746393.9, "ns per ops": 148.47, "Ops per threads": 674639.0, "Ops per procs": 67463939.0, "Ops/sec/procs": 6746393.9, "ns per ops/procs": 148.47}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 800", {"Duration (ms)": 10009.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 447365602.0, "Ops per second": 44693347.97, "ns per ops": 22.37, "Ops per threads": 559207.0, "Ops per procs": 55920700.0, "Ops/sec/procs": 5586668.5, "ns per ops/procs": 179.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 1600", {"Duration (ms)": 10022.0, "Number of processors": 16.0, "Number 
of threads": 1600.0, "Total Operations(ops)": 1004323666.0, "Ops per second": 100208984.11, "ns per ops": 9.98, "Ops per threads": 627702.0, "Ops per procs": 62770229.0, "Ops/sec/procs": 6263061.51, "ns per ops/procs": 159.67}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 2400", {"Duration (ms)": 10022.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 1438830223.0, "Ops per second": 143560196.99, "ns per ops": 6.97, "Ops per threads": 599512.0, "Ops per procs": 59951259.0, "Ops/sec/procs": 5981674.87, "ns per ops/procs": 167.18}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 1600", {"Duration (ms)": 10002.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 26637182.0, "Ops per second": 2663052.8, "ns per ops": 375.51, "Ops per threads": 16648.0, "Ops per procs": 1664823.0, "Ops/sec/procs": 166440.8, "ns per ops/procs": 6008.14}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 1600", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 638755972.0, "Ops per second": 63875597.2, "ns per ops": 15.7, "Ops per threads": 399222.0, "Ops per procs": 39922248.0, "Ops/sec/procs": 3992224.83, "ns per ops/procs": 251.13}],["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 2400", {"Duration (ms)": 10005.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 430182652.0, "Ops per second": 43018265.0, "ns per ops": 23.0, "Ops per threads": 179242.0, "Ops per procs": 17924277.0, "Ops/sec/procs": 1792427.0, "ns per ops/procs": 558.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 100", {"Duration (ms)": 10008.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 100472124.0, "Ops per second": 10038792.53, "ns per ops": 99.61, "Ops per threads": 1004721.0, "Ops per procs": 100472124.0, "Ops/sec/procs": 10038792.53, "ns per ops/procs": 99.61}],["rdq-yield-go", "./rdq-yield-go 
-p 1 -d 10 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 117008332.0, "Ops per second": 11700304.5, "ns per ops": 85.47, "Ops per threads": 1170083.0, "Ops per procs": 117008332.0, "Ops/sec/procs": 11700304.5, "ns per ops/procs": 85.47}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 2400", {"Duration (ms)": 10003.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 23810780.0, "Ops per second": 2380314.39, "ns per ops": 420.11, "Ops per threads": 9921.0, "Ops per procs": 992115.0, "Ops/sec/procs": 99179.77, "ns per ops/procs": 10082.7}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 1600", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 675450930.0, "Ops per second": 67545093.0, "ns per ops": 14.0, "Ops per threads": 422156.0, "Ops per procs": 42215683.0, "Ops/sec/procs": 4221568.0, "ns per ops/procs": 236.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 100", {"Duration (ms)": 10008.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 100428805.0, "Ops per second": 10034445.55, "ns per ops": 99.66, "Ops per threads": 1004288.0, "Ops per procs": 100428805.0, "Ops/sec/procs": 10034445.55, "ns per ops/procs": 99.66}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 100", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 90311107.0, "Ops per second": 9031110.0, "ns per ops": 111.0, "Ops per threads": 903111.0, "Ops per procs": 90311107.0, "Ops/sec/procs": 9031110.0, "ns per ops/procs": 111.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 800", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 333502891.0, "Ops per second": 33350289.1, "ns per ops": 30.05, "Ops per threads": 416878.0, "Ops per procs": 41687861.0, "Ops/sec/procs": 
4168786.14, "ns per ops/procs": 240.38}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 800", {"Duration (ms)": 10002.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 32147984.0, "Ops per second": 3214143.7, "ns per ops": 311.12, "Ops per threads": 40184.0, "Ops per procs": 4018498.0, "Ops/sec/procs": 401767.96, "ns per ops/procs": 2489.0}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 1600", {"Duration (ms)": 10003.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 689066173.0, "Ops per second": 68906617.0, "ns per ops": 14.0, "Ops per threads": 430666.0, "Ops per procs": 43066635.0, "Ops/sec/procs": 4306663.0, "ns per ops/procs": 232.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 800", {"Duration (ms)": 10019.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 441418651.0, "Ops per second": 44056605.57, "ns per ops": 22.7, "Ops per threads": 551773.0, "Ops per procs": 55177331.0, "Ops/sec/procs": 5507075.7, "ns per ops/procs": 181.58}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 1600", {"Duration (ms)": 10028.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 999658036.0, "Ops per second": 99685104.34, "ns per ops": 10.03, "Ops per threads": 624786.0, "Ops per procs": 62478627.0, "Ops/sec/procs": 6230319.02, "ns per ops/procs": 160.51}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 1600", {"Duration (ms)": 10002.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 26737174.0, "Ops per second": 2672964.85, "ns per ops": 374.12, "Ops per threads": 16710.0, "Ops per procs": 1671073.0, "Ops/sec/procs": 167060.3, "ns per ops/procs": 5985.86}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 100", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 90695862.0, "Ops per second": 9069586.0, "ns per ops": 
111.0, "Ops per threads": 906958.0, "Ops per procs": 90695862.0, "Ops/sec/procs": 9069586.0, "ns per ops/procs": 111.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 2400", {"Duration (ms)": 10004.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 1440698903.0, "Ops per second": 144010330.78, "ns per ops": 6.94, "Ops per threads": 600291.0, "Ops per procs": 60029120.0, "Ops/sec/procs": 6000430.45, "ns per ops/procs": 166.65}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 2400", {"Duration (ms)": 10003.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 23253629.0, "Ops per second": 2324527.32, "ns per ops": 430.19, "Ops per threads": 9689.0, "Ops per procs": 968901.0, "Ops/sec/procs": 96855.3, "ns per ops/procs": 10324.68}],["rdq-yield-go", "./rdq-yield-go -p 1 -d 10 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 116842959.0, "Ops per second": 11683781.88, "ns per ops": 85.59, "Ops per threads": 1168429.0, "Ops per procs": 116842959.0, "Ops/sec/procs": 11683781.88, "ns per ops/procs": 85.59}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 100", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 67109099.0, "Ops per second": 6710909.9, "ns per ops": 149.26, "Ops per threads": 671090.0, "Ops per procs": 67109099.0, "Ops/sec/procs": 6710909.9, "ns per ops/procs": 149.26}],["rdq-yield-go", "./rdq-yield-go -p 1 -d 10 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 116630659.0, "Ops per second": 11662547.92, "ns per ops": 85.74, "Ops per threads": 1166306.0, "Ops per procs": 116630659.0, "Ops/sec/procs": 11662547.92, "ns per ops/procs": 85.74}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 800", {"Duration (ms)": 10009.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total 
Operations(ops)": 438143011.0, "Ops per second": 43771074.82, "ns per ops": 22.85, "Ops per threads": 547678.0, "Ops per procs": 54767876.0, "Ops/sec/procs": 5471384.35, "ns per ops/procs": 182.77}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 1600", {"Duration (ms)": 10002.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 26832358.0, "Ops per second": 2682510.65, "ns per ops": 372.79, "Ops per threads": 16770.0, "Ops per procs": 1677022.0, "Ops/sec/procs": 167656.92, "ns per ops/procs": 5964.56}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 800", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 357826049.0, "Ops per second": 35782604.0, "ns per ops": 28.0, "Ops per threads": 447282.0, "Ops per procs": 44728256.0, "Ops/sec/procs": 4472825.0, "ns per ops/procs": 225.0}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 800", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 331894279.0, "Ops per second": 33189427.0, "ns per ops": 30.0, "Ops per threads": 414867.0, "Ops per procs": 41486784.0, "Ops/sec/procs": 4148678.0, "ns per ops/procs": 243.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 800", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 335906841.0, "Ops per second": 33590684.1, "ns per ops": 29.83, "Ops per threads": 419883.0, "Ops per procs": 41988355.0, "Ops/sec/procs": 4198835.51, "ns per ops/procs": 238.65}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 800", {"Duration (ms)": 10002.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 31669651.0, "Ops per second": 3166283.11, "ns per ops": 315.83, "Ops per threads": 39587.0, "Ops per procs": 3958706.0, "Ops/sec/procs": 395785.39, "ns per ops/procs": 2526.62}],["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 2400", {"Duration (ms)": 
10005.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 431374888.0, "Ops per second": 43137488.0, "ns per ops": 23.0, "Ops per threads": 179739.0, "Ops per procs": 17973953.0, "Ops/sec/procs": 1797395.0, "ns per ops/procs": 556.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 2400", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 859381490.0, "Ops per second": 85938149.0, "ns per ops": 11.64, "Ops per threads": 358075.0, "Ops per procs": 35807562.0, "Ops/sec/procs": 3580756.21, "ns per ops/procs": 279.37}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/yield.low.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/yield.low.jax	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/data/yield.low.jax	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,1 @@
+[["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 24", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 198228606.0, "Ops per second": 19822860.6, "ns per ops": 50.49, "Ops per threads": 8259525.0, "Ops per procs": 8259525.0, "Ops/sec/procs": 825952.53, "ns per ops/procs": 1211.87}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 91771391.0, "Ops per second": 9177139.0, "ns per ops": 110.0, "Ops per threads": 91771391.0, "Ops per procs": 91771391.0, "Ops/sec/procs": 9177139.0, "ns per ops/procs": 110.0}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 16", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 1155463759.0, "Ops per second": 115546375.0, "ns per ops": 8.0, "Ops per threads": 72216484.0, "Ops per procs": 72216484.0, "Ops/sec/procs": 7221648.0, "ns per ops/procs": 139.0}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 16", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 17081410.0, "Ops per second": 1707820.12, "ns per ops": 585.54, "Ops per threads": 1067588.0, "Ops per procs": 1067588.0, "Ops/sec/procs": 106738.76, "ns per ops/procs": 9368.67}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 2151927291.0, "Ops per second": 215027860.62, "ns per ops": 4.65, "Ops per threads": 268990911.0, "Ops per procs": 268990911.0, "Ops/sec/procs": 26878482.58, "ns per ops/procs": 37.2}],["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 1281636563.0, "Ops per second": 128163656.0, "ns per ops": 7.0, "Ops per threads": 53401523.0, "Ops per 
procs": 53401523.0, "Ops/sec/procs": 5340152.0, "ns per ops/procs": 189.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 24", {"Duration (ms)": 10004.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 5317671032.0, "Ops per second": 531537767.14, "ns per ops": 1.88, "Ops per threads": 221569626.0, "Ops per procs": 221569626.0, "Ops/sec/procs": 22147406.96, "ns per ops/procs": 45.15}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 8", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 158323555.0, "Ops per second": 15832355.5, "ns per ops": 63.29, "Ops per threads": 19790444.0, "Ops per procs": 19790444.0, "Ops/sec/procs": 1979044.44, "ns per ops/procs": 506.3}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 8", {"Duration (ms)": 10001.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 27128459.0, "Ops per second": 2712537.66, "ns per ops": 368.66, "Ops per threads": 3391057.0, "Ops per procs": 3391057.0, "Ops/sec/procs": 339067.21, "ns per ops/procs": 2949.27}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 8", {"Duration (ms)": 10001.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 27155837.0, "Ops per second": 2715305.28, "ns per ops": 368.28, "Ops per threads": 3394479.0, "Ops per procs": 3394479.0, "Ops/sec/procs": 339413.16, "ns per ops/procs": 2946.26}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 16", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 211249329.0, "Ops per second": 21124932.9, "ns per ops": 47.45, "Ops per threads": 13203083.0, "Ops per procs": 13203083.0, "Ops/sec/procs": 1320308.31, "ns per ops/procs": 759.17}],["rdq-yield-go", "./rdq-yield-go -p 1 -d 10 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 85555889.0, "Ops per second": 8555216.69, "ns per ops": 
116.89, "Ops per threads": 85555889.0, "Ops per procs": 85555889.0, "Ops/sec/procs": 8555216.69, "ns per ops/procs": 116.89}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 390829055.0, "Ops per second": 39082905.0, "ns per ops": 25.0, "Ops per threads": 48853631.0, "Ops per procs": 48853631.0, "Ops/sec/procs": 4885363.0, "ns per ops/procs": 206.0}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 24", {"Duration (ms)": 10002.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 13250872.0, "Ops per second": 1324722.09, "ns per ops": 754.88, "Ops per threads": 552119.0, "Ops per procs": 552119.0, "Ops/sec/procs": 55196.75, "ns per ops/procs": 18117.01}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 16", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 1000850284.0, "Ops per second": 100085028.0, "ns per ops": 10.0, "Ops per threads": 62553142.0, "Ops per procs": 62553142.0, "Ops/sec/procs": 6255314.0, "ns per ops/procs": 161.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 24", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 184091475.0, "Ops per second": 18409147.5, "ns per ops": 54.36, "Ops per threads": 7670478.0, "Ops per procs": 7670478.0, "Ops/sec/procs": 767047.81, "ns per ops/procs": 1304.54}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 292489865.0, "Ops per second": 29227283.49, "ns per ops": 34.21, "Ops per threads": 292489865.0, "Ops per procs": 292489865.0, "Ops/sec/procs": 29227283.49, "ns per ops/procs": 34.21}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 24", {"Duration (ms)": 10002.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total 
Operations(ops)": 14063200.0, "Ops per second": 1406005.33, "ns per ops": 711.23, "Ops per threads": 585966.0, "Ops per procs": 585966.0, "Ops/sec/procs": 58583.56, "ns per ops/procs": 17069.64}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 8", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 162528481.0, "Ops per second": 16252848.1, "ns per ops": 61.66, "Ops per threads": 20316060.0, "Ops per procs": 20316060.0, "Ops/sec/procs": 2031606.01, "ns per ops/procs": 493.26}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 16", {"Duration (ms)": 10008.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 3965938528.0, "Ops per second": 396248257.59, "ns per ops": 2.52, "Ops per threads": 247871158.0, "Ops per procs": 247871158.0, "Ops/sec/procs": 24765516.1, "ns per ops/procs": 40.38}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 16", {"Duration (ms)": 10009.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 4048953063.0, "Ops per second": 404492768.97, "ns per ops": 2.47, "Ops per threads": 253059566.0, "Ops per procs": 253059566.0, "Ops/sec/procs": 25280798.06, "ns per ops/procs": 39.56}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 24", {"Duration (ms)": 10004.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 5311054327.0, "Ops per second": 530869875.38, "ns per ops": 1.88, "Ops per threads": 221293930.0, "Ops per procs": 221293930.0, "Ops/sec/procs": 22119578.14, "ns per ops/procs": 45.21}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 16", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 193683806.0, "Ops per second": 19368380.6, "ns per ops": 51.76, "Ops per threads": 12105237.0, "Ops per procs": 12105237.0, "Ops/sec/procs": 1210523.79, "ns per ops/procs": 828.15}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 16", 
{"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 16763469.0, "Ops per second": 1676051.66, "ns per ops": 596.64, "Ops per threads": 1047716.0, "Ops per procs": 1047716.0, "Ops/sec/procs": 104753.23, "ns per ops/procs": 9546.25}],["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 1250273189.0, "Ops per second": 125027318.0, "ns per ops": 8.0, "Ops per threads": 52094716.0, "Ops per procs": 52094716.0, "Ops/sec/procs": 5209471.0, "ns per ops/procs": 193.0}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 8", {"Duration (ms)": 10001.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 26753255.0, "Ops per second": 2675042.69, "ns per ops": 373.83, "Ops per threads": 3344156.0, "Ops per procs": 3344156.0, "Ops/sec/procs": 334380.34, "ns per ops/procs": 2990.61}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 16", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 1139181136.0, "Ops per second": 113918113.0, "ns per ops": 8.0, "Ops per threads": 71198821.0, "Ops per procs": 71198821.0, "Ops/sec/procs": 7119882.0, "ns per ops/procs": 141.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 8", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 148345422.0, "Ops per second": 14834542.2, "ns per ops": 67.54, "Ops per threads": 18543177.0, "Ops per procs": 18543177.0, "Ops/sec/procs": 1854317.77, "ns per ops/procs": 540.32}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 1", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 76054104.0, "Ops per second": 7605410.4, "ns per ops": 131.71, "Ops per threads": 76054104.0, "Ops per procs": 76054104.0, "Ops/sec/procs": 7605410.4, "ns per ops/procs": 
131.71}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 292635742.0, "Ops per second": 29241803.8, "ns per ops": 34.2, "Ops per threads": 292635742.0, "Ops per procs": 292635742.0, "Ops/sec/procs": 29241803.8, "ns per ops/procs": 34.2}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 24", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 5304170687.0, "Ops per second": 530361478.44, "ns per ops": 1.89, "Ops per threads": 221007111.0, "Ops per procs": 221007111.0, "Ops/sec/procs": 22098394.93, "ns per ops/procs": 45.25}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 16", {"Duration (ms)": 10009.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 4047689095.0, "Ops per second": 404367842.39, "ns per ops": 2.47, "Ops per threads": 252980568.0, "Ops per procs": 252980568.0, "Ops/sec/procs": 25272990.15, "ns per ops/procs": 39.57}],["rdq-yield-go", "./rdq-yield-go -p 1 -d 10 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 85548399.0, "Ops per second": 8554476.24, "ns per ops": 116.9, "Ops per threads": 85548399.0, "Ops per procs": 85548399.0, "Ops/sec/procs": 8554476.24, "ns per ops/procs": 116.9}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 91474395.0, "Ops per second": 9147439.0, "ns per ops": 110.0, "Ops per threads": 91474395.0, "Ops per procs": 91474395.0, "Ops/sec/procs": 9147439.0, "ns per ops/procs": 110.0}],["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 1289349577.0, "Ops per second": 128934957.0, "ns per ops": 7.0, "Ops per threads": 
53722899.0, "Ops per procs": 53722899.0, "Ops/sec/procs": 5372289.0, "ns per ops/procs": 187.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 24", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 227319382.0, "Ops per second": 22731938.2, "ns per ops": 44.02, "Ops per threads": 9471640.0, "Ops per procs": 9471640.0, "Ops/sec/procs": 947164.09, "ns per ops/procs": 1056.48}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 16", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 16769801.0, "Ops per second": 1676661.26, "ns per ops": 596.42, "Ops per threads": 1048112.0, "Ops per procs": 1048112.0, "Ops/sec/procs": 104791.33, "ns per ops/procs": 9542.77}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 2148355210.0, "Ops per second": 214670482.89, "ns per ops": 4.66, "Ops per threads": 268544401.0, "Ops per procs": 268544401.0, "Ops/sec/procs": 26833810.36, "ns per ops/procs": 37.27}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 2151532871.0, "Ops per second": 214988947.76, "ns per ops": 4.65, "Ops per threads": 268941608.0, "Ops per procs": 268941608.0, "Ops/sec/procs": 26873618.47, "ns per ops/procs": 37.21}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 1", {"Duration (ms)": 10009.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 292690252.0, "Ops per second": 29242303.95, "ns per ops": 34.2, "Ops per threads": 292690252.0, "Ops per procs": 292690252.0, "Ops/sec/procs": 29242303.95, "ns per ops/procs": 34.2}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 1", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 75801333.0, "Ops 
per second": 7580133.3, "ns per ops": 132.15, "Ops per threads": 75801333.0, "Ops per procs": 75801333.0, "Ops/sec/procs": 7580133.3, "ns per ops/procs": 132.15}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 16", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 221926675.0, "Ops per second": 22192667.5, "ns per ops": 45.18, "Ops per threads": 13870417.0, "Ops per procs": 13870417.0, "Ops/sec/procs": 1387041.72, "ns per ops/procs": 722.81}],["rdq-yield-go", "./rdq-yield-go -p 1 -d 10 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 84740180.0, "Ops per second": 8473654.06, "ns per ops": 118.01, "Ops per threads": 84740180.0, "Ops per procs": 84740180.0, "Ops/sec/procs": 8473654.06, "ns per ops/procs": 118.01}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 1", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 73281534.0, "Ops per second": 7328153.4, "ns per ops": 136.67, "Ops per threads": 73281534.0, "Ops per procs": 73281534.0, "Ops/sec/procs": 7328153.4, "ns per ops/procs": 136.67}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 388708305.0, "Ops per second": 38870830.0, "ns per ops": 25.0, "Ops per threads": 48588538.0, "Ops per procs": 48588538.0, "Ops/sec/procs": 4858853.0, "ns per ops/procs": 207.0}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 24", {"Duration (ms)": 10002.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 13787158.0, "Ops per second": 1378423.2, "ns per ops": 725.47, "Ops per threads": 574464.0, "Ops per procs": 574464.0, "Ops/sec/procs": 57434.3, "ns per ops/procs": 17411.2}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 8.0, 
"Total Operations(ops)": 398849975.0, "Ops per second": 39884997.0, "ns per ops": 25.0, "Ops per threads": 49856246.0, "Ops per procs": 49856246.0, "Ops/sec/procs": 4985624.0, "ns per ops/procs": 202.0}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 91905024.0, "Ops per second": 9190502.0, "ns per ops": 109.0, "Ops per threads": 91905024.0, "Ops per procs": 91905024.0, "Ops/sec/procs": 9190502.0, "ns per ops/procs": 109.0}]]
Index: doc/theses/thierry_delisle_PhD/thesis/fig/SAVE.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/SAVE.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/SAVE.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,81 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 7650 5400 8700 6318
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 7650 5854 7913 6308 8438 6308 8700 5854 8438 5400 7913 5400
+	 7650 5854
+-6
+6 9675 5400 10725 6318
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 9675 5854 9938 6308 10463 6308 10725 5854 10463 5400 9938 5400
+	 9675 5854
+-6
+6 8175 6675 8608 7050
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 8234 6734 8175 7050
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 8589 6734 8569 6853
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 8214 6853 8332 6813 8470 6872 8569 6853
+	 0.000 -0.500 -0.500 0.000
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 8236 6729 8354 6690 8492 6749 8590 6729
+	 0.000 -0.500 -0.500 0.000
+-6
+6 8325 6900 8700 7400
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 8325 7025 8450 6900 8700 6900 8700 7400 8325 7400 8325 7025
+	 8450 7025 8450 6900
+-6
+6 5694 5250 6150 5775
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 5922.000 5409.011 5877 5410 5922 5364 5967 5410
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 5922.000 5410.000 5785 5410 5922 5273 6059 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 5785 5410 5785 5501 5694 5501 5694 5775 6150 5775 6150 5501
+	 6059 5501 6059 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
+	 5877 5410 5877 5501 5967 5501 5967 5410
+-6
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 5625 5250 6825 5250 6825 6600 5625 6600 5625 5250
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 60.00 120.00
+	 7050 5850 7725 5850
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 60.00 120.00
+	 9150 5850 9750 5850
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 60.00 120.00
+	 8175 6525 8175 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 5625 6150 6825 6150
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2
+	7 1 1.00 60.00 120.00
+	 6150 5850 7200 5850
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2
+	7 1 1.00 60.00 120.00
+	 8175 5850 9150 5850
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2
+	7 0 1.00 60.00 120.00
+	 8175 6150 8175 6600
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 1 5
+	7 0 1.00 60.00 120.00
+	 6150 6375 6225 6900 6525 7200 6900 7350 7425 7350
+	 0.000 -0.500 -0.500 -0.500 0.000
+3 2 0 1 0 7 50 -1 -1 0.000 0 1 0 6
+	1 1 1.00 60.00 120.00
+	 6225 6900 6300 7050 6525 7200 6900 7350 7425 7350 7950 7200
+	 0.000 -0.500 -0.500 -0.500 -0.500 0.000
+4 0 0 50 -1 0 11 0.0000 2 135 1260 7875 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 165 810 7725 6975 Benaphore\001
+4 0 0 50 -1 0 11 0.0000 2 135 1440 8325 7500 Private Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 810 5925 5175 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 810 5250 5550 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 360 5400 5700 Lock\001
Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,133 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 5919 5250 6375 5775
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5409.011 6102 5410 6147 5364 6192 5410
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5410.000 6010 5410 6147 5273 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 6010 5410 6010 5501 5919 5501 5919 5775 6375 5775 6375 5501
+	 6284 5501 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
+	 6102 5410 6102 5501 6192 5501 6192 5410
+-6
+6 7442 6525 7875 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 7501 6584 7442 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 7856 6584 7836 6703
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 7481 6703 7599 6663 7737 6722 7836 6703
+	 0.000 -0.500 -0.500 0.000
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 7503 6579 7621 6540 7759 6599 7857 6579
+	 0.000 -0.500 -0.500 0.000
+-6
+6 7575 6825 7950 7325
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 7575 6950 7700 6825 7950 6825 7950 7325 7575 7325 7575 6950
+	 7700 6950 7700 6825
+-6
+6 9092 6525 9525 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 9151 6584 9092 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 9506 6584 9486 6703
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 9131 6703 9249 6663 9387 6722 9486 6703
+	 0.000 -0.500 -0.500 0.000
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 9153 6579 9271 6540 9409 6599 9507 6579
+	 0.000 -0.500 -0.500 0.000
+-6
+6 9225 6825 9600 7325
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 9225 6950 9350 6825 9600 6825 9600 7325 9225 7325 9225 6950
+	 9350 6950 9350 6825
+-6
+6 10742 6525 11175 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 10801 6584 10742 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 11156 6584 11136 6703
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 10781 6703 10899 6663 11037 6722 11136 6703
+	 0.000 -0.500 -0.500 0.000
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 10803 6579 10921 6540 11059 6599 11157 6579
+	 0.000 -0.500 -0.500 0.000
+-6
+6 10875 6825 11250 7325
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 10875 6950 11000 6825 11250 6825 11250 7325 10875 7325 10875 6950
+	 11000 6950 11000 6825
+-6
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 5850 6150 6675 6150
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 5850 5250 6675 5250 6675 6600 5850 6600 5850 5250
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 7725 6150 7725 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 9375 6150 9375 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 11025 6150 11025 6525
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 10500 5854 10763 6308 11288 6308 11550 5854 11288 5400 10763 5400
+	 10500 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 8850 5854 9113 6308 9638 6308 9900 5854 9638 5400 9113 5400
+	 8850 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 7200 5854 7463 6308 7988 6308 8250 5854 7988 5400 7463 5400
+	 7200 5854
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6450 5925 7275 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 8025 5925 8925 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9675 5925 10575 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 10725 5775 9825 5775
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9075 5775 8175 5775
+3 2 0 1 0 7 50 -1 -1 0.000 0 1 1 4
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6300 6375 6375 6825 6750 7050 7350 6975
+	 0.000 -0.500 -0.500 0.000
+4 0 0 50 -1 0 11 0.0000 2 135 810 5925 5175 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 810 5175 5550 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 360 5325 5700 Lock\001
+4 0 0 50 -1 0 11 0.0000 2 135 540 5775 6900 Atomic\001
+4 0 0 50 -1 0 11 0.0000 2 135 630 5775 7125 Pointer\001
+4 0 0 50 -1 0 11 0.0000 2 165 810 7950 6675 Benaphore\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 8025 7125 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 7275 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 165 810 9600 6675 Benaphore\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 9675 7125 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 8925 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 165 810 11250 6675 Benaphore\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 11325 7125 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 10575 5325 Idle Processor\001
Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle1.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle1.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle1.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,85 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 5919 5250 6375 5775
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5409.011 6102 5410 6147 5364 6192 5410
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5410.000 6010 5410 6147 5273 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 6010 5410 6010 5501 5919 5501 5919 5775 6375 5775 6375 5501
+	 6284 5501 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
+	 6102 5410 6102 5501 6192 5501 6192 5410
+-6
+6 7575 6525 7950 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 7575 6650 7700 6525 7950 6525 7950 7025 7575 7025 7575 6650
+	 7700 6650 7700 6525
+-6
+6 9225 6525 9600 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 9225 6650 9350 6525 9600 6525 9600 7025 9225 7025 9225 6650
+	 9350 6650 9350 6525
+-6
+6 10875 6525 11250 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 10875 6650 11000 6525 11250 6525 11250 7025 10875 7025 10875 6650
+	 11000 6650 11000 6525
+-6
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 7725 6150 7725 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 9375 6150 9375 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 11025 6150 11025 6525
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 10500 5854 10763 6308 11288 6308 11550 5854 11288 5400 10763 5400
+	 10500 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 8850 5854 9113 6308 9638 6308 9900 5854 9638 5400 9113 5400
+	 8850 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 7200 5854 7463 6308 7988 6308 8250 5854 7988 5400 7463 5400
+	 7200 5854
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6450 5925 7275 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 8025 5925 8925 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9675 5925 10575 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 10725 5775 9825 5775
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9075 5775 8175 5775
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 5850 5250 6675 5250 6675 6075 5850 6075 5850 5250
+4 0 0 50 -1 0 11 0.0000 2 135 810 5925 5175 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 810 5175 5550 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 360 5325 5700 Lock\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 7275 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 8925 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 10575 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 8025 6825 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 9675 6825 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 11325 6825 Event FD\001
Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle2.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle2.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle2.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,94 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 5919 5250 6375 5775
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5409.011 6102 5410 6147 5364 6192 5410
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5410.000 6010 5410 6147 5273 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 6010 5410 6010 5501 5919 5501 5919 5775 6375 5775 6375 5501
+	 6284 5501 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
+	 6102 5410 6102 5501 6192 5501 6192 5410
+-6
+6 7575 6525 7950 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 7575 6650 7700 6525 7950 6525 7950 7025 7575 7025 7575 6650
+	 7700 6650 7700 6525
+-6
+6 9225 6525 9600 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 9225 6650 9350 6525 9600 6525 9600 7025 9225 7025 9225 6650
+	 9350 6650 9350 6525
+-6
+6 10875 6525 11250 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 10875 6650 11000 6525 11250 6525 11250 7025 10875 7025 10875 6650
+	 11000 6650 11000 6525
+-6
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 5850 6150 6675 6150
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 5850 5250 6675 5250 6675 6600 5850 6600 5850 5250
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 7725 6150 7725 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 9375 6150 9375 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 11025 6150 11025 6525
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 10500 5854 10763 6308 11288 6308 11550 5854 11288 5400 10763 5400
+	 10500 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 8850 5854 9113 6308 9638 6308 9900 5854 9638 5400 9113 5400
+	 8850 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 7200 5854 7463 6308 7988 6308 8250 5854 7988 5400 7463 5400
+	 7200 5854
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6450 5925 7275 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 8025 5925 8925 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9675 5925 10575 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 10725 5775 9825 5775
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9075 5775 8175 5775
+3 2 0 1 0 7 50 -1 -1 0.000 0 1 1 4
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6300 6375 6375 6825 6900 6975 7500 6750
+	 0.000 -0.500 -0.500 0.000
+4 0 0 50 -1 0 11 0.0000 2 135 810 5925 5175 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 810 5175 5550 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 360 5325 5700 Lock\001
+4 0 0 50 -1 0 11 0.0000 2 135 540 5775 6900 Atomic\001
+4 0 0 50 -1 0 11 0.0000 2 135 630 5775 7125 Pointer\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 7275 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 8925 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 10575 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 8025 6825 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 9675 6825 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 11325 6825 Event FD\001
Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle_state.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle_state.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle_state.fig	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,27 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3900 3600 571 571 3900 3600 3375 3375
+1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 6300 3600 605 605 6300 3600 5775 3300
+1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 5100 5400 600 600 5100 5400 4500 5400
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	0 0 1.00 60.00 120.00
+	 4200 4125 4725 4950
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	0 0 1.00 60.00 120.00
+	 4500 3600 5700 3600
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	0 0 1.00 60.00 120.00
+	 5923 4125 5475 4875
+4 1 0 50 -1 0 11 0.0000 2 135 450 5100 5475 AWAKE\001
+4 1 0 50 -1 0 11 0.0000 2 135 450 6300 3675 SLEEP\001
+4 1 0 50 -1 0 11 0.0000 2 135 540 3900 3675 SEARCH\001
+4 0 0 50 -1 0 11 0.0000 2 135 360 5775 4650 WAKE\001
+4 2 0 50 -1 0 11 0.0000 2 135 540 4350 4650 CANCEL\001
+4 1 0 50 -1 0 11 0.0000 2 135 630 5025 3450 CONFIRM\001
Index: doc/theses/thierry_delisle_PhD/thesis/local.bib
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/local.bib	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/thierry_delisle_PhD/thesis/local.bib	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -701,2 +701,22 @@
   note = "[Online; accessed 12-April-2022]"
 }
+
+% RMR notes :
+% [05/04, 12:36] Trevor Brown
+%     i don't know where rmr complexity was first introduced, but there are many many many papers that use the term and define it
+% ​[05/04, 12:37] Trevor Brown
+%     here's one paper that uses the term a lot and links to many others that use it... might trace it to something useful there https://drops.dagstuhl.de/opus/volltexte/2021/14832/pdf/LIPIcs-DISC-2021-30.pdf
+% ​[05/04, 12:37] Trevor Brown
+%     another option might be to cite a textbook
+% ​[05/04, 12:42] Trevor Brown
+%     but i checked two textbooks in the area i'm aware of and i don't see a definition of rmr complexity in either
+% ​[05/04, 12:42] Trevor Brown
+%     this one has a nice statement about the prevelance of rmr complexity, as well as some rough definition
+% ​[05/04, 12:42] Trevor Brown
+%     https://dl.acm.org/doi/pdf/10.1145/3465084.3467938
+
+% Race to idle notes :
+% [13/04, 16:56] Martin Karsten
+%       I don't have a citation. Google brings up this one, which might be good:
+%
+% https://doi.org/10.1137/1.9781611973099.100
Index: doc/theses/thierry_delisle_PhD/thesis/test.svg
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/test.svg	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ doc/theses/thierry_delisle_PhD/thesis/test.svg	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,492 @@
+<?xml version="1.0" encoding="utf-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
+  "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Created with matplotlib (https://matplotlib.org/) -->
+<svg height="345.6pt" version="1.1" viewBox="0 0 460.8 345.6" width="460.8pt" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+ <defs>
+  <style type="text/css">
+*{stroke-linecap:butt;stroke-linejoin:round;}
+  </style>
+ </defs>
+ <g id="figure_1">
+  <g id="patch_1">
+   <path d="M 0 345.6 
+L 460.8 345.6 
+L 460.8 0 
+L 0 0 
+z
+" style="fill:#ffffff;"/>
+  </g>
+  <g id="axes_1">
+   <g id="patch_2">
+    <path d="M 57.6 307.584 
+L 414.72 307.584 
+L 414.72 41.472 
+L 57.6 41.472 
+z
+" style="fill:#ffffff;"/>
+   </g>
+   <g id="PathCollection_1">
+    <defs>
+     <path d="M -3 3 
+L 3 -3 
+M -3 -3 
+L 3 3 
+" id="mb6fe696cd4" style="stroke:#0095e3;stroke-width:1.5;"/>
+    </defs>
+    <g clip-path="url(#pd400051835)">
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="127.853115" xlink:href="#mb6fe696cd4" y="238.969387"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="408.865574" xlink:href="#mb6fe696cd4" y="92.390403"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="57.6" xlink:href="#mb6fe696cd4" y="263.873925"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="127.853115" xlink:href="#mb6fe696cd4" y="234.646917"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="57.6" xlink:href="#mb6fe696cd4" y="263.692599"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="408.865574" xlink:href="#mb6fe696cd4" y="90.109534"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="408.865574" xlink:href="#mb6fe696cd4" y="85.824"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="127.853115" xlink:href="#mb6fe696cd4" y="237.910643"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="57.6" xlink:href="#mb6fe696cd4" y="263.37251"/>
+    </g>
+   </g>
+   <g id="PathCollection_2">
+    <defs>
+     <path d="M -3 3 
+L 3 -3 
+M -3 -3 
+L 3 3 
+" id="mb7a392378d" style="stroke:#006cb4;stroke-width:1.5;"/>
+    </defs>
+    <g clip-path="url(#pd400051835)">
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="408.865574" xlink:href="#mb7a392378d" y="160.001329"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="127.853115" xlink:href="#mb7a392378d" y="238.512735"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="127.853115" xlink:href="#mb7a392378d" y="238.265802"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="57.6" xlink:href="#mb7a392378d" y="256.021044"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="127.853115" xlink:href="#mb7a392378d" y="238.284474"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="408.865574" xlink:href="#mb7a392378d" y="159.821437"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="57.6" xlink:href="#mb7a392378d" y="256.956649"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="408.865574" xlink:href="#mb7a392378d" y="165.331574"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="57.6" xlink:href="#mb7a392378d" y="255.961851"/>
+    </g>
+   </g>
+   <g id="matplotlib.axis_1">
+    <g id="xtick_1">
+     <g id="line2d_1">
+      <path clip-path="url(#pd400051835)" d="M 57.6 307.584 
+L 57.6 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_2">
+      <defs>
+       <path d="M 0 0 
+L 0 3.5 
+" id="m9bee3d39da" style="stroke:#000000;stroke-width:0.8;"/>
+      </defs>
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_1">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 57.6, 322.182437)" x="57.6" y="322.182437">1</text>
+     </g>
+    </g>
+    <g id="xtick_2">
+     <g id="line2d_3">
+      <path clip-path="url(#pd400051835)" d="M 81.017705 307.584 
+L 81.017705 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_4">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="81.017705" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_2">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 81.017705, 322.182437)" x="81.017705" y="322.182437">2</text>
+     </g>
+    </g>
+    <g id="xtick_3">
+     <g id="line2d_5">
+      <path clip-path="url(#pd400051835)" d="M 104.43541 307.584 
+L 104.43541 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_6">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="104.43541" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_3">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 104.43541, 322.182437)" x="104.43541" y="322.182437">3</text>
+     </g>
+    </g>
+    <g id="xtick_4">
+     <g id="line2d_7">
+      <path clip-path="url(#pd400051835)" d="M 127.853115 307.584 
+L 127.853115 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_8">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="127.853115" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_4">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 127.853115, 322.182437)" x="127.853115" y="322.182437">4</text>
+     </g>
+    </g>
+    <g id="xtick_5">
+     <g id="line2d_9">
+      <path clip-path="url(#pd400051835)" d="M 151.27082 307.584 
+L 151.27082 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_10">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="151.27082" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_5">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 151.27082, 322.182437)" x="151.27082" y="322.182437">5</text>
+     </g>
+    </g>
+    <g id="xtick_6">
+     <g id="line2d_11">
+      <path clip-path="url(#pd400051835)" d="M 174.688525 307.584 
+L 174.688525 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_12">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="174.688525" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_6">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 174.688525, 322.182437)" x="174.688525" y="322.182437">6</text>
+     </g>
+    </g>
+    <g id="xtick_7">
+     <g id="line2d_13">
+      <path clip-path="url(#pd400051835)" d="M 198.10623 307.584 
+L 198.10623 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_14">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="198.10623" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_7">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 198.10623, 322.182437)" x="198.10623" y="322.182437">7</text>
+     </g>
+    </g>
+    <g id="xtick_8">
+     <g id="line2d_15">
+      <path clip-path="url(#pd400051835)" d="M 221.523934 307.584 
+L 221.523934 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_16">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="221.523934" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_8">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 221.523934, 322.182437)" x="221.523934" y="322.182437">8</text>
+     </g>
+    </g>
+    <g id="xtick_9">
+     <g id="line2d_17">
+      <path clip-path="url(#pd400051835)" d="M 244.941639 307.584 
+L 244.941639 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_18">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="244.941639" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_9">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 244.941639, 322.182437)" x="244.941639" y="322.182437">9</text>
+     </g>
+    </g>
+    <g id="xtick_10">
+     <g id="line2d_19">
+      <path clip-path="url(#pd400051835)" d="M 268.359344 307.584 
+L 268.359344 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_20">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="268.359344" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_10">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 268.359344, 322.182437)" x="268.359344" y="322.182437">10</text>
+     </g>
+    </g>
+    <g id="xtick_11">
+     <g id="line2d_21">
+      <path clip-path="url(#pd400051835)" d="M 291.777049 307.584 
+L 291.777049 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_22">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="291.777049" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_11">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 291.777049, 322.182437)" x="291.777049" y="322.182437">11</text>
+     </g>
+    </g>
+    <g id="xtick_12">
+     <g id="line2d_23">
+      <path clip-path="url(#pd400051835)" d="M 315.194754 307.584 
+L 315.194754 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_24">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="315.194754" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_12">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 315.194754, 322.182437)" x="315.194754" y="322.182437">12</text>
+     </g>
+    </g>
+    <g id="xtick_13">
+     <g id="line2d_25">
+      <path clip-path="url(#pd400051835)" d="M 338.612459 307.584 
+L 338.612459 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_26">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="338.612459" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_13">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 338.612459, 322.182437)" x="338.612459" y="322.182437">13</text>
+     </g>
+    </g>
+    <g id="xtick_14">
+     <g id="line2d_27">
+      <path clip-path="url(#pd400051835)" d="M 362.030164 307.584 
+L 362.030164 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_28">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="362.030164" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_14">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 362.030164, 322.182437)" x="362.030164" y="322.182437">14</text>
+     </g>
+    </g>
+    <g id="xtick_15">
+     <g id="line2d_29">
+      <path clip-path="url(#pd400051835)" d="M 385.447869 307.584 
+L 385.447869 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_30">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="385.447869" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_15">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 385.447869, 322.182437)" x="385.447869" y="322.182437">15</text>
+     </g>
+    </g>
+    <g id="xtick_16">
+     <g id="line2d_31">
+      <path clip-path="url(#pd400051835)" d="M 408.865574 307.584 
+L 408.865574 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_32">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="408.865574" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_16">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 408.865574, 322.182437)" x="408.865574" y="322.182437">16</text>
+     </g>
+    </g>
+    <g id="text_17">
+     <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 236.16, 335.860562)" x="236.16" y="335.860562">Number of processors</text>
+    </g>
+   </g>
+   <g id="matplotlib.axis_2">
+    <g id="ytick_1">
+     <g id="line2d_33">
+      <path clip-path="url(#pd400051835)" d="M 57.6 307.584 
+L 414.72 307.584 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_34">
+      <defs>
+       <path d="M 0 0 
+L -3.5 0 
+" id="m082b2e4a56" style="stroke:#000000;stroke-width:0.8;"/>
+      </defs>
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_18">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 311.383219)" x="50.6" y="311.383219">0 Ops</text>
+     </g>
+    </g>
+    <g id="ytick_2">
+     <g id="line2d_35">
+      <path clip-path="url(#pd400051835)" d="M 57.6 268.244858 
+L 414.72 268.244858 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_36">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="268.244858"/>
+      </g>
+     </g>
+     <g id="text_19">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 272.044077)" x="50.6" y="272.044077">5 MOps</text>
+     </g>
+    </g>
+    <g id="ytick_3">
+     <g id="line2d_37">
+      <path clip-path="url(#pd400051835)" d="M 57.6 228.905717 
+L 414.72 228.905717 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_38">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="228.905717"/>
+      </g>
+     </g>
+     <g id="text_20">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 232.704936)" x="50.6" y="232.704936">10 MOps</text>
+     </g>
+    </g>
+    <g id="ytick_4">
+     <g id="line2d_39">
+      <path clip-path="url(#pd400051835)" d="M 57.6 189.566575 
+L 414.72 189.566575 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_40">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="189.566575"/>
+      </g>
+     </g>
+     <g id="text_21">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 193.365794)" x="50.6" y="193.365794">15 MOps</text>
+     </g>
+    </g>
+    <g id="ytick_5">
+     <g id="line2d_41">
+      <path clip-path="url(#pd400051835)" d="M 57.6 150.227434 
+L 414.72 150.227434 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_42">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="150.227434"/>
+      </g>
+     </g>
+     <g id="text_22">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 154.026652)" x="50.6" y="154.026652">20 MOps</text>
+     </g>
+    </g>
+    <g id="ytick_6">
+     <g id="line2d_43">
+      <path clip-path="url(#pd400051835)" d="M 57.6 110.888292 
+L 414.72 110.888292 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_44">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="110.888292"/>
+      </g>
+     </g>
+     <g id="text_23">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 114.687511)" x="50.6" y="114.687511">25 MOps</text>
+     </g>
+    </g>
+    <g id="ytick_7">
+     <g id="line2d_45">
+      <path clip-path="url(#pd400051835)" d="M 57.6 71.54915 
+L 414.72 71.54915 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_46">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="71.54915"/>
+      </g>
+     </g>
+     <g id="text_24">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 75.348369)" x="50.6" y="75.348369">30 MOps</text>
+     </g>
+    </g>
+    <g id="text_25">
+     <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-90, 0.559375, 174.528)" x="0.559375" y="174.528">Ops per second</text>
+    </g>
+   </g>
+   <g id="patch_3">
+    <path d="M 57.6 307.584 
+L 57.6 41.472 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="patch_4">
+    <path d="M 414.72 307.584 
+L 414.72 41.472 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="patch_5">
+    <path d="M 57.6 307.584 
+L 414.72 307.584 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="patch_6">
+    <path d="M 57.6 41.472 
+L 414.72 41.472 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="legend_1">
+    <g id="patch_7">
+     <path d="M 64.6 78.82825 
+L 161.615625 78.82825 
+Q 163.615625 78.82825 163.615625 76.82825 
+L 163.615625 48.472 
+Q 163.615625 46.472 161.615625 46.472 
+L 64.6 46.472 
+Q 62.6 46.472 62.6 48.472 
+L 62.6 76.82825 
+Q 62.6 78.82825 64.6 78.82825 
+z
+" style="fill:#ffffff;opacity:0.8;stroke:#cccccc;stroke-linejoin:miter;"/>
+    </g>
+    <g id="PathCollection_3">
+     <g>
+      <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="76.6" xlink:href="#mb6fe696cd4" y="55.445437"/>
+     </g>
+    </g>
+    <g id="text_26">
+     <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:start;" transform="rotate(-0, 94.6, 58.070437)" x="94.6" y="58.070437">rdq-cycle-go</text>
+    </g>
+    <g id="PathCollection_4">
+     <g>
+      <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="76.6" xlink:href="#mb7a392378d" y="70.123562"/>
+     </g>
+    </g>
+    <g id="text_27">
+     <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:start;" transform="rotate(-0, 94.6, 72.748562)" x="94.6" y="72.748562">rdq-cycle-cfa</text>
+    </g>
+   </g>
+  </g>
+ </g>
+ <defs>
+  <clipPath id="pd400051835">
+   <rect height="266.112" width="357.12" x="57.6" y="41.472"/>
+  </clipPath>
+ </defs>
+</svg>
Index: doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -7,7 +7,46 @@
 Networked ZIPF
 
+Nginx : 5Gb still good, 4Gb starts to suffer
+
+Cforall : 10Gb too high, 4 Gb too low
+
 \section{Memcached}
 
-In Memory
+\subsection{Benchmark Environment}
+These experiments are run on a cluster of homogeneous Supermicro SYS-6017R-TDF compute nodes with the following characteristics:
+The server runs Ubuntu 20.04.3 LTS on top of Linux Kernel 5.11.0-34.
+Each node has 2 Intel(R) Xeon(R) CPU E5-2620 v2 running at 2.10GHz.
+These CPUs have 6 cores per CPU and 2 \glspl{hthrd} per core, for a total of 24 \glspl{hthrd}.
+The CPUs each have 384 KB, 3 MB and 30 MB of L1, L2 and L3 caches respectively.
+Each node is connected to the network through a Mellanox 10 Gigabit Ethernet port.
+The network route uses 1 Mellanox SX1012 10/40 Gigabit Ethernet cluster switch.
 
-Networked
+
+
+\begin{figure}
+	\centering
+	\input{result.memcd.updt.qps.pstex_t}
+	\caption[Memcached Benchmark : Throughput on Intel]{Memcached Benchmark : Throughput on Intel\smallskip\newline Description}
+	\label{fig:memcd:updt:qps}
+\end{figure}
+
+\begin{figure}
+	\centering
+	\input{result.memcd.updt.lat.pstex_t}
+	\caption[Memcached Benchmark : Latency on Intel]{Memcached Benchmark : Latency on Intel\smallskip\newline Description}
+	\label{fig:memcd:updt:lat}
+\end{figure}
+
+\begin{figure}
+	\centering
+	\input{result.memcd.rate.qps.pstex_t}
+	\caption[Memcached Benchmark : Throughput on Intel]{Memcached Benchmark : Throughput on Intel\smallskip\newline Description}
+	\label{fig:memcd:rate:qps}
+\end{figure}
+
+\begin{figure}
+	\centering
+	\input{result.memcd.rate.99th.pstex_t}
+	\caption[Churn Benchmark : Throughput on Intel]{Churn Benchmark : Throughput on Intel\smallskip\newline Description}
+	\label{fig:memcd:rate:tail}
+\end{figure}
Index: doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -6,4 +6,9 @@
 \section{Benchmark Environment}
 All of these benchmarks are run on two distinct hardware environment, an AMD and an INTEL machine.
+
+For all benchmarks, \texttt{taskset} is used to limit the experiment to 1 NUMA Node with no hyper threading.
+If more \glspl{hthrd} are needed, then 1 NUMA Node with hyperthreading is used.
+If still more \glspl{hthrd} are needed then the experiment is limited to as few NUMA Nodes as needed.
+
 
 \paragraph{AMD} The AMD machine is a server with two AMD EPYC 7662 CPUs and 256GB of DDR4 RAM.
@@ -23,4 +28,10 @@
 
 \section{Cycling latency}
+\begin{figure}
+	\centering
+	\input{cycle.pstex_t}
+	\caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.}
+	\label{fig:cycle}
+\end{figure}
 The most basic evaluation of any ready queue is to evaluate the latency needed to push and pop one element from the ready-queue.
 Since these two operation also describe a \texttt{yield} operation, many systems use this as the most basic benchmark.
@@ -42,11 +53,4 @@
 Note that this problem is only present on SMP machines and is significantly mitigated by the fact that there are multiple rings in the system.
 
-\begin{figure}
-	\centering
-	\input{cycle.pstex_t}
-	\caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.}
-	\label{fig:cycle}
-\end{figure}
-
 To avoid this benchmark from being dominated by the idle sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available.
 Beyond this point, adding more rings serves to mitigate even more the idle sleep handling.
@@ -54,24 +58,59 @@
 
 The actual benchmark is more complicated to handle termination, but that simply requires using a binary semphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition.
-
-\begin{lstlisting}
-	Thread.main() {
-		count := 0
-		for {
-			wait()
-			this.next.wake()
-			count ++
-			if must_stop() { break }
-		}
-		global.count += count
-	}
-\end{lstlisting}
-
-\begin{figure}
-	\centering
-	\input{result.cycle.jax.ops.pstex_t}
-	\vspace*{-10pt}
-	\label{fig:cycle:ns:jax}
-\end{figure}
+Figure~\ref{fig:cycle:code} shows pseudo code for this benchmark.
+
+\begin{figure}
+	\begin{lstlisting}
+		Thread.main() {
+			count := 0
+			for {
+				wait()
+				this.next.wake()
+				count ++
+				if must_stop() { break }
+			}
+			global.count += count
+		}
+	\end{lstlisting}
+	\caption[Cycle Benchmark : Pseudo Code]{Cycle Benchmark : Pseudo Code}
+	\label{fig:cycle:code}
+\end{figure}
+
+
+
+\subsection{Results}
+\begin{figure}
+	\subfloat[][Throughput, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.cycle.jax.ops.pstex_t}
+		}
+		\label{fig:cycle:jax:ops}
+	}
+	\subfloat[][Throughput, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.cycle.low.jax.ops.pstex_t}
+		}
+		\label{fig:cycle:jax:low:ops}
+	}
+
+	\subfloat[][Latency, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.cycle.jax.ns.pstex_t}
+		}
+
+	}
+	\subfloat[][Latency, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.cycle.low.jax.ns.pstex_t}
+		}
+		\label{fig:cycle:jax:low:ns}
+	}
+	\caption[Cycle Benchmark on Intel]{Cycle Benchmark on Intel\smallskip\newline Throughput as a function of \proc count, using 100 cycles per \proc, 5 \ats per cycle.}
+	\label{fig:cycle:jax}
+\end{figure}
+Figure~\ref{fig:cycle:jax} shows the throughput as a function of \proc count, with the following constants:
+Each run uses 100 cycles per \proc, 5 \ats per cycle.
+
+\todo{results discussion}
 
 \section{Yield}
@@ -81,17 +120,56 @@
 Its only interesting variable is the number of \glspl{at} per \glspl{proc}, where ratios close to 1 means the ready queue(s) could be empty.
 This sometimes puts more strain on the idle sleep handling, compared to scenarios where there is clearly plenty of work to be done.
-
-\todo{code, setup, results}
-
-\begin{lstlisting}
-	Thread.main() {
-		count := 0
-		while !stop {
-			yield()
-			count ++
-		}
-		global.count += count
-	}
-\end{lstlisting}
+Figure~\ref{fig:yield:code} shows pseudo code for this benchmark, the ``wait/wake-next'' is simply replaced by a yield.
+
+\begin{figure}
+	\begin{lstlisting}
+		Thread.main() {
+			count := 0
+			for {
+				yield()
+				count ++
+				if must_stop() { break }
+			}
+			global.count += count
+		}
+	\end{lstlisting}
+	\caption[Yield Benchmark : Pseudo Code]{Yield Benchmark : Pseudo Code}
+	\label{fig:yield:code}
+\end{figure}
+
+\subsection{Results}
+\begin{figure}
+	\subfloat[][Throughput, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.yield.jax.ops.pstex_t}
+		}
+		\label{fig:yield:jax:ops}
+	}
+	\subfloat[][Throughput, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+		\input{result.yield.low.jax.ops.pstex_t}
+		}
+		\label{fig:yield:jax:low:ops}
+	}
+
+	\subfloat[][Latency, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+		\input{result.yield.jax.ns.pstex_t}
+		}
+		\label{fig:yield:jax:ns}
+	}
+	\subfloat[][Latency, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+		\input{result.yield.low.jax.ns.pstex_t}
+		}
+		\label{fig:yield:jax:low:ns}
+	}
+	\caption[Yield Benchmark on Intel]{Yield Benchmark on Intel\smallskip\newline Throughput as a function of \proc count, using 1 \ats per \proc.}
+	\label{fig:yield:jax}
+\end{figure}
+Figure~\ref{fig:yield:jax} shows the throughput as a function of \proc count, with the following constants:
+Each run uses 100 \ats per \proc.
+
+\todo{results discussion}
 
 
@@ -105,8 +183,9 @@
 In either case, this benchmark aims to highlight how each scheduler handles these cases, since both cases can lead to performance degradation if they are not handled correctly.
 
-To achieve this the benchmark uses a fixed size array of \newterm{chair}s, where a chair is a data structure that holds a single blocked \gls{at}.
-When a \gls{at} attempts to block on the chair, it must first unblocked the \gls{at} currently blocked on said chair, if any.
-This creates a flow where \glspl{at} push each other out of the chairs before being pushed out themselves.
-For this benchmark to work however, the number of \glspl{at} must be equal or greater to the number of chairs plus the number of \glspl{proc}.
+To achieve this the benchmark uses a fixed size array of semaphores.
+Each \gls{at} picks a random semaphore, \texttt{V}s it to unblock a \at waiting and then \texttt{P}s on the semaphore.
+This creates a flow where \glspl{at} push each other out of the semaphores before being pushed out themselves.
+For this benchmark to work however, the number of \glspl{at} must be equal or greater to the number of semaphores plus the number of \glspl{proc}.
+Note that the nature of these semaphores mean the counter can go beyond 1, which could lead to calls to \texttt{P} not blocking.
 
 \todo{code, setup, results}
@@ -116,7 +195,6 @@
 		for {
 			r := random() % len(spots)
-			next := xchg(spots[r], this)
-			if next { next.wake() }
-			wait()
+			spots[r].V()
+			spots[r].P()
 			count ++
 			if must_stop() { break }
@@ -125,4 +203,34 @@
 	}
 \end{lstlisting}
+
+\begin{figure}
+	\subfloat[][Throughput, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.churn.jax.ops.pstex_t}
+		}
+		\label{fig:churn:jax:ops}
+	}
+	\subfloat[][Throughput, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.churn.low.jax.ops.pstex_t}
+		}
+		\label{fig:churn:jax:low:ops}
+	}
+
+	\subfloat[][Latency, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.churn.jax.ns.pstex_t}
+		}
+
+	}
+	\subfloat[][Latency, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.churn.low.jax.ns.pstex_t}
+		}
+		\label{fig:churn:jax:low:ns}
+	}
+	\caption[Churn Benchmark on Intel]{\centering Churn Benchmark on Intel\smallskip\newline Throughput and latency of the Churn benchmark on the Intel machine. Throughput is the total operations per second across all cores. Latency is the duration of each operation.}
+	\label{fig:churn:jax}
+\end{figure}
 
 \section{Locality}
Index: doc/theses/thierry_delisle_PhD/thesis/text/intro.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/intro.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/thierry_delisle_PhD/thesis/text/intro.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -2,8 +2,9 @@
 \todo{A proper intro}
 
-The C programming language\cit{C}
+The C programming language~\cite{C11}
 
-The \CFA programming language\cite{cfa:frontpage,cfa:typesystem} which extends the C programming language to add modern safety and productiviy features while maintaining backwards compatibility. Among it's productiviy features, \CFA introduces support for threading\cit{CFA Concurrency}, to allow programmers to write modern concurrent and parallel programming.
-While previous work on the concurrent package of \CFA focused on features and interfaces, this thesis focuses on performance, introducing \glsxtrshort{api} changes only when required by performance considerations. More specifically, this thesis concentrates on scheduling and \glsxtrshort{io}. Prior to this work, the \CFA runtime used a strictly \glsxtrshort{fifo} \gls{rQ}.
+The \CFA programming language~\cite{cfa:frontpage,cfa:typesystem} extends the C programming language by adding modern safety and productivity features, while maintaining backwards compatibility. Among its productivity features, \CFA supports user-level threading~\cite{Delisle21} allowing programmers to write modern concurrent and parallel programs.
+My previous master's thesis on concurrency in \CFA focused on features and interfaces.
+This Ph.D.\ thesis focuses on performance, introducing \glsxtrshort{api} changes only when required by performance considerations. Specifically, this work concentrates on scheduling and \glsxtrshort{io}. Prior to this work, the \CFA runtime used a strict \glsxtrshort{fifo} \gls{rQ} and  no non-blocking I/O capabilities at the user-thread level.
 
-This work exclusively concentrates on Linux as it's operating system since the existing \CFA runtime and compiler does not already support other operating systems. Furthermore, as \CFA is yet to be released, supporting version of Linux older than the latest version is not a goal of this work.
+As a research project, this work builds exclusively on newer versions of the Linux operating-system and gcc/clang compilers. While \CFA is released, supporting older versions of Linux ($<$~Ubuntu 16.04) and gcc/clang compilers ($<$~gcc 6.0) is not a goal of this work.
Index: doc/theses/thierry_delisle_PhD/thesis/text/practice.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/practice.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/thierry_delisle_PhD/thesis/text/practice.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -7,16 +7,19 @@
 More precise \CFA supports adding \procs using the RAII object @processor@.
 These objects can be created at any time and can be destroyed at any time.
-They are normally create as automatic stack variables, but this is not a requirement.
+They are normally created as automatic stack variables, but this is not a requirement.
 
 The consequence is that the scheduler and \io subsystems must support \procs comming in and out of existence.
 
 \section{Manual Resizing}
-The consequence of dynamically changing the number of \procs is that all internal arrays that are sized based on the number of \procs neede to be \texttt{realloc}ed.
-This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing because there is no guarantee the \proc causing the shrink had the highest index. Therefore indexes need to be reassigned to preserve contiguous indexes.}.
-
-There are no performance requirements, within reason, for resizing since this is usually considered as part of setup and teardown.
+Manual resizing is expected to be a rare operation.
+Programmers are mostly expected to resize clusters on startup or teardown.
+Therefore dynamically changing the number of \procs is an appropriate moment to allocate or free resources to match the new state.
+As such all internal arrays that are sized based on the number of \procs need to be \texttt{realloc}ed.
+This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing when shrinking because some indexes are expected to refer to dense contiguous resources and there is no guarantee the resource being removed has the highest index.}.
+
+There are no performance requirements, within reason, for resizing since it is expected to be rare.
 However, this operation has strict correctness requirements since shrinking and idle sleep can easily lead to deadlocks.
 It should also avoid as much as possible any effect on performance when the number of \procs remain constant.
-This later requirement prehibits simple solutions, like simply adding a global lock to these arrays.
+This later requirement prohibits naive solutions, like simply adding a global lock to the ready-queue arrays.
 
 \subsection{Read-Copy-Update}
@@ -24,11 +27,11 @@
 In this pattern, resizing is done by creating a copy of the internal data strucures, updating the copy with the desired changes, and then attempt an Idiana Jones Switch to replace the original witht the copy.
 This approach potentially has the advantage that it may not need any synchronization to do the switch.
-The switch definitely implies a race where \procs could still use the previous, original, data structure after the copy was switched in.
-The important question then becomes whether or not this race can be recovered from.
-If the changes that arrived late can be transferred from the original to the copy then this solution works.
-
-For linked-lists, dequeing is somewhat of a problem.
+However, there is a race where \procs could still use the previous, original, data structure after the copy was switched in.
+This race not only requires some added memory reclamation scheme, it also requires that operations made on the stale original version be eventually moved to the copy.
+
+For linked-lists, enqueing is only somewhat problematic, \ats enqueued to the original queues need to be transferred to the new, which might not preserve ordering.
+Dequeing is more challenging.
 Dequeing from the original will not necessarily update the copy which could lead to multiple \procs dequeing the same \at.
-Fixing this requires making the array contain pointers to subqueues rather than the subqueues themselves.
+Fixing this requires more synchronization or more indirection on the queues.
 
 Another challenge is that the original must be kept until all \procs have witnessed the change.
@@ -97,6 +100,6 @@
 In addition to users manually changing the number of \procs, it is desireable to support ``removing'' \procs when there is not enough \ats for all the \procs to be useful.
 While manual resizing is expected to be rare, the number of \ats is expected to vary much more which means \procs may need to be ``removed'' for only short periods of time.
-Furthermore, race conditions that spuriously lead to the impression no \ats are ready are actually common in practice.
-Therefore \procs should not be actually \emph{removed} but simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready.
+Furthermore, race conditions that spuriously lead to the impression that no \ats are ready are actually common in practice.
+Therefore resources associated with \procs should not be freed but \procs simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready.
 This state is referred to as \newterm{Idle-Sleep}.
 
@@ -110,18 +113,111 @@
 The \CFA scheduler simply follows the ``Race-to-Idle'\cit{https://doi.org/10.1137/1.9781611973099.100}' approach where a sleeping \proc is woken any time an \at becomes ready and \procs go to idle sleep anytime they run out of work.
 
+\section{Sleeping}
+As usual, the cornerstone of any feature related to the kernel is the choice of system call.
+In terms of blocking a \gls{kthrd} until some event occurs the Linux kernel has many available options:
+
+\paragraph{\texttt{pthread\_mutex}/\texttt{pthread\_cond}}
+The most classic option is to use some combination of \texttt{pthread\_mutex} and \texttt{pthread\_cond}.
+These serve as straightforward mutual exclusion and synchronization tools and allow a \gls{kthrd} to wait on a \texttt{pthread\_cond} until signalled.
+While this approach is generally perfectly appropriate for \glspl{kthrd} waiting after each other, \io operations do not signal \texttt{pthread\_cond}s.
+For \io results to wake a \proc waiting on a \texttt{pthread\_cond} means that a different \gls{kthrd} must be woken up first, and then the \proc can be signalled.
+
+\subsection{\texttt{io\_uring} and Epoll}
+An alternative is to flip the problem on its head and block waiting for \io, using \texttt{io\_uring} or even \texttt{epoll}.
+This creates the inverse situation, where \io operations directly wake sleeping \procs but waking \proc from a running \gls{kthrd} must use an indirect scheme.
+This generally takes the form of creating a file descriptor, \eg, a dummy file, a pipe or an event fd, and using that file descriptor when \procs need to wake eachother.
+This leads to additional complexity because there can be a race between these artificial \io operations and genuine \io operations.
+If not handled correctly, this can lead to the artificial files going out of sync.
+
+\subsection{Event FDs}
+Another interesting approach is to use an event file descriptor\cit{eventfd}.
+This is a Linux feature that is a file descriptor that behaves like \io, \ie, uses \texttt{read} and \texttt{write}, but also behaves like a semaphore.
+Indeed, all reads and writes must use 64-bit values\footnote{On 64-bit Linux; a 32-bit Linux would use 32-bit values.}.
+Writes add their values to the buffer, that is arithmetic addition and not buffer append, and reads zero out the buffer and return the buffer values so far\footnote{This is without the \texttt{EFD\_SEMAPHORE} flag. This flags changes the behavior of \texttt{read} but is not needed for this work.}.
+If a read is made while the buffer is already 0, the read blocks until a non-0 value is added.
+What makes this feature particularly interesting is that \texttt{io\_uring} supports the \texttt{IORING\_REGISTER\_EVENTFD} command, to register an event fd to a particular instance.
+Once that instance is registered, any \io completion will result in \texttt{io\_uring} writing to the event FD.
+This means that a \proc waiting on the event FD can be \emph{directly} woken up by either other \procs or incoming \io.
+
+\begin{figure}
+	\centering
+	\input{idle1.pstex_t}
+	\caption[Basic Idle Sleep Data Structure]{Basic Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock.
+	Each \proc has a private event FD.}
+	\label{fig:idle1}
+\end{figure}
+
 
 \section{Tracking Sleepers}
 Tracking which \procs are in idle sleep requires a data structure holding all the sleeping \procs, but more importantly it requires a concurrent \emph{handshake} so that no \at is stranded on a ready-queue with no active \proc.
 The classic challenge is when a \at is made ready while a \proc is going to sleep, there is a race where the new \at may not see the sleeping \proc and the sleeping \proc may not see the ready \at.
-
-Furthermore, the ``Race-to-Idle'' approach means that there is some
-
-\section{Sleeping}
-
-\subsection{Event FDs}
-
-\subsection{Epoll}
-
-\subsection{\texttt{io\_uring}}
-
-\section{Reducing Latency}
+Since \ats can be made ready by timers, \io operations or other events outside a cluster, this race can occur even if the \proc going to sleep is the only \proc awake.
+As a result, improper handling of this race can lead to all \procs going to sleep and the system deadlocking.
+
+Furthermore, the ``Race-to-Idle'' approach means that there may be contention on the data structure tracking sleepers.
+Contention slowing down \procs attempting to sleep or wake-up can be tolerated.
+These \procs are not doing useful work and therefore not contributing to overall performance.
+However, notifying, checking if a \proc must be woken-up and doing so if needed, can significantly affect overall performance and must be low cost.
+
+\subsection{Sleepers List}
+Each cluster maintains a list of idle \procs, organized as a stack.
+This ordering hopefully allows \procs at the tail to stay in idle sleep for extended periods of time.
+Because of these unbalanced performance requirements, the algorithm tracking sleepers is designed to have idle \procs handle as much of the work as possible.
+The idle \procs maintain the list of sleepers among themselves and notifying a sleeping \proc takes as little work as possible.
+This approach means that maintaining the list is fairly straightforward.
+The list can simply use a single lock per cluster and only \procs that are getting in and out of idle state will contend for that lock.
+
+This approach also simplifies notification.
+Indeed, \procs need to be notified when a new \at is readied, but they also must be notified during resizing, so the \gls{kthrd} can be joined.
+This means that whichever entity removes idle \procs from the sleeper list must be able to do so in any order.
+Using a simple lock over this data structure makes the removal much simpler than using a lock-free data structure.
+The notification process then simply needs to wake-up the desired idle \proc, using \texttt{pthread\_cond\_signal}, \texttt{write} on an fd, etc., and the \proc will handle the rest.
+
+\subsection{Reducing Latency}
+As mentioned in this section, \procs going idle for extremely short periods of time is likely in certain common scenarios.
+Therefore, the latency of doing a system call to read from and writing to the event fd can actually negatively affect overall performance in a notable way.
+It is important to reduce latency and contention of the notification as much as possible.
+Figure~\ref{fig:idle1} shows the basic idle sleep data structure.
+For the notifiers, this data structure can cause contention on the lock and the event fd syscall can cause notable latency.
+
+\begin{figure}
+	\centering
+	\input{idle2.pstex_t}
+	\caption[Improved Idle Sleep Data Structure]{Improved Idle Sleep Data Structure \smallskip\newline An atomic pointer is added to the list, pointing to the Event FD of the first \proc on the list.}
+	\label{fig:idle2}
+\end{figure}
+
+The contention is mostly due to the lock on the list needing to be held to get to the head \proc.
+That lock can be contended by \procs attempting to go to sleep, \procs waking or notification attempts.
+The contention from the \procs attempting to go to sleep can be mitigated slightly by using \texttt{try\_acquire} instead, so the \procs simply continue searching for \ats if the lock is held.
+This trick cannot be used for waking \procs since they are not in a state where they can run \ats.
+However, it is worth noting that notification does not strictly require accessing the list or the head \proc.
+Therefore, contention can be reduced notably by having notifiers avoid the lock entirely and adding a pointer to the event fd of the first idle \proc, as in Figure~\ref{fig:idle2}.
+To avoid contention between the notifiers, instead of simply reading the atomic pointer, notifiers atomically exchange it to \texttt{null} so only one notifier will contend on the system call.
+
+\begin{figure}
+	\centering
+	\input{idle_state.pstex_t}
+	\caption[Idle Sleep State Machine]{Idle Sleep State Machine \smallskip\newline A three state flag is added beside the event FD of each \proc to avoid unnecessary system calls.}
+	\label{fig:idle:state}
+\end{figure}
+
+The next optimization that can be done is to avoid the latency of the event fd when possible.
+This can be done by adding what is effectively a benaphore\cit{benaphore} in front of the event fd.
+A simple three state flag is added beside the event fd to avoid unnecessary system calls, as shown in Figure~\ref{fig:idle:state}.
+The flag starts in state \texttt{SEARCH}, while the \proc is searching for \ats to run.
+The \proc then confirms the sleep by atomically swapping the state to \texttt{SLEEP}.
+If the previous state was still \texttt{SEARCH}, then the \proc does read the event fd.
+Meanwhile, notifiers atomically exchange the state to \texttt{AWAKE} state.
+If the previous state was \texttt{SLEEP}, then the notifier must write to the event fd.
+However, if the notify arrives almost immediately after the \proc marks itself idle, then both reads and writes on the event fd can be omitted, which reduces latency notably.
+This leads to the final data structure shown in Figure~\ref{fig:idle}.
+
+\begin{figure}
+	\centering
+	\input{idle.pstex_t}
+	\caption[Low-latency Idle Sleep Data Structure]{Low-latency Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock.
+	Each \proc has a private event FD with a benaphore in front of it.
+	The list also has an atomic pointer to the event fd and benaphore of the first \proc on the list.}
+	\label{fig:idle}
+\end{figure}
Index: doc/theses/thierry_delisle_PhD/thesis/thesis.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/thesis.tex	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ doc/theses/thierry_delisle_PhD/thesis/thesis.tex	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -80,6 +80,7 @@
 %\usepackage{nomencl} % For a nomenclature (optional; available from ctan.org)
 \usepackage{amsmath,amssymb,amstext} % Lots of math symbols and environments
-\usepackage{xcolor}
+\usepackage[dvipsnames]{xcolor}
 \usepackage{graphicx} % For including graphics
+\usepackage{subcaption}
 
 % Hyperlinks make it very easy to navigate an electronic document.
@@ -104,5 +105,5 @@
 	colorlinks=true,        % false: boxed links; true: colored links
 	linkcolor=blue,         % color of internal links
-	citecolor=green,        % color of links to bibliography
+	citecolor=OliveGreen,   % color of links to bibliography
 	filecolor=magenta,      % color of file links
 	urlcolor=cyan           % color of external links
@@ -204,4 +205,5 @@
 \newcommand\at{\gls{at}\xspace}%
 \newcommand\ats{\glspl{at}\xspace}%
+\newcommand\Proc{\Pls{proc}\xspace}%
 \newcommand\proc{\gls{proc}\xspace}%
 \newcommand\procs{\glspl{proc}\xspace}%
Index: libcfa/src/Makefile.am
===================================================================
--- libcfa/src/Makefile.am	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/Makefile.am	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -33,5 +33,5 @@
 # The built sources must not depend on the installed inst_headers_src
 AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr -I$(srcdir)/concurrency $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@
-AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@
+AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions -fvisibility=hidden -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@
 AM_CCASFLAGS = -g -Wall -Werror=return-type -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@
 CFACC = @CFACC@
@@ -194,9 +194,9 @@
 
 prelude.o : prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@
-	${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -o ${@}
+	${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -fvisibility=default -o ${@}
 
 prelude.lo: prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@
 	${AM_V_GEN}$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile \
-	$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -o ${@}
+	$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -fvisibility=default -o ${@}
 
 concurrency/io/call.cfa: $(srcdir)/concurrency/io/call.cfa.in
Index: libcfa/src/algorithms/range_iterator.cfa
===================================================================
--- libcfa/src/algorithms/range_iterator.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/algorithms/range_iterator.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -20,5 +20,7 @@
 #include <fstream.hfa>
 
-void main(RangeIter & this) {
+#include "bits/defs.hfa"
+
+void main(RangeIter & this) libcfa_public {
 	for() {
 		this._start = -1;
Index: libcfa/src/assert.cfa
===================================================================
--- libcfa/src/assert.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/assert.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -19,4 +19,5 @@
 #include <unistd.h>								// STDERR_FILENO
 #include "bits/debug.hfa"
+#include "bits/defs.hfa"
 
 extern "C" {
@@ -26,5 +27,6 @@
 
 	// called by macro assert in assert.h
-	void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) {
+	// would be cool to remove libcfa_public but it's needed for libcfathread
+	void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) libcfa_public {
 		__cfaabi_bits_print_safe( STDERR_FILENO, CFA_ASSERT_FMT ".\n", assertion, __progname, function, line, file );
 		abort();
@@ -32,5 +34,6 @@
 
 	// called by macro assertf
-	void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) {
+	// would be cool to remove libcfa_public but it's needed for libcfathread
+	void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) libcfa_public {
 		__cfaabi_bits_acquire();
 		__cfaabi_bits_print_nolock( STDERR_FILENO, CFA_ASSERT_FMT ": ", assertion, __progname, function, line, file );
Index: libcfa/src/bits/debug.cfa
===================================================================
--- libcfa/src/bits/debug.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/bits/debug.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -21,9 +21,12 @@
 #include <unistd.h>
 
+#include "bits/defs.hfa"
+
 enum { buffer_size = 4096 };
 static char buffer[ buffer_size ];
 
 extern "C" {
-	void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) {
+	// would be cool to remove libcfa_public but it's needed for libcfathread
+	void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) libcfa_public {
 		// ensure all data is written
 		for ( int count = 0, retcode; count < len; count += retcode ) {
@@ -44,5 +47,6 @@
 	void __cfaabi_bits_release() __attribute__((__weak__)) {}
 
-	int __cfaabi_bits_print_safe  ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) {
+	// would be cool to remove libcfa_public but it's needed for libcfathread
+	int __cfaabi_bits_print_safe  ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) libcfa_public {
 		va_list args;
 
Index: libcfa/src/bits/defs.hfa
===================================================================
--- libcfa/src/bits/defs.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/bits/defs.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -36,4 +36,6 @@
 #define __cfa_dlink(x) struct { struct x * next; struct x * back; } __dlink_substitute
 #endif
+
+#define libcfa_public __attribute__((visibility("default")))
 
 #ifdef __cforall
Index: libcfa/src/bits/weakso_locks.cfa
===================================================================
--- libcfa/src/bits/weakso_locks.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/bits/weakso_locks.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -18,4 +18,6 @@
 #include "bits/weakso_locks.hfa"
 
+#pragma GCC visibility push(default)
+
 void  ?{}( blocking_lock &, bool, bool ) {}
 void ^?{}( blocking_lock & ) {}
Index: libcfa/src/common.cfa
===================================================================
--- libcfa/src/common.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/common.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -18,4 +18,6 @@
 #include <stdlib.h>					// div_t, *div
 
+#pragma GCC visibility push(default)
+
 //---------------------------------------
 
Index: libcfa/src/concurrency/alarm.cfa
===================================================================
--- libcfa/src/concurrency/alarm.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/alarm.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -141,5 +141,5 @@
 //=============================================================================================
 
-void sleep( Duration duration ) {
+void sleep( Duration duration ) libcfa_public {
 	alarm_node_t node = { active_thread(), duration, 0`s };
 
Index: libcfa/src/concurrency/clib/cfathread.cfa
===================================================================
--- libcfa/src/concurrency/clib/cfathread.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/clib/cfathread.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -237,5 +237,5 @@
 
 typedef ThreadCancelled(cfathread_object) cfathread_exception;
-typedef ThreadCancelled_vtable(cfathread_object) cfathread_vtable;
+typedef vtable(ThreadCancelled(cfathread_object)) cfathread_vtable;
 
 void defaultResumptionHandler(ThreadCancelled(cfathread_object) & except) {
@@ -283,5 +283,5 @@
 
 typedef ThreadCancelled(__cfainit) __cfainit_exception;
-typedef ThreadCancelled_vtable(__cfainit) __cfainit_vtable;
+typedef vtable(ThreadCancelled(__cfainit)) __cfainit_vtable;
 
 void defaultResumptionHandler(ThreadCancelled(__cfainit) & except) {
@@ -326,17 +326,19 @@
 }
 
+#pragma GCC visibility push(default)
+
 //================================================================================
 // Main Api
 extern "C" {
-	int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) {
+	int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) libcfa_public {
 		*cl = new();
 		return 0;
 	}
 
-	cfathread_cluster_t cfathread_cluster_self(void) {
+	cfathread_cluster_t cfathread_cluster_self(void) libcfa_public {
 		return active_cluster();
 	}
 
-	int cfathread_cluster_print_stats( cfathread_cluster_t cl ) {
+	int cfathread_cluster_print_stats( cfathread_cluster_t cl ) libcfa_public {
 		#if !defined(__CFA_NO_STATISTICS__)
 			print_stats_at_exit( *cl, CFA_STATS_READY_Q | CFA_STATS_IO );
Index: libcfa/src/concurrency/coroutine.cfa
===================================================================
--- libcfa/src/concurrency/coroutine.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/coroutine.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -48,5 +48,5 @@
 //-----------------------------------------------------------------------------
 forall(T &)
-void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {
+void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public {
 	dst->virtual_table = src->virtual_table;
 	dst->the_coroutine = src->the_coroutine;
@@ -55,5 +55,5 @@
 
 forall(T &)
-const char * msg(CoroutineCancelled(T) *) {
+const char * msg(CoroutineCancelled(T) *) libcfa_public {
 	return "CoroutineCancelled(...)";
 }
@@ -62,5 +62,5 @@
 forall(T & | is_coroutine(T))
 void __cfaehm_cancelled_coroutine(
-		T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {
+		T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) ) libcfa_public {
 	verify( desc->cancellation );
 	desc->state = Cancelled;
@@ -89,5 +89,5 @@
 
 void __stack_prepare( __stack_info_t * this, size_t create_size );
-void __stack_clean  ( __stack_info_t * this );
+static void __stack_clean  ( __stack_info_t * this );
 
 //-----------------------------------------------------------------------------
@@ -114,5 +114,5 @@
 }
 
-void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) {
+void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) {
 	(this.context){0p, 0p};
 	(this.stack){storage, storageSize};
@@ -124,5 +124,5 @@
 }
 
-void ^?{}(coroutine$& this) {
+void ^?{}(coroutine$& this) libcfa_public {
 	if(this.state != Halted && this.state != Start && this.state != Primed) {
 		coroutine$ * src = active_coroutine();
@@ -146,6 +146,6 @@
 // Part of the Public API
 // Not inline since only ever called once per coroutine
-forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
-void prime(T& cor) {
+forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
+void prime(T& cor) libcfa_public {
 	coroutine$* this = get_coroutine(cor);
 	assert(this->state == Start);
@@ -155,5 +155,5 @@
 }
 
-[void *, size_t] __stack_alloc( size_t storageSize ) {
+static [void *, size_t] __stack_alloc( size_t storageSize ) {
 	const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
 	assert(__page_size != 0l);
@@ -193,5 +193,5 @@
 }
 
-void __stack_clean  ( __stack_info_t * this ) {
+static void __stack_clean  ( __stack_info_t * this ) {
 	void * storage = this->storage->limit;
 
@@ -215,5 +215,5 @@
 }
 
-void __stack_prepare( __stack_info_t * this, size_t create_size ) {
+void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public {
 	const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
 	bool userStack;
Index: libcfa/src/concurrency/coroutine.hfa
===================================================================
--- libcfa/src/concurrency/coroutine.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/coroutine.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -22,8 +22,9 @@
 //-----------------------------------------------------------------------------
 // Exception thrown from resume when a coroutine stack is cancelled.
-EHM_FORALL_EXCEPTION(CoroutineCancelled, (coroutine_t &), (coroutine_t)) (
+forall(coroutine_t &)
+exception CoroutineCancelled {
 	coroutine_t * the_coroutine;
 	exception_t * the_exception;
-);
+};
 
 forall(T &)
@@ -37,5 +38,5 @@
 // Anything that implements this trait can be resumed.
 // Anything that is resumed is a coroutine.
-trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled, (T))) {
+trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled(T))) {
 	void main(T & this);
 	coroutine$ * get_coroutine(T & this);
@@ -60,5 +61,5 @@
 //-----------------------------------------------------------------------------
 // Public coroutine API
-forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
+forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
 void prime(T & cor);
 
@@ -113,6 +114,4 @@
 
 extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
-extern void __stack_clean  ( __stack_info_t * this );
-
 
 // Suspend implementation inlined for performance
@@ -141,8 +140,8 @@
 forall(T & | is_coroutine(T))
 void __cfaehm_cancelled_coroutine(
-	T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) );
+	T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) );
 
 // Resume implementation inlined for performance
-forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
+forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
 static inline T & resume(T & cor) {
 	// optimization : read TLS once and reuse it
Index: libcfa/src/concurrency/exception.cfa
===================================================================
--- libcfa/src/concurrency/exception.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/exception.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -64,9 +64,9 @@
 extern "C" {
 
-struct exception_context_t * this_exception_context(void) {
+struct exception_context_t * this_exception_context(void) libcfa_public {
 	return &__get_stack( active_coroutine() )->exception_context;
 }
 
-_Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) {
+_Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) libcfa_public {
 	_Unwind_Stop_Fn stop_func;
 	void * stop_param;
Index: libcfa/src/concurrency/invoke.c
===================================================================
--- libcfa/src/concurrency/invoke.c	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/invoke.c	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -36,5 +36,5 @@
 extern void enable_interrupts( _Bool poll );
 
-void __cfactx_invoke_coroutine(
+libcfa_public void __cfactx_invoke_coroutine(
 	void (*main)(void *),
 	void *this
@@ -70,5 +70,5 @@
 }
 
-void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
+libcfa_public void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) {
 	_Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
@@ -77,5 +77,5 @@
 }
 
-void __cfactx_invoke_thread(
+libcfa_public void __cfactx_invoke_thread(
 	void (*main)(void *),
 	void *this
@@ -98,5 +98,5 @@
 }
 
-void __cfactx_start(
+libcfa_public void __cfactx_start(
 	void (*main)(void *),
 	struct coroutine$ * cor,
Index: libcfa/src/concurrency/io.cfa
===================================================================
--- libcfa/src/concurrency/io.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/io.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -221,5 +221,5 @@
 			const unsigned long long ctsc = rdtscl();
 
-			if(proc->io.target == MAX) {
+			if(proc->io.target == UINT_MAX) {
 				uint64_t chaos = __tls_rand();
 				unsigned ext = chaos & 0xff;
@@ -232,5 +232,5 @@
 			else {
 				const unsigned target = proc->io.target;
-				/* paranoid */ verify( io.tscs[target].tv != MAX );
+				/* paranoid */ verify( io.tscs[target].tv != ULLONG_MAX );
 				HELP: if(target < ctxs_count) {
 					const unsigned long long cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io);
@@ -246,5 +246,5 @@
 					__STATS__( true, io.calls.helped++; )
 				}
-				proc->io.target = MAX;
+				proc->io.target = UINT_MAX;
 			}
 		}
@@ -340,5 +340,5 @@
 	// for convenience, return both the index and the pointer to the sqe
 	// sqe == &sqes[idx]
-	struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
+	struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public {
 		// __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
 
@@ -419,5 +419,5 @@
 	}
 
-	void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
+	void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {
 		// __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
 
Index: libcfa/src/concurrency/io/call.cfa.in
===================================================================
--- libcfa/src/concurrency/io/call.cfa.in	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/io/call.cfa.in	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -139,4 +139,5 @@
 // I/O Interface
 //=============================================================================================
+#pragma GCC visibility push(default)
 """
 
Index: libcfa/src/concurrency/io/setup.cfa
===================================================================
--- libcfa/src/concurrency/io/setup.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/io/setup.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -26,5 +26,5 @@
 
 #if !defined(CFA_HAVE_LINUX_IO_URING_H)
-	void ?{}(io_context_params & this) {}
+	void ?{}(io_context_params & this) libcfa_public {}
 
 	void  ?{}($io_context & this, struct cluster & cl) {}
@@ -66,5 +66,5 @@
 #pragma GCC diagnostic pop
 
-	void ?{}(io_context_params & this) {
+	void ?{}(io_context_params & this) libcfa_public {
 		this.num_entries = 256;
 	}
Index: libcfa/src/concurrency/io/types.hfa
===================================================================
--- libcfa/src/concurrency/io/types.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/io/types.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -17,4 +17,6 @@
 #pragma once
 
+#include <limits.h>
+
 extern "C" {
 	#include <linux/types.h>
@@ -25,5 +27,4 @@
 #include "iofwd.hfa"
 #include "kernel/fwd.hfa"
-#include "limits.hfa"
 
 #if defined(CFA_HAVE_LINUX_IO_URING_H)
@@ -140,5 +141,5 @@
 		const __u32 tail = *this->cq.tail;
 
-		if(head == tail) return MAX;
+		if(head == tail) return ULLONG_MAX;
 
 		return this->cq.ts;
Index: libcfa/src/concurrency/kernel.cfa
===================================================================
--- libcfa/src/concurrency/kernel.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/kernel.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -389,5 +389,5 @@
 
 // KERNEL_ONLY
-void returnToKernel() {
+static void returnToKernel() {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
@@ -547,5 +547,5 @@
 }
 
-void unpark( thread$ * thrd, unpark_hint hint ) {
+void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
 	if( !thrd ) return;
 
@@ -558,5 +558,5 @@
 }
 
-void park( void ) {
+void park( void ) libcfa_public {
 	__disable_interrupts_checked();
 		/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
@@ -601,5 +601,5 @@
 
 // KERNEL ONLY
-bool force_yield( __Preemption_Reason reason ) {
+bool force_yield( __Preemption_Reason reason ) libcfa_public {
 	__disable_interrupts_checked();
 		thread$ * thrd = kernelTLS().this_thread;
@@ -849,5 +849,5 @@
 //-----------------------------------------------------------------------------
 // Debug
-bool threading_enabled(void) __attribute__((const)) {
+bool threading_enabled(void) __attribute__((const)) libcfa_public {
 	return true;
 }
@@ -856,5 +856,5 @@
 // Statistics
 #if !defined(__CFA_NO_STATISTICS__)
-	void print_halts( processor & this ) {
+	void print_halts( processor & this ) libcfa_public {
 		this.print_halts = true;
 	}
@@ -873,5 +873,5 @@
 	}
 
-	void crawl_cluster_stats( cluster & this ) {
+	static void crawl_cluster_stats( cluster & this ) {
 		// Stop the world, otherwise stats could get really messed-up
 		// this doesn't solve all problems but does solve many
@@ -889,5 +889,5 @@
 
 
-	void print_stats_now( cluster & this, int flags ) {
+	void print_stats_now( cluster & this, int flags ) libcfa_public {
 		crawl_cluster_stats( this );
 		__print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
Index: libcfa/src/concurrency/kernel.hfa
===================================================================
--- libcfa/src/concurrency/kernel.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/kernel.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -49,5 +49,7 @@
 
 // Coroutine used py processors for the 2-step context switch
-coroutine processorCtx_t {
+
+struct processorCtx_t {
+	struct coroutine$ self;
 	struct processor * proc;
 };
Index: libcfa/src/concurrency/kernel/cluster.cfa
===================================================================
--- libcfa/src/concurrency/kernel/cluster.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/kernel/cluster.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -49,5 +49,5 @@
 
 // returns the maximum number of processors the RWLock support
-__attribute__((weak)) unsigned __max_processors() {
+__attribute__((weak)) unsigned __max_processors() libcfa_public {
 	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
 	if(!max_cores_s) {
@@ -233,9 +233,9 @@
 					if(is_empty(sl)) {
 						assert( sl.anchor.next == 0p );
-						assert( sl.anchor.ts   == -1llu );
+						assert( sl.anchor.ts   == MAX );
 						assert( mock_head(sl)  == sl.prev );
 					} else {
 						assert( sl.anchor.next != 0p );
-						assert( sl.anchor.ts   != -1llu );
+						assert( sl.anchor.ts   != MAX );
 						assert( mock_head(sl)  != sl.prev );
 					}
@@ -259,9 +259,9 @@
 		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
 		it->rdq.id = valrq;
-		it->rdq.target = MAX;
+		it->rdq.target = UINT_MAX;
 		valrq += __shard_factor.readyq;
 		#if defined(CFA_HAVE_LINUX_IO_URING_H)
 			it->io.ctx->cq.id = valio;
-			it->io.target = MAX;
+			it->io.target = UINT_MAX;
 			valio += __shard_factor.io;
 		#endif
@@ -472,5 +472,5 @@
 	this.prev = mock_head(this);
 	this.anchor.next = 0p;
-	this.anchor.ts   = -1llu;
+	this.anchor.ts   = MAX;
 	#if !defined(__CFA_NO_STATISTICS__)
 		this.cnt  = 0;
@@ -484,5 +484,5 @@
 	/* paranoid */ verify( &mock_head(this)->link.ts   == &this.anchor.ts   );
 	/* paranoid */ verify( mock_head(this)->link.next == 0p );
-	/* paranoid */ verify( mock_head(this)->link.ts   == -1llu  );
+	/* paranoid */ verify( mock_head(this)->link.ts   == MAX );
 	/* paranoid */ verify( mock_head(this) == this.prev );
 	/* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 );
@@ -495,5 +495,5 @@
 	// Make sure the list is empty
 	/* paranoid */ verify( this.anchor.next == 0p );
-	/* paranoid */ verify( this.anchor.ts   == -1llu );
+	/* paranoid */ verify( this.anchor.ts   == MAX );
 	/* paranoid */ verify( mock_head(this)  == this.prev );
 }
Index: libcfa/src/concurrency/kernel/cluster.hfa
===================================================================
--- libcfa/src/concurrency/kernel/cluster.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/kernel/cluster.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -19,5 +19,5 @@
 #include "kernel/private.hfa"
 
-#include "limits.hfa"
+#include <limits.h>
 
 //-----------------------------------------------------------------------
@@ -37,5 +37,5 @@
 
 static inline void touch_tsc(__timestamp_t * tscs, size_t idx, unsigned long long ts_prev, unsigned long long ts_next) {
-	if (ts_next == MAX) return;
+	if (ts_next == ULLONG_MAX) return;
 	unsigned long long now = rdtscl();
 	unsigned long long pma = __atomic_load_n(&tscs[ idx ].ma, __ATOMIC_RELAXED);
@@ -59,5 +59,5 @@
 	for(i; shard_factor) {
 		unsigned long long ptsc = ts(data[start + i]);
-		if(ptsc != -1ull) {
+		if(ptsc != ULLONG_MAX) {
 			/* paranoid */ verify( start + i < count );
 			unsigned long long tsc = moving_average(ctsc, ptsc, tscs[start + i].ma);
Index: libcfa/src/concurrency/kernel/private.hfa
===================================================================
--- libcfa/src/concurrency/kernel/private.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/kernel/private.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -109,5 +109,6 @@
 //-----------------------------------------------------------------------------
 // Processor
-void main(processorCtx_t *);
+void main(processorCtx_t &);
+static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; }
 
 void * __create_pthread( pthread_t *, void * (*)(void *), void * );
Index: libcfa/src/concurrency/kernel/startup.cfa
===================================================================
--- libcfa/src/concurrency/kernel/startup.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/kernel/startup.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -120,5 +120,5 @@
 #endif
 
-cluster              * mainCluster;
+cluster              * mainCluster libcfa_public;
 processor            * mainProcessor;
 thread$              * mainThread;
@@ -169,5 +169,5 @@
 };
 
-void ?{}( current_stack_info_t & this ) {
+static void ?{}( current_stack_info_t & this ) {
 	__stack_context_t ctx;
 	CtxGet( ctx );
@@ -209,6 +209,6 @@
 	// Construct the processor context of the main processor
 	void ?{}(processorCtx_t & this, processor * proc) {
-		(this.__cor){ "Processor" };
-		this.__cor.starter = 0p;
+		(this.self){ "Processor" };
+		this.self.starter = 0p;
 		this.proc = proc;
 	}
@@ -507,5 +507,5 @@
 	self_mon_p = &self_mon;
 	link.next = 0p;
-	link.ts   = -1llu;
+	link.ts   = MAX;
 	preferred = ready_queue_new_preferred();
 	last_proc = 0p;
@@ -526,5 +526,5 @@
 // Construct the processor context of non-main processors
 static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
-	(this.__cor){ info };
+	(this.self){ info };
 	this.proc = proc;
 }
@@ -578,5 +578,5 @@
 }
 
-void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {
+void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) libcfa_public {
 	( this.terminated ){};
 	( this.runner ){};
@@ -591,10 +591,10 @@
 }
 
-void ?{}(processor & this, const char name[], cluster & _cltr) {
+void ?{}(processor & this, const char name[], cluster & _cltr) libcfa_public {
 	(this){name, _cltr, 0p};
 }
 
 extern size_t __page_size;
-void ^?{}(processor & this) with( this ){
+void ^?{}(processor & this) libcfa_public with( this ) {
 	/* paranoid */ verify( !__atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) );
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);
@@ -623,5 +623,5 @@
 }
 
-void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {
+void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) libcfa_public with( this ) {
 	this.name = name;
 	this.preemption_rate = preemption_rate;
@@ -658,5 +658,5 @@
 }
 
-void ^?{}(cluster & this) {
+void ^?{}(cluster & this) libcfa_public {
 	destroy(this.io.arbiter);
 
Index: libcfa/src/concurrency/locks.cfa
===================================================================
--- libcfa/src/concurrency/locks.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/locks.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -24,4 +24,6 @@
 #include <stdlib.hfa>
 
+#pragma GCC visibility push(default)
+
 //-----------------------------------------------------------------------------
 // info_thread
@@ -116,5 +118,5 @@
 }
 
-void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
+static void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
 	thread$ * t = &try_pop_front( blocked_threads );
 	owner = t;
@@ -192,5 +194,5 @@
 	void ^?{}( alarm_node_wrap(L) & this ) { }
 
-	void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
+	static void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
 		// This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin.
 		lock( cond->lock __cfaabi_dbg_ctx2 );
@@ -216,5 +218,5 @@
 
 	// this casts the alarm node to our wrapped type since we used type erasure
-	void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
+	static void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
 }
 
@@ -233,5 +235,5 @@
 	void ^?{}( condition_variable(L) & this ){ }
 
-	void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
+	static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
 		if(&popped != 0p) {
 			popped.signalled = true;
@@ -278,5 +280,5 @@
 	int counter( condition_variable(L) & this ) with(this) { return count; }
 
-	size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
+	static size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
 		// add info_thread to waiting queue
 		insert_last( blocked_threads, *i );
@@ -291,5 +293,5 @@
 
 	// helper for wait()'s' with no timeout
-	void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
+	static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
 		lock( lock __cfaabi_dbg_ctx2 );
 		size_t recursion_count = queue_and_get_recursion(this, &i);
@@ -308,5 +310,5 @@
 
 	// helper for wait()'s' with a timeout
-	void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
+	static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
 		lock( lock __cfaabi_dbg_ctx2 );
 		size_t recursion_count = queue_and_get_recursion(this, &info);
@@ -343,5 +345,5 @@
 	// fast_cond_var
 	void  ?{}( fast_cond_var(L) & this ){
-		this.blocked_threads{}; 
+		this.blocked_threads{};
 		#ifdef __CFA_DEBUG__
 		this.lock_used = 0p;
Index: libcfa/src/concurrency/monitor.cfa
===================================================================
--- libcfa/src/concurrency/monitor.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/monitor.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -44,4 +44,8 @@
 static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
 
+static inline void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
+static inline void ?{}(__condition_criterion_t & this );
+static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
+
 static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
 static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
@@ -243,5 +247,5 @@
 
 // Leave single monitor
-void __leave( monitor$ * this ) {
+static void __leave( monitor$ * this ) {
 	// Lock the monitor spinlock
 	lock( this->lock __cfaabi_dbg_ctx2 );
@@ -278,5 +282,5 @@
 
 // Leave single monitor for the last time
-void __dtor_leave( monitor$ * this, bool join ) {
+static void __dtor_leave( monitor$ * this, bool join ) {
 	__cfaabi_dbg_debug_do(
 		if( active_thread() != this->owner ) {
@@ -344,5 +348,5 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
+void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) libcfa_public {
 	thread$ * thrd = active_thread();
 
@@ -369,5 +373,5 @@
 }
 
-void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) {
+void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) libcfa_public {
 	this{ m, count, 0p };
 }
@@ -375,5 +379,5 @@
 
 // Dtor for monitor guard
-void ^?{}( monitor_guard_t & this ) {
+void ^?{}( monitor_guard_t & this ) libcfa_public {
 	// __cfaabi_dbg_print_safe( "MGUARD : leaving %d\n", this.count);
 
@@ -389,5 +393,5 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
+void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) libcfa_public {
 	// optimization
 	thread$ * thrd = active_thread();
@@ -409,5 +413,5 @@
 
 // Dtor for monitor guard
-void ^?{}( monitor_dtor_guard_t & this ) {
+void ^?{}( monitor_dtor_guard_t & this ) libcfa_public {
 	// Leave the monitors in order
 	__dtor_leave( this.m, this.join );
@@ -419,5 +423,5 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling types
-void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
+static void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
 	this.waiting_thread = waiting_thread;
 	this.count = count;
@@ -426,5 +430,5 @@
 }
 
-void ?{}(__condition_criterion_t & this ) with( this ) {
+static void ?{}(__condition_criterion_t & this ) with( this ) {
 	ready  = false;
 	target = 0p;
@@ -433,5 +437,5 @@
 }
 
-void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
+static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
 	this.ready  = false;
 	this.target = target;
@@ -442,5 +446,5 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling
-void wait( condition & this, uintptr_t user_info = 0 ) {
+void wait( condition & this, uintptr_t user_info = 0 ) libcfa_public {
 	brand_condition( this );
 
@@ -496,5 +500,5 @@
 }
 
-bool signal( condition & this ) {
+bool signal( condition & this ) libcfa_public {
 	if( is_empty( this ) ) { return false; }
 
@@ -538,5 +542,5 @@
 }
 
-bool signal_block( condition & this ) {
+bool signal_block( condition & this ) libcfa_public {
 	if( !this.blocked.head ) { return false; }
 
@@ -586,5 +590,5 @@
 
 // Access the user_info of the thread waiting at the front of the queue
-uintptr_t front( condition & this ) {
+uintptr_t front( condition & this ) libcfa_public {
 	verifyf( !is_empty(this),
 		"Attempt to access user data on an empty condition.\n"
@@ -608,5 +612,5 @@
 // 		setup mask
 // 		block
-void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
+void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) libcfa_public {
 	// This statment doesn't have a contiguous list of monitors...
 	// Create one!
@@ -994,5 +998,5 @@
 // Can't be accepted since a mutex stmt is effectively an anonymous routine
 // Thus we do not need a monitor group
-void lock( monitor$ * this ) {
+void lock( monitor$ * this ) libcfa_public {
 	thread$ * thrd = active_thread();
 
@@ -1046,5 +1050,5 @@
 // Leave routine for mutex stmt
 // Is just a wrapper around __leave for the is_lock trait to see
-void unlock( monitor$ * this ) { __leave( this ); }
+void unlock( monitor$ * this ) libcfa_public { __leave( this ); }
 
 // Local Variables: //
Index: libcfa/src/concurrency/monitor.hfa
===================================================================
--- libcfa/src/concurrency/monitor.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/monitor.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -119,7 +119,7 @@
 }
 
-void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
-void ?{}(__condition_criterion_t & this );
-void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
+// void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
+// void ?{}(__condition_criterion_t & this );
+// void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
 
 struct condition {
Index: libcfa/src/concurrency/preemption.cfa
===================================================================
--- libcfa/src/concurrency/preemption.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/preemption.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -38,5 +38,5 @@
 #endif
 
-__attribute__((weak)) Duration default_preemption() {
+__attribute__((weak)) Duration default_preemption() libcfa_public {
 	const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION");
 	if(!preempt_rate_s) {
@@ -238,5 +238,5 @@
 //----------
 // special case for preemption since used often
-__attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() {
+__attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_public {
 	// create a assembler label before
 	// marked as clobber all to avoid movement
@@ -276,5 +276,5 @@
 // Get data from the TLS block
 // struct asm_region __cfaasm_get;
-uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems
+uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__, visibility("default"))); //no inline to avoid problems
 uintptr_t __cfatls_get( unsigned long int offset ) {
 	// create a assembler label before
@@ -295,5 +295,5 @@
 extern "C" {
 	// Disable interrupts by incrementing the counter
-	void disable_interrupts() {
+	__attribute__((__noinline__, visibility("default"))) void disable_interrupts() libcfa_public {
 		// create a assembler label before
 		// marked as clobber all to avoid movement
@@ -326,5 +326,5 @@
 	// Enable interrupts by decrementing the counter
 	// If counter reaches 0, execute any pending __cfactx_switch
-	void enable_interrupts( bool poll ) {
+	void enable_interrupts( bool poll ) libcfa_public {
 		// Cache the processor now since interrupts can start happening after the atomic store
 		processor   * proc = __cfaabi_tls.this_processor;
@@ -362,5 +362,5 @@
 //-----------------------------------------------------------------------------
 // Kernel Signal Debug
-void __cfaabi_check_preemption() {
+void __cfaabi_check_preemption() libcfa_public {
 	bool ready = __preemption_enabled();
 	if(!ready) { abort("Preemption should be ready"); }
Index: libcfa/src/concurrency/ready_queue.cfa
===================================================================
--- libcfa/src/concurrency/ready_queue.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/ready_queue.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -125,5 +125,5 @@
 	const unsigned long long ctsc = rdtscl();
 
-	if(proc->rdq.target == MAX) {
+	if(proc->rdq.target == UINT_MAX) {
 		uint64_t chaos = __tls_rand();
 		unsigned ext = chaos & 0xff;
@@ -137,5 +137,5 @@
 		const unsigned target = proc->rdq.target;
 		__cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, readyQ.tscs[target].tv);
-		/* paranoid */ verify( readyQ.tscs[target].tv != MAX );
+		/* paranoid */ verify( readyQ.tscs[target].tv != ULLONG_MAX );
 		if(target < lanes_count) {
 			const unsigned long long cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq);
@@ -147,5 +147,5 @@
 			}
 		}
-		proc->rdq.target = MAX;
+		proc->rdq.target = UINT_MAX;
 	}
 
@@ -245,5 +245,5 @@
 // get preferred ready for new thread
 unsigned ready_queue_new_preferred() {
-	unsigned pref = MAX;
+	unsigned pref = UINT_MAX;
 	if(struct thread$ * thrd = publicTLS_get( this_thread )) {
 		pref = thrd->preferred;
Index: libcfa/src/concurrency/ready_subqueue.hfa
===================================================================
--- libcfa/src/concurrency/ready_subqueue.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/ready_subqueue.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -32,16 +32,16 @@
 	/* paranoid */ verify( this.lock );
 	/* paranoid */ verify( node->link.next == 0p );
-	/* paranoid */ verify( node->link.ts   == MAX  );
+	/* paranoid */ verify( __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED) == MAX  );
 	/* paranoid */ verify( this.prev->link.next == 0p );
-	/* paranoid */ verify( this.prev->link.ts   == MAX  );
+	/* paranoid */ verify( __atomic_load_n(&this.prev->link.ts, __ATOMIC_RELAXED)   == MAX  );
 	if( this.anchor.next == 0p ) {
 		/* paranoid */ verify( this.anchor.next == 0p );
-		/* paranoid */ verify( this.anchor.ts   == MAX );
-		/* paranoid */ verify( this.anchor.ts   != 0  );
+		/* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) == MAX );
+		/* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0  );
 		/* paranoid */ verify( this.prev == mock_head( this ) );
 	} else {
 		/* paranoid */ verify( this.anchor.next != 0p );
-		/* paranoid */ verify( this.anchor.ts   != MAX );
-		/* paranoid */ verify( this.anchor.ts   != 0  );
+		/* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX );
+		/* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0  );
 		/* paranoid */ verify( this.prev != mock_head( this ) );
 	}
@@ -62,14 +62,14 @@
 	/* paranoid */ verify( this.lock );
 	/* paranoid */ verify( this.anchor.next != 0p );
-	/* paranoid */ verify( this.anchor.ts   != MAX );
-	/* paranoid */ verify( this.anchor.ts   != 0  );
+	/* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX );
+	/* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0   );
 
 	// Get the relevant nodes locally
 	thread$ * node = this.anchor.next;
 	this.anchor.next = node->link.next;
-	this.anchor.ts   = node->link.ts;
+	__atomic_store_n(&this.anchor.ts, __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED), __ATOMIC_RELAXED);
 	bool is_empty = this.anchor.next == 0p;
 	node->link.next = 0p;
-	node->link.ts   = MAX;
+	__atomic_store_n(&node->link.ts, ULLONG_MAX, __ATOMIC_RELAXED);
 	#if !defined(__CFA_NO_STATISTICS__)
 		this.cnt--;
@@ -79,10 +79,11 @@
 	if(is_empty) this.prev = mock_head( this );
 
+	unsigned long long ats = __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED);
 	/* paranoid */ verify( node->link.next == 0p );
-	/* paranoid */ verify( node->link.ts   == MAX  );
-	/* paranoid */ verify( node->link.ts   != 0  );
-	/* paranoid */ verify( this.anchor.ts  != 0  );
-	/* paranoid */ verify( (this.anchor.ts  == MAX) == is_empty );
-	return [node, this.anchor.ts];
+	/* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) == MAX );
+	/* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) != 0   );
+	/* paranoid */ verify( ats != 0 );
+	/* paranoid */ verify( (ats == MAX) == is_empty );
+	return [node, ats];
 }
 
@@ -96,4 +97,5 @@
 	// Cannot verify 'emptiness' here since it may not be locked
 	/* paranoid */ verify(this.anchor.ts != 0);
-	return this.anchor.ts;
+	/* paranoid */ static_assert(__atomic_always_lock_free(sizeof(this.anchor.ts), &this.anchor.ts));
+	return __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED);
 }
Index: libcfa/src/concurrency/thread.cfa
===================================================================
--- libcfa/src/concurrency/thread.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/thread.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -19,6 +19,7 @@
 #include "thread.hfa"
 
+#include "exception.hfa"
 #include "kernel/private.hfa"
-#include "exception.hfa"
+#include "limits.hfa"
 
 #define __CFA_INVOKE_PRIVATE__
@@ -26,4 +27,6 @@
 
 extern uint32_t __global_random_seed, __global_random_prime, __global_random_mask;
+
+#pragma GCC visibility push(default)
 
 //-----------------------------------------------------------------------------
@@ -42,5 +45,5 @@
 	curr_cluster = &cl;
 	link.next = 0p;
-	link.ts   = -1llu;
+	link.ts   = MAX;
 	preferred = ready_queue_new_preferred();
 	last_proc = 0p;
@@ -86,6 +89,6 @@
 }
 
-forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T))
-    | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
+forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
+    | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
 void ?{}( thread_dtor_guard_t & this,
 		T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) {
@@ -165,6 +168,6 @@
 
 //-----------------------------------------------------------------------------
-forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T))
-    | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
+forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
+	| { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
 T & join( T & this ) {
 	thread_dtor_guard_t guard = { this, defaultResumptionHandler };
Index: libcfa/src/concurrency/thread.hfa
===================================================================
--- libcfa/src/concurrency/thread.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/concurrency/thread.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -32,8 +32,9 @@
 };
 
-EHM_FORALL_EXCEPTION(ThreadCancelled, (thread_t &), (thread_t)) (
+forall(thread_t &)
+exception ThreadCancelled {
 	thread_t * the_thread;
 	exception_t * the_exception;
-);
+};
 
 forall(T &)
@@ -79,6 +80,6 @@
 };
 
-forall( T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T))
-    | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
+forall( T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
+	| { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
 void ?{}( thread_dtor_guard_t & this, T & thrd, void(*)(ThreadCancelled(T) &) );
 void ^?{}( thread_dtor_guard_t & this );
@@ -126,6 +127,6 @@
 //----------
 // join
-forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T))
-    | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
+forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
+	| { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
 T & join( T & this );
 
Index: libcfa/src/containers/maybe.cfa
===================================================================
--- libcfa/src/containers/maybe.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/containers/maybe.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -17,4 +17,5 @@
 #include <assert.h>
 
+#pragma GCC visibility push(default)
 
 forall(T)
Index: libcfa/src/containers/result.cfa
===================================================================
--- libcfa/src/containers/result.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/containers/result.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -17,4 +17,5 @@
 #include <assert.h>
 
+#pragma GCC visibility push(default)
 
 forall(T, E)
Index: libcfa/src/containers/string.cfa
===================================================================
--- libcfa/src/containers/string.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/containers/string.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -18,4 +18,5 @@
 #include <stdlib.hfa>
 
+#pragma GCC visibility push(default)
 
 /*
Index: libcfa/src/containers/string_sharectx.hfa
===================================================================
--- libcfa/src/containers/string_sharectx.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/containers/string_sharectx.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -16,9 +16,11 @@
 #pragma once
 
+#pragma GCC visibility push(default)
+
 //######################### String Sharing Context #########################
 
 struct VbyteHeap;
 
-// A string_sharectx 
+// A string_sharectx
 //
 // Usage:
Index: libcfa/src/containers/vector.cfa
===================================================================
--- libcfa/src/containers/vector.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/containers/vector.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -18,6 +18,8 @@
 #include <stdlib.hfa>
 
+#pragma GCC visibility push(default)
+
 forall(T, allocator_t | allocator_c(T, allocator_t))
-void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other);
+static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other);
 
 //------------------------------------------------------------------------------
@@ -83,5 +85,5 @@
 
 forall(T, allocator_t | allocator_c(T, allocator_t))
-void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other)
+static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other)
 {
 	this->size = other->size;
Index: libcfa/src/device/cpu.cfa
===================================================================
--- libcfa/src/device/cpu.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/device/cpu.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -31,4 +31,5 @@
 }
 
+#include "bits/defs.hfa"
 #include "algorithms/range_iterator.hfa"
 
@@ -456,3 +457,3 @@
 }
 
-cpu_info_t cpu_info;
+libcfa_public cpu_info_t cpu_info;
Index: libcfa/src/exception.c
===================================================================
--- libcfa/src/exception.c	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/exception.c	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -27,4 +27,7 @@
 #include "stdhdr/assert.h"
 #include "virtual.h"
+
+#pragma GCC visibility push(default)
+
 #include "lsda.h"
 
@@ -261,5 +264,5 @@
 #else // defined( __ARM_ARCH )
 	// The return code from _Unwind_RaiseException seems to be corrupt on ARM at end of stack.
-	// This workaround tries to keep default exception handling working. 
+	// This workaround tries to keep default exception handling working.
 	if ( ret == _URC_FATAL_PHASE1_ERROR || ret == _URC_FATAL_PHASE2_ERROR ) {
 #endif
Index: libcfa/src/exception.hfa
===================================================================
--- libcfa/src/exception.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/exception.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,6 +10,6 @@
 // Created On       : Thu Apr  7 10:25:00 2020
 // Last Modified By : Andrew Beach
-// Last Modified On : Thr Apr  8 15:16:00 2021
-// Update Count     : 4
+// Last Modified On : Wed May 25 17:20:00 2022
+// Update Count     : 5
 //
 
@@ -18,169 +18,15 @@
 // -----------------------------------------------------------------------------------------------
 
-// EHM_EXCEPTION(exception_name)(fields...);
-// Create an exception (a virtual structure that inherits from exception_t)
-// with the given name and fields.
-#define EHM_EXCEPTION(exception_name) \
-	_EHM_TYPE_ID_STRUCT(exception_name, ); \
-	_EHM_TYPE_ID_VALUE(exception_name, ); \
-	_EHM_VIRTUAL_TABLE_STRUCT(exception_name, , ); \
-	_EHM_EXCEPTION_STRUCT(exception_name, , )
+// EHM_DEFAULT_VTABLE(exception_type)
+// Create a declaration for a (possibly polymorphic) default vtable.
+// Mostly used by and for the concurrency module.
+#define EHM_DEFAULT_VTABLE(type) vtable(type) & const _default_vtable
 
-// EHM_EXTERN_VTABLE(exception_name, table_name);
-// Forward declare a virtual table called table_name for exception_name type.
-#define EHM_EXTERN_VTABLE(exception_name, table_name) \
-	_EHM_EXTERN_VTABLE(exception_name, , table_name)
-
-// EHM_VIRTUAL_TABLE(exception_name, table_name);
-// Define a virtual table called table_name for exception_name type.
-#define EHM_VIRTUAL_TABLE(exception_name, table_name) \
-	_EHM_DEFINE_COPY(exception_name, ) \
-	_EHM_DEFINE_MSG(exception_name, ) \
-	_EHM_VIRTUAL_TABLE(exception_name, , table_name)
-
-// EHM_FORALL_EXCEPTION(exception_name, (assertions), (parameters))(fields...);
-// As EHM_EXCEPTION but for polymorphic types instead of monomorphic ones.
-// The assertions list should include all polymorphic parameters and
-// assertions inside a parentisized list. Parameters should include all the
-// polymorphic parameter names inside a parentisized list (same order).
-#define EHM_FORALL_EXCEPTION(exception_name, assertions, parameters) \
-	_EHM_TYPE_ID_STRUCT(exception_name, forall assertions); \
-	_EHM_VIRTUAL_TABLE_STRUCT(exception_name, forall assertions, parameters); \
-	_EHM_EXCEPTION_STRUCT(exception_name, forall assertions, parameters)
-
-// EHM_FORALL_EXTERN_VTABLE(exception_name, (arguments), table_name);
-// As EHM_EXTERN_VTABLE but for polymorphic types instead of monomorphic ones.
-// Arguments should be the parentisized list of polymorphic arguments.
-#define EHM_FORALL_EXTERN_VTABLE(exception_name, arguments, table_name) \
-	_EHM_EXTERN_VTABLE(exception_name, arguments, table_name)
-
-// EHM_FORALL_VIRTUAL_TABLE(exception_name, (arguments), table_name);
-// As EHM_VIRTUAL_TABLE but for polymorphic types instead of monomorphic ones.
-// Arguments should be the parentisized list of polymorphic arguments.
-#define EHM_FORALL_VIRTUAL_TABLE(exception_name, arguments, table_name) \
-	_EHM_TYPE_ID_VALUE(exception_name, arguments); \
-	_EHM_DEFINE_COPY(exception_name, arguments) \
-	_EHM_DEFINE_MSG(exception_name, arguments) \
-	_EHM_VIRTUAL_TABLE(exception_name, arguments, table_name)
-
-// EHM_DEFAULT_VTABLE(exception_name, (arguments))
-// Create a declaration for a (possibly polymorphic) default vtable.
-#define EHM_DEFAULT_VTABLE(exception_name, arguments) \
-	_EHM_VTABLE_TYPE(exception_name) arguments & const _default_vtable
-
-// IS_EXCEPTION(exception_name [, (...parameters)])
-// IS_RESUMPTION_EXCEPTION(exception_name [, (parameters...)])
-// IS_TERMINATION_EXCEPTION(exception_name [, (parameters...)])
-// Create an assertion that exception_name, possibly with the qualifing parameters, is the given
-// kind of exception with the standard vtable with the same parameters if applicable.
-#define IS_EXCEPTION(...) _IS_EXCEPTION(is_exception, __VA_ARGS__, , ~)
-#define IS_RESUMPTION_EXCEPTION(...) _IS_EXCEPTION(is_resumption_exception, __VA_ARGS__, , ~)
-#define IS_TERMINATION_EXCEPTION(...) _IS_EXCEPTION(is_termination_exception, __VA_ARGS__, , ~)
-
-// Macros starting with a leading underscore are internal.
-
-// Create an exception type definition. must be tailing, can be polymorphic.
-#define _EHM_EXCEPTION_STRUCT(exception_name, forall_clause, parameters) \
-	forall_clause struct exception_name { \
-		_EHM_VTABLE_TYPE(exception_name) parameters const * virtual_table; \
-		_CLOSE
-
-// Create a (possibly polymorphic) virtual table forward declaration.
-#define _EHM_EXTERN_VTABLE(exception_name, arguments, table_name) \
-	extern const _EHM_VTABLE_TYPE(exception_name) arguments table_name
-
-// Create a (possibly polymorphic) virtual table definition.
-#define _EHM_VIRTUAL_TABLE(exception_type, arguments, table_name) \
-	const _EHM_VTABLE_TYPE(exception_type) arguments table_name @= { \
-		.__cfavir_typeid : &_EHM_TYPE_ID_NAME(exception_type), \
-		.size : sizeof(struct exception_type arguments), \
-		.copy : copy, \
-		.^?{} : ^?{}, \
-		.msg : msg, \
-	}
-
-// Create a (possibly polymorphic) copy function from an assignment operator.
-#define _EHM_DEFINE_FORALL_COPY(exception_name, forall_clause, parameters) \
-	forall_clause void copy(exception_name parameters * this, \
-			exception_name parameters * that) { \
-		*this = *that; \
-	}
-
-#define _EHM_DEFINE_COPY(exception_name, arguments) \
-	void copy(exception_name arguments * this, exception_name arguments * that) { \
-		*this = *that; \
-	}
-
-// Create a (possibly polymorphic) msg function
-#define _EHM_DEFINE_FORALL_MSG(exception_name, forall_clause, parameters) \
-	forall_clause const char * msg(exception_name parameters * this) { \
-		return #exception_name #parameters; \
-	}
-
-#define _EHM_DEFINE_MSG(exception_name, arguments) \
-	const char * msg(exception_name arguments * this) { \
-		return #exception_name #arguments; \
-	}
-
-// Produces the C compatable name of the virtual table type for a virtual type.
-#define _EHM_VTABLE_TYPE(type_name) struct _GLUE2(type_name,_vtable)
-
-// Create the vtable type for exception name.
-#define _EHM_VIRTUAL_TABLE_STRUCT(exception_name, forall_clause, parameters) \
-	forall_clause struct exception_name; \
-	forall_clause _EHM_VTABLE_TYPE(exception_name) { \
-		_EHM_TYPE_ID_TYPE(exception_name) parameters const * __cfavir_typeid; \
-		size_t size; \
-		void (*copy)(exception_name parameters * this, exception_name parameters * other); \
-		void (*^?{})(exception_name parameters & this); \
-		const char * (*msg)(exception_name parameters * this); \
-	}
-
-// Define the function required to satify the trait for exceptions.
-#define _EHM_TRAIT_FUNCTION(exception_name, forall_clause, parameters) \
-	forall_clause inline void mark_exception( \
-		exception_name parameters const &, \
-		_EHM_VTABLE_TYPE(exception_name) parameters const &) {} \
-
-#define __EHM_TRAIT_FUNCTION(exception_name, forall_clause, parameters) \
-	forall_clause inline _EHM_VTABLE_TYPE(exception_name) parameters const & \
-			get_exception_vtable(exception_name parameters const & this) { \
-		/* This comes before the structure definition, but we know the offset. */ \
-		/* return (_EHM_VTABLE_TYPE(exception_name) parameters const &)this; */ \
-		assert(false); \
-	}
-
-// Generates a new type-id structure. This is used to mangle the name of the
-// type-id instance so it also includes polymorphic information. Must be the
-// direct decendent of exception_t.
-// The second field is used to recover type information about the exception.
-#define _EHM_TYPE_ID_STRUCT(exception_name, forall_clause) \
-	forall_clause _EHM_TYPE_ID_TYPE(exception_name) { \
-		__cfavir_type_info const * parent; \
-	}
-
-// Generate a new type-id value.
-#define _EHM_TYPE_ID_VALUE(exception_name, arguments) \
-	__attribute__((cfa_linkonce)) \
-	_EHM_TYPE_ID_TYPE(exception_name) arguments const \
-			_EHM_TYPE_ID_NAME(exception_name) = { \
-		&__cfatid_exception_t, \
-	}
-
-// _EHM_TYPE_ID_STRUCT and _EHM_TYPE_ID_VALUE are the two that would need to
-// be updated to extend the hierarchy if we are still using macros when that
-// is added.
-
-// Produce the C compatable name of the type-id type for an exception type.
-#define _EHM_TYPE_ID_TYPE(exception_name) \
-	struct _GLUE2(__cfatid_struct_, exception_name)
-
-// Produce the name of the instance of the type-id for an exception type.
-#define _EHM_TYPE_ID_NAME(exception_name) _GLUE2(__cfatid_,exception_name)
-
-#define _IS_EXCEPTION(kind, exception_name, parameters, ...) \
-	kind(exception_name parameters, _EHM_VTABLE_TYPE(exception_name) parameters)
-
-// Internal helper macros:
-#define _CLOSE(...) __VA_ARGS__ }
-#define _GLUE2(left, right) left##right
+// IS_EXCEPTION(exception_type)
+// IS_RESUMPTION_EXCEPTION(exception_type)
+// IS_TERMINATION_EXCEPTION(exception_type)
+// Create an assertion that exception_type is the given kind of exception.
+// This is used to mimic associated types so the vtable type is unmentioned.
+#define IS_EXCEPTION(type) is_exception(type, vtable(type))
+#define IS_RESUMPTION_EXCEPTION(type) is_resumption_exception(type, vtable(type))
+#define IS_TERMINATION_EXCEPTION(type) is_termination_exception(type, vtable(type))
Index: libcfa/src/fstream.cfa
===================================================================
--- libcfa/src/fstream.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/fstream.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -22,4 +22,6 @@
 #include <assert.h>
 #include <errno.h>										// errno
+
+#pragma GCC visibility push(default)
 
 // *********************************** ofstream ***********************************
@@ -118,5 +120,5 @@
 		// abort | IO_MSG "open output file \"" | name | "\"" | nl | strerror( errno );
 	} // if
-	(os){ file };										// initialize 
+	(os){ file };										// initialize
 } // open
 
@@ -157,5 +159,5 @@
 	va_list args;
 	va_start( args, format );
-		
+
 	int len;
     for ( cnt; 10 ) {
@@ -241,5 +243,5 @@
 		// abort | IO_MSG "open input file \"" | name | "\"" | nl | strerror( errno );
 	} // if
-	(is){ file };										// initialize 
+	(is){ file };										// initialize
 } // open
 
Index: libcfa/src/fstream.hfa
===================================================================
--- libcfa/src/fstream.hfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/fstream.hfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -18,5 +18,4 @@
 #include "bits/weakso_locks.hfa"						// mutex_lock
 #include "iostream.hfa"
-#include <exception.hfa>
 
 
Index: libcfa/src/heap.cfa
===================================================================
--- libcfa/src/heap.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/heap.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -36,7 +36,7 @@
 static bool traceHeap = false;
 
-inline bool traceHeap() { return traceHeap; }
-
-bool traceHeapOn() {
+inline bool traceHeap() libcfa_public { return traceHeap; }
+
+bool traceHeapOn() libcfa_public {
 	bool temp = traceHeap;
 	traceHeap = true;
@@ -44,5 +44,5 @@
 } // traceHeapOn
 
-bool traceHeapOff() {
+bool traceHeapOff() libcfa_public {
 	bool temp = traceHeap;
 	traceHeap = false;
@@ -50,14 +50,14 @@
 } // traceHeapOff
 
-bool traceHeapTerm() { return false; }
+bool traceHeapTerm() libcfa_public { return false; }
 
 
 static bool prtFree = false;
 
-bool prtFree() {
+static bool prtFree() {
 	return prtFree;
 } // prtFree
 
-bool prtFreeOn() {
+static bool prtFreeOn() {
 	bool temp = prtFree;
 	prtFree = true;
@@ -65,5 +65,5 @@
 } // prtFreeOn
 
-bool prtFreeOff() {
+static bool prtFreeOff() {
 	bool temp = prtFree;
 	prtFree = false;
@@ -388,6 +388,7 @@
 static unsigned int maxBucketsUsed;						// maximum number of buckets in use
 // extern visibility, used by runtime kernel
-size_t __page_size;										// architecture pagesize
-int __map_prot;											// common mmap/mprotect protection
+// would be cool to remove libcfa_public but it's needed for libcfathread
+libcfa_public size_t __page_size;							// architecture pagesize
+libcfa_public int __map_prot;								// common mmap/mprotect protection
 
 
@@ -727,5 +728,5 @@
 
 
-size_t prtFree( Heap & manager ) with( manager ) {
+static size_t prtFree( Heap & manager ) with( manager ) {
 	size_t total = 0;
 	#ifdef __STATISTICS__
@@ -879,5 +880,5 @@
 	// Allocates size bytes and returns a pointer to the allocated memory.  The contents are undefined. If size is 0,
 	// then malloc() returns a unique pointer value that can later be successfully passed to free().
-	void * malloc( size_t size ) {
+	void * malloc( size_t size ) libcfa_public {
 		#ifdef __STATISTICS__
 		if ( likely( size > 0 ) ) {
@@ -894,5 +895,5 @@
 
 	// Same as malloc() except size bytes is an array of dim elements each of elemSize bytes.
-	void * aalloc( size_t dim, size_t elemSize ) {
+	void * aalloc( size_t dim, size_t elemSize ) libcfa_public {
 		size_t size = dim * elemSize;
 		#ifdef __STATISTICS__
@@ -910,5 +911,5 @@
 
 	// Same as aalloc() with memory set to zero.
-	void * calloc( size_t dim, size_t elemSize ) {
+	void * calloc( size_t dim, size_t elemSize ) libcfa_public {
 		size_t size = dim * elemSize;
 	  if ( unlikely( size ) == 0 ) {			// 0 BYTE ALLOCATION RETURNS NULL POINTER
@@ -951,5 +952,5 @@
 	// not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier
 	// call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done.
-	void * resize( void * oaddr, size_t size ) {
+	void * resize( void * oaddr, size_t size ) libcfa_public {
 		// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
 	  if ( unlikely( size == 0 ) ) {					// special cases
@@ -996,5 +997,5 @@
 	// Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of
 	// the old and new sizes.
-	void * realloc( void * oaddr, size_t size ) {
+	void * realloc( void * oaddr, size_t size ) libcfa_public {
 		// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
 	  if ( unlikely( size == 0 ) ) {					// special cases
@@ -1060,5 +1061,5 @@
 
 	// Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize.
-	void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) {
+	void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) libcfa_public {
 		return realloc( oaddr, dim * elemSize );
 	} // reallocarray
@@ -1066,5 +1067,5 @@
 
 	// Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete)
-	void * memalign( size_t alignment, size_t size ) {
+	void * memalign( size_t alignment, size_t size ) libcfa_public {
 		#ifdef __STATISTICS__
 		if ( likely( size > 0 ) ) {
@@ -1081,5 +1082,5 @@
 
 	// Same as aalloc() with memory alignment.
-	void * amemalign( size_t alignment, size_t dim, size_t elemSize ) {
+	void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
 		size_t size = dim * elemSize;
 		#ifdef __STATISTICS__
@@ -1097,5 +1098,5 @@
 
 	// Same as calloc() with memory alignment.
-	void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) {
+	void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
 		size_t size = dim * elemSize;
 	  if ( unlikely( size ) == 0 ) {					// 0 BYTE ALLOCATION RETURNS NULL POINTER
@@ -1136,5 +1137,5 @@
 	// Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
 	// of alignment. This requirement is universally ignored.
-	void * aligned_alloc( size_t alignment, size_t size ) {
+	void * aligned_alloc( size_t alignment, size_t size ) libcfa_public {
 		return memalign( alignment, size );
 	} // aligned_alloc
@@ -1145,5 +1146,5 @@
 	// is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to
 	// free(3).
-	int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
+	int posix_memalign( void ** memptr, size_t alignment, size_t size ) libcfa_public {
 	  if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment
 		*memptr = memalign( alignment, size );
@@ -1154,5 +1155,5 @@
 	// Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
 	// page size.  It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
-	void * valloc( size_t size ) {
+	void * valloc( size_t size ) libcfa_public {
 		return memalign( __page_size, size );
 	} // valloc
@@ -1160,5 +1161,5 @@
 
 	// Same as valloc but rounds size to multiple of page size.
-	void * pvalloc( size_t size ) {
+	void * pvalloc( size_t size ) libcfa_public {
 		return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size
 	} // pvalloc
@@ -1168,5 +1169,5 @@
 	// or realloc().  Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is
 	// 0p, no operation is performed.
-	void free( void * addr ) {
+	void free( void * addr ) libcfa_public {
 	  if ( unlikely( addr == 0p ) ) {					// special case
 			#ifdef __STATISTICS__
@@ -1189,5 +1190,5 @@
 
 	// Returns the alignment of an allocation.
-	size_t malloc_alignment( void * addr ) {
+	size_t malloc_alignment( void * addr ) libcfa_public {
 	  if ( unlikely( addr == 0p ) ) return libAlign();	// minimum alignment
 		Heap.Storage.Header * header = HeaderAddr( addr );
@@ -1201,5 +1202,5 @@
 
 	// Returns true if the allocation is zero filled, e.g., allocated by calloc().
-	bool malloc_zero_fill( void * addr ) {
+	bool malloc_zero_fill( void * addr ) libcfa_public {
 	  if ( unlikely( addr == 0p ) ) return false;		// null allocation is not zero fill
 		Heap.Storage.Header * header = HeaderAddr( addr );
@@ -1212,5 +1213,5 @@
 
 	// Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T).
-	size_t malloc_size( void * addr ) {
+	size_t malloc_size( void * addr ) libcfa_public {
 	  if ( unlikely( addr == 0p ) ) return 0;			// null allocation has zero size
 		Heap.Storage.Header * header = HeaderAddr( addr );
@@ -1224,5 +1225,5 @@
 	// Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by
 	// malloc or a related function.
-	size_t malloc_usable_size( void * addr ) {
+	size_t malloc_usable_size( void * addr ) libcfa_public {
 	  if ( unlikely( addr == 0p ) ) return 0;			// null allocation has 0 size
 		Heap.Storage.Header * header;
@@ -1236,5 +1237,5 @@
 
 	// Prints (on default standard error) statistics about memory allocated by malloc and related functions.
-	void malloc_stats( void ) {
+	void malloc_stats( void ) libcfa_public {
 		#ifdef __STATISTICS__
 		printStats();
@@ -1245,5 +1246,5 @@
 
 	// Changes the file descriptor where malloc_stats() writes statistics.
-	int malloc_stats_fd( int fd __attribute__(( unused )) ) {
+	int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public {
 		#ifdef __STATISTICS__
 		int temp = stats_fd;
@@ -1259,5 +1260,5 @@
 	// The string is printed on the file stream stream.  The exported string includes information about all arenas (see
 	// malloc).
-	int malloc_info( int options, FILE * stream __attribute__(( unused )) ) {
+	int malloc_info( int options, FILE * stream __attribute__(( unused )) ) libcfa_public {
 	  if ( options != 0 ) { errno = EINVAL; return -1; }
 		#ifdef __STATISTICS__
@@ -1271,5 +1272,5 @@
 	// Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The param argument
 	// specifies the parameter to be modified, and value specifies the new value for that parameter.
-	int mallopt( int option, int value ) {
+	int mallopt( int option, int value ) libcfa_public {
 	  if ( value < 0 ) return 0;
 		choose( option ) {
@@ -1285,5 +1286,5 @@
 
 	// Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument).
-	int malloc_trim( size_t ) {
+	int malloc_trim( size_t ) libcfa_public {
 		return 0;										// => impossible to release memory
 	} // malloc_trim
@@ -1294,5 +1295,5 @@
 	// structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function
 	// result.  (The caller must free this memory.)
-	void * malloc_get_state( void ) {
+	void * malloc_get_state( void ) libcfa_public {
 		return 0p;										// unsupported
 	} // malloc_get_state
@@ -1301,5 +1302,5 @@
 	// Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data
 	// structure pointed to by state.
-	int malloc_set_state( void * ) {
+	int malloc_set_state( void * ) libcfa_public {
 		return 0;										// unsupported
 	} // malloc_set_state
@@ -1307,16 +1308,16 @@
 
 	// Sets the amount (bytes) to extend the heap when there is insufficent free storage to service an allocation.
-	__attribute__((weak)) size_t malloc_expansion() { return __CFA_DEFAULT_HEAP_EXPANSION__; }
+	__attribute__((weak)) size_t malloc_expansion() libcfa_public { return __CFA_DEFAULT_HEAP_EXPANSION__; }
 
 	// Sets the crossover point between allocations occuring in the sbrk area or separately mmapped.
-	__attribute__((weak)) size_t malloc_mmap_start() { return __CFA_DEFAULT_MMAP_START__; }
+	__attribute__((weak)) size_t malloc_mmap_start() libcfa_public { return __CFA_DEFAULT_MMAP_START__; }
 
 	// Amount subtracted to adjust for unfreed program storage (debug only).
-	__attribute__((weak)) size_t malloc_unfreed() { return __CFA_DEFAULT_HEAP_UNFREED__; }
+	__attribute__((weak)) size_t malloc_unfreed() libcfa_public { return __CFA_DEFAULT_HEAP_UNFREED__; }
 } // extern "C"
 
 
 // Must have CFA linkage to overload with C linkage realloc.
-void * resize( void * oaddr, size_t nalign, size_t size ) {
+void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public {
 	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
   if ( unlikely( size == 0 ) ) {						// special cases
@@ -1380,5 +1381,5 @@
 
 
-void * realloc( void * oaddr, size_t nalign, size_t size ) {
+void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public {
 	// If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
   if ( unlikely( size == 0 ) ) {						// special cases
Index: libcfa/src/interpose.cfa
===================================================================
--- libcfa/src/interpose.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/interpose.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -36,5 +36,5 @@
 //=============================================================================================
 
-void preload_libgcc(void) {
+static void preload_libgcc(void) {
 	dlopen( "libgcc_s.so.1", RTLD_NOW );
 	if ( const char * error = dlerror() ) abort( "interpose_symbol : internal error pre-loading libgcc, %s\n", error );
@@ -42,5 +42,5 @@
 
 typedef void (* generic_fptr_t)(void);
-generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) {
+static generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) {
 	const char * error;
 
@@ -83,16 +83,16 @@
 //=============================================================================================
 
-void sigHandler_segv( __CFA_SIGPARMS__ );
-void sigHandler_ill ( __CFA_SIGPARMS__ );
-void sigHandler_fpe ( __CFA_SIGPARMS__ );
-void sigHandler_abrt( __CFA_SIGPARMS__ );
-void sigHandler_term( __CFA_SIGPARMS__ );
-
-struct {
+static void sigHandler_segv( __CFA_SIGPARMS__ );
+static void sigHandler_ill ( __CFA_SIGPARMS__ );
+static void sigHandler_fpe ( __CFA_SIGPARMS__ );
+static void sigHandler_abrt( __CFA_SIGPARMS__ );
+static void sigHandler_term( __CFA_SIGPARMS__ );
+
+static struct {
 	void (* exit)( int ) __attribute__(( __noreturn__ ));
 	void (* abort)( void ) __attribute__(( __noreturn__ ));
 } __cabi_libc;
 
-int cfa_main_returned;
+libcfa_public int cfa_main_returned;
 
 extern "C" {
@@ -148,15 +148,15 @@
 
 // Forward declare abort after the __typeof__ call to avoid ambiguities
-void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
-void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ ));
-void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
-void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ ));
+libcfa_public void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
+libcfa_public void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ ));
+libcfa_public void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
+libcfa_public void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ ));
 
 extern "C" {
-	void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
+	libcfa_public void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
 		abort( false, "%s", "" );
 	}
 
-	void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) {
+	libcfa_public void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) {
 		va_list argp;
 		va_start( argp, fmt );
@@ -165,5 +165,5 @@
 	}
 
-	void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
+	libcfa_public void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
 		__cabi_libc.exit( status );
 	}
Index: libcfa/src/iostream.cfa
===================================================================
--- libcfa/src/iostream.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/iostream.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -32,4 +32,5 @@
 #include "bitmanip.hfa"									// high1
 
+#pragma GCC visibility push(default)
 
 // *********************************** ostream ***********************************
Index: libcfa/src/limits.cfa
===================================================================
--- libcfa/src/limits.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/limits.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -20,4 +20,6 @@
 #include <complex.h>
 #include "limits.hfa"
+
+#pragma GCC visibility push(default)
 
 // Integral Constants
Index: libcfa/src/memory.cfa
===================================================================
--- libcfa/src/memory.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/memory.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -16,4 +16,6 @@
 #include "memory.hfa"
 #include "stdlib.hfa"
+
+#pragma GCC visibility push(default)
 
 // Internal data object.
Index: libcfa/src/parseargs.cfa
===================================================================
--- libcfa/src/parseargs.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/parseargs.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -24,4 +24,6 @@
 #include "common.hfa"
 #include "limits.hfa"
+
+#pragma GCC visibility push(default)
 
 extern int cfa_args_argc __attribute__((weak));
Index: libcfa/src/parseconfig.cfa
===================================================================
--- libcfa/src/parseconfig.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/parseconfig.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -14,9 +14,11 @@
 
 
+#pragma GCC visibility push(default)
+
 // *********************************** exceptions ***********************************
 
 
 // TODO: Add names of missing config entries to exception (see further below)
-static vtable(Missing_Config_Entries) Missing_Config_Entries_vt;
+vtable(Missing_Config_Entries) Missing_Config_Entries_vt;
 
 [ void ] ?{}( & Missing_Config_Entries this, unsigned int num_missing ) {
@@ -31,5 +33,5 @@
 
 
-static vtable(Parse_Failure) Parse_Failure_vt;
+vtable(Parse_Failure) Parse_Failure_vt;
 
 [ void ] ?{}( & Parse_Failure this, [] char failed_key, [] char failed_value ) {
@@ -53,5 +55,5 @@
 
 
-static vtable(Validation_Failure) Validation_Failure_vt;
+vtable(Validation_Failure) Validation_Failure_vt;
 
 [ void ] ?{}( & Validation_Failure this, [] char failed_key, [] char failed_value ) {
@@ -110,5 +112,5 @@
 
 
-[ bool ] comments( & ifstream in, [] char name ) {
+static [ bool ] comments( & ifstream in, [] char name ) {
 	while () {
 		in | name;
Index: libcfa/src/rational.cfa
===================================================================
--- libcfa/src/rational.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/rational.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -17,4 +17,6 @@
 #include "fstream.hfa"
 #include "stdlib.hfa"
+
+#pragma GCC visibility push(default)
 
 forall( T | Arithmetic( T ) ) {
Index: libcfa/src/startup.cfa
===================================================================
--- libcfa/src/startup.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/startup.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -41,6 +41,6 @@
 	} // __cfaabi_appready_shutdown
 
-	void disable_interrupts() __attribute__(( weak )) {}
-	void enable_interrupts() __attribute__(( weak )) {}
+	void disable_interrupts() __attribute__(( weak )) libcfa_public {}
+	void enable_interrupts() __attribute__(( weak )) libcfa_public {}
 
 
@@ -64,5 +64,5 @@
 struct __spinlock_t;
 extern "C" {
-	void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) {}
+	void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) libcfa_public {}
 }
 
Index: libcfa/src/stdlib.cfa
===================================================================
--- libcfa/src/stdlib.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/stdlib.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -25,4 +25,6 @@
 #include <complex.h>									// _Complex_I
 #include <assert.h>
+
+#pragma GCC visibility push(default)
 
 //---------------------------------------
@@ -225,6 +227,7 @@
 #define GENERATOR LCG
 
-uint32_t __global_random_seed;							// sequential/concurrent
-uint32_t __global_random_state;							// sequential only
+// would be cool to make hidden but it's needed for libcfathread
+__attribute__((visibility("default"))) uint32_t __global_random_seed;							// sequential/concurrent
+__attribute__((visibility("hidden"))) uint32_t __global_random_state;							// sequential only
 
 void set_seed( PRNG & prng, uint32_t seed_ ) with( prng ) { state = seed = seed_; GENERATOR( state ); } // set seed
Index: libcfa/src/strstream.cfa
===================================================================
--- libcfa/src/strstream.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/strstream.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1,10 +1,10 @@
-// 
+//
 // Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
-// 
+//
 // The contents of this file are covered under the licence agreement in the
 // file "LICENCE" distributed with Cforall.
 //
-// strstream.cfa -- 
-// 
+// strstream.cfa --
+//
 // Author           : Peter A. Buhr
 // Created On       : Thu Apr 22 22:24:35 2021
@@ -12,5 +12,5 @@
 // Last Modified On : Sun Oct 10 16:13:20 2021
 // Update Count     : 101
-// 
+//
 
 #include "strstream.hfa"
@@ -24,4 +24,5 @@
 #include <unistd.h>										// sbrk, sysconf
 
+#pragma GCC visibility push(default)
 
 // *********************************** strstream ***********************************
Index: libcfa/src/time.cfa
===================================================================
--- libcfa/src/time.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/time.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -18,4 +18,6 @@
 #include <stdio.h>										// snprintf
 #include <assert.h>
+
+#pragma GCC visibility push(default)
 
 static char * nanomsd( long int ns, char * buf ) {		// most significant digits
Index: libcfa/src/virtual.c
===================================================================
--- libcfa/src/virtual.c	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ libcfa/src/virtual.c	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -16,4 +16,6 @@
 #include "virtual.h"
 #include "assert.h"
+
+#pragma GCC visibility push(default)
 
 int __cfavir_is_parent(
Index: src/AST/Eval.hpp
===================================================================
--- src/AST/Eval.hpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ 	(revision )
@@ -1,37 +1,0 @@
-//
-// Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo
-//
-// The contents of this file are covered under the licence agreement in the
-// file "LICENCE" distributed with Cforall.
-//
-// Eval.hpp --
-//
-// Author           : Aaron B. Moss
-// Created On       : Fri Jun 28 14:00:00 2019
-// Last Modified By : Aaron B. Moss
-// Created On       : Fri Jun 28 14:00:00 2019
-// Update Count     : 1
-//
-
-#include <string>
-#include <utility>
-
-#include "Expr.hpp"
-
-namespace ast {
-
-/// Create a new UntypedExpr with the given arguments
-template< typename... Args >
-UntypedExpr * call( const CodeLocation & loc, const std::string & name, Args &&... args ) {
-	return new UntypedExpr {
-		loc, new NameExpr { loc, name },
-		std::vector< ptr< Expr > > { std::forward< Args >( args )... } };
-}
-
-}
-
-// Local Variables: //
-// tab-width: 4 //
-// mode: c++ //
-// compile-command: "make install" //
-// End: //
Index: src/AST/Expr.cpp
===================================================================
--- src/AST/Expr.cpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/AST/Expr.cpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,6 +10,6 @@
 // Created On       : Wed May 15 17:00:00 2019
 // Last Modified By : Andrew Beach
-// Created On       : Tue Nov 30 14:23:00 2021
-// Update Count     : 7
+// Last Modified On : Wed May 18 13:56:00 2022
+// Update Count     : 8
 //
 
@@ -21,5 +21,4 @@
 
 #include "Copy.hpp"                // for shallowCopy
-#include "Eval.hpp"                // for call
 #include "GenericSubstitution.hpp"
 #include "LinkageSpec.hpp"
@@ -67,8 +66,13 @@
 // --- UntypedExpr
 
+bool UntypedExpr::get_lvalue() const {
+	std::string fname = InitTweak::getFunctionName( this );
+	return lvalueFunctionNames.count( fname );
+}
+
 UntypedExpr * UntypedExpr::createDeref( const CodeLocation & loc, const Expr * arg ) {
 	assert( arg );
 
-	UntypedExpr * ret = call( loc, "*?", arg );
+	UntypedExpr * ret = createCall( loc, "*?", { arg } );
 	if ( const Type * ty = arg->result ) {
 		const Type * base = InitTweak::getPointerBase( ty );
@@ -87,13 +91,8 @@
 }
 
-bool UntypedExpr::get_lvalue() const {
-	std::string fname = InitTweak::getFunctionName( this );
-	return lvalueFunctionNames.count( fname );
-}
-
 UntypedExpr * UntypedExpr::createAssign( const CodeLocation & loc, const Expr * lhs, const Expr * rhs ) {
 	assert( lhs && rhs );
 
-	UntypedExpr * ret = call( loc, "?=?", lhs, rhs );
+	UntypedExpr * ret = createCall( loc, "?=?", { lhs, rhs } );
 	if ( lhs->result && rhs->result ) {
 		// if both expressions are typed, assumes that this assignment is a C bitwise assignment,
@@ -102,4 +101,10 @@
 	}
 	return ret;
+}
+
+UntypedExpr * UntypedExpr::createCall( const CodeLocation & loc,
+		const std::string & name, std::vector<ptr<Expr>> && args ) {
+	return new UntypedExpr( loc,
+			new NameExpr( loc, name ), std::move( args ) );
 }
 
Index: src/AST/Expr.hpp
===================================================================
--- src/AST/Expr.hpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/AST/Expr.hpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -230,4 +230,7 @@
 	/// Creates a new assignment expression
 	static UntypedExpr * createAssign( const CodeLocation & loc, const Expr * lhs, const Expr * rhs );
+	/// Creates a new call of a variable.
+	static UntypedExpr * createCall( const CodeLocation & loc,
+		const std::string & name, std::vector<ptr<Expr>> && args );
 
 	const Expr * accept( Visitor & v ) const override { return v.visit( this ); }
Index: src/AST/Label.hpp
===================================================================
--- src/AST/Label.hpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/AST/Label.hpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -34,5 +34,5 @@
 	std::vector< ptr<Attribute> > attributes;
 
-	Label( CodeLocation loc, const std::string& name = "",
+	Label( const CodeLocation& loc, const std::string& name = "",
 		std::vector<ptr<Attribute>> && attrs = std::vector<ptr<Attribute>>{} )
 	: location( loc ), name( name ), attributes( attrs ) {}
Index: src/AST/Node.hpp
===================================================================
--- src/AST/Node.hpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/AST/Node.hpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -51,4 +51,7 @@
 	bool isManaged() const { return strong_count > 0; }
 	bool isReferenced() const { return weak_count > 0; }
+	bool isStable() const {
+		return (1 == strong_count || (1 < strong_count && 0 == weak_count));
+	}
 
 private:
Index: src/AST/Pass.proto.hpp
===================================================================
--- src/AST/Pass.proto.hpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/AST/Pass.proto.hpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -131,6 +131,6 @@
 	template< typename node_t >
 	struct result1 {
-		bool differs;
-		const node_t * value;
+		bool differs = false;
+		const node_t * value = nullptr;
 
 		template< typename object_t, typename super_t, typename field_t >
@@ -151,5 +151,5 @@
 		};
 
-		bool differs;
+		bool differs = false;
 		container_t< delta > values;
 
@@ -167,5 +167,5 @@
 	template< template<class...> class container_t, typename node_t >
 	struct resultN {
-		bool differs;
+		bool differs = false;
 		container_t<ptr<node_t>> values;
 
Index: src/AST/Stmt.hpp
===================================================================
--- src/AST/Stmt.hpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/AST/Stmt.hpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -58,5 +58,5 @@
 	// cannot be, they are sub-types of this type, for organization.
 
-    StmtClause( const CodeLocation & loc )
+	StmtClause( const CodeLocation & loc )
 		: ParseNode(loc) {}
 
@@ -396,16 +396,16 @@
 class WaitForClause final : public StmtClause {
   public:
-    ptr<Expr> target_func;
-    std::vector<ptr<Expr>> target_args;
-    ptr<Stmt> stmt;
-    ptr<Expr> cond;
-
-    WaitForClause( const CodeLocation & loc )
+	ptr<Expr> target_func;
+	std::vector<ptr<Expr>> target_args;
+	ptr<Stmt> stmt;
+	ptr<Expr> cond;
+
+	WaitForClause( const CodeLocation & loc )
 		: StmtClause( loc ) {}
 
 	const WaitForClause * accept( Visitor & v ) const override { return v.visit( this ); }
   private:
-    WaitForClause * clone() const override { return new WaitForClause{ *this }; }
-    MUTATE_FRIEND
+	WaitForClause * clone() const override { return new WaitForClause{ *this }; }
+	MUTATE_FRIEND
 };
 
Index: src/AST/Util.cpp
===================================================================
--- src/AST/Util.cpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/AST/Util.cpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -5,11 +5,11 @@
 // file "LICENCE" distributed with Cforall.
 //
-// Util.hpp -- General utilities for working with the AST.
+// Util.cpp -- General utilities for working with the AST.
 //
 // Author           : Andrew Beach
 // Created On       : Wed Jan 19  9:46:00 2022
 // Last Modified By : Andrew Beach
-// Last Modified On : Fri Mar 11 18:07:00 2022
-// Update Count     : 1
+// Last Modified On : Wed May 11 16:16:00 2022
+// Update Count     : 3
 //
 
@@ -46,9 +46,40 @@
 
 /// Check that every note that can has a set CodeLocation.
-struct SetCodeLocationsCore {
-	void previsit( const ParseNode * node ) {
-		assert( node->location.isSet() );
+void isCodeLocationSet( const ParseNode * node ) {
+	assert( node->location.isSet() );
+}
+
+void areLabelLocationsSet( const Stmt * stmt ) {
+	for ( const Label& label : stmt->labels ) {
+		assert( label.location.isSet() );
 	}
-};
+}
+
+/// Make sure the reference counts are in a valid combination.
+void isStable( const Node * node ) {
+	assert( node->isStable() );
+}
+
+/// Check that a FunctionDecl is synchronized with its FunctionType.
+void functionDeclMatchesType( const FunctionDecl * decl ) {
+	// The type is a cache of sorts, if it is missing that is only a
+	// problem if isTypeFixed is set.
+	if ( decl->isTypeFixed ) {
+		assert( decl->type );
+	} else if ( !decl->type ) {
+		return;
+	}
+
+	const FunctionType * type = decl->type;
+
+	// Check that `type->forall` corresponds with `decl->type_params`.
+	assert( type->forall.size() == decl->type_params.size() );
+	// Check that `type->assertions` corresponds with `decl->assertions`.
+	assert( type->assertions.size() == decl->assertions.size() );
+	// Check that `type->params` corresponds with `decl->params`.
+	assert( type->params.size() == decl->params.size() );
+	// Check that `type->returns` corresponds with `decl->returns`.
+	assert( type->returns.size() == decl->returns.size() );
+}
 
 struct InvariantCore {
@@ -56,13 +87,23 @@
 	// None of the passes should make changes so ordering doesn't matter.
 	NoStrongCyclesCore no_strong_cycles;
-	SetCodeLocationsCore set_code_locations;
 
 	void previsit( const Node * node ) {
 		no_strong_cycles.previsit( node );
+		isStable( node );
 	}
 
 	void previsit( const ParseNode * node ) {
-		no_strong_cycles.previsit( node );
-		set_code_locations.previsit( node );
+		previsit( (const Node *)node );
+		isCodeLocationSet( node );
+	}
+
+	void previsit( const FunctionDecl * node ) {
+		previsit( (const ParseNode *)node );
+		functionDeclMatchesType( node );
+	}
+
+	void previsit( const Stmt * node ) {
+		previsit( (const ParseNode *)node );
+		areLabelLocationsSet( node );
 	}
 
Index: src/AST/module.mk
===================================================================
--- src/AST/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/AST/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -29,5 +29,4 @@
 	AST/DeclReplacer.cpp \
 	AST/DeclReplacer.hpp \
-	AST/Eval.hpp \
 	AST/Expr.cpp \
 	AST/Expr.hpp \
Index: src/CodeGen/CodeGenerator.cc
===================================================================
--- src/CodeGen/CodeGenerator.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/CodeGen/CodeGenerator.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1238,16 +1238,4 @@
 } // namespace CodeGen
 
-
-unsigned Indenter::tabsize = 2;
-
-std::ostream & operator<<( std::ostream & out, const BaseSyntaxNode * node ) {
-	if ( node ) {
-		node->print( out );
-	} else {
-		out << "nullptr";
-	}
-	return out;
-}
-
 // Local Variables: //
 // tab-width: 4 //
Index: src/CodeGen/FixMain.cc
===================================================================
--- src/CodeGen/FixMain.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/CodeGen/FixMain.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -49,6 +49,4 @@
 
 }
-
-	bool FixMain::replace_main = false;
 
 	template<typename container>
Index: src/CodeGen/FixMain2.cc
===================================================================
--- src/CodeGen/FixMain2.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ src/CodeGen/FixMain2.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,28 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// FixMain2.cc -- A side file used to separate the compiler and demangler.
+//
+// Author           : Andrew Beach
+// Created On       : Tue May 17 10:05:00 2022
+// Last Modified By : Andrew Beach
+// Last Modified On : Tue May 17 10:08:00 2022
+// Update Count     : 0
+//
+
+#include "FixMain.h"
+
+namespace CodeGen {
+
+bool FixMain::replace_main = false;
+
+} // namespace CodeGen
+
+// Local Variables: //
+// tab-width: 4 //
+// mode: c++ //
+// compile-command: "make install" //
+// End: //
Index: src/CodeGen/GenType.cc
===================================================================
--- src/CodeGen/GenType.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/CodeGen/GenType.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,6 +10,6 @@
 // Created On       : Mon May 18 07:44:20 2015
 // Last Modified By : Andrew Beach
-// Last Modified On : Wed May  1 15:24:00 2019
-// Update Count     : 23
+// Last Modified On : Fri May 20 11:18:00 2022
+// Update Count     : 24
 //
 #include "GenType.h"
@@ -50,4 +50,5 @@
 		void postvisit( TraitInstType * inst );
 		void postvisit( TypeofType * typeof );
+		void postvisit( VTableType * vtable );
 		void postvisit( QualifiedType * qualType );
 
@@ -259,10 +260,11 @@
 			if ( options.genC ) {
 				typeString = "enum " + typeString;
-			} 
-		} 
+			}
+		}
 		handleQualifiers( enumInst );
 	}
 
 	void GenType::postvisit( TypeInstType * typeInst ) {
+		assertf( ! options.genC, "Type instance types should not reach code generation." );
 		typeString = typeInst->name + " " + typeString;
 		handleQualifiers( typeInst );
@@ -320,4 +322,12 @@
 	}
 
+	void GenType::postvisit( VTableType * vtable ) {
+		assertf( ! options.genC, "Virtual table types should not reach code generation." );
+		std::ostringstream os;
+		os << "vtable(" << genType( vtable->base, "", options ) << ") " << typeString;
+		typeString = os.str();
+		handleQualifiers( vtable );
+	}
+
 	void GenType::postvisit( QualifiedType * qualType ) {
 		assertf( ! options.genC, "Qualified types should not reach code generation." );
Index: src/CodeGen/LinkOnce.cc
===================================================================
--- src/CodeGen/LinkOnce.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/CodeGen/LinkOnce.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -53,4 +53,9 @@
 				new ConstantExpr( Constant::from_string( section_name ) )
 			);
+
+			// Unconditionally add "visibility(default)" to anything with gnu.linkonce
+			// visibility is a mess otherwise
+			attributes.push_back(new Attribute("visibility", {new ConstantExpr( Constant::from_string( "default" ) )}));
+
 		}
 		visit_children = false;
Index: src/CodeGen/module.mk
===================================================================
--- src/CodeGen/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/CodeGen/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,25 +10,28 @@
 ## Author           : Richard C. Bilson
 ## Created On       : Mon Jun  1 17:49:17 2015
-## Last Modified By : Peter A. Buhr
-## Last Modified On : Sat Dec 14 07:29:42 2019
-## Update Count     : 4
+## Last Modified By : Andrew Beach
+## Last Modified On : Tue May 17 14:26:00 2022
+## Update Count     : 5
 ###############################################################################
 
-#SRC +=  ArgTweak/Rewriter.cc \
-#	ArgTweak/Mutate.cc
+SRC_CODEGEN = \
+	CodeGen/FixMain2.cc \
+	CodeGen/FixMain.h \
+	CodeGen/OperatorTable.cc \
+	CodeGen/OperatorTable.h
 
-SRC_CODEGEN = \
+SRC += $(SRC_CODEGEN) \
 	CodeGen/CodeGenerator.cc \
 	CodeGen/CodeGenerator.h \
+	CodeGen/Generate.cc \
+	CodeGen/Generate.h \
 	CodeGen/FixMain.cc \
-	CodeGen/FixMain.h \
+	CodeGen/FixNames.cc \
+	CodeGen/FixNames.h \
 	CodeGen/GenType.cc \
 	CodeGen/GenType.h \
 	CodeGen/LinkOnce.cc \
 	CodeGen/LinkOnce.h \
-	CodeGen/OperatorTable.cc \
-	CodeGen/OperatorTable.h \
 	CodeGen/Options.h
 
-SRC += $(SRC_CODEGEN) CodeGen/Generate.cc CodeGen/Generate.h CodeGen/FixNames.cc CodeGen/FixNames.h
 SRCDEMANGLE += $(SRC_CODEGEN)
Index: src/CodeTools/ResolvProtoDump.cc
===================================================================
--- src/CodeTools/ResolvProtoDump.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/CodeTools/ResolvProtoDump.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -304,5 +304,8 @@
 
 			// replace enums with int
-			void previsit( EnumInstType* ) { ss << (int)BasicType::SignedInt; }
+			void previsit( EnumInstType* ) { 
+				// TODO: add the meaningful representation of typed int
+				ss << (int)BasicType::SignedInt; 
+			}
 
 			void previsit( TypeInstType* vt ) {
Index: src/Common/CodeLocationTools.cpp
===================================================================
--- src/Common/CodeLocationTools.cpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Common/CodeLocationTools.cpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,6 +10,6 @@
 // Created On       : Fri Dec  4 15:42:00 2020
 // Last Modified By : Andrew Beach
-// Last Modified On : Mon Mar 14 15:14:00 2022
-// Update Count     : 4
+// Last Modified On : Wed May 11 16:16:00 2022
+// Update Count     : 5
 //
 
@@ -24,64 +24,71 @@
 namespace {
 
-// There are a lot of helpers in this file that could be used much more
-// generally if anyone has another use for them.
-
-// Check if a node type has a code location.
-template<typename node_t>
-struct has_code_location : public std::is_base_of<ast::ParseNode, node_t> {};
-
-template<typename node_t, bool has_location>
-struct __GetCL;
-
-template<typename node_t>
-struct __GetCL<node_t, true> {
-	static inline CodeLocation const * get( node_t const * node ) {
-		return &node->location;
-	}
-
-	static inline CodeLocation * get( node_t * node ) {
-		return &node->location;
-	}
-};
-
-template<typename node_t>
-struct __GetCL<node_t, false> {
-	static inline CodeLocation * get( node_t const * ) {
-		return nullptr;
-	}
-};
-
-template<typename node_t>
-CodeLocation const * get_code_location( node_t const * node ) {
-	return __GetCL< node_t, has_code_location< node_t >::value >::get( node );
-}
-
-template<typename node_t>
-CodeLocation * get_code_location( node_t * node ) {
-	return __GetCL< node_t, has_code_location< node_t >::value >::get( node );
-}
-
 // Fill every location with a nearby (parent) location.
 class FillCore : public ast::WithGuards {
 	CodeLocation const * parent;
+
+	template<typename node_t>
+	node_t const * parse_visit( node_t const * node ) {
+		if ( node->location.isUnset() ) {
+			assert( parent );
+			node_t * newNode = ast::mutate( node );
+			newNode->location = *parent;
+			return newNode;
+		}
+		GuardValue( parent ) = &node->location;
+		return node;
+	}
+
+	bool hasUnsetLabels( const ast::Stmt * stmt ) {
+		for ( const ast::Label& label : stmt->labels ) {
+			if ( label.location.isUnset() ) {
+				return true;
+			}
+		}
+		return false;
+	}
+
+	template<typename node_t>
+	node_t const * stmt_visit( node_t const * node ) {
+		assert( node->location.isSet() );
+
+		if ( hasUnsetLabels( node ) ) {
+			node_t * newNode = ast::mutate( node );
+			for ( ast::Label& label : newNode->labels ) {
+				if ( label.location.isUnset() ) {
+					label.location = newNode->location;
+				}
+			}
+			return newNode;
+		}
+		return node;
+	}
+
+	template<typename node_t>
+	auto visit( node_t const * node, long ) {
+		return node;
+	}
+
+	template<typename node_t>
+	auto visit( node_t const * node, int ) -> typename
+			std::remove_reference< decltype( node->location, node ) >::type {
+		return parse_visit( node );
+	}
+
+	template<typename node_t>
+	auto visit( node_t const * node, char ) -> typename
+			std::remove_reference< decltype( node->labels, node ) >::type {
+		return stmt_visit( parse_visit( node ) );
+	}
+
 public:
 	FillCore() : parent( nullptr ) {}
+	FillCore( const CodeLocation& location ) : parent( &location ) {
+		assert( location.isSet() );
+	}
 
 	template<typename node_t>
 	node_t const * previsit( node_t const * node ) {
-		GuardValue( parent );
-		CodeLocation const * location = get_code_location( node );
-		if ( location && location->isUnset() ) {
-			assert( parent );
-			node_t * newNode = ast::mutate( node );
-			CodeLocation * newLocation = get_code_location( newNode );
-			assert( newLocation );
-			*newLocation = *parent;
-			parent = newLocation;
-			return newNode;
-		} else if ( location ) {
-			parent = location;
-		}
-		return node;
+		return visit( node, '\0' );
 	}
 };
@@ -233,29 +240,7 @@
 
 	template<typename node_t>
-	void previsit( node_t const * node ) {
-		CodeLocation const * location = get_code_location( node );
-		if ( location && location->isUnset() ) {
+	auto previsit( node_t const * node ) -> decltype( node->location, void() ) {
+		if ( node->location.isUnset() ) {
 			unset.push_back( node );
-		}
-	}
-};
-
-class LocalFillCore : public ast::WithGuards {
-	CodeLocation const * parent;
-public:
-	LocalFillCore( CodeLocation const & location ) : parent( &location ) {
-		assert( location.isSet() );
-	}
-
-	template<typename node_t>
-	auto previsit( node_t const * node )
-			-> typename std::enable_if<has_code_location<node_t>::value, node_t const *>::type {
-		if ( node->location.isSet() ) {
-			GuardValue( parent ) = &node->location;
-			return node;
-		} else {
-			node_t * mut = ast::mutate( node );
-			mut->location = *parent;
-			return mut;
 		}
 	}
@@ -304,5 +289,5 @@
 ast::Node const * localFillCodeLocations(
 		CodeLocation const & location , ast::Node const * node ) {
-	ast::Pass<LocalFillCore> visitor( location );
+	ast::Pass<FillCore> visitor( location );
 	return node->accept( visitor );
 }
Index: src/Common/Indenter.cc
===================================================================
--- src/Common/Indenter.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ src/Common/Indenter.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,24 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// Indenter.cc --
+//
+// Author           : Andrew Beach
+// Created On       : Fri May 13 14:03:00 2022
+// Last Modified By : Andrew Beach
+// Last Modified On : Fri May 13 14:03:00 2022
+// Update Count     : 0
+//
+
+#include "Indenter.h"
+
+unsigned Indenter::tabsize = 2;
+
+// Local Variables: //
+// tab-width: 4 //
+// mode: c++ //
+// compile-command: "make install" //
+// End: //
Index: src/Common/Indenter.h
===================================================================
--- src/Common/Indenter.h	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Common/Indenter.h	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,10 +10,11 @@
 // Created On       : Fri Jun 30 16:55:23 2017
 // Last Modified By : Andrew Beach
-// Last Modified On : Fri Aug 11 11:15:00 2017
-// Update Count     : 1
+// Last Modified On : Fri May 13 14:10:00 2022
+// Update Count     : 2
 //
 
-#ifndef INDENTER_H
-#define INDENTER_H
+#pragma once
+
+#include <ostream>
 
 struct Indenter {
@@ -37,4 +38,2 @@
 	return out << std::string(indent.indent * indent.amt, ' ');
 }
-
-#endif // INDENTER_H
Index: src/Common/ResolvProtoDump.cpp
===================================================================
--- src/Common/ResolvProtoDump.cpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Common/ResolvProtoDump.cpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -227,5 +227,6 @@
 	}
 
-	void previsit( const ast::EnumInstType * ) {
+	void previsit( const ast::EnumInstType * enumInst) {
+		// TODO: Add the meaningful text representation of typed enum
 		ss << (int)ast::BasicType::SignedInt;
 	}
Index: src/Common/module.mk
===================================================================
--- src/Common/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Common/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,51 +10,54 @@
 ## Author           : Richard C. Bilson
 ## Created On       : Mon Jun  1 17:49:17 2015
-## Last Modified By : Peter A. Buhr
-## Last Modified On : Tue Sep 27 11:06:38 2016
-## Update Count     : 4
+## Last Modified By : Andrew Beach
+## Last Modified On : Tue May 17 14:27:00 2022
+## Update Count     : 5
 ###############################################################################
 
 SRC_COMMON = \
-      Common/Assert.cc \
-      Common/CodeLocation.h \
-      Common/CodeLocationTools.hpp \
-      Common/CodeLocationTools.cpp \
-      Common/CompilerError.h \
-      Common/Debug.h \
-      Common/DeclStats.hpp \
-      Common/DeclStats.cpp \
-      Common/ErrorObjects.h \
-      Common/Eval.cc \
-      Common/Examine.cc \
-      Common/Examine.h \
-      Common/FilterCombos.h \
-      Common/Indenter.h \
-      Common/PassVisitor.cc \
-      Common/PassVisitor.h \
-      Common/PassVisitor.impl.h \
-      Common/PassVisitor.proto.h \
-      Common/PersistentMap.h \
-      Common/ResolvProtoDump.hpp \
-      Common/ResolvProtoDump.cpp \
-      Common/ScopedMap.h \
-      Common/SemanticError.cc \
-      Common/SemanticError.h \
-      Common/Stats.h \
-      Common/Stats/Base.h \
-      Common/Stats/Counter.cc \
-      Common/Stats/Counter.h \
-      Common/Stats/Heap.cc \
-      Common/Stats/Heap.h \
-      Common/Stats/ResolveTime.cc \
-      Common/Stats/ResolveTime.h \
-      Common/Stats/Stats.cc \
-      Common/Stats/Time.cc \
-      Common/Stats/Time.h \
-      Common/UnimplementedError.h \
-      Common/UniqueName.cc \
-      Common/UniqueName.h \
-      Common/utility.h \
-      Common/VectorMap.h
+	Common/Assert.cc \
+	Common/CodeLocation.h \
+	Common/CodeLocationTools.hpp \
+	Common/CodeLocationTools.cpp \
+	Common/CompilerError.h \
+	Common/Debug.h \
+	Common/DeclStats.hpp \
+	Common/DeclStats.cpp \
+	Common/ErrorObjects.h \
+	Common/Eval.cc \
+	Common/Examine.cc \
+	Common/Examine.h \
+	Common/FilterCombos.h \
+	Common/Indenter.h \
+	Common/Indenter.cc \
+	Common/PassVisitor.cc \
+	Common/PassVisitor.h \
+	Common/PassVisitor.impl.h \
+	Common/PassVisitor.proto.h \
+	Common/PersistentMap.h \
+	Common/ResolvProtoDump.hpp \
+	Common/ResolvProtoDump.cpp \
+	Common/ScopedMap.h \
+	Common/SemanticError.cc \
+	Common/SemanticError.h \
+	Common/Stats.h \
+	Common/Stats/Base.h \
+	Common/Stats/Counter.cc \
+	Common/Stats/Counter.h \
+	Common/Stats/Heap.cc \
+	Common/Stats/Heap.h \
+	Common/Stats/ResolveTime.cc \
+	Common/Stats/ResolveTime.h \
+	Common/Stats/Stats.cc \
+	Common/Stats/Time.cc \
+	Common/Stats/Time.h \
+	Common/UnimplementedError.h \
+	Common/UniqueName.cc \
+	Common/UniqueName.h \
+	Common/utility.h \
+	Common/VectorMap.h
 
-SRC += $(SRC_COMMON) Common/DebugMalloc.cc
+SRC += $(SRC_COMMON) \
+	Common/DebugMalloc.cc
+
 SRCDEMANGLE += $(SRC_COMMON)
Index: src/Concurrency/module.mk
===================================================================
--- src/Concurrency/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Concurrency/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,18 +10,13 @@
 ## Author           : Thierry Delisle
 ## Created On       : Mon Mar 13 12:48:40 2017
-## Last Modified By :
-## Last Modified On :
-## Update Count     : 0
+## Last Modified By : Andrew Beach
+## Last Modified On : Tue May 17 13:28:00 2022
+## Update Count     : 1
 ###############################################################################
 
-SRC_CONCURRENCY = \
+SRC += \
 	Concurrency/KeywordsNew.cpp \
-	Concurrency/Keywords.cc
-
-SRC += $(SRC_CONCURRENCY) \
+	Concurrency/Keywords.cc \
 	Concurrency/Keywords.h \
 	Concurrency/Waitfor.cc \
 	Concurrency/Waitfor.h
-
-SRCDEMANGLE += $(SRC_CONCURRENCY)
-
Index: src/ControlStruct/ExceptDecl.cc
===================================================================
--- src/ControlStruct/ExceptDecl.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/ControlStruct/ExceptDecl.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -9,7 +9,7 @@
 // Author           : Henry Xue
 // Created On       : Tue Jul 20 04:10:50 2021
-// Last Modified By : Henry Xue
-// Last Modified On : Tue Aug 03 10:42:26 2021
-// Update Count     : 4
+// Last Modified By : Andrew Beach
+// Last Modified On : Wed May 25 16:43:00 2022
+// Update Count     : 5
 //
 
@@ -39,12 +39,11 @@
 }
 
-TypeInstType * makeExceptInstType(
-	const std::string & exceptionName,
-	const std::list< Expression *> & parameters
-) {
-	TypeInstType * exceptInstType = new TypeInstType(
+StructInstType * makeExceptInstType(
+	const std::string & exceptionName,
+	const std::list< Expression *> & parameters
+) {
+	StructInstType * exceptInstType = new StructInstType(
 		noQualifiers,
-		exceptionName,
-		false
+		exceptionName
 	);
 	cloneAll( parameters, exceptInstType->parameters );
@@ -151,5 +150,5 @@
 		nullptr,
 		new PointerType( noQualifiers,
-			new TypeInstType( Type::Const, "__cfavir_type_info", false ) ),
+			new StructInstType( Type::Const, "__cfavir_type_info" ) ),
 		nullptr
 	) );
@@ -257,5 +256,5 @@
 	const std::string & exceptionName,
 	const std::list< TypeDecl *> & forallClause,
-	const std::list< Expression *> & parameters, 
+	const std::list< Expression *> & parameters,
 	const std::list< Declaration *> & members
 ) {
@@ -302,5 +301,5 @@
 ObjectDecl * ehmExternVtable(
 	const std::string & exceptionName,
-	const std::list< Expression *> & parameters, 
+	const std::list< Expression *> & parameters,
 	const std::string & tableName
 ) {
@@ -457,8 +456,27 @@
 }
 
+class VTableCore : public WithDeclsToAdd {
+public:
+	// Remove any remaining vtable type nodes in the tree.
+	Type * postmutate( VTableType * vtableType );
+};
+
+Type * VTableCore::postmutate( VTableType * vtableType ) {
+	auto inst = strict_dynamic_cast<ReferenceToType *>( vtableType->base );
+
+	std::string vtableName = Virtual::vtableTypeName( inst->name );
+	StructInstType * newType = new StructInstType( noQualifiers, vtableName );
+	cloneAll( inst->parameters, newType->parameters );
+
+	delete vtableType;
+	return newType;
+}
+
 void translateExcept( std::list< Declaration *> & translationUnit ) {
 	PassVisitor<ExceptDeclCore> translator;
 	mutateAll( translationUnit, translator );
-}
-
-}
+	PassVisitor<VTableCore> typeTranslator;
+	mutateAll( translationUnit, typeTranslator );
+}
+
+}
Index: src/ControlStruct/MultiLevelExit.cpp
===================================================================
--- src/ControlStruct/MultiLevelExit.cpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/ControlStruct/MultiLevelExit.cpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -18,4 +18,5 @@
 #include "AST/Pass.hpp"
 #include "AST/Stmt.hpp"
+#include "Common/CodeLocationTools.hpp"
 #include "LabelGeneratorNew.hpp"
 
@@ -228,77 +229,77 @@
 	// Labels on different stmts require different approaches to access
 	switch ( stmt->kind ) {
-	  case BranchStmt::Goto:
+	case BranchStmt::Goto:
 		return stmt;
-	  case BranchStmt::Continue:
-	  case BranchStmt::Break: {
-		  bool isContinue = stmt->kind == BranchStmt::Continue;
-		  // Handle unlabeled break and continue.
-		  if ( stmt->target.empty() ) {
-			  if ( isContinue ) {
-				  targetEntry = findEnclosingControlStructure( isContinueTarget );
-			  } else {
-				  if ( enclosing_control_structures.empty() ) {
+	case BranchStmt::Continue:
+	case BranchStmt::Break: {
+		bool isContinue = stmt->kind == BranchStmt::Continue;
+		// Handle unlabeled break and continue.
+		if ( stmt->target.empty() ) {
+			if ( isContinue ) {
+				targetEntry = findEnclosingControlStructure( isContinueTarget );
+			} else {
+				if ( enclosing_control_structures.empty() ) {
 					  SemanticError( stmt->location,
 									 "'break' outside a loop, 'switch', or labelled block" );
-				  }
-				  targetEntry = findEnclosingControlStructure( isBreakTarget );
-			  }
-			  // Handle labeled break and continue.
-		  } else {
-			  // Lookup label in table to find attached control structure.
-			  targetEntry = findEnclosingControlStructure(
-				  [ targetStmt = target_table.at(stmt->target) ](auto entry){
+				}
+				targetEntry = findEnclosingControlStructure( isBreakTarget );
+			}
+			// Handle labeled break and continue.
+		} else {
+			// Lookup label in table to find attached control structure.
+			targetEntry = findEnclosingControlStructure(
+				[ targetStmt = target_table.at(stmt->target) ](auto entry){
 					  return entry.stmt == targetStmt;
-				  } );
-		  }
-		  // Ensure that selected target is valid.
-		  if ( targetEntry == enclosing_control_structures.rend() || ( isContinue && ! isContinueTarget( *targetEntry ) ) ) {
-			  SemanticError( stmt->location, toString( (isContinue ? "'continue'" : "'break'"),
+				} );
+		}
+		// Ensure that selected target is valid.
+		if ( targetEntry == enclosing_control_structures.rend() || ( isContinue && ! isContinueTarget( *targetEntry ) ) ) {
+			SemanticError( stmt->location, toString( (isContinue ? "'continue'" : "'break'"),
 							" target must be an enclosing ", (isContinue ? "loop: " : "control structure: "),
 							stmt->originalTarget ) );
-		  }
-		  break;
-	  }
-	  // handle fallthrough in case/switch stmts
-	  case BranchStmt::FallThrough: {
-		  targetEntry = findEnclosingControlStructure( isFallthroughTarget );
-		  // Check that target is valid.
-		  if ( targetEntry == enclosing_control_structures.rend() ) {
-			  SemanticError( stmt->location, "'fallthrough' must be enclosed in a 'switch' or 'choose'" );
-		  }
-		  if ( ! stmt->target.empty() ) {
-			  // Labelled fallthrough: target must be a valid fallthough label.
-			  if ( ! fallthrough_labels.count( stmt->target ) ) {
-				  SemanticError( stmt->location, toString( "'fallthrough' target must be a later case statement: ",
+		}
+		break;
+	}
+	// handle fallthrough in case/switch stmts
+	case BranchStmt::FallThrough: {
+		targetEntry = findEnclosingControlStructure( isFallthroughTarget );
+		// Check that target is valid.
+		if ( targetEntry == enclosing_control_structures.rend() ) {
+			SemanticError( stmt->location, "'fallthrough' must be enclosed in a 'switch' or 'choose'" );
+		}
+		if ( ! stmt->target.empty() ) {
+			// Labelled fallthrough: target must be a valid fallthrough label.
+			if ( ! fallthrough_labels.count( stmt->target ) ) {
+				SemanticError( stmt->location, toString( "'fallthrough' target must be a later case statement: ",
 														   stmt->originalTarget ) );
-			  }
-			  return new BranchStmt( stmt->location, BranchStmt::Goto, stmt->originalTarget );
-		  }
-		  break;
-	  }
-	  case BranchStmt::FallThroughDefault: {
-		  targetEntry = findEnclosingControlStructure( isFallthroughDefaultTarget );
-
-		  // Check if in switch or choose statement.
-		  if ( targetEntry == enclosing_control_structures.rend() ) {
-			  SemanticError( stmt->location, "'fallthrough' must be enclosed in a 'switch' or 'choose'" );
-		  }
-
-		  // Check if switch or choose has default clause.
-		  auto switchStmt = strict_dynamic_cast< const SwitchStmt * >( targetEntry->stmt );
-		  bool foundDefault = false;
-		  for ( auto caseStmt : switchStmt->cases ) {
-			  if ( caseStmt->isDefault() ) {
-				  foundDefault = true;
-				  break;
-			  }
-		  }
-		  if ( ! foundDefault ) {
-			  SemanticError( stmt->location, "'fallthrough default' must be enclosed in a 'switch' or 'choose'"
-							 "control structure with a 'default' clause" );
-		  }
-		  break;
-	  }
-	  default:
+			}
+			return new BranchStmt( stmt->location, BranchStmt::Goto, stmt->originalTarget );
+		}
+		break;
+	}
+	case BranchStmt::FallThroughDefault: {
+		targetEntry = findEnclosingControlStructure( isFallthroughDefaultTarget );
+
+		// Check if in switch or choose statement.
+		if ( targetEntry == enclosing_control_structures.rend() ) {
+			SemanticError( stmt->location, "'fallthrough' must be enclosed in a 'switch' or 'choose'" );
+		}
+
+		// Check if switch or choose has default clause.
+		auto switchStmt = strict_dynamic_cast< const SwitchStmt * >( targetEntry->stmt );
+		bool foundDefault = false;
+		for ( auto caseStmt : switchStmt->cases ) {
+			if ( caseStmt->isDefault() ) {
+				foundDefault = true;
+				break;
+			}
+		}
+		if ( ! foundDefault ) {
+			SemanticError( stmt->location, "'fallthrough default' must be enclosed in a 'switch' or 'choose'"
+						   "control structure with a 'default' clause" );
+		}
+		break;
+	}
+	default:
 		assert( false );
 	}
@@ -307,17 +308,17 @@
 	Label exitLabel( CodeLocation(), "" );
 	switch ( stmt->kind ) {
-	  case BranchStmt::Break:
+	case BranchStmt::Break:
 		assert( ! targetEntry->useBreakExit().empty() );
 		exitLabel = targetEntry->useBreakExit();
 		break;
-	  case BranchStmt::Continue:
+	case BranchStmt::Continue:
 		assert( ! targetEntry->useContExit().empty() );
 		exitLabel = targetEntry->useContExit();
 		break;
-	  case BranchStmt::FallThrough:
+	case BranchStmt::FallThrough:
 		assert( ! targetEntry->useFallExit().empty() );
 		exitLabel = targetEntry->useFallExit();
 		break;
-	  case BranchStmt::FallThroughDefault:
+	case BranchStmt::FallThroughDefault:
 		assert( ! targetEntry->useFallDefaultExit().empty() );
 		exitLabel = targetEntry->useFallDefaultExit();
@@ -327,5 +328,5 @@
 		}
 		break;
-	  default:
+	default:
 		assert(0);
 	}
@@ -588,15 +589,30 @@
 		}
 
+		ptr<Stmt> else_stmt = nullptr;
+		Stmt * loop_kid = nullptr;
+		// check if loop node and if so add else clause if it exists
+		const WhileDoStmt * whilePtr = dynamic_cast<const WhileDoStmt *>(kid.get());
+		if ( whilePtr && whilePtr->else_) {
+			else_stmt = whilePtr->else_;
+			WhileDoStmt * mutate_ptr = mutate(whilePtr);
+			mutate_ptr->else_ = nullptr;
+			loop_kid = mutate_ptr;
+		}
+		const ForStmt * forPtr = dynamic_cast<const ForStmt *>(kid.get());
+		if ( forPtr && forPtr->else_) {
+			else_stmt = forPtr->else_;
+			ForStmt * mutate_ptr = mutate(forPtr);
+			mutate_ptr->else_ = nullptr;
+			loop_kid = mutate_ptr;
+		}
+
 		try {
-			ret.push_back( kid->accept( *visitor ) );
+			if (else_stmt) ret.push_back( loop_kid->accept( *visitor ) );
+			else ret.push_back( kid->accept( *visitor ) );
 		} catch ( SemanticErrorException & e ) {
 			errors.append( e );
 		}
 
-		// check if loop node and if so add else clause if it exists
-		const WhileDoStmt * whilePtr = dynamic_cast<const WhileDoStmt *>(kid.get());
-		if ( whilePtr && whilePtr->else_) ret.push_back(whilePtr->else_);
-		const ForStmt * forPtr = dynamic_cast<const ForStmt *>(kid.get());
-		if ( forPtr && forPtr->else_) ret.push_back(forPtr->else_);
+		if (else_stmt) ret.push_back(else_stmt);
 
 		if ( ! break_label.empty() ) {
@@ -618,5 +634,7 @@
 	Pass<MultiLevelExitCore> visitor( labelTable );
 	const CompoundStmt * ret = stmt->accept( visitor );
-	return ret;
+	// There are some unset code locations slipping in, possibly by Labels.
+	const Node * node = localFillCodeLocations( ret->location, ret );
+	return strict_dynamic_cast<const CompoundStmt *>( node );
 }
 } // namespace ControlStruct
Index: src/ControlStruct/module.mk
===================================================================
--- src/ControlStruct/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/ControlStruct/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,12 +10,15 @@
 ## Author           : Richard C. Bilson
 ## Created On       : Mon Jun  1 17:49:17 2015
-## Last Modified By : Peter A. Buhr
-## Last Modified On : Sat Jan 29 12:04:19 2022
-## Update Count     : 7
+## Last Modified By : Andrew Beach
+## Last Modified On : Tue May 17 14:30:00 2022
+## Update Count     : 8
 ###############################################################################
 
-SRC_CONTROLSTRUCT = \
+SRC += \
 	ControlStruct/ExceptDecl.cc \
 	ControlStruct/ExceptDecl.h \
+	ControlStruct/ExceptTranslateNew.cpp \
+	ControlStruct/ExceptTranslate.cc \
+	ControlStruct/ExceptTranslate.h \
 	ControlStruct/FixLabels.cpp \
 	ControlStruct/FixLabels.hpp \
@@ -37,9 +40,2 @@
 	ControlStruct/Mutate.h
 
-SRC += $(SRC_CONTROLSTRUCT) \
-	ControlStruct/ExceptTranslateNew.cpp \
-	ControlStruct/ExceptTranslate.cc \
-	ControlStruct/ExceptTranslate.h
-
-SRCDEMANGLE += $(SRC_CONTROLSTRUCT)
-
Index: src/GenPoly/Lvalue.cc
===================================================================
--- src/GenPoly/Lvalue.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/GenPoly/Lvalue.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -9,7 +9,7 @@
 // Author           : Richard C. Bilson
 // Created On       : Mon May 18 07:44:20 2015
-// Last Modified By : Peter A. Buhr
-// Last Modified On : Fri Dec 13 23:14:38 2019
-// Update Count     : 7
+// Last Modified By : Andrew Beach
+// Last Modified On : Mon May 16 14:09:00 2022
+// Update Count     : 8
 //
 
@@ -125,9 +125,6 @@
 	} // namespace
 
-	static bool referencesEliminated = false;
-	// used by UntypedExpr::createDeref to determine whether result type of dereference should be ReferenceType or value type.
-	bool referencesPermissable() {
-		return ! referencesEliminated;
-	}
+	// Stored elsewhere (Lvalue2, initially false).
+	extern bool referencesEliminated;
 
 	void convertLvalue( std::list< Declaration* > & translationUnit ) {
Index: src/GenPoly/Lvalue2.cc
===================================================================
--- src/GenPoly/Lvalue2.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ src/GenPoly/Lvalue2.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,26 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// Lvalue2.cc -- Separate Lvalue module for linking.
+//
+// Author           : Andrew Beach
+// Created On       : Mon May 16 14:05:00 2022
+// Last Modified By : Andrew Beach
+// Last Modified On : Mon May 16 14:05:00 2022
+// Update Count     : 0
+//
+
+namespace GenPoly {
+
+bool referencesEliminated = false;
+
+/// Are reference types still allowed in the AST?
+bool referencesPermissable() {
+	return !referencesEliminated;
+}
+
+
+}
Index: src/GenPoly/module.mk
===================================================================
--- src/GenPoly/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/GenPoly/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,26 +10,29 @@
 ## Author           : Richard C. Bilson
 ## Created On       : Mon Jun  1 17:49:17 2015
-## Last Modified By : Peter A. Buhr
-## Last Modified On : Mon Jun  1 17:52:30 2015
-## Update Count     : 1
+## Last Modified By : Andrew Beach
+## Last Modified On : Tue May 17 14:31:00 2022
+## Update Count     : 2
 ###############################################################################
 
-SRC += GenPoly/Box.cc \
-       GenPoly/Box.h \
-       GenPoly/ErasableScopedMap.h \
-       GenPoly/FindFunction.cc \
-       GenPoly/FindFunction.h \
-       GenPoly/GenPoly.cc \
-       GenPoly/GenPoly.h \
-       GenPoly/InstantiateGeneric.cc \
-       GenPoly/InstantiateGeneric.h \
-       GenPoly/Lvalue.cc \
-       GenPoly/Lvalue.h \
-       GenPoly/ScopedSet.h \
-       GenPoly/ScrubTyVars.cc \
-       GenPoly/ScrubTyVars.h \
-       GenPoly/Specialize.cc \
-       GenPoly/Specialize.h
+SRC_GENPOLY = \
+	GenPoly/GenPoly.cc \
+	GenPoly/GenPoly.h \
+	GenPoly/Lvalue2.cc \
+	GenPoly/Lvalue.h
 
-SRCDEMANGLE += GenPoly/GenPoly.cc GenPoly/GenPoly.h GenPoly/Lvalue.cc GenPoly/Lvalue.h
+SRC += $(SRC_GENPOLY) \
+	GenPoly/Box.cc \
+	GenPoly/Box.h \
+	GenPoly/ErasableScopedMap.h \
+	GenPoly/FindFunction.cc \
+	GenPoly/FindFunction.h \
+	GenPoly/InstantiateGeneric.cc \
+	GenPoly/InstantiateGeneric.h \
+	GenPoly/Lvalue.cc \
+	GenPoly/ScopedSet.h \
+	GenPoly/ScrubTyVars.cc \
+	GenPoly/ScrubTyVars.h \
+	GenPoly/Specialize.cc \
+	GenPoly/Specialize.h
 
+SRCDEMANGLE += $(SRC_GENPOLY)
Index: src/InitTweak/FixInitNew.cpp
===================================================================
--- src/InitTweak/FixInitNew.cpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/InitTweak/FixInitNew.cpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -454,39 +454,32 @@
 
 		auto expr = new ast::ImplicitCopyCtorExpr( appExpr->location, mutExpr );
-		// Move the type substitution to the new top-level, if it is attached to the appExpr.
-		// Ensure it is not deleted with the ImplicitCopyCtorExpr by removing it before deletion.
-		// The substitution is needed to obtain the type of temporary variables so that copy constructor
-		// calls can be resolved.
+		// Move the type substitution to the new top-level. The substitution
+		// is needed to obtain the type of temporary variables so that copy
+		// constructor calls can be resolved.
 		assert( typeSubs );
-		// assert (mutExpr->env);
 		expr->env = tmp;
-		// mutExpr->env = nullptr;
-		//std::swap( expr->env, appExpr->env );
 		return expr;
 	}
 
 	void ResolveCopyCtors::previsit(const ast::Expr * expr) {
-		if (expr->env) {
-			GuardValue(env);
-			GuardValue(envModified);
-			env = expr->env->clone();
-			envModified = false;
-		}
+		if ( nullptr == expr->env ) {
+			return;
+		}
+		GuardValue( env ) = expr->env->clone();
+		GuardValue( envModified ) = false;
 	}
 
 	const ast::Expr * ResolveCopyCtors::postvisit(const ast::Expr * expr) {
-		if (expr->env) {
-			if (envModified) {
-				auto mutExpr = mutate(expr);
-				mutExpr->env = env;
-				return mutExpr;
-			}
-			else {
-				// env was not mutated, skip and delete the shallow copy
-				delete env;
-				return expr;
-			}
-		}
-		else {
+		// No local environment, skip.
+		if ( nullptr == expr->env ) {
+			return expr;
+		// Environment was modified, mutate and replace.
+		} else if ( envModified ) {
+			auto mutExpr = mutate(expr);
+			mutExpr->env = env;
+			return mutExpr;
+		// Environment was not mutated, delete the shallow copy before guard.
+		} else {
+			delete env;
 			return expr;
 		}
@@ -497,6 +490,6 @@
 	const ast::Expr * ResolveCopyCtors::makeCtorDtor( const std::string & fname, const ast::ObjectDecl * var, const ast::Expr * cpArg ) {
 		assert( var );
-		assert (var->isManaged());
-		assert (!cpArg || cpArg->isManaged());
+		assert( var->isManaged() );
+		assert( !cpArg || cpArg->isManaged() );
 		// arrays are not copy constructed, so this should always be an ExprStmt
 		ast::ptr< ast::Stmt > stmt = genCtorDtor(var->location, fname, var, cpArg );
@@ -504,5 +497,4 @@
 		auto exprStmt = stmt.strict_as<ast::ImplicitCtorDtorStmt>()->callStmt.strict_as<ast::ExprStmt>();
 		ast::ptr<ast::Expr> untyped = exprStmt->expr; // take ownership of expr
-		// exprStmt->expr = nullptr;
 
 		// resolve copy constructor
@@ -516,10 +508,8 @@
 			env->add( *resolved->env );
 			envModified = true;
-			// delete resolved->env;
 			auto mut = mutate(resolved.get());
 			assertf(mut == resolved.get(), "newly resolved expression must be unique");
 			mut->env = nullptr;
 		} // if
-		// delete stmt;
 		if ( auto assign = resolved.as<ast::TupleAssignExpr>() ) {
 			// fix newly generated StmtExpr
Index: src/InitTweak/GenInit.cc
===================================================================
--- src/InitTweak/GenInit.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/InitTweak/GenInit.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -368,5 +368,5 @@
 
 	struct ReturnFixer_New final :
-			public ast::WithStmtsToAdd<>, ast::WithGuards {
+			public ast::WithStmtsToAdd<>, ast::WithGuards, ast::WithShortCircuiting {
 		void previsit( const ast::FunctionDecl * decl );
 		const ast::ReturnStmt * previsit( const ast::ReturnStmt * stmt );
@@ -376,4 +376,5 @@
 
 	void ReturnFixer_New::previsit( const ast::FunctionDecl * decl ) {
+		if (decl->linkage == ast::Linkage::Intrinsic) visit_children = false;
 		GuardValue( funcDecl ) = decl;
 	}
Index: src/InitTweak/module.mk
===================================================================
--- src/InitTweak/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/InitTweak/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,21 +10,10 @@
 ## Author           : Richard C. Bilson
 ## Created On       : Mon Jun  1 17:49:17 2015
-## Last Modified By : Rob Schluntz
-## Last Modified On : Fri May 13 11:36:24 2016
-## Update Count     : 3
+## Last Modified By : Andrew Beach
+## Last Modified On : Tue May 17 14:31:00 2022
+## Update Count     : 4
 ###############################################################################
 
-SRC += \
-	InitTweak/FixGlobalInit.cc \
-	InitTweak/FixGlobalInit.h \
-	InitTweak/FixInit.cc \
-	InitTweak/FixInit.h \
-	InitTweak/GenInit.cc \
-	InitTweak/GenInit.h \
-	InitTweak/InitTweak.cc \
-	InitTweak/InitTweak.h \
-	InitTweak/FixInitNew.cpp
-
-SRCDEMANGLE += \
+SRC_INITTWEAK = \
 	InitTweak/GenInit.cc \
 	InitTweak/GenInit.h \
@@ -32,2 +21,10 @@
 	InitTweak/InitTweak.h
 
+SRC += $(SRC_INITTWEAK) \
+	InitTweak/FixGlobalInit.cc \
+	InitTweak/FixGlobalInit.h \
+	InitTweak/FixInit.cc \
+	InitTweak/FixInit.h \
+	InitTweak/FixInitNew.cpp
+
+SRCDEMANGLE += $(SRC_INITTWEAK)
Index: src/Parser/parser.yy
===================================================================
--- src/Parser/parser.yy	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Parser/parser.yy	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,6 +10,6 @@
 // Created On       : Sat Sep  1 20:22:55 2001
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Wed May  4 17:22:48 2022
-// Update Count     : 5279
+// Last Modified On : Sat May 14 09:16:22 2022
+// Update Count     : 5401
 //
 
@@ -54,4 +54,6 @@
 #include "Common/SemanticError.h"						// error_str
 #include "Common/utility.h"								// for maybeMoveBuild, maybeBuild, CodeLo...
+
+#include "SynTree/Attribute.h"     // for Attribute
 
 extern DeclarationNode * parseTree;
@@ -93,11 +95,17 @@
 } // appendStr
 
-DeclarationNode * distAttr( DeclarationNode * specifier, DeclarationNode * declList ) {
-	// distribute declaration_specifier across all declared variables, e.g., static, const, __attribute__.
-	DeclarationNode * cur = declList, * cl = (new DeclarationNode)->addType( specifier );
+DeclarationNode * distAttr( DeclarationNode * typeSpec, DeclarationNode * declList ) {
+	// distribute declaration_specifier across all declared variables, e.g., static, const, but not __attribute__.
+	assert( declList );
+//	printf( "distAttr1 typeSpec %p\n", typeSpec ); typeSpec->print( std::cout );
+	DeclarationNode * cur = declList, * cl = (new DeclarationNode)->addType( typeSpec );
+//	printf( "distAttr2 cl %p\n", cl ); cl->type->print( std::cout );
+//	cl->type->aggregate.name = cl->type->aggInst.aggregate->aggregate.name;
+
 	for ( cur = dynamic_cast<DeclarationNode *>( cur->get_next() ); cur != nullptr; cur = dynamic_cast<DeclarationNode *>( cur->get_next() ) ) {
 		cl->cloneBaseType( cur );
 	} // for
 	declList->addType( cl );
+//	printf( "distAttr3 declList %p\n", declList ); declList->print( std::cout, 0 );
 	return declList;
 } // distAttr
@@ -171,11 +179,17 @@
 		if ( ! ( typeSpec->type && (typeSpec->type->kind == TypeData::Aggregate || typeSpec->type->kind == TypeData::Enum) ) ) {
 			stringstream ss;
-			typeSpec->type->print( ss );
+			// printf( "fieldDecl1 typeSpec %p\n", typeSpec ); typeSpec->type->print( std::cout );
 			SemanticWarning( yylloc, Warning::SuperfluousDecl, ss.str().c_str() );
 			return nullptr;
 		} // if
+		// printf( "fieldDecl2 typeSpec %p\n", typeSpec ); typeSpec->type->print( std::cout );
 		fieldList = DeclarationNode::newName( nullptr );
 	} // if
-	return distAttr( typeSpec, fieldList );				// mark all fields in list
+//	return distAttr( typeSpec, fieldList );				// mark all fields in list
+
+	// printf( "fieldDecl3 typeSpec %p\n", typeSpec ); typeSpec->print( std::cout, 0 );
+	DeclarationNode * temp = distAttr( typeSpec, fieldList );				// mark all fields in list
+	// printf( "fieldDecl4 temp %p\n", temp ); temp->print( std::cout, 0 );
+	return temp;
 } // fieldDecl
 
@@ -1620,4 +1634,10 @@
 declaration:											// old & new style declarations
 	c_declaration ';'
+		{
+			// printf( "C_DECLARATION1 %p %s\n", $$, $$->name ? $$->name->c_str() : "(nil)" );
+		  	// for ( Attribute * attr: reverseIterate( $$->attributes ) ) {
+			//   printf( "\tattr %s\n", attr->name.c_str() );
+			// } // for
+		}
 	| cfa_declaration ';'								// CFA
 	| static_assert										// C11
@@ -1825,4 +1845,10 @@
 	basic_type_specifier
 	| sue_type_specifier
+		{
+			// printf( "sue_type_specifier2 %p %s\n", $$, $$->type->aggregate.name ? $$->type->aggregate.name->c_str() : "(nil)" );
+		  	// for ( Attribute * attr: reverseIterate( $$->attributes ) ) {
+			//   printf( "\tattr %s\n", attr->name.c_str() );
+			// } // for
+		}
 	| type_type_specifier
 	;
@@ -2041,4 +2067,10 @@
 sue_declaration_specifier:								// struct, union, enum + storage class + type specifier
 	sue_type_specifier
+		{
+			// printf( "sue_declaration_specifier %p %s\n", $$, $$->type->aggregate.name ? $$->type->aggregate.name->c_str() : "(nil)" );
+		  	// for ( Attribute * attr: reverseIterate( $$->attributes ) ) {
+			//   printf( "\tattr %s\n", attr->name.c_str() );
+			// } // for
+		}
 	| declaration_qualifier_list sue_type_specifier
 		{ $$ = $2->addQualifiers( $1 ); }
@@ -2051,4 +2083,10 @@
 sue_type_specifier:										// struct, union, enum + type specifier
 	elaborated_type
+		{
+			// printf( "sue_type_specifier %p %s\n", $$, $$->type->aggregate.name ? $$->type->aggregate.name->c_str() : "(nil)" );
+		  	// for ( Attribute * attr: reverseIterate( $$->attributes ) ) {
+			//   printf( "\tattr %s\n", attr->name.c_str() );
+			// } // for
+		}
 	| type_qualifier_list
 		{ if ( $1->type != nullptr && $1->type->forall ) forall = true; } // remember generic type
@@ -2123,4 +2161,10 @@
 elaborated_type:										// struct, union, enum
 	aggregate_type
+		{
+			// printf( "elaborated_type %p %s\n", $$, $$->type->aggregate.name ? $$->type->aggregate.name->c_str() : "(nil)" );
+		  	// for ( Attribute * attr: reverseIterate( $$->attributes ) ) {
+			//   printf( "\tattr %s\n", attr->name.c_str() );
+			// } // for
+		}
 	| enum_type
 	;
@@ -2142,5 +2186,16 @@
 		}
 	  '{' field_declaration_list_opt '}' type_parameters_opt
-		{ $$ = DeclarationNode::newAggregate( $1, $3, $8, $6, true )->addQualifiers( $2 ); }
+		{
+			// printf( "aggregate_type1 %s\n", $3.str->c_str() );
+			// if ( $2 )
+			// 	for ( Attribute * attr: reverseIterate( $2->attributes ) ) {
+			// 		printf( "copySpecifiers12 %s\n", attr->name.c_str() );
+			// 	} // for
+			$$ = DeclarationNode::newAggregate( $1, $3, $8, $6, true )->addQualifiers( $2 );
+			// printf( "aggregate_type2 %p %s\n", $$, $$->type->aggregate.name ? $$->type->aggregate.name->c_str() : "(nil)" );
+			// for ( Attribute * attr: reverseIterate( $$->attributes ) ) {
+			// 	printf( "aggregate_type3 %s\n", attr->name.c_str() );
+			// } // for
+		}
 	| aggregate_key attribute_list_opt TYPEDEFname		// unqualified type name
 		{
@@ -2150,4 +2205,5 @@
 	  '{' field_declaration_list_opt '}' type_parameters_opt
 		{
+			// printf( "AGG3\n" );
 			DeclarationNode::newFromTypedef( $3 );
 			$$ = DeclarationNode::newAggregate( $1, $3, $8, $6, true )->addQualifiers( $2 );
@@ -2160,4 +2216,5 @@
 	  '{' field_declaration_list_opt '}' type_parameters_opt
 		{
+			// printf( "AGG4\n" );
 			DeclarationNode::newFromTypeGen( $3, nullptr );
 			$$ = DeclarationNode::newAggregate( $1, $3, $8, $6, true )->addQualifiers( $2 );
@@ -2236,5 +2293,12 @@
 field_declaration:
 	type_specifier field_declaring_list_opt ';'
-		{ $$ = fieldDecl( $1, $2 ); }
+		{
+			// printf( "type_specifier1 %p %s\n", $$, $$->type->aggregate.name ? $$->type->aggregate.name->c_str() : "(nil)" );
+			$$ = fieldDecl( $1, $2 );
+			// printf( "type_specifier2 %p %s\n", $$, $$->type->aggregate.name ? $$->type->aggregate.name->c_str() : "(nil)" );
+		  	// for ( Attribute * attr: reverseIterate( $$->attributes ) ) {
+			//   printf( "\tattr %s\n", attr->name.c_str() );
+			// } // for
+		}
 	| EXTENSION type_specifier field_declaring_list_opt ';'	// GCC
 		{ $$ = fieldDecl( $2, $3 ); distExt( $$ ); }
@@ -2845,6 +2909,12 @@
 	// empty
 		{ $$ = nullptr; forall = false; }
-	| WITH '(' tuple_expression_list ')'
-		{ $$ = $3; forall = false; }
+	| WITH '(' tuple_expression_list ')' attribute_list_opt
+		{
+			$$ = $3; forall = false;
+			if ( $5 ) {
+				SemanticError( yylloc, "Attributes cannot be associated with function body. Move attribute(s) before \"with\" clause." );
+				$$ = nullptr;
+			} // if
+		}
 	;
 
Index: src/ResolvExpr/AlternativeFinder.cc
===================================================================
--- src/ResolvExpr/AlternativeFinder.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/ResolvExpr/AlternativeFinder.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -42,5 +42,5 @@
 #include "SymTab/Indexer.h"        // for Indexer
 #include "SymTab/Mangler.h"        // for Mangler
-#include "SymTab/Validate.h"       // for validateType
+#include "SymTab/ValidateType.h"   // for validateType
 #include "SynTree/Constant.h"      // for Constant
 #include "SynTree/Declaration.h"   // for DeclarationWithType, TypeDecl, Dec...
Index: src/ResolvExpr/CandidateFinder.cpp
===================================================================
--- src/ResolvExpr/CandidateFinder.cpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/ResolvExpr/CandidateFinder.cpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -899,4 +899,14 @@
 
 						if (argType.as<ast::PointerType>()) funcFinder.otypeKeys.insert(Mangle::Encoding::pointer);
+						else if (const ast::EnumInstType * enumInst = argType.as<ast::EnumInstType>()) {
+							const ast::EnumDecl * enumDecl = enumInst->base;
+							if ( const ast::Type* enumType = enumDecl->base ) {
+								// instance of enum (T) is an instance of type (T) 
+								funcFinder.otypeKeys.insert(Mangle::mangle(enumType, Mangle::NoGenericParams | Mangle::Type));
+							} else {
+								// instance of an untyped enum is technically int
+								funcFinder.otypeKeys.insert(Mangle::mangle(enumDecl, Mangle::NoGenericParams | Mangle::Type));
+							}
+						} 
 						else funcFinder.otypeKeys.insert(Mangle::mangle(argType, Mangle::NoGenericParams | Mangle::Type));
 					}
@@ -918,5 +928,5 @@
 
 			// find function operators
-			ast::ptr< ast::Expr > opExpr = new ast::NameExpr{ untypedExpr->location, "?()" };
+			ast::ptr< ast::Expr > opExpr = new ast::NameExpr{ untypedExpr->location, "?()" }; // ??? why not ?{}
 			CandidateFinder opFinder( context, tenv );
 			// okay if there aren't any function operations
Index: src/ResolvExpr/CommonType.cc
===================================================================
--- src/ResolvExpr/CommonType.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/ResolvExpr/CommonType.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -497,5 +497,5 @@
 				result = new BasicType( basicType->tq | otherBasic->tq, newType );
 			} // if
-		} else if ( dynamic_cast< EnumInstType * > ( type2 ) || dynamic_cast< ZeroType * >( type2 ) || dynamic_cast< OneType * >( type2 ) ) {
+		} else if (  dynamic_cast< ZeroType * >( type2 ) || dynamic_cast< OneType * >( type2 ) ) {
 			// use signed int in lieu of the enum/zero/one type
 			BasicType::Kind newType = commonTypes[ basicType->get_kind() ][ BasicType::SignedInt ];
@@ -503,5 +503,15 @@
 				result = new BasicType( basicType->tq | type2->tq, newType );
 			} // if
-		} // if
+		} else if ( const EnumInstType * enumInst = dynamic_cast< const EnumInstType * > ( type2 ) ) {
+			const EnumDecl* enumDecl = enumInst->baseEnum;
+			if ( const Type* baseType = enumDecl->base ) {
+				result = baseType->clone();
+			} else {
+				BasicType::Kind newType = commonTypes[ basicType->get_kind() ][ BasicType::SignedInt ];
+				if ( ( ( newType == basicType->get_kind() && basicType->tq >= type2->tq ) || widenFirst ) && ( ( newType != basicType->get_kind() && basicType->tq <= type2->tq ) || widenSecond ) ) {
+					result = new BasicType( basicType->tq | type2->tq, newType );
+				} // if
+			}
+		}
 	}
 
@@ -691,6 +701,5 @@
 				}
 			} else if (
-				dynamic_cast< const ast::EnumInstType * >( type2 )
-				|| dynamic_cast< const ast::ZeroType * >( type2 )
+				dynamic_cast< const ast::ZeroType * >( type2 )
 				|| dynamic_cast< const ast::OneType * >( type2 )
 			) {
@@ -705,4 +714,20 @@
 					result = new ast::BasicType{ kind, basic->qualifiers | type2->qualifiers };
 				}
+			} else if ( const ast::EnumInstType * enumInst = dynamic_cast< const ast::EnumInstType * >( type2 ) ) {
+				#warning remove casts when `commonTypes` moved to new AST
+				const ast::EnumDecl* enumDecl = enumInst->base;
+				if ( enumDecl->base ) {
+					result = enumDecl->base.get();
+				} else {
+					ast::BasicType::Kind kind = (ast::BasicType::Kind)(int)commonTypes[ (BasicType::Kind)(int)basic->kind ][ (BasicType::Kind)(int)ast::BasicType::SignedInt ];
+					if (
+						( ( kind == basic->kind && basic->qualifiers >= type2->qualifiers )
+							|| widen.first )
+						&& ( ( kind != basic->kind && basic->qualifiers <= type2->qualifiers )
+							|| widen.second )
+					) {
+						result = new ast::BasicType{ kind, basic->qualifiers | type2->qualifiers };
+					}
+				}
 			}
 		}
@@ -723,4 +748,18 @@
 			result = voidPtr;
 			add_qualifiers( result, oPtr->qualifiers );
+		}
+
+		// For a typed enum, we want to unify type1 with the base type of the enum
+		bool tryResolveWithTypedEnum( const ast::Type * type1 ) {
+			if (auto enumInst = dynamic_cast<const ast::EnumInstType *> (type2) ) {
+				ast::AssertionSet have, need; // unused
+				ast::OpenVarSet newOpen{ open };
+				if (enumInst->base->base 
+				&& unifyExact(type1, enumInst->base->base, tenv, need, have, newOpen, widen, symtab)) {
+					result = type1;
+					return true;
+				}
+			}
+			return false;
 		}
 
@@ -768,8 +807,13 @@
 				result = pointer;
 				add_qualifiers( result, type2->qualifiers );
-			}
-		}
-
-		void postvisit( const ast::ArrayType * ) {}
+			} else {
+				tryResolveWithTypedEnum( pointer );
+			}
+		}
+
+		void postvisit( const ast::ArrayType * arr ) {
+			// xxx - does it make sense? 
+			tryResolveWithTypedEnum( arr );
+		}
 
 		void postvisit( const ast::ReferenceType * ref ) {
@@ -810,22 +854,28 @@
 				result = ref;
 				add_qualifiers( result, type2->qualifiers );
-			}
-		}
-
-		void postvisit( const ast::FunctionType * ) {}
-
-		void postvisit( const ast::StructInstType * ) {}
-
-		void postvisit( const ast::UnionInstType * ) {}
+			} else {
+				// xxx - does unifying a ref with typed enumInst make sense?
+				if (!dynamic_cast<const ast::EnumInstType *>(type2))
+					result = commonType( type2, ref, widen, symtab, tenv, open );
+			}
+		}
+
+		void postvisit( const ast::FunctionType * func) {
+			tryResolveWithTypedEnum( func ); 
+		}
+
+		void postvisit( const ast::StructInstType * inst ) {
+			tryResolveWithTypedEnum( inst );
+		}
+
+		void postvisit( const ast::UnionInstType * inst ) {
+			tryResolveWithTypedEnum( inst );
+		}
 
 		void postvisit( const ast::EnumInstType * enumInst ) {
-			if (
-				dynamic_cast< const ast::BasicType * >( type2 )
-				|| dynamic_cast< const ast::ZeroType * >( type2 )
-				|| dynamic_cast< const ast::OneType * >( type2 )
-			) {
-				// reuse BasicType/EnumInstType common type by swapping
+			// reuse BasicType/EnumInstType common type by swapping
+			// xxx - is this already handled by unify?
+			if (!dynamic_cast<const ast::EnumInstType *>(type2))
 				result = commonType( type2, enumInst, widen, symtab, tenv, open );
-			}
 		}
 
@@ -850,4 +900,6 @@
 						result = type2;
 						reset_qualifiers( result, q1 | q2 );
+					} else {
+						tryResolveWithTypedEnum( t1 );
 					}
 				}
@@ -855,5 +907,7 @@
 		}
 
-		void postvisit( const ast::TupleType * ) {}
+		void postvisit( const ast::TupleType * tuple) {
+			tryResolveWithTypedEnum( tuple );
+		}
 
 		void postvisit( const ast::VarArgsType * ) {}
@@ -861,9 +915,6 @@
 		void postvisit( const ast::ZeroType * zero ) {
 			if ( ! widen.first ) return;
-			if (
-				dynamic_cast< const ast::BasicType * >( type2 )
-				|| dynamic_cast< const ast::PointerType * >( type2 )
-				|| dynamic_cast< const ast::EnumInstType * >( type2 )
-			) {
+			if ( dynamic_cast< const ast::BasicType * >( type2 )
+				|| dynamic_cast< const ast::PointerType * >( type2 ) ) {
 				if ( widen.second || zero->qualifiers <= type2->qualifiers ) {
 					result = type2;
@@ -873,4 +924,15 @@
 				result = new ast::BasicType{
 					ast::BasicType::SignedInt, zero->qualifiers | type2->qualifiers };
+			} else if ( const ast::EnumInstType * enumInst = dynamic_cast< const ast::EnumInstType * >( type2 ) ) {
+				const ast::EnumDecl * enumDecl = enumInst->base;
+				if ( enumDecl->base ) {
+					if ( tryResolveWithTypedEnum( zero ) ) 
+						add_qualifiers( result, zero->qualifiers );
+				} else {
+					if ( widen.second || zero->qualifiers <= type2->qualifiers ) {
+						result = type2;
+						add_qualifiers( result, zero->qualifiers );
+					}
+				}
 			}
 		}
@@ -878,8 +940,5 @@
 		void postvisit( const ast::OneType * one ) {
 			if ( ! widen.first ) return;
-			if (
-				dynamic_cast< const ast::BasicType * >( type2 )
-				|| dynamic_cast< const ast::EnumInstType * >( type2 )
-			) {
+			if ( dynamic_cast< const ast::BasicType * >( type2 ) ) {
 				if ( widen.second || one->qualifiers <= type2->qualifiers ) {
 					result = type2;
@@ -889,4 +948,15 @@
 				result = new ast::BasicType{
 					ast::BasicType::SignedInt, one->qualifiers | type2->qualifiers };
+			} else if ( const ast::EnumInstType * enumInst = dynamic_cast< const ast::EnumInstType * >( type2 ) ) {
+				const ast::EnumDecl * enumBase = enumInst->base;
+				if ( enumBase->base ) {
+					if ( tryResolveWithTypedEnum( one ))
+						add_qualifiers( result, one->qualifiers );
+				} else {
+					if ( widen.second || one->qualifiers <= type2->qualifiers ) {
+						result = type2;
+						add_qualifiers( result, one->qualifiers );
+					}
+				}
 			}
 		}
Index: src/ResolvExpr/ConversionCost.cc
===================================================================
--- src/ResolvExpr/ConversionCost.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/ResolvExpr/ConversionCost.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -321,20 +321,27 @@
 	}
 
+	// refactor for code reuse
+	void ConversionCost::conversionCostFromBasicToBasic(const BasicType * src, const BasicType * dest) {
+		int tableResult = costMatrix[ src->kind ][ dest->kind ];
+		if ( tableResult == -1 ) {
+			cost = Cost::unsafe;
+		} else {
+			cost = Cost::zero;
+			cost.incSafe( tableResult );
+			cost.incSign( signMatrix[ src->kind ][ dest->kind ] );
+		} // if
+	} // ConversionCost::conversionCostFromBasicToBasic
+
 	void ConversionCost::postvisit(const BasicType * basicType) {
 		if ( const BasicType * destAsBasic = dynamic_cast< const BasicType * >( dest ) ) {
-			int tableResult = costMatrix[ basicType->kind ][ destAsBasic->kind ];
-			if ( tableResult == -1 ) {
-				cost = Cost::unsafe;
-			} else {
-				cost = Cost::zero;
-				cost.incSafe( tableResult );
-				cost.incSign( signMatrix[ basicType->kind ][ destAsBasic->kind ] );
-			} // if
-		} else if ( dynamic_cast< const EnumInstType * >( dest ) ) {
-			// xxx - not positive this is correct, but appears to allow casting int => enum
-			// TODO
-			EnumDecl * decl = dynamic_cast< const EnumInstType * >( dest )->baseEnum;
-			if ( decl->base ) {
-				cost = Cost::infinity;
+			conversionCostFromBasicToBasic(basicType, destAsBasic);
+		} else if ( const EnumInstType * enumInst = dynamic_cast< const EnumInstType * >( dest ) ) {
+			const EnumDecl * base_enum = enumInst->baseEnum;
+			if ( const Type * base = base_enum->base ) { // if the base enum has a base (if it is typed)
+				if ( const BasicType * enumBaseAstBasic = dynamic_cast< const BasicType *> (base) ) {
+					conversionCostFromBasicToBasic(basicType, enumBaseAstBasic);
+				} else {
+					cost = Cost::infinity;
+				} // if
 			} else {
 				cost = Cost::unsafe;
@@ -398,10 +405,15 @@
 	void ConversionCost::postvisit( const FunctionType * ) {}
 
-	void ConversionCost::postvisit( const EnumInstType * ) {
-		static Type::Qualifiers q;
-		static BasicType integer( q, BasicType::SignedInt );
-		cost = costFunc( &integer, dest, srcIsLvalue, indexer, env );  // safe if dest >= int
+	void ConversionCost::postvisit( const EnumInstType * enumInst) {
+		const EnumDecl * enumDecl = enumInst -> baseEnum;
+		if ( const Type * enumType = enumDecl -> base ) { // if it is a typed enum
+			cost = costFunc( enumType, dest, srcIsLvalue, indexer, env );
+		} else {
+			static Type::Qualifiers q;
+			static BasicType integer( q, BasicType::SignedInt );
+			cost = costFunc( &integer, dest, srcIsLvalue, indexer, env );  // safe if dest >= int
+		} // if
 		if ( cost < Cost::unsafe ) {
-			cost.incSafe();
+				cost.incSafe();
 		} // if
 	}
@@ -604,22 +616,29 @@
 }
 
+void ConversionCost_new::conversionCostFromBasicToBasic( const ast::BasicType * src, const ast::BasicType* dest ) {
+	int tableResult = costMatrix[ src->kind ][ dest->kind ];
+	if ( tableResult == -1 ) {
+		cost = Cost::unsafe;
+	} else {
+		cost = Cost::zero;
+		cost.incSafe( tableResult );
+		cost.incSign( signMatrix[ src->kind ][ dest->kind ] );
+	}
+}
+
 void ConversionCost_new::postvisit( const ast::BasicType * basicType ) {
 	if ( const ast::BasicType * dstAsBasic = dynamic_cast< const ast::BasicType * >( dst ) ) {
-		int tableResult = costMatrix[ basicType->kind ][ dstAsBasic->kind ];
-		if ( tableResult == -1 ) {
-			cost = Cost::unsafe;
-		} else {
-			cost = Cost::zero;
-			cost.incSafe( tableResult );
-			cost.incSign( signMatrix[ basicType->kind ][ dstAsBasic->kind ] );
-		}
-	} else if ( dynamic_cast< const ast::EnumInstType * >( dst ) ) {
-		// xxx - not positive this is correct, but appears to allow casting int => enum
-		const ast::EnumDecl * decl = (dynamic_cast< const ast::EnumInstType * >( dst ))->base.get();
-		if ( decl->base ) {
-			cost = Cost::infinity;
-		} else {
-			cost = Cost::unsafe;
-		} // if
+		conversionCostFromBasicToBasic( basicType, dstAsBasic );
+	} else if ( const ast::EnumInstType * enumInst = dynamic_cast< const ast::EnumInstType * >( dst ) ) {
+		const ast::EnumDecl * enumDecl = enumInst->base.get();
+		if ( const ast::Type * enumType = enumDecl->base.get() ) {
+			if ( const ast::BasicType * enumTypeAsBasic = dynamic_cast<const ast::BasicType *>(enumType) ) {
+				conversionCostFromBasicToBasic( basicType, enumTypeAsBasic );
+			} else {
+				cost = Cost::infinity;
+			}
+		} else {
+            cost = Cost::unsafe;
+		}
 	}
 }
@@ -673,7 +692,12 @@
 
 void ConversionCost_new::postvisit( const ast::EnumInstType * enumInstType ) {
-	(void)enumInstType;
-	static ast::ptr<ast::BasicType> integer = { new ast::BasicType( ast::BasicType::SignedInt ) };
-	cost = costCalc( integer, dst, srcIsLvalue, symtab, env );
+	const ast::EnumDecl * baseEnum = enumInstType->base;
+	if ( const ast::Type * baseType = baseEnum->base ) {
+		cost = costCalc( baseType, dst, srcIsLvalue, symtab, env );
+	} else {
+		(void)enumInstType;
+		static ast::ptr<ast::BasicType> integer = { new ast::BasicType( ast::BasicType::SignedInt ) };
+		cost = costCalc( integer, dst, srcIsLvalue, symtab, env );
+	}
 	if ( cost < Cost::unsafe ) {
 		cost.incSafe();
Index: src/ResolvExpr/ConversionCost.h
===================================================================
--- src/ResolvExpr/ConversionCost.h	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/ResolvExpr/ConversionCost.h	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -65,4 +65,7 @@
 		const TypeEnvironment &env;
 		CostFunction costFunc;
+	  private:
+	  	// refactored for code reuse
+	  	void conversionCostFromBasicToBasic( const BasicType * src, const BasicType* dest );
 	};
 
@@ -111,4 +114,7 @@
 	void postvisit( const ast::ZeroType * zeroType );
 	void postvisit( const ast::OneType * oneType );
+private:
+	// refactor for code resue
+	void conversionCostFromBasicToBasic( const ast::BasicType * src, const ast::BasicType* dest );
 };
 
Index: src/SymTab/Autogen.h
===================================================================
--- src/SymTab/Autogen.h	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/SymTab/Autogen.h	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -21,5 +21,4 @@
 
 #include "AST/Decl.hpp"
-#include "AST/Eval.hpp"
 #include "AST/Expr.hpp"
 #include "AST/Init.hpp"
@@ -71,6 +70,6 @@
 	template< typename OutIter >
 	ast::ptr< ast::Stmt > genCall(
-		InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam, 
-		const CodeLocation & loc, const std::string & fname, OutIter && out, 
+		InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam,
+		const CodeLocation & loc, const std::string & fname, OutIter && out,
 		const ast::Type * type, const ast::Type * addCast, LoopDirection forward = LoopForward );
 
@@ -128,12 +127,12 @@
 	}
 
-	/// inserts into out a generated call expression to function fname with arguments dstParam and 
+	/// inserts into out a generated call expression to function fname with arguments dstParam and
 	/// srcParam. Should only be called with non-array types.
-	/// optionally returns a statement which must be inserted prior to the containing loop, if 
+	/// optionally returns a statement which must be inserted prior to the containing loop, if
 	/// there is one
 	template< typename OutIter >
-	ast::ptr< ast::Stmt > genScalarCall( 
-		InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam, 
-		const CodeLocation & loc, std::string fname, OutIter && out, const ast::Type * type, 
+	ast::ptr< ast::Stmt > genScalarCall(
+		InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam,
+		const CodeLocation & loc, std::string fname, OutIter && out, const ast::Type * type,
 		const ast::Type * addCast = nullptr
 	) {
@@ -153,13 +152,13 @@
 
 		if ( addCast ) {
-			// cast to T& with qualifiers removed, so that qualified objects can be constructed and 
-			// destructed with the same functions as non-qualified objects. Unfortunately, lvalue 
-			// is considered a qualifier - for AddressExpr to resolve, its argument must have an 
+			// cast to T& with qualifiers removed, so that qualified objects can be constructed and
+			// destructed with the same functions as non-qualified objects. Unfortunately, lvalue
+			// is considered a qualifier - for AddressExpr to resolve, its argument must have an
 			// lvalue-qualified type, so remove all qualifiers except lvalue.
 			// xxx -- old code actually removed lvalue too...
 			ast::ptr< ast::Type > guard = addCast;  // prevent castType from mutating addCast
 			ast::ptr< ast::Type > castType = addCast;
-			ast::remove_qualifiers( 
-				castType, 
+			ast::remove_qualifiers(
+				castType,
 				ast::CV::Const | ast::CV::Volatile | ast::CV::Restrict | ast::CV::Atomic );
 			dstParam = new ast::CastExpr{ dstParam, new ast::ReferenceType{ castType } };
@@ -181,5 +180,5 @@
 
 		srcParam.clearArrayIndices();
-		
+
 		return listInit;
 	}
@@ -249,12 +248,12 @@
 	}
 
-	/// Store in out a loop which calls fname on each element of the array with srcParam and 
+	/// Store in out a loop which calls fname on each element of the array with srcParam and
 	/// dstParam as arguments. If forward is true, loop goes from 0 to N-1, else N-1 to 0
 	template< typename OutIter >
 	void genArrayCall(
-		InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam, 
-		const CodeLocation & loc, const std::string & fname, OutIter && out, 
-		const ast::ArrayType * array, const ast::Type * addCast = nullptr, 
-		LoopDirection forward = LoopForward 
+		InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam,
+		const CodeLocation & loc, const std::string & fname, OutIter && out,
+		const ast::ArrayType * array, const ast::Type * addCast = nullptr,
+		LoopDirection forward = LoopForward
 	) {
 		static UniqueName indexName( "_index" );
@@ -279,6 +278,6 @@
 		} else {
 			// generate: for ( int i = N-1; i >= 0; --i )
-			begin = ast::call( 
-				loc, "?-?", array->dimension, ast::ConstantExpr::from_int( loc, 1 ) );
+			begin = ast::UntypedExpr::createCall( loc, "?-?",
+				{ array->dimension, ast::ConstantExpr::from_int( loc, 1 ) } );
 			end = ast::ConstantExpr::from_int( loc, 0 );
 			cmp = "?>=?";
@@ -286,16 +285,19 @@
 		}
 
-		ast::ptr< ast::DeclWithType > index = new ast::ObjectDecl{ 
-			loc, indexName.newName(), new ast::BasicType{ ast::BasicType::SignedInt }, 
+		ast::ptr< ast::DeclWithType > index = new ast::ObjectDecl{
+			loc, indexName.newName(), new ast::BasicType{ ast::BasicType::SignedInt },
 			new ast::SingleInit{ loc, begin } };
 		ast::ptr< ast::Expr > indexVar = new ast::VariableExpr{ loc, index };
-		
-		ast::ptr< ast::Expr > cond = ast::call( loc, cmp, indexVar, end );
-		
-		ast::ptr< ast::Expr > inc = ast::call( loc, update, indexVar );
-		
-		ast::ptr< ast::Expr > dstIndex = ast::call( loc, "?[?]", dstParam, indexVar );
-		
-		// srcParam must keep track of the array indices to build the source parameter and/or 
+
+		ast::ptr< ast::Expr > cond = ast::UntypedExpr::createCall(
+			loc, cmp, { indexVar, end } );
+
+		ast::ptr< ast::Expr > inc = ast::UntypedExpr::createCall(
+			loc, update, { indexVar } );
+
+		ast::ptr< ast::Expr > dstIndex = ast::UntypedExpr::createCall(
+			loc, "?[?]", { dstParam, indexVar } );
+
+		// srcParam must keep track of the array indices to build the source parameter and/or
 		// array list initializer
 		srcParam.addArrayIndex( indexVar, array->dimension );
@@ -303,8 +305,8 @@
 		// for stmt's body, eventually containing call
 		ast::CompoundStmt * body = new ast::CompoundStmt{ loc };
-		ast::ptr< ast::Stmt > listInit = genCall( 
-			srcParam, dstIndex, loc, fname, std::back_inserter( body->kids ), array->base, addCast, 
+		ast::ptr< ast::Stmt > listInit = genCall(
+			srcParam, dstIndex, loc, fname, std::back_inserter( body->kids ), array->base, addCast,
 			forward );
-		
+
 		// block containing the stmt and index variable
 		ast::CompoundStmt * block = new ast::CompoundStmt{ loc };
@@ -328,15 +330,15 @@
 	template< typename OutIter >
 	ast::ptr< ast::Stmt > genCall(
-		InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam, 
-		const CodeLocation & loc, const std::string & fname, OutIter && out, 
+		InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam,
+		const CodeLocation & loc, const std::string & fname, OutIter && out,
 		const ast::Type * type, const ast::Type * addCast, LoopDirection forward
 	) {
 		if ( auto at = dynamic_cast< const ast::ArrayType * >( type ) ) {
-			genArrayCall( 
-				srcParam, dstParam, loc, fname, std::forward< OutIter >(out), at, addCast, 
+			genArrayCall(
+				srcParam, dstParam, loc, fname, std::forward< OutIter >(out), at, addCast,
 				forward );
 			return {};
 		} else {
-			return genScalarCall( 
+			return genScalarCall(
 				srcParam, dstParam, loc, fname, std::forward< OutIter >( out ), type, addCast );
 		}
@@ -377,8 +379,8 @@
 	}
 
-	static inline ast::ptr< ast::Stmt > genImplicitCall( 
-		InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam, 
-		const CodeLocation & loc, const std::string & fname, const ast::ObjectDecl * obj, 
-		LoopDirection forward = LoopForward 
+	static inline ast::ptr< ast::Stmt > genImplicitCall(
+		InitTweak::InitExpander_new & srcParam, const ast::Expr * dstParam,
+		const CodeLocation & loc, const std::string & fname, const ast::ObjectDecl * obj,
+		LoopDirection forward = LoopForward
 	) {
 		// unnamed bit fields are not copied as they cannot be accessed
@@ -392,5 +394,5 @@
 
 		std::vector< ast::ptr< ast::Stmt > > stmts;
-		genCall( 
+		genCall(
 			srcParam, dstParam, loc, fname, back_inserter( stmts ), obj->type, addCast, forward );
 
@@ -400,5 +402,5 @@
 			const ast::Stmt * callStmt = stmts.front();
 			if ( addCast ) {
-				// implicitly generated ctor/dtor calls should be wrapped so that later passes are 
+				// implicitly generated ctor/dtor calls should be wrapped so that later passes are
 				// aware they were generated.
 				callStmt = new ast::ImplicitCtorDtorStmt{ callStmt->location, callStmt };
@@ -417,3 +419,2 @@
 // compile-command: "make install" //
 // End: //
-
Index: src/SymTab/Demangle.cc
===================================================================
--- src/SymTab/Demangle.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/SymTab/Demangle.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -5,5 +5,5 @@
 // file "LICENCE" distributed with Cforall.
 //
-// Demangler.cc --
+// Demangle.cc -- Convert a mangled name into a human readable name.
 //
 // Author           : Rob Schluntz
Index: src/SymTab/Demangle.h
===================================================================
--- src/SymTab/Demangle.h	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ src/SymTab/Demangle.h	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,26 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2018 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// Demangle.h -- Convert a mangled name into a human readable name.
+//
+// Author           : Andrew Beach
+// Created On       : Fri May 13 10:11:00 2022
+// Last Modified By : Andrew Beach
+// Last Modified On : Fri May 13 10:30:00 2022
+// Update Count     : 0
+//
+
+#pragma once
+
+extern "C" {
+	char * cforall_demangle(const char *, int);
+}
+
+// Local Variables: //
+// tab-width: 4 //
+// mode: c++ //
+// compile-command: "make install" //
+// End: //
Index: src/SymTab/Mangler.h
===================================================================
--- src/SymTab/Mangler.h	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/SymTab/Mangler.h	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -111,8 +111,4 @@
 }
 
-extern "C" {
-	char * cforall_demangle(const char *, int);
-}
-
 // Local Variables: //
 // tab-width: 4 //
Index: src/SymTab/Validate.cc
===================================================================
--- src/SymTab/Validate.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/SymTab/Validate.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,6 +10,6 @@
 // Created On       : Sun May 17 21:50:04 2015
 // Last Modified By : Andrew Beach
-// Last Modified On : Fri Apr 29  9:45:00 2022
-// Update Count     : 365
+// Last Modified On : Tue May 17 14:36:00 2022
+// Update Count     : 366
 //
 
@@ -74,4 +74,5 @@
 #include "ResolvExpr/ResolveTypeof.h"  // for resolveTypeof
 #include "SymTab/Autogen.h"            // for SizeType
+#include "SymTab/ValidateType.h"       // for decayEnumsAndPointers, decayFo...
 #include "SynTree/LinkageSpec.h"       // for C
 #include "SynTree/Attribute.h"         // for noAttributes, Attribute
@@ -134,46 +135,4 @@
 	};
 
-	/// Replaces enum types by int, and function or array types in function parameter and return lists by appropriate pointers.
-	struct EnumAndPointerDecay_old {
-		void previsit( EnumDecl * aggregateDecl );
-		void previsit( FunctionType * func );
-	};
-
-	/// Associates forward declarations of aggregates with their definitions
-	struct LinkReferenceToTypes_old final : public WithIndexer, public WithGuards, public WithVisitorRef<LinkReferenceToTypes_old>, public WithShortCircuiting {
-		LinkReferenceToTypes_old( const Indexer * indexer );
-
-		void postvisit( TypeInstType * typeInst );
-
-		void postvisit( EnumInstType * enumInst );
-		void postvisit( StructInstType * structInst );
-		void postvisit( UnionInstType * unionInst );
-		void postvisit( TraitInstType * traitInst );
-		void previsit( QualifiedType * qualType );
-		void postvisit( QualifiedType * qualType );
-
-		void postvisit( EnumDecl * enumDecl );
-		void postvisit( StructDecl * structDecl );
-		void postvisit( UnionDecl * unionDecl );
-		void postvisit( TraitDecl * traitDecl );
-
-		void previsit( StructDecl * structDecl );
-		void previsit( UnionDecl * unionDecl );
-
-		void renameGenericParams( std::list< TypeDecl * > & params );
-
-	  private:
-		const Indexer * local_indexer;
-
-		typedef std::map< std::string, std::list< EnumInstType * > > ForwardEnumsType;
-		typedef std::map< std::string, std::list< StructInstType * > > ForwardStructsType;
-		typedef std::map< std::string, std::list< UnionInstType * > > ForwardUnionsType;
-		ForwardEnumsType forwardEnums;
-		ForwardStructsType forwardStructs;
-		ForwardUnionsType forwardUnions;
-		/// true if currently in a generic type body, so that type parameter instances can be renamed appropriately
-		bool inGeneric = false;
-	};
-
 	/// Does early resolution on the expressions that give enumeration constants their values
 	struct ResolveEnumInitializers final : public WithIndexer, public WithGuards, public WithVisitorRef<ResolveEnumInitializers>, public WithShortCircuiting {
@@ -193,26 +152,4 @@
 		void previsit( StructDecl * aggrDecl );
 		void previsit( UnionDecl * aggrDecl );
-	};
-
-	// These structs are the sub-sub-passes of ForallPointerDecay_old.
-
-	struct TraitExpander_old final {
-		void previsit( FunctionType * );
-		void previsit( StructDecl * );
-		void previsit( UnionDecl * );
-	};
-
-	struct AssertionFixer_old final {
-		void previsit( FunctionType * );
-		void previsit( StructDecl * );
-		void previsit( UnionDecl * );
-	};
-
-	struct CheckOperatorTypes_old final {
-		void previsit( ObjectDecl * );
-	};
-
-	struct FixUniqueIds_old final {
-		void previsit( DeclarationWithType * );
 	};
 
@@ -358,5 +295,4 @@
 
 	void validate_A( std::list< Declaration * > & translationUnit ) {
-		PassVisitor<EnumAndPointerDecay_old> epc;
 		PassVisitor<HoistTypeDecls> hoistDecls;
 		{
@@ -367,11 +303,6 @@
 			ReplaceTypedef::replaceTypedef( translationUnit );
 			ReturnTypeFixer::fix( translationUnit ); // must happen before autogen
-			acceptAll( translationUnit, epc ); // must happen before VerifyCtorDtorAssign, because void return objects should not exist; before LinkReferenceToTypes_old because it is an indexer and needs correct types for mangling
-		}
-	}
-
-	void linkReferenceToTypes( std::list< Declaration * > & translationUnit ) {
-		PassVisitor<LinkReferenceToTypes_old> lrt( nullptr );
-		acceptAll( translationUnit, lrt ); // must happen before autogen, because sized flag needs to propagate to generated functions
+			decayEnumsAndPointers( translationUnit ); // must happen before VerifyCtorDtorAssign, because void return objects should not exist; before LinkReferenceToTypes_old because it is an indexer and needs correct types for mangling
+		}
 	}
 
@@ -412,15 +343,4 @@
 			});
 		}
-	}
-
-	static void decayForallPointers( std::list< Declaration * > & translationUnit ) {
-		PassVisitor<TraitExpander_old> te;
-		acceptAll( translationUnit, te );
-		PassVisitor<AssertionFixer_old> af;
-		acceptAll( translationUnit, af );
-		PassVisitor<CheckOperatorTypes_old> cot;
-		acceptAll( translationUnit, cot );
-		PassVisitor<FixUniqueIds_old> fui;
-		acceptAll( translationUnit, fui );
 	}
 
@@ -501,19 +421,4 @@
 	}
 
-	void validateType( Type * type, const Indexer * indexer ) {
-		PassVisitor<EnumAndPointerDecay_old> epc;
-		PassVisitor<LinkReferenceToTypes_old> lrt( indexer );
-		PassVisitor<TraitExpander_old> te;
-		PassVisitor<AssertionFixer_old> af;
-		PassVisitor<CheckOperatorTypes_old> cot;
-		PassVisitor<FixUniqueIds_old> fui;
-		type->accept( epc );
-		type->accept( lrt );
-		type->accept( te );
-		type->accept( af );
-		type->accept( cot );
-		type->accept( fui );
-	}
-
 	void HoistTypeDecls::handleType( Type * type ) {
 		// some type declarations are buried in expressions and not easy to hoist during parsing; hoist them here
@@ -708,124 +613,4 @@
 	}
 
-	void EnumAndPointerDecay_old::previsit( EnumDecl * enumDecl ) {
-		// Set the type of each member of the enumeration to be EnumConstant
-		for ( std::list< Declaration * >::iterator i = enumDecl->members.begin(); i != enumDecl->members.end(); ++i ) {
-			ObjectDecl * obj = dynamic_cast< ObjectDecl * >( * i );
-			assert( obj );
-			obj->set_type( new EnumInstType( Type::Qualifiers( Type::Const ), enumDecl->name ) );
-		} // for
-	}
-
-	namespace {
-		template< typename DWTList >
-		void fixFunctionList( DWTList & dwts, bool isVarArgs, FunctionType * func ) {
-			auto nvals = dwts.size();
-			bool containsVoid = false;
-			for ( auto & dwt : dwts ) {
-				// fix each DWT and record whether a void was found
-				containsVoid |= fixFunction( dwt );
-			}
-
-			// the only case in which "void" is valid is where it is the only one in the list
-			if ( containsVoid && ( nvals > 1 || isVarArgs ) ) {
-				SemanticError( func, "invalid type void in function type " );
-			}
-
-			// one void is the only thing in the list; remove it.
-			if ( containsVoid ) {
-				delete dwts.front();
-				dwts.clear();
-			}
-		}
-	}
-
-	void EnumAndPointerDecay_old::previsit( FunctionType * func ) {
-		// Fix up parameters and return types
-		fixFunctionList( func->parameters, func->isVarArgs, func );
-		fixFunctionList( func->returnVals, false, func );
-	}
-
-	LinkReferenceToTypes_old::LinkReferenceToTypes_old( const Indexer * other_indexer ) : WithIndexer( false ) {
-		if ( other_indexer ) {
-			local_indexer = other_indexer;
-		} else {
-			local_indexer = &indexer;
-		} // if
-	}
-
-	void LinkReferenceToTypes_old::postvisit( EnumInstType * enumInst ) {
-		const EnumDecl * st = local_indexer->lookupEnum( enumInst->name );
-		// it's not a semantic error if the enum is not found, just an implicit forward declaration
-		if ( st ) {
-			enumInst->baseEnum = const_cast<EnumDecl *>(st); // Just linking in the node
-		} // if
-		if ( ! st || ! st->body ) {
-			// use of forward declaration
-			forwardEnums[ enumInst->name ].push_back( enumInst );
-		} // if
-	}
-	void LinkReferenceToTypes_old::postvisit( StructInstType * structInst ) {
-		const StructDecl * st = local_indexer->lookupStruct( structInst->name );
-		// it's not a semantic error if the struct is not found, just an implicit forward declaration
-		if ( st ) {
-			structInst->baseStruct = const_cast<StructDecl *>(st); // Just linking in the node
-		} // if
-		if ( ! st || ! st->body ) {
-			// use of forward declaration
-			forwardStructs[ structInst->name ].push_back( structInst );
-		} // if
-	}
-
-	void LinkReferenceToTypes_old::postvisit( UnionInstType * unionInst ) {
-		const UnionDecl * un = local_indexer->lookupUnion( unionInst->name );
-		// it's not a semantic error if the union is not found, just an implicit forward declaration
-		if ( un ) {
-			unionInst->baseUnion = const_cast<UnionDecl *>(un); // Just linking in the node
-		} // if
-		if ( ! un || ! un->body ) {
-			// use of forward declaration
-			forwardUnions[ unionInst->name ].push_back( unionInst );
-		} // if
-	}
-
-	void LinkReferenceToTypes_old::previsit( QualifiedType * ) {
-		visit_children = false;
-	}
-
-	void LinkReferenceToTypes_old::postvisit( QualifiedType * qualType ) {
-		// linking only makes sense for the 'oldest ancestor' of the qualified type
-		qualType->parent->accept( * visitor );
-	}
-
-	template< typename Decl >
-	void normalizeAssertions( std::list< Decl * > & assertions ) {
-		// ensure no duplicate trait members after the clone
-		auto pred = [](Decl * d1, Decl * d2) {
-			// only care if they're equal
-			DeclarationWithType * dwt1 = dynamic_cast<DeclarationWithType *>( d1 );
-			DeclarationWithType * dwt2 = dynamic_cast<DeclarationWithType *>( d2 );
-			if ( dwt1 && dwt2 ) {
-				if ( dwt1->name == dwt2->name && ResolvExpr::typesCompatible( dwt1->get_type(), dwt2->get_type(), SymTab::Indexer() ) ) {
-					// std::cerr << "=========== equal:" << std::endl;
-					// std::cerr << "d1: " << d1 << std::endl;
-					// std::cerr << "d2: " << d2 << std::endl;
-					return false;
-				}
-			}
-			return d1 < d2;
-		};
-		std::set<Decl *, decltype(pred)> unique_members( assertions.begin(), assertions.end(), pred );
-		// if ( unique_members.size() != assertions.size() ) {
-		// 	std::cerr << "============different" << std::endl;
-		// 	std::cerr << unique_members.size() << " " << assertions.size() << std::endl;
-		// }
-
-		std::list< Decl * > order;
-		order.splice( order.end(), assertions );
-		std::copy_if( order.begin(), order.end(), back_inserter( assertions ), [&]( Decl * decl ) {
-			return unique_members.count( decl );
-		});
-	}
-
 	// expand assertions from trait instance, performing the appropriate type variable substitutions
 	template< typename Iterator >
@@ -838,135 +623,4 @@
 		// substitute trait decl parameters for instance parameters
 		applySubstitution( inst->baseTrait->parameters.begin(), inst->baseTrait->parameters.end(), inst->parameters.begin(), asserts.begin(), asserts.end(), out );
-	}
-
-	void LinkReferenceToTypes_old::postvisit( TraitDecl * traitDecl ) {
-		if ( traitDecl->name == "sized" ) {
-			// "sized" is a special trait - flick the sized status on for the type variable
-			assertf( traitDecl->parameters.size() == 1, "Built-in trait 'sized' has incorrect number of parameters: %zd", traitDecl->parameters.size() );
-			TypeDecl * td = traitDecl->parameters.front();
-			td->set_sized( true );
-		}
-
-		// move assertions from type parameters into the body of the trait
-		for ( TypeDecl * td : traitDecl->parameters ) {
-			for ( DeclarationWithType * assert : td->assertions ) {
-				if ( TraitInstType * inst = dynamic_cast< TraitInstType * >( assert->get_type() ) ) {
-					expandAssertions( inst, back_inserter( traitDecl->members ) );
-				} else {
-					traitDecl->members.push_back( assert->clone() );
-				}
-			}
-			deleteAll( td->assertions );
-			td->assertions.clear();
-		} // for
-	}
-
-	void LinkReferenceToTypes_old::postvisit( TraitInstType * traitInst ) {
-		// handle other traits
-		const TraitDecl * traitDecl = local_indexer->lookupTrait( traitInst->name );
-		if ( ! traitDecl ) {
-			SemanticError( traitInst->location, "use of undeclared trait " + traitInst->name );
-		} // if
-		if ( traitDecl->parameters.size() != traitInst->parameters.size() ) {
-			SemanticError( traitInst, "incorrect number of trait parameters: " );
-		} // if
-		traitInst->baseTrait = const_cast<TraitDecl *>(traitDecl); // Just linking in the node
-
-		// need to carry over the 'sized' status of each decl in the instance
-		for ( auto p : group_iterate( traitDecl->parameters, traitInst->parameters ) ) {
-			TypeExpr * expr = dynamic_cast< TypeExpr * >( std::get<1>(p) );
-			if ( ! expr ) {
-				SemanticError( std::get<1>(p), "Expression parameters for trait instances are currently unsupported: " );
-			}
-			if ( TypeInstType * inst = dynamic_cast< TypeInstType * >( expr->get_type() ) ) {
-				TypeDecl * formalDecl = std::get<0>(p);
-				TypeDecl * instDecl = inst->baseType;
-				if ( formalDecl->get_sized() ) instDecl->set_sized( true );
-			}
-		}
-		// normalizeAssertions( traitInst->members );
-	}
-
-	void LinkReferenceToTypes_old::postvisit( EnumDecl * enumDecl ) {
-		// visit enum members first so that the types of self-referencing members are updated properly
-		// Replace the enum base; right now it works only for StructEnum
-		if ( enumDecl->base && dynamic_cast<TypeInstType*>(enumDecl->base) ) {
-			std::string baseName = static_cast<TypeInstType*>(enumDecl->base)->name;
-			const StructDecl * st = local_indexer->lookupStruct( baseName );
-			if ( st ) {
-				enumDecl->base = new StructInstType(Type::Qualifiers(),const_cast<StructDecl *>(st)); // Just linking in the node
-			}
-		}
-		if ( enumDecl->body ) {
-			ForwardEnumsType::iterator fwds = forwardEnums.find( enumDecl->name );
-			if ( fwds != forwardEnums.end() ) {
-				for ( std::list< EnumInstType * >::iterator inst = fwds->second.begin(); inst != fwds->second.end(); ++inst ) {
-					(* inst)->baseEnum = enumDecl;
-				} // for
-				forwardEnums.erase( fwds );
-			} // if
-		} // if
-	}
-
-	void LinkReferenceToTypes_old::renameGenericParams( std::list< TypeDecl * > & params ) {
-		// rename generic type parameters uniquely so that they do not conflict with user-defined function forall parameters, e.g.
-		//   forall(otype T)
-		//   struct Box {
-		//     T x;
-		//   };
-		//   forall(otype T)
-		//   void f(Box(T) b) {
-		//     ...
-		//   }
-		// The T in Box and the T in f are different, so internally the naming must reflect that.
-		GuardValue( inGeneric );
-		inGeneric = ! params.empty();
-		for ( TypeDecl * td : params ) {
-			td->name = "__" + td->name + "_generic_";
-		}
-	}
-
-	void LinkReferenceToTypes_old::previsit( StructDecl * structDecl ) {
-		renameGenericParams( structDecl->parameters );
-	}
-
-	void LinkReferenceToTypes_old::previsit( UnionDecl * unionDecl ) {
-		renameGenericParams( unionDecl->parameters );
-	}
-
-	void LinkReferenceToTypes_old::postvisit( StructDecl * structDecl ) {
-		// visit struct members first so that the types of self-referencing members are updated properly
-		// xxx - need to ensure that type parameters match up between forward declarations and definition (most importantly, number of type parameters and their defaults)
-		if ( structDecl->body ) {
-			ForwardStructsType::iterator fwds = forwardStructs.find( structDecl->name );
-			if ( fwds != forwardStructs.end() ) {
-				for ( std::list< StructInstType * >::iterator inst = fwds->second.begin(); inst != fwds->second.end(); ++inst ) {
-					(* inst)->baseStruct = structDecl;
-				} // for
-				forwardStructs.erase( fwds );
-			} // if
-		} // if
-	}
-
-	void LinkReferenceToTypes_old::postvisit( UnionDecl * unionDecl ) {
-		if ( unionDecl->body ) {
-			ForwardUnionsType::iterator fwds = forwardUnions.find( unionDecl->name );
-			if ( fwds != forwardUnions.end() ) {
-				for ( std::list< UnionInstType * >::iterator inst = fwds->second.begin(); inst != fwds->second.end(); ++inst ) {
-					(* inst)->baseUnion = unionDecl;
-				} // for
-				forwardUnions.erase( fwds );
-			} // if
-		} // if
-	}
-
-	void LinkReferenceToTypes_old::postvisit( TypeInstType * typeInst ) {
-		// ensure generic parameter instances are renamed like the base type
-		if ( inGeneric && typeInst->baseType ) typeInst->name = typeInst->baseType->name;
-		if ( const NamedTypeDecl * namedTypeDecl = local_indexer->lookupType( typeInst->name ) ) {
-			if ( const TypeDecl * typeDecl = dynamic_cast< const TypeDecl * >( namedTypeDecl ) ) {
-				typeInst->set_isFtype( typeDecl->kind == TypeDecl::Ftype );
-			} // if
-		} // if
 	}
 
@@ -997,5 +651,4 @@
 						}
 					}
-					
 				}
 			}
@@ -1085,39 +738,4 @@
 	void ForallPointerDecay_old::previsit( UnionDecl * aggrDecl ) {
 		forallFixer( aggrDecl->parameters, aggrDecl );
-	}
-
-	void TraitExpander_old::previsit( FunctionType * ftype ) {
-		expandTraits( ftype->forall );
-	}
-
-	void TraitExpander_old::previsit( StructDecl * aggrDecl ) {
-		expandTraits( aggrDecl->parameters );
-	}
-
-	void TraitExpander_old::previsit( UnionDecl * aggrDecl ) {
-		expandTraits( aggrDecl->parameters );
-	}
-
-	void AssertionFixer_old::previsit( FunctionType * ftype ) {
-		fixAssertions( ftype->forall, ftype );
-	}
-
-	void AssertionFixer_old::previsit( StructDecl * aggrDecl ) {
-		fixAssertions( aggrDecl->parameters, aggrDecl );
-	}
-
-	void AssertionFixer_old::previsit( UnionDecl * aggrDecl ) {
-		fixAssertions( aggrDecl->parameters, aggrDecl );
-	}
-
-	void CheckOperatorTypes_old::previsit( ObjectDecl * object ) {
-		// ensure that operator names only apply to functions or function pointers
-		if ( CodeGen::isOperator( object->name ) && ! dynamic_cast< FunctionType * >( object->type->stripDeclarator() ) ) {
-			SemanticError( object->location, toCString( "operator ", object->name.c_str(), " is not a function or function pointer." )  );
-		}
-	}
-
-	void FixUniqueIds_old::previsit( DeclarationWithType * decl ) {
-		decl->fixUniqueId();
 	}
 
Index: src/SymTab/Validate.h
===================================================================
--- src/SymTab/Validate.h	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/SymTab/Validate.h	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,7 +10,7 @@
 // Author           : Richard C. Bilson
 // Created On       : Sun May 17 21:53:34 2015
-// Last Modified By : Peter A. Buhr
-// Last Modified On : Sat Jul 22 09:46:07 2017
-// Update Count     : 4
+// Last Modified By : Andrew Beach
+// Last Modified On : Tue May 17 14:35:00 2022
+// Update Count     : 5
 //
 
@@ -33,5 +33,4 @@
 	/// Normalizes struct and function declarations
 	void validate( std::list< Declaration * > &translationUnit, bool doDebug = false );
-	void validateType( Type *type, const Indexer *indexer );
 
 	// Sub-passes of validate.
@@ -42,8 +41,4 @@
 	void validate_E( std::list< Declaration * > &translationUnit );
 	void validate_F( std::list< Declaration * > &translationUnit );
-	void linkReferenceToTypes( std::list< Declaration * > &translationUnit );
-
-	const ast::Type * validateType(
-		const CodeLocation & loc, const ast::Type * type, const ast::SymbolTable & symtab );
 } // namespace SymTab
 
Index: src/SymTab/ValidateType.cc
===================================================================
--- src/SymTab/ValidateType.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ src/SymTab/ValidateType.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,476 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// ValidateType.cc -- Validate and normalize types.
+//
+// Author           : Andrew Beach
+// Created On       : Mon May 16 16:21:00 2022
+// Last Modified By : Andrew Beach
+// Last Modified On : Tue May 17 14:06:00 2022
+// Update Count     : 0
+//
+
+#include "ValidateType.h"
+
+#include "CodeGen/OperatorTable.h"
+#include "Common/PassVisitor.h"
+#include "SymTab/FixFunction.h"
+#include "SynTree/Declaration.h"
+#include "SynTree/Type.h"
+
+namespace SymTab {
+
+namespace {
+
+/// Replaces enum types by int, and function or array types in function
+/// parameter and return lists by appropriate pointers.
+struct EnumAndPointerDecay_old {
+	void previsit( EnumDecl * aggregateDecl );
+	void previsit( FunctionType * func );
+};
+
+void EnumAndPointerDecay_old::previsit( EnumDecl * enumDecl ) {
+	// Set the type of each member of the enumeration to be EnumConstant
+	for ( std::list< Declaration * >::iterator i = enumDecl->members.begin(); i != enumDecl->members.end(); ++i ) {
+		ObjectDecl * obj = dynamic_cast< ObjectDecl * >( * i );
+		assert( obj );
+		obj->set_type( new EnumInstType( Type::Qualifiers( Type::Const ), enumDecl->name ) );
+	} // for
+}
+
+template< typename DWTList >
+void fixFunctionList( DWTList & dwts, bool isVarArgs, FunctionType * func ) {
+	auto nvals = dwts.size();
+	bool containsVoid = false;
+	for ( auto & dwt : dwts ) {
+		// fix each DWT and record whether a void was found
+		containsVoid |= fixFunction( dwt );
+	}
+
+	// the only case in which "void" is valid is where it is the only one in the list
+	if ( containsVoid && ( nvals > 1 || isVarArgs ) ) {
+		SemanticError( func, "invalid type void in function type " );
+	}
+
+	// one void is the only thing in the list; remove it.
+	if ( containsVoid ) {
+		delete dwts.front();
+		dwts.clear();
+	}
+}
+
+void EnumAndPointerDecay_old::previsit( FunctionType * func ) {
+	// Fix up parameters and return types
+	fixFunctionList( func->parameters, func->isVarArgs, func );
+	fixFunctionList( func->returnVals, false, func );
+}
+
+/// Associates forward declarations of aggregates with their definitions
+struct LinkReferenceToTypes_old final : public WithIndexer, public WithGuards, public WithVisitorRef<LinkReferenceToTypes_old>, public WithShortCircuiting {
+	LinkReferenceToTypes_old( const Indexer * indexer );
+
+	void postvisit( TypeInstType * typeInst );
+
+	void postvisit( EnumInstType * enumInst );
+	void postvisit( StructInstType * structInst );
+	void postvisit( UnionInstType * unionInst );
+	void postvisit( TraitInstType * traitInst );
+	void previsit( QualifiedType * qualType );
+	void postvisit( QualifiedType * qualType );
+
+	void postvisit( EnumDecl * enumDecl );
+	void postvisit( StructDecl * structDecl );
+	void postvisit( UnionDecl * unionDecl );
+	void postvisit( TraitDecl * traitDecl );
+
+	void previsit( StructDecl * structDecl );
+	void previsit( UnionDecl * unionDecl );
+
+	void renameGenericParams( std::list< TypeDecl * > & params );
+
+private:
+	const Indexer * local_indexer;
+
+	typedef std::map< std::string, std::list< EnumInstType * > > ForwardEnumsType;
+	typedef std::map< std::string, std::list< StructInstType * > > ForwardStructsType;
+	typedef std::map< std::string, std::list< UnionInstType * > > ForwardUnionsType;
+	ForwardEnumsType forwardEnums;
+	ForwardStructsType forwardStructs;
+	ForwardUnionsType forwardUnions;
+	/// true if currently in a generic type body, so that type parameter instances can be renamed appropriately
+	bool inGeneric = false;
+};
+
+
+LinkReferenceToTypes_old::LinkReferenceToTypes_old( const Indexer * other_indexer ) : WithIndexer( false ) {
+	if ( other_indexer ) {
+		local_indexer = other_indexer;
+	} else {
+		local_indexer = &indexer;
+	} // if
+}
+
+void LinkReferenceToTypes_old::postvisit( EnumInstType * enumInst ) {
+	const EnumDecl * st = local_indexer->lookupEnum( enumInst->name );
+	// it's not a semantic error if the enum is not found, just an implicit forward declaration
+	if ( st ) {
+		enumInst->baseEnum = const_cast<EnumDecl *>(st); // Just linking in the node
+	} // if
+	if ( ! st || ! st->body ) {
+		// use of forward declaration
+		forwardEnums[ enumInst->name ].push_back( enumInst );
+	} // if
+}
+
+void LinkReferenceToTypes_old::postvisit( StructInstType * structInst ) {
+	const StructDecl * st = local_indexer->lookupStruct( structInst->name );
+	// it's not a semantic error if the struct is not found, just an implicit forward declaration
+	if ( st ) {
+		structInst->baseStruct = const_cast<StructDecl *>(st); // Just linking in the node
+	} // if
+	if ( ! st || ! st->body ) {
+		// use of forward declaration
+		forwardStructs[ structInst->name ].push_back( structInst );
+	} // if
+}
+
+void LinkReferenceToTypes_old::postvisit( UnionInstType * unionInst ) {
+	const UnionDecl * un = local_indexer->lookupUnion( unionInst->name );
+	// it's not a semantic error if the union is not found, just an implicit forward declaration
+	if ( un ) {
+		unionInst->baseUnion = const_cast<UnionDecl *>(un); // Just linking in the node
+	} // if
+	if ( ! un || ! un->body ) {
+		// use of forward declaration
+		forwardUnions[ unionInst->name ].push_back( unionInst );
+	} // if
+}
+
+void LinkReferenceToTypes_old::previsit( QualifiedType * ) {
+	visit_children = false;
+}
+
+void LinkReferenceToTypes_old::postvisit( QualifiedType * qualType ) {
+	// linking only makes sense for the 'oldest ancestor' of the qualified type
+	qualType->parent->accept( * visitor );
+}
+
+// expand assertions from trait instance, performing the appropriate type variable substitutions
+template< typename Iterator >
+void expandAssertions( TraitInstType * inst, Iterator out ) {
+	assertf( inst->baseTrait, "Trait instance not linked to base trait: %s", toCString( inst ) );
+	std::list< DeclarationWithType * > asserts;
+	for ( Declaration * decl : inst->baseTrait->members ) {
+		asserts.push_back( strict_dynamic_cast<DeclarationWithType *>( decl->clone() ) );
+	}
+	// substitute trait decl parameters for instance parameters
+	applySubstitution( inst->baseTrait->parameters.begin(), inst->baseTrait->parameters.end(), inst->parameters.begin(), asserts.begin(), asserts.end(), out );
+}
+
+void LinkReferenceToTypes_old::postvisit( TraitDecl * traitDecl ) {
+	if ( traitDecl->name == "sized" ) {
+		// "sized" is a special trait - flick the sized status on for the type variable
+		assertf( traitDecl->parameters.size() == 1, "Built-in trait 'sized' has incorrect number of parameters: %zd", traitDecl->parameters.size() );
+		TypeDecl * td = traitDecl->parameters.front();
+		td->set_sized( true );
+	}
+
+	// move assertions from type parameters into the body of the trait
+	for ( TypeDecl * td : traitDecl->parameters ) {
+		for ( DeclarationWithType * assert : td->assertions ) {
+			if ( TraitInstType * inst = dynamic_cast< TraitInstType * >( assert->get_type() ) ) {
+				expandAssertions( inst, back_inserter( traitDecl->members ) );
+			} else {
+				traitDecl->members.push_back( assert->clone() );
+			}
+		}
+		deleteAll( td->assertions );
+		td->assertions.clear();
+	} // for
+}
+
+void LinkReferenceToTypes_old::postvisit( TraitInstType * traitInst ) {
+	// handle other traits
+	const TraitDecl * traitDecl = local_indexer->lookupTrait( traitInst->name );
+	if ( ! traitDecl ) {
+		SemanticError( traitInst->location, "use of undeclared trait " + traitInst->name );
+	} // if
+	if ( traitDecl->parameters.size() != traitInst->parameters.size() ) {
+		SemanticError( traitInst, "incorrect number of trait parameters: " );
+	} // if
+	traitInst->baseTrait = const_cast<TraitDecl *>(traitDecl); // Just linking in the node
+
+	// need to carry over the 'sized' status of each decl in the instance
+	for ( auto p : group_iterate( traitDecl->parameters, traitInst->parameters ) ) {
+		TypeExpr * expr = dynamic_cast< TypeExpr * >( std::get<1>(p) );
+		if ( ! expr ) {
+			SemanticError( std::get<1>(p), "Expression parameters for trait instances are currently unsupported: " );
+		}
+		if ( TypeInstType * inst = dynamic_cast< TypeInstType * >( expr->get_type() ) ) {
+			TypeDecl * formalDecl = std::get<0>(p);
+			TypeDecl * instDecl = inst->baseType;
+			if ( formalDecl->get_sized() ) instDecl->set_sized( true );
+		}
+	}
+	// normalizeAssertions( traitInst->members );
+}
+
+void LinkReferenceToTypes_old::postvisit( EnumDecl * enumDecl ) {
+	// visit enum members first so that the types of self-referencing members are updated properly
+	// Replace the enum base; right now it works only for StructEnum
+	if ( enumDecl->base && dynamic_cast<TypeInstType*>(enumDecl->base) ) {
+		std::string baseName = static_cast<TypeInstType*>(enumDecl->base)->name;
+		const StructDecl * st = local_indexer->lookupStruct( baseName );
+		if ( st ) {
+			enumDecl->base = new StructInstType(Type::Qualifiers(),const_cast<StructDecl *>(st)); // Just linking in the node
+		}
+	}
+	if ( enumDecl->body ) {
+		ForwardEnumsType::iterator fwds = forwardEnums.find( enumDecl->name );
+		if ( fwds != forwardEnums.end() ) {
+			for ( std::list< EnumInstType * >::iterator inst = fwds->second.begin(); inst != fwds->second.end(); ++inst ) {
+				(* inst)->baseEnum = enumDecl;
+			} // for
+			forwardEnums.erase( fwds );
+		} // if
+	} // if
+}
+
+void LinkReferenceToTypes_old::renameGenericParams( std::list< TypeDecl * > & params ) {
+	// rename generic type parameters uniquely so that they do not conflict with user-defined function forall parameters, e.g.
+	//   forall(otype T)
+	//   struct Box {
+	//     T x;
+	//   };
+	//   forall(otype T)
+	//   void f(Box(T) b) {
+	//     ...
+	//   }
+	// The T in Box and the T in f are different, so internally the naming must reflect that.
+	GuardValue( inGeneric );
+	inGeneric = ! params.empty();
+	for ( TypeDecl * td : params ) {
+		td->name = "__" + td->name + "_generic_";
+	}
+}
+
+void LinkReferenceToTypes_old::previsit( StructDecl * structDecl ) {
+	renameGenericParams( structDecl->parameters );
+}
+
+void LinkReferenceToTypes_old::previsit( UnionDecl * unionDecl ) {
+	renameGenericParams( unionDecl->parameters );
+}
+
+void LinkReferenceToTypes_old::postvisit( StructDecl * structDecl ) {
+	// visit struct members first so that the types of self-referencing members are updated properly
+	// xxx - need to ensure that type parameters match up between forward declarations and definition (most importantly, number of type parameters and their defaults)
+	if ( structDecl->body ) {
+		ForwardStructsType::iterator fwds = forwardStructs.find( structDecl->name );
+		if ( fwds != forwardStructs.end() ) {
+			for ( std::list< StructInstType * >::iterator inst = fwds->second.begin(); inst != fwds->second.end(); ++inst ) {
+				(* inst)->baseStruct = structDecl;
+			} // for
+			forwardStructs.erase( fwds );
+		} // if
+	} // if
+}
+
+void LinkReferenceToTypes_old::postvisit( UnionDecl * unionDecl ) {
+	if ( unionDecl->body ) {
+		ForwardUnionsType::iterator fwds = forwardUnions.find( unionDecl->name );
+		if ( fwds != forwardUnions.end() ) {
+			for ( std::list< UnionInstType * >::iterator inst = fwds->second.begin(); inst != fwds->second.end(); ++inst ) {
+				(* inst)->baseUnion = unionDecl;
+			} // for
+			forwardUnions.erase( fwds );
+		} // if
+	} // if
+}
+
+void LinkReferenceToTypes_old::postvisit( TypeInstType * typeInst ) {
+	// ensure generic parameter instances are renamed like the base type
+	if ( inGeneric && typeInst->baseType ) typeInst->name = typeInst->baseType->name;
+	if ( const NamedTypeDecl * namedTypeDecl = local_indexer->lookupType( typeInst->name ) ) {
+		if ( const TypeDecl * typeDecl = dynamic_cast< const TypeDecl * >( namedTypeDecl ) ) {
+			typeInst->set_isFtype( typeDecl->kind == TypeDecl::Ftype );
+		} // if
+	} // if
+}
+
+/* // expand assertions from trait instance, performing the appropriate type variable substitutions
+template< typename Iterator >
+void expandAssertions( TraitInstType * inst, Iterator out ) {
+	assertf( inst->baseTrait, "Trait instance not linked to base trait: %s", toCString( inst ) );
+	std::list< DeclarationWithType * > asserts;
+	for ( Declaration * decl : inst->baseTrait->members ) {
+		asserts.push_back( strict_dynamic_cast<DeclarationWithType *>( decl->clone() ) );
+	}
+	// substitute trait decl parameters for instance parameters
+	applySubstitution( inst->baseTrait->parameters.begin(), inst->baseTrait->parameters.end(), inst->parameters.begin(), asserts.begin(), asserts.end(), out );
+}*/
+
+/// Replace all traits in assertion lists with their assertions.
+void expandTraits( std::list< TypeDecl * > & forall ) {
+	for ( TypeDecl * type : forall ) {
+		std::list< DeclarationWithType * > asserts;
+		asserts.splice( asserts.end(), type->assertions );
+		// expand trait instances into their members
+		for ( DeclarationWithType * assertion : asserts ) {
+			if ( TraitInstType * traitInst = dynamic_cast< TraitInstType * >( assertion->get_type() ) ) {
+				// expand trait instance into all of its members
+				expandAssertions( traitInst, back_inserter( type->assertions ) );
+				delete traitInst;
+			} else {
+				// pass other assertions through
+				type->assertions.push_back( assertion );
+			} // if
+		} // for
+	} // for
+}
+
+struct TraitExpander_old final {
+	void previsit( FunctionType * type ) {
+		expandTraits( type->forall );
+	}
+	void previsit( StructDecl * decl ) {
+		expandTraits( decl->parameters );
+	}
+	void previsit( UnionDecl * decl ) {
+		expandTraits( decl->parameters );
+	}
+};
+
+/*struct TraitExpander_old final {
+	void previsit( FunctionType * );
+	void previsit( StructDecl * );
+	void previsit( UnionDecl * );
+};
+
+void TraitExpander_old::previsit( FunctionType * ftype ) {
+	expandTraits( ftype->forall );
+}
+
+void TraitExpander_old::previsit( StructDecl * aggrDecl ) {
+	expandTraits( aggrDecl->parameters );
+}
+
+void TraitExpander_old::previsit( UnionDecl * aggrDecl ) {
+	expandTraits( aggrDecl->parameters );
+}*/
+
+/// Fix each function in the assertion list and check for invalid void type.
+void fixAssertions(
+		std::list< TypeDecl * > & forall, BaseSyntaxNode * node ) {
+	for ( TypeDecl * type : forall ) {
+		for ( DeclarationWithType *& assertion : type->assertions ) {
+			bool isVoid = fixFunction( assertion );
+			if ( isVoid ) {
+				SemanticError( node, "invalid type void in assertion of function " );
+			} // if
+		} // for
+	}
+}
+
+struct AssertionFixer_old final {
+	void previsit( FunctionType * type ) {
+		fixAssertions( type->forall, type );
+	}
+	void previsit( StructDecl * decl ) {
+		fixAssertions( decl->parameters, decl );
+	}
+	void previsit( UnionDecl * decl ) {
+		fixAssertions( decl->parameters, decl );
+	}
+};
+
+/*
+struct AssertionFixer_old final {
+	void previsit( FunctionType * );
+	void previsit( StructDecl * );
+	void previsit( UnionDecl * );
+};
+
+void AssertionFixer_old::previsit( FunctionType * ftype ) {
+	fixAssertions( ftype->forall, ftype );
+}
+
+void AssertionFixer_old::previsit( StructDecl * aggrDecl ) {
+	fixAssertions( aggrDecl->parameters, aggrDecl );
+}
+
+void AssertionFixer_old::previsit( UnionDecl * aggrDecl ) {
+	fixAssertions( aggrDecl->parameters, aggrDecl );
+}*/
+
+struct CheckOperatorTypes_old final {
+	void previsit( ObjectDecl * );
+};
+
+void CheckOperatorTypes_old::previsit( ObjectDecl * object ) {
+	// ensure that operator names only apply to functions or function pointers
+	if ( CodeGen::isOperator( object->name ) && ! dynamic_cast< FunctionType * >( object->type->stripDeclarator() ) ) {
+		SemanticError( object->location, toCString( "operator ", object->name.c_str(), " is not a function or function pointer." )  );
+	}
+}
+
+struct FixUniqueIds_old final {
+	void previsit( DeclarationWithType * decl ) {
+		decl->fixUniqueId();
+	}
+};
+
+//void FixUniqueIds_old::previsit( DeclarationWithType * decl ) {
+//	decl->fixUniqueId();
+//}
+
+
+} // namespace
+
+void validateType( Type *type, const Indexer *indexer ) {
+	PassVisitor<EnumAndPointerDecay_old> epc;
+	PassVisitor<LinkReferenceToTypes_old> lrt( indexer );
+	PassVisitor<TraitExpander_old> te;
+	PassVisitor<AssertionFixer_old> af;
+	PassVisitor<CheckOperatorTypes_old> cot;
+	PassVisitor<FixUniqueIds_old> fui;
+	type->accept( epc );
+	type->accept( lrt );
+	type->accept( te );
+	type->accept( af );
+	type->accept( cot );
+	type->accept( fui );
+}
+
+void decayEnumsAndPointers( std::list< Declaration * > & translationUnit ) {
+	PassVisitor<EnumAndPointerDecay_old> epc;
+	acceptAll( translationUnit, epc );
+}
+
+void linkReferenceToTypes( std::list< Declaration * > & translationUnit ) {
+	PassVisitor<LinkReferenceToTypes_old> lrt( nullptr );
+	acceptAll( translationUnit, lrt ); // must happen before autogen, because sized flag needs to propagate to generated functions
+}
+
+void decayForallPointers( std::list< Declaration * > & translationUnit ) {
+	PassVisitor<TraitExpander_old> te;
+	acceptAll( translationUnit, te );
+	PassVisitor<AssertionFixer_old> af;
+	acceptAll( translationUnit, af );
+	PassVisitor<CheckOperatorTypes_old> cot;
+	acceptAll( translationUnit, cot );
+	PassVisitor<FixUniqueIds_old> fui;
+	acceptAll( translationUnit, fui );
+}
+
+
+} // namespace SymTab
+
+// Local Variables: //
+// tab-width: 4 //
+// mode: c++ //
+// compile-command: "make install" //
+// End: //
Index: src/SymTab/ValidateType.h
===================================================================
--- src/SymTab/ValidateType.h	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ src/SymTab/ValidateType.h	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,38 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// ValidateType.h -- Validate and normalize types.
+//
+// Author           : Andrew Beach
+// Created On       : Mon May 16 16:18:00 2022
+// Last Modified By : Andrew Beach
+// Last Modified On : Mon May 16 16:18:00 2022
+// Update Count     : 0
+//
+
+#pragma once
+
+#include <list>
+
+class Declaration;
+class Type;
+
+namespace SymTab {
+	class Indexer;
+
+	void validateType( Type *type, const Indexer *indexer );
+
+	// Sub-passes that are also used by the larger validate pass.
+	void decayEnumsAndPointers( std::list< Declaration * > & translationUnit );
+	void linkReferenceToTypes( std::list< Declaration * > & translationUnit );
+	void decayForallPointers( std::list< Declaration * > & translationUnit );
+}
+
+// Local Variables: //
+// tab-width: 4 //
+// mode: c++ //
+// compile-command: "make install" //
+// End: //
Index: src/SymTab/demangler.cc
===================================================================
--- src/SymTab/demangler.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/SymTab/demangler.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1,3 +1,3 @@
-#include "Mangler.h"
+#include "Demangle.h"
 #include <iostream>
 #include <fstream>
Index: src/SymTab/module.mk
===================================================================
--- src/SymTab/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/SymTab/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -11,21 +11,26 @@
 ## Created On       : Mon Jun  1 17:49:17 2015
 ## Last Modified By : Andrew Beach
-## Last Modified On : Thr Aug 10 16:08:00 2017
-## Update Count     : 4
+## Last Modified On : Tue May 17 14:46:00 2022
+## Update Count     : 5
 ###############################################################################
 
 SRC_SYMTAB = \
-      SymTab/Autogen.cc \
-      SymTab/Autogen.h \
-      SymTab/FixFunction.cc \
-      SymTab/FixFunction.h \
-      SymTab/Indexer.cc \
-      SymTab/Indexer.h \
-      SymTab/Mangler.cc \
-      SymTab/ManglerCommon.cc \
-      SymTab/Mangler.h \
-      SymTab/Validate.cc \
-      SymTab/Validate.h
+	SymTab/Autogen.cc \
+	SymTab/Autogen.h \
+	SymTab/FixFunction.cc \
+	SymTab/FixFunction.h \
+	SymTab/Indexer.cc \
+	SymTab/Indexer.h \
+	SymTab/Mangler.cc \
+	SymTab/ManglerCommon.cc \
+	SymTab/Mangler.h \
+	SymTab/ValidateType.cc \
+	SymTab/ValidateType.h
 
-SRC += $(SRC_SYMTAB)
-SRCDEMANGLE += $(SRC_SYMTAB) SymTab/Demangle.cc
+SRC += $(SRC_SYMTAB) \
+	SymTab/Validate.cc \
+	SymTab/Validate.h
+
+SRCDEMANGLE += $(SRC_SYMTAB) \
+	SymTab/Demangle.cc \
+	SymTab/Demangle.h
Index: src/SynTree/BaseSyntaxNode.cc
===================================================================
--- src/SynTree/BaseSyntaxNode.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ src/SynTree/BaseSyntaxNode.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,31 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// BaseSyntaxNode.cc --
+//
+// Author           : Andrew Beach
+// Created On       : Fri May 13 13:58:00 2022
+// Last Modified By : Andrew Beach
+// Last Modified On : Fri May 13 14:01:00 2022
+// Update Count     : 0
+//
+
+#include "BaseSyntaxNode.h"
+
+std::ostream & operator<<( std::ostream & out, const BaseSyntaxNode * node ) {
+    if ( node ) {
+        node->print( out );
+    } else {
+        out << "nullptr";
+    }
+    return out;
+}
+
+// Local Variables: //
+// tab-width: 4 //
+// mode: c++ //
+// compile-command: "make install" //
+// End: //
Index: src/SynTree/module.mk
===================================================================
--- src/SynTree/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/SynTree/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -24,4 +24,5 @@
       SynTree/AttrType.cc \
       SynTree/BaseSyntaxNode.h \
+      SynTree/BaseSyntaxNode.cc \
       SynTree/BasicType.cc \
       SynTree/CommaExpr.cc \
Index: src/Tuples/TupleExpansion.cc
===================================================================
--- src/Tuples/TupleExpansion.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Tuples/TupleExpansion.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -9,7 +9,7 @@
 // Author           : Rodolfo G. Esteves
 // Created On       : Mon May 18 07:44:20 2015
-// Last Modified By : Peter A. Buhr
-// Last Modified On : Fri Dec 13 23:45:51 2019
-// Update Count     : 24
+// Last Modified By : Andrew Beach
+// Last Modified On : Tue May 17 15:02:00 2022
+// Update Count     : 25
 //
 
@@ -367,50 +367,4 @@
 		return nullptr;
 	}
-
-	namespace {
-		/// determines if impurity (read: side-effects) may exist in a piece of code. Currently gives a very crude approximation, wherein any function call expression means the code may be impure
-		struct ImpurityDetector : public WithShortCircuiting {
-			ImpurityDetector( bool ignoreUnique ) : ignoreUnique( ignoreUnique ) {}
-
-			void previsit( const ApplicationExpr * appExpr ) {
-				visit_children = false;
-				if ( const DeclarationWithType * function = InitTweak::getFunction( appExpr ) ) {
-					if ( function->linkage == LinkageSpec::Intrinsic ) {
-						if ( function->name == "*?" || function->name == "?[?]" ) {
-							// intrinsic dereference, subscript are pure, but need to recursively look for impurity
-							visit_children = true;
-							return;
-						}
-					}
-				}
-				maybeImpure = true;
-			}
-			void previsit( const UntypedExpr * ) { maybeImpure = true; visit_children = false; }
-			void previsit( const UniqueExpr * ) {
-				if ( ignoreUnique ) {
-					// bottom out at unique expression.
-					// The existence of a unique expression doesn't change the purity of an expression.
-					// That is, even if the wrapped expression is impure, the wrapper protects the rest of the expression.
-					visit_children = false;
-					return;
-				}
-			}
-
-			bool maybeImpure = false;
-			bool ignoreUnique;
-		};
-	} // namespace
-
-	bool maybeImpure( const Expression * expr ) {
-		PassVisitor<ImpurityDetector> detector( false );
-		expr->accept( detector );
-		return detector.pass.maybeImpure;
-	}
-
-	bool maybeImpureIgnoreUnique( const Expression * expr ) {
-		PassVisitor<ImpurityDetector> detector( true );
-		expr->accept( detector );
-		return detector.pass.maybeImpure;
-	}
 } // namespace Tuples
 
Index: src/Tuples/Tuples.cc
===================================================================
--- src/Tuples/Tuples.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Tuples/Tuples.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,6 +10,6 @@
 // Created On       : Mon Jun 17 14:41:00 2019
 // Last Modified By : Andrew Beach
-// Last Modified On : Tue Jun 18  9:31:00 2019
-// Update Count     : 1
+// Last Modified On : Mon May 16 16:15:00 2022
+// Update Count     : 2
 //
 
@@ -18,4 +18,5 @@
 #include "AST/Pass.hpp"
 #include "AST/LinkageSpec.hpp"
+#include "Common/PassVisitor.h"
 #include "InitTweak/InitTweak.h"
 
@@ -23,9 +24,58 @@
 
 namespace {
+	/// Checks if impurity (read: side-effects) may exist in a piece of code.
+	/// Currently gives a very crude approximation, wherein any function
+	/// call expression means the code may be impure.
+	struct ImpurityDetector_old : public WithShortCircuiting {
+		bool const ignoreUnique;
+		bool maybeImpure;
+
+		ImpurityDetector_old( bool ignoreUnique ) :
+			ignoreUnique( ignoreUnique ), maybeImpure( false )
+		{}
+
+		void previsit( const ApplicationExpr * appExpr ) {
+			visit_children = false;
+			if ( const DeclarationWithType * function =
+					InitTweak::getFunction( appExpr ) ) {
+				if ( function->linkage == LinkageSpec::Intrinsic ) {
+					if ( function->name == "*?" || function->name == "?[?]" ) {
+						// intrinsic dereference, subscript are pure,
+						// but need to recursively look for impurity
+						visit_children = true;
+						return;
+					}
+				}
+			}
+			maybeImpure = true;
+		}
+
+		void previsit( const UntypedExpr * ) {
+			maybeImpure = true;
+			visit_children = false;
+		}
+
+		void previsit( const UniqueExpr * ) {
+			if ( ignoreUnique ) {
+				// bottom out at unique expression.
+				// The existence of a unique expression doesn't change the purity of an expression.
+				// That is, even if the wrapped expression is impure, the wrapper protects the rest of the expression.
+				visit_children = false;
+				return;
+			}
+		}
+	};
+
+	bool detectImpurity( const Expression * expr, bool ignoreUnique ) {
+		PassVisitor<ImpurityDetector_old> detector( ignoreUnique );
+		expr->accept( detector );
+		return detector.pass.maybeImpure;
+	}
+
 	/// Determines if impurity (read: side-effects) may exist in a piece of code. Currently gives
 	/// a very crude approximation, wherein any function call expression means the code may be
 	/// impure.
     struct ImpurityDetector : public ast::WithShortCircuiting {
-		bool maybeImpure = false;
+		bool result = false;
 
 		void previsit( ast::ApplicationExpr const * appExpr ) {
@@ -36,10 +86,11 @@
 				}
 			}
-			maybeImpure = true; visit_children = false;
+			result = true; visit_children = false;
 		}
 		void previsit( ast::UntypedExpr const * ) {
-			maybeImpure = true; visit_children = false;
+			result = true; visit_children = false;
 		}
 	};
+
 	struct ImpurityDetectorIgnoreUnique : public ImpurityDetector {
 		using ImpurityDetector::previsit;
@@ -48,19 +99,20 @@
 		}
 	};
-
-	template<typename Detector>
-	bool detectImpurity( const ast::Expr * expr ) {
-		ast::Pass<Detector> detector;
-		expr->accept( detector );
-		return detector.core.maybeImpure;
-	}
 } // namespace
 
 bool maybeImpure( const ast::Expr * expr ) {
-	return detectImpurity<ImpurityDetector>( expr );
+	return ast::Pass<ImpurityDetector>::read( expr );
 }
 
 bool maybeImpureIgnoreUnique( const ast::Expr * expr ) {
-	return detectImpurity<ImpurityDetectorIgnoreUnique>( expr );
+	return ast::Pass<ImpurityDetectorIgnoreUnique>::read( expr );
+}
+
+bool maybeImpure( const Expression * expr ) {
+	return detectImpurity( expr, false );
+}
+
+bool maybeImpureIgnoreUnique( const Expression * expr ) {
+	return detectImpurity( expr, true );
 }
 
Index: src/Tuples/module.mk
===================================================================
--- src/Tuples/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Tuples/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,7 +10,7 @@
 ## Author           : Richard C. Bilson
 ## Created On       : Mon Jun  1 17:49:17 2015
-## Last Modified By : Henry Xue
-## Last Modified On : Mon Aug 23 15:36:09 2021
-## Update Count     : 2
+## Last Modified By : Andrew Beach
+## Last Modified On : Mon May 17 15:00:00 2022
+## Update Count     : 3
 ###############################################################################
 
@@ -24,5 +24,5 @@
 	Tuples/Tuples.h
 
+SRC += $(SRC_TUPLES)
 
-SRC += $(SRC_TUPLES)
 SRCDEMANGLE += $(SRC_TUPLES)
Index: src/Validate/Autogen.cpp
===================================================================
--- src/Validate/Autogen.cpp	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Validate/Autogen.cpp	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -402,4 +402,8 @@
 	auto retval = srcParam();
 	retval->name = "_ret";
+	// xxx - Adding this unused attribute can silence unused variable warning
+	// However, some code might not be compiled as expected
+	// Temporarily disabled
+	// retval->attributes.push_back(new ast::Attribute("unused"));
 	return genProto( "?=?", { dstParam(), srcParam() }, { retval } );
 }
Index: src/Validate/module.mk
===================================================================
--- src/Validate/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Validate/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,10 +10,14 @@
 ## Author           : Rob Schluntz
 ## Created On       : Fri Jul 27 10:10:10 2018
-## Last Modified By : Rob Schluntz
-## Last Modified On : Fri Jul 27 10:10:26 2018
-## Update Count     : 2
+## Last Modified By : Andrew Beach
+## Last Modified On : Tue May 17 14:59:00 2022
+## Update Count     : 3
 ###############################################################################
 
 SRC_VALIDATE = \
+	Validate/FindSpecialDecls.cc \
+	Validate/FindSpecialDecls.h
+
+SRC += $(SRC_VALIDATE) \
 	Validate/Autogen.cpp \
 	Validate/Autogen.hpp \
@@ -22,4 +26,5 @@
 	Validate/EliminateTypedef.cpp \
 	Validate/EliminateTypedef.hpp \
+	Validate/FindSpecialDeclsNew.cpp \
 	Validate/FixQualifiedTypes.cpp \
 	Validate/FixQualifiedTypes.hpp \
@@ -38,9 +43,5 @@
 	Validate/NoIdSymbolTable.hpp \
 	Validate/ReturnCheck.cpp \
-	Validate/ReturnCheck.hpp \
-	Validate/FindSpecialDeclsNew.cpp \
-	Validate/FindSpecialDecls.cc \
-	Validate/FindSpecialDecls.h
+	Validate/ReturnCheck.hpp
 
-SRC += $(SRC_VALIDATE)
 SRCDEMANGLE += $(SRC_VALIDATE)
Index: src/Virtual/module.mk
===================================================================
--- src/Virtual/module.mk	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/Virtual/module.mk	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -11,10 +11,11 @@
 ## Created On       : Tus Jul 25 10:18:00 2017
 ## Last Modified By : Andrew Beach
-## Last Modified On : Tus Jul 25 10:18:00 2017
-## Update Count     : 0
+## Last Modified On : Tus May 17 14:59:00 2022
+## Update Count     : 1
 ###############################################################################
 
-SRC += Virtual/ExpandCasts.cc Virtual/ExpandCasts.h \
-	Virtual/Tables.cc Virtual/Tables.h
-
-SRCDEMANGLE += Virtual/Tables.cc
+SRC += \
+	Virtual/ExpandCasts.cc \
+	Virtual/ExpandCasts.h \
+	Virtual/Tables.cc \
+	Virtual/Tables.h
Index: src/main.cc
===================================================================
--- src/main.cc	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ src/main.cc	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -70,4 +70,5 @@
 #include "ResolvExpr/Resolver.h"            // for resolve
 #include "SymTab/Validate.h"                // for validate
+#include "SymTab/ValidateType.h"            // for linkReferenceToTypes
 #include "SynTree/LinkageSpec.h"            // for Spec, Cforall, Intrinsic
 #include "SynTree/Declaration.h"            // for Declaration
Index: tests/.expect/attributes.nast.x64.txt
===================================================================
--- tests/.expect/attributes.nast.x64.txt	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/.expect/attributes.nast.x64.txt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1339,9 +1339,5 @@
         }
 
-        {
-            ((void)(_X4_retM12__anonymous4_2=(*_X4_dstM12__anonymous4_2)) /* ?{} */);
-        }
-
-        return _X4_retM12__anonymous4_2;
+        return (*_X4_dstM12__anonymous4_2);
     }
     {
Index: tests/.expect/attributes.nast.x86.txt
===================================================================
--- tests/.expect/attributes.nast.x86.txt	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/.expect/attributes.nast.x86.txt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1339,9 +1339,5 @@
         }
 
-        {
-            ((void)(_X4_retM12__anonymous4_2=(*_X4_dstM12__anonymous4_2)) /* ?{} */);
-        }
-
-        return _X4_retM12__anonymous4_2;
+        return (*_X4_dstM12__anonymous4_2);
     }
     {
Index: tests/.expect/attributes.oast.x64.txt
===================================================================
--- tests/.expect/attributes.oast.x64.txt	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/.expect/attributes.oast.x64.txt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1339,9 +1339,5 @@
         }
 
-        {
-            ((void)(_X4_retM12__anonymous4_2=(*_X4_dstM12__anonymous4_2)) /* ?{} */);
-        }
-
-        return _X4_retM12__anonymous4_2;
+        return (*_X4_dstM12__anonymous4_2);
     }
     {
Index: tests/.expect/attributes.oast.x86.txt
===================================================================
--- tests/.expect/attributes.oast.x86.txt	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/.expect/attributes.oast.x86.txt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1339,8 +1339,4 @@
         }
 
-        {
-            ((void)(_X4_retM12__anonymous4_2=(*_X4_dstM12__anonymous4_2)) /* ?{} */);
-        }
-
         return _X4_retM12__anonymous4_2;
     }
Index: tests/.expect/nested_function.txt
===================================================================
--- tests/.expect/nested_function.txt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ tests/.expect/nested_function.txt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,1 @@
+total 105
Index: tests/.expect/quasiKeyword.txt
===================================================================
--- tests/.expect/quasiKeyword.txt	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/.expect/quasiKeyword.txt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1,1 +1,1 @@
-quasiKeyword.cfa:54:25: warning: Compiled
+quasiKeyword.cfa:52:25: warning: Compiled
Index: tests/concurrent/examples/multiSort.cfa
===================================================================
--- tests/concurrent/examples/multiSort.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ tests/concurrent/examples/multiSort.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,121 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2022 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// multiSort.c -- sort a single array multiple times to
+//			make sure sorting works, threads don't screw up,
+//			and trampolines work on thread stacks
+//
+// Author           : Peter A. Buhr
+// Created On       : Tue 05 24 11:34:23 2022
+// Last Modified By :
+// Last Modified On :
+// Update Count     :
+//
+
+#include <barrier.hfa>
+#include <fstream.hfa>
+#include <math.trait.hfa>
+#include <stdlib.hfa>
+#include <thread.hfa>
+
+forall(T) {
+	struct MyVec2 {
+		T val1;
+		T val2;
+	};
+
+	forall(| Equality( T ))
+	int ?!=?( MyVec2(T) lhs, MyVec2(T) rhs ) { return lhs.val1 != rhs.val1 || lhs.val2 != rhs.val2; }
+
+	forall(| Relational(T)) {
+		static inline int ?<?( MyVec2(T) lhs, MyVec2(T) rhs ) {
+			if(lhs.val1 < rhs.val1) return true;
+			if(lhs.val1 > rhs.val1) return false;
+			if(lhs.val2 < rhs.val2) return true;
+			if(lhs.val2 > rhs.val2) return false;
+			return false;
+		}
+	}
+
+	forall(| { T random( void ); })
+	MyVec2(T) random( void ) {
+		MyVec2(T) r;
+		r.val1 = random();
+		r.val2 = random();
+		return r;
+	}
+}
+
+
+
+const unsigned nthreads = 7;
+const unsigned nvecs = 313;
+barrier bar = { nthreads + 1 };
+const MyVec2(long int) * original;
+
+thread Sorter {
+	MyVec2(long int) * copy;
+};
+
+void ^?{}( Sorter & mutex this ) {
+	free(this.copy);
+}
+
+// Make this a polymorphic call to prevent thunks from being hoisted
+forall( T | Relational(T) | sized(MyVec2(T)) )
+void block_sort( MyVec2(T) * vals, size_t dim ) __attribute__((noinline)) {
+	MyVec2(T) dummy = vals[0];
+	block( bar );
+
+	qsort(vals, dim);
+}
+
+
+void main( Sorter & this ) {
+	this.copy = aalloc(nvecs);
+	for(i; nvecs) {
+		this.copy[i] = original[i];
+	}
+
+	block_sort(this.copy, nvecs);
+}
+
+int main() {
+	sout | "Generating";
+	MyVec2(long int) * local = aalloc( nvecs );
+	for(i; nvecs) {
+		local[i] = random();
+	}
+
+	original = local;
+
+	sout | "Launching";
+
+	processor p; {
+		Sorter sorters[nthreads];
+
+		block( bar );
+
+		sout | "Sorting";
+
+		qsort(local, nvecs);
+
+		sout | "Checking";
+
+		for(i; nthreads) {
+			const MyVec2(long int) * copy = join( sorters[i] ).copy;
+			for(j; nvecs) {
+				if(copy[j] != original[j]) {
+					sout | "Error at thread" | i | ", index" | j | ": data doesn't match!";
+				}
+			}
+		}
+	}
+
+	free(local);
+
+	sout | "Done";
+}
Index: tests/enum_tests/.expect/structEnum.txt
===================================================================
--- tests/enum_tests/.expect/structEnum.txt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ tests/enum_tests/.expect/structEnum.txt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,1 @@
+100 c
Index: tests/enum_tests/structEnum.cfa
===================================================================
--- tests/enum_tests/structEnum.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ tests/enum_tests/structEnum.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,33 @@
+#include <stdio.h>
+
+struct Point {
+    int x;
+    char y;
+};
+
+enum(Point) PointEnum {
+    first={
+        100,
+        'c'
+    },
+    second={
+        200,
+        'a'
+    }
+};
+
+// The only valid usage
+struct Point apple = first;
+// Fails because qualified names are currently unimplemented.
+// struct Point banana = PointEnum.first;
+
+int main() {
+    printf("%d %c\n", apple.x, apple.y);
+    // Failed; enumInstType is now not a real type and not instantiated. 
+    // Not sure if we want that
+    // printf("%d %c\n", second.x, second.y);
+    return 0;
+}
+
+
+
Index: tests/exceptions/defaults.cfa
===================================================================
--- tests/exceptions/defaults.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/exceptions/defaults.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -2,5 +2,4 @@
 
 #include <string.h>
-#include <exception.hfa>
 
 exception log_message {
@@ -8,9 +7,20 @@
 };
 
-_EHM_DEFINE_COPY(log_message, )
+// Manually define the virtual table and helper functions.
+void copy(log_message * this, log_message * that) {
+	*this = *that;
+}
+
 const char * msg(log_message * this) {
 	return this->msg;
 }
-_EHM_VIRTUAL_TABLE(log_message, , log_vt);
+
+const struct log_message_vtable log_vt @= {
+	.__cfavir_typeid : &__cfatid_log_message,
+	.size : sizeof(struct log_message),
+	.copy : copy,
+	.^?{} : ^?{},
+	.msg : msg,
+};
 
 // Logging messages don't have to be handled.
Index: tests/include/.expect/includes.nast.txt
===================================================================
--- tests/include/.expect/includes.nast.txt	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/include/.expect/includes.nast.txt	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -1,1 +1,1 @@
-include/includes.cfa:153:25: warning: Compiled
+include/includes.cfa:173:25: warning: Compiled
Index: tests/include/includes.cfa
===================================================================
--- tests/include/includes.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/include/includes.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -10,6 +10,6 @@
 // Created On       : Wed May 27 17:56:53 2015
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Thu Feb  3 22:06:07 2022
-// Update Count     : 774
+// Last Modified On : Sun May 22 08:27:20 2022
+// Update Count     : 779
 //
 
@@ -32,5 +32,7 @@
 #include <crypt.h>
 #include <ctype.h>
-//#include <curses.h>										// may not be installed
+#if __has_include( "curses.h" )
+#include <curses.h>										// may not be installed
+#endif
 #include <dirent.h>
 #include <dlfcn.h>
@@ -41,8 +43,12 @@
 #include <errno.h>
 #include <error.h>
-//#include <eti.h>										// may not be installed, comes with ncurses
+#if __has_include( "eti.h" )
+#include <eti.h>										// may not be installed, comes with ncurses
+#endif
 #include <execinfo.h>
-#include <expat.h>
+#if __has_include( "expat.h" )
+#include <expat.h>										// may not be installed
 #include <expat_external.h>
+#endif
 #include <fcntl.h>
 #include <features.h>
@@ -50,5 +56,7 @@
 #include <fmtmsg.h>
 #include <fnmatch.h>
-//#include <form.h>										// may not be installed, comes with ncurses
+#if __has_include( "form.h" )
+#include <form.h>										// may not be installed, comes with ncurses
+#endif
 #include <fstab.h>
 #include <fts.h>
@@ -73,19 +81,27 @@
 //#include <link.h>										// CFA bug #240 nested anonymous enum fails
 #include <locale.h>
-#include <ltdl.h>
+#if __has_include( "ltdl.h" )
+#include <ltdl.h>										// may not be installed
+#endif
 //#include <malloc.h>									// cannot include in extern "C" because of CFA #include_next
 #include <math.h>
 #include <mcheck.h>
 #include <memory.h>
-//#include <menu.h>										// may not be installed, comes with ncurses
+#if __has_include( "menu.h" )
+#include <menu.h>										// may not be installed, comes with ncurses
+#endif
 #include <mntent.h>
 #include <monetary.h>
 #include <mqueue.h>
-//#include <ncurses_dll.h>								// may not be installed, comes with ncurses
+#if __has_include( "ncurses_dll.h" )
+#include <ncurses_dll.h>								// may not be installed, comes with ncurses
+#endif
 #include <netdb.h>
 #include <nl_types.h>
 #include <nss.h>
 #include <obstack.h>
-//#include <panel.h>										// may not be installed, comes with ncurses
+#if __has_include( "panel.h" )
+#include <panel.h>										// may not be installed, comes with ncurses
+#endif
 #include <paths.h>
 #include <poll.h>
@@ -118,6 +134,8 @@
 #include <syslog.h>
 #include <tar.h>
-//#include <term.h>										// may not be installed, comes with ncurses
-//#include <termcap.h>									// may not be installed, comes with ncurses
+#if __has_include( "term.h" )
+#include <term.h>										// may not be installed, comes with ncurses
+#include <termcap.h>									// may not be installed, comes with ncurses
+#endif
 #include <termio.h>
 #include <termios.h>
@@ -131,5 +149,7 @@
 #include <ucontext.h>
 #include <ulimit.h>
-//#include <unctrl.h>										// may not be installed, comes with ncurses
+#if __has_include( "unctrl.h" )
+#include <unctrl.h>										// may not be installed, comes with ncurses
+#endif
 #include <unistd.h>
 #include <utime.h>
Index: tests/linking/exception-nothreads.cfa
===================================================================
--- tests/linking/exception-nothreads.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/linking/exception-nothreads.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -15,8 +15,7 @@
 
 #include <stdlib.hfa>
-#include <exception.hfa>
 
-EHM_EXCEPTION(ping)();
-EHM_VIRTUAL_TABLE(ping, ping_vt);
+exception ping {};
+vtable(ping) ping_vt;
 
 int main(void) {
Index: tests/linking/exception-withthreads.cfa
===================================================================
--- tests/linking/exception-withthreads.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/linking/exception-withthreads.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -15,9 +15,8 @@
 
 #include <stdlib.hfa>
-#include <exception.hfa>
 #include "../exceptions/with-threads.hfa"
 
-EHM_EXCEPTION(ping)();
-EHM_VIRTUAL_TABLE(ping, ping_vt);
+exception ping {};
+vtable(ping) ping_vt;
 
 int main(void) {
Index: tests/nested_function.cfa
===================================================================
--- tests/nested_function.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
+++ tests/nested_function.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -0,0 +1,38 @@
+#include <thread.hfa>
+#include <fstream.hfa>
+#include <stdlib.hfa>
+
+enum { VALUES = 10_000 };
+int values[VALUES], total = 0;
+
+thread T { int id; };
+
+void main( T & ) {
+	int sum = 0;
+	void f() {
+		int i = 0;
+		void h( void ) {								// nest routine
+			int j = i;									// non-local reference
+			void g( void ) {							// nest routine
+				i += 1;									// non-local reference
+				j += 1;									// non-local reference
+				if ( i < 3 ) h();
+			} // g
+			if ( prng( 6 ) == 0 ) g();					// prevent compiler inlining
+			else h();
+			i += 1;
+			sum += bsearchl( j, values, VALUES );		// has internal nested compare routine
+		} // h
+		h();
+	} // f
+	f();
+	__atomic_fetch_add( &total, sum, __ATOMIC_SEQ_CST );
+}
+int main() {
+	set_seed( 1003 );
+	for ( i; VALUES ) values[i] = i;
+	{
+		T t[5];
+	}
+	sout | "total" | total;
+}
Index: tests/pybin/settings.py
===================================================================
--- tests/pybin/settings.py	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/pybin/settings.py	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -201,2 +201,4 @@
 	global output_width
 	output_width = max(map(lambda t: len(t.target()), tests))
+	# 35 is the maximum width of the name field before we get line wrapping.
+	output_width = min(output_width, 35)
Index: tests/pybin/test_run.py
===================================================================
--- tests/pybin/test_run.py	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/pybin/test_run.py	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -43,4 +43,14 @@
 		return os.path.normpath( os.path.join(settings.BUILDDIR, self.path, self.name) )
 
+	def format_target(self, width):
+		target = self.target()
+		length = len(target)
+		if length < width:
+			return '{0:{width}}'.format(target, width=width)
+		elif length == width:
+			return target
+		else:
+			return '...' + target[3-width:]
+
 	@staticmethod
 	def valid_name(name):
Index: tests/quasiKeyword.cfa
===================================================================
--- tests/quasiKeyword.cfa	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/quasiKeyword.cfa	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -4,5 +4,5 @@
 // quasiKeyword.cfa -- test that quasi-keywords can be used for variable and functions names, as well as keywords in
 //					   control structures.
-// 
+//
 // Author           : Peter A. Buhr
 // Created On       : Wed Feb 17 10:33:49 2021
@@ -10,9 +10,7 @@
 // Last Modified On : Sat Jun  5 10:07:59 2021
 // Update Count     : 8
-// 
+//
 
-#include <exception.hfa>
-
-EHM_EXCEPTION( E )();
+exception E {};
 
 void catch( int i ) {}
@@ -49,5 +47,5 @@
 		} fixup ( E * ) {
 		} finally {
-		} 
+		}
 	else catch = 3;
 
Index: tests/test.py
===================================================================
--- tests/test.py	(revision bf0263c9b7b4fb1f0eeb41eb13f2e11fe44991d3)
+++ tests/test.py	(revision 90a812553b6fb7bc8cf5dadc34f305f3df56721e)
@@ -132,5 +132,5 @@
 	parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no')
 	parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_')
-	parser.add_argument('--timeout', help='Maximum duration in seconds after a single test is considered to have timed out', type=int, default=120)
+	parser.add_argument('--timeout', help='Maximum duration in seconds after a single test is considered to have timed out', type=int, default=180)
 	parser.add_argument('--global-timeout', help='Maximum cumulative duration in seconds after the ALL tests are considered to have timed out', type=int, default=7200)
 	parser.add_argument('--timeout-with-gdb', help='Instead of killing the command when it times out, orphan it and print process id to allow gdb to attach', type=yes_no, default="no")
@@ -252,5 +252,5 @@
 	try :
 		# print formated name
-		name_txt = '{0:{width}}  '.format(t.target(), width=settings.output_width)
+		name_txt = t.format_target(width=settings.output_width) + '  '
 
 		retcode, error, duration = run_single_test(t)
