Index: doc/papers/llheap/Makefile
===================================================================
--- doc/papers/llheap/Makefile	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/Makefile	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -53,4 +53,6 @@
 FakeHeader \
 Header \
+decreasing \
+increasing \
 }
 
@@ -66,5 +68,10 @@
 
 GRAPHS = ${addsuffix .tex, \
+prolog \
+swift \
+java \
 }
+
+#prolog \
 
 ## Define the documents that need to be made.
@@ -80,5 +87,5 @@
 
 clean :
-	@rm -frv ${DOCUMENT} ${BASE}.ps WileyNJD-AMA.bst ${BASE}.out.ps ${Build}
+	@rm -frv ${DOCUMENT} testgenfmt testgenfmt2 ${BASE}.ps WileyNJD-AMA.bst ${BASE}.out.ps ${Build}
 
 # File Dependencies #
@@ -90,5 +97,5 @@
 	dvips ${Build}/$< -o $@
 
-${BASE}.dvi : Makefile ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
+${BASE}.dvi : Makefile ${BASE}.out.ps WileyNJD-AMA.bst testgenfmt testgenfmt2 ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
 		local.bib ../../bibliography/pl.bib | ${Build}
 	# Must have *.aux file containing citations for bibtex
@@ -122,4 +129,37 @@
 	fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
 
+testgenfmt : testgenfmt.cc
+	g++ testgenfmt.cc -o $@
+
+testgenfmt2 : testgenfmt2.cc
+	g++ testgenfmt2.cc -o $@
+
+#${addsuffix /testdata, ${basename ${GRAPHS}}} : ${addsuffix /testgen, ${basename ${GRAPHS}}}
+#	echo ${addsuffix /testdata, ${basename ${GRAPHS}}}
+#	echo ${addsuffix /testgen, ${basename ${GRAPHS}}}
+#	testgenfmt < $< > $@
+
+#swift/testdata.lexp : swift/testgen.ldata testgenfmt.cc
+#	./testgenfmt < $<
+
+swift/testdata.exp : swift/testgen.data testgenfmt2.cc
+	./testgenfmt2 < $<
+
+#prolog/testdata.lexp : prolog/testgen.ldata testgenfmt.cc
+#	./testgenfmt < $<
+
+prolog/testdata.exp : prolog/testgen.data testgenfmt2.cc
+	./testgenfmt2 < $<
+
+#java/testdata.lexp : java/testgen.ldata testgenfmt.cc
+#	./testgenfmt < $<
+
+java/testdata.exp : java/testgen.data testgenfmt2.cc
+	./testgenfmt2 < $<
+
+${GRAPHS} : Makefile plotexp.gp plotres.gp ${addsuffix /testdata.exp, ${basename ${GRAPHS}}}
+	gnuplot -e GRAPH="'${basename $@}'" plotexp.gp
+	gnuplot -e GRAPH="'${basename $@}'" plotres.gp
+
 # Local Variables: #
 # compile-command: "make" #
Index: doc/papers/llheap/Paper.tex
===================================================================
--- doc/papers/llheap/Paper.tex	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/Paper.tex	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -1,3 +1,34 @@
-\documentclass[AMA,STIX1COL]{WileyNJD-v2}
+% Type: Paper
+% 
+% Abstract
+%
+% A new C-based concurrent memory-allocator is presented, called llheap (ll => low latency). It supports C/C++ applications with multiple kernel threads, or it can be embedded into user-threading runtime-systems. llheap extends the C allocation API with new functions providing orthogonal access to allocation features; hence, programmers do not have to code missing combinations. llheap also extends the C allocation semantics by remembering multiple aspects of the initial allocation. These properties can be queried, allowing programmers to write safer programs by preserving these properties in future allocations. As well, realloc/reallocarray preserve initial zero-fill and alignment properties when adjusting storage size, again increasing future allocation safety. The allocator provides a contention-free statistics gathering mode, and a debugging mode for dynamically checking allocation pre/post conditions and invariants. These modes are invaluable for understanding and debugging a program's dynamic allocation behaviour, with low enough cost to be used in production code. An example is presented for condensing the allocation API using advanced type-systems, providing a single type-safe allocation routine using named arguments. Finally, performance results across a number of benchmarks show llheap is competitive with other modern memory allocators.
+% 
+% Upload: llheap.pdf
+% 
+% Computing Classification Systems
+% 
+% Add
+% 500 Software and its engineering > Software libraries and repositories
+% Add
+% 300 Computing methodologies > Concurrent programming languages
+%
+% Authors, submitter has to have an orcid
+%
+% Details & Comments
+%
+% cover letter
+%
+% Funding
+%  yes
+%  Government of Canada >
+%  Natural Sciences and Engineering Research Council of Canada
+% 
+% Electronic Supplementary Materials No
+% Are you submitting a conference paper extension: No
+% X  ACM uses CrossCheck, an automated service that checks for plagiarism. Any submission to ACM is subject to such a check. Confirm that you are familiar with the ACM Plagiarism Policy
+% To confirm that you have reviewed all title, author, and affiliation information in the submission form and the manuscript for accuracy, and approve its exact use in the final, published article, please check the box to the right. X
+
+\documentclass[manuscript,screen,review]{acmart}
 
 % Latex packages used in the document.
@@ -8,16 +39,21 @@
 \usepackage{relsize}
 \usepackage{xspace}
+\usepackage{xcolor}
 \usepackage{calc}
+\usepackage{algorithm}
+\usepackage{algorithmic}
+\usepackage{enumitem}
+\usepackage{tabularx}									% allows \lstMakeShortInline@
 \usepackage[scaled=0.88]{helvet}						% descent Helvetica font and scale to times size
 \usepackage[T1]{fontenc}
 \usepackage{listings}									% format program code
-\usepackage[labelformat=simple,aboveskip=0pt,farskip=0pt]{subfig}
+\usepackage[labelformat=simple,aboveskip=0pt,farskip=0pt,font={rm,md,up}]{subfig}
 \renewcommand{\thesubfigure}{(\alph{subfigure})}
-\usepackage{enumitem}
 
 \hypersetup{breaklinks=true}
-
-\usepackage[pagewise]{lineno}
-\renewcommand{\linenumberfont}{\scriptsize\sffamily}
+\usepackage{breakurl}
+
+% \usepackage[pagewise]{lineno}
+% \renewcommand{\linenumberfont}{\scriptsize\sffamily}
 
 \usepackage{varioref}					% extended references
@@ -71,5 +107,5 @@
 \setlength{\gcolumnposn}{3.25in}
 \setlength{\columnposn}{\gcolumnposn}
-\newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}}
+\renewcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}}
 \newcommand{\CRT}{\global\columnposn=\gcolumnposn}
 \makeatother
@@ -78,5 +114,5 @@
 columns=fullflexible,
 basicstyle=\linespread{0.9}\sf,			% reduce line spacing and use sanserif font
-stringstyle=\small\tt,					% use typewriter font
+stringstyle=\fontsize{9}{9}\selectfont\tt,	% use typewriter font
 tabsize=5,								% N space tabbing
 xleftmargin=\parindentlnth,				% indent code to paragraph indentation
@@ -93,9 +129,9 @@
 literate=
 %  {-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.75ex}{0.1ex}}}}1
-  {-}{\raisebox{-1pt}{\ttfamily-}}1
+  {-}{\raisebox{0pt}{\ttfamily-}}1
   {^}{\raisebox{0.6ex}{\(\scriptstyle\land\,\)}}1
   {~}{\raisebox{0.3ex}{\(\scriptstyle\sim\,\)}}1
-  {'}{\ttfamily'\hspace*{-0.4ex}}1
-  {`}{\ttfamily\upshape\hspace*{-0.3ex}`}1
+%  {'}{\ttfamily'\hspace*{-0.4ex}}1
+  {`}{\raisebox{-2pt}{\large\textasciigrave\hspace{-1pt}}}1
   {<-}{$\leftarrow$}2
   {=>}{$\Rightarrow$}2
@@ -150,14 +186,4 @@
 \lstnewenvironment{java}[1][]{\lstset{language=java,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}{}
 
-% inline code @...@
-\lstMakeShortInline@%
-
-% \let\OLDthebibliography\thebibliography
-% \renewcommand\thebibliography[1]{
-%   \OLDthebibliography{#1}
-%   \setlength{\parskip}{0pt}
-%   \setlength{\itemsep}{4pt plus 0.3ex}
-% }
-
 \newsavebox{\myboxA}
 \newsavebox{\myboxB}
@@ -167,71 +193,92 @@
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
-\articletype{RESEARCH ARTICLE}%
-
-% Referees
-% Doug Lea, dl@cs.oswego.edu, SUNY Oswego
-% Herb Sutter, hsutter@microsoft.com, Microsoft Corp
-% Gor Nishanov, gorn@microsoft.com, Microsoft Corp
-% James Noble, kjx@ecs.vuw.ac.nz, Victoria University of Wellington, School of Engineering and Computer Science
-
-\received{XXXXX}
-\revised{XXXXX}
-\accepted{XXXXX}
-
-\raggedbottom
-
 \title{High-Performance Concurrent Memory Allocation}
 
-\author[1]{Mubeen Zulfiqar}
-\author[1]{Ayelet Wasik}
-\author[1]{Peter A. Buhr*}
-\author[2]{Bryan Chan}
-\author[3]{Dave Dice}
-\authormark{ZULFIQAR \textsc{et al.}}
-
-\address[1]{\orgdiv{Cheriton School of Computer Science}, \orgname{University of Waterloo}, \orgaddress{\state{Waterloo, ON}, \country{Canada}}}
-\address[2]{\orgdiv{Huawei Compiler Lab}, \orgname{Huawei}, \orgaddress{\state{Markham, ON}, \country{Canada}}}
-\address[3]{\orgdiv{Oracle Labs}, \orgname{Oracle}, \orgaddress{\state{Burlington, MA}, \country{USA}}}
-
-
-\corres{*Peter A. Buhr, Cheriton School of Computer Science, University of Waterloo, 200 University Avenue West, Waterloo, ON N2L 3G1, Canada. \email{pabuhr{\char`\@}uwaterloo.ca}}
-
-% \fundingInfo{Natural Sciences and Engineering Research Council of Canada}
-
-\abstract[Summary]{%
-A new C-based concurrent memory-allocator is presented, called llheap (low latency).
-It can be used standalone in C/\CC applications with multiple kernel threads, or embedded into high-performance user-threading programming languages.
-llheap extends the feature set of existing C allocation by remembering zero-filled (\lstinline{calloc}) and aligned properties (\lstinline{memalign}) in an allocation.
+\author{Mubeen Zulfiqar}
+\email{m3zulfiq@uwaterloo.ca}
+\author{Ayelet Wasik}
+\email{aisraeli@plg.uwaterloo.ca}
+\author{Peter A. Buhr}
+\email{pabuhr@uwaterloo.ca}
+\orcid{0000-0003-3747-9281}
+\affiliation{%
+  \institution{University of Waterloo}
+  \city{Waterloo}
+  \state{Ontario}
+  \country{Canada}
+}
+\author{Dave Dice}
+\email{dave.dice@oracle.com}
+\orcid{0000-0001-9164-7747}
+\affiliation{%
+  \institution{Oracle Labs}
+  \city{Burlington}
+  \state{Massachusetts}
+  \country{USA}
+}
+\author{Bryan Chan}
+\email{bryan.chan@huawei.com}
+\affiliation{%
+  \institution{Huawei Compiler Lab}
+  \city{Markham}
+  \state{Ontario}
+  \country{Canada}
+}
+
+\renewcommand{\shortauthors}{Zulfiqar et al.}
+
+% inline code @...@
+\lstMakeShortInline@%
+
+\begin{document}
+
+\begin{abstract}
+A new C-based concurrent memory-allocator is presented, called llheap (ll $\Rightarrow$ low latency).
+It supports C/\CC applications with multiple kernel threads, or it can be embedded into user-threading runtime-systems.
+llheap extends the C allocation API with new functions providing orthogonal access to allocation features;
+hence, programmers do not have to code missing combinations.
+llheap also extends the C allocation semantics by remembering multiple aspects of the initial allocation.
 These properties can be queried, allowing programmers to write safer programs by preserving these properties in future allocations.
-As well, \lstinline{realloc}/\lstinline{reallocarray} preserve these properties when adjusting storage size, again increasing future allocation safety.
-llheap also extends the C allocation API with \lstinline{aalloc}, \lstinline{amemalign}, \lstinline{cmemalign}, \lstinline{resize}, and extended \lstinline{realloc}, providing orthogonal access to allocation features;
-hence, programmers do have to code missing combinations.
-The llheap allocator also provides a contention-free statistics gathering mode, and a debugging mode for dynamically checking allocation pre/post conditions and invariants.
+As well, \lstinline{realloc}/\lstinline{reallocarray} preserve initial zero-fill and alignment properties when adjusting storage size, again increasing future allocation safety.
+The allocator provides a contention-free statistics gathering mode, and a debugging mode for dynamically checking allocation pre/post conditions and invariants.
 These modes are invaluable for understanding and debugging a program's dynamic allocation behaviour, with low enough cost to be used in production code.
-The llheap API is further extended with the \CFA advanced type-system, providing a single type-safe allocation routine using named arguments, increasing safety and simplifying usage.
-Finally, performance results across a number of benchmarks show llheap is competitive with the best memory allocators.
-}% abstract
-
-% While not as powerful as the \lstinline{valgrind} interpreter, a large number of allocations mistakes are detected.
-% A micro-benchmark test-suite is started for comparing allocators, rather than relying on a suite of arbitrary programs. It has been an interesting challenge.
-% These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs.
-% Existing memory allocators, glibc, dlmalloc, hoard, jemalloc, ptmalloc3, rpmalloc, tbmalloc, and the new allocator llheap are all compared using the new micro-benchmark test-suite.
+An example is presented for condensing the allocation API using advanced type-systems, providing a single type-safe allocation routine using named arguments.
+Finally, performance results across a number of benchmarks show llheap is competitive with other modern memory allocators.
+\end{abstract}
+
+\begin{CCSXML}
+<ccs2012>
+<concept>
+<concept_id>10011007.10011006.10011072</concept_id>
+<concept_desc>Software and its engineering~Software libraries and repositories</concept_desc>
+<concept_significance>500</concept_significance>
+</concept>
+
+<concept>
+<concept_id>10010147.10011777.10011014</concept_id>
+<concept_desc>Computing methodologies~Concurrent programming languages</concept_desc>
+<concept_significance>300</concept_significance>
+</concept>
+</ccs2012>
+\end{CCSXML}
+
+\ccsdesc[500]{Software and its engineering~Software libraries and repositories}
+\ccsdesc[300]{Computing methodologies~Concurrent programming languages}
 
 \keywords{memory allocation, (user-level) concurrency, type-safety, statistics, debugging, high performance}
 
-
-\begin{document}
-%\linenumbers				% comment out to turn off line numbering
+\received{20 February 2007}
+\received[revised]{12 March 2009}
+\received[accepted]{5 June 2009}
+
 
 \maketitle
 
-
 \section{Introduction}
 
-Memory management services a series of program allocation/deallocation requests and attempts to satisfy them from a variable-sized block(s) of memory, while minimizing total memory usage.
-A general-purpose dynamic-allocation algorithm cannot anticipate allocation requests so its time and space performance is rarely optimal (bin packing).
-However, allocators take advantage of allocation patterns in typical programs (heuristics) to produce excellent results, both in time and space (similar to LRU paging).
-Allocators use similar techniques, but each optimizes specific allocation patterns.
-Nevertheless, allocators are a series of compromises, occasionally with some static or dynamic tuning parameters to optimize specific request patterns.
+Memory management services a series of program allocation/deallocation requests and attempts to satisfy them from variable-sized blocks of memory while minimizing total memory usage.
+A general-purpose memory allocator cannot anticipate storage requests so its time and space performance cannot be optimal (bin packing).
+Each allocator takes advantage of a subset of typical allocation patterns (heuristics) to produce excellent results, both in time and space (similar to LRU paging).
+Nevertheless, allocators are a series of compromises, possibly with static or dynamic tuning parameters to optimize specific request patterns.
 
 
@@ -239,13 +286,12 @@
 \label{s:MemoryStructure}
 
-Figure~\ref{f:ProgramAddressSpace} shows the typical layout of a program's address space (high to low) divided into a number of zones, with free memory surrounding the dynamic code/data~\cite{memlayout}.
+Figure~\ref{f:ProgramAddressSpace} shows the typical layout of a program's address space (high addresses to low) divided into a number of zones, with free memory surrounding the dynamic code/data~\cite{memlayout}.
 Static code and data are placed into memory at load time from the executable and are fixed-sized at runtime.
 Dynamic code/data memory is managed by the dynamic loader for libraries loaded at runtime, which is complex especially in a multi-threaded program~\cite{Huang06}.
-However, changes to the dynamic code/data space are typically infrequent, many occurring at program startup, and are largely outside of a program's control.
-Stack memory is managed by the program call/return-mechanism using a LIFO technique, which works well for sequential programs.
-For stackful coroutines and user threads, a new stack is commonly created in the dynamic-allocation memory.
-The dynamic-allocation memory is often a contiguous area (can be memory mapped as multiple areas), which starts empty and grows/shrinks as the program creates/deletes variables with independent lifetime.
-The programming-language's runtime manages this area, where management complexity is a function of the mechanism for deleting variables.
-This work focuses solely on management of the dynamic-allocation memory.
+However, changes to the dynamic code/data space are typically infrequent, most occurring at program startup and are largely outside of a program's control.
+Stack memory is managed by the program call/return mechanism using a LIFO technique.
+For stackful coroutines and user threads, new stacks are commonly created in the dynamic-allocation memory.
+The dynamic-allocation memory is often a contiguous area, which starts empty and grows/shrinks as the program creates/deletes variables with independent lifetime.
+The language's runtime manages this area, where management complexity is a function of the mechanism for deleting variables.
 
 \begin{figure}
@@ -261,20 +307,29 @@
 \label{s:DynamicMemoryManagement}
 
-Modern programming languages manage dynamic memory in different ways.
-Some languages, such as Lisp~\cite{CommonLisp}, Java~\cite{Java}, Haskell~\cite{Haskell}, Go~\cite{Go}, provide explicit allocation but \emph{implicit} deallocation of data through garbage collection~\cite{Wilson92}.
-In general, garbage collection supports memory compaction, where dynamic (live) data is moved during runtime to better utilize space.
-However, moving data requires finding and updating pointers to it to reflect the new data locations.
-Programming languages such as C~\cite{C}, \CC~\cite{C++}, and Rust~\cite{Rust} provide the programmer with explicit allocation \emph{and} deallocation of data.
-These languages cannot find and subsequently move live data because pointers can be created to any storage zone, including internal components of allocated objects, and may contain temporary invalid values generated by pointer arithmetic.
-Attempts have been made to perform quasi garbage collection in C/\CC~\cite{Boehm88}, but it is a compromise.
-This work only examines dynamic management with \emph{explicit} deallocation.
-While garbage collection and compaction are not part this work, many of the results are applicable to the allocation phase in any memory-management approach.
+Modern programming languages provide two forms of storage management: managed or unmanaged.
+Both forms have explicit allocation, but managed memory has implicit deallocation (garbage collection~\cite{Wilson92}, GC) and unmanaged memory has some form of explicit deallocation.
+Sometimes there are explicit deallocation hints in managed memory.
+Both forms attempt to reuse freed storage in the heap for new allocations.
+Unmanaged languages have no information about allocated \newterm{objects}, and hence, use techniques during freeing to detect adjacent unused storage when coalescing.
+Conservative GC attempts to find free objects in an unmanaged system by scanning memory and marking anything that \emph{looks} like a live object.
+However, \emph{conservative} means some non-objects might be marked as live;
+the goal is not to miss any live objects.
+Managed languages maintain sufficient information to locate all live objects.
+Precise GC is then able to mark just the live objects.
+Both approaches then sweep through the unmarked objects looking for adjacent free storage to coalesce.
+Precise GC has a further coalescing option of compacting used objects and adjusting the pointers used to find them to the new locations, resulting in a large area of contiguous free storage.
+Languages such as Lisp~\cite{CommonLisp}, Java~\cite{Java}, Haskell~\cite{Haskell}, Go~\cite{Go}, are managed and normally implemented using precise GC.
+(Both Go~\cite{Go1.3} and Netscape JavaScript~\cite{JavaScriptGC} switched from conservative to precise GC.)
+Languages such as C~\cite{C}, \CC~\cite{C++}, Rust~\cite{Rust} and Swift~\cite{swift} (because of explicit management of weak references) are unmanaged but could be used with conservative GC~\cite{Boehm88}.
+This work only examines unmanaged memory with \emph{explicit} deallocation.
+% While GC is not part of this work, some of the results are applicable to the allocation phase in any memory-management approach.
 
 Most programs use a general-purpose allocator, usually the one provided by the programming-language's runtime.
 In certain languages, programmers can write specialize allocators for specific needs.
-C and \CC allow easy replacement of the default memory allocator through a standard API.
-Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.
-As well, new languages support concurrency (kernel and/or user threading), which must be safely handled by the allocator.
-Hence, several alternative allocators exist for C/\CC with the goal of scaling in a multi-threaded program~\cite{Berger00,mtmalloc,streamflow,tcmalloc}.
+POSIX~\cite{POSIX17} provides for replacement of the default memory allocator in C and \CC through a standard API.
+Most industry JVMs provide multiple GCs, from which a user selects one for their workload.
+%Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.
+As well, new languages support concurrency (kernel/user threading), which must be safely handled by the allocator.
+Hence, alternative allocators exist for C/\CC with the goal of scaling in multi-threaded programs~\cite{Berger00,mtmalloc,streamflow,tcmalloc}.
 This work examines the design of high-performance allocators for use by kernel and user multi-threaded applications written in C/\CC.
 
@@ -283,75 +338,42 @@
 \label{s:Contributions}
 
-This work provides the following contributions in the area of explicit concurrent dynamic-allocation:
-\begin{enumerate}[leftmargin=*,itemsep=0pt]
-\item
-Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,400 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions for the concurrent languages \uC~\cite{uC++} and \CFA~\cite{Moss18,Delisle21} using user-level threads running on multiple kernel threads (M:N threading).
-
-\item
-Extend the standard C heap functionality by preserving with each allocation its request size, the amount allocated, whether it is zero fill, and its alignment.
+This work provides the following contributions to the area of explicit concurrent dynamic-allocation.
+\begin{enumerate}[leftmargin=18pt,topsep=3pt,itemsep=0pt]
+\item
+Implementation of a new stand-alone concurrent low-latency memory-allocator, called llheap~\cite{llheap}, ($\approx$1,500 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions for the concurrent languages \uC~\cite{uC++} and \CFA~\cite{Moss18,Delisle21} using user-level threads running on multiple kernel threads (M:N threading).
+
+\item
+Extend the C allocation API with new functions @aalloc@, @amemalign@, @cmemalign@, @resize@, @aligned_resize@, @aligned_realloc@, and @aligned_reallocarray@ to make allocation properties orthogonally accessible.
+
+\item
+Extend the C allocation semantics by preserving with each allocation its request size, the amount allocated, whether it is zero fill, and its alignment.
+
+\item
+Provide additional query operations @malloc_alignment@, @malloc_zero_fill@, and @malloc_size@ to access allocation information.
 
 \item
 Use the preserved zero fill and alignment as \emph{sticky} properties for @realloc@ and @reallocarray@ to zero-fill and align when storage is extended or copied.
-Without this extension, it is unsafe to @realloc@ storage these allocations if the properties are not preserved when copying.
+Without this extension, it is unsafe to @realloc@ storage if the properties are not preserved when copying.
 This silent problem is unintuitive to programmers and difficult to locate because it is transient.
 
 \item
-Provide additional heap operations to make allocation properties orthogonally accessible.
-\begin{itemize}[topsep=0pt,itemsep=0pt,parsep=0pt]
-\item
-@aalloc( dimension, elemSize )@ same as @calloc@ except memory is \emph{not} zero filled, which is significantly faster than @calloc@.
-\item
-@amemalign( alignment, dimension, elemSize )@ same as @aalloc@ with memory alignment.
-\item
-@cmemalign( alignment, dimension, elemSize )@ same as @calloc@ with memory alignment.
-\item
-@resize( oaddr, size )@ re-purpose an old allocation for a new type \emph{without} preserving fill or alignment.
-\item
-@aligned_resize( oaddr, alignment, size )@ re-purpose an old allocation with new alignment but \emph{without} preserving fill.
-\item
-@aligned_realloc( oaddr, alignment, size )@ same as @realloc@ but adding or changing alignment.
-\item
-@aligned_reallocarray( oaddr, alignment, dimension, elemSize )@ same as @reallocarray@ but adding or changing alignment.
-\end{itemize}
-
-\item
-Provide additional query operations to access information about an allocation:
-\begin{itemize}[topsep=0pt,itemsep=0pt,parsep=0pt]
-\item
-@malloc_alignment( addr )@ returns the alignment of the allocation.
-If the allocation is not aligned or @addr@ is @NULL@, the minimal alignment is returned.
-\item
-@malloc_zero_fill( addr )@ returns a boolean result indicating if the memory is allocated with zero fill, \eg by @calloc@/@cmemalign@.
-\item
-@malloc_size( addr )@ returns the size of the memory allocation.
-\item
-@malloc_usable_size( addr )@ returns the usable (total) size of the memory, \ie the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.
-\end{itemize}
-
-\item
-Provide optional extensive, fast, and contention-free allocation statistics to understand allocation behaviour, accessed by:
-\begin{itemize}[topsep=0pt,itemsep=0pt,parsep=0pt]
-\item
-@malloc_stats()@ print memory-allocation statistics on the file-descriptor set by @malloc_stats_fd@ (default @stderr@).
-\item
-@malloc_info( options, stream )@ print memory-allocation statistics as an XML string on the specified file-descriptor set by @malloc_stats_fd@ (default @stderr@).
-\item
-@malloc_stats_fd( fd )@ set file-descriptor number for printing memory-allocation statistics (default @stderr@).
-This file descriptor is used implicitly by @malloc_stats@ and @malloc_info@.
-\end{itemize}
-
-\item
-Provide extensive runtime checks to validate allocation operations and identify the amount of unfreed storage at program termination.
+Provide optional extensive, fast, and contention-free allocation statistics to understand allocation behaviour.
+
+\item
+Provide runtime checks to validate allocation operations and identify the amount of unfreed storage at program termination.
 
 \item
 Build 8 different versions of the allocator: static or dynamic linking, with or without statistics or debugging.
-A program may link to any of these 8 versions of the allocator often without recompilation (@LD_PRELOAD@).
-
-\item
-Provide additional heap wrapper functions in \CFA creating a more usable set of allocation operations and properties.
-
-\item
-A micro-benchmark test-suite for comparing allocators rather than relying on a suite of arbitrary programs.
-These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs
+A program may link to any of these 8 versions of the allocator often without recompilation (linking or @LD_PRELOAD@).
+
+\item
+Demonstrate how advanced programming-language type-systems can condense the allocation API providing a single type-safe allocation function using named arguments.
+
+\item
+Create a benchmark test-suite for comparing allocators, rather than relying on a suite of arbitrary programs.
+
+\item
+Run performance experiments using the new benchmark test-suite comparing llheap with six of the best allocators in use today.
+The goal is to demonstrate that llheap's performance, both in time and space, is comparable to the best allocators in use today.
 \end{enumerate}
 
@@ -359,12 +381,13 @@
 \section{Background}
 
-The following is a quick overview of allocator design options that affect memory usage and performance (see~\cite{Zulfiqar22} for more details).
-Dynamic acquires and releases obtain storage for a program variable, called an \newterm{object}, through calls such as @malloc@/@new@ and @free@/@delete@ in C/\CC.
+The following is an overview of allocator design options that affect memory usage and performance (see~\cite{Zulfiqar22} for more details).
+Dynamic acquires and releases obtain \newterm{object} storage via calls such as @malloc@/@new@ and @free@/@delete@ in C/\CC, respectively.
 A \newterm{memory allocator} contains a complex data-structure and code that manages the layout of objects in the dynamic-allocation zone.
-The management goals are to make allocation/deallocation operations as fast as possible while densely packing objects to make efficient use of memory.
-Since objects in C/\CC cannot be moved to aid the packing process, only adjacent free storage can be \newterm{coalesced} into larger free areas.
-The allocator grows or shrinks the dynamic-allocation zone to obtain storage for objects and reduce memory usage via OS calls, such as @mmap@ or @sbrk@ in UNIX.
-
-
+% The management goals are to make allocation/deallocation operations as fast as possible while densely packing objects to make efficient use of memory.
+Since objects in C/\CC cannot be moved, only adjacent free storage can be \newterm{coalesced} into larger free areas.
+The allocator grows or shrinks the dynamic-allocation zone to obtain storage for objects and reduce memory usage using \newterm{operating system} (OS) calls, such as @mmap@ or @sbrk@ in UNIX.
+
+
+\vspace*{-7pt}
 \subsection{Allocator Components}
 \label{s:AllocatorComponents}
@@ -373,10 +396,10 @@
 The \newterm{management data} is a data structure located at a known memory address and contains fixed-sized information in the static-data memory that references components in the dynamic-allocation memory.
 For multi-threaded programs, additional management data may exist in \newterm{thread-local storage} (TLS) for each kernel thread executing the program.
-The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}.
-Allocated objects (light grey) are variable sized, and are allocated and maintained by the program;
+The \newterm{storage data} is composed of allocated/freed objects, and \newterm{reserved memory}.
+Allocated objects (white) are variable sized, and are allocated and maintained by the program;
 \ie only the program knows the location of allocated storage.
-Freed objects (white) represent memory deallocated by the program, which are linked into one or more lists facilitating location of new allocations.
-Reserved memory (dark grey) is one or more blocks of memory obtained from the \newterm{operating system} (OS) but not yet allocated to the program;
-if there are multiple reserved blocks, they are also chained together.
+Freed objects (light grey) represent memory deallocated by the program, which are linked into one or more lists facilitating location for new allocations.
+Reserved memory (dark grey) is one or more blocks of memory obtained from the OS but not yet used by the program;
+if there are multiple reserved blocks, they are normally linked together.
 
 \begin{figure}
@@ -389,15 +412,16 @@
 In many allocator designs, allocated objects and reserved blocks have management data embedded within them (see also Section~\ref{s:ObjectContainers}).
 Figure~\ref{f:AllocatedObject} shows an allocated object with a header, trailer, and optional spacing around the object.
-The header contains information about the object, \eg size, type, etc.
-The trailer may be used to simplify coalescing and/or for security purposes to mark the end of an object.
+The header contains information about the object, \eg size, type, \etc.
+The trailer may be used to simplify coalescing and/or for safety purposes to mark the end of an object.
 An object may be preceded by padding to ensure proper alignment.
-Some algorithms quantize allocation requests, resulting in additional space after an object less than the quantized value.
+Some algorithms quantize allocation requests, resulting in additional space after an object.
 When padding and spacing are necessary, neither can be used to satisfy a future allocation request while the current allocation exists.
 
-A free object often contains management data, \eg size, pointers, etc.
-Often the free list is chained internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks.
-For internal chaining, the amount of management data for a free node defines the minimum allocation size, \eg if 16 bytes are needed for a free-list node, allocation requests less than 16 bytes are rounded up.
+A free object often contains management data, \eg size, pointers, \etc.
+Often the free list is linked internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks.
+For internal linking, the amount of management data for a free node defines the minimum allocation size, \eg if 16 bytes are needed for a free-list node, allocation requests less than 16 bytes are rounded up.
 Often the minimum storage alignment and free-node size are the same.
-The information in an allocated or freed object is overwritten when it transitions from allocated to freed and vice-versa by new program data and/or management information.
+The information in an allocated or freed object is overwritten when it transitions from allocated to freed and vice-versa by new program data and/or management information, respectively.
+For safety purposes, freed storage may be scrubbed (overwritten) to expose inadvertent bugs, such as assuming variables are zero initialized.
 
 \begin{figure}
@@ -406,22 +430,7 @@
 \caption{Allocated Object}
 \label{f:AllocatedObject}
-\end{figure}
-
-
-\subsection{Single-Threaded Memory-Allocator}
-\label{s:SingleThreadedMemoryAllocator}
-
-In a sequential (single threaded) program, the program thread performs all allocation operations and concurrency issues do not exist.
-However, interrupts logically introduce concurrency, if the signal handler performs allocation/deallocation (serially reusable problem~\cite{SeriallyReusable}).
-In general, the primary issues in a single-threaded allocator are fragmentation and locality.
-
-\subsubsection{Fragmentation}
-\label{s:Fragmentation}
-
-Fragmentation is memory requested from the OS but not used allocated objects in by the program.
-Figure~\ref{f:InternalExternalFragmentation} shows fragmentation is divided into two forms: \emph{internal} or \emph{external}.
-
-\begin{figure}
-\centering
+
+\bigskip
+
 \input{IntExtFragmentation}
 \caption{Internal and External Fragmentation}
@@ -429,60 +438,46 @@
 \end{figure}
 
-\newterm{Internal fragmentation} is unaccessible allocated memory, such as headers, trailers, padding, and spacing around an allocated object.
-Internal fragmentation is problematic when management space becomes a significant proportion of an allocated object, \eg for objects $<$16 bytes, memory usage doubles.
-An allocator strives to keep internal management information to a minimum.
+
+\subsection{Single-Threaded Memory-Allocator}
+\label{s:SingleThreadedMemoryAllocator}
+
+In a sequential (single threaded) program, the program thread performs all allocation operations without direct concurrency issues.
+However, interrupts introduce indirect concurrency, if the signal handler performs allocation/deallocation (serially reusable problem~\cite{SeriallyReusable}).
+In general, the primary issues in a single-threaded allocator are fragmentation and locality.
+
+
+\subsubsection{Fragmentation}
+\label{s:Fragmentation}
+
+Fragmentation is unused memory requested from the OS.
+Figure~\ref{f:InternalExternalFragmentation} shows fragmentation has two forms: \emph{internal} or \emph{external}.
+
+\newterm{Internal fragmentation} is inaccessible \emph{allocated} memory, such as headers, trailers, \etc.
+Internal fragmentation is problematic when management space approaches the object size, \eg for objects $<$16 bytes, memory usage doubles.
 
 \newterm{External fragmentation} is memory not allocated in the program~\cite{Wilson95,Lim98,Siebert00}, which includes all external management data, freed objects, and reserved memory.
-This memory is problematic in two ways: heap blowup and highly fragmented memory.
-\newterm{Heap blowup} occurs when freed memory cannot be reused for future allocations leading to potentially unbounded external fragmentation growth~\cite{Berger00}.
-Memory can become \newterm{highly fragmented} after multiple allocations and deallocations of objects, resulting in a checkerboard of adjacent allocated and free areas, where the free blocks are to small to service requests.
-% Figure~\ref{f:MemoryFragmentation} shows an example of how a small block of memory fragments as objects are allocated and deallocated over time.
-Heap blowup occurs with allocator policies that are too restrictive in reusing freed memory, \eg the allocated size cannot use a larger free block and/or no coalescing of free storage.
-% Blocks of free memory become smaller and non-contiguous making them less useful in serving allocation requests.
-% Memory is highly fragmented when most free blocks are unusable because of their sizes.
-% For example, Figure~\ref{f:Contiguous} and Figure~\ref{f:HighlyFragmented} have the same quantity of external fragmentation, but Figure~\ref{f:HighlyFragmented} is highly fragmented.
-% If there is a request to allocate a large object, Figure~\ref{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while Figure~\ref{f:HighlyFragmented} likely has to request more memory from the OS.
-
-% \begin{figure}
-% \centering
-% \input{MemoryFragmentation}
-% \caption{Memory Fragmentation}
-% \label{f:MemoryFragmentation}
-% \vspace{10pt}
-% \subfloat[Contiguous]{
-% 	\input{ContigFragmentation}
-% 	\label{f:Contiguous}
-% } % subfloat
-% 	\subfloat[Highly Fragmented]{
-% 	\input{NonContigFragmentation}
-% \label{f:HighlyFragmented}
-% } % subfloat
-% \caption{Fragmentation Quality}
-% \label{f:FragmentationQuality}
-% \end{figure}
-
-For a single-threaded memory allocator, three basic approaches for controlling fragmentation are identified~\cite{Johnstone99}.
-The first approach is a \newterm{sequential-fit algorithm} with one list of free objects that is searched for a block large enough to fit a requested object size.
-Different search policies determine the free object selected, \eg the first free object large enough or closest to the requested size.
+This memory is problematic resulting in heap blowup and fragmented memory.
+\newterm{Blowup} occurs when freed memory becomes a checkerboard of adjacent allocated and free areas, where the free blocks are too small to service requests, leading to unbounded external fragmentation growth~\cite{Berger00}.
+Heap blowup is a fundamental problem in unmanaged languages without compaction.
+
+Three basic approaches for controlling fragmentation are identified~\cite{Johnstone99}.
+The first approach is \newterm{sequential-fit} with a list of free objects (possibly ordered by size) that is searched for a block large enough to fit a requested object.
+Different search policies determine the free object selected, \eg the first free object large enough (first fit) or closest to the requested size (best fit).
 Any storage larger than the request can become spacing after the object or split into a smaller free object.
-% The cost of the search depends on the shape and quality of the free list, \eg a linear versus a binary-tree free-list, a sorted versus unsorted free-list.
-
-The second approach is a \newterm{segregated} or \newterm{binning algorithm} with a set of lists for different sized freed objects.
-When an object is allocated, the requested size is rounded up to the nearest bin-size, often leading to space after the object.
-A binning algorithm is fast at finding free memory of the appropriate size and allocating it, since the first free object on the free list is used.
-Fewer bin sizes means a faster search to find a matching bin, but larger differences between allocation and bin size, which increases unusable space after objects (internal fragmentation).
-More bin sizes means a slower search but smaller differences matching between allocation and bin size resulting in less internal fragmentation but more external fragmentation if larger bins cannot service smaller requests.
-Allowing larger bins to service smaller allocations when the matching bin is empty means the freed object can be returned to the matching or larger bin (some advantages to either scheme).
-% For example, with bin sizes of 8 and 16 bytes, a request for 12 bytes allocates only 12 bytes, but when the object is freed, it is placed on the 8-byte bin-list.
-% For subsequent requests, the bin free-lists contain objects of different sizes, ranging from one bin-size to the next (8-16 in this example), and a sequential-fit algorithm may be used to find an object large enough for the requested size on the associated bin list.
-
-The third approach is a \newterm{splitting} and \newterm{coalescing} algorithms.
-When an object is allocated, if there is no matching free storage, a larger free object is split into two smaller objects, one matching the allocation size.
+
+The second approach is \newterm{segregation} or \newterm{binning} with a set of lists for different sized freed objects.
+The request size is rounded up to the nearest bin size, often leading to internal fragmentation after the object.
+A binning algorithm searches for the smallest bin that covers the request, and selects the first free object, if available.
+Fewer bin sizes means more internal fragmentation but increased reuse as more request sizes match the bin size.
+More bin sizes have less internal fragmentation but more external fragmentation as larger bins cannot service smaller requests.
+Allowing larger bins to service smaller allocations means the freed object can be returned to the matching or larger bin (some advantages to either scheme).
+
+The third approach is \newterm{splitting} and \newterm{coalescing}.
+If there is no matching free storage for allocation, a larger free object is split to get the allocation and the smaller object is put back on the free list.
 For example, in the \newterm{buddy system}, a block of free memory is split into equal chunks, splitting continues until a minimal block is created that fits the allocation.
-When an object is deallocated, it is coalesced with the objects immediately before/after it in memory, if they are free, turning them into a larger block.
+When an object is deallocated, it is coalesced with the objects immediately before/after it in memory, if they are free, creating a larger block.
 Coalescing can be done eagerly at each deallocation or lazily when an allocation cannot be fulfilled.
 However, coalescing increases allocation latency (unbounded delays), both for allocation and deallocation.
 While coalescing does not reduce external fragmentation, the coalesced blocks improve fragmentation quality so future allocations are less likely to cause heap blowup.
-% Splitting and coalescing can be used with other algorithms to avoid highly fragmented memory.
 
 
@@ -495,8 +490,8 @@
 Hardware takes advantage of the working set through multiple levels of caching and paging, \ie memory hierarchy.
 % When an object is accessed, the memory physically located around the object is also cached with the expectation that the current and nearby objects will be referenced within a short period of time.
-For example, entire cache lines are transferred between cache and memory, and entire virtual-memory pages are transferred between memory and disk.
+% For example, entire cache lines are transferred between cache and memory, and entire virtual-memory pages are transferred between memory and disk.
 % A program exhibiting good locality has better performance due to fewer cache misses and page faults\footnote{With the advent of large RAM memory, paging is becoming less of an issue in modern programming.}.
 
-Temporal locality is largely controlled by program accesses to its variables~\cite{Feng05}.
+Temporal locality is largely controlled by program accesses to variables~\cite{Feng05}.
 An allocator has only indirect influence on temporal locality but largely dictates spatial locality.
 For temporal locality, an allocator tries to return recently freed storage for new allocations, as this memory is still \emph{warm} in the memory hierarchy.
@@ -506,5 +501,5 @@
 
 An allocator can easily degrade locality by increasing the working set.
-An allocator can access an unbounded number of free objects when matching an allocation or coalescing, causing multiple cache or page misses~\cite{Grunwald93}.
+For example, it can access an unbounded number of free objects when matching an allocation or coalescing, causing multiple cache or page misses~\cite{Grunwald93}.
 An allocator can spatially separate related data by binning free storage anywhere in memory, so the related objects are highly separated.
 
@@ -513,6 +508,6 @@
 \label{s:MultiThreadedMemoryAllocator}
 
-In a concurrent (multi-threaded) program, multiple program threads performs allocation operations and all concurrency issues arise.
-Along with fragmentation and locality issues, a multi-threaded allocator must deal with mutual exclusion, false sharing, and additional forms of heap blowup.
+In a concurrent program, multiple kernel threads (KT) perform allocations, requiring some form of mutual exclusion.
+Along with fragmentation and locality issues, a multi-threaded allocator must deal with false sharing and additional forms of heap blowup.
 
 
@@ -520,49 +515,19 @@
 \label{s:MutualExclusion}
 
-\newterm{Mutual exclusion} provides sequential access to the shared-management data of the heap.
+% \newterm{Mutual exclusion} provides sequential access to the shared-management data of the heap.
 There are two performance issues for mutual exclusion.
-First is the cost of performing at least one hardware atomic operation every time a shared resource is accessed.
-Second is \emph{contention} on simultaneous access, so some threads must wait until the resource is released.
-Contention can be reduced in a number of ways:
-1) Using multiple fine-grained locks versus a single lock to spread the contention across the locks.
+First, the cost of performing atomic instructions every time a shared resource is accessed to provide mutual exclusion.
+Solutions using any atomic fence, atomic instruction (lock free), or lock along a fast path, even with zero contention, result in significant slowdown.
+Second, \newterm{contention} on simultaneous access, so threads must wait until the resource is released.
+Contention can be reduced by:
+1) Using multiple fine-grained locks versus few coarse-grained locks to spread the contention.
 2) Using trylock and generating new storage if the lock is busy (classic space versus time tradeoff).
-3) Using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Oyama99}.
-However, all approaches have degenerate cases where program contention to the heap is high, which is beyond the allocator's control.
-
-
-\subsubsection{False Sharing}
-\label{s:FalseSharing}
-
-False sharing occurs when two or more threads simultaneously modify different objects sharing a cache line.
-Changes now invalidate each thread's cache, even though the threads may be uninterested in the other modified object.
-False sharing can occur three ways:
-1) Thread T$_1$ allocates objects O$_1$ and O$_2$ on the same cache line and passes O$_2$'s reference to thread T$_2$;
-both threads now simultaneously modifying the objects on the same cache line.
-2) Objects O$_1$ and O$_2$ are allocated on the same cache line by thread T$_3$ and their references are passed to T$_1$ and T$_2$, which simultaneously modify the objects.
-3) T$_2$ deallocates O$_2$, T$_1$ allocates O$_1$ on the same cache line as O$_2$, and T$_2$ reallocated O$_2$ while T$_1$ is using O$_1$.
-In all three cases, the allocator performs a hidden and possibly transient (non-determinism) operation, making it extremely difficult to find and fix the issue.
-
-
-\subsubsection{Heap Blowup}
-\label{s:HeapBlowup}
-
-In a multi-threaded program, heap blowup occurs when memory freed by one thread is inaccessible to other threads due to the allocation strategy.
-Specific examples are presented in later subsections.
-
-
-\subsection{Multi-Threaded Allocator Features}
-\label{s:MultiThreadedAllocatorFeatures}
-
-The following features are used in the construction of multi-threaded allocators.
-
-\subsubsection{Multiple Heaps}
-\label{s:MultipleHeaps}
-
-Figure~\ref{f:ThreadHeapRelationship} shows how a multi-threaded allocator reduced contention by subdividing a single heap into multiple heaps.
+% 3) Using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Fatourou12}.
+% However, all approaches have degenerate cases where program contention to the heap is high, which is beyond the allocator's control.
+Figure~\ref{f:ThreadHeapRelationship} shows how a multi-threaded allocator reduces contention by subdividing a single heap into multiple heaps.
 
 \begin{figure}
 \centering
 \subfloat[T:1]{
-%	\input{SingleHeap.pstex_t}
 	\input{SingleHeap}
 	\label{f:SingleHeap}
@@ -570,5 +535,4 @@
 \vrule
 \subfloat[T:H]{
-%	\input{MultipleHeaps.pstex_t}
 	\input{SharedHeaps}
 	\label{f:SharedHeaps}
@@ -576,5 +540,4 @@
 \vrule
 \subfloat[1:1]{
-%	\input{MultipleHeapsGlobal.pstex_t}
 	\input{PerThreadHeap}
 	\label{f:PerThreadHeap}
@@ -586,105 +549,74 @@
 \begin{description}[leftmargin=*]
 \item[T:1 model (Figure~\ref{f:SingleHeap})] is all threads (T) sharing a single heap (1).
-The arrows indicate memory movement for allocation/deallocation operations.
-Memory is obtained from freed objects, reserved memory, or the OS;
-freed memory can be returned to the OS.
-To handle concurrency, a single lock is used for all heap operations or fine-grained locking if operations can be made independent.
+% The arrows indicate memory movement for allocation/deallocation operations.
+% Memory is obtained from freed objects, reserved memory, or the OS;
+% freed memory can be returned to the OS.
+To handle concurrency, a single lock is used for all heap operations or fine-grained (lock-free) locking if operations can be made independent.
 As threads perform large numbers of allocations, a single heap becomes a significant source of contention.
 
 \item[T:H model (Figure~\ref{f:SharedHeaps})] is multiple threads (T) sharing multiple heaps (H).
-The allocator independently allocates/deallocates heaps and assigns threads to heaps based on dynamic contention pressure.
-Locking is required within each heap, but contention is reduced because fewer threads access a specific heap.
-The goal is minimal heaps (storage) and contention per heap (time).
-A worst case is more heaps than threads, \eg many threads at startup create a large number of heaps and then the threads reduce.
-
-% For example, multiple heaps are managed in a pool, starting with a single or a fixed number of heaps that increase\-/decrease depending on contention\-/space issues.
-% At creation, a thread is associated with a heap from the pool.
-% In some implementations of this model, when the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.
-% If an unlocked heap is found, the thread changes its association and uses that heap.
-% If all heaps are locked, the thread may create a new heap, use it, and then place the new heap into the pool;
-% or the thread can block waiting for a heap to become available.
-% While the heap-pool approach often minimizes the number of extant heaps, the worse case can result in more heaps than threads;
-% \eg if the number of threads is large at startup with many allocations creating a large number of heaps and then the number of threads reduces.
-
-% Threads using multiple heaps need to determine the specific heap to access for an allocation/deallocation, \ie association of thread to heap.
-% A number of techniques are used to establish this association.
-% The simplest approach is for each thread to have a pointer to its associated heap (or to administrative information that points to the heap), and this pointer changes if the association changes.
-% For threading systems with thread-local storage, the heap pointer is created using this mechanism;
-% otherwise, the heap routines must simulate thread-local storage using approaches like hashing the thread's stack-pointer or thread-id to find its associated heap.
-
-% The storage management for multiple heaps is more complex than for a single heap (see Figure~\ref{f:AllocatorComponents}).
-% Figure~\ref{f:MultipleHeapStorage} illustrates the general storage layout for multiple heaps.
-% Allocated and free objects are labelled by the thread or heap they are associated with.
-% (Links between free objects are removed for simplicity.)
-% The management information for multiple heaps in the static zone must be able to locate all heaps.
-% The management information for the heaps must reside in the dynamic-allocation zone if there are a variable number.
-% Each heap in the dynamic zone is composed of a list of free objects and a pointer to its reserved memory.
-% An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory.
-% Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur.
-% Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area, pushing part of the storage management complexity back to the OS.
-
-% \begin{figure}
-% \centering
-% \input{MultipleHeapsStorage}
-% \caption{Multiple-Heap Storage}
-% \label{f:MultipleHeapStorage}
-% \end{figure}
-
-Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup.
-The external fragmentation experienced by a program with a single heap is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory.
-Additionally, objects freed by one heap cannot be reused by other threads without increasing the cost of the memory operations, except indirectly by returning free memory to the OS (see Section~\ref{s:Ownership}).
-Returning storage to the OS may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.
+The allocator allocates/deallocates heaps and assigns threads to heaps often based on dynamic contention pressure.
+While locking is required for heap access, contention is (normally) reduced as access is spread across the heaps.
+Locking can be reduced (eliminated) using the T:C variant, \ie each CPU has a heap, and a thread cannot migrate from the CPU if executing an allocator critical-section, implemented with restartable critical sections~\cite{Desnoyers19,Dice02} (see also Section~\ref{s:UserlevelThreadingSupport}).
+% The goal is minimal heaps (storage) and contention per heap (time).
+Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup, where the worst-case scenario is more heaps than threads.
+The external fragmentation is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory.
+When freeing, objects normally need to be returned to their original heap (see Section~\ref{s:Ownership}).
+% Returning storage to the OS may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.
 % In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused.
 
-Adding a \newterm{global heap} (G) attempts to reduce the cost of obtaining/returning memory among heaps (sharing) by buffering storage within the application address-space.
-Now, each heap obtains and returns storage to/from the global heap rather than the OS.
-Storage is obtained from the global heap only when a heap allocation cannot be fulfilled, and returned to the global heap when a heap's free memory exceeds some threshold.
-Similarly, the global heap buffers this memory, obtaining and returning storage to/from the OS as necessary.
-The global heap does not have its own thread and makes no internal allocation requests;
-instead, it uses the application thread, which called one of the multiple heaps and then the global heap, to perform operations.
-Hence, the worst-case cost of a memory operation includes all these steps.
-With respect to heap blowup, the global heap provides an indirect mechanism to move free memory among heaps, which usually has a much lower cost than interacting with the OS to achieve the same goal and is independent of the mechanism used by the OS to present dynamic memory to an address space.
-However, since any thread may indirectly perform a memory operation on the global heap, it is a shared resource that requires locking.
-A single lock can be used to protect the global heap or fine-grained locking can be used to reduce contention.
-In general, the cost is minimal since the majority of memory operations are completed without the use of the global heap.
-
-\item[1:1 model (Figure~\ref{f:PerThreadHeap})] is each thread (1) has a heap (1), eliminating most contention and locking if threads seldom access another thread's heap (see Section~\ref{s:Ownership}).
+A shared \newterm{global heap} (G) is often introduced to manage the reserved memory among heaps and centralize interactions with the OS.
+Instead of heaps making individual object allocations/deallocations through the global heap, resulting in locking and high contention, the global heap partitions the reserved memory into heap (allocation) buffers, which are given out to heaps for their own suballocations.
+Hence, a heap's allocations are temporally and spatially accessed densely in a small set of buffers, rather than spread sparsely across the entire reserved memory.
+Buffers are allocated at heap startup, after which allocation often reaches a steady state through free lists.
+Allocation buffers may increase external fragmentation, since some memory may never be used.
+
+\item[1:1 model (Figure~\ref{f:PerThreadHeap})] is each thread (1) having its own heap (1), eliminating most contention and locking if threads seldom access another thread's heap (see Section~\ref{s:Ownership}).
 A thread's objects are consolidated in its heap, better utilizing the cache and paging during thread execution.
 In contrast, the T:H model can spread thread objects over a larger area in different heaps.
-Thread heaps can also reduces false-sharing, unless there are overlapping memory boundaries from another thread's heap.
 %For example, assume page boundaries coincide with cache line boundaries, if a thread heap always acquires pages of memory then no two threads share a page or cache line unless pointers are passed among them.
-
 When a thread terminates, it can free its heap objects to the global heap, or the thread heap is retained as-is and reused for a new thread in the future.
 Destroying a heap can reduce external fragmentation sooner, since all free objects in the global heap are available for immediate reuse.
-Alternatively, reusing a heap can aid the inheriting thread, if it has a similar allocation pattern because the heap in primed with unfreed storage of the right sizes.
+Alternatively, reusing a heap can aid the inheriting thread, if it has a similar allocation pattern, because the heap is primed with freed storage of the right sizes.
 \end{description}
 
 
-\subsubsection{User-Level Threading}
-
-It is possible to use any of the heap models with user-level (M:N) threading.
-However, an important goal of user-level threading is for fast operations (creation/termination/context-switching) by not interacting with the OS, which allows the ability to create large numbers of high-performance interacting threads ($>$ 10,000).
-It is difficult to retain this goal, if the user-threading model is directly involved with the heap model.
-Figure~\ref{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model is provided by the language runtime.
-Hence, a user thread allocates/deallocates from/to the heap of the kernel thread on which it is currently executing.
+\subsubsection{False Sharing}
+\label{s:FalseSharing}
+
+False sharing occurs for a read/write or write/write among threads modifying different memory sharing a cache line~\cite{Bolosky93}.
+The write invalidates each thread's cache, even though the threads may be uninterested in the other modified object.
+False sharing can occur three ways:
+1) Thread T$_1$ allocates objects O$_1$ and O$_2$ on the same cache line and passes O$_2$'s reference to thread T$_2$.
+2) Thread T$_1$ allocates object O$_1$ and thread T$_2$ allocates O$_2$, where objects O$_1$ and O$_2$ are on the same cache line.
+3) T$_2$ deallocates O$_2$, T$_1$ allocates O$_1$ on the same cache line as O$_2$, and T$_2$ reallocates O$_2$ while T$_1$ is using O$_1$.
+In all three cases, the false sharing is hidden and possibly transient (non-deterministic), making it extremely difficult to find and fix.
+Case 1) occurs in all three allocator models, and is induced by program behaviour, not the allocator.
+Cases 2) and 3) are allocator induced, and occur in T:1 and T:H models due to heap sharing, but not 1:1 with private heaps, except possibly at boundary points among heaps.
+
+
+\subsubsection{Object Containers}
+\label{s:ObjectContainers}
+
+Associating header data with every allocation can result in significant internal fragmentation, as shown in Figure~\ref{f:AllocatedObject}.
+While the header and object are spatially together in memory, they are generally not accessed temporally together~\cite{Feng05}.
+The result is poor cache usage, since only a portion of the cache line is holding useful data from the program's perspective.
+% \eg an object is accessed by the program after it is allocated, while the header is accessed by the allocator after it is free.
 
 \begin{figure}
 \centering
-\input{UserKernelHeaps}
-\caption{User-Level Kernel Heaps}
-\label{f:UserLevelKernelHeaps}
+\input{Container}
+\caption{Object Container}
+\label{f:ObjectContainer}
 \end{figure}
 
-Adopting user threading results in a subtle problem with shared heaps.
-With kernel threading, an operation started by a kernel thread is always completed by that thread.
-For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap, even if preempted, \ie any locking correctness associated with the shared heap is preserved across preemption.
-However, this correctness property is not preserved for user-level threading.
-A user thread can start an allocation/deallocation on one kernel thread, be preempted (time slice), and continue running on a different kernel thread to complete the operation~\cite{Dice02}.
-When the user thread continues on the new kernel thread, it may have pointers into the previous kernel-thread's heap and hold locks associated with it.
-To get the same kernel-thread safety, time slicing must be disabled/\-enabled around these operations, so the user thread cannot jump to another kernel thread.
-However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is infrequent (milliseconds).
-Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically.
-Occasional ignoring of a preemption should be benign, but a persistent lack of preemption can result in starvation;
-techniques like rolling forward the preemption to the next context switch can be used.
+The alternative approach factors common header data to a separate location in memory and organizes associated free storage into blocks called \newterm{object containers} (\newterm{superblocks}~\cite[\S~3]{Berger00}) suballocated from a heap's allocation buffers, as in Figure~\ref{f:ObjectContainer}.
+A trailer may also be used at the end of the container.
+To find the header from an allocation within the container, the container is aligned on a power of 2 boundary and the lower bits of the object address are truncated (or rounded up, minus the trailer size, to obtain the trailer address).
+Container size is a tradeoff between internal and external fragmentation as some portion of a container may not be used and this portion is unusable for other kinds of allocations.
+A consequence of this tradeoff is its effect on spatial locality, which can produce positive or negative results depending on the program's access patterns.
+Normally, heap ownership applies to its containers.
+Without ownership, different objects in a container may be on different heap free-lists.
+Finally, containers are linked together for management purposes, and should all objects in a container become free, the container can be repurposed for different sized objects or given to another heap through a global heap.
 
 
@@ -692,464 +624,64 @@
 \label{s:Ownership}
 
-\newterm{Ownership} defines which heap an object is returned-to on deallocation.
-If a thread returns an object to the heap it was originally allocated from, a heap has ownership of its objects.
+Object \newterm{ownership} is defined as the heap to which an object is returned upon deallocation~\cite[\S~6.1]{Berger00}.
+If a thread returns an object to its originating heap, a heap has ownership of its objects.
+Containers force ownership of internal contiguous objects, unless the entire container changes ownership after it becomes empty.
 Alternatively, a thread can return an object to the heap it is currently associated with, which can be any heap accessible during a thread's lifetime.
-Figure~\ref{f:HeapsOwnership} shows an example of multiple heaps (minus the global heap) with and without ownership.
-Again, the arrows indicate the direction memory conceptually moves for each kind of operation.
-For the 1:1 thread:heap relationship, a thread only allocates from its own heap, and without ownership, a thread only frees objects to its own heap, which means the heap is private to its owner thread and does not require any locking, called a \newterm{private heap}.
-For the T:1/T:H models with or without ownership or the 1:1 model with ownership, a thread may free objects to different heaps, which makes each heap publicly accessible to all threads, called a \newterm{public heap}.
-
-\begin{figure}
-\centering
-\subfloat[Ownership]{
-	\input{MultipleHeapsOwnership}
-} % subfloat
-\hspace{0.25in}
-\subfloat[No Ownership]{
-	\input{MultipleHeapsNoOwnership}
-} % subfloat
-\caption{Heap Ownership}
-\label{f:HeapsOwnership}
-\end{figure}
-
-% Figure~\ref{f:MultipleHeapStorageOwnership} shows the effect of ownership on storage layout.
-% (For simplicity, assume the heaps all use the same size of reserves storage.)
-% In contrast to Figure~\ref{f:MultipleHeapStorage}, each reserved area used by a heap only contains free storage for that particular heap because threads must return free objects back to the owner heap.
-% Passive false-sharing may still occur, if delayed ownership is used (see below).
-
-% \begin{figure}
-% \centering
-% \input{MultipleHeapsOwnershipStorage.pstex_t}
-% \caption{Multiple-Heap Storage with Ownership}
-% \label{f:MultipleHeapStorageOwnership}
-% \end{figure}
-
-The main advantage of ownership is preventing heap blowup by returning storage for reuse by the owner heap.
-Ownership prevents the classical problem where one thread performs allocations from one heap, passes the object to another thread, and the receiving thread deallocates the object to another heap, hence draining the initial heap of storage.
-Because multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
-The exception is for the 1:1 model if reserved memory does not overlap a cache-line because all allocated storage within a used area is associated with a single thread.
-In this case, there is no allocator-induced active false-sharing because two adjacent allocated objects used by different threads cannot share a cache-line.
-Finally, there is no allocator-induced passive false-sharing because two adjacent allocated objects used by different threads cannot occur as free objects are returned to the owner heap.
-% For example, in Figure~\ref{f:AllocatorInducedPassiveFalseSharing}, the deallocation by Thread$_2$ returns Object$_2$ back to Thread$_1$'s heap;
-% hence a subsequent allocation by Thread$_2$ cannot return this storage.
-The disadvantage of ownership is deallocating to another thread's heap so heaps are no longer private and require locks to provide safe concurrent access.
+The advantage of ownership is preventing heap blowup by returning storage for reuse by the owner heap.
+Ownership prevents the problem of a producer thread allocating from one heap, passing the object to a consumer thread, and the consumer deallocates the object to another heap, hence draining the producer heap of storage.
+The disadvantage of ownership is that deallocating to another thread's heap requires an atomic operation.
 
 Object ownership can be immediate or delayed, meaning free objects may be batched on a separate free list either by the returning or receiving thread.
-While the returning thread can batch objects, batching across multiple heaps is complex and there is no obvious time when to push back to the owner heap.
-It is better for returning threads to immediately return to the receiving thread's batch list as the receiving thread has better knowledge when to incorporate the batch list into its free pool.
-Batching leverages the fact that most allocation patterns use the contention-free fast-path, so locking on the batch list is rare for both the returning and receiving threads.
-Finally, it is possible for heaps to temporarily steal owned objects rather than return them immediately and then reallocate these objects again.
-It is unclear whether the complexity of this approach is worthwhile.
-% However, stealing can result in passive false-sharing.
-% For example, in Figure~\ref{f:AllocatorInducedPassiveFalseSharing}, Object$_2$ may be deallocated to Thread$_2$'s heap initially.
-% If Thread$_2$ reallocates Object$_2$ before it is returned to its owner heap, then passive false-sharing may occur.
-
-For thread heaps with ownership, it is possible to combine these approaches into a hybrid approach with both private and public heaps.% (see~Figure~\ref{f:HybridPrivatePublicHeap}).
-The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup.
-In the hybrid approach, a thread first allocates from its private heap and second from its public heap if no free memory exists in the private heap.
-Similarly, a thread first deallocates an object to its private heap, and second to the public heap.
-Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps.
-% Note, deallocation from the private to the public (dashed line) is unlikely because there is no obvious advantages unless the public heap provides the only interface to the global heap.
-Finally, when a thread frees an object it does not own, the object is either freed immediately to its owner's public heap or put in the freeing thread's private heap for delayed ownership, which does allows the freeing thread to temporarily reuse an object before returning it to its owner or batch objects for an owner heap into a single return.
-
-% \begin{figure}
-% \centering
-% \input{PrivatePublicHeaps.pstex_t}
-% \caption{Hybrid Private/Public Heap for Per-thread Heaps}
-% \label{f:HybridPrivatePublicHeap}
-% \vspace{10pt}
-% \input{RemoteFreeList.pstex_t}
-% \caption{Remote Free-List}
-% \label{f:RemoteFreeList}
-% \end{figure}
-
-% As mentioned, an implementation may have only one heap interact with the global heap, so the other heap can be simplified.
-% For example, if only the private heap interacts with the global heap, the public heap can be reduced to a lock-protected free-list of objects deallocated by other threads due to ownership, called a \newterm{remote free-list}.
-% To avoid heap blowup, the private heap allocates from the remote free-list when it reaches some threshold or it has no free storage.
-% Since the remote free-list is occasionally cleared during an allocation, this adds to that cost.
-% Clearing the remote free-list is $O(1)$ if the list can simply be added to the end of the private-heap's free-list, or $O(N)$ if some action must be performed for each freed object.
- 
-% If only the public heap interacts with other threads and the global heap, the private heap can handle thread-local allocations and deallocations without locking.
-% In this scenario, the private heap must deallocate storage after reaching a certain threshold to the public heap (and then eventually to the global heap from the public heap) or heap blowup can occur.
-% If the public heap does the major management, the private heap can be simplified to provide high-performance thread-local allocations and deallocations.
- 
-% The main disadvantage of each thread having both a private and public heap is the complexity of managing two heaps and their interactions in an allocator.
-% Interestingly, heap implementations often focus on either a private or public heap, giving the impression a single versus a hybrid approach is being used.
-% In many case, the hybrid approach is actually being used, but the simpler heap is just folded into the complex heap, even though the operations logically belong in separate heaps.
-% For example, a remote free-list is actually a simple public-heap, but may be implemented as an integral component of the complex private-heap in an allocator, masking the presence of a hybrid approach.
-
-
-\begin{figure}
-\centering
-\subfloat[Object Headers]{
-	\input{ObjectHeaders}
-	\label{f:ObjectHeaders}
-} % subfloat
-\subfloat[Object Container]{
-	\input{Container}
-	\label{f:ObjectContainer}
-} % subfloat
-\caption{Header Placement}
-\label{f:HeaderPlacement}
-\end{figure}
-
-
-\subsubsection{Object Containers}
-\label{s:ObjectContainers}
-
-Associating header data with every allocation can result in significant internal fragmentation, as shown in Figure~\ref{f:ObjectHeaders}.
-Especially if the headers contain redundant data, \eg object size may be the same for many objects because programs only allocate a small set of object sizes.
-As well, the redundant data can result in poor cache usage, since only a portion of the cache line is holding useful data from the program's perspective.
-Spatial locality can also be negatively affected leading to poor cache locality~\cite{Feng05}.
-While the header and object are spatially together in memory, they are generally not accessed temporarily together;
-\eg an object is accessed by the program after it is allocated, while the header is accessed by the allocator after it is free.
-
-An alternative approach factors common header data to a separate location in memory and organizes associated free storage into blocks called \newterm{object containers} (\newterm{superblocks}~\cite{Berger00}), as in Figure~\ref{f:ObjectContainer}.
-The header for the container holds information necessary for all objects in the container;
-a trailer may also be used at the end of the container.
-Similar to the approach described for thread heaps in Section~\ref{s:MultipleHeaps}, if container boundaries do not overlap with memory of another container at crucial boundaries and all objects in a container are allocated to the same thread, allocator-induced active false-sharing is avoided.
-
-The difficulty with object containers lies in finding the object header/trailer given only the object address, since that is normally the only information passed to the deallocation operation.
-One way is to start containers on aligned addresses in memory, then truncate the lower bits of the object address to obtain the header address (or round up and subtract the trailer size to obtain the trailer address).
-For example, if an object at address 0xFC28\,EF08 is freed and containers are aligned on 64\,KB (0x0001\,0000) addresses, then the container header is at 0xFC28\,0000.
-
-Normally, a container has homogeneous objects, \eg object size and ownership.
-This approach greatly reduces internal fragmentation since far fewer headers are required, and potentially increases spatial locality as a cache line or page holds more objects since the objects are closer together.
-However, different sized objects are further apart in separate containers.
-Depending on the program, this may or may not improve locality.
-If the program uses several objects from a small number of containers in its working set, then locality is improved since fewer cache lines and pages are required.
-If the program uses many containers, there is poor locality, as both caching and paging increase.
-Another drawback is that external fragmentation may be increased since containers reserve space for objects that may never be allocated, \ie there are often multiple containers for each size only partially full.
-However, external fragmentation can be reduced by using small containers.
-
-Containers with heterogeneous objects implies different headers describing them, which complicates the problem of locating a specific header solely by an address.
-A couple of solutions can be used to implement containers with heterogeneous objects.
-However, the problem with allowing objects of different sizes is that the number of objects, and therefore headers, in a single container is unpredictable.
-One solution allocates headers at one end of the container, while allocating objects from the other end of the container;
-when the headers meet the objects, the container is full.
-Freed objects cannot be split or coalesced since this causes the number of headers to change.
-The difficulty in this strategy remains in finding the header for a specific object;
-in general, a search is necessary to find the object's header among the container headers.
-A second solution combines the use of container headers and individual object headers.
-Each object header stores the object's heterogeneous information, such as its size, while the container header stores the homogeneous information, such as the owner when using ownership.
-This approach allows containers to hold different types of objects, but does not completely separate headers from objects.
-% The benefit of the container in this case is to reduce some redundant information that is factored into the container header.
-
-% In summary, object containers trade off internal fragmentation for external fragmentation by isolating common administration information to remove/reduce internal fragmentation, but at the cost of external fragmentation as some portion of a container may not be used and this portion is unusable for other kinds of allocations.
-% A consequence of this tradeoff is its effect on spatial locality, which can produce positive or negative results depending on program access-patterns.
-
-
-\paragraph{Container Ownership}
-\label{s:ContainerOwnership}
-
-Without ownership, objects in a container are deallocated to the heap currently associated with the thread that frees the object.
-Thus, different objects in a container may be on different heap free-lists. % (see Figure~\ref{f:ContainerNoOwnershipFreelist}).
-With ownership, all objects in a container belong to the same heap,
-% (see Figure~\ref{f:ContainerOwnershipFreelist}),
-so ownership of an object is determined by the container owner.
-If multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
-Only with the 1:1 model and ownership is active and passive false-sharing avoided (see Section~\ref{s:Ownership}).
-Passive false-sharing may still occur, if delayed ownership is used.
-Finally, a completely free container can become reserved storage and be reset to allocate objects of a new size or freed to the global heap.
-
-% \begin{figure}
-% \centering
-% \subfloat[No Ownership]{
-% 	\input{ContainerNoOwnershipFreelist}
-% 	\label{f:ContainerNoOwnershipFreelist}
-% } % subfloat
-% \vrule
-% \subfloat[Ownership]{
-% 	\input{ContainerOwnershipFreelist}
-% 	\label{f:ContainerOwnershipFreelist}
-% } % subfloat
-% \caption{Free-list Structure with Container Ownership}
-% \end{figure}
-
-When a container changes ownership, the ownership of all objects within it change as well.
-Moving a container involves moving all objects on the heap's free-list in that container to the new owner.
-This approach can reduce contention for the global heap, since each request for objects from the global heap returns a container rather than individual objects.
-
-Additional restrictions may be applied to the movement of containers to prevent active false-sharing.
-For example, if a container changes ownership through the global heap, then a thread allocating from the newly acquired container is actively false-sharing even though no objects are passed among threads.
-Note, once the thread frees the object, no more false sharing can occur until the container changes ownership again.
-To prevent this form of false sharing, container movement may be restricted to when all objects in the container are free.
-One implementation approach that increases the freedom to return a free container to the OS involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area, again pushing storage management complexity back to the OS.
-
-% \begin{figure}
-% \centering
-% \subfloat[]{
-% 	\input{ContainerFalseSharing1}
-% 	\label{f:ContainerFalseSharing1}
-% } % subfloat
-% \subfloat[]{
-% 	\input{ContainerFalseSharing2}
-% 	\label{f:ContainerFalseSharing2}
-% } % subfloat
-% \caption{Active False-Sharing using Containers}
-% \label{f:ActiveFalseSharingContainers}
-% \end{figure}
-
-Using containers with ownership increases external fragmentation since a new container for a requested object size must be allocated separately for each thread requesting it.
-% In Figure~\ref{f:ExternalFragmentationContainerOwnership}, using object ownership allocates 80\% more space than without ownership.
-
-% \begin{figure}
-% \centering
-% \subfloat[No Ownership]{
-% 	\input{ContainerNoOwnership}
-% } % subfloat
-% \\
-% \subfloat[Ownership]{
-% 	\input{ContainerOwnership}
-% } % subfloat
-% \caption{External Fragmentation with Container Ownership}
-% \label{f:ExternalFragmentationContainerOwnership}
-% \end{figure}
-
-
-\paragraph{Container Size}
-\label{s:ContainerSize}
-
-One way to control the external fragmentation caused by allocating a large container for a small number of requested objects is to vary the size of the container.
-As described earlier, container boundaries need to be aligned on addresses that are a power of two to allow easy location of the header (by truncating lower bits).
-Aligning containers in this manner also determines the size of the container.
-However, the size of the container has different implications for the allocator.
-
-The larger the container, the fewer containers are needed, and hence, the fewer headers need to be maintained in memory, improving both internal fragmentation and potentially performance.
-However, with more objects in a container, there may be more objects that are unallocated, increasing external fragmentation.
-With smaller containers, not only are there more containers, but a second new problem arises where objects are larger than the container.
-In general, large objects, \eg greater than 64\,KB, are allocated directly from the OS and are returned immediately to the OS to reduce long-term external fragmentation.
-If the container size is small, \eg 1\,KB, then a 1.5\,KB object is treated as a large object, which is likely to be inappropriate.
-Ideally, it is best to use smaller containers for smaller objects, and larger containers for medium objects, which leads to the issue of locating the container header.
-
-In order to find the container header when using different sized containers, a super container is used (see~Figure~\ref{f:SuperContainers}).
-The super container spans several containers, contains a header with information for finding each container header, and starts on an aligned address.
-Super-container headers are found using the same method used to find container headers by dropping the lower bits of an object address.
-The containers within a super container may be different sizes or all the same size.
-If the containers in the super container are different sizes, then the super-container header must be searched to determine the specific container for an object given its address.
-If all containers in the super container are the same size, \eg 16KB, then a specific container header can be found by a simple calculation.
-The free space at the end of a super container is used to allocate new containers.
-
-\begin{figure}
-\centering
-\input{SuperContainers}
-% \includegraphics{diagrams/supercontainer.eps}
-\caption{Super Containers}
-\label{f:SuperContainers}
-\end{figure}
-
-Minimal internal and external fragmentation is achieved by having as few containers as possible, each being as full as possible.
-It is also possible to achieve additional benefit by using larger containers for popular small sizes, as it reduces the number of containers with associated headers.
-However, this approach assumes it is possible for an allocator to determine in advance which sizes are popular.
-Keeping statistics on requested sizes allows the allocator to make a dynamic decision about which sizes are popular.
-For example, after receiving a number of allocation requests for a particular size, that size is considered a popular request size and larger containers are allocated for that size.
-If the decision is incorrect, larger containers than necessary are allocated that remain mostly unused.
-A programmer may be able to inform the allocator about popular object sizes, using a mechanism like @mallopt@, in order to select an appropriate container size for each object size.
-
-
-\paragraph{Container Free-Lists}
-\label{s:containersfreelists}
-
-The container header allows an alternate approach for managing the heap's free-list.
-Rather than maintain a global free-list throughout the heap the containers are linked through their headers and only the local free objects within a container are linked together.
-Note, maintaining free lists within a container assumes all free objects in the container are associated with the same heap;
-thus, this approach only applies to containers with ownership.
-
-This alternate free-list approach can greatly reduce the complexity of moving all freed objects belonging to a container to another heap.
-To move a container using a global free-list, the free list is first searched to find all objects within the container.
-Each object is then removed from the free list and linked together to form a local free-list for the move to the new heap.
-With local free-lists in containers, the container is simply removed from one heap's free list and placed on the new heap's free list.
-Thus, when using local free-lists, the operation of moving containers is reduced from $O(N)$ to $O(1)$.
-However, there is the additional storage cost in the header, which increases the header size, and therefore internal fragmentation.
-
-% \begin{figure}
-% \centering
-% \subfloat[Global Free-List Among Containers]{
-% 	\input{FreeListAmongContainers}
-% 	\label{f:GlobalFreeListAmongContainers}
-% } % subfloat
-% \hspace{0.25in}
-% \subfloat[Local Free-List Within Containers]{
-% 	\input{FreeListWithinContainers}
-% 	\label{f:LocalFreeListWithinContainers}
-% } % subfloat
-% \caption{Container Free-List Structure}
-% \label{f:ContainerFreeListStructure}
-% \end{figure}
-
-When all objects in the container are the same size, a single free-list is sufficient.
-However, when objects in the container are different size, the header needs a free list for each size class when using a binning allocation algorithm, which can be a significant increase in the container-header size.
-The alternative is to use a different allocation algorithm with a single free-list, such as a sequential-fit allocation-algorithm.
-
-
-\subsubsection{Allocation Buffer}
-\label{s:AllocationBuffer}
-
-An allocation buffer is reserved memory (see Section~\ref{s:AllocatorComponents}) not yet allocated to the program, and is used for allocating objects when the free list is empty.
-That is, rather than requesting new storage for a single object, an entire buffer is requested from which multiple objects are allocated later.
-Any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or OS, respectively.
-The allocation buffer reduces contention and the number of global/OS calls.
-For coalescing, a buffer is split into smaller objects by allocations, and recomposed into larger buffer areas during deallocations.
-
-Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts (simple bump allocation).
-Furthermore, to prevent heap blowup, objects should be reused before allocating a new allocation buffer.
-Thus, allocation buffers are often allocated more frequently at program/thread start, and then allocations often diminish.
-
-Using an allocation buffer with a thread heap avoids active false-sharing, since all objects in the allocation buffer are allocated to the same thread.
-For example, if all objects sharing a cache line come from the same allocation buffer, then these objects are allocated to the same thread, avoiding active false-sharing.
-Active false-sharing may still occur if objects are freed to the global heap and reused by another heap.
-
-Allocation buffers may increase external fragmentation, since some memory in the allocation buffer may never be allocated.
-A smaller allocation buffer reduces the amount of external fragmentation, but increases the number of calls to the global heap or OS.
-The allocation buffer also slightly increases internal fragmentation, since a pointer is necessary to locate the next free object in the buffer.
-
-The unused part of a container, neither allocated or freed, is an allocation buffer.
-For example, when a container is created, rather than placing all objects within the container on the free list, the objects form an allocation buffer and are allocated from the buffer as allocation requests are made.
-This lazy method of constructing objects is beneficial in terms of paging and caching.
-For example, although an entire container, possibly spanning several pages, is allocated from the OS, only a small part of the container is used in the working set of the allocator, reducing the number of pages and cache lines that are brought into higher levels of cache.
-
-
-\subsubsection{Lock-Free Operations}
-\label{s:LockFreeOperations}
-
-A \newterm{lock-free algorithm} guarantees safe concurrent-access to a data structure, so that at least one thread makes progress, but an individual thread has no execution bound and may starve~\cite[pp.~745--746]{Herlihy93}.
-(A \newterm{wait-free algorithm} puts a bound on the number of steps any thread takes to complete an operation to prevent starvation.)
-Lock-free operations can be used in an allocator to reduce or eliminate the use of locks.
-While locks and lock-free data-structures often have equal performance, lock-free has the advantage of not holding a lock across preemption so other threads can continue to make progress.
-With respect to the heap, these situations are unlikely unless all threads make extremely high use of dynamic-memory allocation, which can be an indication of poor design.
-Nevertheless, lock-free algorithms can reduce the number of context switches, since a thread does not yield/block while waiting for a lock;
-on the other hand, a thread may busy-wait for an unbounded period holding a processor.
-Finally, lock-free implementations have greater complexity and hardware dependency.
-Lock-free algorithms can be applied most easily to simple free-lists, \eg remote free-list, to allow lock-free insertion and removal from the head of a stack.
-Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is correspondingly more complex.
-Michael~\cite{Michael04} and Gidenstam \etal \cite{Gidenstam05} have created lock-free variations of the Hoard allocator.
+The returning thread batches objects to reduce contention by passing multiple objects at once;
+however, batching across multiple allocation sizes and heaps is complex and there is no obvious time when to push back to the owner heap.
+It is simpler for the returning threads to immediately return to the receiving thread's batch list as the receiving thread has better knowledge when to incorporate the batch list into its free pool.
+The receiving thread often delays incorporating returned storage until its local storage is drained.
+
+
+\subsubsection{User-Level Threading}
+
+Any heap model can be used with user-level (M:N) threading.
+However, an important goal of user threads (UT) is for fast operations (creation/termination/context-switching) by not interacting with the OS, allowing large numbers of high-performance interacting threads ($>$ 10,000).
+In general, UTs use whatever kernel-level heap-model is provided by the language runtime.
+Hence, a UT allocates/deallocates from/to the heap of the KT on which it is executing.
+
+However, there is a subtle concurrency problem with user threading and shared heaps.
+With kernel threading, an operation started by a KT is always completed by that thread, even if preempted;
+hence, any locking correctness associated with the shared heap is preserved.
+However, this correctness property is not preserved for user-level threading.
+A UT can start an allocation/deallocation on one KT, be preempted by user-level time slicing, and continue running on a different KT to complete the operation~\cite{Dice02}.
+When the UT continues on the new KT, it may have pointers into the previous KT's heap and hold locks associated with it.
+To get the same KT safety, time slicing must be disabled/\-enabled around these operations to prevent movement.
+However, eagerly disabling time slicing on the allocation/deallocation fast path is expensive, especially as preemption is infrequent (millisecond intervals).
+Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it completes atomically.
+Occasionally ignoring a preemption is normally benign;
+in the worst case, ignoring preemption results in starvation.
+To mitigate starvation, techniques like rolling the preemption forward at the next context switch can be used.
 
 
 \section{llheap}
 
-This section presents our new stand-alone, concurrent, low-latency memory-allocator, called llheap (low-latency heap), fulfilling the GNU C Library allocator API~\cite{GNUallocAPI} for C/\CC programs using kernel threads (1:1 threading), with specialized versions for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
-The primary design objective for llheap is low-latency across all allocator calls independent of application access-patterns and/or number of threads, \ie very seldom does the allocator delay during an allocator call.
-Excluded from the low-latency objective are (large) allocations requiring initialization, \eg zero fill, and/or data copying, which are outside the allocator's purview.
+This section presents our new stand-alone, concurrent, low-latency memory allocator, called llheap (low-latency heap), fulfilling the GNU C Library allocator API~\cite{GNUallocAPI} for C/\CC programs using KTs, with specialized versions for the programming languages \uC and \CFA using user-level threads running over multiple KTs (M:N threading).
+The primary design objective for llheap is low latency across all allocator calls independent of application access-patterns and/or number of threads, \ie very seldom does the allocator delay during an allocator call.
+Excluded from the low-latency objective are (large) allocations requiring initialization, \eg zero fill, and/or data copying, along with unbounded delays to acquire storage from the OS or OS scheduling, all of which are outside the allocator's purview.
 A direct consequence of this objective is very simple or no storage coalescing;
 hence, llheap's design is willing to use more storage to lower latency.
-This objective is apropos because systems research and industrial applications are striving for low latency and modern computers have huge amounts of RAM memory.
-Finally, llheap's performance should be comparable with the current best allocators, both in space and time (see performance comparison in Section~\ref{c:Performance}).
-
-
-\subsection{Design Choices}
-
-llheap's design was reviewed and changed multiple times during its development, with the final choices discussed here.
-All designs focused on the allocation/free \newterm{fastpath}, \ie the shortest code path for the most common operations, \eg when an allocation can immediately return free storage or returned storage is not coalesced.
-The model chosen is 1:1, so there is one thread-local heap for each KT.
-(See Figure~\ref{f:THSharedHeaps} but with a heap bucket per KT and no bucket or local-pool lock.)
-Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted.
-Therefore, heaps are uncontended for a KTs memory operations as every KT has its own thread-local heap, modulo operations on the global pool and ownership.
-
-Problems:
-\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
-\item
-Need to know when a KT starts/terminates to create/delete its heap.
-
-\noindent
-It is possible to leverage constructors/destructors for thread-local objects to get a general handle on when a KT starts/terminates.
-\item
-There is a classic \newterm{memory-reclamation} problem for ownership because storage passed to another thread can be returned to a terminated heap.
-
-\noindent
-The classic solution only deletes a heap after all referents are returned, which is complex.
-The cheap alternative is for heaps to persist for program duration to handle outstanding referent frees.
-If old referents return storage to a terminated heap, it is handled in the same way as an active heap.
-To prevent heap blowup, terminated heaps can be reused by new KTs, where a reused heap may be populated with free storage from a prior KT (external fragmentation).
-In most cases, heap blowup is not a problem because programs have a small allocation set-size, so the free storage from a prior KT is apropos for a new KT.
-\item
-There can be significant external fragmentation as the number of KTs increases.
-
-\noindent
-In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs.
-Since the number of CPUs is relatively small, and a heap is also relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
-\item
-Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}.
-\begin{quote}
-A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable}
-\end{quote}
-If a KT is preempted during an allocation operation, the OS can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.
-Note, the serially-reusable problem can occur in sequential programs with preemption, if the signal handler calls the preempted function, unless the function is serially reusable.
-Essentially, the serially-reusable problem is a race condition on an unprotected critical subsection, where the OS is providing the second thread via the signal handler.
-
-Library @librseq@~\cite{librseq} was used to perform a fast determination of the CPU and to ensure all memory operations complete on one CPU using @librseq@'s restartable sequences, which restart the critical subsection after undoing its writes, if the critical subsection is preempted.
-
-%There is the same serially-reusable problem with UTs migrating across KTs.
-\end{itemize}
-Tests showed this design produced the closest performance match with the best current allocators, and code inspection showed most of these allocators use different variations of this approach.
-
-
-\vspace{5pt}
-\noindent
-The conclusion from this design exercise is: any atomic fence, atomic instruction (lock free), or lock along the allocation fastpath produces significant slowdown.
-For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps might be shared by multiple threads, even when KTs $\le$ N.
-For the T:H=CPU and 1:1 models, locking is eliminated along the allocation fastpath.
-However, T:H=CPU has poor OS support to determine the CPU id (heap id) and prevent the serially-reusable problem for KTs.
-More OS support is required to make this model viable, but there is still the serially-reusable problem with user-level threading.
-So the 1:1 model had no atomic actions along the fastpath and no special OS support requirements.
-The 1:1 model still has the serially-reusable problem with user-level threading, which is addressed in Section~\ref{s:UserlevelThreadingSupport}, and the greatest potential for heap blowup for certain allocation patterns.
-
-
-% \begin{itemize}
-% \item
-% A decentralized design is better to centralized design because their concurrency is better across all bucket-sizes as design 1 shards a few buckets of selected sizes while other designs shards all the buckets. Decentralized designs shard the whole heap which has all the buckets with the addition of sharding @sbrk@ area. So Design 1 was eliminated.
-% \item
-% Design 2 was eliminated because it has a possibility of contention in-case of KT > N while Design 3 and 4 have no contention in any scenario.
-% \item
-% Design 3 was eliminated because it was slower than Design 4 and it provided no way to achieve user-threading safety using librseq. We had to use CFA interruption handling to achieve user-threading safety which has some cost to it.
-% that  because of 4 was already slower than Design 3, adding cost of interruption handling on top of that would have made it even slower.
-% \end{itemize}
-% Of the four designs for a low-latency memory allocator, the 1:1 model was chosen for the following reasons:
-
-% \subsubsection{Advantages of distributed design}
-% 
-% The distributed design of llheap is concurrent to work in multi-threaded applications.
-% Some key benefits of the distributed design of llheap are as follows:
-% \begin{itemize}
-% \item
-% The bump allocation is concurrent as memory taken from @sbrk@ is sharded across all heaps as bump allocation reserve. The call to @sbrk@ will be protected using locks but bump allocation (on memory taken from @sbrk@) will not be contended once the @sbrk@ call has returned.
-% \item
-% Low or almost no contention on heap resources.
-% \item
-% It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty.
-% \item
-% Distributed design avoids unnecessary locks on resources shared across all KTs.
-% \end{itemize}
-
-\subsubsection{Allocation Latency}
-
-A primary goal of llheap is low latency, hence the name low-latency heap (llheap).
-Two forms of latency are internal and external.
-Internal latency is the time to perform an allocation, while external latency is time to obtain or return storage from or to the OS.
-Ideally latency is $O(1)$ with a small constant.
-
-$O(1)$ internal latency means no open searching on the allocation fastpath, which largely prohibits coalescing.
-The mitigating factor is that most programs have a small, fixed, allocation pattern, where the majority of allocation operations can be $O(1)$ and heap blowup does not occur without coalescing (although the allocation footprint may be slightly larger).
-Modern computers have large memories so a slight increase in program footprint is not a problem.
-
-$O(1)$ external latency means obtaining one large storage area from the OS and subdividing it across all program allocations, which requires a good guess at the program storage high-watermark and potential large external fragmentation.
-Excluding real-time OSs, OS operations are unbounded, and hence some external latency is unavoidable.
-The mitigating factor is that OS calls can often be reduced if a programmer has a sense of the storage high-watermark and the allocator is capable of using this information (see @malloc_expansion@ \pageref{p:malloc_expansion}).
-Furthermore, while OS calls are unbounded, many are now reasonably fast, so their latency is tolerable because it occurs infrequently.
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\subsection{llheap Structure}
-
-Figure~\ref{f:llheapStructure} shows the design of llheap, which uses the following features:
-1:1 multiple-heap model to minimize the fastpath,
-can be built with or without heap ownership,
+This objective is apropos because systems research and industrial applications are striving for low latency and modern computers have huge amounts of RAM.
+Finally, llheap's performance must be comparable with current allocators, both in space and time (see performance comparison in Section~\ref{c:Performance}).
+
+
+\subsection{llheap Design}
+
+Figure~\ref{f:llheapDesign} shows the design of llheap, which uses the following features:
+1:1 allocator model eliminating locking on the fast path,
+separate small (@sbrk@) and large object management (@mmap@),
 headers per allocation versus containers,
+small object binning (buckets) forming lists for different sized freed objects,
+optional fast-lookup table for converting allocation requests into bucket sizes,
 no coalescing to minimize latency,
-global heap memory (pool) obtained from the OS using @mmap@ to create and reuse heaps needed by threads,
-local reserved memory (pool) per heap obtained from global pool,
-global reserved memory (pool) obtained from the OS using @sbrk@ call,
-optional fast-lookup table for converting allocation requests into bucket sizes,
-optional statistic-counters table for accumulating counts of allocation operations.
+optional heap ownership (build time),
+reserved memory (buffer pool) per heap obtained from a global pool,
+global heap managing freed thread heaps and interacting with the OS to obtain storage,
+optional statistic-counters table for accumulating counts of allocation operations and a debugging version for testing (build time).
 
 \begin{figure}
@@ -1157,136 +689,147 @@
 % \includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps}
 \input{llheap}
-\caption{llheap Structure}
-\label{f:llheapStructure}
+\caption{llheap Design}
+\label{f:llheapDesign}
 \end{figure}
 
-llheap starts by creating an array of $N$ global heaps from storage obtained using @mmap@, where $N$ is the number of computer cores, that persists for program duration.
-There is a global bump-pointer to the next free heap in the array.
-When this array is exhausted, another array of heaps is allocated.
-There is a global top pointer for a intrusive linked-list to chain free heaps from terminated threads.
-When statistics are turned on, there is a global top pointer for a intrusive linked-list to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.
-
-When a KT starts, a heap is allocated from the current array for exclusive use by the KT.
-When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of number of heaps.
-The free heaps are stored on stack so hot storage is reused first.
-Preserving all heaps, created during the program lifetime, solves the storage lifetime problem when ownership is used.
-This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially.
+llheap starts by creating an empty array for $N$ global heaps from storage obtained using @mmap@ that persists for program duration, where $N$ is the number of computer cores.
+There is a global last-array pointer and bump-pointer within this array to locate the next free heap storage.
+When an array's storage is exhausted, another empty array is allocated.
+Terminated threads push their heap onto a global-stack top-pointer, where free heaps are intrusively linked.
+When statistics are turned on, there is a global top pointer for an intrusive linked-list to link \emph{all} the heaps (not shown), which is traversed to accumulate statistics counters across heaps when @malloc_stats@ is called.
+
+When a KT starts, it pops heap storage from the heap free-list, or if empty, gets the next free heap-storage.
+When a KT terminates, its heap is pushed onto the heap free-list for reuse by a new KT, which prevents unbounded heap growth.
+The free heaps are stored in a stack so hot storage is reused first.
+Preserving all heaps created during the program lifetime solves the storage lifetime problem when ownership is used.
+This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially, which is rare.
+
+Each heap uses segregated free-buckets that have free objects distributed across 60 different sizes from 16 to 16M.
+All objects in a bucket are the same size.
+The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation, which is specified by calling @mallopt( M_MMAP_THRESHOLD )@, where the crossover must be $\ge$ the page size and $\le$ the largest bucket (16M).
+Each cache-aligned bucket has a stack of the same-sized freed objects, where a stack ensures hot storage is reused first.
 llheap can be configured with object ownership, where an object is freed to the heap from which it is allocated, or object no-ownership, where an object is freed to the KT's current heap.
-
-Each heap uses segregated free-buckets that have free objects distributed across 91 different sizes from 16 to 4M.
-All objects in a bucket are of the same size.
-The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the OS.
-Each free bucket of a specific size has two lists.
-1) A free stack used solely by the KT heap-owner, so push/pop operations do not require locking.
-The free objects are a stack so hot storage is reused first.
-2) For ownership, a shared away-stack for KTs to return storage allocated by other KTs, so push/pop operations require locking.
-When the free stack is empty, the entire ownership stack is removed and becomes the head of the corresponding free stack.
+For ownership, a shared remote stack is added to the freelist structure, so push/pop operations require locking.
+Pushes are eager on each remote free \vs batching, and pops are lazy when there is no cheap storage available, then the entire remote stack is gulped and added to the bucket's free list.
+
+Initial threads are assigned empty heaps from the heap array.
+The first thread allocation causes a request for storage from the shared @sbrk@ area.
+The size of this request is the maximum of the request size or the @sbrk@-extension-size / 16.
+This heuristic means the @sbrk@ area is subdivided into separate heap buffers (HB) per thread, providing no contention and data locality.
+A thread does bump allocation in its current buffer, until it starts reusing freed storage or there is insufficient storage, and it obtains another buffer.
+Thread buffers are not linked;
+only logically connected to the thread through allocated and deallocated storage.
+When a thread ends, its heap is returned to the heap array but no storage is released.
+A new thread receiving a freed heap starts with it fully populated with freed storage.
+The heuristic is that threads often do similar work, so the free storage in the heap is reusable, resulting in less internal fragmentation.
+%The heuristic is that threads often do similar work so the free storage in the heap is immediately available.
+%The downside is the risk of more external fragmentation, if the freed storage is never reused.
+The downside is that the freed storage may never be reused, creating external fragmentation.
 
 Algorithm~\ref{alg:heapObjectAlloc} shows the allocation outline for an object of size $S$.
-First, the allocation is divided into small (@sbrk@) or large (@mmap@).
-For large allocations, the storage is mapped directly from the OS.
+The allocation is divided into small (@sbrk@) or large (@mmap@).
 For small allocations, $S$ is quantized into a bucket size.
-Quantizing is performed using a binary search over the ordered bucket array.
-An optional optimization is fast lookup $O(1)$ for sizes < 64K from a 64K array of type @char@, where each element has an index to the corresponding bucket.
-The @char@ type restricts the number of bucket sizes to 256.
-For $S$ > 64K, a binary search is used.
-Then, the allocation storage is obtained from the following locations (in order), with increasing latency:
-bucket's free stack,
-bucket's away stack,
-heap's local pool,
-global pool,
-OS (@sbrk@).
-
-\begin{algorithm}
-\caption{Dynamic object allocation of size $S$}\label{alg:heapObjectAlloc}
-\begin{algorithmic}[1]
-\State $\textit{O} \gets \text{NULL}$
-\If {$S >= \textit{mmap-threshhold}$}
-	\State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$
-\Else
-	\State $\textit{B} \gets \text{smallest free-bucket} \geq S$
-	\If {$\textit{B's free-list is empty}$}
-		\If {$\textit{B's away-list is empty}$}
-			\If {$\textit{heap's allocation buffer} < S$}
-				\State $\text{get allocation from global pool (which might call \lstinline{sbrk})}$
-			\EndIf
-			\State $\textit{O} \gets \text{bump allocate an object of size S from allocation buffer}$
-		\Else
-			\State $\textit{merge B's away-list into free-list}$
-			\State $\textit{O} \gets \text{pop an object from B's free-list}$
-		\EndIf
-	\Else
-		\State $\textit{O} \gets \text{pop an object from B's free-list}$
-	\EndIf
-	\State $\textit{O's owner} \gets \text{B}$
-\EndIf
-\State $\Return \textit{ O}$
+Quantizing is performed using a direct lookup for sizes < 64K or a binary search over the ordered bucket array for $S$ $\ge$ 64K.
+Then, the allocation storage is obtained from the following locations, in order of increasing latency: the bucket's free stack, the heap's local buffer, the bucket's remote stack, the global buffer, the OS (@sbrk@).
+For large allocations, the storage is directly allocated from the OS using @mmap@.
+
+\begin{algorithm}[t]
+\caption{Dynamic object allocation of size $S$}
+\label{alg:heapObjectAlloc}
+\begin{algorithmic}
+\STATE $S \gets S + \text{header-size}$
+\IF {$S < \textit{mmap-threshold}$}
+	\STATE $\textit{B} \gets \text{smallest free-bucket} \geq S$
+	\IF {$\textit{B's free-list \(\neg\)empty}$}
+		\STATE $\textit{O} \gets \text{pop an object from B's free-list}$
+	\ELSIF {$\textit{heap's allocation buffer} \ge B$}
+		\STATE $\textit{O} \gets \text{bump allocate object of size B from allocation buffer}$
+	\ELSIF {$\textit{B's remote-list \(\neg\)empty}$}
+		\STATE $\textit{merge B's remote-list into free-list}$
+		\STATE $\textit{O} \gets \text{pop an object from B's free-list}$
+	\ELSE
+		\STATE $\textit{O} \gets \text{allocate an object of size B from global pool}$
+	\ENDIF
+\ELSE
+	\STATE $\textit{O} \gets \text{allocate an object of size S using \lstinline{mmap} system-call}$
+\ENDIF
+\RETURN $\textit{O}$
 \end{algorithmic}
 \end{algorithm}
 
-\begin{algorithm}
-\caption{Dynamic object free at address $A$ with object ownership}\label{alg:heapObjectFreeOwn}
-\begin{algorithmic}[1]
-\If {$\textit{A mapped allocation}$}
-	\State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
-\Else
-	\State $\text{B} \gets \textit{O's owner}$
-	\If {$\textit{B is thread-local heap's bucket}$}
-		\State $\text{push A to B's free-list}$
-	\Else
-		\State $\text{push A to B's away-list}$
-	\EndIf
-\EndIf
+Algorithm~\ref{alg:heapObjectFreeOwn} shows the deallocation (free) outline for an object at address $A$ with ownership.
+First, the address is divided into small (@sbrk@) or large (@mmap@).
+For small allocations, the bucket associated with the request size is retrieved from the allocation header.
+If the bucket is local to the thread, the allocation is pushed onto the thread's associated bucket.
+If the bucket is not local to the thread, the allocation is pushed onto the owning thread's remote stack.
+For large allocations, the storage is unmapped back to the OS.
+Without object ownership, the algorithm is the same as for ownership except when the bucket is not local to the thread.
+In that case, the corresponding bucket of the owner thread is computed by the deallocating thread, and the allocation is pushed onto the deallocating thread's corresponding bucket, \ie no search is required.
+
+\begin{algorithm}[t]
+\caption{Dynamic object free at address $A$ with object ownership}
+\label{alg:heapObjectFreeOwn}
+\begin{algorithmic}
+\IF {$\textit{A heap allocation}$}
+	\STATE $\text{B} \gets \textit{O's owner}$
+	\IF {$\textit{B's thread = current heap thread}$}
+		\STATE $\text{push A to B's free-list}$
+	\ELSE
+		\STATE $\text{push A to B's remote-list}$
+	\ENDIF
+\ELSE
+	\STATE $\text{return A to system using system call \lstinline{munmap}}$
+\ENDIF
 \end{algorithmic}
 \end{algorithm}
 
+\begin{comment}
 \begin{algorithm}
-\caption{Dynamic object free at address $A$ without object ownership}\label{alg:heapObjectFreeNoOwn}
+\caption{Dynamic object free at address $A$ without object ownership}
+\label{alg:heapObjectFreeNoOwn}
 \begin{algorithmic}[1]
-\If {$\textit{A mapped allocation}$}
-	\State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
-\Else
-	\State $\text{B} \gets \textit{O's owner}$
-	\If {$\textit{B is thread-local heap's bucket}$}
-		\State $\text{push A to B's free-list}$
-	\Else
-		\State $\text{C} \gets \textit{thread local heap's bucket with same size as B}$
-		\State $\text{push A to C's free-list}$
-	\EndIf
-\EndIf
+\IF {$\textit{A mapped allocation}$}
+	\STATE $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
+\ELSE
+	\STATE $\text{B} \gets \textit{O's owner}$
+	\IF {$\textit{B is thread-local heap's bucket}$}
+		\STATE $\text{push A to B's free-list}$
+	\ELSE
+		\STATE $\text{C} \gets \textit{thread local heap's bucket with same size as B}$
+		\STATE $\text{push A to C's free-list}$
+	\ENDIF
+\ENDIF
 \end{algorithmic}
 \end{algorithm}
-
-
-Algorithm~\ref{alg:heapObjectFreeOwn} shows the deallocation (free) outline for an object at address $A$ with ownership.
-First, the address is divided into small (@sbrk@) or large (@mmap@).
-For large allocations, the storage is unmapped back to the OS.
-For small allocations, the bucket associated with the request size is retrieved.
-If the bucket is local to the thread, the allocation is pushed onto the thread's associated bucket.
-If the bucket is not local to the thread, the allocation is pushed onto the owning thread's associated away stack.
-
-Algorithm~\ref{alg:heapObjectFreeNoOwn} shows the deallocation (free) outline for an object at address $A$ without ownership.
-The algorithm is the same as for ownership except if the bucket is not local to the thread.
-Then the corresponding bucket of the owner thread is computed for the deallocating thread, and the allocation is pushed onto the deallocating thread's bucket.
-
-Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through the @malloc@ and @free@ routines, which are the only routines to directly access and manage the internal data structures of the heap.
+\end{comment}
+
+Finally, the llheap design funnels all allocation/deallocation operations through the @malloc@ and @free@ routines, which are the only routines to directly access and manage the internal data structures of the heap.
 Other allocation operations, \eg @calloc@, @memalign@, and @realloc@, are composed of calls to @malloc@ and possibly @free@, and may manipulate header information after storage is allocated.
 This design simplifies heap-management code during development and maintenance.
 
 
+\subsubsection{Bounded Allocation}
+
+The llheap design results in bounded allocation.
+For small allocations, once all the buckets have freed objects, storage is recycled.
+For large allocations, the storage is directly recycled back to the OS.
+When a thread terminates, its heap is recycled to the next new thread and the above process begins for that thread.
+The pathological case is threads allocating a large amount of storage, freeing it, and then quiescing, which demonstrates that the bound constant can be large.
+This pathological pattern occurs for \emph{immortal} threads, \eg I/O threads with program lifetime and bursts of activity performing many allocations/deallocations.
+Hence, independent of external fragmentation in thread heaps, storage cannot grow unbounded unless the program does not free.
+
+
 \subsubsection{Alignment}
 
-Allocators have a different minimum storage alignment from the hardware's basic types.
-Often the minimum allocator alignment, $M$, is the bus width (32 or 64-bit), the largest register (double, long double), largest atomic instruction (DCAS), or vector data (MMMX).
-The reason for this larger requirement is the lack of knowledge about the data type occupying the allocation.
-Hence, an allocator assumes the worst-case scenario for the start of data and the compiler correctly aligns items within this data because it knows their types.
-Often the minimum storage alignment is an 8/16-byte boundary on a 32/64-bit computer.
-Alignments larger than $M$ are normally a power of 2, such as page alignment (4/8K).
+The minimum storage alignment $M$ comes from the architecture application-binary-interface (ABI) based on hardware factors: bus width (32 or 64-bit), largest register (double, long double), largest atomic instruction (double compare-and-swap), or vector data (Intel MMX).
+An access with a nonaligned address may be slow or cause an error.
+A memory allocator must assume the largest hardware requirement because it is unaware of the data type occupying the allocation.
+Often the minimum storage alignment is an 8/16-byte boundary on a 32/64-bit computer, respectively.
+Alignments larger than $M$ are powers of 2, such as page alignment (4/8K).
 Any alignment less than $M$ is raised to the minimal alignment.
 
-llheap aligns its header at the $M$ boundary and its size is $M$;
-hence, data following the header is aligned at $M$.
-This pattern means there is no minimal alignment computation along the allocation fastpath, \ie new storage and reused storage is always correctly aligned.
-An alignment $N$ greater than $M$ is accomplished with a \emph{pessimistic} request for storage that ensures \emph{both} the alignment and size request are satisfied, \eg:
+llheap aligns its allocation header on an $M$ boundary and its size is $M$, making the following data $M$ aligned.
+This pattern means there is no minimal alignment computation along the allocation fast path, \ie new storage and reused storage is always correctly aligned.
+An alignment $N$ greater than $M$ is accomplished with a \emph{pessimistic} request for storage that ensures \emph{both} the alignment and size request are satisfied.
 \begin{center}
 \input{Alignment2}
@@ -1295,67 +838,106 @@
 The approach is pessimistic if $P$ happens to have the correct alignment $N$, and the initial allocation has requested sufficient space to move to the next multiple of $N$.
 In this case, there is $alignment - M$ bytes of unused storage after the data object, which could be used by @realloc@.
-Note, the address returned by the allocation is $A$, which is subsequently returned to @free@.
-To correctly free the object, the value $P$ must be computable from $A$, since that is the actual start of the allocation, from which $H$ can be computed $P - M$.
-Hence, there must be a mechanism to detect when $P$ $\neq$ $A$ and then compute $P$ from $A$.
+Note, the address returned by the allocation is $A$, which is subsequently returned for deallocation.
+However, the deallocation requires the value $P$, which must be computable from $A$, from which $H$ can be computed $P - M$.
+Hence, there must be a mechanism to detect $P$ $\neq$ $A$ and compute $P$ from $A$.
 
 To detect and perform this computation, llheap uses two headers:
-the \emph{original} header $H$ associated with the allocation, and a \emph{fake} header $F$ within this storage before the alignment boundary $A$, e.g.:
+the \emph{original} header $H$ associated with the allocation, and a \emph{fake} header $F$ within this storage before the alignment boundary $A$.
 \begin{center}
 \input{Alignment2Impl}
 \end{center}
 Since every allocation is aligned at $M$, $P$ $\neq$ $A$ only holds for alignments greater than $M$.
-When $P$ $\neq$ $A$, the minimum distance between $P$ and $A$ is $M$ bytes, due to the pessimistic storage-allocation.
+When $P$ $\neq$ $A$, the minimum distance between $P$ and $A$ is $M$ bytes, due to the pessimistic storage allocation.
 Therefore, there is always room for an $M$-byte fake header before $A$.
 The fake header must supply an indicator to distinguish it from a normal header and the location of address $P$ generated by the allocation.
-This information is encoded as an offset from A to P and the initialize alignment (discussed in Section~\ref{s:ReallocStickyProperties}).
-To distinguish a fake header from a normal header, the least-significant bit of the alignment is used because the offset participates in multiple calculations, while the alignment is just remembered data.
+This information is encoded as an offset from A to P and the initial alignment (discussed in Section~\ref{s:ReallocStickyProperties}).
+To distinguish a fake header from a normal header, the least-significant bit of the alignment is set to 1 because the offset participates in multiple calculations, while the alignment is just remembered data.
 \begin{center}
 \input{FakeHeader}
 \end{center}
 
+Note, doing alignment with containers requires a separate container for the aligned fixed-sized objects, so there are more kinds of containers that must be managed.
+
 
 \subsubsection{\lstinline{realloc} and Sticky Properties}
 \label{s:ReallocStickyProperties}
 
-The allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data.
-The realloc pattern is simpler than the suboptimal manually steps.
+The allocation routine @realloc@ provides a memory management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data.
+The realloc pattern is simpler than the suboptimal manual steps.
 \begin{flushleft}
+\setlength{\tabcolsep}{10pt}
 \begin{tabular}{ll}
-\multicolumn{1}{c}{\textbf{realloc pattern}} & \multicolumn{1}{c}{\textbf{manually}} \\
-\begin{lstlisting}
+\multicolumn{1}{c}{\textbf{realloc pattern}} & \multicolumn{1}{c}{\textbf{manual}} \\
+\begin{C++}
 T * naddr = realloc( oaddr, newSize );
 
 
 
-\end{lstlisting}
+\end{C++}
 &
-\begin{lstlisting}
+\begin{C++}
 T * naddr = (T *)malloc( newSize ); $\C[2in]{// new storage}$
 memcpy( naddr, addr, oldSize );	 $\C{// copy old bytes}$
 free( addr );				$\C{// free old storage}$
 addr = naddr;				$\C{// change pointer}\CRT$
-\end{lstlisting}
+\end{C++}
 \end{tabular}
 \end{flushleft}
-The manual steps are suboptimal because there may be sufficient internal fragmentation at the end of the allocation due to bucket sizes.
-If this storage is large enough, it eliminates a new allocation and copying.
+The manual steps are suboptimal because there may be internal fragmentation at the end of the allocation due to bucket sizes.
+If this storage is sufficiently large, it eliminates a new allocation and copying.
 Alternatively, if the storage is made smaller, there may be a reasonable crossover point, where just increasing the internal fragmentation eliminates a new allocation and copying.
-This pattern should be used more frequently to reduce storage management costs.
+Hence, using @realloc@ as often as possible can reduce storage management costs.
 In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc( newSize )@, and if @newSize@ is 0, @realloc@ does a @free( oaddr )@, so all allocation/deallocation can be done with @realloc@.
 
 The hidden problem with this pattern is the effect of zero fill and alignment with respect to reallocation.
-For safety, we argue these properties should be persistent (``sticky'') and not transient.
-For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, any subsequent reallocations of this storage must preserve these properties.
-Currently, allocation properties are not preserved nor is it possible to query an allocation to maintain these properties manually.
-Hence, subsequent use of @realloc@ storage that assumes any initially properties may cause errors.
+For safety, these properties must persist (be ``sticky'') when storage size changes.
+Prior to llheap, allocation properties are not preserved across reallocation nor is it possible to query an allocation to maintain these properties manually.
+Hence, a random call to @realloc@ that reallocates storage may cause downstream errors, if allocation properties are needed.
 This silent problem is unintuitive to programmers, can cause catastrophic failure, and is difficult to debug because it is transient.
 To prevent these problems, llheap preserves initial allocation properties within an allocation, allowing them to be queried, and the semantics of @realloc@ preserve these properties on any storage change.
 As a result, the realloc pattern is efficient and safe.
 
+Note, @realloc@ has a compile-time disadvantage \vs @malloc@, because @malloc@ simplifies optimization opportunities.
+For @malloc@ the compiler knows the new storage address is not aliased, which is not true for @realloc@: the same storage can be returned.
+The compiler uses this knowledge to optimize the region of code between the @malloc@ call and the point where the pointer escapes or it finds the matching @free@.
+For @realloc@, the compiler must also analyse the code \emph{before} the call and this analysis may fail.
+
+Finally, there is a flaw in @realloc@'s definition: if there is no memory to allocate new storage for an expansion, the original allocation is not freed or moved, @errno@ is set to @ENOMEM@, and a null pointer is returned.
+This semantics preserves the original allocation so the data is not lost in a failure case.
+However, most calls to @realloc@ are written: @p = realloc( p, size )@, so the original storage is leaked when pointer @p@ is overwritten with null, negating the benefit of not freeing the storage for recovery purposes.
+Programmers can follow a coding pattern of:
+\begin{C++}
+char * p;
+...
+void * p1 = realloc( p, size );
+if ( p1 ) p = (char *)p1;
+else // release some storage
+\end{C++}
+However, most programmers ignore return codes.
+A better alternative is to change @realloc@'s interface to be like @posix_memalign@, which returns two results, a return code and a storage address, so the error code is separate from the returned storage.
+\begin{C++}
+int retcode = realloc( (void **)&p, size );
+\end{C++}
+which returns 0 or @ENOMEM@, only changes @p@ for expansion, but requires an ugly cast on the call.
+
+
+\subsubsection{Sticky Test}
+
+Since sticky properties are an important safety feature for @realloc@, an ad-hoc @realloc@ test was created (not shown) to test whether a memory allocator preserves zero-fill from @calloc@ and/or alignment from @memalign@.
+The first test @calloc@s a large array (zero fill), sets the array to 42, shortens it, and then enlarges it to the original size.
+It does these steps 100 times attempting to get a reused large block of memory that is still set to 42, showing new storage does not preserve zero fill.
+The second test @memalign@s storage and @realloc@s it multiple times making it larger until the current storage must be copied into new storage.
+The alignment of each storage address returned from @realloc@ is verified with the original alignment.
+
+If a test fails, that sticky property is not provided;
+if the test passes, that sticky property is provided in some form but not necessarily in all forms (test just got lucky).
+If an allocator fails these tests, it is unnecessary to perform a manual inspection of the @realloc@ code for sticky properties.
+Only llheap passes the test, as its @realloc@ applies sticky properties.
+
 
 \subsubsection{Header}
 
 To preserve allocation properties requires storing additional information about an allocation.
-Figure~\ref{f:llheapHeader} shows llheap captures this information in the header, which has two fields (left/right) sized appropriately for 32/64-bit alignment requirements.
+Figure~\ref{f:llheapHeader} shows llheap captures this information in the per object header, which has two fields (left/right) sized appropriately for 32/64-bit alignment requirements.
 
 \begin{figure}
@@ -1367,56 +949,58 @@
 
 The left field is a union of three values:
-\begin{description}
+\begin{description}[leftmargin=*,topsep=2pt,itemsep=2pt,parsep=0pt]
 \item[bucket pointer]
-is for deallocated of heap storage and points back to the bucket associated with this storage requests (see Figure~\ref{f:llheapStructure} for the fields accessible in a bucket).
+is for deallocation and points back to the bucket associated with this storage request (see Figure~\ref{f:llheapDesign} for the fields accessible in a bucket).
 \item[mapped size]
 is for deallocation of mapped storage and is the storage size for unmapping.
 \item[next free block]
-is for freed storage and is an intrusive pointer chaining same-size free blocks onto a bucket's stack of free objects.
+is an intrusive pointer linking same-size free blocks onto a bucket's stack of free objects.
 \end{description}
-The low-order 3-bits of this field are unused for any stored values as these values are at least 8-byte aligned.
+The low-order 3-bits of these fields are unused for any stored values, due to the minimum alignment of 8 bytes (even for 32-bit addressing).
 The 3 unused bits are used to represent mapped allocation, zero filled, and alignment, respectively.
 Note, the zero-filled/mapped bits are only used in the normal header and the alignment bit in the fake header.
 This implementation allows a fast test if any of the lower 3-bits are on (@&@ and compare).
-If no bits are on, it implies a basic allocation, which is handled quickly in the fastpath for allocation and free;
+If no bits are on, it implies a basic allocation, which is handled quickly in the fast path for allocation and free;
 otherwise, the bits are analysed and appropriate actions are taken for the complex cases.
 
-The right field remembers the request size versus the allocation (bucket) size, \eg request of 42 bytes is rounded up to 64 bytes.
-Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors and also helps in memory management.
+The right field remembers the allocation request size versus the allocation (bucket) size, \eg request of 42 bytes is rounded up to 64 bytes.
+Since programmers think in request size rather than allocation size, the request size allows better generation of statistics or errors and also helps in memory management.
 
 
 \subsection{Statistics and Debugging}
 
-llheap can be built to accumulate fast and largely contention-free allocation statistics to help understand dynamic-memory behaviour.
-Incrementing statistic counters must appear on the allocation fastpath.
-As noted, any atomic operation along the fastpath produces a significant increase in allocation costs.
-To make statistics performant enough for use on running systems, each heap has its own set of statistic counters, so heap operations do not require atomic operations.
+llheap can be built to accumulate fast and largely contention-free allocation statistics to help understand dynamic memory behaviour.
+Incrementing statistic counters must appear on the allocation fast path.
+To make statistics performant enough for use on running systems, each heap has its own set of statistic counters, so statistic operations do not require slow atomic operations.
 
 To locate all statistic counters, heaps are linked together in statistics mode, and this list is locked and traversed to sum all counters across heaps.
-Note, the list is locked to prevent errors traversing an active list;
+Note, the list is locked to prevent errors traversing an active list, which may have nodes added or removed dynamically;
 the statistics counters are not locked and can flicker during accumulation.
+Hence, printing statistics during program execution is an approximation.
 Figure~\ref{f:StatiticsOutput} shows an example of statistics output, which covers all allocation operations and information about deallocating storage not owned by a thread.
-No other memory allocator studied provides as comprehensive statistical information.
-Finally, these statistics were invaluable during the development of this work for debugging and verifying correctness and should be equally valuable to application developers.
+No other memory allocator provides as comprehensive statistical information.
+These statistics were invaluable during the development of llheap for debugging and verifying correctness, and should be equally valuable to application developers.
 
 \begin{figure}
-\begin{lstlisting}
-Heap statistics: (storage request / allocation)
-  malloc >0 calls 2,766; 0 calls 2,064; storage 12,715 / 13,367 bytes
-  aalloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
-  calloc >0 calls 6; 0 calls 0; storage 1,008 / 1,104 bytes
-  memalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
+\begin{C++}
+PID: 2167216 Heap statistics: (storage request / allocation)
+  malloc    >0 calls 19,938,000,110; 0 calls 2,064,000,000; storage 4,812,152,081,688 / 5,487,040,092,624 bytes
+  aalloc    >0 calls 0; 0 calls 0; storage 0 / 0 bytes
+  calloc    >0 calls 7; 0 calls 0; storage 1,040 / 1,152 bytes
+  memalign  >0 calls 0; 0 calls 0; storage 0 / 0 bytes
   amemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
   cmemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
-  resize >0 calls 0; 0 calls 0; storage 0 / 0 bytes
-  realloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
-  free !null calls 2,766; null calls 4,064; storage 12,715 / 13,367 bytes
-  away pulls 0; pushes 0; storage 0 / 0 bytes
-  sbrk calls 1; storage 10,485,760 bytes
-  mmap calls 10,000; storage 10,000 / 10,035 bytes
-  munmap calls 10,000; storage 10,000 / 10,035 bytes
-  threads started 4; exited 3
-  heaps new 4; reused 0
-\end{lstlisting}
+  resize    >0 calls 0; 0 calls 0; storage 0 / 0 bytes
+  realloc   >0 calls 0; 0 calls 0; storage 0 / 0 bytes
+            copies 0; smaller 0; alignment 0; 0 fill 0
+  free      !null calls 19,938,000,092; null / 0 calls 4,064,000,004; storage 4,812,152,003,021 / 5,487,040,005,152 bytes
+  remote    pushes 4; pulls 0; storage 0 / 0 bytes
+  sbrk      calls 1; storage 8,388,608 bytes
+  mmap      calls 2,000,000; storage 2,097,152,000,000 / 2,105,344,000,000 bytes
+  munmap    calls 2,000,000; storage 2,097,152,000,000 / 2,105,344,000,000 bytes
+  remainder calls 0; storage 0 bytes
+  threads   started 4; exited 4
+  heaps     $new$ 4; reused 0
+\end{C++}
 \caption{Statistics Output}
 \label{f:StatiticsOutput}
@@ -1424,39 +1008,59 @@
 
 llheap can also be built with debug checking, which inserts many asserts along all allocation paths.
-These assertions detect incorrect allocation usage, like double frees, unfreed storage, or memory corruptions because internal values (like header fields) are overwritten.
-These checks are best effort as opposed to complete allocation checking as in @valgrind@.
+These assertions detect incorrect allocation usage, like double frees, unfreed storage, or memory corruption because internal values (like header fields) are overwritten.
+These checks are best effort as opposed to complete allocation checking as in @valgrind@~\cite{valgind}.
 Nevertheless, the checks detect many allocation problems.
-There is an unfortunate problem in detecting unfreed storage because some library routines assume their allocations have life-time duration, and hence, do not free their storage.
-For example, @printf@ allocates a 1024-byte buffer on the first call and never deletes this buffer.
-To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ \pageref{p:malloc_unfreed}), and it is subtracted from the total allocate/free difference.
+There is a problem in detecting unfreed storage because some library routines assume their allocations have life-time duration, and hence, do not free their storage.
+For example, @printf@ might allocate a 1024-byte buffer on the first call and never delete this buffer.
+To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ in Section~\ref{s:ExtendedCAPI}), and it is subtracted from the total allocate/free difference.
 Determining the amount of never-freed storage is annoying, but once done, any warnings of unfreed storage are application related.
-
-Tests indicate only a 30\% performance decrease when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistic is mitigated by limited calls, often only one at the end of the program.
-
-
-\subsection{User-level Threading Support}
-\label{s:UserlevelThreadingSupport}
-
-The serially-reusable problem (see \pageref{p:SeriallyReusable}) occurs for kernel threads in the ``T:H model, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
-The solution is to prevent interrupts that can result in a CPU or KT change during operations that are logically critical subsections such as starting a memory operation on one KT and completing it on another.
-Locking these critical subsections negates any attempt for a quick fastpath and results in high contention.
-For user-level threading, the serially-reusable problem appears with time slicing for preemptable scheduling, as the signal handler context switches to another user-level thread.
-Without time slicing, a user thread performing a long computation can prevent the execution of (starve) other threads.
-To prevent starvation for a memory-allocation-intensive thread, \ie the time slice always triggers in an allocation critical-subsection for one thread so the thread never gets time sliced, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
-The rollforward flag is tested at the end of each allocation funnel routine (see \pageref{p:FunnelRoutine}), and if set, it is reset and a volunteer yield (context switch) is performed to allow other threads to execute.
-
-llheap uses two techniques to detect when execution is in an allocation operation or routine called from allocation operation, to abort any time slice during this period.
-On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting kernel-thread-local flags so the signal handler aborts immediately.
-On the fastpath, disabling/enabling interrupts is too expensive as accessing kernel-thread-local storage can be expensive and not user-thread-safe.
-For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing.
-Hence, there is a window between loading the kernel-thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.
-
-The fast technique (with lower run time cost) is to define a special code subsection and places all non-interruptible routines in this subsection.
-The linker places all code in this subsection into a contiguous block of memory, but the order of routines within the block is unspecified.
-Then, the signal handler compares the program counter at the point of interrupt with the the start and end address of the non-interruptible subsection, and aborts if executing within this subsection and sets the rollforward flag.
-This technique is fragile because any calls in the non-interruptible code outside of the non-interruptible subsection (like @sbrk@) must be bracketed with disable/enable interrupts and these calls must be along the slowpath.
-Hence, for correctness, this approach requires inspection of generated assembler code for routines placed in the non-interruptible subsection.
-This issue is mitigated by the llheap funnel design so only funnel routines and a few statistics routines are placed in the non-interruptible subsection and their assembler code examined.
-These techniques are used in both the \uC and \CFA versions of llheap as both of these systems have user-level threading.
+Debugging mode also scrubs each allocation with @0xff@, so assumptions about zero-filled objects generate errors.
+Finally, if a program does segment-fault in debug mode, a stack backtrace is printed to help in debugging.
+
+Tests indicate only a 30\% performance decrease when statistics \emph{and} debugging are enabled in programs with 10\% to 15\% allocation cost, and the latency cost for accumulating statistics from each heap is mitigated by limited calls, often only one at the end of the program.
+
+
+% \subsection{Design Choices}
+% 
+% llheap's design was reviewed and changed multiple times during its development.
+% All designs focused on the allocation/free \newterm{fast path}, \ie the shortest code path for the most common operations.
+% The model chosen is 1:1, giving one heap per thread for each kernel thread (KT).
+% Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted.
+% Therefore, the majority of heap operations are uncontended, modulo operations on the global heap and ownership.
+% 
+% Problems:
+% \begin{itemize}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt]
+% \item
+% Need to know when a KT starts/terminates to create/delete its heap.
+% 
+% \noindent
+% It is possible to leverage constructors/destructors for thread-local objects to get a general handle on when a KT starts/terminates.
+% \item
+% There is a classic \newterm{memory-reclamation} problem for ownership because storage passed to another thread can be returned to a terminated heap.
+% 
+% \noindent
+% The classic solution only deletes a heap after all referents are returned, which is complex.
+% The cheap alternative is for heaps to persist for program duration to handle outstanding referent frees.
+% If old referents return storage to a terminated heap, it is handled in the same way as an active heap.
+% To prevent heap blowup, terminated heaps can be reused by new KTs, where a reused heap may be populated with free storage from a prior KT (external fragmentation).
+% In most cases, heap blowup is not a problem because programs have a small allocation set-size, so the free storage from a prior KT is apropos for a new KT.
+% \item
+% There can be significant external fragmentation as the number of KTs increases.
+% 
+% \noindent
+% In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs.
+% Since the number of CPUs is relatively small, and a heap is also relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
+% \item
+% Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}.
+% \begin{quote}
+% A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable}
+% \end{quote}
+% If a KT is preempted during an allocation operation, the OS can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.
+% Note, the serially-reusable problem can occur in sequential programs with preemption, if the signal handler calls the preempted function, unless the function is serially reusable.
+% Essentially, the serially-reusable problem is a race condition on an unprotected critical subsection, where the OS is providing the second thread via the signal handler.
+
+% There is the same serially-reusable problem with UTs migrating across KTs.
+% \end{itemize}
+% Tests showed this design produced the closest performance match with the best current allocators, and code inspection showed most of these allocators use different variations of this approach.
 
 
@@ -1464,12 +1068,7 @@
 
 There are problems bootstrapping a memory allocator.
-\begin{enumerate}
-\item
 Programs can be statically or dynamically linked.
-\item
 The order in which the linker schedules startup code is poorly supported so it cannot be controlled entirely.
-\item
-Knowing a KT's start and end independently from the KT code is difficult.
-\end{enumerate}
+Knowing a KT's start and end independently from the KT code is also difficult.
 
 For static linking, the allocator is loaded with the program.
@@ -1477,98 +1076,110 @@
 This approach allows allocator substitution by placing an allocation library before any other in the linked/load path.
 
-Allocator substitution is similar for dynamic linking, but the problem is that the dynamic loader starts first and needs to perform dynamic allocations \emph{before} the substitution allocator is loaded.
-As a result, the dynamic loader uses a default allocator until the substitution allocator is loaded, after which all allocation operations are handled by the substitution allocator, including from the dynamic loader.
-Hence, some part of the @sbrk@ area may be used by the default allocator and statistics about allocation operations cannot be correct.
-Furthermore, dynamic linking goes through trampolines, so there is an additional cost along the allocator fastpath for all allocation operations.
-Testing showed up to a 5\% performance decrease with dynamic linking as compared to static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.
-
-All allocator libraries need to perform startup code to initialize data structures, such as the heap array for llheap.
-The problem is getting initialization done before the first allocator call.
-However, there does not seem to be mechanism to tell either the static or dynamic loader to first perform initialization code before any calls to a loaded library.
-Also, initialization code of other libraries and the run-time environment may call memory allocation routines such as \lstinline{malloc}.
-This compounds the situation as there is no mechanism to tell either the static or dynamic loader to first perform the initialization code of the memory allocator before any other initialization that may involve a dynamic memory allocation call.
-As a result, calls to allocation routines occur without initialization.
-To deal with this problem, it is necessary to put a conditional initialization check along the allocation fastpath to trigger initialization (singleton pattern).
-
-Two other important execution points are program startup and termination, which include prologue or epilogue code to bootstrap a program, which programmers are unaware of.
-For example, dynamic-memory allocations before/after the application starts should not be considered in statistics because the application does not make these calls.
-llheap establishes these two points using routines:
-\begin{lstlisting}
-__attribute__(( constructor( 100 ) )) static void startup( void ) {
+Allocator substitution is similar for dynamic linking.
+However, the dynamic loader starts first and needs to perform dynamic allocations \emph{before} the substitution allocator is loaded.
+As a result, the dynamic loader uses a default allocator until the substitution allocator is loaded, after which all allocation operations are handled by the substitution allocator, including those from the dynamic loader.
+Hence, some part of the @sbrk@ area may be used by the default allocator and substitution allocator statistics cannot be correct.
+Furthermore, dynamic linking uses an assembler trampoline to call the procedure linkage table resolver, so there is an additional cost along the allocator fast path for all allocation operations.
+Testing showed up to a 5\% performance decrease with dynamic linking as compared to static linking, even when using @tls_model( "initial-exec" )@ to obtain tighter binding.
+
+After the allocator is loaded, it needs to be initialized before the first allocation request.
+Currently, the only mechanism to control initialization is via constructor routines (see below), each with an integer priority, where the linker calls the constructors in increasing order of priority.
+However, there are few conventions for priorities amongst libraries, where constructors with equal priorities are called in arbitrary order.
+(Only a transitive closure of references amongst library calls can establish an absolute initialization order.)
+As a result, the first call to an allocation routine can occur without initialization.
+To deal with this problem, it is necessary to have a global flag that is checked along the allocation fast path to trigger initialization (singleton pattern).
+
+Along these lines, there is a subtle problem in defining when a program starts and ends.
+For example, prologue/epilogue code outside of the program should not be considered in statistics as the application does not make these calls.
+llheap establishes these two points using constructor/destructor routines with initialization priority 100, where system libraries use priorities $\le$ 100 and application programs have priorities $>$ 100.
+\begin{flushleft}
+\hspace*{\parindentlnth}
+\setlength{\tabcolsep}{20pt}
+\begin{tabular}{@{}ll@{}}
+\begin{C++}
+@__attribute__(( constructor( 100 ) ))@
+static void startup( void ) {
 	// clear statistic counters
 	// reset allocUnfreed counter
 }
-__attribute__(( destructor( 100 ) )) static void shutdown( void ) {
+
+\end{C++}
+&
+\begin{C++}
+@__attribute__(( destructor( 100 ) ))@
+static void shutdown( void ) {
 	// sum allocUnfreed for all heaps
 	// subtract global unfreed storage
 	// if allocUnfreed > 0 then print warning message
 }
-\end{lstlisting}
-which use global constructor/destructor priority 100, where the linker calls these routines at program prologue/epilogue in increasing/decreasing order of priority.
-Application programs may only use global constructor/destructor priorities greater than 100.
+\end{C++}
+\end{tabular}
+\end{flushleft}
 Hence, @startup@ is called after the program prologue but before the application starts, and @shutdown@ is called after the program terminates but before the program epilogue.
 By resetting counters in @startup@, prologue allocations are ignored, and checking unfreed storage in @shutdown@ checks only application memory management, ignoring the program epilogue.
 
-While @startup@/@shutdown@ apply to the program KT, a concurrent program creates additional KTs that do not trigger these routines.
-However, it is essential for the allocator to know when each KT is started/terminated.
-One approach is to create a thread-local object with a construct/destructor, which is triggered after a new KT starts and before it terminates, respectively.
-\begin{lstlisting}
-struct ThreadManager {
-	volatile bool pgm_thread;
-	ThreadManager() {} // unusable
-	~ThreadManager() { if ( pgm_thread ) heapManagerDtor(); }
-};
-static thread_local ThreadManager threadManager;
-\end{lstlisting}
-Unfortunately, thread-local variables are created lazily, \ie on the first dereference of @threadManager@, which then triggers its constructor.
-Therefore, the constructor is useless for knowing when a KT starts because the KT must reference it, and the allocator does not control the application KT.
-Fortunately, the singleton pattern needed for initializing the program KT also triggers KT allocator initialization, which can then reference @pgm_thread@ to call @threadManager@'s constructor, otherwise its destructor is not called.
-Now when a KT terminates, @~ThreadManager@ is called to chain it onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
-The conditional destructor call prevents closing down the program heap, which must remain available because epilogue code may free more storage.
-
-Finally, there is a recursive problem when the singleton pattern dereferences @pgm_thread@ to initialize the thread-local object, because its initialization calls @atExit@, which immediately calls @malloc@ to obtain storage.
-This recursion is handled with another thread-local flag to prevent double initialization.
-A similar problem exists when the KT terminates and calls member @~ThreadManager@, because immediately afterwards, the terminating KT calls @free@ to deallocate the storage obtained from the @atExit@.
-In the meantime, the terminated heap has been put on the global-heap free-stack, and may be active by a new KT, so the @atExit@ free is handled as a free to another heap and put onto the away list using locking.
-
-For user threading systems, the KTs are controlled by the runtime, and hence, start/end pointers are known and interact directly with the llheap allocator for \uC and \CFA, which eliminates or simplifies several of these problems.
-The following API was created to provide interaction between the language runtime and the allocator.
-\begin{lstlisting}
-void startThread();			$\C{// KT starts}$
-void finishThread();			$\C{// KT ends}$
-void startup();				$\C{// when application code starts}$
-void shutdown();			$\C{// when application code ends}$
-bool traceHeap();			$\C{// enable allocation/free printing for debugging}$
-bool traceHeapOn();			$\C{// start printing allocation/free calls}$
-bool traceHeapOff();			$\C{// stop printing allocation/free calls}$
-\end{lstlisting}
-This kind of API is necessary to allow concurrent runtime systems to interact with different memory allocators in a consistent way.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\subsection{Added Features and Methods}
-
-The C dynamic-allocation API (see Figure~\ref{f:CDynamicAllocationAPI}) is neither orthogonal nor complete.
-For example,
-\begin{itemize}
-\item
-It is possible to zero fill or align an allocation but not both.
-\item
-It is \emph{only} possible to zero fill an array allocation.
-\item
-It is not possible to resize a memory allocation without data copying.
-\item
-@realloc@ does not preserve initial allocation properties.
-\end{itemize}
-As a result, programmers must provide these options, which is error prone, resulting in blaming the entire programming language for a poor dynamic-allocation API.
+Unfortunately, @startup@/@shutdown@ only apply to the program KT, not to any additional KTs created by the program.
+However, it is essential for the allocator to know when each KT is started/terminated to initialize/de-initialize the KT's heap.
+Initialization can be handled by making the global flag (above) thread-local and then the initialization check along the fast path covers the first allocation by a newly created thread.
+De-initialization is handled by registering a destructor routine using @pthread_key_create@ in the initialization code triggered along the fast path, which subsequently calls the destructor at thread termination.
+
+
+\subsection{User-level Threading Support}
+\label{s:UserlevelThreadingSupport}
+
+llheap is the underlying allocator in the user-threading programming languages \uC and \CFA.
+These systems have preemptive scheduling, which requires management of timing events through a signal handler (@SIGALRM@).
+The complexity in these systems is the serially-reusable problem (see Section~\ref{s:SingleThreadedMemoryAllocator}) when UTs are time sliced (language level) independently from KTs (OS level).
+The solution is to prevent interrupts resulting in a CPU or KT change during critical operations, eliminating problems like starting a memory operation on one KT and completing it on another when the underlying heaps are different.
+% For user-level threading, the serially-reusable problem occurs with time slicing for preemptable user-level scheduling, as the interrupted UT is unlikely to be restarted on the same KT.
+However, without time slicing, a long running UT prevents the execution of other UTs (starvation).
+
+The languages modify llheap using two techniques to prevent time slicing during non-interruptible allocation operations.
+On the slow path, when executing expensive operations, time-slicing interrupts are disabled/enabled, so the operation completes atomically on the KT.
+On the fast path, all non-interruptible allocation/deallocation routines are placed in a separate code segment.
+The linker places this segment into a contiguous block of memory. %, but the order of routines within the block is unspecified.
+Then the time-slice signal handler compares the program counter at the point of interrupt with the start/end address of the non-interruptible segment, and if executing within the segment, the signal handler returns without context switching.
+The llheap funnel design simplifies this implementation so only a few funnel and statistics routines are located in the non-interruptible section.
+% This technique is fragile as no mechanism exists to ensure all crucial code along the fast path is placed into the non-interruptible segment.
+
+Interestingly, marking non-interruptible operations by bracketing them with a set/reset of a thread-local flag fails, as read/write is not atomic on some machines.
+For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing.
+Hence, there is a window between loading the kernel-thread-local pointer from the coprocessor register into a normal register and adding the displacement, during which a time slice can move a UT.
+As well, switching to a T:C model with restartable critical sections using @librseq@~\cite{Desnoyers19} was examined (see Section~\ref{s:MutualExclusion}).
+However, tests showed that while @librseq@ can determine the particular CPU quickly, setting up the restartable critical-section along the allocation fast-path produced a significant decrease in performance.
+Also, the number of undoable writes in @librseq@ is limited and restartable sequences cannot deal with UT migration across KTs.
+For example, UT$_1$ is executing an allocation by KT$_1$ on CPU$_1$ and a time-slice preemption occurs.
+The signal handler context switches UT$_1$ onto the user-level ready-queue and starts running UT$_2$ on KT$_1$, which immediately performs an allocation.
+Since KT$_1$ is still executing on CPU$_1$, @librseq@ takes no action because it assumes KT$_1$ is still executing the same critical section.
+Then UT$_1$ is scheduled onto KT$_2$ by the user-level scheduler, and its allocation operation continues in parallel with UT$_2$ using references into the heap associated with CPU$_1$, which corrupts CPU$_1$'s heap.
+If @librseq@ had an @rseq_abort@ which:
+\begin{enumerate}[leftmargin=*,topsep=2pt,itemsep=0pt,parsep=0pt]
+\item
+marks the current restartable critical-section as cancelled so it restarts when attempting to commit.
+\item
+does nothing if there is no current restartable critical section in progress.
+\end{enumerate}
+then @rseq_abort@ could be called on the back side of a user-level context switch.
+A feature similar to this idea might exist for hardware transactional memory.
+A significant effort was made to make this approach work but its complexity, lack of robustness, and performance costs resulted in its rejection.
+
+
+\subsection{C API}
+
+Figure~\ref{f:CDynamicAllocationAPI} shows the C dynamic allocation API, which is neither orthogonal nor complete.
+For example, it is possible to zero fill or align an allocation but not both, it is only possible to zero fill an array allocation, and it is not possible to resize a memory allocation without data copying.
+As a result, programmers must provide missing alternatives, which is error prone, rightly blaming the C programming language for a poor allocation API.
 Furthermore, newer programming languages have better type systems that can provide safer and more powerful APIs for memory allocation.
+The following presents llheap API changes.
 
 \begin{figure}
-\begin{lstlisting}
+\hspace*{\parindentlnth}
+\begin{tabular}{@{}l|l@{}}
+\begin{C++}
 void * malloc( size_t size );
-void * calloc( size_t nmemb, size_t size );
-void * realloc( void * ptr, size_t size );
-void * reallocarray( void * ptr, size_t nmemb, size_t size );
-void free( void * ptr );
+void * calloc( size_t dimension, size_t size );
+void * realloc( void * oaddr, size_t size );
+void * reallocarray( void * oaddr, size_t dimension, size_t size );
+void free( void * addr );
 void * memalign( size_t alignment, size_t size );
 void * aligned_alloc( size_t alignment, size_t size );
@@ -1576,309 +1187,282 @@
 void * valloc( size_t size );
 void * pvalloc( size_t size );
-
-struct mallinfo mallinfo( void );
-int mallopt( int param, int val );
-int malloc_trim( size_t pad );
-size_t malloc_usable_size( void * ptr );
+\end{C++}
+&
+\begin{C++}
+int mallopt( int option, int value );
+size_t malloc_usable_size( void * addr );
 void malloc_stats( void );
 int malloc_info( int options, FILE * fp );
-\end{lstlisting}
-\caption{C Dynamic-Allocation API}
+
+// Unsupported
+struct mallinfo mallinfo( void );
+int malloc_trim( size_t );
+void * malloc_get_state( void );
+int malloc_set_state( void * );
+\end{C++}
+\end{tabular}
+\caption{llheap support of C dynamic-allocation API}
 \label{f:CDynamicAllocationAPI}
 \end{figure}
 
-The following presents design and API changes for C, \CC (\uC), and \CFA, all of which are implemented in llheap.
-
-
-\subsubsection{Out of Memory}
-
-Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory;
-hence the need to return an alternate value for a zero-sized allocation.
-A different approach allowed by @C API@ is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.
-In theory, notifying the programmer of memory failure allows recovery;
-in practice, it is almost impossible to gracefully recover when out of memory.
-Hence, the cheaper approach of returning @nullptr@ for a zero-sized allocation is chosen because no pseudo allocation is necessary.
-
-
-\subsubsection{C Interface}
-
-For C, it is possible to increase functionality and orthogonality of the dynamic-memory API to make allocation better for programmers.
-
-For existing C allocation routines:
-\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
+
+\subsubsection{Extended C API}
+\label{s:ExtendedCAPI}
+
+llheap transparently augments the C dynamic memory API to increase functionality, orthogonality, and safety.
+\begin{itemize}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt]
+\item
+@malloc@ remembers the original allocation size separate from the actual allocation size.
 \item
 @calloc@ sets the sticky zero-fill property.
 \item
-@memalign@, @aligned_alloc@, @posix_memalign@, @valloc@ and @pvalloc@ set the sticky alignment property.
-\item
-@realloc@ and @reallocarray@ preserve sticky properties.
+@memalign@, @aligned_alloc@, @posix_memalign@, @valloc@ and @pvalloc@ set the sticky alignment property, remembering the specified alignment size.
+\item
+@realloc@ and @reallocarray@ preserve sticky properties across copying.
+\item
+@malloc_stats@ prints detailed statistics of allocation/free operations when linked with a statistic version.
+\item
+Existence of shell variable @MALLOC_STATS@ implicitly calls @malloc_stats@ at program termination, so precompiled programs do not have to be modified.
 \end{itemize}
 
-The C dynamic-memory API is extended with the following routines:
-
-\medskip\noindent
-\lstinline{void * aalloc( size_t dimension, size_t elemSize )}
-extends @calloc@ for allocating a dynamic array of objects with total size @dim@ $\times$ @elemSize@ but \emph{without} zero-filling the memory.
-@aalloc@ is significantly faster than @calloc@, which is the only alternative given by the standard memory-allocation routines for array allocation.
-It returns the address of the dynamic array or @NULL@ if either @dim@ or @elemSize@ are zero.
-
-\medskip\noindent
-\lstinline{void * resize( void * oaddr, size_t size )}
-extends @realloc@ for resizing an existing allocation, @oaddr@, to the new @size@ (smaller or larger than previous) \emph{without} copying previous data into the new allocation or preserving sticky properties.
-@resize@ is significantly faster than @realloc@, which is the only alternative.
-It returns the address of the old or new storage with the specified new size or @NULL@ if @size@ is zero.
-
-\medskip\noindent
-\lstinline{void * amemalign( size_t alignment, size_t dimension, size_t elemSize )}
-extends @aalloc@ and @memalign@ for allocating a dynamic array of objects with the starting address on the @alignment@ boundary.
-Sets sticky alignment property.
-It returns the address of the aligned dynamic-array or @NULL@ if either @dim@ or @elemSize@ are zero.
-
-\medskip\noindent
-\lstinline{void * cmemalign( size_t alignment, size_t dimension, size_t elemSize )}
-extends @amemalign@ with zero fill and has the same usage as @amemalign@.
-Sets sticky zero-fill and alignment property.
-It returns the address of the aligned, zero-filled dynamic-array or @NULL@ if either @dim@ or @elemSize@ are zero.
-
-\medskip\noindent
-\lstinline{size_t malloc_alignment( void * addr )}
-returns the object alignment, where objects not allocated with alignment return the minimal allocation alignment.
-For use in aligning similar allocations.
-
-\medskip\noindent
-\lstinline{bool malloc_zero_fill( void * addr )}
-returns true if the objects zero-fill sticky property is set and false otherwise.
-For use in zero filling similar allocations.
-
-\medskip\noindent
-\lstinline{size_t malloc_size( void * addr )}
-returns the object's request size, which is updated when an object is resized or zero if @addr@ is @NULL@ (see also @malloc_usable_size@).
-For use in similar allocations.
-
-\medskip\noindent
-\lstinline{int malloc_stats_fd( int fd )}
-changes the file descriptor where @malloc_stats@ writes statistics (default @stdout@) and returns the previous file descriptor.
-
-\medskip\noindent
-\lstinline{size_t malloc_expansion()}
-\label{p:malloc_expansion}
-set the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation request.
-It returns the heap extension size used throughout a program when requesting more memory from the system using @sbrk@ system-call, \ie called once at heap initialization.
-
-\medskip\noindent
-\lstinline{size_t malloc_mmap_start()}
-set the crossover between allocations occurring in the @sbrk@ area or separately mapped.
-It returns the crossover point used throughout a program, \ie called once at heap initialization.
-
-\medskip\noindent
-\lstinline{size_t malloc_unfreed()}
-\label{p:malloc_unfreed}
-amount subtracted to adjust for unfreed program storage (debug only).
-It returns the new subtraction amount and called by @malloc_stats@ (discussed in Section~\ref{}).
-
-
-\subsubsection{\CC Interface}
-
-The following extensions take advantage of overload polymorphism in the \CC type-system.
-
-\medskip\noindent
-\lstinline{void * resize( void * oaddr, size_t nalign, size_t size )}
-extends @resize@ with an alignment requirement, @nalign@.
-It returns the address of the old or new storage with the specified new size and alignment, or @NULL@ if @size@ is zero.
-
-\medskip\noindent
-\lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}
-extends @realloc@ with an alignment requirement, @nalign@.
-It returns the address of the old or new storage with the specified new size and alignment, or @NULL@ if @size@ is zero.
-
-
-\subsubsection{\CFA Interface}
-
-The following extensions take advantage of overload polymorphism in the \CFA type-system.
-The key safety advantage of the \CFA type system is using the return type to select overloads;
-hence, a polymorphic routine knows the returned type and its size.
-This capability is used to remove the object size parameter and correctly cast the return storage to match the result type.
-For example, the following is the \CFA wrapper for C @malloc@:
+llheap extends the C dynamic-memory API with new allocation operations with APIs matching existing C counterparts.
+\begin{itemize}[leftmargin=*,topsep=3pt,itemsep=1pt,parsep=0pt]
+\item
+@aalloc@ extends @calloc@ for dynamic array allocation \emph{without} zero-filling the memory (faster than @calloc@).
+\item
+@resize@ extends @realloc@ for resizing an allocation \emph{without} copying previous data or preserving sticky properties (faster than @realloc@).
+\item
+@resizearray@ extends @resize@ for an array allocation (faster than @reallocarray@).
+\item
+@amemalign@ extends @aalloc@ with alignment and sets sticky alignment property.
+\item
+@cmemalign@ extends @amemalign@ with zero fill and sets sticky zero-fill and alignment property.
+\item
+@aligned_resize@ extends @resize@ with an alignment.
+\item
+@aligned_resizearray@ extends @resizearray@ with alignment.
+\item
+@aligned_realloc@ extends @realloc@ with alignment.
+\item
+@aligned_reallocarray@ extends @reallocarray@ with alignment.
+\end{itemize}
+
+llheap extends the C dynamic memory API with new control operations.
+The following routines are called \emph{once} during llheap startup to set specific limits \emph{before} an application starts.
+Setting these values early is essential because allocations can occur from the dynamic loader and other libraries before application code executes.
+To set a value, define a specific routine in an application and return the desired value, \eg
+\begin{C++}
+size_t malloc_extend() { return 16 * 1024 * 1024; }
+\end{C++}
+\begin{itemize}[leftmargin=*,topsep=0pt,itemsep=1pt,parsep=0pt]
+\item
+@malloc_extend@ returns the number of bytes to extend the @sbrk@ area when there is insufficient free storage to service an allocation request.
+\item
+@malloc_mmap_start@ returns the crossover allocation size from the @sbrk@ area to separate mapped areas, see also @mallopt( M_MMAP_THRESHOLD )@.
+\item
+@malloc_unfreed@ returns the amount subtracted from the global unfreed program storage to adjust for unreleased storage from routines like @printf@ (debug only).
+\end{itemize}
+
+llheap extends the C dynamic-memory API with functions to query object properties.
+\begin{itemize}[leftmargin=*,topsep=3pt,itemsep=1pt,parsep=0pt]
+\item
+@malloc_size@ returns the requested size of a dynamic object, which is updated when an object is resized, similar to @malloc_usable_size@.
+\item
+@malloc_alignment@ returns the object alignment, where the minimal alignment is 16 bytes.
+\item
+@malloc_zero_fill@ returns true if the object is zero filled.
+\item
+@malloc_remote@ returns true if the object is from a remote heap (@OWNERSHIP@ only).
+\end{itemize}
+
+llheap extends the C dynamic-memory API with new statistics control.
+\begin{itemize}[leftmargin=*,topsep=3pt,itemsep=1pt,parsep=0pt]
+\item
+@malloc_stats_fd@ sets the file descriptor for @malloc_stats@ writes (default @stdout@).
+\item
+@malloc_stats_clear@ clears the statistics counters for all thread heaps.
+\item
+@heap_stats@ extends @malloc_stats@ to only print statistics for the heap associated with the executing thread.
+\end{itemize}
+
+
+\subsubsection{Modern Allocation API}
+
+Modern programming languages have complex type systems that can be used to consolidate the panoply of memory allocation routines and features, providing a simpler programming experience and safety.
+The \CFA language is used to demonstrate this capability, because llheap forms the memory allocator for this C variant, but other languages can provide similar APIs.
+
+\CFA polymorphism reduces the allocation API to two overloaded routines allocating a single object or an array of objects.
 \begin{cfa}
-forall( T & | sized(T) ) {
-	T * malloc( void ) {
-		if ( _Alignof(T) <= libAlign() ) return @(T *)@malloc( @sizeof(T)@ ); // C allocation
-		else return @(T *)@memalign( @_Alignof(T)@, @sizeof(T)@ ); // C allocation
-	} // malloc
+forall( T & ) {
+	T * alloc( /* list of property functions ... */  ) { ... } // singleton allocation
+	T * alloc( size_t @dimension@, /* list of property functions ... */  ) { ... } // array allocation
+}
 \end{cfa}
-and is used as follows:
-\begin{lstlisting}
-int * i = malloc();
-double * d = malloc();
-struct Spinlock { ... } __attribute__(( aligned(128) ));
-Spinlock * sl = malloc();
-\end{lstlisting}
-where each @malloc@ call provides the return type as @T@, which is used with @sizeof@, @_Alignof@, and casting the storage to the correct type.
-This interface removes many of the common allocation errors in C programs.
-Figure~\ref{f:CFADynamicAllocationAPI} show the \CFA wrappers for the equivalent C/\CC allocation routines with same semantic behaviour.
-
-\begin{figure}
-\begin{lstlisting}
-T * malloc( void );
-T * aalloc( size_t dim );
-T * calloc( size_t dim );
-T * resize( T * ptr, size_t size );
-T * realloc( T * ptr, size_t size );
-T * memalign( size_t align );
-T * amemalign( size_t align, size_t dim );
-T * cmemalign( size_t align, size_t dim  );
-T * aligned_alloc( size_t align );
-int posix_memalign( T ** ptr, size_t align );
-T * valloc( void );
-T * pvalloc( void );
-\end{lstlisting}
-\caption{\CFA C-Style Dynamic-Allocation API}
-\label{f:CFADynamicAllocationAPI}
-\end{figure}
-
-In addition to the \CFA C-style allocator interface, a new allocator interface is provided to further increase orthogonality and usability of dynamic-memory allocation.
-This interface helps programmers in three ways.
-\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
-\item
-naming: \CFA regular and @ttype@ polymorphism (@ttype@ polymorphism in \CFA is similar to \CC variadic templates) is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
-\item
-named arguments: individual allocation properties are specified using postfix function call, so the programmers do not have to remember parameter positions in allocation calls.
-\item
-object size: like the \CFA's C-interface, programmers do not have to specify object size or cast allocation results.
-\end{itemize}
-Note, postfix function call is an alternative call syntax, using backtick @`@, so the argument appears before the function name, \eg
+Because the \CFA type system uses the return type to select overloads (like Ada), this capability is leveraged to remove the object-size parameter and return cast for regular calls to C @malloc@ or @memalign@.
 \begin{cfa}
-duration ?@`@h( int h );		// ? denote the position of the function operand
-duration ?@`@m( int m );
-duration ?@`@s( int s );
-duration dur = 3@`@h + 42@`@m + 17@`@s;
+inline T * alloc( ... ) {
+	if ( _Alignof(T) <= defaultAlign() ) return @(T *)@malloc( @sizeof(T)@ ); // C allocation
+	else return @(T *)@memalign( @_Alignof(T)@, @sizeof(T)@ ); // C allocation
+}
 \end{cfa}
-
-The following extensions take advantage of overload polymorphism in the \CC type-system.
-
-\medskip\noindent
-\lstinline{T * alloc( ... )} or \lstinline{T * alloc( size_t dimension, ... )}
-is overloaded with a variable number of specific allocation operations, or an integer dimension parameter followed by a variable number of specific allocation operations.
-These allocation operations can be passed as named arguments when calling the \lstinline{alloc} routine.
-A call without parameters returns a dynamically allocated object of type @T@ (@malloc@).
-A call with only the dimension (dim) parameter returns a dynamically allocated array of objects of type @T@ (@aalloc@).
-The variable number of arguments consist of allocation properties, which can be combined to produce different kinds of allocations.
-The only restriction is for properties @realloc@ and @resize@, which cannot be combined.
-
-The allocation property functions are:
-
-\medskip\noindent
-\lstinline{T_align ?`align( size_t alignment )}
-to align the allocation.
-The alignment parameter must be $\ge$ the default alignment (@libAlign()@ in \CFA) and a power of two.
-The following example returns a dynamic object and object array aligned on a 4096-byte boundary.
+The calls to these two routines are now much safer than the C equivalents.
+\begin{C++}
+int * ip = alloc(); $\C[2.75in]{// T => int, sizeof => 4/8, alignment => default}$
+double * dp = alloc(); $\C{// T => double, sizeof => 8, alignment => default}$
+struct Spinlock { ... } [[aligned(128)]] * sp = alloc(); $\C{// T => Spinlock, sizeof => ..., alignment = 128}$
+int * ia = alloc( 10 ); $\C{// T => int, sizeof => 4/8, alignment => default, dimension => 10}\CRT$
+\end{C++}
+At compile time, each call to @alloc@ extracts the return type @T@ from the left-hand side of the assignment, which is then used in @sizeof@, @_Alignof@, and casting the storage to the correct type.
+The @inline@ and constant expression allow the compiler to remove the @if@ statement.
+This interface removes all the common allocation-call errors in C and provides a uniform name covering all allocations, reducing the cognitive burden.
+
+The property functions are a variable number of routines providing @alloc@ with management details and actions.
+The functions are @align@, @fill@, @resize@, and @realloc@, and written in postfix versus prefix notation solely for aesthetic reasons, \eg @3`fill@ $\equiv$ @fill( 3 )@.
+The examples are arrays but apply equally to singleton allocations.
 \begin{cfa}
-int * i0 = alloc( @4096`align@ );  sout | i0 | nl;
-int * i1 = alloc( 3, @4096`align@ );  sout | i1; for (i; 3 ) sout | &i1[i]; sout | nl;
-
-0x555555572000
-0x555555574000 0x555555574000 0x555555574004 0x555555574008
+int * ip = alloc( 5, @4096`align@, @5`fill@ ); $\C[3in]{// start array on 4096 boundary and initialize elements with 5}$
+int * ip2 = alloc( 10, @ip`fill@, @(malloc_alignment( ip ))`align@ ); $\C{// first 5 elements same as ip, same alignment as ip}$
+_Complex double * cdp = alloc( 5, @(3.5+4.1i)`fill@ ); $\C{// initialize complex elements with 3.5+4.1i}$
+struct S { int i, j; };
+S * sp = alloc( 10, @((S){3, 4})`fill@ ); $\C{// initialize structure elements with {3, 4}}$
+ip = alloc( 10, @ip`realloc@, @10`fill@ ); $\C{// make array ip larger and initialize new elements with 10}$
+double * dp = alloc( 5, @ip2`resize@, @256`align@, @13.5`fill@ ); $\C{// reuse ip2 storage for something else}\CRT$
 \end{cfa}
-
-\medskip\noindent
-\lstinline{S_fill(T) ?`fill ( /* various types */ )}
-to initialize storage.
-There are three ways to fill storage:
-\begin{enumerate}[itemsep=0pt,parsep=0pt]
-\item
-A char fills each byte of each object.
-\item
-An object of the returned type fills each object.
-\item
-An object array pointer fills some or all of the corresponding object array.
-\end{enumerate}
-For example:
-\begin{cfa}[numbers=left,xleftmargin=2.5\parindentlnth]
-int * i0 = alloc( @0n`fill@ );  sout | *i0 | nl;  // disambiguate 0
-int * i1 = alloc( @5`fill@ );  sout | *i1 | nl;
-int * i2 = alloc( @'\xfe'`fill@ ); sout | hex( *i2 ) | nl;
-int * i3 = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | i3[i]; sout | nl;
-int * i4 = alloc( 5, @0xdeadbeefN`fill@ );  for ( i; 5 ) sout | hex( i4[i] ); sout | nl;
-int * i5 = alloc( 5, @i3`fill@ );  for ( i; 5 ) sout | i5[i]; sout | nl;
-int * i6 = alloc( 5, @[i3, 3]`fill@ );  for ( i; 5 ) sout | i6[i]; sout | nl;
+Finally, \CFA has constructors and destructors, like \CC, which are invoked when allocating with @new@ and @delete@.
+\begin{cfa}
+T * t = new( 3, 4, 5 ); $\C[3in]{// allocate T and call constructor T\{ 3, 4, 5 \}}$
+W * w = new( 3.5 ); $\C{// allocate W and call constructor W\{ 3.5 \}}$
+delete( t, w ); $\C{// call destructors and free t and w}\CRT$
 \end{cfa}
-\begin{lstlisting}[numbers=left,xleftmargin=2.5\parindentlnth]
-0
-5
-0xfefefefe
-5 5 5 5 5
-0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef
-5 5 5 5 5
-5 5 5 -555819298 -555819298  // two undefined values
-\end{lstlisting}
-Examples 1 to 3 fill an object with a value or characters.
-Examples 4 to 7 fill an array of objects with values, another array, or part of an array.
-
-\medskip\noindent
-\lstinline{S_resize(T) ?`resize( void * oaddr )}
-used to resize, realign, and fill, where the old object data is not copied to the new object.
-The old object type may be different from the new object type, since the values are not used.
-For example:
-\begin{cfa}[numbers=left,xleftmargin=2.5\parindentlnth]
-int * i = alloc( @5`fill@ );  sout | i | *i;
-i = alloc( @i`resize@, @256`align@, @7`fill@ );  sout | i | *i;
-double * d = alloc( @i`resize@, @4096`align@, @13.5`fill@ );  sout | d | *d;
-\end{cfa}
-\begin{lstlisting}[numbers=left,xleftmargin=2.5\parindentlnth]
-0x55555556d5c0 5
-0x555555570000 7
-0x555555571000 13.5
-\end{lstlisting}
-Examples 2 to 3 change the alignment, fill, and size for the initial storage of @i@.
-
-\begin{cfa}[numbers=left,xleftmargin=2.5\parindentlnth]
-int * ia = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | ia[i]; sout | nl;
-ia = alloc( 10, @ia`resize@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl;
-sout | ia; ia = alloc( 5, @ia`resize@, @512`align@, @13`fill@ ); sout | ia; for ( i; 5 ) sout | ia[i]; sout | nl;;
-ia = alloc( 3, @ia`resize@, @4096`align@, @2`fill@ );  sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl;
-\end{cfa}
-\begin{lstlisting}[numbers=left,xleftmargin=2.5\parindentlnth]
-5 5 5 5 5
-7 7 7 7 7 7 7 7 7 7
-0x55555556d560 0x555555571a00 13 13 13 13 13
-0x555555572000 0x555555572000 2 0x555555572004 2 0x555555572008 2
-\end{lstlisting}
-Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
-
-\medskip\noindent
-\lstinline{S_realloc(T) ?`realloc( T * a ))}
-used to resize, realign, and fill, where the old object data is copied to the new object.
-The old object type must be the same as the new object type, since the value is used.
-Note, for @fill@, only the extra space after copying the data from the old object is filled with the given parameter.
-For example:
-\begin{cfa}[numbers=left,xleftmargin=2.5\parindentlnth]
-int * i = alloc( @5`fill@ );  sout | i | *i;
-i = alloc( @i`realloc@, @256`align@ );  sout | i | *i;
-i = alloc( @i`realloc@, @4096`align@, @13`fill@ );  sout | i | *i;
-\end{cfa}
-\begin{lstlisting}[numbers=left,xleftmargin=2.5\parindentlnth]
-0x55555556d5c0 5
-0x555555570000 5
-0x555555571000 5
-\end{lstlisting}
-Examples 2 to 3 change the alignment for the initial storage of @i@.
-The @13`fill@ in example 3 does nothing because no extra space is added.
-
-\begin{cfa}[numbers=left,xleftmargin=2.5\parindentlnth]
-int * ia = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | ia[i]; sout | nl;
-ia = alloc( 10, @ia`realloc@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl;
-sout | ia; ia = alloc( 1, @ia`realloc@, @512`align@, @13`fill@ ); sout | ia; for ( i; 1 ) sout | ia[i]; sout | nl;;
-ia = alloc( 3, @ia`realloc@, @4096`align@, @2`fill@ );  sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl;
-\end{cfa}
-\begin{lstlisting}[numbers=left,xleftmargin=2.5\parindentlnth]
-5 5 5 5 5
-5 5 5 5 5 7 7 7 7 7
-0x55555556c560 0x555555570a00 5
-0x555555571000 0x555555571000 5 0x555555571004 2 0x555555571008 2
-\end{lstlisting}
-Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
-The @13`fill@ in example 3 does nothing because no extra space is added.
-
-These \CFA allocation features are used extensively in the development of the \CFA runtime.
+The benefits of high-level API simplifications should not be underestimated with respect to programmer productivity and safety.
+
+
+\section{Performance}
+\label{c:Performance}
+
+This section uses a number of benchmarks to compare the behaviour of currently popular memory allocators with llheap.
+The goal is to see if llheap is a competitive memory allocator;
+no attempt is made to select a performance winner.
+
+
+\subsection{Experimental Environment}
+\label{s:ExperimentalEnvironment}
+
+The performance experiments are run on three different multi-core architectures, ARM, AMD, and Intel, covering memory models weak order (WO) and total store order (TSO), to determine if there is consistency across architectures:
+\begin{description}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt]
+\item[ARM]
+Gigabyte E252-P31 128-core socket 3.0 GHz, WO memory model
+\item[AMD]
+Supermicro AS--1125HS--TNR EPYC 9754 128--core socket, hyper-threading $\times$ 2 sockets (512 processing units) 2.25 GHz, TSO memory model
+\item[Intel]
+Supermicro SYS-121H-TNR Xeon Gold 6530 32--core, hyper-threading $\times$ 2 sockets (128 processing units) 2.1 GHz, TSO memory model
+\end{description}
+For the parallel experiments, threads are pinned to cores in a linear fashion, \ie from core $N$ to $N+M$, where $N$ is the start of a socket boundary.
+This layout produces the best throughput, as there is little or no communication among threads in the benchmarks, so binding tightly to the cache layout is unnecessary;
+hence, there is almost no OS or NUMA effects perturbing the benchmarks.
+
+The compilers are gcc/g++-14.2.0 and gfortran-14.2.0 running on the Linux v6.8.0-52-generic OS, with @LD_PRELOAD@ used to override the default allocator.
+To prevent eliding certain code patterns, crucial parts of a test are wrapped by the function @pass@
+\begin{uC++}
+static inline void * pass( void * v ) {		$\C[2.5in]{// prevent eliding, cheaper than volatile}$
+	__asm__  __volatile__( "" : "+r"(v) );  return v;
+}
+void * vp = pass( malloc( 0 ) );			$\C{// wrap malloc call to prevent elision}\CRT$
+\end{uC++}
+The call to @pass@ can prevent a small number of compiler optimizations but this cost is the same for all allocators.
+
+
+\subsection{Memory Allocators}
+\label{s:MemoryAllocators}
+
+Historically, a number of C/\CC, stand-alone, general-purpose memory-allocators, \eg dlmalloc~\cite{dlmalloc}, have been written for use by programming languages providing unmanaged memory.
+For this work, 6 of the popular, thread-safe memory-allocators are selected for comparison, along with llheap.
+
+\begin{description}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt,listparindent=\parindent]
+\item[glibc~\cite{glibc}] % https://sourceware.org/glibc/wiki/MallocInternals
+is the default glibc allocator, derived from ptmalloc, derived from dlmalloc.
+glibc has multiple threads sharing multiple heaps with a global shared heap, header per allocation, free-lists with different organizational criteria and searching, and coalescing of certain adjacent free-areas.
+Version Ubuntu GLIBC 2.31-0ubuntu9.7 2.31 compiled by Ubuntu 24.04.
+
+\item[hoard~\cite{hoard}]
+has multiple threads sharing multiple heaps with a global shared heap, where each heap is composed of superblocks containing fixed-sized objects, with each super-block having a single header for its objects and reuse of superblocks if empty.
+Version 3.13.0, compiled with gcc-14.2.0, default configuration, using command @make@.
+Over the past 5 years, hoard development has stopped;
+it fails on the ARM architecture, possibly because of the WO memory model.
+
+\item[jemalloc~\cite{Evans06}]
+has multiple threads sharing multiple heaps (arenas) composed of same-sized chunks subdivided into regions composed of pages where each page is a container of same-sized objects.
+The components are organized into a number of data structures to facilitate allocations, freeing, and coalescing.
+Large objects are allocated using @mmap@.
+Version jemalloc-5.3.0~\cite{jemalloc}, built with the default configuration, using commands: @autogen.sh; configure; make; make install@.
+
+\item[mimalloc~\cite{Leijen19}]
+has a heap per thread composed of a reserved area subdivided into 3-sized page buffers, where each page is a container of same-sized objects.
+Each page manages its own internal free list and the free list is built when a page is created so there is no initial bump pointer.
+Empty pages are coalesced for reuse.
+Uses a fast freelist search for small allocation sizes.
+Ownership is handled with a separate remote free-list, and remote frees are batched before pushing to the owner heap.
+Version mimalloc-v2.1.2, built with the default configuration, using commands @cmake . ; make@.
+
+\item[tbbmalloc~{\cite[pp.~314--315]{Kukanov07}}] is the allocator shipped with Intel's Threading Building Blocks (TBB).
+tbbmalloc has a heap per thread for small allocations, with large allocation handled using a single request.
+There is a global heap to acquire and reuse space obtained from the OS;
+its reserved space is divided into thread buffers (containers).
+A thread heap is composed of linked containers, with binning used to manage the allocations/deallocations within the containers.
+Small object space is not returned to the OS.
+An allocation has to search its container list to find a partially filled one.
+The search is mitigated by moving mostly-free containers to the start of the container list;
+free containers are returned to the global heap.
+Ownership is handled with a separate remote free-list.
+Version @libtbbmalloc.so.2.11@, installed using @apt-get install libtbb-dev@.
+
+\item[tcmalloc~\cite{tcmalloc}] is the allocator shipped with Google's perftools.\footnote{
+Currently, there are two versions of tcmalloc: Google's perftools and one experimental version available on GitHub, which is not an officially supported Google product.
+We selected the perftools version because it is the most likely choice for users as it installs directly onto multiple OSs.}
+tcmalloc has per CPU heaps for small allocations, with large allocation handled with a single request.
+CPU heaps require a rollback mechanism, @rseq@, to prevent the serially-reusable problem.
+There is a global heap to acquire and reuse space obtained from the OS;
+its reserved space is divided into multi-page spans (containers) of fixed sized objects.
+A CPU heap uses binning to manage the allocations/deallocations within the containers.
+Free containers are returned to the OS.
+Version @libtcmalloc_minimal.so.4@, installed using @apt-get install google-perftools@.
+\end{description}
+
+Untested allocators:
+\begin{description}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt]
+\item[ptmalloc3]
+is 8 years old and already integrated into glibc.
+\item[rpmalloc]
+requires explicit insertion of initialization/finalization calls for handling concurrent kernel threads.
+Having to augment programs, like SPEC CPU benchmarks, is deemed outside of normal programmer expectations.
+% An allocator should just plugin and work.
+\item[lock free] allocators guarantee allocation progress whether threads are delayed or killed using an atomic instruction, often CAS.
+The original lock-free allocator~\cite{Michael04} is completely lock-free.
+As stated, atomic instructions on the fast path result in a significant performance penalty.
+Hence, new allocators are not completely lock free, switching to a combination of synchronization-free, \ie 1:1 allocator model, on the fast path and lock-free on the slow path(s) to manipulate shared data structures~\cite{rpmalloc}.
+These allocators are better labelled as \newterm{hybrid locking} rather than lock free, as the lock-free aspect is not contributing to performance.
+
+% We observe that none of the pre-built standard malloc replacement libraries for ubuntu \url{https://launchpad.net/ubuntu/+search?text=malloc} are completely lock-free.
+% 1:1 allocators can avoid synchronization (locks, or lock-free techniques with atomic instructions as well as cache coherence overheads) in their critical fast paths, but care must be taken to ensure the amount of free memory captured in thread-local structures is bounded.
+
+% Another approach to synchronization for allocators is \newterm{Restartable Critical Sections} ~\cite {https://dl.acm.org/doi/10.1145/512429.512451, https://dl.acm.org/doi/pdf/10.5555/1698184, https://doi.org/10.1145/1064979.1064985}, which are available in linux as the \newterm{RSEQ} facility ~\cite{https://www.gnu.org/software/libc/manual/html_node/Restartable-Sequences.html}.
+% Restartable Critical Sections  provide obstruction-free progress by means of specially crafted transactions that will be rolled back if they happen to be interrupted by the kernel.
+% Restartable Critical Sections transactions can only operate on CPU-specific data, however, which forces a T:C allocator configuration.
+% Google's experimental tcmalloc \url{https://google.github.io/tcmalloc/rseq.html} uses RSEQ.  
+% SuperMalloc \url{ACM DL is dead at the moment, but it's in ISMM 2015} attempts to use hardware transactional memory for lock elision, but falls back to classic locking if the hardware facility is not present or when a given transactional attempt encounters repeated progress failures.  
+
+
+\end{description}
+
+Allocator size is an indirect indicator of complexity.
+Lines-of-code are computed with command @cloc *.{h,c,cc,cpp}@, except for hoard:
+@cloc --exclude-lang="Bourne Shell",SKILL,Markdown,Bazel  Heap-Layers source include@.
+\begin{center}
+\setlength{\tabcolsep}{13pt}
+\begin{tabular}{@{}rrrrrrrr@{}}
+llheap & glibc & hoard & jemalloc & mimalloc & tbbmalloc & tcmalloc \\
+1,450 & 3,807 & 11,932 & 24,512 & 6,887 & 6,256 & 33,963 \\
+\end{tabular}
+\end{center}
 
 
@@ -1886,20 +1470,14 @@
 \label{s:Benchmarks}
 
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Micro Benchmark Suite
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
 There are two basic approaches for evaluating computer software: benchmarks and micro-benchmarks.
-\begin{description}
+\begin{description}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt]
 \item[Benchmarks]
 are a suite of application programs (SPEC CPU/WEB) that are exercised in a common way (inputs) to find differences among underlying software implementations associated with an application (compiler, memory allocator, web server, \etc).
 The applications are supposed to represent common execution patterns that need to perform well with respect to an underlying software implementation.
-Benchmarks are often criticized for having overlapping patterns, insufficient patterns, or extraneous code that masks patterns.
+Benchmarks are often criticized for having overlapping patterns, insufficient patterns, or extraneous code that masks patterns, resulting in little or no information about why an application did or did not perform well for the tested software.
 \item[Micro-Benchmarks]
 attempt to extract the common execution patterns associated with an application and run the pattern independently.
 This approach removes any masking from extraneous application code, allows execution pattern to be very precise, and provides an opportunity for the execution pattern to have multiple independent tuning adjustments (knobs).
-Micro-benchmarks are often criticized for inadequately representing real-world applications.
+Micro-benchmarks are often criticized for inadequately representing real-world applications, but that is not their purpose.
 \end{description}
 
@@ -1907,1108 +1485,633 @@
 In the past, an assortment of applications have been used for benchmarking allocators~\cite{Detlefs93,Berger00,Berger01,berger02reconsidering}: P2C, GS, Espresso/Espresso-2, CFRAC/CFRAC-2, GMake, GCC, Perl/Perl-2, Gawk/Gawk-2, XPDF/XPDF-2, ROBOOP, Lindsay.
 As well, an assortment of micro-benchmark have been used for benchmarking allocators~\cite{larson99memory,Berger00,streamflow}: threadtest, shbench, Larson, consume, false sharing.
-Many of these benchmark applications and micro-benchmarks are old and may not reflect current application allocation patterns.
-
-This work designs and examines a new set of micro-benchmarks for memory allocators that test a variety of allocation patterns, each with multiple tuning parameters.
-The aim of the micro-benchmark suite is to create a set of programs that can evaluate a memory allocator based on the key performance metrics such as speed, memory overhead, and cache performance.
-% These programs can be taken as a standard to benchmark an allocator's basic goals.
-These programs give details of an allocator's memory overhead and speed under certain allocation patterns.
-The allocation patterns are configurable (adjustment knobs) to observe an allocator's performance across a spectrum allocation patterns, which is seldom possible with benchmark programs.
-Each micro-benchmark program has multiple control knobs specified by command-line arguments.
-
-The new micro-benchmark suite measures performance by allocating dynamic objects and measuring specific metrics.
-An allocator's speed is benchmarked in different ways, as are issues like false sharing.
-
-
-\subsection{Prior Multi-Threaded Micro-Benchmarks}
-
-Modern memory allocators, such as llheap, must handle multi-threaded programs at the KT and UT level.
-The following multi-threaded micro-benchmarks are presented to give a sense of prior work~\cite{Berger00} at the KT level.
-None of the prior work addresses multi-threading at the UT level.
-
-
-\subsubsection{threadtest}
-
-This benchmark stresses the ability of the allocator to handle different threads allocating and deallocating independently.
-There is no interaction among threads, \ie no object sharing.
-Each thread repeatedly allocates 100,000 \emph{8-byte} objects then deallocates them in the order they were allocated.
-The execution time of the benchmark evaluates its efficiency.
-
-
-\subsubsection{shbench}
-
-This benchmark is similar to threadtest but each thread randomly allocate and free a number of \emph{random-sized} objects.
-It is a stress test that also uses runtime to determine efficiency of the allocator.
-
-
-\subsubsection{Larson}
-
-This benchmark simulates a server environment.
-Multiple threads are created where each thread allocates and frees a number of random-sized objects within a size range.
-Before the thread terminates, it passes its array of 10,000 objects to a new child thread to continue the process.
-The number of thread generations varies depending on the thread speed.
-It calculates memory operations per second as an indicator of the memory allocator's performance.
-
-
-\subsection{New Multi-Threaded Micro-Benchmarks}
-
-The following new benchmarks were created to assess multi-threaded programs at the KT and UT level.
-For generating random values, two generators are supported: uniform~\cite{uniformPRNG} and fisher~\cite{fisherPRNG}.
-
-
-\subsubsection{Churn Benchmark}
-\label{s:ChurnBenchmark}
-
-The churn benchmark measures the runtime speed of an allocator in a multi-threaded scenario, where each thread extensively allocates and frees dynamic memory.
-Only @malloc@ and @free@ are used to eliminate any extra cost, such as @memcpy@ in @calloc@ or @realloc@.
-Churn simulates a memory intensive program and can be tuned to create different scenarios.
-
-Figure~\ref{fig:ChurnBenchFig} shows the pseudo code for the churn micro-benchmark.
-This benchmark creates a buffer with M spots and an allocation in each spot, and then starts K threads.
-Each thread picks a random spot in M, frees the object currently at that spot, and allocates a new object for that spot.
-Each thread repeats this cycle N times.
-The main thread measures the total time taken for the whole benchmark and that time is used to evaluate the memory allocator's performance.
+Many of these benchmark applications and micro-benchmarks are old and do not reflect current application allocation patterns.
+
+Except for the SPEC CPU benchmark, the other performance benchmarks used for testing are micro-benchmarks created for this paper.
+All the benchmarks are used solely to extract differences among memory allocators.
+The term benchmark in the following discussion means benchmark or micro-benchmark.
+
+
+\subsection{SPEC CPU 2017}
+
+SPEC CPU 2017 is an industry-standardized suite for measuring and comparing performance of compute-intensive programs.
+It contains integer and floating-point tests written in C, \CC, and Fortran, covering throughput and speed, where each test contains multiple benchmarks~\cite{SPECCPU2017}.
+All the benchmarks perform dynamic allocation, from light to heavy.
+However, the dynamic allocation is relatively small in comparison to the benchmark computation.
+Therefore, differences among allocators should be small, unless a particular access pattern triggers a pathological case.
+The reason for performing SPEC CPU across the allocators is to test this hypothesis.
+For allocator comparisons, we consider SPEC CPU differences of 5\% as equal and undetectable in general workloads and computing environments.
+For compiler comparisons, small differences of 1\% or 2\% are considered significant.
+
+Table~\ref{t:SPEC-CPU-benchmark} shows the elapsed time (inverted throughput) of the SPEC CPU tests condensed to the geomean across the benchmarks for each of the four SPEC tests, intrate, intspeed, fprate, and fpspeed, covering integer and floating-point operations.
+The tests are configured with size = ref, intrate/fprate: copies = 1, intspeed: threads = 1, fpspeed: threads = 16;
+only fpspeed is concurrent using OpenMP.
+Rigorous testing of SPEC CPU often runs many benchmark copies in parallel to completely load all computer cores.
+However, these tests quickly run into architectural bottlenecks having little to do with an allocator's behaviour.
+Running a single program bound to one core means the focus is strictly on allocator differences rather than conjoining transient OS and hardware differences.
+The throughputs are ranked with {\color{red}red} lowest time and {\color{blue}blue} highest, where lower is best.
+Hoard failed in multiple experiments on the ARM architecture, marked with {\color{purple}*Err*}, making it impossible to report the successful tests.
+
+The results show all allocators do well;
+the average, median, and relative standard deviation (right column)\footnote{$rstd = \sigma / \mu \times 100$, where $\sigma =$ standard deviation and $\mu =$ average} support our hypothesis that the performance difference, 0.6\% to 2.3\%, across allocators is small.
+One implementation trend we observed is that two of the integer tests, @omnetpp@ and @xalancbmk@, had an execution pattern that exercised the cache.
+For the three allocators using headers-per-allocation, glibc, llheap, and tbbmalloc, performance could be up to 40\% slower, between the best and worst allocator results.
+The reason is that the headers consumed part of the cache line, resulting in more cache misses.
+These two experiments disproportionately increased the geomean for these allocators for both integral experiments on all architectures.
+Hence, headers-per-allocation are disadvantaged for this specific execution pattern.
+The floating-point tests show no trends among the allocators.
+The goal for llheap in this experiment is to do well, which is established by it being close to the median result, meaning it is normally in the middle of the allocator results.
+
+\begin{table}
+\centering
+\caption{SPEC CPU benchmark, 3 hardware architectures, geomean per test in seconds, lower is better}
+\label{t:SPEC-CPU-benchmark}
+%\setlength{\tabcolsep}{6pt}
+\begin{tabular}{@{}p{15pt}@{\hspace{15pt}}r|*{7}{r}|*{3}{r}@{}}
+		&	bench/alloc. & glibc & hoard & jemalloc & llheap & mimalloc & tbbmalloc & tcmalloc & avg & med & rstd \\
+\cline{2-12}
+		&	intrate & {\color{blue}314.4} & {\color{violet}*Err*} & 300.3 & 309.9 & 302.6 & 313 & {\color{red}298.7} & 306.5 & 309.9 & 2\% \\
+ARM		&	intspeed & {\color{blue}439.1} & {\color{violet}*Err*} & 417.6 & 431.1 & 419.9 & 436.2 & {\color{red}415.5} & 426.6 & 431.1 & 2.2\% \\
+		&	fprate & 347.6 & {\color{violet}*Err*} & {\color{red}333.9} & 352.2 & {\color{blue}356.6} & 345.9 & 344.5 & 346.8 & 347.6 & 2\% \\
+		&	fpspeed & 248.4 & {\color{violet}*Err*} & 245.3 & 245.7 & {\color{blue}250.9} & 246.6 & {\color{red}243.8} & 246.8 & 246.6 & 0.93\%
+\end{tabular}
+
+\begin{comment}
+\bigskip
+\begin{tabular}{@{}p{15pt}@{\hspace{15pt}}r|*{7}{r}|*{3}{r}@{}}
+		&	bench/alloc. & glibc & hoard & jemalloc & llheap & mimalloc & tbbmalloc & tcmalloc & avg & med & rstd \\
+\cline{2-12}
+		&	intrate & 251 & 242 & 239 & 249 & 240 & {\color{blue}251} & {\color{red}237} & 244 & 242 & 2.3\% \\
+AMD		&	intspeed & 356 & 337 & 335 & 351 & 339 & {\color{blue}356} & {\color{red}333} & 344 & 339 & 2.7\% \\
+		&	fprate & 256 & 261 & {\color{red}250} & 257 & {\color{blue}270} & 256 & 254 & 258 & 256 & 2.3\% \\
+		&	fpspeed & 340 & {\color{blue}353} & {\color{red}326} & 338 & 348 & 341 & 328 & 339 & 340 & 2.7\%
+\end{tabular}
+\end{comment}
+
+\bigskip
+\begin{tabular}{@{}p{15pt}@{\hspace{15pt}}r|*{7}{r}|*{3}{r}@{}}
+		&	bench/alloc. & glibc & hoard & jemalloc & llheap & mimalloc & tbbmalloc & tcmalloc & avg & med & rstd \\
+\cline{2-12}
+		&	intrate & 251.2 & {\color{red}241.1} & 251.9 & 249.3 & 251.6 & 251.5 & {\color{blue}252.3} & 249.9 & 251.5 & 1.5\% \\
+AMD		&	intspeed & {\color{blue}356.1} & {\color{red}337.1} & 355.4 & 351.7 & 355.5 & 355.8 & 355.9 & 352.5 & 355.5 & 1.8\% \\
+		&	fprate & {\color{red}253.9} & {\color{blue}259.9} & 254.4 & 255.8 & 254.5 & 254.4 & 254.7 & 255.4 & 254.5 & 0.75\% \\
+		&	fpspeed & 329.9 & {\color{blue}339.6} & 330.6 & {\color{red}327.2} & 329.9 & 329.8 & 329.5 & 330.9 & 329.9 & 1.1\%
+\end{tabular}
+
+\bigskip
+\begin{tabular}{@{}p{15pt}@{\hspace{15pt}}r|*{7}{r}|*{3}{r}@{}}
+		&	bench./alloc. & glibc & hoard & jemalloc & llheap & mimalloc & tbbmalloc & tcmalloc & avg & med & rstd \\
+\cline{2-12}
+		&	intrate & 188.6 & 185.1 & 183.1 & 188.6 & 181.5 & {\color{blue}189.4} & {\color{red}181.2} & 185.4 & 185.1 & 1.8\% \\
+Intel	&	intspeed & 271.6 & 264.6 & 263.5 & 270.2 & 261.2 & {\color{blue}272.1} & {\color{red}260.3} & 266.2 & 264.6 & 1.7\% \\
+		&	fprate & 202.7 & {\color{red}201.8} & 204.4 & 205.1 & {\color{blue}205.3} & 204.7 & 203.7 & 204 & 204.4 & 0.59\% \\
+		&	fpspeed & 237.3 & 235.3 & 234.5 & 235.6 & {\color{blue}244.5} & 236.1 & {\color{red}233.6} & 236.7 & 235.6 & 1.4\%
+\end{tabular}
+\end{table}
+
+
+\subsection{Realloc Benchmark}
+
+Some examination of @realloc@ is necessary to encourage its use.
+Reallocation can be very efficient (both in space and time) when manipulating variable-sized objects, like strings, multi-precise numbers, or dynamic-sized arrays.
+Both X11 (500+ calls) and glibc (300+ calls) use realloc for various purposes.
+For example, in \CC:
+\begin{C++}
+string s = "abc"; // initial allocation and copy new value
+s = "gh"; // change size and copy new value
+s = "l" + s + "r"; // change size and copy new value
+s = s.substr(0,2); // reduce size
+\end{C++}
+variable @s@ changes size and value multiple times, plus temporary strings are created implicitly, \eg multiple concatenations, all of which require multiple allocations, copying, and deallocations.
+@realloc@ can optimize some of these operations in two ways:
+\begin{enumerate}[leftmargin=*]
+\item
+For decreasing size, Figure~\ref{f:ReallocOptDecreasing} shows a logical truncation of the existing object rather than creating a new object, \ie use a heuristic to decide whether to perform the 3-step procedure (allocate, copy, and free), or pretend the storage is decreased and return the old storage and value, performing zero work but increasing internal fragmentation.
+For example, a request to decrease size from 96 to 75 bytes can be implemented two ways:
+The 21 bytes of internal fragmentation at the end of the logical reallocation may be unavailable, directly available if the allocator supports @malloc_usable_size@, or indirectly available if put back on the allocator free list.
+\item
+For increasing size, Figure~\ref{f:ReallocOptIncreasing} takes advantage of the fact that many memory allocators quantize request sizes (binning), often returning slightly more storage than requested (internal fragmentation).
+For example, an initial request for 75 bytes may return 96 bytes of storage, giving 21 bytes of internal fragmentation:
+For increasing the size up to 21 bytes, realloc can take advantage of this unused space rather than performing the 3-step procedure, which can also result in unused storage.
+\end{enumerate}
 
 \begin{figure}
 \centering
-\begin{lstlisting}
-Main Thread
-	create worker threads
-	note time T1
-	...
-	note time T2
-	churn_speed = (T2 - T1)
-Worker Thread
-	initialize variables
-	...
-	for ( N )
-		R = random spot in array
-		free R
-		allocate new object at R
-\end{lstlisting}
-%\includegraphics[width=1\textwidth]{figures/bench-churn.eps}
-\caption{Churn Benchmark}
-\label{fig:ChurnBenchFig}
+\subfloat[Decreasing]{\label{f:ReallocOptDecreasing}\input{decreasing}}
+\hspace*{5pt}
+\vrule
+\hspace*{5pt}
+\subfloat[Increasing]{\label{f:ReallocOptIncreasing}\raisebox{0.38\totalheight}{\input{increasing}}}
+\caption{Realloc Optimizations}
+\label{f:ReallocOptimizations}
 \end{figure}
 
-The adjustment knobs for churn are:
-\begin{description}[itemsep=0pt,parsep=0pt]
-\item[thread:]
-number of threads (K).
-\item[spots:]
-number of spots for churn (M).
-\item[obj:]
-number of objects per thread (N).
-\item[max:]
-maximum object size.
-\item[min:]
-minimum object size.
-\item[step:]
-object size increment.
-\item[distro:]
-object size distribution
-\end{description}
-
-
-\subsubsection{Cache Thrash}
-\label{sec:benchThrashSec}
-
-The cache-thrash micro-benchmark measures allocator-induced active false-sharing as illustrated in Section~\ref{s:AllocatorInducedActiveFalseSharing}.
-If memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
-When threads share a cache line, frequent reads/writes to their cache-line object causes cache misses, which cause escalating delays as cache distance increases.
-
-Cache thrash tries to create a scenario that leads to false sharing, if the underlying memory allocator is allocating dynamic memory to multiple threads on the same cache lines.
-Ideally, a memory allocator should distance the dynamic memory region of one thread from another.
-Having multiple threads allocating small objects simultaneously can cause a memory allocator to allocate objects on the same cache line, if its not distancing the memory among different threads.
-
-Figure~\ref{fig:benchThrashFig} shows the pseudo code for the cache-thrash micro-benchmark.
-First, it creates K worker threads.
-Each worker thread allocates an object and intensively reads/writes it for M times to possible invalidate cache lines that may interfere with other threads sharing the same cache line.
-Each thread repeats this for N times.
-The main thread measures the total time taken for all worker threads to complete.
-Worker threads sharing cache lines with each other are expected to take longer.
+Figure~\ref{f:reallocShrinkBenchmark} shows a benchmark to determine if an allocator takes advantage of the first optimization.
+The benchmark takes a fixed-size allocation and reduces it by 10\%--90\% in steps of 10\%, checking the storage addresses at each reduction step to see if the same or new storage is returned.
+The fixed-sized allocation is varied between sizes 64--16K in powers of 2.
+Hence, both small and large sized storage are reduced.
+The following table shows the approximate percentage point where storage is retained on shrinkage, \eg the storage reduction must be greater than 50\% of the prior allocation before a new allocation is performed for the smaller size, data is copied, and prior storage released.
+\begin{center}
+\setlength{\tabcolsep}{15pt}
+\begin{tabular}{@{}ccccccc@{}}
+glibc	& hoard	& jemalloc	& llheap	& mimalloc	& tbbmalloc & tcmalloc \\
+90\%	& 50\%	& 20\%		& 50\%		& 50\%		& 90\%		& 50\%
+\end{tabular}
+\end{center}
+The results show glibc and tbbmalloc do not perform this optimization, while the other allocators do with 50\% as the most popular crossover point.
+
+Figure~\ref{f:reallocGrowBenchmark} shows a benchmark to determine if an allocator takes advantage of the second optimization.
+This benchmark creates an array of fixed-sized elements increasing the array size by 1 from 1--10,000 elements.
+Then the element size is varied from 32, 64, 128, 256 bytes.
+To prevent allocators from doing a bump allocation across the entire benchmark, a small perturbation is introduced where storage is allocated, held, and then released at infrequent points across the experiment.
+A companion experiment is a manual simulation of the @realloc@: @malloc@ new storage, copy old data, and free old storage.
+Note, the @realloc@ simulation is performing an equivalent perturbation to the @realloc@ benchmark each time through the loop.
+The experiment is repeated 10,000 times for @realloc@ and 100 times for the simulation to obtain similar timing ranges.
+The performance difference between the @realloc@ and @realloc@-simulation experiments shows if @realloc@ is optimizing unused internal fragmentation at the end of its quantized bucket.
+
+Figure~\ref{f:reallocGrowResults} shows the results for the @realloc@ and @realloc@ simulation benchmarks.
+The difference between the benchmarks is two orders of magnitude, \ie all allocators are reusing some internal fragmentation to prevent a reallocation and copy as the array grows.
+The large difference is the extra copying in the simulation case, which is expensive.
+Within the @realloc@ benchmark, allocators glibc, hoard, jemalloc, and tbbmalloc have higher cost, while the remaining allocators have almost identical results.
+Within the @realloc@ simulation benchmark, allocators glibc and tbbmalloc have higher cost, while the remaining allocators have almost identical results.
+This benchmark confirms that @realloc@ can provide some level of performance benefit for dynamically growing data structures, \eg strings or arrays.
+Therefore, encouraging its use is reasonable, if and only if, it is safe to do so.
+Note, this encouragement is apt for container developers, where low-level storage management is performed internally for the benefit of application users.
 
 \begin{figure}
-\centering
-\input{AllocInducedActiveFalseSharing}
-\medskip
-\begin{lstlisting}
-Main Thread
-	create worker threads
-	...
-	signal workers to allocate
-	...
-	signal workers to free
-	...
-Worker Thread$\(_1\)$
-	warm up memory in chunks of 16 bytes
-	...
-	For N
-		malloc an object
-		read/write the object M times
-		free the object
-	...
-Worker Thread$\(_2\)$
-	// same as Worker Thread$\(_1\)$
-\end{lstlisting}
-%\input{MemoryOverhead}
-%\includegraphics[width=1\textwidth]{figures/bench-cache-thrash.eps}
-\caption{Allocator-Induced Active False-Sharing Benchmark}
-\label{fig:benchThrashFig}
+\begin{C++}
+for ( size_t p = 10; p <= 100; p += 10 ) {
+	for ( size_t s = 64; s < 16 * 1024; s <<= 1 ) {
+		bool reuse = false;
+		void * prev = pass( malloc( s ) );
+		void * curr = pass( realloc( prev, s * p / 100 ) );
+		if ( prev == curr ) {  /*  print  */  }
+		free( curr );
+	}
+}
+\end{C++}
+\vspace*{-10pt}
+\caption{\lstinline{realloc} Shrink Benchmark}
+\label{f:reallocShrinkBenchmark}
+
+\vspace*{10pt}
+
+%\setlength{\tabcolsep}{15pt}
+\begin{tabular}{@{}ll@{}}
+\multicolumn{1}{c}{\lstinline{realloc}} & \multicolumn{1}{c}{\lstinline{realloc} simulation} \\
+\begin{C++}
+struct S { size_t ca[DIM]; }; // varied 32, 64, 128, 256
+enum { Ssize = sizeof( S ) };
+for ( size_t t = 0; t < @10$'$000@; t += 1 ) {
+	S * sa = nullptr, * perturb = nullptr;
+	for ( size_t i = 0, s = Ssize; i < 10$'$000; i += 1, s += Ssize ) {
+		sa = (S *)@realloc( sa, s );@
+
+		sa[i].ca[0] = i;
+		if ( i % 1024 == 0 ) perturb = (S *)realloc( perturb, s );
+	}
+	free( sa );
+	free( perturb );
+}
+\end{C++}
+&
+\begin{C++}
+struct S { size_t ca[DIM]; }; // varied 32, 64, 128, 256
+enum { Ssize = sizeof( S ) };
+for ( size_t t = 0; t < @100@; t += 1 ) {
+	S * sa = nullptr, * so = (S *)malloc( Ssize );
+	for ( size_t i = 0, s = Ssize; i < 10$'$000; i += 1, s += Ssize ) {
+		sa = (S *)@malloc( s )@;			// simulate realloc
+		memcpy( sa, so, s - Ssize );	// so one smaller
+		sa[i].ca[0] = i;
+		free( so );
+		so = sa;
+	}
+	free( sa );
+}
+\end{C++}
+\end{tabular}
+\caption{\lstinline{realloc} Grow Benchmark}
+\label{f:reallocGrowBenchmark}
+
+\vspace*{20pt}
+
+\hspace*{-17pt}
+\setlength{\tabcolsep}{-13pt}
+\begin{tabular}{@{}l@{\hspace*{-5pt}{\vrule height 1.05in}\hspace*{-5pt}}l@{}}
+\begin{tabular}{@{}lll@{}}
+\input{prolog.realloc.tex} & \input{swift.realloc.tex} & \input{java.realloc.tex}
+\\
+\multicolumn{3}{@{}c@{}}{\lstinline{realloc}, 10,000 repetitions}
+\end{tabular}
+&
+\setlength{\tabcolsep}{-10pt}
+\begin{tabular}{@{}lll@{}}
+\input{prolog.reallocsim.tex} & \input{swift.reallocsim.tex} & \input{java.reallocsim.tex}
+\\
+\multicolumn{3}{@{}c@{}}{\lstinline{realloc} simulation, 100 repetitions}
+\end{tabular}
+\end{tabular}
+
+\caption{\lstinline{realloc} Grow Results, x-axis in bytes, lower is better}
+\label{f:reallocGrowResults}
 \end{figure}
 
-The adjustment knobs for cache access scenarios are:
-\begin{description}[itemsep=0pt,parsep=0pt]
-\item[thread:]
-number of threads (K).
-\item[iterations:]
-iterations of cache benchmark (N).
-\item[cacheRW:]
-repetitions of reads/writes to object (M).
-\item[size:]
-object size.
-\end{description}
-
-
-\subsubsection{Cache Scratch}
-\label{s:CacheScratch}
-
-The cache-scratch micro-benchmark measures allocator-induced passive false-sharing as illustrated in Section~\ref{s:AllocatorInducedPassiveFalseSharing}.
-As with cache thrash, if memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
-In this scenario, the false sharing is being caused by the memory allocator although it is started by the program sharing an object.
-
-% An allocator can unintentionally induce false sharing depending upon its management of the freed objects.
-% If thread Thread$_1$ allocates multiple objects together, they may be allocated on the same cache line by the memory allocator.
-% If Thread$_1$ passes these object to thread Thread$_2$, then both threads may share the same cache line but this scenario is not induced by the allocator;
-% instead, the program induced this situation.
-% Now if Thread$_2$ frees this object and then allocate an object of the same size, the allocator may return the same object, which is on a cache line shared with thread Thread$_1$.
-
-Cache scratch tries to create a scenario that leads to false sharing and should make the memory allocator preserve the program-induced false sharing, if it does not return a freed object to its owner thread and, instead, re-uses it instantly.
-An allocator using object ownership, as described in subsection Section~\ref{s:Ownership}, is less susceptible to allocator-induced passive false-sharing.
-If the object is returned to the thread that owns it, then the new object that the thread gets is less likely to be on the same cache line.
-
-Figure~\ref{fig:benchScratchFig} shows the pseudo code for the cache-scratch micro-benchmark.
-First, it allocates K dynamic objects together, one for each of the K worker threads, possibly causing memory allocator to allocate these objects on the same cache line.
-Then it create K worker threads and passes an object from the K allocated objects to each of the K threads.
-Each worker thread frees the object passed by the main thread.
-Then, it allocates an object and reads/writes it repetitively for M times possibly causing frequent cache invalidations.
-Each worker repeats this N times.
+
+\subsubsection{Cache Benchmark}
+\label{s:CacheBenchmark}
+
+The cache benchmarks attempt to look for false sharing (see Section~\ref{s:FalseSharing}).
+Unfortunately, testing for allocator-induced false-sharing is difficult, because it is equivalent to searching for randomly conjoined allocations within a large storage space.
+Figure~\ref{f:CacheBenchmark} shows a benchmark for program induced false-sharing, where pointers are passed among threads.
+As a side effect, this benchmark is indirectly checking which allocator model is being used.
+The program main runs the benchmark with 4, 8, 16, and 32 threads, passing each thread a separate array of dynamically allocated storage from its common heap with @ASIZE@ elements.
+Each thread then traverses the array adding a value to each element (read and write).
+The traversal is repeated T times.
+Each thread frees the array at the end.
+The experiment is run with a small and medium sized array.
+If there is any heap sharing, the small array has a higher probability for false sharing, \eg the first and last array elements for different arrays can be juxtaposed in memory, and hence appear in the same cache line.
 
 \begin{figure}
-\centering
-\input{AllocInducedPassiveFalseSharing}
-\medskip
-\begin{lstlisting}
-Main Thread
-	malloc N objects $for$ each worker $thread$
-	create worker threads and pass N objects to each worker
-	...
-	signal workers to allocate
-	...
-	signal workers to free
-	...
-Worker Thread$\(_1\)$
-	warmup memory in chunks of 16 bytes
-	...
-	free the object passed by the Main Thread
-	For N
-		malloc new object
-		read/write the object M times
-		free the object
-	...
-Worker Thread$\(_2\)$
-	// same as Worker Thread$\(_1\)$
-\end{lstlisting}
-%\includegraphics[width=1\textwidth]{figures/bench-cache-scratch.eps}
-\caption{Program-Induced Passive False-Sharing Benchmark}
-\label{fig:benchScratchFig}
+\begin{C++}
+enum { TIMES = 10$'$000$'$000$'$000, ASIZE = 3 }; $\C{// repetitions, array size 3 or 30}$
+void * worker( void * arg ) {		$\C{// array passed from program main}$
+	volatile size_t * arr = (size_t *)arg; $\C{// volatile prevents code elision}$
+	for ( size_t  t = 0; t < TIMES / ASIZE; t += 1 ) $\C{// repeat experiment N times}$
+		for ( size_t r = 0; r < ASIZE; r += 1 ) $\C{// iterate through array}$
+			arr[r] += r;			$\C{// read/write array elements}$
+	free( (void *)arr );			$\C{// cast away volatile}$
+}
+\end{C++}
+\vspace*{-5pt}
+\caption{Cache False-Sharing Benchmark}
+\label{f:CacheBenchmark}
 \end{figure}
 
-Each thread allocating an object after freeing the original object passed by the main thread should cause the memory allocator to return the same object that was initially allocated by the main thread if the allocator did not return the initial object back to its owner (main thread).
-Then, intensive read/write on the shared cache line by multiple threads should slow down worker threads due to to high cache invalidations and misses.
-Main thread measures the total time taken for all the workers to complete.
-
-Similar to benchmark cache thrash in subsection Section~\ref{sec:benchThrashSec}, different cache access scenarios can be created using the following command-line arguments.
-\begin{description}[topsep=0pt,itemsep=0pt,parsep=0pt]
-\item[threads:]
-number of threads (K).
-\item[iterations:]
-iterations of cache benchmark (N).
-\item[cacheRW:]
-repetitions of reads/writes to object (M).
-\item[size:]
-object size.
-\end{description}
-
-
-\subsubsection{Speed Micro-Benchmark}
-\label{s:SpeedMicroBenchmark}
-\vspace*{-4pt}
-
-The speed benchmark measures the runtime speed of individual and sequences of memory allocation routines:
-\begin{enumerate}[topsep=-5pt,itemsep=0pt,parsep=0pt]
-\item malloc
-\item realloc
-\item free
-\item calloc
-\item malloc-free
-\item realloc-free
-\item calloc-free
-\item malloc-realloc
-\item calloc-realloc
-\item malloc-realloc-free
-\item calloc-realloc-free
-\item malloc-realloc-free-calloc
+Figure~\ref{f:cacheResults} shows the results for the cache benchmark run with array sizes 3 and 30.
+Allocators glibc, llheap, mimalloc, and tbbmalloc show little or no false-sharing issues at both 3 and 30 array sizes, \ie all generate virtually the same result.
+Note, on the Intel, there is a rise at 32 cores, because of an L3 cache shift at 16 cores; stepping to 32 cores introduces NUMA effects.
+This result correlates with these allocators using a 1:1 allocator model.
+Allocators hoard, jemalloc, and tcmalloc show false-sharing issues at both 3 and 30 array sizes, reducing performance by 2 times at size 3.
+The @perf@ performance analyzer shows a large number of cache misses for these allocators, indicating false sharing.
+This result correlates with these allocators using some form of heap sharing.
+
+\begin{figure}
+\setlength{\tabcolsep}{-8pt}
+\begin{tabular}{@{}l@{\hspace*{-5pt}{\vrule height 1.05in}\hspace*{-5pt}}l@{}}
+\begin{tabular}{@{}lll@{}}
+\input{prolog.cacheS.tex} & \input{swift.cacheS.tex} & \input{java.cacheS.tex}
+\\
+\multicolumn{3}{@{}c@{}}{3 Element Array}
+\end{tabular}
+&
+\begin{tabular}{@{}lll@{}}
+\input{prolog.cacheL.tex} & \input{swift.cacheL.tex} & \input{java.cacheL.tex}
+\\
+\multicolumn{3}{@{}c@{}}{30 Element Array}
+\end{tabular}
+\end{tabular}
+\caption{Cache False-Sharing Results, x-axis in cores, lower is better}
+\label{f:cacheResults}
+\end{figure}
+
+
+\subsection{Ownership Benchmark}
+
+% In multi-threaded allocators with H:T or 1:1 structure, one thread can allocation storage, send it to another thread, and the receiving thread deallocates it.
+% This raises the question of where the storage is returned: the heap (area) from which it was allocated or a different heap;
+% in some cases there is no choice, when storage is bound to its allocation area.
+% If storage is returned to its allocation heap, there are concurrency issues if the allocation area is shared.
+% If the storage is returned to another heap, there can still be concurrency issues, but the real problem is storage drain in the allocation heap and storage bloat in the deallocation heap, without a secondary mechanism to redistribute storage.
+% This choice is the \newterm{ownership problem}.
+
+Historically the Larson benchmark~\cite{larson99memory} is purported to test for ownership issues, but in actuality, the benchmark is a complex simulation of a server environment.
+Multiple threads allocate and free a number of random-sized objects within a size range.
+Each thread runs for a time period, and at termination, creates a child thread and passes its array of objects as an argument, which does not require synchronization.
+The number of thread generations varies with thread speed.
+% It calculates memory operations per second as an indicator of the memory allocator's performance.
+Because the benchmark performs multiple kinds of tests, it is impossible to extract just the remote-free rate.
+
+Therefore, a new benchmark is created to measure the asynchronous transfer cost from the deallocating to the allocating thread (remote free).
+However, the allocating thread must first asynchronously transfer the allocations to the deallocating thread.
+This cost needs to be mitigated so it does not mask the remote-free measurement.
+To accomplish this, a thread batches its allocations (lots of 100), and atomically exchanges this batch with a freeing thread, which then individually frees the batch components.
+Hence, the cost of the asynchronous allocation transfer is much less than the individual cost of the remote free.
+
+Figure~\ref{f:OwnershipBenchmark} shows the pseudo-code for the benchmark.
+There is a global matrix of allocation addresses: one row for each thread and one column for each batch.
+Each thread starts at a specific row and fills that row with two different sized allocations.
+A thread then loops until it atomically exchanges its row pointer with another thread's row pointer.
+The storage in the received batch is then remote freed, the batch row is reset with new allocations, and the process repeats for a timed duration.
+As well, after each allocation, an integer is written into the storage, and that integer is read before the deallocation.
+
+Figure~\ref{f:Ownership} (a)--(c) shows the throughput of the ownership benchmark.
+The results are divided into three groups.
+glibc and tbbmalloc are slowest because of many system calls to @futex@. % and @nano_sleep@.
+Figure~\ref{f:Ownership}~(d) shows the system time climbing during scaling on the AMD;
+the other architectures are similar.
+llheap and mimalloc are next, as these allocators do not batch remote frees, so every free requires locking.
+jemalloc, hoard, and tcmalloc are fastest, as these allocators batch remote frees, reducing locking.
+For 1:1 allocators, eager remote return makes sense as the returned storage can be reused during the owning thread's lifetime.
+For N:T allocators, lazy remote return using batching makes sense as heaps outlive threads so eventually returned storage can be used by any existing or new thread.
+Batching is possible for 1:1 allocators, but results in complexity and external fragmentation, which is only warranted in certain cases.
+
+\begin{figure}
+\begin{cfa}
+void * batches[MaxThread][MaxBatch];				$\C{// thread global}$
+struct Aligned { CALIGN void * * col; };
+volatile Aligned allocations[MaxThread];
+
+Aligned batch = { batches[id] };					$\C{// thread local}$
+size_t cnt = 0, a = 0;
+for ( ; ! stop; ) {									$\C{// loop for T second}$
+	for ( ssize_t i = Batch - 1; i >= 0; i -= 1 ) {	$\C{// allocations, oppose order from frees}$
+		batch.col[i] = malloc( i & 1 ? 42 : 192 );	$\C{// two allocation sizes}$
+		*(int *)batch.col[i] = 42;					$\C{// write storage}$
+	}
+	Aligned obatch = batch;
+	while ( (batch.col = Fas( allocations[a].col, batch.col )) == obatch.col || batch.col == nullptr ) { // atomic exchange
+		if ( stop ) goto fini;
+		a = (a + 1) % Threads;						$\C{// try another batch}$
+	}
+	for ( size_t i = 0; i < Batch; i += 1 ) {		$\C{// deallocations}$
+		if ( *(int *)batch.col[i] != 42 ) abort();	$\C{// read storage check}$
+		free( batch.col[i] );						$\C{// remote free}$
+	}
+	cnt += Batch;									$\C{// sum allocations/frees}$
+	a = (a + 1) % Threads;							$\C{// try another batch}$
+}  fini: ;
+\end{cfa}
+\caption{Ownership Benchmark Outline}
+\label{f:OwnershipBenchmark}
+\end{figure}
+
+\begin{figure}
+\hspace*{-14pt}
+\setlength{\tabcolsep}{-13pt}
+\begin{tabular}{@{}lll@{\hspace*{-6pt}{\vrule height 2.05in}\hspace*{-6pt}}l@{}}
+\input{prolog.ownership.tex}
+&
+\input{swift.ownership.tex}
+&
+\input{java.ownership.tex}
+&
+\input{swift.ownershipres.tex}
+\end{tabular}
+\caption{Ownership Results, x-axis is cores, (a)--(c) higher is better, (d) lower is better}
+\label{f:Ownership}
+\end{figure}
+
+
+\subsection{Delay Benchmark}
+
+The delay benchmark is a torture test of abrupt allocation patterns looking for delays that increase latency.
+A flat response across the tests means there are few or no allocator-induced pauses.
+The test examines small and large requests, where small requests are handled by the heap (@sbrk@) and large requests are handled by the OS (@mmap@).
+Putting large requests in the heap causes external fragmentation when freed, unless an allocator subdivided the space, leading to pauses.
+The @mallopt@ function provides the option @M_MMAP_THRESHOLD@ to set the division point in bytes for requests that cannot be satisfied by an allocator's free list.
+Each @sbrk@ test in this benchmark is repeated 5,000,000,000 times and each @mmap@ test is performed 1,000,000 times;
+the different repetitions result from the high cost of the OS calls making the experiment run too long.
+A \emph{long running} experiment, rather than short experiments with averaged results, is searching for blowup scenarios in time and/or space.
+Finally, scaling is tested with 4, 8, 16, and 32 pinned threads, where the threads synchronize between tests using a @pthread@ barrier.
+In all experiments, allocated storage has its first and last byte assigned a character to simulate usage.
+
+The tests are performed in this order:
+\begin{enumerate}[leftmargin=18pt,topsep=3pt,itemsep=2pt,parsep=0pt]
+\item
+@x = malloc( 0 ) / free( x )@:
+handles the pathological case of a zero-sized allocation and free.
+The POSIX standard allows two meanings for this case: return @NULL@ or a unique pointer, where both can be freed.
+The fastest implementation is to return @NULL@, rather than create a fictitious allocation.
+However, this overloads the @malloc@ return-value to mean error or a zero-sized allocation.
+To comply with the POSIX standard, the check for running out of memory is:
+\begin{uC++}
+if ( malloc( 0 ) == NULL && errno == ENOMEM ) ... // no memory
+\end{uC++}
+Unfortunately, most programmers assume @NULL@ means an error, \eg two tests in the SPEC CPU benchmark fail if @NULL@ is returned for a zero-sized allocation.
+Hence, returning @NULL@ for a zero-sized allocation is an impractical allocator option.
+
+\item
+@free( NULL )@: handles the pathological case of freeing a non-existent or zero-byte allocation.
+Non-existent allocations occur as algorithm base-cases, such as an unused pointer set to @NULL@.
+Having the allocator ignore this case eliminates checking for an erroneous @free@ call on a @NULL@ value.
+This call should be fast.
+
+\item
+\label{expS}
+@x = malloc( 42 ) / free( x )@:
+handles a fixed-sized allocation and free.
+
+\item
+@x[0..100) = malloc( 42 ) / free( x[0..100) )@:
+handles a group of fixed-sized allocations and group free.
+
+\item
+@x[0..1000) = malloc( 42 ) / free( x[0..1000) )@:
+handles a larger group of fixed-sized allocations and group free.
+
+\item
+@x[0..100) = malloc( 42 ) / free( x(100..0] )@:
+handles a group of fixed-sized allocations and group free in reverse order.
+
+\item
+\label{expE}
+@x[0..1000) = malloc( 42 ) / free( x(1000..0] )@:
+handles a larger group of fixed-sized allocations and group free in reverse order.
+
+\item
+@x = malloc( [0..100) ) / free( x )@:
+handles a variable-sized allocation and free.
+
+\item
+@x[0..100) = malloc( [0..100) ) / free( x[0..100) )@:
+handles a group of variable-sized allocations and group free.
+
+\item
+@x[0..1000) = malloc( [0..1000) ) / free( x[0..1000) )@:
+handles a larger group of variable-sized allocations and group free.
+
+\item
+@x[0..100) = malloc( [0..100) ) / free( x(100..0] )@:
+handles a group of variable-sized allocations and group free in reverse order.
+
+\item
+@x[0..1000) = malloc( [0..1000) ) / free( x(1000..0] )@:
+handles a larger group of variable-sized allocations and group free in reverse order.
 \end{enumerate}
-
-Figure~\ref{fig:SpeedBenchFig} shows the pseudo code for the speed micro-benchmark.
-Each routine in the chain is called for N objects and then those allocated objects are used when calling the next routine in the allocation chain.
-This tests the latency of the memory allocator when multiple routines are chained together, \eg the call sequence malloc-realloc-free-calloc gives a complete picture of the major allocation routines when combined together.
-For each chain, the time is recorded to visualize performance of a memory allocator against each chain.
+Experiments \ref{expS}--\ref{expE} are repeated with a fixed-sized allocation of 1,048,576, where @M_MMAP_THRESHOLD@ is set to 524,288 to force the use of @mmap@, resulting in 17 experiments.
+Because the @mmap@ experiments test the operating-system memory-management not the allocators, the variable-sized @mmap@ experiments are deemed unnecessary.
+A test with random-sized @sbrk@ allocations @malloc( [0..N) random )@ was performed, but the results are the same as fixed sized as all the allocation sizes are quickly accessed over the large number of experiment repetitions.
+That is, once the buckets or superblocks for the allocation sizes are created, access order is irrelevant.
+
+Figures~\ref{f:LatencyExpARM}--\ref{f:LatencyExpIntel} show the results of the @sbrk@ and @mmap@ experiments across the seven allocators with parallel scaling.
+The average of the N threads is graphed for each experiment and the standard deviation is the error bar.
+For the @sbrk@ graphs, a good allocator result should be low (smaller is better), flat across scaling (cores), with no error bars (STD $\approx$ 0) indicating no jitter (pauses) among the threads.
+The result patterns across the three hardware architectures are similar, with differences correlating to CPU speed and cache differences.
+
+The key observation across the @sbrk@ graphs is that llheap and mimalloc are always at the bottom (lower is better) and flat with respect to scaling.
+The only exception is on the Intel, where all allocators experienced similar non-flat behaviour, because of the L3 cache shift at 16 cores.
+Some anomalies are tcmalloc and hoard experiencing large jitter (see error bars) and scaling issues in some experiments, which is correlated with poorer results;
+jemalloc has significant scaling issues for experiments 5, 7, 10, and 12, resulting from large numbers of @futex@ calls, possibly related to @madvise@ for returning storage to the OS;
+and glibc and tbbmalloc are often slower than the other allocators (symbols are on top of each other).
+
+The key observation across the @mmap@ graphs is that only three allocators, glibc, llheap, and tbbmalloc honoured the @mmap@ threshold request (symbols are on top of each other).
+The other allocators made no @mmap@ calls, so their results are extremely low.
+The exception is hoard, which did make @mmap@ calls that were uncorrelated with @M_MMAP_THRESHOLD@, and had significant jitter due to a large number of @futex@ calls.
+For the allocators using @mmap@, there should be some scaling effect as more threads make more system calls.
 
 \begin{figure}
-\centering
-\begin{lstlisting}[morekeywords={foreach}]
-Main Thread
-	create worker threads
-	foreach ( allocation chain )
-		note time T1
-		...
-		note time T2
-		chain_speed = (T2 - T1) / number-of-worker-threads * N )
-Worker Thread
-	initialize variables
-	...
-	foreach ( routine in allocation chain )
-		call routine N times
-\end{lstlisting}
-%\includegraphics[width=1\textwidth]{figures/bench-speed.eps}
-\caption{Speed Benchmark}
-\label{fig:SpeedBenchFig}
+\input{prolog.tex}
+\vspace*{-20pt}
+\caption{Delay Results, ARM, x-axis is cores, lower is better}
+\label{f:LatencyExpARM}
 \end{figure}
 
-The adjustment knobs for memory usage are:
-\begin{description}[itemsep=0pt,parsep=0pt]
-\item[max:]
-maximum object size.
-\item[min:]
-minimum object size.
-\item[step:]
-object size increment.
-\item[distro:]
-object size distribution.
-\item[objects:]
-number of objects per thread.
-\item[workers:]
-number of worker threads.
-\end{description}
-
-
-\subsubsection{Memory Micro-Benchmark}
-\label{s:MemoryMicroBenchmark}
-
-The memory micro-benchmark measures the memory overhead of an allocator.
-It allocates a number of dynamic objects and reads @/proc/self/proc/maps@ to get the total memory requested by the allocator from the OS.
-It calculates the memory overhead by computing the difference between the memory the allocator requests from the OS and the memory that the program allocates.
-This micro-benchmark is like Larson and stresses the ability of an allocator to deal with object sharing.
-
-Figure~\ref{fig:MemoryBenchFig} shows the pseudo code for the memory micro-benchmark.
-It creates a producer-consumer scenario with K producer threads and each producer has M consumer threads.
-A producer has a separate buffer for each consumer and allocates N objects of random sizes following a configurable distribution for each consumer.
-A consumer frees these objects.
-After every memory operation, program memory usage is recorded throughout the runtime.
-This data is used to visualize the memory usage and consumption for the program.
-
 \begin{figure}
-\centering
-\begin{lstlisting}
-Main Thread
-	print memory snapshot
-	create producer threads
-Producer Thread (K)
-	set free start
-	create consumer threads
-	for ( N )
-		allocate memory
-		print memory snapshot
-Consumer Thread (M)
-	wait while ( allocations < free start )
-	for ( N )
-		free memory
-		print memory snapshot
-\end{lstlisting}
-%\includegraphics[width=1\textwidth]{figures/bench-memory.eps}
-\caption{Memory Footprint Micro-Benchmark}
-\label{fig:MemoryBenchFig}
+\input{swift.tex}
+\vspace*{-20pt}
+\caption{Delay Results, AMD, x-axis is cores, lower is better}
+\label{f:LatencyExpAMD}
 \end{figure}
 
-The global adjustment knobs for this micro-benchmark are:
-\begin{description}[itemsep=0pt,parsep=0pt]
-\item[producer (K):]
-sets the number of producer threads.
-\item[consumer (M):]
-sets number of consumers threads for each producer.
-\item[round:]
-sets production and consumption round size.
-\end{description}
-
-The adjustment knobs for object allocation are:
-\begin{description}[itemsep=0pt,parsep=0pt]
-\item[max:]
-maximum object size.
-\item[min:]
-minimum object size.
-\item[step:]
-object size increment.
-\item[distro:]
-object size distribution.
-\item[objects (N):]
-number of objects per thread.
-\end{description}
-
-
-\section{Performance}
-\label{c:Performance}
-
-This section uses the micro-benchmarks from Section~\ref{s:Benchmarks} to test a number of current memory allocators, including llheap.
-The goal is to see if llheap is competitive with the currently popular memory allocators.
-
-
-\subsection{Machine Specification}
-
-The performance experiments were run on two different multi-core architectures (x64 and ARM) to determine if there is consistency across platforms:
-\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
-\item
-\textbf{Algol} Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz, GCC version 9.4.0
-\item
-\textbf{Nasus} AMD EPYC 7662, 64-core socket $\times$ 2, 2.0 GHz, GCC version 9.3.0
-\end{itemize}
-
-
-\subsection{Existing Memory Allocators}
-\label{sec:curAllocatorSec}
-
-With dynamic allocation being an important feature of C, there are many stand-alone memory allocators that have been designed for different purposes.
-For this work, 7 of the most popular and widely used memory allocators were selected for comparison, along with llheap.
-
-\paragraph{llheap (\textsf{llh})}
-is the thread-safe allocator from Chapter~\ref{c:Allocator}
-\\
-\textbf{Version:} 1.0
-\textbf{Configuration:} Compiled with dynamic linking, but without statistics or debugging.\\
-\textbf{Compilation command:} @make@
-
-\paragraph{glibc (\textsf{glc})}
-\cite{glibc} is the default glibc thread-safe allocator.
-\\
-\textbf{Version:} Ubuntu GLIBC 2.31-0ubuntu9.7 2.31\\
-\textbf{Configuration:} Compiled by Ubuntu 20.04.\\
-\textbf{Compilation command:} N/A
-
-\paragraph{dlmalloc (\textsf{dl})}
-\cite{dlmalloc} is a thread-safe allocator that is single threaded and single heap.
-It maintains free-lists of different sizes to store freed dynamic memory.
-\\
-\textbf{Version:} 2.8.6\\
-\textbf{Configuration:} Compiled with preprocessor @USE_LOCKS@.\\
-\textbf{Compilation command:} @gcc -g3 -O3 -Wall -Wextra -fno-builtin-malloc -fno-builtin-calloc@ @-fno-builtin-realloc -fno-builtin-free -fPIC -shared -DUSE_LOCKS -o libdlmalloc.so malloc-2.8.6.c@
-
-\paragraph{hoard (\textsf{hrd})}
-\cite{hoard} is a thread-safe allocator that is multi-threaded and uses a heap layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap.
-\\
-\textbf{Version:} 3.13\\
-\textbf{Configuration:} Compiled with hoard's default configurations and @Makefile@.\\
-\textbf{Compilation command:} @make all@
-
-\paragraph{jemalloc (\textsf{je})}
-\cite{jemalloc} is a thread-safe allocator that uses multiple arenas. Each thread is assigned an arena.
-Each arena has chunks that contain contagious memory regions of same size. An arena has multiple chunks that contain regions of multiple sizes.
-\\
-\textbf{Version:} 5.2.1\\
-\textbf{Configuration:} Compiled with jemalloc's default configurations and @Makefile@.\\
-\textbf{Compilation command:} @autogen.sh; configure; make; make install@
-
-\paragraph{ptmalloc3 (\textsf{pt3})}
-\cite{ptmalloc3} is a modification of dlmalloc.
-It is a thread-safe multi-threaded memory allocator that uses multiple heaps.
-ptmalloc3 heap has similar design to dlmalloc's heap.
-\\
-\textbf{Version:} 1.8\\
-\textbf{Configuration:} Compiled with ptmalloc3's @Makefile@ using option ``linux-shared''.\\
-\textbf{Compilation command:} @make linux-shared@
-
-\paragraph{rpmalloc (\textsf{rp})}
-\cite{rpmalloc} is a thread-safe allocator that is multi-threaded and uses per-thread heap.
-Each heap has multiple size-classes and each size-class contains memory regions of the relevant size.
-\\
-\textbf{Version:} 1.4.1\\
-\textbf{Configuration:} Compiled with rpmalloc's default configurations and ninja build system.\\
-\textbf{Compilation command:} @python3 configure.py; ninja@
-
-\paragraph{tbb malloc (\textsf{tbb})}
-\cite{tbbmalloc} is a thread-safe allocator that is multi-threaded and uses a private heap for each thread.
-Each private-heap has multiple bins of different sizes. Each bin contains free regions of the same size.
-\\
-\textbf{Version:} intel tbb 2020 update 2, tbb\_interface\_version == 11102\\
-\textbf{Configuration:} Compiled with tbbmalloc's default configurations and @Makefile@.\\
-\textbf{Compilation command:} @make@
-
-% \subsection{Experiment Environment}
-% We used our micro benchmark suite (FIX ME: cite mbench) to evaluate these memory allocators Section~\ref{sec:curAllocatorSec} and our own memory allocator uHeap Section~\ref{sec:allocatorSec}.
-
-\subsection{Experiments}
-
-Each micro-benchmark is configured and run with each of the allocators,
-The less time an allocator takes to complete a benchmark the better so lower in the graphs is better, except for the Memory micro-benchmark graphs.
-All graphs use log scale on the Y-axis, except for the Memory micro-benchmark (see Section~\ref{s:MemoryMicroBenchmark}).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% CHURN
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\subsubsection{Churn Micro-Benchmark}
-
-Churn tests allocators for speed under intensive dynamic memory usage (see Section~\ref{s:ChurnBenchmark}).
-This experiment was run with following configurations:
-\begin{description}[itemsep=0pt,parsep=0pt]
-\item[thread:]
-1, 2, 4, 8, 16, 32, 48
-\item[spots:]
-16
-\item[obj:]
-100,000
-\item[max:]
-500
-\item[min:]
-50
-\item[step:]
-50
-\item[distro:]
-fisher
-\end{description}
-
-% -maxS		 : 500
-% -minS		 : 50
-% -stepS		 : 50
-% -distroS	 : fisher
-% -objN		 : 100000
-% -cSpots		 : 16
-% -threadN	 : 1, 2, 4, 8, 16
-
-Figure~\ref{fig:churn} shows the results for algol and nasus.
-The X-axis shows the number of threads;
-the Y-axis shows the total experiment time.
-Each allocator's performance for each thread is shown in different colors.
-
 \begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/churn} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/churn} }
-\caption{Churn}
-\label{fig:churn}
+\input{java.tex}
+\vspace*{-20pt}
+\caption{Delay Results, Intel, x-axis is cores, lower is better}
+\label{f:LatencyExpIntel}
 \end{figure}
 
-\paragraph{Assessment}
-All allocators did well in this micro-benchmark, except for \textsf{dl} on the ARM.
-\textsf{dl}'s is the slowest, indicating some small bottleneck with respect to the other allocators.
-\textsf{je} is the fastest, with only a small benefit over the other allocators.
-% llheap is slightly slower because it uses ownership, where many of the allocations have remote frees, which requires locking.
-% When llheap is compiled without ownership, its performance is the same as the other allocators (not shown).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% THRASH
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\subsubsection{Cache Thrash}
-\label{sec:cache-thrash-perf}
-
-Thrash tests memory allocators for active false sharing (see Section~\ref{sec:benchThrashSec}).
-This experiment was run with following configurations:
-\begin{description}[itemsep=0pt,parsep=0pt]
-\item[threads:]
-1, 2, 4, 8, 16, 32, 48
-\item[iterations:]
-1,000
-\item[cacheRW:]
-1,000,000
-\item[size:]
-1
-\end{description}
-
-% * Each allocator was tested for its performance across different number of threads.
-% Experiment was repeated for each allocator for 1, 2, 4, 8, and 16 threads by setting the configuration -threadN.
-
-Figure~\ref{fig:cacheThrash} shows the results for algol and nasus.
-The X-axis shows the number of threads;
-the Y-axis shows the total experiment time.
-Each allocator's performance for each thread is shown in different colors.
+Figures~\ref{f:LatencyResARM}--\ref{f:LatencyResIntel} show a time/space perspective across the entire experiment.
+The user, system, and real times along with the maximum memory usage are presented for the @sbrk@ and @mmap@ experiments.
+The result patterns across the three hardware architectures are similar.
+If an allocator disappears in a graph, its result is less than 1 on a logarithmic scale.
+Surprisingly, there are large (2 orders of magnitude) time differences among the allocators.
 
 \begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/cache_thrash_0-thrash} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/cache_thrash_0-thrash} }
-\caption{Cache Thrash}
-\label{fig:cacheThrash}
+\hspace*{15pt}
+\input{prolog2.tex}
+\vspace*{-20pt}
+\caption{Time and Memory Results, ARM, x-axis is cores, lower is better}
+\label{f:LatencyResARM}
+
+\hspace*{15pt}
+\input{swift2.tex}
+\vspace*{-20pt}
+\caption{Time and Memory Results, AMD, x-axis is cores, lower is better}
+\label{f:LatencyResAMD}
+
+\hspace*{15pt}
+\input{java2.tex}
+\vspace*{-20pt}
+\caption{Time and Memory Results, Intel, x-axis is cores, lower is better}
+\label{f:LatencyResIntel}
 \end{figure}
 
-\paragraph{Assessment}
-All allocators did well in this micro-benchmark, except for \textsf{dl} and \textsf{pt3}.
-\textsf{dl} uses a single heap for all threads so it is understandable that it generates so much active false-sharing.
-Requests from different threads are dealt with sequentially by the single heap (using a single lock), which can allocate objects to different threads on the same cache line.
-\textsf{pt3} uses the T:H model, so multiple threads can use one heap, but the active false-sharing is less than \textsf{dl}.
-The rest of the memory allocators generate little or no active false-sharing.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% SCRATCH
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\subsubsection{Cache Scratch}
-
-Scratch tests memory allocators for program-induced allocator-preserved passive false-sharing (see Section~\ref{s:CacheScratch}).
-This experiment was run with following configurations:
-\begin{description}[itemsep=0pt,parsep=0pt]
-\item[threads:]
-1, 2, 4, 8, 16, 32, 48
-\item[iterations:]
-1,000
-\item[cacheRW:]
-1,000,000
-\item[size:]
-1
-\end{description}
-
-% * Each allocator was tested for its performance across different number of threads.
-% Experiment was repeated for each allocator for 1, 2, 4, 8, and 16 threads by setting the configuration -threadN.
-
-Figure~\ref{fig:cacheScratch} shows the results for algol and nasus.
-The X-axis shows the number of threads;
-the Y-axis shows the total experiment time.
-Each allocator's performance for each thread is shown in different colors.
+For @sbrk@ graphs, the user time should be high and scale with cores, the system time very low, the real time constant, and the maximum memory should scale with cores.
+For user time, llheap and mimalloc are at the bottom (lower is better) and all allocators have linear scaling as cores increase.
+The remaining allocators are slower by one to two orders of magnitude, which correlates with high results in the experiments.
+For system time, jemalloc has non-trivial system time that scales with cores, caused by a large number of @futex@ calls.
+The remaining allocators have virtually zero system time (not on graph).
+The exception is a random anomaly where allocators had small amounts of system time, which appeared/disappeared on different experiment runs as if something slightly perturbs the experiment (OS?) over its 20 hour run.
+For real time, llheap and mimalloc take the least overall time and all allocators except jemalloc have flat performance.
+For maximum memory, all allocators scale with cores, and there is a rough inverse correlation between user time and memory usage, \ie a time \vs space tradeoff.
+
+For @mmap@ graphs, only used by glibc, llheap, and tbbmalloc, the user time should be low and scale with cores, the system time should be high and scale with cores, the real time constant, and the maximum memory scales with cores.
+For user time, glibc, llheap, and tbbmalloc are at the bottom because there are no @sbrk@ requests.
+The remaining allocators all use a non-trivial amount of time handling the large requests, except mimalloc, which handles the large request identically to a small request.
+Interestingly, the amount of time varies by one to two orders of magnitude.
+For system time, glibc, llheap, and tbbmalloc are at the top because of the OS calls to @mmap@.
+Interestingly, the remaining allocators still use orders of magnitude more system time, except mimalloc ($<$ 1 so invisible).
+For real time, all allocators scale linearly with cores, except mimalloc, which is flat.
+For maximum memory, all allocators scale with cores, and there is a rough inverse correlation between user time and memory usage, \ie a time \vs space tradeoff.
+
+
+\subsection{Out of Memory Benchmark}
+
+Figure~\ref{f:OutMemoryBenchmark} shows a \CC program with unbounded memory allocation.
+The program is run in a shell with restricted data size.
+Hence, it quickly runs out of memory, causing @malloc@, which is called by \CC @new@, to return a @nullptr@ with @errno@ set to @ENOMEM@.
+Routine @new@ sees the @nullptr@ and calls the handler routine set by @set_new_handler@, which prints a message, and resets the default handler to raise the @bad_alloc@ exception caught in the program main.
+Note, to raise an exception requires dynamic allocation, but \CC preallocates a few special exceptions, like @bad_alloc@, for such cases.
+
+All allocators printed the correct output except hoard, mimalloc, and tcmalloc.
+Hoard prints @MAP_FAILED@ and hangs spinning on a spinlock in a complex call chain.
+mimalloc aborts the program because it incorrectly attempts to raise the @bad_alloc@ exception itself if and only if it is compiled with \CC, whereas it is compiled with C.
+The correct design is to return a @nullptr@ with @errno@ set to @ENOMEM@ to \CC @new@, which then raises the exception;
+hence, the allocator can be compiled with C or \CC.
+tcmalloc prints the correct output but adds ``allocation failed'' messages.
 
 \begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/cache_scratch_0-scratch} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/cache_scratch_0-scratch} }
-\caption{Cache Scratch}
-\label{fig:cacheScratch}
+\begin{tabular}{@{\hspace*{\parindentlnth}}l@{\hspace*{2\parindentlnth}}l@{}@{}}
+\begin{cfa}
+static void handler() {
+	cout << "Memory allocation failed\n";
+	set_new_handler( nullptr );
+}
+
+
+\end{cfa}
+&
+\begin{cfa}
+int main() {
+	set_new_handler( handler );
+	try {
+		for ( ;; ) pass( new char[50] );	// unbounded allocation
+	} catch( const bad_alloc & e ) { cout << e.what() << endl; }
+}
+\end{cfa}
+\end{tabular}
+\caption{Out of Memory Benchmark}
+\label{f:OutMemoryBenchmark}
 \end{figure}
 
-\paragraph{Assessment}
-This micro-benchmark divides the allocators into two groups.
-First is the high-performer group: \textsf{llh}, \textsf{je}, and \textsf{rp}.
-These memory allocators generate little or no passive false-sharing and their performance difference is negligible.
-Second is the low-performer group, which includes the rest of the memory allocators.
-These memory allocators have significant program-induced passive false-sharing, where \textsf{hrd}'s is the worst performing allocator.
-All of the allocators in this group are sharing heaps among threads at some level.
-
-Interestingly, allocators such as \textsf{hrd} and \textsf{glc} performed well in micro-benchmark cache thrash (see Section~\ref{sec:cache-thrash-perf}), but, these allocators are among the low performers in the cache scratch.
-It suggests these allocators do not actively produce false-sharing, but preserve program-induced passive false sharing.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% SPEED
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\subsubsection{Speed Micro-Benchmark}
-
-Speed tests memory allocators for runtime latency (see Section~\ref{s:SpeedMicroBenchmark}).
-This experiment was run with following configurations:
-\begin{description}
-\item[max:]
-500
-\item[min:]
-50
-\item[step:]
-50
-\item[distro:]
-fisher
-\item[objects:]
-100,000
-\item[workers:]
-1, 2, 4, 8, 16, 32, 48
-\end{description}
-
-% -maxS    :  500
-% -minS    :  50
-% -stepS   :  50
-% -distroS :  fisher
-% -objN    :  1000000
-% -threadN    : \{ 1, 2, 4, 8, 16 \} *
-
-%* Each allocator was tested for its performance across different number of threads.
-%Experiment was repeated for each allocator for 1, 2, 4, 8, and 16 threads by setting the configuration -threadN.
-
-Figures~\ref{fig:speed-3-malloc} to~\ref{fig:speed-14-malloc-calloc-realloc-free} show 12 figures, one figure for each chain of the speed benchmark.
-The X-axis shows the number of threads;
-the Y-axis shows the total experiment time.
-Each allocator's performance for each thread is shown in different colors.
-
-\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
-\item Figure~\ref{fig:speed-3-malloc} shows results for chain: malloc
-\item Figure~\ref{fig:speed-4-realloc} shows results for chain: realloc
-\item Figure~\ref{fig:speed-5-free} shows results for chain: free
-\item Figure~\ref{fig:speed-6-calloc} shows results for chain: calloc
-\item Figure~\ref{fig:speed-7-malloc-free} shows results for chain: malloc-free
-\item Figure~\ref{fig:speed-8-realloc-free} shows results for chain: realloc-free
-\item Figure~\ref{fig:speed-9-calloc-free} shows results for chain: calloc-free
-\item Figure~\ref{fig:speed-10-malloc-realloc} shows results for chain: malloc-realloc
-\item Figure~\ref{fig:speed-11-calloc-realloc} shows results for chain: calloc-realloc
-\item Figure~\ref{fig:speed-12-malloc-realloc-free} shows results for chain: malloc-realloc-free
-\item Figure~\ref{fig:speed-13-calloc-realloc-free} shows results for chain: calloc-realloc-free
-\item Figure~\ref{fig:speed-14-malloc-calloc-realloc-free} shows results for chain: malloc-realloc-free-calloc
-\end{itemize}
-
-\paragraph{Assessment}
-This micro-benchmark divides the allocators into two groups: with and without @calloc@.
-@calloc@ uses @memset@ to set the allocated memory to zero, which dominates the cost of the allocation chain (large performance increase) and levels performance across the allocators.
-But the difference among the allocators in a @calloc@ chain still gives an idea of their relative performance.
-
-All allocators did well in this micro-benchmark across all allocation chains, except for \textsf{dl}, \textsf{pt3}, and \textsf{hrd}.
-Again, the low-performing allocators are sharing heaps among threads, so the contention causes performance increases with increasing numbers of threads.
-Furthermore, chains with @free@ can trigger coalescing, which slows the fast path.
-The high-performing allocators all illustrate low latency across the allocation chains, \ie there are no performance spikes as the chain lengths, that might be caused by contention and/or coalescing.
-Low latency is important for applications that are sensitive to unknown execution delays.
-
-%speed-3-malloc.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-3-malloc} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-3-malloc} }
-\caption{Speed benchmark chain: malloc}
-\label{fig:speed-3-malloc}
-\end{figure}
-
-%speed-4-realloc.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-4-realloc} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-4-realloc} }
-\caption{Speed benchmark chain: realloc}
-\label{fig:speed-4-realloc}
-\end{figure}
-
-%speed-5-free.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-5-free} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-5-free} }
-\caption{Speed benchmark chain: free}
-\label{fig:speed-5-free}
-\end{figure}
-
-%speed-6-calloc.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-6-calloc} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-6-calloc} }
-\caption{Speed benchmark chain: calloc}
-\label{fig:speed-6-calloc}
-\end{figure}
-
-%speed-7-malloc-free.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-7-malloc-free} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-7-malloc-free} }
-\caption{Speed benchmark chain: malloc-free}
-\label{fig:speed-7-malloc-free}
-\end{figure}
-
-%speed-8-realloc-free.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-8-realloc-free} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-8-realloc-free} }
-\caption{Speed benchmark chain: realloc-free}
-\label{fig:speed-8-realloc-free}
-\end{figure}
-
-%speed-9-calloc-free.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-9-calloc-free} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-9-calloc-free} }
-\caption{Speed benchmark chain: calloc-free}
-\label{fig:speed-9-calloc-free}
-\end{figure}
-
-%speed-10-malloc-realloc.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-10-malloc-realloc} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-10-malloc-realloc} }
-\caption{Speed benchmark chain: malloc-realloc}
-\label{fig:speed-10-malloc-realloc}
-\end{figure}
-
-%speed-11-calloc-realloc.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-11-calloc-realloc} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-11-calloc-realloc} }
-\caption{Speed benchmark chain: calloc-realloc}
-\label{fig:speed-11-calloc-realloc}
-\end{figure}
-
-%speed-12-malloc-realloc-free.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-12-malloc-realloc-free} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-12-malloc-realloc-free} }
-\caption{Speed benchmark chain: malloc-realloc-free}
-\label{fig:speed-12-malloc-realloc-free}
-\end{figure}
-
-%speed-13-calloc-realloc-free.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-13-calloc-realloc-free} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-13-calloc-realloc-free} }
-\caption{Speed benchmark chain: calloc-realloc-free}
-\label{fig:speed-13-calloc-realloc-free}
-\end{figure}
-
-%speed-14-{m,c,re}alloc-free.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-14-m-c-re-alloc-free} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-14-m-c-re-alloc-free} }
-\caption{Speed benchmark chain: malloc-calloc-realloc-free}
-\label{fig:speed-14-malloc-calloc-realloc-free}
-\end{figure}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% MEMORY
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\newpage
-\subsubsection{Memory Micro-Benchmark}
-\label{s:MemoryMicroBenchmark}
-
-This experiment is run with the following two configurations for each allocator.
-The difference between the two configurations is the number of producers and consumers.
-Configuration 1 has one producer and one consumer, and configuration 2 has 4 producers, where each producer has 4 consumers.
-
-\noindent
-Configuration 1:
-\begin{description}[itemsep=0pt,parsep=0pt]
-\item[producer (K):]
-1
-\item[consumer (M):]
-1
-\item[round:]
-100,000
-\item[max:]
-500
-\item[min:]
-50
-\item[step:]
-50
-\item[distro:]
-fisher
-\item[objects (N):]
-100,000
-\end{description}
-
-% -threadA :  1
-% -threadF :  1
-% -maxS    :  500
-% -minS    :  50
-% -stepS   :  50
-% -distroS :  fisher
-% -objN    :  100000
-% -consumeS:  100000
-
-\noindent
-Configuration 2:
-\begin{description}[itemsep=0pt,parsep=0pt]
-\item[producer (K):]
-4
-\item[consumer (M):]
-4
-\item[round:]
-100,000
-\item[max:]
-500
-\item[min:]
-50
-\item[step:]
-50
-\item[distro:]
-fisher
-\item[objects (N):]
-100,000
-\end{description}
-
-% -threadA :  4
-% -threadF :  4
-% -maxS    :  500
-% -minS    :  50
-% -stepS   :  50
-% -distroS :  fisher
-% -objN    :  100000
-% -consumeS:  100000
-
-% \begin{table}[b]
-% \centering
-%     \begin{tabular}{ |c|c|c| }
-%      \hline
-%     Memory Allocator & Configuration 1 Result & Configuration 2 Result\\
-%      \hline
-%     llh & Figure~\ref{fig:mem-1-prod-1-cons-100-llh} & Figure~\ref{fig:mem-4-prod-4-cons-100-llh}\\
-%      \hline
-%     dl & Figure~\ref{fig:mem-1-prod-1-cons-100-dl} & Figure~\ref{fig:mem-4-prod-4-cons-100-dl}\\
-%      \hline
-%     glibc & Figure~\ref{fig:mem-1-prod-1-cons-100-glc} & Figure~\ref{fig:mem-4-prod-4-cons-100-glc}\\
-%      \hline
-%     hoard & Figure~\ref{fig:mem-1-prod-1-cons-100-hrd} & Figure~\ref{fig:mem-4-prod-4-cons-100-hrd}\\
-%      \hline
-%     je & Figure~\ref{fig:mem-1-prod-1-cons-100-je} & Figure~\ref{fig:mem-4-prod-4-cons-100-je}\\
-%      \hline
-%     pt3 & Figure~\ref{fig:mem-1-prod-1-cons-100-pt3} & Figure~\ref{fig:mem-4-prod-4-cons-100-pt3}\\
-%      \hline
-%     rp & Figure~\ref{fig:mem-1-prod-1-cons-100-rp} & Figure~\ref{fig:mem-4-prod-4-cons-100-rp}\\
-%      \hline
-%     tbb & Figure~\ref{fig:mem-1-prod-1-cons-100-tbb} & Figure~\ref{fig:mem-4-prod-4-cons-100-tbb}\\
-%      \hline
-%     \end{tabular}
-% \caption{Memory benchmark results}
-% \label{table:mem-benchmark-figs}
-% \end{table}
-% Table Section~\ref{table:mem-benchmark-figs} shows the list of figures that contain memory benchmark results.
-
-Figures~\ref{fig:mem-1-prod-1-cons-100-llh}{fig:mem-4-prod-4-cons-100-tbb} show 16 figures, two figures for each of the 8 allocators, one for each configuration.
-Each figure has 2 graphs, one for each experiment environment.
-Each graph has following 5 subgraphs that show memory usage and statistics throughout the micro-benchmark's lifetime.
-\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
-\item \textit{\textbf{current\_req\_mem(B)}} shows the amount of dynamic memory requested and currently in-use of the benchmark.
-\item \textit{\textbf{heap}}* shows the memory requested by the program (allocator) from the system that lies in the heap (@sbrk@) area.
-\item \textit{\textbf{mmap\_so}}* shows the memory requested by the program (allocator) from the system that lies in the @mmap@ area.
-\item \textit{\textbf{mmap}}* shows the memory requested by the program (allocator or shared libraries) from the system that lies in the @mmap@ area.
-\item \textit{\textbf{total\_dynamic}} shows the total usage of dynamic memory by the benchmark program, which is a sum of \textit{heap}, \textit{mmap}, and \textit{mmap\_so}.
-\end{itemize}
-* These statistics are gathered by monitoring a process's @/proc/self/maps@ file.
-
-The X-axis shows the time when the memory information is polled.
-The Y-axis shows the memory usage in bytes.
-
-For this experiment, the difference between the memory requested by the benchmark (\textit{current\_req\_mem(B)}) and the memory that the process has received from system (\textit{heap}, \textit{mmap}) should be minimum.
-This difference is the memory overhead caused by the allocator and shows the level of fragmentation in the allocator.
-
-\paragraph{Assessment}
-First, the differences in the shape of the curves between architectures (top ARM, bottom x64) is small, where the differences are in the amount of memory used.
-Hence, it is possible to focus on either the top or bottom graph.
-
-Second, the heap curve is 0 for four memory allocators: \textsf{hrd}, \textsf{je}, \textsf{pt3}, and \textsf{rp}, indicating these memory allocators only use @mmap@ to get memory from the system and ignore the @sbrk@ area.
-
-The total dynamic memory is higher for \textsf{hrd} and \textsf{tbb} than the other allocators.
-The main reason is the use of superblocks (see Section~\ref{s:ObjectContainers}) containing objects of the same size.
-These superblocks are maintained throughout the life of the program.
-
-\textsf{pt3} is the only memory allocator where the total dynamic memory goes down in the second half of the program lifetime when the memory is freed by the benchmark program.
-It makes pt3 the only memory allocator that gives memory back to the OS as it is freed by the program.
-
-% FOR 1 THREAD
-
-%mem-1-prod-1-cons-100-llh.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-llh} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-llh} }
-\caption{Memory benchmark results with Configuration-1 for llh memory allocator}
-\label{fig:mem-1-prod-1-cons-100-llh}
-\end{figure}
-
-%mem-1-prod-1-cons-100-dl.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-dl} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-dl} }
-\caption{Memory benchmark results with Configuration-1 for dl memory allocator}
-\label{fig:mem-1-prod-1-cons-100-dl}
-\end{figure}
-
-%mem-1-prod-1-cons-100-glc.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-glc} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-glc} }
-\caption{Memory benchmark results with Configuration-1 for glibc memory allocator}
-\label{fig:mem-1-prod-1-cons-100-glc}
-\end{figure}
-
-%mem-1-prod-1-cons-100-hrd.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-hrd} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-hrd} }
-\caption{Memory benchmark results with Configuration-1 for hoard memory allocator}
-\label{fig:mem-1-prod-1-cons-100-hrd}
-\end{figure}
-
-%mem-1-prod-1-cons-100-je.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-je} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-je} }
-\caption{Memory benchmark results with Configuration-1 for je memory allocator}
-\label{fig:mem-1-prod-1-cons-100-je}
-\end{figure}
-
-%mem-1-prod-1-cons-100-pt3.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-pt3} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-pt3} }
-\caption{Memory benchmark results with Configuration-1 for pt3 memory allocator}
-\label{fig:mem-1-prod-1-cons-100-pt3}
-\end{figure}
-
-%mem-1-prod-1-cons-100-rp.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-rp} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-rp} }
-\caption{Memory benchmark results with Configuration-1 for rp memory allocator}
-\label{fig:mem-1-prod-1-cons-100-rp}
-\end{figure}
-
-%mem-1-prod-1-cons-100-tbb.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-tbb} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-tbb} }
-\caption{Memory benchmark results with Configuration-1 for tbb memory allocator}
-\label{fig:mem-1-prod-1-cons-100-tbb}
-\end{figure}
-
-% FOR 4 THREADS
-
-%mem-4-prod-4-cons-100-llh.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-llh} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-llh} }
-\caption{Memory benchmark results with Configuration-2 for llh memory allocator}
-\label{fig:mem-4-prod-4-cons-100-llh}
-\end{figure}
-
-%mem-4-prod-4-cons-100-dl.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-dl} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-dl} }
-\caption{Memory benchmark results with Configuration-2 for dl memory allocator}
-\label{fig:mem-4-prod-4-cons-100-dl}
-\end{figure}
-
-%mem-4-prod-4-cons-100-glc.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-glc} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-glc} }
-\caption{Memory benchmark results with Configuration-2 for glibc memory allocator}
-\label{fig:mem-4-prod-4-cons-100-glc}
-\end{figure}
-
-%mem-4-prod-4-cons-100-hrd.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-hrd} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-hrd} }
-\caption{Memory benchmark results with Configuration-2 for hoard memory allocator}
-\label{fig:mem-4-prod-4-cons-100-hrd}
-\end{figure}
-
-%mem-4-prod-4-cons-100-je.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-je} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-je} }
-\caption{Memory benchmark results with Configuration-2 for je memory allocator}
-\label{fig:mem-4-prod-4-cons-100-je}
-\end{figure}
-
-%mem-4-prod-4-cons-100-pt3.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-pt3} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-pt3} }
-\caption{Memory benchmark results with Configuration-2 for pt3 memory allocator}
-\label{fig:mem-4-prod-4-cons-100-pt3}
-\end{figure}
-
-%mem-4-prod-4-cons-100-rp.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-rp} } \\
-	%\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-rp} }
-\caption{Memory benchmark results with Configuration-2 for rp memory allocator}
-\label{fig:mem-4-prod-4-cons-100-rp}
-\end{figure}
-
-%mem-4-prod-4-cons-100-tbb.eps
-\begin{figure}
-\centering
-    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-tbb} } \\
-    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-tbb} }
-\caption{Memory benchmark results with Configuration-2 for tbb memory allocator}
-\label{fig:mem-4-prod-4-cons-100-tbb}
-\end{figure}
-
 
 \section{Conclusion}
 
-% \noindent
-% ====================
-% 
-% Writing Points:
-% \begin{itemize}
-% \item
-% Summarize u-benchmark suite.
-% \item
-% Summarize @uHeapLmmm@.
-% \item
-% Make recommendations on memory allocator design.
-% \end{itemize}
-% 
-% \noindent
-% ====================
-
-The goal of this work was to build a low-latency (or high bandwidth) memory allocator for both KT and UT multi-threading systems that is competitive with the best current memory allocators while extending the feature set of existing and new allocator routines.
-The new llheap memory-allocator achieves all of these goals, while maintaining and managing sticky allocation information without a performance loss.
-Hence, it becomes possible to use @realloc@ frequently as a safe operation, rather than just occasionally.
-Furthermore, the ability to query sticky properties and information allows programmers to write safer programs, as it is possible to dynamically match allocation styles from unknown library routines that return allocations.
-
-Extending the C allocation API with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ means programmers do not have to do these useful allocation operations themselves.
-The ability to use \CFA's advanced type-system (and possibly \CC's too) to have one allocation routine with completely orthogonal sticky properties shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation.
-
+The goal of this work is to build a full-featured, low-latency (or high bandwidth) memory allocator for both KT and UT multi-threading systems that is competitive with the best current memory allocators while extending the feature set of existing and new allocator routines.
+The new llheap allocator achieves all of these goals, while maintaining and managing sticky allocation information \emph{without a performance loss}.
+Hence, it is possible to use @realloc@ frequently as a safe operation, rather than just occasionally or not at all.
+Furthermore, the ability to query sticky properties and other information allows programmers to write safer programs, as it is possible to dynamically match allocation styles from unknown library routines that return allocations.
+
+Extending the C allocation API with @resize@, advanced @realloc@, @aalloc@, @amemalign@, @cmemalign@ and other alignment variations means programmers do not have to generate these allocation operations themselves.
+The ability of the type systems in modern languages, \eg \CFA, to condense the allocation API to one routine with completely orthogonal allocation properties shows how far the allocation API can be advanced.
+The result is increased safety and a cognitive reduction in performing dynamic allocation.
+All of these extensions should eliminate common reasons for C programmers to roll their own memory allocator and/or allocation function, which is a huge safety advantage.
+
+The ability to compile llheap with static/dynamic linking and optional statistics/debugging provides programmers with multiple mechanisms to balance performance and safety.
+These allocator versions are easy to use because they can be linked to an application without recompilation.
 Providing comprehensive statistics for all allocation operations is invaluable in understanding and debugging a program's dynamic behaviour.
 No other memory allocator provides such comprehensive statistics gathering.
-This capability was used extensively during the development of llheap to verify its behaviour.
-As well, providing a debugging mode where allocations are checked, along with internal pre/post conditions and invariants, is extremely useful, especially for students.
-While not as powerful as the @valgrind@ interpreter, a large number of allocation mistakes are detected.
-Finally, contention-free statistics gathering and debugging have a low enough cost to be used in production code.
-
-The ability to compile llheap with static/dynamic linking and optional statistics/debugging provides programers with multiple mechanisms to balance performance and safety.
-These allocator versions are easy to use because they can be linked to an application without recompilation.
-
-Starting a micro-benchmark test-suite for comparing allocators, rather than relying on a suite of arbitrary programs, has been an interesting challenge.
-The current micro-benchmarks allow some understanding of allocator implementation properties without actually looking at the implementation.
-For example, the memory micro-benchmark quickly identified how several of the allocators work at the global level.
-It was not possible to show how the micro-benchmarks adjustment knobs were used to tune to an interesting test point.
-Many graphs were created and discarded until a few were selected for the work.
-
-
-\subsection{Future Work}
-
-A careful walk-though of the allocator fastpath should yield additional optimizations for a slight performance gain.
-In particular, analysing the implementation of rpmalloc, which is often the fastest allocator,
-
-The micro-benchmark project requires more testing and analysis.
-Additional allocation patterns are needed to extract meaningful information about allocators, and within allocation patterns, what are the most useful tuning knobs.
-Also, identifying ways to visualize the results of the micro-benchmarks is a work in progress.
-
-After llheap is made available on GitHub, interacting with its users to locate problems and improvements will make llbench a more robust memory allocator.
-As well, feedback from the \uC and \CFA projects, which have adopted llheap for their memory allocator, will provide additional information.
-
+This capability was used extensively during the development of llheap to verify its behaviour, and to verify the benchmarks developed for the paper.
+As well, the debugging mode, where allocations are checked along with internal pre/post-conditions and invariants, is extremely useful especially for students ($\approx$1,000 students have tested the \uC version of llheap).
+While not as powerful as the @valgrind@ interpreter, llheap's debugging mode can detect a large number of allocation mistakes.
+The contention-free statistics gathering and debugging have a low enough cost to be used in production code.
+Finally, no other memory allocator addresses the needs of user-level threading, which are now available in many modern languages.
+
+Creating a benchmark test-suite for comparing allocators, rather than relying on a suite of arbitrary programs, has been an interesting challenge.
+The purpose of these performance tests is not to pick winners and losers among the allocators, because each allocator optimizes a particular set of allocation patterns: there is no optimal memory-allocator.
+The goal is to demonstrate that llheap's performance, both in time and space, across some interesting allocation patterns, is comparable to the best allocators in use today.
+Admittedly, there are pathological cases where llheap might use significant amounts of memory because it never coalesces or returns storage to the OS.
+These pathological cases do not correlate to long-running applications, where llheap can perform very well.
+In the small set of tested benchmarks, no heap blowup was observed, while some tests caused time blowups in other allocators.
+Therefore, llheap is a viable drop-in replacement for many applications and its ancillary features make it safer and more informative.
+
+
+\subsection{Recommendations}
+
+Substantial work has been put into building a new allocator and benchmarks, plus doing comprehensive performance tests among allocators.
+Based on this work, we make two recommendations:
+\begin{enumerate}[leftmargin=*, topsep=0pt,itemsep=0pt,parsep=0pt]
+\item
+Hoard is no longer maintained and did not do well (even broke) in some performance experiments.
+We recommend to those doing memory allocation research not to use it.
+\item
+glibc did not perform as well as other allocators.
+Given it is the default memory allocator for many academic and industry applications, this seems unfortunate and skews performance results, so developers may draw incorrect conclusions.
+As such, we recommend the adoption of a newer memory allocator for glibc.
+We offer llheap for the reasons given above, but most importantly, its small code base.
+glibc maintainers come and go.
+Therefore, it is crucial for a new maintainer to on-board quickly and have a thorough understanding of the code base within a month.
+The llheap code base is small and can be learned quickly because of its simple design, making it an ideal choice as a substitute allocator.
+\end{enumerate}
 
 
@@ -3016,11 +2119,11 @@
 
 This research is funded by the NSERC/Waterloo-Huawei (\url{http://www.huawei.com}) Joint Innovation Lab. %, and Peter Buhr is partially funded by the Natural Sciences and Engineering Research Council of Canada.
-
-{%
-\fontsize{9bp}{11.5bp}\selectfont%
+% Special thanks to Trevor Brown for many helpful discussions.
+
+\bibliographystyle{ACM-Reference-Format}
 \bibliography{pl,local}
-}%
 
 \end{document}
+\endinput
 
 % Local Variables: %
Index: doc/papers/llheap/figures/AddressSpace.fig
===================================================================
--- doc/papers/llheap/figures/AddressSpace.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/AddressSpace.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -8,8 +8,24 @@
 -2
 1200 2
+6 5700 1200 6600 1800
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 1200 1200 2100 1200 2100 1800 1200 1800 1200 1200
-2 2 0 1 0 7 60 -1 17 0.000 0 0 -1 0 0 5
-	 2100 1200 3000 1200 3000 1800 2100 1800 2100 1200
+	 5700 1250 6600 1250 6600 1750 5700 1750 5700 1250
+4 1 0 50 -1 0 9 0.0000 2 120 660 6150 1575 Code and\001
+4 1 0 50 -1 0 9 0.0000 2 120 375 6150 1400 Static\001
+4 1 0 50 -1 0 9 0.0000 2 120 315 6150 1725 Data\001
+-6
+6 3000 1200 3900 1800
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 3000 1250 3900 1250 3900 1750 3000 1750 3000 1250
+-6
+6 1200 1200 2100 1800
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 1200 1250 2100 1250 2100 1750 1200 1750 1200 1250
+-6
+6 4800 1200 5700 1800
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4800 1250 5700 1250 5700 1750 4800 1750 4800 1250
+-6
+6 2100 1200 3000 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
 	1 1 1.00 45.00 90.00
@@ -18,8 +34,10 @@
 	1 1 1.00 45.00 90.00
 	 3000 1500 2700 1500
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 3000 1200 3900 1200 3900 1800 3000 1800 3000 1200
 2 2 0 1 0 7 60 -1 17 0.000 0 0 -1 0 0 5
-	 3900 1200 4800 1200 4800 1800 3900 1800 3900 1200
+	 2100 1250 3000 1250 3000 1750 2100 1750 2100 1250
+4 1 0 50 -1 0 9 0.0000 2 150 600 2550 1700 Memory\001
+4 1 0 50 -1 0 9 0.0000 2 120 300 2550 1450 Free\001
+-6
+6 3900 1200 4800 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
 	1 1 1.00 45.00 90.00
@@ -28,21 +46,15 @@
 	1 1 1.00 45.00 90.00
 	 4800 1500 4500 1500
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 4800 1200 5700 1200 5700 1800 4800 1800 4800 1200
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 5700 1200 6600 1200 6600 1800 5700 1800 5700 1200
-4 0 0 50 -1 0 10 0.0000 2 165 870 1200 2025 high address\001
-4 2 0 50 -1 0 10 0.0000 2 120 810 6600 2025 low address\001
-4 1 0 50 -1 0 10 0.0000 2 120 375 1650 1575 Stack\001
-4 1 0 50 -1 0 10 0.0000 2 150 600 2550 1725 Memory\001
-4 1 0 50 -1 0 10 0.0000 2 120 300 2550 1425 Free\001
-4 1 0 50 -1 0 10 0.0000 2 120 660 3450 1575 Code and\001
-4 1 0 50 -1 0 10 0.0000 2 150 630 3450 1350 Dynamic\001
-4 1 0 50 -1 0 10 0.0000 2 120 315 3450 1775 Data\001
-4 1 0 50 -1 0 10 0.0000 2 120 300 4350 1425 Free\001
-4 1 0 50 -1 0 10 0.0000 2 150 600 4350 1725 Memory\001
-4 1 4 50 -1 0 10 0.0000 2 150 630 5250 1425 Dynamic\001
-4 1 0 50 -1 0 10 0.0000 2 120 315 6150 1775 Data\001
-4 1 0 50 -1 0 10 0.0000 2 120 660 6150 1575 Code and\001
-4 1 0 50 -1 0 10 0.0000 2 120 375 6150 1350 Static\001
-4 1 4 50 -1 0 10 0.0000 2 120 720 5250 1725 Allocation\001
+2 2 0 1 0 7 60 -1 17 0.000 0 0 -1 0 0 5
+	 3900 1250 4800 1250 4800 1750 3900 1750 3900 1250
+4 1 0 50 -1 0 9 0.0000 2 150 600 4350 1700 Memory\001
+4 1 0 50 -1 0 9 0.0000 2 120 300 4350 1450 Free\001
+-6
+4 1 0 50 -1 0 9 0.0000 2 120 375 1650 1575 Stack\001
+4 1 0 50 -1 0 9 0.0000 2 120 660 3450 1575 Code and\001
+4 1 0 50 -1 0 9 0.0000 2 120 315 3450 1725 Data\001
+4 1 0 50 -1 0 9 0.0000 2 150 630 3450 1400 Dynamic\001
+4 1 4 50 -1 0 9 0.0000 2 150 630 5250 1450 Dynamic\001
+4 1 4 50 -1 0 9 0.0000 2 120 720 5250 1700 Allocation\001
+4 0 0 50 -1 0 9 0.0000 2 165 870 1200 1950 high address\001
+4 2 0 50 -1 0 9 0.0000 2 120 810 6600 1950 low address\001
Index: doc/papers/llheap/figures/Alignment2.fig
===================================================================
--- doc/papers/llheap/figures/Alignment2.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/Alignment2.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -8,24 +8,22 @@
 -2
 1200 2
-2 1 1 1 0 7 25 -1 -1 4.000 0 0 -1 0 0 2
-	 2100 1500 2100 1800
-2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
-	 5700 1500 5700 1800
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 5700 1575 5700 1800
+2 1 0 1 0 7 25 -1 -1 0.000 0 0 -1 0 0 2
+	 2400 1575 2400 1800
+2 1 0 1 0 7 25 -1 -1 0.000 0 0 -1 0 0 2
+	 4200 1575 4200 1800
+2 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
+	 1200 1575 6600 1575 6600 1800 1200 1800 1200 1575
 2 2 0 0 0 7 60 -1 18 0.000 0 0 -1 0 0 5
-	 2100 1500 4200 1500 4200 1800 2100 1800 2100 1500
-2 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
-	 1200 1500 6600 1500 6600 1800 1200 1800 1200 1500
-2 1 1 1 0 7 25 -1 -1 4.000 0 0 -1 0 0 2
-	 4200 1500 4200 1800
+	 2400 1575 4200 1575 4200 1800 2400 1800 2400 1575
 2 2 0 0 0 7 60 -1 18 0.000 0 0 -1 0 0 5
-	 5700 1500 6600 1500 6600 1800 5700 1800 5700 1500
-4 1 0 50 -1 0 10 0.0000 2 135 540 1650 1725 header\001
-4 1 0 50 -1 4 10 0.0000 2 150 135 1200 2025 H\001
-4 1 0 50 -1 4 10 0.0000 2 150 135 2100 2025 P\001
-4 0 0 50 -1 0 10 0.0000 2 180 1575 2175 2025 (min. alignment M)\001
-4 1 0 50 -1 0 10 0.0000 2 180 510 4950 1725 object\001
-4 1 0 50 -1 0 10 0.0000 2 135 315 4950 1425 size\001
-4 1 0 50 -1 0 10 0.0000 2 180 1815 3150 1425 internal fragmentation\001
-4 1 0 50 -1 0 10 0.0000 2 135 585 6150 1725 unused\001
-4 1 0 50 -1 4 10 0.0000 2 150 135 4200 2025 A\001
-4 0 0 50 -1 0 10 0.0000 2 180 1200 4275 2025 (multiple of N)\001
+	 5700 1575 6600 1575 6600 1800 5700 1800 5700 1575
+4 1 0 50 -1 0 9 0.0000 2 135 360 4950 1725 object\001
+4 1 0 50 -1 0 9 0.0000 2 105 420 6150 1725 unused\001
+4 1 0 50 -1 0 9 0.0000 2 105 375 1800 1725 header\001
+4 1 0 50 -1 0 9 0.0000 2 135 1320 3300 1500 internal fragmentation\001
+4 1 0 50 -1 0 9 0.0000 2 105 225 4950 1500 size\001
+4 0 0 50 -1 0 9 0.0000 2 135 1140 2400 1950 $P$ (aligned $M$)\001
+4 0 0 50 -1 0 9 0.0000 2 135 1155 1200 1950 $H$ (aligned $M$)\001
+4 0 0 50 -1 0 9 0.0000 2 135 1365 4200 1950 $A$ (multiple of $N$)\001
Index: doc/papers/llheap/figures/Alignment2Impl.fig
===================================================================
--- doc/papers/llheap/figures/Alignment2Impl.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/Alignment2Impl.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -9,6 +9,4 @@
 1200 2
 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
-	 2100 1500 2100 1875
-2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
 	 4200 1500 4200 1875
 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
@@ -17,19 +15,20 @@
 	1 1 1.00 45.00 90.00
 	 3300 1725 2100 1725
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 2100 1500 2100 1875
 2 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
 	 1200 1500 5700 1500 5700 1875 1200 1875 1200 1500
 2 2 0 0 0 7 60 -1 18 0.000 0 0 -1 0 0 5
 	 2100 1500 3300 1500 3300 1875 2100 1875 2100 1500
-4 1 0 50 -1 0 10 0.0000 2 180 1815 2550 1425 internal fragmentation\001
-4 1 0 50 -1 0 10 0.0000 2 180 510 4950 1725 object\001
-4 1 0 50 -1 0 10 0.0000 2 135 315 4950 1425 size\001
-4 1 0 50 -1 4 10 0.0000 2 150 135 1200 2100 H\001
-4 1 0 50 -1 4 10 0.0000 2 150 135 2100 2100 P\001
-4 0 0 50 -1 0 10 0.0000 2 180 1575 2175 2100 (min. alignment M)\001
-4 1 0 50 -1 4 10 0.0000 2 150 135 4200 2100 A\001
-4 0 0 50 -1 0 10 0.0000 2 180 1200 4275 2100 (multiple of N)\001
-4 1 0 50 -1 0 10 0.0000 2 135 540 3750 1850 header\001
-4 1 0 50 -1 0 10 0.0000 2 135 345 3750 1700 fake\001
-4 1 0 50 -1 0 10 0.0000 2 135 450 2700 1700 offset\001
-4 1 0 50 -1 0 10 0.0000 2 135 540 1650 1850 header\001
-4 1 0 50 -1 0 10 0.0000 2 135 570 1650 1675 normal\001
+4 1 0 50 -1 0 9 0.0000 2 135 1320 2550 1425 internal fragmentation\001
+4 1 0 50 -1 0 9 0.0000 2 135 360 4950 1725 object\001
+4 1 0 50 -1 0 9 0.0000 2 105 225 4950 1425 size\001
+4 1 0 50 -1 0 9 0.0000 2 105 330 2700 1700 offset\001
+4 1 0 50 -1 0 9 0.0000 2 105 420 1650 1650 normal\001
+4 1 0 50 -1 0 9 0.0000 2 105 240 3750 1650 fake\001
+4 1 0 50 -1 0 9 0.0000 2 105 375 1650 1800 header\001
+4 1 0 50 -1 0 9 0.0000 2 105 375 3750 1800 header\001
+4 0 0 50 -1 0 9 0.0000 2 120 255 1125 2025 $H$\001
+4 0 0 50 -1 0 9 0.0000 2 120 240 2025 2025 $P$\001
+4 0 0 50 -1 0 9 0.0000 2 120 240 3225 2025 $F$\001
+4 0 0 50 -1 0 9 0.0000 2 120 255 4125 2025 $A$\001
Index: doc/papers/llheap/figures/AllocatedObject.fig
===================================================================
--- doc/papers/llheap/figures/AllocatedObject.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/AllocatedObject.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -1,28 +1,28 @@
-#FIG 3.2  Produced by xfig version 3.2.5
+#FIG 3.2  Produced by xfig version 3.2.7b
 Landscape
 Center
 Inches
-Letter  
+Letter
 100.00
 Single
 -2
 1200 2
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 2100 1275 2100 1500
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 3000 1275 3000 1500
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 3900 1275 3900 1500
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 4800 1275 4800 1500
+2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 1200 1275 5700 1275 5700 1500 1200 1500 1200 1275
 2 2 0 0 0 7 60 -1 17 0.000 0 0 -1 0 0 5
-	 3900 1200 4800 1200 4800 1500 3900 1500 3900 1200
+	 2100 1275 3000 1275 3000 1500 2100 1500 2100 1275
 2 2 0 0 0 7 60 -1 17 0.000 0 0 -1 0 0 5
-	 2100 1200 3000 1200 3000 1500 2100 1500 2100 1200
-2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 1200 1200 5700 1200 5700 1500 1200 1500 1200 1200
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 2100 1200 2100 1500
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3000 1200 3000 1500
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3900 1200 3900 1500
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 4800 1200 4800 1500
-4 1 0 50 -1 0 10 0.0000 2 135 555 1650 1425 Header\001
-4 1 0 50 -1 0 10 0.0000 2 180 600 2550 1425 Padding\001
-4 1 0 50 -1 0 10 0.0000 2 180 510 3450 1425 Object\001
-4 1 0 50 -1 0 10 0.0000 2 180 600 4350 1425 Spacing\001
-4 1 0 50 -1 0 10 0.0000 2 135 495 5250 1425 Trailer\001
+	 3900 1275 4800 1275 4800 1500 3900 1500 3900 1275
+4 1 0 50 -1 0 9 0.0000 2 105 405 1650 1425 Header\001
+4 1 0 50 -1 0 9 0.0000 2 135 495 2550 1425 Padding\001
+4 1 0 50 -1 0 9 0.0000 2 135 390 3450 1425 Object\001
+4 1 0 50 -1 0 9 0.0000 2 135 480 4350 1425 Spacing\001
+4 1 0 50 -1 0 9 0.0000 2 105 390 5250 1425 Trailer\001
Index: doc/papers/llheap/figures/AllocatorComponents.fig
===================================================================
--- doc/papers/llheap/figures/AllocatorComponents.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/AllocatorComponents.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -17,12 +17,8 @@
 	 4200 1800 4800 1800 4800 2100 4200 2100 4200 1800
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 4200 2100 5100 2100 5100 2400 4200 2400 4200 2100
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 5100 2100 6300 2100 6300 2400 5100 2400 5100 2100
-2 2 0 1 0 7 50 -1 17 0.000 0 0 -1 0 0 5
+2 2 0 1 0 7 50 -1 18 0.000 0 0 -1 0 0 5
 	 3300 1800 4200 1800 4200 2100 3300 2100 3300 1800
-2 2 0 1 0 7 50 -1 17 0.000 0 0 -1 0 0 5
-	 5400 1800 6300 1800 6300 2100 5400 2100 5400 1800
-2 2 0 1 0 7 50 -1 17 0.000 0 0 -1 0 0 5
+2 2 0 1 0 7 50 -1 18 0.000 0 0 -1 0 0 5
 	 3300 2100 3600 2100 3600 2400 3300 2400 3300 2100
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
@@ -30,10 +26,6 @@
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 3900 2400 4800 2400 4800 2700 3900 2700 3900 2400
-2 2 0 1 0 7 50 -1 17 0.000 0 0 -1 0 0 5
+2 2 0 1 0 7 50 -1 18 0.000 0 0 -1 0 0 5
 	 4800 2400 5400 2400 5400 2700 4800 2700 4800 2400
-2 2 0 1 0 7 50 -1 17 0.000 0 0 -1 0 0 5
-	 4800 1800 5400 1800 5400 2100 4800 2100 4800 1800
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 5400 2400 6300 2400 6300 2700 5400 2700 5400 2400
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
 	1 1 1.00 45.00 90.00
@@ -58,9 +50,17 @@
 2 2 0 1 0 7 60 -1 13 0.000 0 0 -1 0 0 5
 	 3300 2700 6300 2700 6300 3000 3300 3000 3300 2700
-4 0 0 50 -1 2 10 0.0000 2 165 1005 3300 1725 Storage Data\001
-4 2 0 50 -1 0 10 0.0000 2 165 810 3000 1875 free objects\001
-4 2 0 50 -1 0 10 0.0000 2 135 1140 3000 2850 reserve memory\001
-4 1 0 50 -1 0 10 0.0000 2 120 795 2325 1500 Static Zone\001
-4 1 0 50 -1 0 10 0.0000 2 165 1845 4800 1500 Dynamic-Allocation Zone\001
-4 2 0 50 -1 2 10 0.0000 2 165 1005 2325 2325 Management\001
-4 2 0 50 -1 2 10 0.0000 2 135 375 2325 2525 Data\001
+2 2 0 1 0 7 50 -1 18 0.000 0 0 -1 0 0 5
+	 5400 1800 6300 1800 6300 2100 5400 2100 5400 1800
+2 2 0 1 0 7 50 -1 18 0.000 0 0 -1 0 0 5
+	 4800 1800 5400 1800 5400 2100 4800 2100 4800 1800
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4200 2100 5100 2100 5100 2400 4200 2400 4200 2100
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 5400 2400 6300 2400 6300 2700 5400 2700 5400 2400
+4 0 0 50 -1 2 9 0.0000 2 150 975 3300 1725 Storage Data\001
+4 2 0 50 -1 0 9 0.0000 2 150 795 3000 1875 free objects\001
+4 2 0 50 -1 0 9 0.0000 2 135 1215 3000 2850 reserved memory\001
+4 1 0 50 -1 0 9 0.0000 2 120 780 2325 1500 Static Zone\001
+4 1 0 50 -1 0 9 0.0000 2 150 1815 4800 1500 Dynamic-Allocation Zone\001
+4 2 0 50 -1 2 9 0.0000 2 150 945 2325 2325 Management\001
+4 2 0 50 -1 2 9 0.0000 2 120 360 2325 2525 Data\001
Index: doc/papers/llheap/figures/Container.fig
===================================================================
--- doc/papers/llheap/figures/Container.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/Container.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -1,29 +1,28 @@
-#FIG 3.2  Produced by xfig version 3.2.5-alpha5
+#FIG 3.2  Produced by xfig version 3.2.7b
 Landscape
 Center
 Inches
-Letter  
+Letter
 100.00
 Single
 -2
 1200 2
-6 1200 1125 2100 1575
+6 4630 1380 4970 1420
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4650 1400 20 20 4650 1400 4670 1400
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4950 1400 20 20 4950 1400 4970 1400
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4800 1400 20 20 4800 1400 4820 1400
+-6
 2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 1275 1200 2025 1200 2025 1500 1275 1500 1275 1200
-4 1 0 50 -1 0 10 0.0000 2 135 555 1650 1425 Header\001
--6
-6 1950 1125 2850 1575
+	 1275 1275 2025 1275 2025 1500 1275 1500 1275 1275
 2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 2025 1200 2775 1200 2775 1500 2025 1500 2025 1200
-4 1 0 50 -1 0 10 0.0000 2 195 870 2400 1425 Object$_1$\001
--6
-6 2700 1125 3600 1575
+	 2025 1275 2775 1275 2775 1500 2025 1500 2025 1275
 2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 2775 1200 3525 1200 3525 1500 2775 1500 2775 1200
-4 1 0 50 -1 0 10 0.0000 2 195 870 3150 1425 Object$_2$\001
--6
-6 3450 1125 4350 1575
+	 2775 1275 3525 1275 3525 1500 2775 1500 2775 1275
 2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 3525 1200 4275 1200 4275 1500 3525 1500 3525 1200
-4 1 0 50 -1 0 10 0.0000 2 195 870 3900 1425 Object$_3$\001
--6
+	 3525 1275 4275 1275 4275 1500 3525 1500 3525 1275
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4275 1275 5400 1275 5400 1500 4275 1500 4275 1275
+4 1 0 50 -1 0 9 0.0000 2 105 405 1650 1425 Header\001
+4 1 0 50 -1 0 9 0.0000 2 135 690 2400 1425 Object$_1$\001
+4 1 0 50 -1 0 9 0.0000 2 135 690 3150 1425 Object$_2$\001
+4 1 0 50 -1 0 9 0.0000 2 135 690 3900 1425 Object$_3$\001
Index: doc/papers/llheap/figures/FakeHeader.fig
===================================================================
--- doc/papers/llheap/figures/FakeHeader.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/FakeHeader.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -9,16 +9,17 @@
 1200 2
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 2700 1500 2700 1800
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 1200 1500 4200 1500 4200 1800 1200 1800 1200 1500
+	 3600 1575 3600 1800
 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
-	 2550 1500 2550 1800
+	 3450 1575 3450 1800
 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
 	1 1 1.00 45.00 90.00
-	 2925 1950 2625 1950 2625 1800
-4 1 0 50 -1 0 10 0.0000 2 135 450 3450 1725 offset\001
-4 1 0 50 -1 0 10 0.0000 2 180 825 1950 1725 alignment\001
-4 1 0 50 -1 0 10 0.0000 2 135 105 2625 1725 1\001
-4 0 0 50 -1 0 10 0.0000 2 180 1920 3000 2025 alignment (fake header)\001
-4 1 0 50 -1 0 10 0.0000 2 180 765 1950 1425 4/8-bytes\001
-4 1 0 50 -1 0 10 0.0000 2 180 765 3450 1425 4/8-bytes\001
+	 3825 1950 3525 1950 3525 1800
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 2100 1575 5100 1575 5100 1800 2100 1800 2100 1575
+4 1 0 50 -1 0 9 0.0000 2 105 330 4350 1725 offset\001
+4 1 0 50 -1 0 9 0.0000 2 135 600 2850 1725 alignment\001
+4 1 0 50 -1 0 9 0.0000 2 105 75 3525 1725 1\001
+4 0 0 50 -1 0 9 0.0000 2 135 1575 3900 2025 $\\Rightarrow$ fake header\001
+4 2 0 50 -1 0 9 0.0000 2 105 660 2025 1725 fake header\001
+4 1 0 50 -1 0 9 0.0000 2 135 555 2850 1500 4/8-bytes\001
+4 1 0 50 -1 0 9 0.0000 2 135 555 4350 1500 4/8-bytes\001
Index: doc/papers/llheap/figures/Header.fig
===================================================================
--- doc/papers/llheap/figures/Header.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/Header.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -11,35 +11,35 @@
 	 1800 1800 4200 1800
 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
-	 1800 2100 4200 2100
+	 3900 1575 3900 2250
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 3600 1575 3600 2250
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 3300 1575 3300 2250
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+	1 1 1.00 45.00 90.00
+	 4200 2400 4050 2400 4050 1725
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+	1 1 1.00 45.00 90.00
+	 4200 2550 3750 2550 3750 1725
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+	1 1 1.00 45.00 90.00
+	 4200 2700 3450 2700 3450 1950
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 1800 2025 4200 2025
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 1800 1500 4200 1500 4200 2400 1800 2400 1800 1500
-2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
-	 3900 1500 3900 2400
-2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
-	 3600 1500 3600 2400
-2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
-	 3300 1500 3300 2400
+	 1800 1575 4200 1575 4200 2250 1800 2250 1800 1575
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 4200 1800 6600 1800 6600 2100 4200 2100 4200 1800
-2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
-	1 1 1.00 45.00 90.00
-	 4200 2775 3750 2775 3750 1725
-2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
-	1 1 1.00 45.00 90.00
-	 4200 2550 4050 2550 4050 1725
-2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
-	1 1 1.00 45.00 90.00
-	 4200 3000 3450 3000 3450 2025
-4 0 0 50 -1 0 10 0.0000 2 180 1185 1875 1725 bucket pointer\001
-4 0 0 50 -1 0 10 0.0000 2 180 1005 1875 2025 mapped size\001
-4 0 0 50 -1 0 10 0.0000 2 135 1215 1875 2325 next free block\001
-4 2 0 50 -1 0 10 0.0000 2 135 480 1725 2025 union\001
-4 1 0 50 -1 0 10 0.0000 2 180 945 5400 2025 request size\001
-4 1 0 50 -1 0 10 0.0000 2 180 765 5400 1425 4/8-bytes\001
-4 1 0 50 -1 0 10 0.0000 2 180 765 3000 1425 4/8-bytes\001
-4 1 0 50 -1 0 10 0.0000 2 135 270 3475 2025 0/1\001
-4 1 0 50 -1 0 10 0.0000 2 135 270 3775 1725 0/1\001
-4 1 0 50 -1 0 10 0.0000 2 135 270 4075 1725 0/1\001
-4 0 0 50 -1 0 10 0.0000 2 180 1515 4275 3075 mapped allocation\001
-4 0 0 50 -1 0 10 0.0000 2 135 825 4275 2850 zero filled\001
-4 0 0 50 -1 0 10 0.0000 2 180 1920 4275 2625 alignment (fake header)\001
+	 4200 1800 6600 1800 6600 2025 4200 2025 4200 1800
+4 0 0 50 -1 0 9 0.0000 2 135 855 1875 1725 bucket pointer\001
+4 1 0 50 -1 0 9 0.0000 2 105 195 3775 1725 0/1\001
+4 1 0 50 -1 0 9 0.0000 2 105 195 4075 1725 0/1\001
+4 2 0 50 -1 0 9 0.0000 2 105 345 1725 1950 union\001
+4 0 0 50 -1 0 9 0.0000 2 135 735 1875 1950 mapped size\001
+4 1 0 50 -1 0 9 0.0000 2 105 195 3450 1950 0/1\001
+4 1 0 50 -1 0 9 0.0000 2 135 690 5400 1950 request size\001
+4 1 0 50 -1 0 9 0.0000 2 135 555 3000 1500 4/8-bytes\001
+4 1 0 50 -1 0 9 0.0000 2 135 555 5400 1500 4/8-bytes\001
+4 0 0 50 -1 0 9 0.0000 2 105 885 1875 2175 next free block\001
+4 0 0 50 -1 0 9 0.0000 2 105 600 4275 2600 zero filled\001
+4 0 0 50 -1 0 9 0.0000 2 135 1095 4275 2750 mapped allocation\001
+4 0 0 50 -1 0 9 0.0000 2 135 1395 4275 2450 alignment (fake header)\001
Index: doc/papers/llheap/figures/IntExtFragmentation.fig
===================================================================
--- doc/papers/llheap/figures/IntExtFragmentation.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/IntExtFragmentation.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -1,21 +1,11 @@
-#FIG 3.2  Produced by xfig version 3.2.5
+#FIG 3.2  Produced by xfig version 3.2.7b
 Landscape
 Center
 Inches
-Letter  
+Letter
 100.00
 Single
 -2
 1200 2
-6 3150 1200 3900 1500
-2 2 0 0 0 7 60 -1 17 0.000 0 0 -1 0 0 5
-	 3150 1200 3900 1200 3900 1500 3150 1500 3150 1200
-4 1 0 50 -1 0 10 0.0000 2 180 600 3525 1425 Spacing\001
--6
-6 4425 1125 5775 1575
-2 2 0 2 0 7 60 -1 17 0.000 0 0 -1 0 0 5
-	 4500 1200 5700 1200 5700 1500 4500 1500 4500 1200
-4 1 0 50 -1 0 10 0.0000 2 180 1020 5100 1425 Free Memory\001
--6
 6 1200 1575 2550 1725
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
@@ -29,5 +19,5 @@
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
 	 2550 1575 2550 1725
-4 1 0 50 -1 0 10 0.0000 2 135 570 1875 1725 internal\001
+4 1 0 50 -1 0 9 0.0000 2 120 525 1875 1725 internal\001
 -6
 6 3150 1575 4500 1725
@@ -42,5 +32,5 @@
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
 	 4500 1575 4500 1725
-4 1 0 50 -1 0 10 0.0000 2 135 570 3825 1725 internal\001
+4 1 0 50 -1 0 9 0.0000 2 120 525 3825 1725 internal\001
 -6
 6 4500 1575 5700 1725
@@ -55,20 +45,26 @@
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
 	 5700 1575 5700 1725
-4 1 0 50 -1 0 10 0.0000 2 135 615 5100 1725 external\001
+4 1 0 50 -1 0 9 0.0000 2 120 555 5100 1725 external\001
 -6
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 2550 1200 2550 1500
+	 2550 1275 2550 1500
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 3150 1275 3150 1500
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 3900 1275 3900 1500
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 1800 1275 1800 1500
 2 2 0 0 0 7 60 -1 17 0.000 0 0 -1 0 0 5
-	 1800 1200 2550 1200 2550 1500 1800 1500 1800 1200
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3150 1200 3150 1500
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3900 1200 3900 1500
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 1800 1200 1800 1500
+	 1800 1275 2550 1275 2550 1500 1800 1500 1800 1275
 2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 1200 1200 4500 1200 4500 1500 1200 1500 1200 1200
-4 1 0 50 -1 0 10 0.0000 2 135 555 1500 1425 Header\001
-4 1 0 50 -1 0 10 0.0000 2 180 600 2175 1425 Padding\001
-4 1 0 50 -1 0 10 0.0000 2 180 510 2850 1425 Object\001
-4 1 0 50 -1 0 10 0.0000 2 135 495 4200 1425 Trailer\001
+	 1200 1275 4500 1275 4500 1500 1200 1500 1200 1275
+2 2 0 0 0 7 60 -1 17 0.000 0 0 -1 0 0 5
+	 3150 1275 3900 1275 3900 1500 3150 1500 3150 1275
+2 2 0 2 0 7 60 -1 17 0.000 0 0 -1 0 0 5
+	 4500 1275 5700 1275 5700 1500 4500 1500 4500 1275
+4 1 0 50 -1 0 9 0.0000 2 120 495 1500 1425 Header\001
+4 1 0 50 -1 0 9 0.0000 2 165 570 2175 1425 Padding\001
+4 1 0 50 -1 0 9 0.0000 2 150 450 2850 1425 Object\001
+4 1 0 50 -1 0 9 0.0000 2 120 465 4200 1425 Trailer\001
+4 1 0 50 -1 0 9 0.0000 2 150 945 5100 1425 Free Memory\001
+4 1 0 50 -1 0 9 0.0000 2 165 555 3525 1425 Spacing\001
Index: doc/papers/llheap/figures/PerThreadHeap.fig
===================================================================
--- doc/papers/llheap/figures/PerThreadHeap.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/PerThreadHeap.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -11,5 +11,5 @@
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 2700 1800 3000 1800 3000 2100 2700 2100 2700 1800
-4 1 0 50 -1 0 10 0.0000 2 120 135 2850 2025 G\001
+4 1 0 50 -1 0 9 0.0000 2 120 135 2850 2025 G\001
 -6
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
@@ -34,11 +34,11 @@
 	1 1 1.00 45.00 90.00
 	 2250 1500 2250 1800
-4 1 0 50 -1 0 10 0.0000 2 180 1260 2550 2025 $\\Leftrightarrow$\001
-4 1 0 50 -1 0 10 0.0000 2 180 1260 3150 2025 $\\Leftrightarrow$\001
-4 0 0 50 -1 0 10 0.0000 2 120 240 3300 2025 OS\001
-4 1 0 50 -1 0 10 0.0000 2 165 495 1350 2025 H$_1$\001
-4 1 0 50 -1 0 10 0.0000 2 165 465 1350 1425 T$_1$\001
-4 1 0 50 -1 0 10 0.0000 2 165 495 1800 2025 H$_2$\001
-4 1 0 50 -1 0 10 0.0000 2 165 465 1800 1425 T$_2$\001
-4 1 0 50 -1 0 10 0.0000 2 165 495 2250 2025 H$_3$\001
-4 1 0 50 -1 0 10 0.0000 2 165 465 2250 1425 T$_3$\001
+4 1 0 50 -1 0 9 0.0000 2 180 1260 2550 2025 $\\Leftrightarrow$\001
+4 1 0 50 -1 0 9 0.0000 2 180 1260 3150 2025 $\\Leftrightarrow$\001
+4 0 0 50 -1 0 9 0.0000 2 120 240 3300 2025 OS\001
+4 1 0 50 -1 0 9 0.0000 2 165 495 1350 2025 H$_1$\001
+4 1 0 50 -1 0 9 0.0000 2 165 465 1350 1425 T$_1$\001
+4 1 0 50 -1 0 9 0.0000 2 165 495 1800 2025 H$_2$\001
+4 1 0 50 -1 0 9 0.0000 2 165 465 1800 1425 T$_2$\001
+4 1 0 50 -1 0 9 0.0000 2 165 495 2250 2025 H$_3$\001
+4 1 0 50 -1 0 9 0.0000 2 165 465 2250 1425 T$_3$\001
Index: doc/papers/llheap/figures/SharedHeaps.fig
===================================================================
--- doc/papers/llheap/figures/SharedHeaps.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/SharedHeaps.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -10,28 +10,28 @@
 6 1500 1200 2100 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350
-4 1 0 50 -1 0 10 0.0000 2 165 465 1800 1425 T$_2$\001
+4 1 0 50 -1 0 9 0.0000 2 165 465 1800 1425 T$_2$\001
 -6
 6 1050 1200 1650 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
-4 1 0 50 -1 0 10 0.0000 2 165 465 1350 1425 T$_1$\001
+4 1 0 50 -1 0 9 0.0000 2 165 465 1350 1425 T$_1$\001
 -6
 6 1950 1200 2550 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350
-4 1 0 50 -1 0 10 0.0000 2 165 465 2250 1425 T$_3$\001
+4 1 0 50 -1 0 9 0.0000 2 165 465 2250 1425 T$_3$\001
 -6
 6 1275 1800 1875 2100
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 1425 1800 1725 1800 1725 2100 1425 2100 1425 1800
-4 1 0 50 -1 0 10 0.0000 2 165 495 1575 2025 H$_1$\001
+4 1 0 50 -1 0 9 0.0000 2 165 495 1575 2025 H$_1$\001
 -6
 6 1725 1800 2325 2100
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 1875 1800 2175 1800 2175 2100 1875 2100 1875 1800
-4 1 0 50 -1 0 10 0.0000 2 165 495 2025 2025 H$_2$\001
+4 1 0 50 -1 0 9 0.0000 2 165 495 2025 2025 H$_2$\001
 -6
 6 2475 1800 2775 2100
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
 	 2475 1800 2775 1800 2775 2100 2475 2100 2475 1800
-4 1 0 50 -1 0 10 0.0000 2 120 135 2625 2025 G\001
+4 1 0 50 -1 0 9 0.0000 2 120 135 2625 2025 G\001
 -6
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
@@ -55,5 +55,5 @@
 	1 1 1.00 45.00 90.00
 	 2250 1500 2100 1800
-4 0 0 50 -1 0 10 0.0000 2 120 240 3075 2025 OS\001
-4 1 0 50 -1 0 10 0.0000 2 180 1260 2325 2025 $\\Leftrightarrow$\001
-4 1 0 50 -1 0 10 0.0000 2 180 1260 2925 2025 $\\Leftrightarrow$\001
+4 0 0 50 -1 0 9 0.0000 2 120 240 3075 2025 OS\001
+4 1 0 50 -1 0 9 0.0000 2 180 1260 2325 2025 $\\Leftrightarrow$\001
+4 1 0 50 -1 0 9 0.0000 2 180 1260 2925 2025 $\\Leftrightarrow$\001
Index: doc/papers/llheap/figures/SingleHeap.fig
===================================================================
--- doc/papers/llheap/figures/SingleHeap.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/SingleHeap.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -10,13 +10,13 @@
 6 1500 1200 2100 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350
-4 1 0 50 -1 0 10 0.0000 2 165 465 1800 1425 T$_2$\001
+4 1 0 50 -1 0 9 0.0000 2 165 465 1800 1425 T$_2$\001
 -6
 6 1050 1200 1650 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
-4 1 0 50 -1 0 10 0.0000 2 165 465 1350 1425 T$_1$\001
+4 1 0 50 -1 0 9 0.0000 2 165 465 1350 1425 T$_1$\001
 -6
 6 1950 1200 2550 1500
 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350
-4 1 0 50 -1 0 10 0.0000 2 165 465 2250 1425 T$_3$\001
+4 1 0 50 -1 0 9 0.0000 2 165 465 2250 1425 T$_3$\001
 -6
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
@@ -34,5 +34,5 @@
 	1 1 1.00 45.00 90.00
 	 1800 1500 1800 1800
-4 1 0 50 -1 0 10 0.0000 2 165 495 1800 2025 H$_1$\001
-4 1 0 50 -1 0 10 0.0000 2 180 1260 2100 2025 $\\Leftrightarrow$\001
-4 0 0 50 -1 0 10 0.0000 2 120 240 2250 2025 OS\001
+4 1 0 50 -1 0 9 0.0000 2 165 495 1800 2025 H$_1$\001
+4 1 0 50 -1 0 9 0.0000 2 180 1260 2100 2025 $\\Leftrightarrow$\001
+4 0 0 50 -1 0 9 0.0000 2 120 240 2250 2025 OS\001
Index: doc/papers/llheap/figures/decreasing.fig
===================================================================
--- doc/papers/llheap/figures/decreasing.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
+++ doc/papers/llheap/figures/decreasing.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -0,0 +1,49 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 3600 1125 5700 2400
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 5100 1575 5100 1800
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 3600 1575 5700 1575 5700 1800 3600 1800 3600 1575
+2 2 0 0 0 7 60 -1 18 0.000 0 0 -1 0 0 5
+	 5100 1575 5700 1575 5700 1800 5100 1800 5100 1575
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 2
+	1 1 1.00 45.00 90.00
+	 5700 1500 5100 1500
+4 1 0 50 -1 0 9 0.0000 2 105 420 5400 1725 unused\001
+4 1 0 50 -1 0 9 0.0000 2 135 360 4350 1725 object\001
+4 0 0 50 -1 2 9 0.0000 2 135 420 3600 1275 logical\001
+4 1 0 50 -1 0 9 0.0000 2 135 510 4650 1500 96 bytes\001
+4 1 0 50 -1 0 9 0.0000 2 135 510 4350 1950 75 bytes\001
+4 1 0 50 -1 0 9 0.0000 2 135 510 5400 1950 21 bytes\001
+4 2 0 50 -1 0 9 0.0000 2 105 450 5700 2175 internal\001
+4 2 0 50 -1 0 9 0.0000 2 135 825 5700 2325 fragmentation\001
+-6
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 2
+	1 1 1.00 45.00 90.00
+	 1200 1800 1200 2100
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 2
+	1 1 1.00 45.00 90.00
+	 2700 1800 2700 2100
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 1200 1575 3300 1575 3300 1800 1200 1800 1200 1575
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 1200 2100 2700 2100 2700 2325 1200 2325 1200 2100
+2 2 0 1 0 7 60 -1 18 0.000 0 0 -1 0 0 5
+	 1200 2625 3300 2625 3300 2850 1200 2850 1200 2625
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 3450 1125 3450 2850
+4 1 0 50 -1 0 9 0.0000 2 105 285 1950 2025 copy\001
+4 1 0 50 -1 0 9 0.0000 2 135 600 2250 1725 old object\001
+4 1 0 50 -1 0 9 0.0000 2 135 510 2250 1500 96 bytes\001
+4 0 0 50 -1 2 9 0.0000 2 135 1110 1200 1275 physical (3 steps)\001
+4 1 0 50 -1 0 9 0.0000 2 135 645 1875 2250 new object\001
+4 1 0 50 -1 0 9 0.0000 2 135 855 2250 2775 free old object\001
+4 1 0 50 -1 0 9 0.0000 2 135 510 1950 2550 75 bytes\001
Index: doc/papers/llheap/figures/increasing.fig
===================================================================
--- doc/papers/llheap/figures/increasing.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
+++ doc/papers/llheap/figures/increasing.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -0,0 +1,23 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 2700 1575 2700 1800
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 1200 1575 3300 1575 3300 1800 1200 1800 1200 1575
+2 2 0 0 0 7 60 -1 18 0.000 0 0 -1 0 0 5
+	 2700 1575 3300 1575 3300 1800 2700 1800 2700 1575
+4 1 0 50 -1 0 9 0.0000 2 105 495 3000 1725 unused\001
+4 1 0 50 -1 0 9 0.0000 2 150 420 1950 1725 object\001
+4 1 0 50 -1 0 9 0.0000 2 150 585 2250 1500 96 bytes\001
+4 0 0 50 -1 2 9 0.0000 2 150 615 1200 1275 quantize\001
+4 1 0 50 -1 0 9 0.0000 2 150 585 1950 1950 75 bytes\001
+4 1 0 50 -1 0 9 0.0000 2 150 585 3000 1950 21 bytes\001
+4 2 0 50 -1 0 9 0.0000 2 120 525 3300 2175 internal\001
+4 2 0 50 -1 0 9 0.0000 2 165 975 3300 2325 fragmentation\001
Index: doc/papers/llheap/figures/llheap.fig
===================================================================
--- doc/papers/llheap/figures/llheap.fig	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/figures/llheap.fig	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -8,170 +8,231 @@
 -2
 1200 2
-6 1275 1950 1725 2250
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 1275 1950 1725 1950 1725 2250 1275 2250 1275 1950
-4 1 0 50 -1 0 10 0.0000 2 135 360 1500 2175 lock\001
--6
-6 4125 4050 4275 4350
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 4125 20 20 4200 4125 4220 4125
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 4200 20 20 4200 4200 4220 4200
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 4275 20 20 4200 4275 4220 4275
--6
-6 5025 3825 5325 3975
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5100 3900 20 20 5100 3900 5120 3900
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5175 3900 20 20 5175 3900 5195 3900
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5250 3900 20 20 5250 3900 5270 3900
--6
-6 6150 2025 6450 2175
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6225 2100 20 20 6225 2100 6245 2100
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6300 2100 20 20 6300 2100 6320 2100
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6375 2100 20 20 6375 2100 6395 2100
--6
-6 3225 4650 3675 4950
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 3225 4650 3675 4650 3675 4950 3225 4950 3225 4650
-4 1 0 50 -1 0 10 0.0000 2 135 360 3450 4875 lock\001
--6
-6 3750 2325 3900 2700
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	1 1 1.00 45.00 90.00
-	 3825 2325 3825 2550
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 3750 2550 3900 2550 3900 2700 3750 2700 3750 2550
--6
-6 6750 2025 7050 2175
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6825 2100 20 20 6825 2100 6845 2100
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6900 2100 20 20 6900 2100 6920 2100
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6975 2100 20 20 6975 2100 6995 2100
--6
-6 2550 3150 3450 4350
-6 2925 4050 3075 4350
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3000 4125 20 20 3000 4125 3020 4125
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3000 4200 20 20 3000 4200 3020 4200
-1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3000 4275 20 20 3000 4275 3020 4275
--6
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 2550 3375 3450 3375 3450 3600 2550 3600 2550 3375
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 2550 3750 3450 3750 3450 3975 2550 3975 2550 3750
-4 1 0 50 -1 0 10 0.0000 2 180 900 3000 3300 local pools\001
--6
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 2850 1800 2850 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3000 1800 3000 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3150 1800 3150 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3300 1800 3300 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3450 1800 3450 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 2550 1800 2550 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 2400 1950 3600 1950
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 2700 1800 2700 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 2400 2100 3600 2100
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 2400 1800 3600 1800 3600 2400 2400 2400 2400 1800
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 2400 2250 3600 2250
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	1 1 1.00 45.00 90.00
-	 2475 2325 2475 2550
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	1 1 1.00 45.00 90.00
-	 2475 2625 2475 2850
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 2400 2850 2550 2850 2550 3000 2400 3000 2400 2850
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 2400 2550 2550 2550 2550 2700 2400 2700 2400 2550
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	1 1 1.00 45.00 90.00
-	 2925 2175 2925 2550
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	1 1 1.00 45.00 90.00
-	 2925 2625 2925 2850
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 2850 2850 3000 2850 3000 3000 2850 3000 2850 2850
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 2850 2550 3000 2550 3000 2700 2850 2700 2850 2550
-2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3600 1650 3600 2550
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	1 1 1.00 45.00 90.00
-	 3375 2325 3375 2550
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 3225 2550 3525 2550 3525 2700 3225 2700 3225 2550
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 4050 1800 4050 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 4200 1800 4200 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 4350 1800 4350 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 4500 1800 4500 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 4650 1800 4650 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3750 1800 3750 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3600 1950 4800 1950
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3900 1800 3900 2400
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3600 2100 4800 2100
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 3600 1800 4800 1800 4800 2400 3600 2400 3600 1800
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 3600 2250 4800 2250
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	1 1 1.00 45.00 90.00
-	 4125 2175 4125 2550
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 4050 2550 4200 2550 4200 2700 4050 2700 4050 2550
-2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 4800 1650 4800 2550
-2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 5400 1650 5400 2550
-2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 6000 1650 6000 2550
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 4800 1800 6600 1800 6600 2400 4800 2400 4800 1800
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	1 1 1.00 45.00 90.00
-	 4575 2625 4575 2850
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	1 1 1.00 45.00 90.00
-	 4575 2325 4575 2550
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 4425 2550 4725 2550 4725 2700 4425 2700 4425 2550
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 4425 2850 4725 2850 4725 3000 4425 3000 4425 2850
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 3750 3375 4650 3375 4650 3600 3750 3600 3750 3375
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 3750 3750 4650 3750 4650 3975 3750 3975 3750 3750
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 3825 4650 5325 4650 5325 4950 3825 4950 3825 4650
+6 3000 3375 3150 3675
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3075 3450 20 20 3075 3450 3095 3450
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3075 3525 20 20 3075 3525 3095 3525
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3075 3600 20 20 3075 3600 3095 3600
+-6
+6 3675 1950 3900 2100
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 3750 2025 20 20 3750 2025 3750 2005
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 3825 2025 20 20 3825 2025 3825 2005
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 3900 2025 20 20 3900 2025 3900 2005
+-6
+6 5475 1950 5700 2100
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 5550 2025 20 20 5550 2025 5550 2005
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 5625 2025 20 20 5625 2025 5625 2005
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 5700 2025 20 20 5700 2025 5700 2005
+-6
+6 4800 3375 4950 3675
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4875 3450 20 20 4875 3450 4895 3450
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4875 3525 20 20 4875 3525 4895 3525
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4875 3600 20 20 4875 3600 4895 3600
+-6
+6 4200 3900 4500 4125
+4 1 0 50 -1 0 9 0.0000 2 105 210 4350 4075 HB\001
+-6
+6 3600 3900 3900 4125
+4 1 0 50 -1 0 9 0.0000 2 105 210 3750 4075 HB\001
+-6
+6 3300 3900 3600 4125
+4 1 0 50 -1 0 9 0.0000 2 105 210 3450 4075 HB\001
+-6
+6 2850 3900 3150 4125
+4 1 0 50 -1 0 9 0.0000 2 105 210 3000 4075 HB\001
+-6
+6 2400 3900 2700 4125
+4 1 0 50 -1 0 9 0.0000 2 105 210 2550 4075 HB\001
+-6
+6 5775 1950 6000 2100
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 5850 2025 20 20 5850 2025 5850 2005
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 5925 2025 20 20 5925 2025 5925 2005
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 6000 2025 20 20 6000 2025 6000 2005
+-6
+6 1125 1275 2250 3750
+6 1200 3375 2250 3750
 2 2 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
-	 1200 3900 1950 3900 1950 4425 1200 4425 1200 3900
+	 1200 3375 2250 3375 2250 3750 1200 3750 1200 3375
+4 1 0 50 -1 0 9 0.0000 2 135 675 1725 3525 fast lookup\001
+4 1 0 50 -1 0 9 0.0000 2 105 285 1725 3675 table\001
+-6
+6 1200 2925 2250 3225
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 1200 2925 2250 2925 2250 3225 1200 3225 1200 2925
+4 1 0 50 -1 0 9 0.0000 2 105 720 1725 3150 bucket sizes\001
+-6
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 1125 1500 2175 1500 2175 2775 1125 2775 1125 1500
+4 1 0 50 -1 0 9 0.0000 2 105 315 1650 1650 locks\001
+4 1 0 50 -1 0 9 0.0000 2 105 555 1650 1800 sbrk start\001
+4 1 0 50 -1 0 9 0.0000 2 135 900 1650 2700 free array space\001
+4 1 0 50 -1 0 9 0.0000 2 135 705 1650 1425 heap master\001
+4 1 0 50 -1 0 9 0.0000 2 105 690 1650 2250 sbrk extend\001
+4 1 0 50 -1 0 9 0.0000 2 135 765 1650 2400 free heap top\001
+4 1 0 50 -1 0 9 0.0000 2 135 855 1650 2550 last heap array\001
+4 1 0 50 -1 0 9 0.0000 2 135 900 1650 1950 sbrk remaining\001
+4 1 0 50 -1 0 9 0.0000 2 105 510 1650 2100 sbrk end\001
+-6
+6 6825 3075 7575 3600
 2 2 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
-	 1200 3000 1800 3000 1800 3525 1200 3525 1200 3000
-4 2 0 50 -1 0 10 0.0000 2 135 300 2325 1950 lock\001
-4 2 0 50 -1 0 10 0.0000 2 120 270 2325 2100 size\001
-4 2 0 50 -1 0 10 0.0000 2 120 270 2325 2400 free\001
-4 2 0 50 -1 0 10 0.0000 2 165 495 2325 2250 (away)\001
-4 1 0 50 -1 0 10 0.0000 2 180 1455 4575 4575 global pool (sbrk)\001
-4 1 0 50 -1 0 10 0.0000 2 180 900 4200 3300 local pools\001
-4 1 0 50 -1 0 10 0.0000 2 180 1695 4350 1425 global heaps (mmap)\001
-4 1 0 50 -1 0 10 0.0000 2 180 810 3000 1725 heap$_1$\001
-4 1 0 50 -1 0 10 0.0000 2 180 810 4200 1725 heap$_2$\001
-4 1 0 50 -1 0 10 0.0000 2 120 255 1500 3150 fast\001
-4 1 0 50 -1 0 10 0.0000 2 180 495 1500 3300 lookup\001
-4 1 0 50 -1 0 10 0.0000 2 135 330 1500 3450 table\001
-4 1 0 50 -1 0 10 0.0000 2 120 315 1575 4050 stats\001
-4 1 0 50 -1 0 10 0.0000 2 120 600 1575 4200 counters\001
-4 1 0 50 -1 0 10 0.0000 2 135 330 1575 4350 table\001
+	 6825 3075 7575 3075 7575 3600 6825 3600 6825 3075
+4 1 0 50 -1 0 9 0.0000 2 90 270 7200 3225 stats\001
+4 1 0 50 -1 0 9 0.0000 2 90 495 7200 3375 counters\001
+4 1 0 50 -1 0 9 0.0000 2 105 285 7200 3525 table\001
+-6
+6 7950 2775 8100 3075
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 8025 2850 20 20 8025 2850 8045 2850
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 8025 2925 20 20 8025 2925 8045 2925
+1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 8025 3000 20 20 8025 3000 8045 3000
+-6
+6 7935 4005 8100 4035
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 7950 4025 20 20 7950 4025 7950 4005
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 8025 4025 20 20 8025 4025 8025 4005
+1 3 0 1 0 0 50 -1 20 0.000 1 1.5708 8100 4025 20 20 8100 4025 8100 4005
+-6
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4275 1725 5475 1725 5475 2400 4275 2400 4275 1725
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 2475 1725 3675 1725 3675 2400 2475 2400 2475 1725
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 2625 2700 3525 2700 3525 2925 2625 2925 2625 2700
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 4800 3900 4800 4125
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 3300 3900 3300 4125
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 3900 3900 3900 4125
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4425 2700 5325 2700 5325 2925 4425 2925 4425 2700
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4425 3075 5325 3075 5325 3300 4425 3300 4425 3075
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 2625 3075 3975 3075 3975 3300 2625 3300 2625 3075
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 4
+	1 1 1.00 45.00 90.00
+	 4500 2275 4350 2275 4350 3600 4500 3600
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 4
+	1 1 1.00 45.00 90.00
+	 2700 2275 2550 2275 2550 3600 2700 3600
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 3600 3900 3600 4125
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 2700 3900 2700 4125
+2 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
+	 2400 1500 3975 1500 3975 2475 2400 2475 2400 1500
+2 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
+	 4200 1500 5775 1500 5775 2475 4200 2475 4200 1500
+2 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
+	 2400 3900 6000 3900 6000 4125 2400 4125 2400 3900
+2 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
+	 7275 3900 7875 3900 7875 4125 7275 4125 7275 3900
+2 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
+	 6750 3900 7125 3900 7125 4125 6750 4125 6750 3900
+2 1 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 6075 1350 6075 3675
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 7125 1725 7125 2025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 7275 1725 7275 2025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 7425 1725 7425 2025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 7575 1725 7575 2025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 7725 1725 7725 2025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 6675 1725 7875 1725
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 6975 1725 6975 2025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 6675 1875 7875 1875
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 6825 1725 6825 2025
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 6675 1725 7875 1725 7875 2325 6675 2325 6675 1725
+2 2 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
+	 6675 2025 7875 2025 7875 2175 6675 2175 6675 2025
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 6825 2025 6825 2175
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 6975 2025 6975 2175
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 7125 2025 7125 2175
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 7275 2025 7275 2175
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 7425 2025 7425 2175
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 7575 2025 7575 2175
+2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 7725 2025 7725 2175
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 6825 2175 6825 2325
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 6975 2175 6975 2325
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 7125 2175 7125 2325
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 7275 2175 7275 2325
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 7425 2175 7425 2325
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 7575 2175 7575 2325
+2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
+	 7725 2175 7725 2325
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 45.00 90.00
+	 6750 2250 6750 2475
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 45.00 90.00
+	 7200 2250 7200 2475
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 45.00 90.00
+	 7650 2250 7650 2475
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 45.00 90.00
+	 6750 2550 6750 2775
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 6675 2475 6825 2475 6825 2625 6675 2625 6675 2475
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 45.00 90.00
+	 7200 2550 7200 2775
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 7500 2475 7800 2475 7800 2625 7500 2625 7500 2475
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 7100 2475 7325 2475 7325 2625 7100 2625 7100 2475
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 6675 2775 6825 2775 6825 2925 6675 2925 6675 2775
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 7100 2775 7325 2775 7325 2925 7100 2925 7100 2775
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+	1 1 1.00 45.00 90.00
+	 7800 2100 8025 2100 8025 2475
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 7950 2475 8100 2475 8100 2625 7950 2625 7950 2475
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 45.00 90.00
+	 8025 2550 8025 2775
+4 1 0 50 -1 0 9 0.0000 2 135 795 4875 1650 heap$_{16}$\001
+4 1 0 50 -1 2 9 0.0000 2 135 1035 4875 1875 per heap structs\001
+4 1 0 50 -1 0 9 0.0000 2 135 990 3075 2175 buffer remaining\001
+4 1 0 50 -1 0 9 0.0000 2 105 645 3075 2325 buffer start\001
+4 1 0 50 -1 0 9 0.0000 2 135 825 3075 2025 next free heap\001
+4 1 0 50 -1 2 9 0.0000 2 135 1035 3075 1875 per heap structs\001
+4 1 0 50 -1 0 9 0.0000 2 135 570 3075 1650 heap$_0$\001
+4 1 0 50 -1 0 9 0.0000 2 135 720 3075 2625 heap buffers\001
+4 1 0 50 -1 0 9 0.0000 2 135 825 4875 2025 next free heap\001
+4 1 0 50 -1 0 9 0.0000 2 135 900 3150 1425 heap array$_0$\001
+4 1 0 50 -1 0 9 0.0000 2 135 900 4950 1425 heap array$_1$\001
+4 2 0 50 -1 0 9 0.0000 2 105 255 2325 4050 sbrk\001
+4 1 0 50 -1 0 9 0.0000 2 90 255 2400 4275 start\001
+4 1 0 50 -1 0 9 0.0000 2 105 645 4875 2325 buffer start\001
+4 1 0 50 -1 0 9 0.0000 2 135 720 4875 2625 heap buffers\001
+4 1 0 50 -1 0 9 0.0000 2 135 990 4875 2175 buffer remaining\001
+4 1 0 50 -1 0 9 0.0000 2 135 600 5400 4050 remaining\001
+4 2 0 50 -1 0 9 0.0000 2 105 375 6675 4050 mmap\001
+4 1 0 50 -1 2 9 0.0000 2 135 1245 7200 1425 per heap structures\001
+4 2 0 50 -1 0 9 0.0000 2 105 225 6600 2025 size\001
+4 2 0 50 -1 0 9 0.0000 2 135 270 6600 1875 heap\001
+4 1 0 50 -1 0 9 0.0000 2 105 465 7275 1650 freelists\001
+4 2 0 50 -1 0 9 0.0000 2 105 210 6600 2325 free\001
+4 2 0 50 -1 0 9 0.0000 2 90 405 6600 2175 remote\001
+4 1 0 50 -1 0 9 0.0000 2 105 210 6000 4275 end\001
Index: doc/papers/llheap/local.bib
===================================================================
--- doc/papers/llheap/local.bib	(revision 8e90fd6bdd26acf67f6a5bae4b294ee9084241dc)
+++ doc/papers/llheap/local.bib	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -35,14 +35,7 @@
 }
 
-@article{Chicken,
-    keywords	= {Chicken},
-    author	= {Doug Zongker},
-    title	= {Chicken Chicken Chicken: Chicken Chicken},
-    year	= 2006
-}
-
 @misc{TBB,
     keywords 	= {Intel, TBB},
-    key	= {TBB},
+    key		= {TBB},
     title 	= {Thread Building Blocks},
     howpublished= {Intel, \url{https://www.threadingbuildingblocks.org}},
@@ -50,6 +43,15 @@
 }
 
+@misc{litemalloc,
+    keywords	= {lock free},
+    author	= {Ivan Tkatchev and Veniamin Gvozdikov},
+    title	= {Lite Malloc},
+    month	= jul,
+    year	= 2018,
+    howpublished= {\url{https://github.com/Begun/lockfree-malloc}},
+}
+
 @manual{www-cfa,
-    key	= {CFA},
+    key		= {CFA},
     keywords 	= {Cforall},
     author	= {C$\forall$},
@@ -65,12 +67,4 @@
     year	= 2015,
     note	= {\url{https://www.iso.org/standard/66343.html}},
-}
-
-@misc{BankTransfer,
-    key	= {Bank Transfer},
-    keywords 	= {Bank Transfer},
-    title 	= {Bank Account Transfer Problem},
-    howpublished	= {Wiki Wiki Web, \url{http://wiki.c2.com/?BankAccountTransferProblem}},
-    year	= 2010
 }
 
@@ -164,13 +158,13 @@
 @article{Berger00,
     author	= {Emery D. Berger and Kathryn S. McKinley and Robert D. Blumofe and Paul R. Wilson},
-    title	= {Hoard: A Scalable Memory Allocator for Multithreaded Applications},
-    booktitle	= {International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX)},
-    journal	= sigplan,
-    volume	= 35,
-    number	= 11,
+    title	= {Hoard: a scalable memory allocator for multithreaded applications},
+    publisher	= {Association for Computing Machinery},
+    address	= {New York, NY, USA},
+    volume	= 28,
+    number	= 5,
+    journal	= {SIGARCH Comput. Archit. News},
+    year	= {2000},
     month	= nov,
-    year	= 2000,
     pages	= {117-128},
-    note	= {International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX)},
 }
 
@@ -178,5 +172,5 @@
     author	= {Emery D. Berger and Benjamin G. Zorn and Kathryn S. McKinley},
     title	= {Reconsidering Custom Memory Allocation},
-    organization= {Proceedings of the 17th ACM SIGPLAN Conference on Object-Oriented Programming: Systems, Languages, and Applications (OOPSLA) 2002},
+    organization= {Proc. of the 17th ACM SIGPLAN Conference on Object-Oriented Programming: Systems, Languages, and Applications (OOPSLA) 2002},
     month	= nov,
     year	= 2002,
@@ -194,5 +188,5 @@
     pages	= {176-185},
     year	= 1999,
-    url		= {http://citeseer.ist.psu.edu/article/larson98memory.html}
+    note	= {\url{http://citeseer.ist.psu.edu/article/larson98memory.html}},
 }
 
@@ -204,5 +198,5 @@
     address	= {Chalmers University of Technology},
     year	= 2004,
-    url		= {http://citeseer.ist.psu.edu/gidenstam04allocating.html} 
+    note	= {\url{http://citeseer.ist.psu.edu/gidenstam04allocating.html}}, 
 }
 
@@ -213,5 +207,5 @@
     year	= 2002,
     month	= aug,
-    url		= {http://citeseer.ist.psu.edu/article/berger02memory.html}
+    note	= {\url{http://citeseer.ist.psu.edu/article/berger02memory.html}},
 }
 
@@ -260,5 +254,5 @@
     month	= jul,
     year	= 2001,
-    url		= {http://www.ddj.com/mobile/184404685?pgno=1}
+    note	= {\url{http://www.ddj.com/mobile/184404685?pgno=1}},
 }
 
@@ -271,9 +265,9 @@
 
 @misc{tcmalloc,
-    author	= {Sanjay Ghemawat and Paul Menage},
-    title	= {tcmalloc version 1.5},
-    month	= jan,
-    year	= 2010,
-    howpublished= {\url{http://google-perftools.googlecode.com/files/google-perftools-1.5.tar.gz}},
+    author	= {{multiple contributors}},
+    title	= {TCMalloc : Thread-Caching Malloc},
+    month	= dec,
+    year	= 2024,
+    howpublished= {\url{https://gperftools.github.io/gperftools/tcmalloc.html}},
 }
 
@@ -282,10 +276,10 @@
     title	= {Scalable Locality-Conscious Multithreaded Memory Allocation},
     organization= {International Symposium on Memory Management (ISSM'06)},
+    year	= 2006,
     month	= jun,
-    year	= 2006,
-    pages	= {84-94},
     location	= {Ottawa, Ontario, Canada},
     publisher	= {ACM},
     address	= {New York, NY, USA},
+    pages	= {84-94},
 }
 
@@ -294,4 +288,12 @@
     title	= {Streamflow},
     howpublished= {\url{http://people.cs.vt.edu/~scschnei/streamflow}},
+}
+
+@misc{llheap,
+    author	= {Peter A. Buhr and Mubeen Zulfiqar},
+    title	= {llheap: low-latency memory allocator},
+    year	= 2025,
+    month	= jun,
+    howpublished= {\url{https://github.com/cforall/llheap}},
 }
 
@@ -303,5 +305,5 @@
     year	= 1994,
     month	= nov,
-    url		= {http://citeseer.ist.psu.edu/article/blumofe94scheduling.html}
+    note	= {\url{http://citeseer.ist.psu.edu/article/blumofe94scheduling.html}},
 }
 
@@ -322,5 +324,5 @@
     pages	= {177-186},
     year	= 1993,
-    url		= {http://citeseer.ist.psu.edu/grunwald93improving.html}
+    note	= {\url{http://citeseer.ist.psu.edu/grunwald93improving.html}},
 }
 
@@ -331,5 +333,5 @@
     address	= {Kinross Scotland, UK},
     year	= 1995,
-    url		= {http://citeseer.ist.psu.edu/wilson95dynamic.html} 
+    note	= {\url{http://citeseer.ist.psu.edu/wilson95dynamic.html}}, 
 }
 
@@ -341,5 +343,5 @@
     isbn	= {1-58113-338-3},
     pages	= {9-17},
-    location	= {San Jose, California, United States},
+    location	= {San Jose, CA, USA},
     publisher	= {ACM Press},
     address	= {New York, NY, USA}
@@ -399,5 +401,5 @@
     author	= {Paul R. Wilson},
     title	= {Locality of Reference, Patterns in Program Behavior, Memory Management, and Memory Hierarchies},
-    url		= {http://citeseer.ist.psu.edu/337869.html}
+    note	= {\url{http://citeseer.ist.psu.edu/337869.html}},
 }
 
@@ -421,5 +423,5 @@
     isbn	= {0-89791-598-4},
     pages	= {177-186},
-    location	= {Albuquerque, New Mexico, United States},
+    location	= {Albuquerque, New Mexico, USA},
     publisher	= {ACM Press},
     address	= {New York, NY, USA}
@@ -432,5 +434,5 @@
     month	= feb,
     year	= 2001,
-    url		= {http://www.ddj.com/cpp/184403766}
+    note	= {\url{http://www.ddj.com/cpp/184403766}},
 }
 
@@ -460,12 +462,11 @@
     author	= {Xianglong Huang and Brian T Lewis and Kathryn S McKinley},
     title	= {Dynamic Code Management: Improving Whole Program Code Locality in Managed Runtimes},
-    organization= {VEE '06: Proceedings of the 2nd international conference on Virtual execution environments},
+    organization= {VEE '06: Proc. of the 2nd International Conf. on Virtual Execution Environments},
     year	= 2006,
-    isbn	= {1-59593-332-6},
-    pages	= {133-143},
     location	= {Ottawa, Ontario, Canada},
     publisher	= {ACM Press},
-    address	= {New York, NY, USA}
- }
+    address	= {New York, NY, USA},
+    pages	= {133-143},
+}
 
 @inproceedings{Herlihy03,
@@ -475,5 +476,5 @@
     year	= 2003,
     month	= may,
-    url		= {http://www.cs.brown.edu/~mph/publications.html}
+    note	= {\url{http://www.cs.brown.edu/~mph/publications.html}},
 }
 
@@ -485,5 +486,5 @@
     address	= {130 Lytton Avenue, Palo Alto, CA 94301 and Campus Box 430, Boulder, CO 80309},
     year	= 1993,
-    url		= {http://citeseer.ist.psu.edu/detlefs93memory.html} 
+    note	= {\url{http://citeseer.ist.psu.edu/detlefs93memory.html}},
 }
 
@@ -530,5 +531,5 @@
     address	= {Chalmers University of Technology},
     year	= 2004,
-    url		= {http://citeseer.ist.psu.edu/gidenstam04allocating.html} 
+    note	= {\url{http://citeseer.ist.psu.edu/gidenstam04allocating.html}}, 
 }
 
@@ -539,5 +540,5 @@
     year	= 2002,
     month	= aug,
-    url		= {http://citeseer.ist.psu.edu/article/berger02memory.html}
+    note	= {\url{http://citeseer.ist.psu.edu/article/berger02memory.html}},
 }
 
@@ -558,5 +559,5 @@
 @misc{tbbmalloc,
     key		= {tbbmalloc},
-    author	= {multiple contributors},
+    author	= {{multiple contributors}},
     title	= {Threading Building Blocks},
     month	= mar,
@@ -590,5 +591,5 @@
 @misc{glibc,
     key		= {glibc},
-    author	= {multiple contributors},
+    author	= {{multiple contributors}},
     title	= {glibc version 2.31},
     month	= feb,
@@ -599,9 +600,17 @@
 @misc{jemalloc,
     key		= {jemalloc},
-    author	= {multiple contributors},
+    author	= {{multiple contributors}},
     title	= {jemalloc version 5.2.1},
     month	= apr,
     year	= 2022,
-    howpublished= {\url{https://github.com/jemalloc/jemalloc}{https://github.com/jemalloc/jemalloc}},
+    howpublished= {\url{https://github.com/jemalloc/jemalloc}},
+}
+
+@misc{Evans06,
+    author	= {Jason Evans},
+    title	= {A Scalable Concurrent \texttt{malloc(3)} Implementation for {FreeBSD}},
+    month	= apr,
+    year	= 2006,
+    howpublished= {\url{https://papers.freebsd.org/2006/bsdcan/evans-jemalloc.files/evans-jemalloc-paper.pdf}},
 }
 
@@ -631,9 +640,9 @@
     author	= {R. Blumofe and C. Leiserson},
     title	= {Scheduling Multithreaded Computations by Work Stealing},
-    booktitle	= {Proceedings of the 35th Annual Symposium on Foundations of Computer Science, Santa Fe, New Mexico.},
+    organization= {Proceedings of the 35th Annual Symposium on Foundations of Computer Science, Santa Fe, New Mexico.},
     pages	= {356-368},
     year	= 1994,
     month	= nov,
-    url		= {http://citeseer.ist.psu.edu/article/blumofe94scheduling.html}
+    note	= {\url{http://citeseer.ist.psu.edu/article/blumofe94scheduling.html}},
 }
 
@@ -647,5 +656,4 @@
     issn	= {0164-1212},
     pages	= {107-118},
-    doi		= {http://dx.doi.org/10.1016/S0164-1212(00)00122-9},
     publisher	= {Elsevier Science Inc.},
     address	= {New York, NY, USA}
@@ -655,5 +663,5 @@
     author	= {Paul R. Wilson},
     title	= {Locality of Reference, Patterns in Program Behavior, Memory Management, and Memory Hierarchies},
-    url		= {http://citeseer.ist.psu.edu/337869.html}
+    note	= {\url{http://citeseer.ist.psu.edu/337869.html}},
 }
 
@@ -661,12 +669,21 @@
     author	= {Dirk Grunwald and Benjamin Zorn and Robert Henderson},
     title	= {Improving the Cache Locality of Memory Allocation},
-    booktitle	= {PLDI '93: Proceedings of the ACM SIGPLAN 1993 conference on Programming language design and implementation},
+    organization= {PLDI '93: Proceedings of the ACM SIGPLAN 1993 conference on Programming language design and implementation},
     year	= 1993,
-    isbn	= {0-89791-598-4},
     pages	= {177-186},
-    location	= {Albuquerque, New Mexico, United States},
-    doi		= {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/155090.155107},
+    location	= {Albuquerque, New Mexico, USA},
     publisher	= {ACM Press},
     address	= {New York, NY, USA}
+}
+
+@inproceedings{Bolosky93,
+    author	= {William J. Bolosky and Michael L. Scott},
+    title	= {False Sharing and its Effect on Shared Memory Performance},
+    organization= {4th Symp. on Experiences with Distributed and Multiprocessor Systems (SEDMS)},
+    year	= 1993,
+    location	= {San Diego, CA, USA},
+    publisher	= {USENIX Association},
+    address	= {Berkeley, CA, USA},
+    note	= {\url{https://www.cs.rochester.edu/u/scott/papers/1993\_SEDMS\_false\_sharing.pdf}},
 }
 
@@ -677,5 +694,13 @@
     month	= feb,
     year	= 2001,
-    url		= {http://www.ddj.com/cpp/184403766}
+    note	= {\url{http://www.ddj.com/cpp/184403766}},
+}
+
+@misc{Desnoyers19,
+    author	= {Mathieu Desnoyers},
+    title	= {The 5-year journey to bring restartable sequences to Linux},
+    month	= feb,
+    year	= 2019,
+    howpublished={\url{https://www.efficios.com/blog/2019/02/08/linux-restartable-sequences}},
 }
 
@@ -698,7 +723,38 @@
     author	= {M. Herlihy and V. Luchangco and M. Moir},
     title	= {Obstruction-free Synchronization: Double-ended Queues as an Example},
-    booktitle	= {Proceedings of the 23rd IEEE International Conference on Distributed Computing Systems},
+    organization= {Proceedings of the 23rd IEEE International Conference on Distributed Computing Systems},
     year	= 2003,
     month	= may,
-    url		= {http://www.cs.brown.edu/~mph/publications.html}
-}
+    note	= {\url{http://www.cs.brown.edu/~mph/publications.html}},
+}
+
+@article{Fatourou12,
+    keywords	= {synchronization techniques, hierarchical algorithms, concurrent data structures, combining, blocking algorithms},
+    author	= {Panagiota Fatourou and Nikolaos D. Kallimanis},
+    title	= {Revisiting the Combining Synchronization Technique},
+    publisher	= {ACM},
+    address	= {New York, NY, USA},
+    volume	= 47,
+    number	= 8,
+    journal	= {SIGPLAN Not.},
+    year	= 2012,
+    month	= feb,
+    pages	= {257-266},
+}
+
+@manual{Go1.3,
+    keywords 	= {conservative garbage collection},
+    title	= {Go 1.3 Release Notes},
+    month	= jun,
+    year	= 2014,
+    note	= {\url{https://go.dev/doc/go1.3\#garbage_collector}},
+}
+
+@misc{JavaScriptGC,
+    keywords 	= {JavaScript, garbage collection},
+    author	= {Steve Fink},
+    title 	= {JavaScript: Clawing Our Way Back To Precision},
+    howpublished= {\url{https://blog.mozilla.org/javascript/2013/07/18/clawing-our-way-back-to-precision/}},
+    month	= jul,
+    year	= 2013,
+}
Index: doc/papers/llheap/plotcacheL.gp
===================================================================
--- doc/papers/llheap/plotcacheL.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
+++ doc/papers/llheap/plotcacheL.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -0,0 +1,39 @@
+DIR=GRAPH
+SCALE=100000000
+set macros
+set output GRAPH.".cacheL.tex"
+#set pointsize 2.0
+set grid linetype 0
+set xtics (4,8,16,32)
+set logscale x
+set yrange [2:*]
+set ytics 2
+#set format y "%3g"
+#set logscale y
+
+if ( GRAPH eq "prolog" ) {
+	set terminal pslatex size 1.15, 2.125 color solid 9;
+#	set ylabel sprintf( "Elapse Time, sec.", SCALE ) offset 1,0
+#	set key maxrows 1 at screen 4.5, screen 1.03 samplen 1 font "Helvetica,18"
+	set key off
+	set title "ARM"
+} else {
+	set terminal pslatex size 1.15, 2.125 color solid 9;
+	set key off
+	if ( GRAPH eq "swift" ) {
+	set title "AMD"
+} else { if ( GRAPH eq "java" ) {
+	set title "Intel"
+} else {
+	print "unknown machine name"
+	exit
+}}}
+
+plot DIR."/cacheL" \
+	   i 0 using 1:4 title columnheader(1) lt rgb "blue"	pt  2  ps 2 lw 1, \
+	'' i 1 using 1:4 title columnheader(1) lt rgb "dark-green"	pt  3  ps 2 lw 1, \
+	'' i 2 using 1:4 title columnheader(1) lt rgb "coral"	pt  14  ps 2 lw 4, \
+	'' i 3 using 1:4 title columnheader(1) lt rgb "black"	pt  16  ps 2 lw 2, \
+	'' i 4 using 1:4 title columnheader(1) lt rgb "red"	pt  8  ps 2 lw 2, \
+	'' i 5 using 1:4 title columnheader(1) lt rgb "dark-violet" pt  10  ps 2 lw 1, \
+	'' i 6 using 1:4 title columnheader(1) lt rgb "brown"	pt  4  ps 2 lw 1
Index: doc/papers/llheap/plotcacheS.gp
===================================================================
--- doc/papers/llheap/plotcacheS.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
+++ doc/papers/llheap/plotcacheS.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -0,0 +1,38 @@
+DIR=GRAPH
+SCALE=100000000
+set macros
+set output GRAPH.".cacheS.tex"
+#set pointsize 2.0
+set grid linetype 0
+set xtics (4,8,16,32)
+set logscale x
+set yrange [2:*]
+#set ytics 2
+#set format y "%3g"
+#set logscale y
+
+if ( GRAPH eq "prolog" ) {
+	set terminal pslatex size 1.25, 2.125 color solid 9;
+	set ylabel sprintf( "Elapse Time, sec.", SCALE ) # offset 1,0
+	set key maxrows 1 at screen 4.7, screen 1.03 samplen 0.1 font "Helvetica,18"
+	set title "ARM"
+} else {
+	set terminal pslatex size 1.15, 2.125 color solid 9;
+	set key off
+	if ( GRAPH eq "swift" ) {
+	set title "AMD"
+} else { if ( GRAPH eq "java" ) {
+	set title "Intel"
+} else {
+	print "unknown machine name"
+	exit
+}}}
+
+plot DIR."/cacheS" \
+	   i 0 using 1:4 title columnheader(1) lt rgb "blue"	pt  2  ps 2 lw 1, \
+	'' i 1 using 1:4 title columnheader(1) lt rgb "dark-green"	pt  3  ps 2 lw 1, \
+	'' i 2 using 1:4 title columnheader(1) lt rgb "coral"	pt  14  ps 2 lw 4, \
+	'' i 3 using 1:4 title columnheader(1) lt rgb "black"	pt  16  ps 2 lw 2, \
+	'' i 4 using 1:4 title columnheader(1) lt rgb "red"	pt  8  ps 2 lw 2, \
+	'' i 5 using 1:4 title columnheader(1) lt rgb "dark-violet" pt  10  ps 2 lw 1, \
+	'' i 6 using 1:4 title columnheader(1) lt rgb "brown"	pt  4  ps 2 lw 1
Index: doc/papers/llheap/plotexp.gp
===================================================================
--- doc/papers/llheap/plotexp.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
+++ doc/papers/llheap/plotexp.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -0,0 +1,115 @@
+set terminal pslatex size 6.125, 7.6 color solid 9;
+#set terminal postscript portrait enhanced size 7.5, 10. color solid 9.5;
+#set terminal wxt size 950,1250
+
+DIR=GRAPH
+
+set macros
+set output GRAPH.".tex"
+#set pointsize 2.0
+set grid linetype 0
+set xtics (4,8,16,32)
+set format y "%4g"
+set logscale x
+set logscale y
+
+array titles[13];
+titles[1] = "x = malloc( 0 ) / free( x )"
+titles[2] = "free( NULL )"
+titles[3] = "x = malloc( 42 ) / free( x )"
+titles[4] = "x[0..100) = malloc( 42 ) / free( x[0..100 ) )"
+titles[5] = "x[0..1000) = malloc( 42 ) / free( x[0..1000 ) )"
+titles[6] = "x[0..100) = malloc( 42 ) / free( x(100..0] )"
+titles[7] = "x[0..1000) = malloc( 42 ) / free( x(1000..0] )"
+titles[8] = "x = malloc( [0..100) ) / free( x )"
+titles[9] = "x[0..100) = malloc( [0..100) ) / free( x[0..100) )"
+titles[10] = "x[0..1000) = malloc( [0..1000) ) / free( x[0..1000) )"
+titles[11] = "x[0..100) = malloc( [0..100) ) / free( x(100..0] )"
+titles[12] = "x[0..1000) = malloc( [0..1000) ) / free( x(1000..0] )"
+titles[13] = "MMAP repeats experiments 3-7 with malloc( 1Mb )"
+
+array yrange[17];
+yrange[1] = 300
+yrange[2] = 30
+yrange[3] = 300
+yrange[4] = 300
+yrange[5] = 3000
+yrange[6] = 300
+yrange[7] = 3000
+yrange[8] = 300
+yrange[9] = 300
+yrange[10] = 3000
+yrange[11] = 300
+yrange[12] = 3000
+yrange[13] = 300
+yrange[14] = 300
+yrange[15] = 300
+yrange[16] = 300
+yrange[17] = 300
+
+set xrange [3:38]
+
+top=.992
+set label "Allocators" at screen .46, screen top
+set key at screen 0.57, screen top - .01
+set label "Experiment \\#" at screen .65, screen top
+do for [x = 0:11] {
+   set label sprintf( "%2d. %s", x+1, titles[x+1] ) at screen .58, screen top - .02 - x * 0.016
+}
+set label titles[13] at screen .59, screen top - .02 - 12 * 0.016 - 0.001
+set label "SBRK" at screen 0, screen 1.01
+set label "MMAP" at screen 0, screen 0.245
+
+set multiplot layout 4,5 rowsfirst margins 0.08,0.96,0.03,0.97 spacing 0.06,0.08
+do for [x = 0:1] {
+	if ( x == 0 ) {
+		set ylabel "Elapse Time, sec., log scale" offset 1.5,0
+		set yrange [20:yrange[x+1]]
+		set ytics (20,50,100,yrange[x+1])
+	} else {
+		unset ylabel
+		set yrange [10:yrange[x+1]]
+		set ytics (10,15,20,yrange[x+1])
+	}
+	#set title titles[x+1]
+	set title "Experiment ".(x+1)
+	plot DIR."/testdata.exp" \
+		   i x*7   using 1:2:3 title columnheader(1) with errorbars lt rgb "blue"	pt  2  ps 2 lw 1, \
+		'' i x*7+1 using 1:2:3 title columnheader(1) with errorbars lt rgb "dark-green"	pt  3  ps 2 lw 1, \
+		'' i x*7+2 using 1:2:3 title columnheader(1) with errorbars lt rgb "coral"	pt  14  ps 2 lw 4, \
+		'' i x*7+3 using 1:2:3 title columnheader(1) with errorbars lt rgb "black"	pt  16  ps 2 lw 2, \
+		'' i x*7+4 using 1:2:3 title columnheader(1) with errorbars lt rgb "red"	pt  8  ps 2 lw 2, \
+		'' i x*7+5 using 1:2:3 title columnheader(1) with errorbars lt rgb "dark-violet" pt  10  ps 2 lw 1, \
+		'' i x*7+6 using 1:2:3 title columnheader(1) with errorbars lt rgb "brown"	pt  4  ps 2 lw 1
+}
+set multiplot next
+set multiplot next
+set multiplot next
+do for [x = 2:16] {
+	if ( x == 2 || x == 7  || x == 12 ) {
+		set ylabel "Elapse Time, sec., log scale" offset 1.5,0
+	} else {
+		unset ylabel
+	}
+	#set title titles[x+1]
+	if ( x < 12 ) {
+		set title "Experiment ".(x+1)
+		set yrange [20:yrange[x+1]]
+		set ytics (20,50,100,300,1000,yrange[x+1])
+	} else {
+		set title "Experiment ".(x+1-10)
+		set yrange [.1:yrange[x+1]]
+		set ytics (.1,.5,10,25,100,yrange[x+1])
+	}
+	plot DIR."/testdata.exp" \
+		   i x*7   using 1:2:3 title columnheader(1) with errorbars lt rgb "blue"	pt  2  ps 2 lw 1, \
+		'' i x*7+1 using 1:2:3 title columnheader(1) with errorbars lt rgb "dark-green" pt  3  ps 2 lw 1, \
+		'' i x*7+2 using 1:2:3 title columnheader(1) with errorbars lt rgb "coral"	pt  14  ps 2 lw 4, \
+		'' i x*7+3 using 1:2:3 title columnheader(1) with errorbars lt rgb "black"	pt  16  ps 2 lw 2, \
+		'' i x*7+4 using 1:2:3 title columnheader(1) with errorbars lt rgb "red"	pt  8  ps 2 lw 2, \
+		'' i x*7+5 using 1:2:3 title columnheader(1) with errorbars lt rgb "dark-violet" pt  10  ps 2 lw 1, \
+		'' i x*7+6 using 1:2:3 title columnheader(1) with errorbars lt rgb "brown"	pt  4  ps 2 lw 1
+}
+unset multiplot
+
+#test
Index: doc/papers/llheap/plotlarson.gp
===================================================================
--- doc/papers/llheap/plotlarson.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
+++ doc/papers/llheap/plotlarson.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -0,0 +1,35 @@
+set terminal pslatex size 4, 2.0 color solid 9;
+#set terminal postscript portrait enhanced size 7.5, 10. color solid 9.5;
+#set terminal wxt size 950,1250
+
+DIR=GRAPH
+SCALE=1000000
+
+set macros
+set output GRAPH.".larson.tex"
+#set pointsize 2.0
+set grid linetype 0
+set xtics (4,8,16,32)
+set format y "%4g"
+set logscale x
+#set logscale y
+top=.9
+set key at screen 1.3, screen top - .01
+set ylabel sprintf( "Throughput, sec., $\\times$ %g", SCALE ) offset 2,0
+
+array titles[2];
+titles[1] = "30 16 4096 8096 100 4141"
+titles[2] = "5 8 1000 5000 100 4141"
+
+set multiplot layout 1,2 rowsfirst margins 0.2,top,0.03,top spacing 0.24,0.08
+do for [x = 0:1] {
+	set title "args ". titles[x+1]
+	plot DIR."/larsondata" \
+		   i x*7 using 1:($2/SCALE) title columnheader(1) lt rgb "blue"	pt  2  ps 2 lw 1, \
+		'' i x*7+1 using 1:($2/SCALE) title columnheader(1) lt rgb "dark-green"	pt  3  ps 2 lw 1, \
+		'' i x*7+2 using 1:($2/SCALE) title columnheader(1) lt rgb "coral"	pt  14  ps 2 lw 4, \
+		'' i x*7+3  using 1:($2/SCALE) title columnheader(1) lt rgb "black"	pt  16  ps 2 lw 2, \
+		'' i x*7+4 using 1:($2/SCALE) title columnheader(1) lt rgb "red"	pt  8  ps 2 lw 2, \
+		'' i x*7+5 using 1:($2/SCALE) title columnheader(1) lt rgb "dark-violet" pt  10  ps 2 lw 1, \
+		'' i x*7+6 using 1:($2/SCALE) title columnheader(1) lt rgb "brown"	pt  4  ps 2 lw 1
+} # for
Index: doc/papers/llheap/plotownership.gp
===================================================================
--- doc/papers/llheap/plotownership.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
+++ doc/papers/llheap/plotownership.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -0,0 +1,38 @@
+DIR=GRAPH
+SCALE=100000000
+set macros
+set output GRAPH.".ownership.tex"
+#set pointsize 2.0
+set grid linetype 0
+set xtics (2,4,8,16,32,64,128)
+set yrange [5:1000]
+set ytics (5,10,25,100,250,1000)
+set format y "%4g"
+set logscale x
+set logscale y
+
+if ( GRAPH eq "prolog" ) {
+	set terminal pslatex size 1.85, 2.25 color solid 9;
+	set ylabel sprintf( "Throughput, sec. $\\times$ %g, log scale", SCALE ) offset 1.2,0
+	set key maxrows 1 at screen 3.28, screen 1.03 samplen 0.5 font "Helvetica,18"
+	set title "ARM (a)"
+} else {
+	set terminal pslatex size 1.7, 2.25 color solid 9;
+	set key off
+	if ( GRAPH eq "swift" ) {
+	set title "AMD (b)"
+} else { if ( GRAPH eq "java" ) {
+	set title "Intel (c)"
+} else {
+	print "unknown machine name"
+	exit
+}}}
+
+plot DIR."/ownership.hexp" \
+	   i 0 using 2:($4/SCALE) title columnheader(1) lt rgb "blue"	pt  2  ps 2 lw 1, \
+	'' i 1 using 2:($4/SCALE) title columnheader(1) lt rgb "dark-green"	pt  3  ps 2 lw 1, \
+	'' i 2 using 2:($4/SCALE) title columnheader(1) lt rgb "coral"	pt  14  ps 2 lw 4, \
+	'' i 3 using 2:($4/SCALE) title columnheader(1) lt rgb "black"	pt  16  ps 2 lw 2, \
+	'' i 4 using 2:($4/SCALE) title columnheader(1) lt rgb "red"	pt  8  ps 2 lw 2, \
+	'' i 5 using 2:($4/SCALE) title columnheader(1) lt rgb "dark-violet" pt  10  ps 2 lw 1, \
+	'' i 6 using 2:($4/SCALE) title columnheader(1) lt rgb "brown"	pt  4  ps 2 lw 1
Index: doc/papers/llheap/plotownership_res.gp
===================================================================
--- doc/papers/llheap/plotownership_res.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
+++ doc/papers/llheap/plotownership_res.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -0,0 +1,40 @@
+DIR=GRAPH
+
+set terminal pslatex size 1.85, 2.25 color solid 9;
+set ylabel "System time, sec." offset 1.5,0
+#set terminal postscript portrait enhanced size 7.5, 10. color solid 9.5;
+#set terminal wxt size 950,1250
+
+set macros
+set output GRAPH.".ownershipres.tex"
+#set pointsize 2.0
+set grid linetype 0
+set xtics (2,4,8,16,32,64,128)
+set yrange [0:1000]
+set ytics 0,200,1000
+set format y "%4g"
+set logscale x
+#set logscale y
+#set key at screen 1.3, screen top - .01
+set key off
+
+if ( GRAPH eq "prolog" ) {
+	set title "ARM"
+} else { if ( GRAPH eq "swift" ) {
+	set title "AMD (d)"
+} else { if ( GRAPH eq "java" ) {
+	set title "Intel"
+} else {
+	print "unknown machine name"
+	exit
+}}}
+
+#set multiplot layout 1,2 rowsfirst margins 0.2,top,0.03,top spacing 0.24,0.08
+	plot DIR."/ownership.hres" \
+		   i 0 using 1:3 title columnheader(1) lt rgb "blue"	pt  2  ps 2 lw 1, \
+		'' i 1 using 1:3 title columnheader(1) lt rgb "dark-green"	pt  3  ps 2 lw 1, \
+		'' i 2 using 1:3 title columnheader(1) lt rgb "coral"	pt  14  ps 2 lw 4, \
+		'' i 3 using 1:3 title columnheader(1) lt rgb "black"	pt  16  ps 2 lw 2, \
+		'' i 4 using 1:3 title columnheader(1) lt rgb "red"	pt  8  ps 2 lw 2, \
+		'' i 5 using 1:3 title columnheader(1) lt rgb "dark-violet" pt  10  ps 2 lw 1, \
+		'' i 6 using 1:3 title columnheader(1) lt rgb "brown"	pt  4  ps 2 lw 1
Index: doc/papers/llheap/plotrealloc.gp
===================================================================
--- doc/papers/llheap/plotrealloc.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
+++ doc/papers/llheap/plotrealloc.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -0,0 +1,38 @@
+DIR=GRAPH
+SCALE=100000000
+set macros
+set output GRAPH.".realloc.tex"
+#set pointsize 2.0
+set grid linetype 0
+set xtics (32,64,128,256)
+set logscale x
+#set yrange [5:1000]
+#set ytics (5,10,25,100,250,1000)
+set format y "%4g"
+#set logscale y
+
+if ( GRAPH eq "prolog" ) {
+	set terminal pslatex size 1.42, 2.125 color solid 9;
+	set ylabel sprintf( "Elapse Time, sec.", SCALE ) offset 2,0
+	set key maxrows 1 at screen 4.32, screen 1.03 samplen 0.5 font "Helvetica,18"
+	set title "ARM"
+} else {
+	set terminal pslatex size 1.32, 2.125 color solid 9;
+	set key off
+	if ( GRAPH eq "swift" ) {
+	set title "AMD"
+} else { if ( GRAPH eq "java" ) {
+	set title "Intel"
+} else {
+	print "unknown machine name"
+	exit
+}}}
+
+plot DIR."/realloc" \
+	   i 0 using 2:6 title columnheader(1) lt rgb "blue"	pt  2  ps 2 lw 1, \
+	'' i 1 using 2:6 title columnheader(1) lt rgb "dark-green"	pt  3  ps 2 lw 1, \
+	'' i 2 using 2:6 title columnheader(1) lt rgb "coral"	pt  14  ps 2 lw 4, \
+	'' i 3 using 2:6 title columnheader(1) lt rgb "black"	pt  16  ps 2 lw 2, \
+	'' i 4 using 2:6 title columnheader(1) lt rgb "red"	pt  8  ps 2 lw 2, \
+	'' i 5 using 2:6 title columnheader(1) lt rgb "dark-violet" pt  10  ps 2 lw 1, \
+	'' i 6 using 2:6 title columnheader(1) lt rgb "brown"	pt  4  ps 2 lw 1
Index: doc/papers/llheap/plotreallocsim.gp
===================================================================
--- doc/papers/llheap/plotreallocsim.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
+++ doc/papers/llheap/plotreallocsim.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -0,0 +1,39 @@
+DIR=GRAPH
+SCALE=100000000
+set macros
+set output GRAPH.".reallocsim.tex"
+#set pointsize 2.0
+set grid linetype 0
+set xtics (32,64,128,256)
+set logscale x
+#set yrange [5:1000]
+#set ytics (5,10,25,100,250,1000)
+#set format y "%4g"
+#set logscale y
+
+if ( GRAPH eq "prolog" ) {
+	set terminal pslatex size 1.2, 2.125 color solid 9;
+#	set ylabel sprintf( "Elapse Time, sec.", SCALE ) offset 1,0
+#	set key maxrows 1 at screen 4.5, screen 1.03 samplen 1 font "Helvetica,18"
+	set key off
+	set title "ARM"
+} else {
+	set terminal pslatex size 1.2, 2.125 color solid 9;
+	set key off
+	if ( GRAPH eq "swift" ) {
+	set title "AMD"
+} else { if ( GRAPH eq "java" ) {
+	set title "Intel"
+} else {
+	print "unknown machine name"
+	exit
+}}}
+
+plot DIR."/reallocsim" \
+	   i 0 using 2:5 title columnheader(1) lt rgb "blue"	pt  2  ps 2 lw 1, \
+	'' i 1 using 2:5 title columnheader(1) lt rgb "dark-green"	pt  3  ps 2 lw 1, \
+	'' i 2 using 2:5 title columnheader(1) lt rgb "coral"	pt  14  ps 2 lw 4, \
+	'' i 3 using 2:5 title columnheader(1) lt rgb "black"	pt  16  ps 2 lw 2, \
+	'' i 4 using 2:5 title columnheader(1) lt rgb "red"	pt  8  ps 2 lw 2, \
+	'' i 5 using 2:5 title columnheader(1) lt rgb "dark-violet" pt  10  ps 2 lw 1, \
+	'' i 6 using 2:5 title columnheader(1) lt rgb "brown"	pt  4  ps 2 lw 1
Index: doc/papers/llheap/plotres.gp
===================================================================
--- doc/papers/llheap/plotres.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
+++ doc/papers/llheap/plotres.gp	(revision 73475a514fc02d477703262a68537a2d2b727722)
@@ -0,0 +1,69 @@
+set terminal pslatex size 5.8, 2.4 color solid 9;
+#set terminal postscript portrait enhanced size 7.5, 10. color solid 9.5;
+#set terminal wxt size 950,1250
+
+DIR=GRAPH
+
+set macros
+set output GRAPH."2.tex"
+#set pointsize 2.0
+set grid linetype 0
+set xtics (4,8,16,32)
+#set autoscale y
+set logscale x
+
+array titles[8];
+titles[1] = "user sbrk"
+titles[2] = "system sbrk"
+titles[3] = "real sbrk"
+titles[4] = "max memory sbrk"
+titles[5] = "user mmap"
+titles[6] = "system mmap"
+titles[7] = "real mmap"
+titles[8] = "max memory mmap"
+
+set xrange [3:38]
+
+set multiplot layout 2,4 rowsfirst margins 0.06,0.98,0.08,0.87 spacing 0.08,0.18
+
+if ( strstrt( GRAPH, "prolog" ) != 0 ) {
+	set key maxrows 1 at screen 0.99, screen 1.04 samplen 0.5 font "Helvetica,18"
+} else {
+	set key off
+}
+
+do for [x = 0:7] {
+	set logscale y 10
+	set yrange [*:*]
+	unset ylabel
+	if ( x == 3 || x == 7 ) {
+		set title titles[x+1]
+#		set logscale y 2
+		unset logscale y
+		set format y '%.0s'
+		set yrange [2:*]
+		set ylabel "megabytes" offset 1.0,0
+	} else {
+		if ( x == 0 || x == 4 ) {
+			set ylabel "sec., log scale" offset 1.5,0
+		}
+		set format y '%g'
+		if ( x == 1 || x == 5 ) {
+			set title titles[x+1]
+			set yrange [1:*]
+		} else {
+			set title titles[x+1]
+		}
+	}
+	plot DIR."/testdata.res" \
+		   i x*7   using 1:2 title columnheader(1) lt rgb "blue"	pt  2  ps 2 lw 1, \
+		'' i x*7+1 using 1:2 title columnheader(1) lt rgb "dark-green"	pt  3  ps 2 lw 1, \
+		'' i x*7+2 using 1:2 title columnheader(1) lt rgb "coral"	pt  14  ps 2 lw 4, \
+		'' i x*7+3 using 1:2 title columnheader(1) lt rgb "black"	pt  16  ps 2 lw 2, \
+		'' i x*7+4 using 1:2 title columnheader(1) lt rgb "red"		pt  8  ps 2 lw 2, \
+		'' i x*7+5 using 1:2 title columnheader(1) lt rgb "dark-violet" pt  10  ps 2 lw 1, \
+		'' i x*7+6 using 1:2 title columnheader(1) lt rgb "brown"	pt  4  ps 2 lw 1
+}
+unset multiplot
+
+#test
