source: doc/papers/llheap/Paper.tex @ 9eb7f07c

ADTast-experimental
Last change on this file since 9eb7f07c was 9eb7f07c, checked in by Peter A. Buhr <pabuhr@…>, 13 months ago

more updates for llheap paper

  • Property mode set to 100644
File size: 191.6 KB
Line 
1\documentclass[AMA,STIX1COL]{WileyNJD-v2}
2
3% Latex packages used in the document.
4
5\usepackage{comment}
6\usepackage{epic,eepic}
7\usepackage{upquote}                                                                    % switch curled `'" to straight
8\usepackage{relsize}
9\usepackage{xspace}
10\usepackage{calc}
11\usepackage[scaled=0.88]{helvet}                                                % decent Helvetica font and scale to Times size
12\usepackage[T1]{fontenc}
13\usepackage{listings}                                                                   % format program code
14\usepackage[labelformat=simple,aboveskip=0pt,farskip=0pt]{subfig}
15\renewcommand{\thesubfigure}{(\alph{subfigure})}
16\usepackage{enumitem}
17
18\hypersetup{breaklinks=true}
19
20\usepackage[pagewise]{lineno}
21\renewcommand{\linenumberfont}{\scriptsize\sffamily}
22
23\usepackage{varioref}                                   % extended references
24% adjust varioref package with default "section" and "page" titles, and optional title with faraway page numbers
25% \VRef{label} => Section 2.7, \VPageref{label} => page 17
26% \VRef[Figure]{label} => Figure 3.4, \VPageref{label} => page 17
27% \renewcommand{\reftextfaceafter}{\unskip}
28% \renewcommand{\reftextfacebefore}{\unskip}
29% \renewcommand{\reftextafter}{\unskip}
30% \renewcommand{\reftextbefore}{\unskip}
31% \renewcommand{\reftextfaraway}[1]{\unskip, p.~\pageref{#1}}
32% \renewcommand{\reftextpagerange}[2]{\unskip, pp.~\pageref{#1}--\pageref{#2}}
33% \newcommand{\VRef}[2][Section]{\ifx#1\@empty\else{#1}\nobreakspace\fi\vref{#2}}
34% \newcommand{\VRefrange}[3][Sections]{\ifx#1\@empty\else{#1}\nobreakspace\fi\vrefrange{#2}{#3}}
35% \newcommand{\VPageref}[2][page]{\ifx#1\@empty\else{#1}\nobreakspace\fi\pageref{#2}}
36% \newcommand{\VPagerefrange}[3][pages]{\ifx#1\@empty\else{#1}\nobreakspace\fi\pageref{#2}{#3}}
37
38\makeatletter
39\newcommand{\abbrevFont}{\textit}                       % set empty for no italics
40\newcommand{\CheckCommaColon}{\@ifnextchar{,}{}{\@ifnextchar{:}{}{,\xspace}}}
41\newcommand{\CheckPeriod}{\@ifnextchar{.}{}{.\xspace}}
42\newcommand{\EG}{\abbrevFont{e}.\abbrevFont{g}.}
43\newcommand{\eg}{\EG\CheckCommaColon}
44\newcommand{\IE}{\abbrevFont{i}.\abbrevFont{e}.}
45\newcommand{\ie}{\IE\CheckCommaColon}
46\newcommand{\ETC}{\abbrevFont{etc}}
47\newcommand{\etc}{\ETC\CheckPeriod}
48\newcommand{\VS}{\abbrevFont{vs}}
49\newcommand{\vs}{\VS\CheckPeriod}
50
51\newcommand{\newtermFont}{\emph}
52\newcommand{\newterm}[1]{\newtermFont{#1}}
53
54\newcommand{\CFAIcon}{\textsf{C}\raisebox{\depth}{\rotatebox{180}{\textsf{A}}}\xspace} % Cforall symbolic name
55\newcommand{\CFA}{\protect\CFAIcon}             % safe for section/caption
56\newcommand{\CFL}{\textrm{Cforall}\xspace}      % Cforall symbolic name
57\newcommand{\CCIcon}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}} % C++ icon
58\newcommand{\CC}[1][]{\protect\CCIcon{#1}\xspace}               % C++ symbolic name
59\newcommand{\uC}{$\mu$\CC}
60\newcommand{\Csharp}{C\raisebox{-0.7ex}{\relsize{2}$^\sharp$}\xspace} % C# symbolic name
61
62\newcommand{\LstBasicStyle}[1]{{\lst@basicstyle{#1}}}
63\newcommand{\LstKeywordStyle}[1]{{\lst@basicstyle{\lst@keywordstyle{#1}}}}
64\newcommand{\LstCommentStyle}[1]{{\lst@basicstyle{\lst@commentstyle{#1}}}}
65\newcommand{\LstStringStyle}[1]{{\lst@basicstyle{\lst@stringstyle{#1}}}}
66
67\newlength{\parindentlnth}
68\setlength{\parindentlnth}{\parindent}
69\newlength{\gcolumnposn}                                % temporary hack because lstlisting does not handle tabs correctly
70\newlength{\columnposn}
71\setlength{\gcolumnposn}{3.25in}
72\setlength{\columnposn}{\gcolumnposn}
73\newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}}
74\newcommand{\CRT}{\global\columnposn=\gcolumnposn}
75\makeatother
76
77\lstset{
78columns=fullflexible,
79basicstyle=\linespread{0.9}\sf,                                                 % reduce line spacing and use sanserif font
80stringstyle=\tt,                                                                                % use typewriter font
81tabsize=5,                                                                                              % N space tabbing
82xleftmargin=\parindentlnth,                                                             % indent code to paragraph indentation
83%mathescape=true,                                                                               % LaTeX math escape in CFA code $...$
84escapechar=\$,                                                                                  % LaTeX escape in CFA code
85keepspaces=true,                                                                                %
86showstringspaces=false,                                                                 % do not show spaces with cup
87showlines=true,                                                                                 % show blank lines at end of code
88aboveskip=4pt,                                                                                  % spacing above/below code block
89belowskip=3pt,
90moredelim=**[is][\color{red}]{`}{`},
91}% lstset
92
93% CFA programming language, based on ANSI C (with some gcc additions)
94\lstdefinelanguage{CFA}[ANSI]{C}{
95        morekeywords={
96                _Alignas, _Alignof, __alignof, __alignof__, asm, __asm, __asm__, __attribute, __attribute__,
97                auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, __const, __const__,
98                coroutine, disable, dtype, enable, exception, __extension__, fallthrough, fallthru, finally,
99                __float80, float80, __float128, float128, forall, ftype, generator, _Generic, _Imaginary, __imag, __imag__,
100                inline, __inline, __inline__, __int128, int128, __label__, monitor, mutex, _Noreturn, one_t, or,
101                otype, restrict, resume, __restrict, __restrict__, __signed, __signed__, _Static_assert, suspend, thread,
102                _Thread_local, throw, throwResume, timeout, trait, try, ttype, typeof, __typeof, __typeof__,
103                virtual, __volatile, __volatile__, waitfor, when, with, zero_t},
104        moredirectives={defined,include_next},
105        % replace/adjust listing characters that look bad in sanserif
106        literate={-}{\makebox[1ex][c]{\raisebox{0.5ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1
107                {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
108                {<}{\textrm{\textless}}1 {>}{\textrm{\textgreater}}1
109                {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1ex][c]{\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex{\textrm{\textgreater}}}2,
110}
111
112% uC++ programming language, based on ANSI C++
113\lstdefinelanguage{uC++}[ANSI]{C++}{
114        morekeywords={
115                _Accept, _AcceptReturn, _AcceptWait, _Actor, _At, _CatchResume, _Cormonitor, _Coroutine, _Disable,
116                _Else, _Enable, _Event, _Finally, _Monitor, _Mutex, _Nomutex, _PeriodicTask, _RealTimeTask,
117                _Resume, _Select, _SporadicTask, _Task, _Timeout, _When, _With, _Throw},
118}
119
120% Go programming language: https://github.com/julienc91/listings-golang/blob/master/listings-golang.sty
121\lstdefinelanguage{Golang}{
122        morekeywords=[1]{package,import,func,type,struct,return,defer,panic,recover,select,var,const,iota,},
123        morekeywords=[2]{string,uint,uint8,uint16,uint32,uint64,int,int8,int16,int32,int64,
124                bool,float32,float64,complex64,complex128,byte,rune,uintptr, error,interface},
125        morekeywords=[3]{map,slice,make,new,nil,len,cap,copy,close,true,false,delete,append,real,imag,complex,chan,},
126        morekeywords=[4]{for,break,continue,range,goto,switch,case,fallthrough,if,else,default,},
127        morekeywords=[5]{Println,Printf,Error,},
128        sensitive=true,
129        morecomment=[l]{//},
130        morecomment=[s]{/*}{*/},
131        morestring=[b]',
132        morestring=[b]",
133        morestring=[s]{`}{`},
134        % replace/adjust listing characters that look bad in sanserif
135        literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1
136                {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
137                {<}{\textrm{\textless}}1 {>}{\textrm{\textgreater}}1
138                {<-}{\makebox[2ex][c]{\textrm{\textless}\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}}2,
139}
140
141\lstnewenvironment{cfa}[1][]
142{\lstset{language=CFA,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}
143{}
144\lstnewenvironment{C++}[1][]                            % use C++ style
145{\lstset{language=C++,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}
146{}
147\lstnewenvironment{uC++}[1][]
148{\lstset{language=uC++,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}
149{}
150\lstnewenvironment{Go}[1][]
151{\lstset{language=Golang,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}
152{}
153\lstnewenvironment{python}[1][]
154{\lstset{language=python,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}
155{}
156\lstnewenvironment{java}[1][]
157{\lstset{language=java,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}
158{}
159
160% inline code @...@
161\lstMakeShortInline@%
162
163% \let\OLDthebibliography\thebibliography
164% \renewcommand\thebibliography[1]{
165%   \OLDthebibliography{#1}
166%   \setlength{\parskip}{0pt}
167%   \setlength{\itemsep}{4pt plus 0.3ex}
168% }
169
170\newsavebox{\myboxA}
171\newsavebox{\myboxB}
172\newsavebox{\myboxC}
173\newsavebox{\myboxD}
174
175%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
176
177\articletype{RESEARCH ARTICLE}%
178
179% Referees
180% Doug Lea, dl@cs.oswego.edu, SUNY Oswego
181% Herb Sutter, hsutter@microsoft.com, Microsoft Corp
182% Gor Nishanov, gorn@microsoft.com, Microsoft Corp
183% James Noble, kjx@ecs.vuw.ac.nz, Victoria University of Wellington, School of Engineering and Computer Science
184
185\received{XXXXX}
186\revised{XXXXX}
187\accepted{XXXXX}
188
189\raggedbottom
190
191\title{High-Performance Concurrent Memory Allocation}
192
193\author[1]{Mubeen Zulfiqar}
194\author[1]{Peter A. Buhr*}
195\author[1]{Thierry Delisle}
196\author[1]{Ayelet Wasik}
197\authormark{ZULFIQAR \textsc{et al.}}
198
199\address[1]{\orgdiv{Cheriton School of Computer Science}, \orgname{University of Waterloo}, \orgaddress{\state{Waterloo, ON}, \country{Canada}}}
200
201\corres{*Peter A. Buhr, Cheriton School of Computer Science, University of Waterloo, 200 University Avenue West, Waterloo, ON N2L 3G1, Canada. \email{pabuhr{\char`\@}uwaterloo.ca}}
202
203% \fundingInfo{Natural Sciences and Engineering Research Council of Canada}
204
205\abstract[Summary]{
206A new C-based concurrent memory-allocator is presented, called llheap.
207It can be used standalone in C/\CC applications with multiple kernel threads, or embedded into high-performance user-threading programming languages.
208llheap extends the feature set of existing C allocation by remembering zero-filled (\lstinline{calloc}) and aligned properties (\lstinline{memalign}) in an allocation.
209These properties can be queried, allowing programmers to write safer programs by preserving these properties in future allocations.
210As well, \lstinline{realloc} preserves these properties when enlarging storage requests, again increasing future allocation safety.
211llheap also extends the C allocation API with \lstinline{resize}, extended \lstinline{realloc}, \lstinline{aalloc}, \lstinline{amemalign}, and \lstinline{cmemalign}, providing orthogonal access to allocation properties, so programmers do not make mistakes writing these useful allocation operations.
212It is competitive with the best current memory allocators, achieving low-latency allocation and deallocation without a performance loss.
213The ability to use \CFA's advanced type-system (and possibly \CC's too) to combine advanced memory operations into one allocation routine using named arguments shows how far the allocation API can be pushed, which increases safety and greatly simplifies a programmer's use of dynamic allocation.
216The llheap allocator also provides comprehensive statistics for all allocation operations, which are invaluable in understanding and debugging a program's dynamic behaviour.
217As well, llheap provides a debugging mode where allocations are checked with internal pre/post conditions and invariants. This mode is extremely useful, especially for students.
218% No other memory allocator examined in the work provides such comprehensive statistics gathering.
219% While not as powerful as the \lstinline{valgrind} interpreter, a large number of allocations mistakes are detected.
220% Finally, contention-free statistics gathering and debugging have a low enough cost to be used in production code.
221%
222% A micro-benchmark test-suite is started for comparing allocators, rather than relying on a suite of arbitrary programs. It has been an interesting challenge.
223% These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs.
224% Existing memory allocators, glibc, dlmalloc, hoard, jemalloc, ptmalloc3, rpmalloc, tbmalloc, and the new allocator llheap are all compared using the new micro-benchmark test-suite.
225}% abstract
226
227\keywords{memory allocation, malloc, concurrency, C, \CFA (Cforall), runtime, threads}
228
229
230\begin{document}
231%\linenumbers                           % comment out to turn off line numbering
232
233\maketitle
234
235
236\section{Introduction}
237
238Memory management takes a sequence of program generated allocation/deallocation requests and attempts to satisfy them within a fixed-sized block of memory while minimizing the total amount of memory used.
239A general-purpose dynamic-allocation algorithm cannot anticipate future allocation requests so its output is rarely optimal.
240However, memory allocators do take advantage of regularities in allocation patterns for typical programs to produce excellent results, both in time and space (similar to LRU paging).
241In general, allocators use a number of similar techniques, each optimizing specific allocation patterns.
242Nevertheless, memory allocators are a series of compromises, occasionally with some static or dynamic tuning parameters to optimize specific program-request patterns.
243
244
245\subsection{Memory Structure}
246\label{s:MemoryStructure}
247
248Figure~\ref{f:ProgramAddressSpace} shows the typical layout of a program's address space divided into the following zones (right to left): static code/data, dynamic allocation, dynamic code/data, and stack, with free memory surrounding the dynamic code/data~\cite{memlayout}.
249Static code and data are placed into memory at load time from the executable and are fixed-sized at runtime.
250Dynamic-allocation memory starts empty and grows/shrinks as the program dynamically creates/deletes variables with independent lifetime.
251The programming-language's runtime manages this area, where management complexity is a function of the mechanism for deleting variables.
252Dynamic code/data memory is managed by the dynamic loader for libraries loaded at runtime, which is complex especially in a multi-threaded program~\cite{Huang06}.
253However, changes to the dynamic code/data space are typically infrequent, many occurring at program startup, and are largely outside of a program's control.
254Stack memory is managed by the program call/return-mechanism using a simple LIFO technique, which works well for sequential programs.
255For stackful coroutines and user threads, a new stack is commonly created in dynamic-allocation memory.
256This work focuses solely on management of the dynamic-allocation memory.
257
258\begin{figure}
259\centering
260\input{AddressSpace}
261\vspace{-5pt}
262\caption{Program Address Space Divided into Zones}
263\label{f:ProgramAddressSpace}
264\end{figure}
265
266
267\subsection{Dynamic Memory-Management}
268\label{s:DynamicMemoryManagement}
269
270Modern programming languages manage dynamic-allocation memory in different ways.
271Some languages, such as Lisp~\cite{CommonLisp}, Java~\cite{Java}, Haskell~\cite{Haskell}, Go~\cite{Go}, provide explicit allocation but \emph{implicit} deallocation of data through garbage collection~\cite{Wilson92}.
272In general, garbage collection supports memory compaction, where dynamic (live) data is moved during runtime to better utilize space.
273However, moving data requires finding pointers to it and updating them to reflect new data locations.
274Programming languages such as C~\cite{C}, \CC~\cite{C++}, and Rust~\cite{Rust} provide the programmer with explicit allocation \emph{and} deallocation of data.
275These languages cannot find and subsequently move live data because pointers can be created to any storage zone, including internal components of allocated objects, and may contain temporary invalid values generated by pointer arithmetic.
276Attempts have been made to perform quasi garbage collection in C/\CC~\cite{Boehm88}, but it is a compromise.
277This work only examines dynamic memory-management with \emph{explicit} deallocation.
278While garbage collection and compaction are not part of this work, many of the results are applicable to the allocation phase in any memory-management approach.
279
280Most programs use a general-purpose allocator, often the one provided implicitly by the programming-language's runtime.
281When this allocator proves inadequate, programmers often write specialized allocators for specific needs.
282C and \CC allow easy replacement of the default memory allocator with an alternative specialized or general-purpose memory-allocator.
283Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.
284However, high-performance memory-allocators for kernel and user multi-threaded programs are still being designed and improved.
285For this reason, several alternative general-purpose allocators have been written for C/\CC with the goal of scaling in a multi-threaded program~\cite{Berger00,mtmalloc,streamflow,tcmalloc}.
286This work examines the design of high-performance allocators for use by kernel and user multi-threaded applications written in C/\CC.
287
288
289\subsection{Contributions}
290\label{s:Contributions}
291
292This work provides the following contributions in the area of explicit concurrent dynamic-allocation:
293\begin{enumerate}[leftmargin=*,itemsep=0pt]
294\item
295Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running on multiple kernel threads (M:N threading).
296
297\item
298Extend the standard C heap functionality by preserving with each allocation: its request size plus the amount allocated, whether an allocation is zero fill, and allocation alignment.
299
300\item
301Use the preserved zero fill and alignment as \emph{sticky} properties for @realloc@ to zero-fill and align when storage is extended or copied.
302Without this extension, it is unsafe to @realloc@ storage initially allocated with zero-fill/alignment as these properties are not preserved when copying.
303This silent generation of a problem is unintuitive to programmers and difficult to locate because it is transient.
304
305\item
306Provide additional heap operations to complete programmer expectation with respect to accessing different allocation properties.
307\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
308\item
309@resize( oaddr, size )@ re-purpose an old allocation for a new type \emph{without} preserving fill or alignment.
310\item
311@resize( oaddr, alignment, size )@ re-purpose an old allocation with new alignment but \emph{without} preserving fill.
312\item
313@realloc( oaddr, alignment, size )@ same as @realloc@ but adding or changing alignment.
314\item
315@aalloc( dim, elemSize )@ same as @calloc@ except memory is \emph{not} zero filled.
316\item
317@amemalign( alignment, dim, elemSize )@ same as @aalloc@ with memory alignment.
318\item
319@cmemalign( alignment, dim, elemSize )@ same as @calloc@ with memory alignment.
320\end{itemize}
321
322\item
323Provide additional heap wrapper functions in \CFA creating a more usable set of allocation operations and properties.
324
325\item
326Provide additional query operations to access information about an allocation:
327\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
328\item
329@malloc_alignment( addr )@ returns the alignment of the allocation pointed-to by @addr@.
330If the allocation is not aligned or @addr@ is @NULL@, the minimal alignment is returned.
331\item
332@malloc_zero_fill( addr )@ returns a boolean result indicating if the memory pointed-to by @addr@ is allocated with zero fill, e.g., by @calloc@/@cmemalign@.
333\item
334@malloc_size( addr )@ returns the size of the memory allocation pointed-to by @addr@.
335\item
336@malloc_usable_size( addr )@ returns the usable (total) size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.
337\end{itemize}
338
339\item
340Provide complete, fast, and contention-free allocation statistics to help understand allocation behaviour:
341\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
342\item
343@malloc_stats()@ print memory-allocation statistics on the file-descriptor set by @malloc_stats_fd@.
344\item
345@malloc_info( options, stream )@ print memory-allocation statistics as an XML string on the specified file-descriptor set by @malloc_stats_fd@.
346\item
347@malloc_stats_fd( fd )@ set file-descriptor number for printing memory-allocation statistics (default @STDERR_FILENO@).
348This file descriptor is used implicitly by @malloc_stats@ and @malloc_info@.
349\end{itemize}
350
351\item
352Provide extensive runtime checks to validate allocation operations and identify the amount of unfreed storage at program termination.
353
354\item
355Build 8 different versions of the allocator: static or dynamic linking, with or without statistics or debugging.
356A program may link to any of these 8 versions of the allocator often without recompilation.
357
358\item
359A micro-benchmark test-suite for comparing allocators rather than relying on a suite of arbitrary programs.
360These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs.
361\end{enumerate}
362
363
364\section{Background}
365
366The following discussion is a quick overview of the moving-pieces that affect the design of a memory allocator and its performance.
367It is assumed that dynamic allocations and deallocations acquire and release storage for a program variable, referred to as an \newterm{object}, through calls such as @malloc@ and @free@ in C, and @new@ and @delete@ in \CC.
368Space for each allocated object comes from the dynamic-allocation zone.
369
370A \newterm{memory allocator} contains a complex data-structure and code that manages the layout of objects in the dynamic-allocation zone.
371The management goals are to make allocation/deallocation operations as fast as possible while densely packing objects to make efficient use of memory.
372Objects in C/\CC cannot be moved to aid the packing process, only adjacent free storage can be \newterm{coalesced} into larger free areas.
373The allocator grows or shrinks the dynamic-allocation zone to obtain storage for objects and reduce memory usage via operating-system calls, such as @mmap@ or @sbrk@ in UNIX.
374
375
376\subsection{Allocator Components}
377\label{s:AllocatorComponents}
378
379Figure~\ref{f:AllocatorComponents} shows the two important data components for a memory allocator, management and storage, collectively called the \newterm{heap}.
380The \newterm{management data} is a data structure located at a known memory address and contains all information necessary to manage the storage data.
381The management data starts with fixed-sized information in the static-data memory that references components in the dynamic-allocation memory.
382For multi-threaded programs, additional management data may exist in \newterm{thread-local storage} (TLS) for each kernel thread executing the program.
383The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}.
384Allocated objects (light grey) are variable sized, and are allocated and maintained by the program;
385\ie only the program knows the location of allocated storage not the memory allocator.
386Freed objects (white) represent memory deallocated by the program, which are linked into one or more lists facilitating easy location of new allocations.
387Reserved memory (dark grey) is one or more blocks of memory obtained from the operating system but not yet allocated to the program;
388if there are multiple reserved blocks, they are also chained together, usually internally.
389
390\begin{figure}
391\centering
392\input{AllocatorComponents}
393\caption{Allocator Components (Heap)}
394\label{f:AllocatorComponents}
395\end{figure}
396
397In most allocator designs, allocated objects have management data embedded within them.
398Figure~\ref{f:AllocatedObject} shows an allocated object with a header, trailer, and optional spacing around the object.
399The header contains information about the object, \eg size, type, etc.
400The trailer may be used to simplify coalescing and/or for security purposes to mark the end of an object.
401An object may be preceded by padding to ensure proper alignment.
402Some algorithms quantize allocation requests, resulting in additional space after an object less than the quantized value.
403% The buckets are often organized as an array of ascending bucket sizes for fast searching, \eg binary search, and the array is stored in the heap management-area, where each bucket is a top point to the freed objects of that size.
404When padding and spacing are necessary, neither can be used to satisfy a future allocation request while the current allocation exists.
405
406A free object also contains management data, \eg size, pointers, etc.
407Often the free list is chained internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks.
408For internal chaining, the amount of management data for a free node defines the minimum allocation size, \eg if 16 bytes are needed for a free-list node, allocation requests less than 16 bytes are rounded up.
409The information in an allocated or freed object is overwritten when it transitions from allocated to freed and vice-versa by new management information and/or program data.
410
411\begin{figure}
412\centering
413\input{AllocatedObject}
414\caption{Allocated Object}
415\label{f:AllocatedObject}
416\end{figure}
417
418
419\subsection{Single-Threaded Memory-Allocator}
420\label{s:SingleThreadedMemoryAllocator}
421
422A single-threaded memory-allocator does not run any threads itself, but is used by a single-threaded program.
423Because the memory allocator is only executed by a single thread, concurrency issues do not exist.
424The primary issues in designing a single-threaded memory-allocator are fragmentation and locality.
425
426
427\subsubsection{Fragmentation}
428\label{s:Fragmentation}
429
430Fragmentation is memory requested from the operating system but not used by the program;
431hence, allocated objects are not fragmentation.
432Figure~\ref{f:InternalExternalFragmentation} shows fragmentation is divided into two forms: internal or external.
433
434\begin{figure}
435\centering
436\input{IntExtFragmentation}
437\caption{Internal and External Fragmentation}
438\label{f:InternalExternalFragmentation}
439\end{figure}
440
441\newterm{Internal fragmentation} is memory space that is allocated to the program, but is not intended to be accessed by the program, such as headers, trailers, padding, and spacing around an allocated object.
442Internal fragmentation is problematic when management space is a significant proportion of an allocated object, \eg for small objects ($<$16 bytes), memory usage is doubled.
443An allocator should strive to keep internal management information to a minimum.
444
445\newterm{External fragmentation} is all memory space reserved from the operating system but not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes all external management data, freed objects, and reserved memory.
446This memory is problematic in two ways: heap blowup and highly fragmented memory.
447\newterm{Heap blowup} occurs when freed memory cannot be reused for future allocations leading to potentially unbounded external fragmentation growth~\cite{Berger00}.
448Memory can become \newterm{highly fragmented} after multiple allocations and deallocations of objects, resulting in a checkerboard of adjacent allocated and free areas, where the free blocks have become very small.
449% Figure~\ref{f:MemoryFragmentation} shows an example of how a small block of memory fragments as objects are allocated and deallocated over time.
450Heap blowup can occur due to allocator policies that are too restrictive in reusing freed memory (the allocated size cannot use a larger free block) and/or no coalescing of free storage.
451% Blocks of free memory become smaller and non-contiguous making them less useful in serving allocation requests.
452% Memory is highly fragmented when most free blocks are unusable because of their sizes.
453% For example, Figure~\ref{f:Contiguous} and Figure~\ref{f:HighlyFragmented} have the same quantity of external fragmentation, but Figure~\ref{f:HighlyFragmented} is highly fragmented.
454% If there is a request to allocate a large object, Figure~\ref{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while Figure~\ref{f:HighlyFragmented} likely has to request more memory from the operating system.
455
456% \begin{figure}
457% \centering
458% \input{MemoryFragmentation}
459% \caption{Memory Fragmentation}
460% \label{f:MemoryFragmentation}
461% \vspace{10pt}
462% \subfloat[Contiguous]{
463%       \input{ContigFragmentation}
464%       \label{f:Contiguous}
465% } % subfloat
466%       \subfloat[Highly Fragmented]{
467%       \input{NonContigFragmentation}
468% \label{f:HighlyFragmented}
469% } % subfloat
470% \caption{Fragmentation Quality}
471% \label{f:FragmentationQuality}
472% \end{figure}
473
474For a single-threaded memory allocator, three basic approaches for controlling fragmentation are identified~\cite{Johnstone99}.
475The first approach is a \newterm{sequential-fit algorithm} with one list of free objects that is searched for a block large enough to fit a requested object size.
476Different search policies determine the free object selected, \eg the first free object large enough or closest to the requested size.
477Any storage larger than the request can become spacing after the object or be split into a smaller free object.
478% The cost of the search depends on the shape and quality of the free list, \eg a linear versus a binary-tree free-list, a sorted versus unsorted free-list.
479
480The second approach is a \newterm{segregated} or \newterm{binning algorithm} with a set of lists for different sized freed objects.
481When an object is allocated, the requested size is rounded up to the nearest bin-size, often leading to spacing after the object.
482A binning algorithm is fast at finding free memory of the appropriate size and allocating it, since the first free object on the free list is used.
483The fewer bin sizes, the fewer lists need to be searched and maintained;
however, unusable space after an object increases, leading to more internal fragmentation.
The more bin sizes, the longer the search and the less likely a matching free object is found, leading to more external fragmentation and potentially heap blowup.
A variation of the binning algorithm allows objects to be allocated from larger bin sizes when the matching bin is empty, and the freed object can be returned to the matching or larger bin (some advantages to either scheme).
487% For example, with bin sizes of 8 and 16 bytes, a request for 12 bytes allocates only 12 bytes, but when the object is freed, it is placed on the 8-byte bin-list.
488% For subsequent requests, the bin free-lists contain objects of different sizes, ranging from one bin-size to the next (8-16 in this example), and a sequential-fit algorithm may be used to find an object large enough for the requested size on the associated bin list.
489
490The third approach is \newterm{splitting} and \newterm{coalescing algorithms}.
491When an object is allocated, if there are no free objects of the requested size, a larger free object may be split into two smaller objects to satisfy the allocation request without obtaining more memory from the operating system.
492For example, in the \newterm{buddy system}, a block of free memory is split into two equal chunks, one of those chunks is again split into two equal chunks, and so on until a block just large enough to fit the requested object is created.
493When an object is deallocated it is coalesced with the objects immediately before and after it in memory, if they are free, turning them into one larger object.
494Coalescing can be done eagerly at each deallocation or lazily when an allocation cannot be fulfilled.
495In all cases, coalescing increases allocation latency, hence some allocations can cause unbounded delays during coalescing.
496While coalescing does not reduce external fragmentation, the coalesced blocks improve fragmentation quality so future allocations are less likely to cause heap blowup.
497% Splitting and coalescing can be used with other algorithms to avoid highly fragmented memory.
498
499
500\subsubsection{Locality}
501\label{s:Locality}
502
503The principle of locality recognizes that programs tend to reference a small set of data, called a working set, for a certain period of time, where a working set is composed of temporal and spatial accesses~\cite{Denning05}.
504% Temporal clustering implies a group of objects are accessed repeatedly within a short time period, while spatial clustering implies a group of objects physically close together (nearby addresses) are accessed repeatedly within a short time period.
505% Temporal locality commonly occurs during an iterative computation with a fixed set of disjoint variables, while spatial locality commonly occurs when traversing an array.
506Hardware takes advantage of temporal and spatial locality through multiple levels of caching, \ie memory hierarchy.
507% When an object is accessed, the memory physically located around the object is also cached with the expectation that the current and nearby objects will be referenced within a short period of time.
508For example, entire cache lines are transferred between memory and cache and entire virtual-memory pages are transferred between disk and memory.
509% A program exhibiting good locality has better performance due to fewer cache misses and page faults\footnote{With the advent of large RAM memory, paging is becoming less of an issue in modern programming.}.
510
511Temporal locality is largely controlled by how a program accesses its variables~\cite{Feng05}.
512Nevertheless, a memory allocator can have some indirect influence on temporal locality and largely dictates spatial locality.
513For temporal locality, an allocator can return storage for new allocations that was just freed as these memory locations are still \emph{warm} in the memory hierarchy.
514For spatial locality, an allocator can place objects used together close together in memory, so the working set of the program fits into the fewest possible cache lines and pages.
515% However, usage patterns are different for every program as is the underlying hardware memory architecture;
516% hence, no general-purpose memory-allocator can provide ideal locality for every program on every computer.
517
518There are a number of ways a memory allocator can degrade locality by increasing the working set.
519For example, a memory allocator may access multiple free objects before finding one to satisfy an allocation request, \eg sequential-fit algorithm, which can perturb the program's memory hierarchy causing multiple cache or page misses~\cite{Grunwald93}.
520Another way locality can be degraded is by spatially separating related data.
521For example, in a binning allocator, objects of different sizes are allocated from different bins that may be located in different pages of memory.
522
523
524\subsection{Multi-Threaded Memory-Allocator}
525\label{s:MultiThreadedMemoryAllocator}
526
527A multi-threaded memory-allocator does not run any threads itself, but is used by a multi-threaded program.
528In addition to single-threaded design issues of fragmentation and locality, a multi-threaded allocator is simultaneously accessed by multiple threads, and hence, must deal with concurrency issues such as mutual exclusion, false sharing, and additional forms of heap blowup.
529
530
531\subsubsection{Mutual Exclusion}
532\label{s:MutualExclusion}
533
534\newterm{Mutual exclusion} provides sequential access to the shared management data of the heap.
535There are two performance issues for mutual exclusion.
536First is the overhead necessary to perform (at least) a hardware atomic operation every time a shared resource is accessed.
537Second is when multiple threads contend for a shared resource simultaneously, and hence, some threads must wait until the resource is released.
538Contention can be reduced in a number of ways:
5391) Using multiple fine-grained locks versus a single lock, spreading the contention across a number of locks.
5402) Using trylock and generating new storage if the lock is busy, yielding a classic space versus time tradeoff.
5413) Using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Oyama99}.
542However, all of these approaches have degenerate cases where program contention is high, which occurs outside of the allocator.
543
544
545\subsubsection{False Sharing}
546\label{s:FalseSharing}
547
548False sharing is a dynamic phenomenon leading to cache thrashing.
549When two or more threads on separate CPUs simultaneously change different objects sharing a cache line, the change invalidates the other thread's associated cache, even though these threads may be uninterested in the other modified object.
550False sharing can occur in three different ways: program induced, allocator-induced active, and allocator-induced passive;
551a memory allocator can only affect the latter two.
552
553Assume two objects, object$_1$ and object$_2$, share a cache line.
\newterm{Program-induced false-sharing} occurs when thread$_1$ passes a reference to object$_2$ to thread$_2$, and then thread$_1$ modifies object$_1$ while thread$_2$ modifies object$_2$.
555% Figure~\ref{f:ProgramInducedFalseSharing} shows when Thread$_1$ passes Object$_2$ to Thread$_2$, a false-sharing situation forms when Thread$_1$ modifies Object$_1$ and Thread$_2$ modifies Object$_2$.
556% Changes to Object$_1$ invalidate CPU$_2$'s cache line, and changes to Object$_2$ invalidate CPU$_1$'s cache line.
557% \begin{figure}
558% \centering
559% \subfloat[Program-Induced False-Sharing]{
560%       \input{ProgramFalseSharing}
561%       \label{f:ProgramInducedFalseSharing}
562% } \\
563% \vspace{5pt}
564% \subfloat[Allocator-Induced Active False-Sharing]{
565%       \input{AllocInducedActiveFalseSharing}
566%       \label{f:AllocatorInducedActiveFalseSharing}
567% } \\
568% \vspace{5pt}
569% \subfloat[Allocator-Induced Passive False-Sharing]{
570%       \input{AllocInducedPassiveFalseSharing}
571%       \label{f:AllocatorInducedPassiveFalseSharing}
572% } subfloat
573% \caption{False Sharing}
574% \label{f:FalseSharing}
575% \end{figure}
576\newterm{Allocator-induced active false-sharing}\label{s:AllocatorInducedActiveFalseSharing} occurs when object$_1$ and object$_2$ are heap allocated and their references are passed to thread$_1$ and thread$_2$, which modify the objects.
577% For example, in Figure~\ref{f:AllocatorInducedActiveFalseSharing}, each thread allocates an object and loads a cache-line of memory into its associated cache.
578% Again, changes to Object$_1$ invalidate CPU$_2$'s cache line, and changes to Object$_2$ invalidate CPU$_1$'s cache line.
579\newterm{Allocator-induced passive false-sharing}\label{s:AllocatorInducedPassiveFalseSharing} occurs
580% is another form of allocator-induced false-sharing caused by program-induced false-sharing.
581% When an object in a program-induced false-sharing situation is deallocated, a future allocation of that object may cause passive false-sharing.
582when thread$_1$ passes object$_2$ to thread$_2$, and thread$_2$ subsequently deallocates object$_2$, and then object$_2$ is reallocated to thread$_2$ while thread$_1$ is still using object$_1$.
583
584
585\subsubsection{Heap Blowup}
586\label{s:HeapBlowup}
587
588In a multi-threaded program, heap blowup can occur when memory freed by one thread is inaccessible to other threads due to the allocation strategy.
589Specific examples are presented in later subsections.
590
591
592\subsection{Multi-Threaded Memory-Allocator Features}
593\label{s:MultiThreadedMemoryAllocatorFeatures}
594
595The following features are used in the construction of multi-threaded memory-allocators:
596\begin{enumerate}[itemsep=0pt]
597\item multiple heaps: with or without a global heap, or with or without heap ownership.
598\item object containers: with or without ownership, fixed or variable sized, global or local free-lists.
599\item hybrid private/public heap
600\item allocation buffer
601\item lock-free operations
602\end{enumerate}
603The first feature, multiple heaps, pertains to different kinds of heaps.
604The second feature, object containers, pertains to the organization of objects within the storage area.
605The remaining features apply to different parts of the allocator design or implementation.
606
607
608\subsection{Multiple Heaps}
609\label{s:MultipleHeaps}
610
611A multi-threaded allocator has potentially multiple threads and heaps.
612The multiple threads cause complexity, and multiple heaps are a mechanism for dealing with the complexity.
613The spectrum ranges from multiple threads using a single heap, denoted as T:1 (see Figure~\ref{f:SingleHeap}), to multiple threads sharing multiple heaps, denoted as T:H (see Figure~\ref{f:SharedHeaps}), to one thread per heap, denoted as 1:1 (see Figure~\ref{f:PerThreadHeap}), which is almost back to a single-threaded allocator.
614
615\begin{figure}
616\centering
617\subfloat[T:1]{
618%       \input{SingleHeap.pstex_t}
619        \input{SingleHeap}
620        \label{f:SingleHeap}
621} % subfloat
622\vrule
623\subfloat[T:H]{
624%       \input{MultipleHeaps.pstex_t}
625        \input{SharedHeaps}
626        \label{f:SharedHeaps}
627} % subfloat
628\vrule
629\subfloat[1:1]{
630%       \input{MultipleHeapsGlobal.pstex_t}
631        \input{PerThreadHeap}
632        \label{f:PerThreadHeap}
633} % subfloat
634\caption{Multiple Heaps, Thread:Heap Relationship}
635\end{figure}
636
637\paragraph{T:1 model} where all threads allocate and deallocate objects from one heap.
638Memory is obtained from the freed objects, or reserved memory in the heap, or from the operating system (OS);
639the heap may also return freed memory to the operating system.
640The arrows indicate the direction memory conceptually moves for each kind of operation: allocation moves memory along the path from the heap/operating-system to the user application, while deallocation moves memory along the path from the application back to the heap/operating-system.
641To safely handle concurrency, a single lock may be used for all heap operations or fine-grained locking for different operations.
642Regardless, a single heap may be a significant source of contention for programs with a large amount of memory allocation.
643
644\paragraph{T:H model} where each thread allocates storage from several heaps depending on certain criteria, with the goal of reducing contention by spreading allocations/deallocations across the heaps.
645The decision on when to create a new heap and which heap a thread allocates from depends on the allocator design.
646To determine which heap to access, each thread must point to its associated heap in some way.
647The performance goal is to reduce the ratio of heaps to threads.
However, the worst case can result in more heaps than threads, \eg if the number of threads is large at startup with many allocations creating a large number of heaps and then the number of threads reduces.
649Locking is required, since more than one thread may concurrently access a heap during its lifetime, but contention is reduced because fewer threads access a specific heap.
650
651% For example, multiple heaps are managed in a pool, starting with a single or a fixed number of heaps that increase\-/decrease depending on contention\-/space issues.
652% At creation, a thread is associated with a heap from the pool.
653% In some implementations of this model, when the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.
654% If an unlocked heap is found, the thread changes its association and uses that heap.
655% If all heaps are locked, the thread may create a new heap, use it, and then place the new heap into the pool;
656% or the thread can block waiting for a heap to become available.
657% While the heap-pool approach often minimizes the number of extant heaps, the worse case can result in more heaps than threads;
658% \eg if the number of threads is large at startup with many allocations creating a large number of heaps and then the number of threads reduces.
659
660% Threads using multiple heaps need to determine the specific heap to access for an allocation/deallocation, \ie association of thread to heap.
661% A number of techniques are used to establish this association.
662% The simplest approach is for each thread to have a pointer to its associated heap (or to administrative information that points to the heap), and this pointer changes if the association changes.
663% For threading systems with thread-local storage, the heap pointer is created using this mechanism;
664% otherwise, the heap routines must simulate thread-local storage using approaches like hashing the thread's stack-pointer or thread-id to find its associated heap.
665
666% The storage management for multiple heaps is more complex than for a single heap (see Figure~\ref{f:AllocatorComponents}).
667% Figure~\ref{f:MultipleHeapStorage} illustrates the general storage layout for multiple heaps.
668% Allocated and free objects are labelled by the thread or heap they are associated with.
669% (Links between free objects are removed for simplicity.)
670The management information for multiple heaps in the static zone must be able to locate all heaps.
The management information for the heaps must reside in the dynamic-allocation zone if the number of heaps is variable.
672Each heap in the dynamic zone is composed of a list of free objects and a pointer to its reserved memory.
673An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory.
674Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur.
675Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area, pushing part of the storage management complexity back to the operating system.
676
677% \begin{figure}
678% \centering
679% \input{MultipleHeapsStorage}
680% \caption{Multiple-Heap Storage}
681% \label{f:MultipleHeapStorage}
682% \end{figure}
683
684Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup.
685The external fragmentation experienced by a program with a single heap is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory.
686Additionally, objects freed by one heap cannot be reused by other threads without increasing the cost of the memory operations, except indirectly by returning free memory to the operating system, which can be expensive.
687Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.
688In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused.
689
690Adding a \newterm{global heap} (G) attempts to reduce the cost of obtaining/returning memory among heaps (sharing) by buffering storage within the application address-space.
691Now, each heap obtains and returns storage to/from the global heap rather than the operating system.
692Storage is obtained from the global heap only when a heap allocation cannot be fulfilled, and returned to the global heap when a heap's free memory exceeds some threshold.
693Similarly, the global heap buffers this memory, obtaining and returning storage to/from the operating system as necessary.
694The global heap does not have its own thread and makes no internal allocation requests;
695instead, it uses the application thread, which called one of the multiple heaps and then the global heap, to perform operations.
696Hence, the worst-case cost of a memory operation includes all these steps.
697With respect to heap blowup, the global heap provides an indirect mechanism to move free memory among heaps, which usually has a much lower cost than interacting with the operating system to achieve the same goal and is independent of the mechanism used by the operating system to present dynamic memory to an address space.
698
699However, since any thread may indirectly perform a memory operation on the global heap, it is a shared resource that requires locking.
700A single lock can be used to protect the global heap or fine-grained locking can be used to reduce contention.
701In general, the cost is minimal since the majority of memory operations are completed without the use of the global heap.
702
703
704\paragraph{1:1 model (thread heaps)} where each thread has its own heap eliminating most contention and locking because threads seldom access another thread's heap (see ownership in Section~\ref{s:Ownership}).
705An additional benefit of thread heaps is improved locality due to better memory layout.
As each thread only allocates from its heap, all objects are consolidated in the storage area for that heap, better utilizing each CPU's cache and accessing fewer pages.
707In contrast, the T:H model spreads each thread's objects over a larger area in different heaps.
708Thread heaps can also eliminate allocator-induced active false-sharing, if memory is acquired so it does not overlap at crucial boundaries with memory for another thread's heap.
709For example, assume page boundaries coincide with cache line boundaries, if a thread heap always acquires pages of memory then no two threads share a page or cache line unless pointers are passed among them.
710Hence, allocator-induced active false-sharing cannot occur because the memory for thread heaps never overlaps.
711
712When a thread terminates, there are two options for handling its thread heap.
713First is to free all objects in the thread heap to the global heap and destroy the thread heap.
714Second is to place the thread heap on a list of available heaps and reuse it for a new thread in the future.
715Destroying the thread heap immediately may reduce external fragmentation sooner, since all free objects are freed to the global heap and may be reused by other threads.
716Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible.
717
718
719\subsubsection{User-Level Threading}
720
721It is possible to use any of the heap models with user-level (M:N) threading.
722However, an important goal of user-level threading is for fast operations (creation/termination/context-switching) by not interacting with the operating system, which allows the ability to create large numbers of high-performance interacting threads ($>$ 10,000).
723It is difficult to retain this goal, if the user-threading model is directly involved with the heap model.
724Figure~\ref{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model is provided by the language runtime.
725Hence, a user thread allocates/deallocates from/to the heap of the kernel thread on which it is currently executing.
726
727\begin{figure}
728\centering
729\input{UserKernelHeaps}
730\caption{User-Level Kernel Heaps}
731\label{f:UserLevelKernelHeaps}
732\end{figure}
733
734Adopting this model results in a subtle problem with shared heaps.
735With kernel threading, an operation that is started by a kernel thread is always completed by that thread.
736For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap even if preempted, \ie any locking correctness associated with the shared heap is preserved across preemption.
737However, this correctness property is not preserved for user-level threading.
738A user thread can start an allocation/deallocation on one kernel thread, be preempted (time slice), and continue running on a different kernel thread to complete the operation~\cite{Dice02}.
739When the user thread continues on the new kernel thread, it may have pointers into the previous kernel-thread's heap and hold locks associated with it.
740To get the same kernel-thread safety, time slicing must be disabled/\-enabled around these operations, so the user thread cannot jump to another kernel thread.
741However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption does not happen that frequently.
742Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically.
Occasionally ignoring a preemption should be benign, but a persistent lack of preemption can result in both short- and long-term starvation;
744techniques like rollforward can be used to force an eventual preemption.
745
746
747\subsubsection{Ownership}
748\label{s:Ownership}
749
\newterm{Ownership} defines which heap an object is returned to on deallocation.
751If a thread returns an object to the heap it was originally allocated from, a heap has ownership of its objects.
752Alternatively, a thread can return an object to the heap it is currently associated with, which can be any heap accessible during a thread's lifetime.
753Figure~\ref{f:HeapsOwnership} shows an example of multiple heaps (minus the global heap) with and without ownership.
754Again, the arrows indicate the direction memory conceptually moves for each kind of operation.
755For the 1:1 thread:heap relationship, a thread only allocates from its own heap, and without ownership, a thread only frees objects to its own heap, which means the heap is private to its owner thread and does not require any locking, called a \newterm{private heap}.
756For the T:1/T:H models with or without ownership or the 1:1 model with ownership, a thread may free objects to different heaps, which makes each heap publicly accessible to all threads, called a \newterm{public heap}.
757
758\begin{figure}
759\centering
760\subfloat[Ownership]{
761        \input{MultipleHeapsOwnership}
762} % subfloat
763\hspace{0.25in}
764\subfloat[No Ownership]{
765        \input{MultipleHeapsNoOwnership}
766} % subfloat
767\caption{Heap Ownership}
768\label{f:HeapsOwnership}
769\end{figure}
770
771% Figure~\ref{f:MultipleHeapStorageOwnership} shows the effect of ownership on storage layout.
772% (For simplicity, assume the heaps all use the same size of reserves storage.)
773% In contrast to Figure~\ref{f:MultipleHeapStorage}, each reserved area used by a heap only contains free storage for that particular heap because threads must return free objects back to the owner heap.
774% Passive false-sharing may still occur, if delayed ownership is used (see below).
775
776% \begin{figure}
777% \centering
778% \input{MultipleHeapsOwnershipStorage.pstex_t}
779% \caption{Multiple-Heap Storage with Ownership}
780% \label{f:MultipleHeapStorageOwnership}
781% \end{figure}
782
783The main advantage of ownership is preventing heap blowup by returning storage for reuse by the owner heap.
784Ownership prevents the classical problem where one thread performs allocations from one heap, passes the object to another thread, and the receiving thread deallocates the object to another heap, hence draining the initial heap of storage.
785Because multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
786The exception is for the 1:1 model if reserved memory does not overlap a cache-line because all allocated storage within a used area is associated with a single thread.
787In this case, there is no allocator-induced active false-sharing because two adjacent allocated objects used by different threads cannot share a cache-line.
788Finally, there is no allocator-induced passive false-sharing because two adjacent allocated objects used by different threads cannot occur as free objects are returned to the owner heap.
789% For example, in Figure~\ref{f:AllocatorInducedPassiveFalseSharing}, the deallocation by Thread$_2$ returns Object$_2$ back to Thread$_1$'s heap;
790% hence a subsequent allocation by Thread$_2$ cannot return this storage.
791The disadvantage of ownership is deallocating to another thread's heap so heaps are no longer private and require locks to provide safe concurrent access.
792
793Object ownership can be immediate or delayed, meaning free objects may be batched on a separate free list either by the returning or receiving thread.
794While the returning thread can batch objects, batching across multiple heaps is complex and there is no obvious time when to push back to the owner heap.
795It is better for returning threads to immediately return to the receiving thread's batch list as the receiving thread has better knowledge when to incorporate the batch list into its free pool.
796Batching leverages the fact that most allocation patterns use the contention-free fast-path, so locking on the batch list is rare for both the returning and receiving threads.
797Finally, it is possible for heaps to temporarily steal owned objects rather than return them immediately and then reallocate these objects again.
798It is unclear whether the complexity of this approach is worthwhile.
799% However, stealing can result in passive false-sharing.
800% For example, in Figure~\ref{f:AllocatorInducedPassiveFalseSharing}, Object$_2$ may be deallocated to Thread$_2$'s heap initially.
801% If Thread$_2$ reallocates Object$_2$ before it is returned to its owner heap, then passive false-sharing may occur.
802
803
804\begin{figure}
805\centering
806\subfloat[Object Headers]{
807        \input{ObjectHeaders}
808        \label{f:ObjectHeaders}
809} % subfloat
810\subfloat[Object Container]{
811        \input{Container}
812        \label{f:ObjectContainer}
813} % subfloat
814\caption{Header Placement}
815\label{f:HeaderPlacement}
816\end{figure}
817
818
819\subsection{Object Containers}
820\label{s:ObjectContainers}
821
822Associating header data with every allocation can result in significant internal fragmentation, as shown in Figure~\ref{f:ObjectHeaders}.
This is especially true if the headers contain redundant data, \eg object size may be the same for many objects because programs only allocate a small set of object sizes.
824As well, the redundant data can result in poor cache usage, since only a portion of the cache line is holding useful data from the program's perspective.
825Spatial locality can also be negatively affected leading to poor cache locality~\cite{Feng05}.
While the header and object are spatially together in memory, they are generally not accessed temporally together;
\eg an object is accessed by the program after it is allocated, while the header is accessed by the allocator after it is freed.
828
829The alternative factors common header data to a separate location in memory and organizes associated free storage into blocks called \newterm{object containers} (\newterm{superblocks} in~\cite{Berger00}), as in Figure~\ref{f:ObjectContainer}.
830The header for the container holds information necessary for all objects in the container;
831a trailer may also be used at the end of the container.
832Similar to the approach described for thread heaps in Section~\ref{s:MultipleHeaps}, if container boundaries do not overlap with memory of another container at crucial boundaries and all objects in a container are allocated to the same thread, allocator-induced active false-sharing is avoided.
833
834The difficulty with object containers lies in finding the object header/trailer given only the object address, since that is normally the only information passed to the deallocation operation.
835One way is to start containers on aligned addresses in memory, then truncate the lower bits of the object address to obtain the header address (or round up and subtract the trailer size to obtain the trailer address).
836For example, if an object at address 0xFC28\,EF08 is freed and containers are aligned on 64\,KB (0x0001\,0000) addresses, then the container header is at 0xFC28\,0000.
837
838Normally, a container has homogeneous objects, \eg object size and ownership.
839This approach greatly reduces internal fragmentation since far fewer headers are required, and potentially increases spatial locality as a cache line or page holds more objects since the objects are closer together.
840However, different sized objects are further apart in separate containers.
841Depending on the program, this may or may not improve locality.
842If the program uses several objects from a small number of containers in its working set, then locality is improved since fewer cache lines and pages are required.
843If the program uses many containers, there is poor locality, as both caching and paging increase.
844Another drawback is that external fragmentation may be increased since containers reserve space for objects that may never be allocated, \ie there are often multiple containers for each size only partially full.
845However, external fragmentation can be reduced by using small containers.
846
847Containers with heterogeneous objects implies different headers describing them, which complicates the problem of locating a specific header solely by an address.
848A couple of solutions can be used to implement containers with heterogeneous objects.
849However, the problem with allowing objects of different sizes is that the number of objects, and therefore headers, in a single container is unpredictable.
850One solution allocates headers at one end of the container, while allocating objects from the other end of the container;
851when the headers meet the objects, the container is full.
852Freed objects cannot be split or coalesced since this causes the number of headers to change.
853The difficulty in this strategy remains in finding the header for a specific object;
854in general, a search is necessary to find the object's header among the container headers.
855A second solution combines the use of container headers and individual object headers.
856Each object header stores the object's heterogeneous information, such as its size, while the container header stores the homogeneous information, such as the owner when using ownership.
857This approach allows containers to hold different types of objects, but does not completely separate headers from objects.
858% The benefit of the container in this case is to reduce some redundant information that is factored into the container header.
859
860% In summary, object containers trade off internal fragmentation for external fragmentation by isolating common administration information to remove/reduce internal fragmentation, but at the cost of external fragmentation as some portion of a container may not be used and this portion is unusable for other kinds of allocations.
861% A consequence of this tradeoff is its effect on spatial locality, which can produce positive or negative results depending on program access-patterns.
862
863
864\subsubsection{Container Ownership}
865\label{s:ContainerOwnership}
866
867Without ownership, objects in a container are deallocated to the heap currently associated with the thread that frees the object.
868Thus, different objects in a container may be on different heap free-lists. % (see Figure~\ref{f:ContainerNoOwnershipFreelist}).
869With ownership, all objects in a container belong to the same heap,
870% (see Figure~\ref{f:ContainerOwnershipFreelist}),
871so ownership of an object is determined by the container owner.
872If multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
873Only with the 1:1 model and ownership is active and passive false-sharing avoided (see Section~\ref{s:Ownership}).
874Passive false-sharing may still occur, if delayed ownership is used.
875Finally, a completely free container can become reserved storage and be reset to allocate objects of a new size or freed to the global heap.
876
877% \begin{figure}
878% \centering
879% \subfloat[No Ownership]{
880%       \input{ContainerNoOwnershipFreelist}
881%       \label{f:ContainerNoOwnershipFreelist}
882% } % subfloat
883% \vrule
884% \subfloat[Ownership]{
885%       \input{ContainerOwnershipFreelist}
886%       \label{f:ContainerOwnershipFreelist}
887% } % subfloat
888% \caption{Free-list Structure with Container Ownership}
889% \end{figure}
890
When a container changes ownership, the ownership of all objects within it changes as well.
892Moving a container involves moving all objects on the heap's free-list in that container to the new owner.
893This approach can reduce contention for the global heap, since each request for objects from the global heap returns a container rather than individual objects.
894
895Additional restrictions may be applied to the movement of containers to prevent active false-sharing.
896For example, if a container changes ownership through the global heap, then when a thread allocates an object from the newly acquired container it is actively false-sharing even though no objects are passed among threads.
897Note, once the thread frees the object, no more false sharing can occur until the container changes ownership again.
898To prevent this form of false sharing, container movement may be restricted to when all objects in the container are free.
899One implementation approach that increases the freedom to return a free container to the operating system involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area, again pushing storage management complexity back to the operating system.
900
901% \begin{figure}
902% \centering
903% \subfloat[]{
904%       \input{ContainerFalseSharing1}
905%       \label{f:ContainerFalseSharing1}
906% } % subfloat
907% \subfloat[]{
908%       \input{ContainerFalseSharing2}
909%       \label{f:ContainerFalseSharing2}
910% } % subfloat
911% \caption{Active False-Sharing using Containers}
912% \label{f:ActiveFalseSharingContainers}
913% \end{figure}
914
915Using containers with ownership increases external fragmentation since a new container for a requested object size must be allocated separately for each thread requesting it.
916% In Figure~\ref{f:ExternalFragmentationContainerOwnership}, using object ownership allocates 80\% more space than without ownership.
917
918% \begin{figure}
919% \centering
920% \subfloat[No Ownership]{
921%       \input{ContainerNoOwnership}
922% } % subfloat
923% \\
924% \subfloat[Ownership]{
925%       \input{ContainerOwnership}
926% } % subfloat
927% \caption{External Fragmentation with Container Ownership}
928% \label{f:ExternalFragmentationContainerOwnership}
929% \end{figure}
930
931
932\subsubsection{Container Size}
933\label{s:ContainerSize}
934
935One way to control the external fragmentation caused by allocating a large container for a small number of requested objects is to vary the size of the container.
936As described earlier, container boundaries need to be aligned on addresses that are a power of two to allow easy location of the header (by truncating lower bits).
937Aligning containers in this manner also determines the size of the container.
938However, the size of the container has different implications for the allocator.
939
940The larger the container, the fewer containers are needed, and hence, the fewer headers need to be maintained in memory, improving both internal fragmentation and potentially performance.
941However, with more objects in a container, there may be more objects that are unallocated, increasing external fragmentation.
942With smaller containers, not only are there more containers, but a second new problem arises where objects are larger than the container.
943In general, large objects, \eg greater than 64\,KB, are allocated directly from the operating system and are returned immediately to the operating system to reduce long-term external fragmentation.
944If the container size is small, \eg 1\,KB, then a 1.5\,KB object is treated as a large object, which is likely to be inappropriate.
945Ideally, it is best to use smaller containers for smaller objects, and larger containers for medium objects, which leads to the issue of locating the container header.
946
947In order to find the container header when using different sized containers, a super container is used (see~Figure~\ref{f:SuperContainers}).
948The super container spans several containers, contains a header with information for finding each container header, and starts on an aligned address.
949Super-container headers are found using the same method used to find container headers by dropping the lower bits of an object address.
950The containers within a super container may be different sizes or all the same size.
951If the containers in the super container are different sizes, then the super-container header must be searched to determine the specific container for an object given its address.
If all containers in the super container are the same size, \eg 16\,KB, then a specific container header can be found by a simple calculation.
953The free space at the end of a super container is used to allocate new containers.
954
955\begin{figure}
956\centering
957\input{SuperContainers}
958% \includegraphics{diagrams/supercontainer.eps}
959\caption{Super Containers}
960\label{f:SuperContainers}
961\end{figure}
962
963Minimal internal and external fragmentation is achieved by having as few containers as possible, each being as full as possible.
964It is also possible to achieve additional benefit by using larger containers for popular small sizes, as it reduces the number of containers with associated headers.
965However, this approach assumes it is possible for an allocator to determine in advance which sizes are popular.
966Keeping statistics on requested sizes allows the allocator to make a dynamic decision about which sizes are popular.
967For example, after receiving a number of allocation requests for a particular size, that size is considered a popular request size and larger containers are allocated for that size.
968If the decision is incorrect, larger containers than necessary are allocated that remain mostly unused.
969A programmer may be able to inform the allocator about popular object sizes, using a mechanism like @mallopt@, in order to select an appropriate container size for each object size.
970
971
972\subsubsection{Container Free-Lists}
973\label{s:containersfreelists}
974
975The container header allows an alternate approach for managing the heap's free-list.
Rather than maintain a global free-list throughout the heap, the containers are linked through their headers and only the local free objects within a container are linked together.
977Note, maintaining free lists within a container assumes all free objects in the container are associated with the same heap;
978thus, this approach only applies to containers with ownership.
979
980This alternate free-list approach can greatly reduce the complexity of moving all freed objects belonging to a container to another heap.
981To move a container using a global free-list, the free list is first searched to find all objects within the container.
982Each object is then removed from the free list and linked together to form a local free-list for the move to the new heap.
983With local free-lists in containers, the container is simply removed from one heap's free list and placed on the new heap's free list.
984Thus, when using local free-lists, the operation of moving containers is reduced from $O(N)$ to $O(1)$.
985However, there is the additional storage cost in the header, which increases the header size, and therefore internal fragmentation.
986
987% \begin{figure}
988% \centering
989% \subfloat[Global Free-List Among Containers]{
990%       \input{FreeListAmongContainers}
991%       \label{f:GlobalFreeListAmongContainers}
992% } % subfloat
993% \hspace{0.25in}
994% \subfloat[Local Free-List Within Containers]{
995%       \input{FreeListWithinContainers}
996%       \label{f:LocalFreeListWithinContainers}
997% } % subfloat
998% \caption{Container Free-List Structure}
999% \label{f:ContainerFreeListStructure}
1000% \end{figure}
1001
1002When all objects in the container are the same size, a single free-list is sufficient.
However, when objects in the container are different sizes, the header needs a free list for each size class when using a binning allocation algorithm, which can be a significant increase in the container-header size.
1004The alternative is to use a different allocation algorithm with a single free-list, such as a sequential-fit allocation-algorithm.
1005
1006
1007\subsubsection{Hybrid Private/Public Heap}
1008\label{s:HybridPrivatePublicHeap}
1009
1010Section~\ref{s:Ownership} discusses advantages and disadvantages of public heaps (T:H model and with ownership) and private heaps (thread heaps with ownership).
1011For thread heaps with ownership, it is possible to combine these approaches into a hybrid approach with both private and public heaps (see~Figure~\ref{f:HybridPrivatePublicHeap}).
1012The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup.
1013In the hybrid approach, a thread first allocates from its private heap and second from its public heap if no free memory exists in the private heap.
1014Similarly, a thread first deallocates an object to its private heap, and second to the public heap.
1015Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps.
Note, deallocation from the private to the public (dashed line) is unlikely because there are no obvious advantages unless the public heap provides the only interface to the global heap.
1017Finally, when a thread frees an object it does not own, the object is either freed immediately to its owner's public heap or put in the freeing thread's private heap for delayed ownership, which allows the freeing thread to temporarily reuse an object before returning it to its owner or batch objects for an owner heap into a single return.
1018
1019\begin{figure}
1020\centering
1021\input{PrivatePublicHeaps.pstex_t}
1022\caption{Hybrid Private/Public Heap for Per-thread Heaps}
1023\label{f:HybridPrivatePublicHeap}
1024% \vspace{10pt}
1025% \input{RemoteFreeList.pstex_t}
1026% \caption{Remote Free-List}
1027% \label{f:RemoteFreeList}
1028\end{figure}
1029
1030As mentioned, an implementation may have only one heap interact with the global heap, so the other heap can be simplified.
1031For example, if only the private heap interacts with the global heap, the public heap can be reduced to a lock-protected free-list of objects deallocated by other threads due to ownership, called a \newterm{remote free-list}.
1032To avoid heap blowup, the private heap allocates from the remote free-list when it reaches some threshold or it has no free storage.
1033Since the remote free-list is occasionally cleared during an allocation, this adds to that cost.
1034Clearing the remote free-list is $O(1)$ if the list can simply be added to the end of the private-heap's free-list, or $O(N)$ if some action must be performed for each freed object.
1035
1036If only the public heap interacts with other threads and the global heap, the private heap can handle thread-local allocations and deallocations without locking.
1037In this scenario, the private heap must deallocate storage after reaching a certain threshold to the public heap (and then eventually to the global heap from the public heap) or heap blowup can occur.
1038If the public heap does the major management, the private heap can be simplified to provide high-performance thread-local allocations and deallocations.
1039
1040The main disadvantage of each thread having both a private and public heap is the complexity of managing two heaps and their interactions in an allocator.
1041Interestingly, heap implementations often focus on either a private or public heap, giving the impression a single versus a hybrid approach is being used.
In many cases, the hybrid approach is actually being used, but the simpler heap is just folded into the complex heap, even though the operations logically belong in separate heaps.
1043For example, a remote free-list is actually a simple public-heap, but may be implemented as an integral component of the complex private-heap in an allocator, masking the presence of a hybrid approach.
1044
1045
1046\subsection{Allocation Buffer}
1047\label{s:AllocationBuffer}
1048
1049An allocation buffer is reserved memory (see Section~\ref{s:AllocatorComponents}) not yet allocated to the program, and is used for allocating objects when the free list is empty.
1050That is, rather than requesting new storage for a single object, an entire buffer is requested from which multiple objects are allocated later.
1051Any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or operating system, respectively.
1052The allocation buffer reduces contention and the number of global/operating-system calls.
1053For coalescing, a buffer is split into smaller objects by allocations, and recomposed into larger buffer areas during deallocations.
1054
1055Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts (simple bump allocation).
1056Furthermore, to prevent heap blowup, objects should be reused before allocating a new allocation buffer.
1057Thus, allocation buffers are often allocated more frequently at program/thread start, and then allocations often diminish.
1058
1059Using an allocation buffer with a thread heap avoids active false-sharing, since all objects in the allocation buffer are allocated to the same thread.
1060For example, if all objects sharing a cache line come from the same allocation buffer, then these objects are allocated to the same thread, avoiding active false-sharing.
1061Active false-sharing may still occur if objects are freed to the global heap and reused by another heap.
1062
1063Allocation buffers may increase external fragmentation, since some memory in the allocation buffer may never be allocated.
1064A smaller allocation buffer reduces the amount of external fragmentation, but increases the number of calls to the global heap or operating system.
1065The allocation buffer also slightly increases internal fragmentation, since a pointer is necessary to locate the next free object in the buffer.
1066
The unused part of a container, neither allocated nor freed, is an allocation buffer.
1068For example, when a container is created, rather than placing all objects within the container on the free list, the objects form an allocation buffer and are allocated from the buffer as allocation requests are made.
1069This lazy method of constructing objects is beneficial in terms of paging and caching.
1070For example, although an entire container, possibly spanning several pages, is allocated from the operating system, only a small part of the container is used in the working set of the allocator, reducing the number of pages and cache lines that are brought into higher levels of cache.
1071
1072
1073\subsection{Lock-Free Operations}
1074\label{s:LockFreeOperations}
1075
1076A \newterm{lock-free algorithm} guarantees safe concurrent-access to a data structure, so that at least one thread makes progress, but an individual thread has no execution bound and may starve~\cite[pp.~745--746]{Herlihy93}.
1077(A \newterm{wait-free algorithm} puts a bound on the number of steps any thread takes to complete an operation to prevent starvation.)
1078Lock-free operations can be used in an allocator to reduce or eliminate the use of locks.
1079While locks and lock-free data-structures often have equal performance, lock-free has the advantage of not holding a lock across preemption so other threads can continue to make progress.
1080With respect to the heap, these situations are unlikely unless all threads make extremely high use of dynamic-memory allocation, which can be an indication of poor design.
1081Nevertheless, lock-free algorithms can reduce the number of context switches, since a thread does not yield/block while waiting for a lock;
1082on the other hand, a thread may busy-wait for an unbounded period holding a processor.
1083Finally, lock-free implementations have greater complexity and hardware dependency.
1084Lock-free algorithms can be applied most easily to simple free-lists, \eg remote free-list, to allow lock-free insertion and removal from the head of a stack.
1085Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is correspondingly more complex.
1086Michael~\cite{Michael04} and Gidenstam \etal \cite{Gidenstam05} have created lock-free variations of the Hoard allocator.
1087
1088
1089\section{Allocator}
1090\label{c:Allocator}
1091
1092This section presents a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code), called llheap (low-latency heap), for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
1093The new allocator fulfills the GNU C Library allocator API~\cite{GNUallocAPI}.
1094
1095
1096\subsection{llheap}
1097
1098The primary design objective for llheap is low-latency across all allocator calls independent of application access-patterns and/or number of threads, \ie very seldom does the allocator have a delay during an allocator call.
1099(Large allocations requiring initialization, \eg zero fill, and/or copying are not covered by the low-latency objective.)
1100A direct consequence of this objective is very simple or no storage coalescing;
1101hence, llheap's design is willing to use more storage to lower latency.
1102This objective is apropos because systems research and industrial applications are striving for low latency and computers have huge amounts of RAM memory.
1103Finally, llheap's performance should be comparable with the current best allocators (see performance comparison in Section~\ref{c:Performance}).
1104
1105% The objective of llheap's new design was to fulfill following requirements:
1106% \begin{itemize}
1107% \item It should be concurrent and thread-safe for multi-threaded programs.
1108% \item It should avoid global locks, on resources shared across all threads, as much as possible.
1109% \item It's performance (FIX ME: cite performance benchmarks) should be comparable to the commonly used allocators (FIX ME: cite common allocators).
1110% \item It should be a lightweight memory allocator.
1111% \end{itemize}
1112
1113%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
1114
1115\subsection{Design Choices}
1116
1117% Some of the rejected designs are discussed because they show the path to the final design (see discussion in Section~\ref{s:MultipleHeaps}).
1118% Note, a few simple tests for a design choice were compared with the current best allocators to determine the viability of a design.
1119
1120
1121% \paragraph{T:1 model}
1122% Figure~\ref{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are shared among N KTs.
1123% This design leverages the fact that usually the allocation requests are less than 1024 bytes and there are only a few different request sizes.
1124% When KTs $\le$ N, the common bucket sizes are uncontented;
1125% when KTs $>$ N, the free buckets are contented and latency increases significantly.
1126% In all cases, a KT must acquire/release a lock, contented or uncontented, along the fast allocation path because a bucket is shared.
1127% Therefore, while threads are contending for a small number of buckets sizes, the buckets are distributed among them to reduce contention, which lowers latency;
1128% however, picking N is workload specific.
1129%
1130% \begin{figure}
1131% \centering
1132% \input{AllocDS1}
1133% \caption{T:1 with Shared Buckets}
1134% \label{f:T1SharedBuckets}
1135% \end{figure}
1136%
1137% Problems:
1138% \begin{itemize}
1139% \item
1140% Need to know when a KT is created/destroyed to assign/unassign a shared bucket-number from the memory allocator.
1141% \item
1142% When no thread is assigned a bucket number, its free storage is unavailable.
1143% \item
1144% All KTs contend for the global-pool lock for initial allocations, before free-lists get populated.
1145% \end{itemize}
1146% Tests showed having locks along the allocation fast-path produced a significant increase in allocation costs and any contention among KTs produces a significant spike in latency.
1147
1148% \paragraph{T:H model}
1149% Figure~\ref{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded (distributed) across the KTs.
1150% A KT can point directly to its assigned heap or indirectly through the corresponding heap bucket.
1151% When KT $\le$ N, the heaps might be uncontented;
1152% when KTs $>$ N, the heaps are contented.
1153% In all cases, a KT must acquire/release a lock, contented or uncontented along the fast allocation path because a heap is shared.
1154% By increasing N, this approach reduces contention but increases storage (time versus space);
1155% however, picking N is workload specific.
1156%
1157% \begin{figure}
1158% \centering
1159% \input{AllocDS2}
1160% \caption{T:H with Shared Heaps}
1161% \label{f:THSharedHeaps}
1162% \end{figure}
1163%
1164% Problems:
1165% \begin{itemize}
1166% \item
1167% Need to know when a KT is created/destroyed to assign/unassign a heap from the memory allocator.
1168% \item
1169% When no thread is assigned to a heap, its free storage is unavailable.
1170% \item
1171% Ownership issues arise (see Section~\ref{s:Ownership}).
1172% \item
1173% All KTs contend for the local/global-pool lock for initial allocations, before free-lists get populated.
1174% \end{itemize}
1175% Tests showed having locks along the allocation fast-path produced a significant increase in allocation costs and any contention among KTs produces a significant spike in latency.
1176
1177% \paragraph{T:H model, H = number of CPUs}
1178% This design is the T:H model but H is set to the number of CPUs on the computer or the number restricted to an application, \eg via @taskset@.
1179% (See Figure~\ref{f:THSharedHeaps} but with a heap bucket per CPU.)
1180% Hence, each CPU logically has its own private heap and local pool.
1181% A memory operation is serviced from the heap associated with the CPU executing the operation.
1182% This approach removes fastpath locking and contention, regardless of the number of KTs mapped across the CPUs, because only one KT is running on each CPU at a time (modulo operations on the global pool and ownership).
1183% This approach is essentially an M:N approach where M is the number if KTs and N is the number of CPUs.
1184%
1185% Problems:
1186% \begin{itemize}
1187% \item
1188% Need to know when a CPU is added/removed from the @taskset@.
1189% \item
1190% Need a fast way to determine the CPU a KT is executing on to access the appropriate heap.
1191% \item
1192% Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}.
1193% \begin{quote}
1194% A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable}
1195% \end{quote}
1196% If a KT is preempted during an allocation operation, the operating system can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.
1197% Note, the serially-reusable problem can occur in sequential programs with preemption, if the signal handler calls the preempted function, unless the function is serially reusable.
1198% Essentially, the serially-reusable problem is a race condition on an unprotected critical subsection, where the operating system is providing the second thread via the signal handler.
1199%
1200% Library @librseq@~\cite{librseq} was used to perform a fast determination of the CPU and to ensure all memory operations complete on one CPU using @librseq@'s restartable sequences, which restart the critical subsection after undoing its writes, if the critical subsection is preempted.
1201% \end{itemize}
1202% Tests showed that @librseq@ can determine the particular CPU quickly but setting up the restartable critical-subsection along the allocation fast-path produced a significant increase in allocation costs.
1203% Also, the number of undoable writes in @librseq@ is limited and restartable sequences cannot deal with user-level thread (UT) migration across KTs.
1204% For example, UT$_1$ is executing a memory operation by KT$_1$ on CPU$_1$ and a time-slice preemption occurs.
1205% The signal handler context switches UT$_1$ onto the user-level ready-queue and starts running UT$_2$ on KT$_1$, which immediately calls a memory operation.
1206% Since KT$_1$ is still executing on CPU$_1$, @librseq@ takes no action because it assumes KT$_1$ is still executing the same critical subsection.
1207% Then UT$_1$ is scheduled onto KT$_2$ by the user-level scheduler, and its memory operation continues in parallel with UT$_2$ using references into the heap associated with CPU$_1$, which corrupts CPU$_1$'s heap.
1208% If @librseq@ had an @rseq_abort@ which:
1209% \begin{enumerate}
1210% \item
1211% Marked the current restartable critical-subsection as cancelled so it restarts when attempting to commit.
1212% \item
1213% Do nothing if there is no current restartable critical subsection in progress.
1214% \end{enumerate}
1215% Then @rseq_abort@ could be called on the backside of a  user-level context-switching.
1216% A feature similar to this idea might exist for hardware transactional-memory.
1217% A significant effort was made to make this approach work but its complexity, lack of robustness, and performance costs resulted in its rejection.
1218
1219% \subsubsection{Allocation Fastpath}
1220% \label{s:AllocationFastpath}
1221
1222llheap's design was reviewed and changed multiple times during its development.  Only the final design choices are
1223discussed in this paper.
1224(See~\cite{Zulfiqar22} for a discussion of alternate choices and reasons for rejecting them.)
1225All designs were analyzed for the allocation/free \newterm{fastpath}, \ie when an allocation can immediately return free storage or returned storage is not coalesced.
The heap model chosen is 1:1, which is the T:H model with T = H, where there is one thread-local heap for each KT.
1227(See Figure~\ref{f:THSharedHeaps} but with a heap bucket per KT and no bucket or local-pool lock.)
1228Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted.
Heaps are uncontended for a KT's memory operations as every KT has its own thread-local heap, modulo operations on the global pool and ownership.
1230
1231Problems:
1232\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
1233\item
1234Need to know when a KT starts/terminates to create/delete its heap.
1235
1236\noindent
1237It is possible to leverage constructors/destructors for thread-local objects to get a general handle on when a KT starts/terminates.
1238\item
1239There is a classic \newterm{memory-reclamation} problem for ownership because storage passed to another thread can be returned to a terminated heap.
1240
1241\noindent
1242The classic solution only deletes a heap after all referents are returned, which is complex.
1243The cheap alternative is for heaps to persist for program duration to handle outstanding referent frees.
1244If old referents return storage to a terminated heap, it is handled in the same way as an active heap.
1245To prevent heap blowup, terminated heaps can be reused by new KTs, where a reused heap may be populated with free storage from a prior KT (external fragmentation).
1246In most cases, heap blowup is not a problem because programs have a small allocation set-size, so the free storage from a prior KT is apropos for a new KT.
1247\item
1248There can be significant external fragmentation as the number of KTs increases.
1249
1250\noindent
1251In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs.
1252Since the number of CPUs is relatively small, and a heap is also relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
1253\item
1254Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}.
1255\begin{quote}
1256A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable}
1257\end{quote}
1258If a KT is preempted during an allocation operation, the operating system can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.
1259Note, the serially-reusable problem can occur in sequential programs with preemption, if the signal handler calls the preempted function, unless the function is serially reusable.
1260Essentially, the serially-reusable problem is a race condition on an unprotected critical subsection, where the operating system is providing the second thread via the signal handler.
1261
1262Library @librseq@~\cite{librseq} was used to perform a fast determination of the CPU and to ensure all memory operations complete on one CPU using @librseq@'s restartable sequences, which restart the critical subsection after undoing its writes, if the critical subsection is preempted.
1263
1264%There is the same serially-reusable problem with UTs migrating across KTs.
1265\end{itemize}
1266Tests showed this design produced the closest performance match with the best current allocators, and code inspection showed most of these allocators use different variations of this approach.
1267
1268
1269\vspace{5pt}
1270\noindent
1271The conclusion from this design exercise is: any atomic fence, atomic instruction (lock free), or lock along the allocation fastpath produces significant slowdown.
1272For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps might be shared by multiple threads, even when KTs $\le$ N.
1273For the T:H=CPU and 1:1 models, locking is eliminated along the allocation fastpath.
1274However, T:H=CPU has poor operating-system support to determine the CPU id (heap id) and prevent the serially-reusable problem for KTs.
1275More operating system support is required to make this model viable, but there is still the serially-reusable problem with user-level threading.
1276So the 1:1 model had no atomic actions along the fastpath and no special operating-system support requirements.
1277The 1:1 model still has the serially-reusable problem with user-level threading, which is addressed in Section~\ref{s:UserlevelThreadingSupport}, and the greatest potential for heap blowup for certain allocation patterns.
1278
1279
1280% \begin{itemize}
1281% \item
1282% A decentralized design is better to centralized design because their concurrency is better across all bucket-sizes as design 1 shards a few buckets of selected sizes while other designs shards all the buckets. Decentralized designs shard the whole heap which has all the buckets with the addition of sharding @sbrk@ area. So Design 1 was eliminated.
1283% \item
1284% Design 2 was eliminated because it has a possibility of contention in-case of KT > N while Design 3 and 4 have no contention in any scenario.
1285% \item
1286% Design 3 was eliminated because it was slower than Design 4 and it provided no way to achieve user-threading safety using librseq. We had to use CFA interruption handling to achieve user-threading safety which has some cost to it.
1287% that  because of 4 was already slower than Design 3, adding cost of interruption handling on top of that would have made it even slower.
1288% \end{itemize}
1289% Of the four designs for a low-latency memory allocator, the 1:1 model was chosen for the following reasons:
1290
1291% \subsubsection{Advantages of distributed design}
1292%
1293% The distributed design of llheap is concurrent to work in multi-threaded applications.
1294% Some key benefits of the distributed design of llheap are as follows:
1295% \begin{itemize}
1296% \item
1297% The bump allocation is concurrent as memory taken from @sbrk@ is sharded across all heaps as bump allocation reserve. The call to @sbrk@ will be protected using locks but bump allocation (on memory taken from @sbrk@) will not be contended once the @sbrk@ call has returned.
1298% \item
1299% Low or almost no contention on heap resources.
1300% \item
1301% It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty.
1302% \item
1303% Distributed design avoids unnecessary locks on resources shared across all KTs.
1304% \end{itemize}
1305
1306\subsubsection{Allocation Latency}
1307
1308A primary goal of llheap is low latency, hence the name low-latency heap (llheap).
1309Two forms of latency are internal and external.
1310Internal latency is the time to perform an allocation, while external latency is the time to obtain/return storage from/to the operating system.
1311Ideally latency is $O(1)$ with a small constant.
1312
1313To obtain $O(1)$ internal latency means no searching on the allocation fastpath and largely prohibits coalescing, which leads to external fragmentation.
1314The mitigating factor is that most programs have well behaved allocation patterns, where the majority of allocation operations can be $O(1)$, and heap blowup does not occur without coalescing (although the allocation footprint may be slightly larger).
1315
1316To obtain $O(1)$ external latency means obtaining one large storage area from the operating system and subdividing it across all program allocations, which requires a good guess at the program storage high-watermark and potential large external fragmentation.
1317Excluding real-time operating-systems, operating-system operations are unbounded, and hence some external latency is unavoidable.
1318The mitigating factor is that operating-system calls can often be reduced if a programmer has a sense of the storage high-watermark and the allocator is capable of using this information (see @malloc_expansion@ \pageref{p:malloc_expansion}).
1319Furthermore, while operating-system calls are unbounded, many are now reasonably fast, so their latency is tolerable and infrequent.
1320
1321
1322%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
1323
1324\subsection{llheap Structure}
1325
1326Figure~\ref{f:llheapStructure} shows the design of llheap, which uses the following features:
13271:1 multiple-heap model to minimize the fastpath,
1328can be built with or without heap ownership,
1329headers per allocation versus containers,
1330no coalescing to minimize latency,
1331global heap memory (pool) obtained from the operating system using @mmap@ to create and reuse heaps needed by threads,
1332local reserved memory (pool) per heap obtained from global pool,
1333global reserved memory (pool) obtained from the operating system using @sbrk@ call,
1334optional fast-lookup table for converting allocation requests into bucket sizes,
1335optional statistic-counters table for accumulating counts of allocation operations.
1336
1337\begin{figure}
1338\centering
1339% \includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps}
1340\input{llheap}
1341\caption{llheap Structure}
1342\label{f:llheapStructure}
1343\end{figure}
1344
1345llheap starts by creating an array of $N$ global heaps from storage obtained using @mmap@, where $N$ is the number of computer cores, that persists for program duration.
1346There is a global bump-pointer to the next free heap in the array.
1347When this array is exhausted, another array of heaps is allocated.
1348There is a global top pointer for an intrusive linked-list to chain free heaps from terminated threads.
1349When statistics are turned on, there is a global top pointer for an intrusive linked-list to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.
1350
1351When a KT starts, a heap is allocated from the current array for exclusive use by the KT.
1352When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of number of heaps.
1353The free heaps are stored on stack so hot storage is reused first.
1354Preserving all heaps, created during the program lifetime, solves the storage lifetime problem when ownership is used.
1355This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially.
1356llheap can be configured with object ownership, where an object is freed to the heap from which it is allocated, or object no-ownership, where an object is freed to the KT's current heap.
1357
1358Each heap uses segregated free-buckets that have free objects distributed across 91 different sizes from 16 to 4M.
1359All objects in a bucket are of the same size.
1360The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the operating system.
1361Each free bucket of a specific size has two lists.
13621) A free stack used solely by the KT heap-owner, so push/pop operations do not require locking.
1363The free objects are a stack so hot storage is reused first.
13642) For ownership, a shared away-stack for KTs to return storage allocated by other KTs, so push/pop operations require locking.
1365When the free stack is empty, the entire ownership stack is removed and becomes the head of the corresponding free stack.
1366
1367Algorithm~\ref{alg:heapObjectAlloc} shows the allocation outline for an object of size $S$.
1368First, the allocation is divided into small (@sbrk@) or large (@mmap@).
1369For large allocations, the storage is mapped directly from the operating system.
1370For small allocations, $S$ is quantized into a bucket size.
1371Quantizing is performed using a binary search over the ordered bucket array.
1372An optional optimization is fast lookup $O(1)$ for sizes < 64K from a 64K array of type @char@, where each element has an index to the corresponding bucket.
1373The @char@ type restricts the number of bucket sizes to 256.
1374For $S$ > 64K, a binary search is used.
1375Then, the allocation storage is obtained from the following locations (in order), with increasing latency:
1376bucket's free stack,
1377bucket's away stack,
1378heap's local pool,
1379global pool,
1380operating system (@sbrk@).
1381
1382\begin{algorithm}
1383\caption{Dynamic object allocation of size $S$}\label{alg:heapObjectAlloc}
1384\begin{algorithmic}[1]
1385\State $\textit{O} \gets \text{NULL}$
1386\If {$S >= \textit{mmap-threshold}$}
1387        \State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$
1388\Else
1389        \State $\textit{B} \gets \text{smallest free-bucket} \geq S$
1390        \If {$\textit{B's free-list is empty}$}
1391                \If {$\textit{B's away-list is empty}$}
1392                        \If {$\textit{heap's allocation buffer} < S$}
1393                                \State $\text{get allocation from global pool (which might call \lstinline{sbrk})}$
1394                        \EndIf
1395                        \State $\textit{O} \gets \text{bump allocate an object of size S from allocation buffer}$
1396                \Else
1397                        \State $\textit{merge B's away-list into free-list}$
1398                        \State $\textit{O} \gets \text{pop an object from B's free-list}$
1399                \EndIf
1400        \Else
1401                \State $\textit{O} \gets \text{pop an object from B's free-list}$
1402        \EndIf
1403        \State $\textit{O's owner} \gets \text{B}$
1404\EndIf
1405\State $\Return \textit{ O}$
1406\end{algorithmic}
1407\end{algorithm}
1408
1409\begin{algorithm}
1410\caption{Dynamic object free at address $A$ with object ownership}\label{alg:heapObjectFreeOwn}
1411\begin{algorithmic}[1]
1412\If {$\textit{A mapped allocation}$}
1413        \State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
1414\Else
1415        \State $\text{B} \gets \textit{O's owner}$
1416        \If {$\textit{B is thread-local heap's bucket}$}
1417                \State $\text{push A to B's free-list}$
1418        \Else
1419                \State $\text{push A to B's away-list}$
1420        \EndIf
1421\EndIf
1422\end{algorithmic}
1423\end{algorithm}
1424
1425\begin{algorithm}
1426\caption{Dynamic object free at address $A$ without object ownership}\label{alg:heapObjectFreeNoOwn}
1427\begin{algorithmic}[1]
1428\If {$\textit{A mapped allocation}$}
1429        \State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
1430\Else
1431        \State $\text{B} \gets \textit{O's owner}$
1432        \If {$\textit{B is thread-local heap's bucket}$}
1433                \State $\text{push A to B's free-list}$
1434        \Else
1435                \State $\text{C} \gets \textit{thread local heap's bucket with same size as B}$
1436                \State $\text{push A to C's free-list}$
1437        \EndIf
1438\EndIf
1439\end{algorithmic}
1440\end{algorithm}
1441
1442
1443Algorithm~\ref{alg:heapObjectFreeOwn} shows the de-allocation (free) outline for an object at address $A$ with ownership.
1444First, the address is divided into small (@sbrk@) or large (@mmap@).
1445For large allocations, the storage is unmapped back to the operating system.
1446For small allocations, the bucket associated with the request size is retrieved.
1447If the bucket is local to the thread, the allocation is pushed onto the thread's associated bucket.
1448If the bucket is not local to the thread, the allocation is pushed onto the owning thread's associated away stack.
1449
1450Algorithm~\ref{alg:heapObjectFreeNoOwn} shows the de-allocation (free) outline for an object at address $A$ without ownership.
1451The algorithm is the same as for ownership except if the bucket is not local to the thread.
1452Then the corresponding bucket of the owner thread is computed for the deallocating thread, and the allocation is pushed onto the deallocating thread's bucket.
1453
1454Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through the @malloc@ and @free@ routines, which are the only routines to directly access and manage the internal data structures of the heap.
1455Other allocation operations, \eg @calloc@, @memalign@, and @realloc@, are composed of calls to @malloc@ and possibly @free@, and may manipulate header information after storage is allocated.
1456This design simplifies heap-management code during development and maintenance.
1457
1458
1459\subsubsection{Alignment}
1460
1461Most dynamic memory allocations have a minimum storage alignment for the contained object(s).
1462Often the minimum memory alignment, M, is the bus width (32 or 64-bit) or the largest register (double, long double) or largest atomic instruction (DCAS) or vector data (MMMX).
1463In general, the minimum storage alignment is 8/16-byte boundary on 32/64-bit computers.
1464For consistency, the object header is normally aligned at this same boundary.
1465Larger alignments must be a power of 2, such as page alignment (4/8K).
1466Any alignment request, N, $\le$ the minimum alignment is handled as a normal allocation with minimal alignment.
1467
1468For alignments greater than the minimum, the obvious approach for aligning to address @A@ is: compute the next address that is a multiple of @N@ after the current end of the heap, @E@, plus room for the header before @A@ and the size of the allocation after @A@, moving the end of the heap to @E'@.
1469\begin{center}
1470\input{Alignment1}
1471\end{center}
1472The storage between @E@ and @H@ is chained onto the appropriate free list for future allocations.
1473The same approach is used for sufficiently large free blocks, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage.
1474In this approach, the aligned address @A@ is the same as the allocated storage address @P@, \ie @P@ $=$ @A@ for all allocation routines, which simplifies deallocation.
1475However, if there are a large number of aligned requests, this approach leads to memory fragmentation from the small free areas around the aligned object.
1476As well, it does not work for large allocations, where many memory allocators switch from program @sbrk@ to operating-system @mmap@.
1477The reason is that @mmap@ only starts on a page boundary, and it is difficult to reuse the storage before the alignment boundary for other requests.
1478Finally, this approach is incompatible with allocator designs that funnel allocation requests through @malloc@ as it directly manipulates management information within the allocator to optimize the space/time of a request.
1479
1480Instead, llheap alignment is accomplished by making a \emph{pessimistic} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg:
1481\begin{center}
1482\input{Alignment2}
1483\end{center}
1484The amount of storage necessary is @alignment - M + size@, which ensures there is an address, @A@, after the storage returned from @malloc@, @P@, that is a multiple of @alignment@ followed by sufficient storage for the data object.
1485The approach is pessimistic because if @P@ already has the correct alignment @N@, the initial allocation has already requested sufficient space to move to the next multiple of @N@.
1486For this special case, there is @alignment - M@ bytes of unused storage after the data object, which subsequently can be used by @realloc@.
1487
1488Note, the address returned is @A@, which is subsequently returned to @free@.
1489However, to correctly free the allocated object, the value @P@ must be computable, since that is the value generated by @malloc@ and returned within @memalign@.
1490Hence, there must be a mechanism to detect when @P@ $\neq$ @A@ and how to compute @P@ from @A@.
1491
1492The llheap approach uses two headers:
1493the \emph{original} header associated with a memory allocation from @malloc@, and a \emph{fake} header within this storage before the alignment boundary @A@, which is returned from @memalign@, e.g.:
1494\begin{center}
1495\input{Alignment2Impl}
1496\end{center}
1497Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments greater than @M@.
1498When @P@ $\neq$ @A@, the minimum distance between @P@ and @A@ is @M@ bytes, due to the pessimistic storage-allocation.
1499Therefore, there is always room for an @M@-byte fake header before @A@.
1500
1501The fake header must supply an indicator to distinguish it from a normal header and the location of address @P@ generated by @malloc@.
1502This information is encoded as an offset from A to P and the initial alignment (discussed in Section~\ref{s:ReallocStickyProperties}).
1503To distinguish a fake header from a normal header, the least-significant bit of the alignment is used because the offset participates in multiple calculations, while the alignment is just remembered data.
1504\begin{center}
1505\input{FakeHeader}
1506\end{center}
1507
1508
1509\subsubsection{\lstinline{realloc} and Sticky Properties}
1510\label{s:ReallocStickyProperties}
1511
1512The allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually.
1513\begin{flushleft}
1514\begin{tabular}{ll}
1515\multicolumn{1}{c}{\textbf{realloc pattern}} & \multicolumn{1}{c}{\textbf{manually}} \\
1516\begin{lstlisting}
1517T * naddr = realloc( oaddr, newSize );
1518
1519
1520
1521\end{lstlisting}
1522&
1523\begin{lstlisting}
1524T * naddr = (T *)malloc( newSize ); $\C[2.4in]{// new storage}$
1525memcpy( naddr, addr, oldSize );  $\C{// copy old bytes}$
1526free( addr );                           $\C{// free old storage}$
1527addr = naddr;                           $\C{// change pointer}\CRT$
1528\end{lstlisting}
1529\end{tabular}
1530\end{flushleft}
1531The realloc pattern leverages available storage at the end of an allocation due to bucket sizes, possibly eliminating a new allocation and copying.
1532This pattern is not used enough to reduce storage management costs.
1533In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the allocation pattern.
1534
1535The hidden problem for this pattern is the effect of zero fill and alignment with respect to reallocation.
1536Are these properties transient or persistent (``sticky'')?
1537For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size?
1538That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are initial allocation properties preserved?
1539Currently, allocation properties are not preserved, so subsequent use of @realloc@ storage may cause inefficient execution or errors due to lack of zero fill or alignment.
1540This silent problem is unintuitive to programmers and difficult to locate because it is transient.
1541To prevent these problems, llheap preserves initial allocation properties for the lifetime of an allocation and the semantics of @realloc@ are augmented to preserve these properties, with additional query routines.
1542This change makes the realloc pattern efficient and safe.
1543
1544
1545\subsubsection{Header}
1546
1547To preserve allocation properties requires storing additional information with an allocation.
1548The best available option is the header, where Figure~\ref{f:llheapNormalHeader} shows the llheap storage layout.
1549The header has two data fields sized appropriately for 32/64-bit alignment requirements.
1550The first field is a union of three values:
1551\begin{description}
1552\item[bucket pointer]
1553is for allocated storage and points back to the bucket associated with this storage request (see Figure~\ref{f:llheapStructure} for the fields accessible in a bucket).
1554\item[mapped size]
1555is for mapped storage and is the storage size for use in unmapping.
1556\item[next free block]
1557is for free storage and is an intrusive pointer chaining same-size free blocks onto a bucket's free stack.
1558\end{description}
1559The second field remembers the request size versus the allocation (bucket) size, \eg request 42 bytes which is rounded up to 64 bytes.
1560Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors and also helps in memory management.
1561
1562\begin{figure}
1563\centering
1564\input{Header}
1565\caption{llheap Normal Header}
1566\label{f:llheapNormalHeader}
1567\end{figure}
1568
1569The low-order 3-bits of the first field are \emph{unused} for any stored values as these values are 16-byte aligned by default, whereas the second field may use all of its bits.
1570The 3 unused bits are used to represent mapped allocation, zero filled, and alignment, respectively.
1571Note, the alignment bit is not used in the normal header and the zero-filled/mapped bits are not used in the fake header.
1572This implementation allows a fast test if any of the lower 3-bits are on (@&@ and compare).
1573If no bits are on, it implies a basic allocation, which is handled quickly;
1574otherwise, the bits are analysed and appropriate actions are taken for the complex cases.
1575Since most allocations are basic, they will take significantly less time as the memory operations will be done along the allocation and free fastpath.
1576
1577
1578\subsection{Statistics and Debugging}
1579
1580llheap can be built to accumulate fast and largely contention-free allocation statistics to help understand allocation behaviour.
1581Incrementing statistic counters must appear on the allocation fastpath.
1582As noted, any atomic operation along the fastpath produces a significant increase in allocation costs.
1583To make statistics performant enough for use on running systems, each heap has its own set of statistic counters, so heap operations do not require atomic operations.
1584
1585To locate all statistic counters, heaps are linked together in statistics mode, and this list is locked and traversed to sum all counters across heaps.
1586Note, the list is locked to prevent errors traversing an active list;
1587the statistics counters are not locked and can flicker during accumulation.
1588Figure~\ref{f:StatiticsOutput} shows an example of statistics output, which covers all allocation operations and information about deallocating storage not owned by a thread.
1589No other memory allocator studied provides as comprehensive statistical information.
1590Finally, these statistics were invaluable during the development of this work for debugging and verifying correctness and should be equally valuable to application developers.
1591
1592\begin{figure}
1593\begin{lstlisting}
1594Heap statistics: (storage request / allocation)
1595  malloc >0 calls 2,766; 0 calls 2,064; storage 12,715 / 13,367 bytes
1596  aalloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
1597  calloc >0 calls 6; 0 calls 0; storage 1,008 / 1,104 bytes
1598  memalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
1599  amemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
1600  cmemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
1601  resize >0 calls 0; 0 calls 0; storage 0 / 0 bytes
1602  realloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
1603  free !null calls 2,766; null calls 4,064; storage 12,715 / 13,367 bytes
1604  away pulls 0; pushes 0; storage 0 / 0 bytes
1605  sbrk calls 1; storage 10,485,760 bytes
1606  mmap calls 10,000; storage 10,000 / 10,035 bytes
1607  munmap calls 10,000; storage 10,000 / 10,035 bytes
1608  threads started 4; exited 3
1609  heaps new 4; reused 0
1610\end{lstlisting}
1611\caption{Statistics Output}
1612\label{f:StatiticsOutput}
1613\end{figure}
1614
1615llheap can also be built with debug checking, which inserts many asserts along all allocation paths.
1616These assertions detect incorrect allocation usage, like double frees, unfreed storage, or memory corruptions because internal values (like header fields) are overwritten.
1617These checks are best effort as opposed to complete allocation checking as in @valgrind@.
1618Nevertheless, the checks detect many allocation problems.
1619There is an unfortunate problem in detecting unfreed storage because some library routines assume their allocations have life-time duration, and hence, do not free their storage.
1620For example, @printf@ allocates a 1024-byte buffer on the first call and never deletes this buffer.
1621To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ \pageref{p:malloc_unfreed}), and it is subtracted from the total allocate/free difference.
1622Determining the amount of never-freed storage is annoying, but once done, any warnings of unfreed storage are application related.
1623
1624Tests indicate only a 30\% performance decrease when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistics is mitigated by limited calls, often only one at the end of the program.
1625
1626
1627\subsection{User-level Threading Support}
1628\label{s:UserlevelThreadingSupport}
1629
1630The serially-reusable problem (see \pageref{p:SeriallyReusable}) occurs for kernel threads in the ``T:H model, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
1631The solution is to prevent interrupts that can result in a CPU or KT change during operations that are logically critical subsections such as starting a memory operation on one KT and completing it on another.
1632Locking these critical subsections negates any attempt for a quick fastpath and results in high contention.
1633For user-level threading, the serially-reusable problem appears with time slicing for preemptable scheduling, as the signal handler context switches to another user-level thread.
1634Without time slicing, a user thread performing a long computation can prevent the execution of (starve) other threads.
1635To prevent starvation for a memory-allocation-intensive thread, \ie the time slice always triggers in an allocation critical-subsection for one thread so the thread never gets time sliced, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
1636The rollforward flag is tested at the end of each allocation funnel routine (see \pageref{p:FunnelRoutine}), and if set, it is reset and a voluntary yield (context switch) is performed to allow other threads to execute.
1637
1638llheap uses two techniques to detect when execution is in an allocation operation or routine called from allocation operation, to abort any time slice during this period.
1639On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting kernel-thread-local flags so the signal handler aborts immediately.
1640On the fastpath, disabling/enabling interrupts is too expensive as accessing kernel-thread-local storage can be expensive and not user-thread-safe.
1641For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing.
1642Hence, there is a window between loading the kernel-thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.
1643
1644The fast technique (with lower run time cost) is to define a special code subsection and place all non-interruptible routines in this subsection.
1645The linker places all code in this subsection into a contiguous block of memory, but the order of routines within the block is unspecified.
1646Then, the signal handler compares the program counter at the point of interrupt with the start and end address of the non-interruptible subsection, and aborts if executing within this subsection and sets the rollforward flag.
1647This technique is fragile because any calls in the non-interruptible code outside of the non-interruptible subsection (like @sbrk@) must be bracketed with disable/enable interrupts and these calls must be along the slowpath.
1648Hence, for correctness, this approach requires inspection of generated assembler code for routines placed in the non-interruptible subsection.
1649This issue is mitigated by the llheap funnel design so only funnel routines and a few statistics routines are placed in the non-interruptible subsection and their assembler code examined.
1650These techniques are used in both the \uC and \CFA versions of llheap as both of these systems have user-level threading.
1651
1652
1653\subsection{Bootstrapping}
1654
1655There are problems bootstrapping a memory allocator.
1656\begin{enumerate}
1657\item
1658Programs can be statically or dynamically linked.
1659\item
1660The order in which the linker schedules startup code is poorly supported so it cannot be controlled entirely.
1661\item
1662Knowing a KT's start and end independently from the KT code is difficult.
1663\end{enumerate}
1664
1665For static linking, the allocator is loaded with the program.
1666Hence, allocation calls immediately invoke the allocator operation defined by the loaded allocation library and there is only one memory allocator used in the program.
1667This approach allows allocator substitution by placing an allocation library before any other in the linked/load path.
1668
1669Allocator substitution is similar for dynamic linking, but the problem is that the dynamic loader starts first and needs to perform dynamic allocations \emph{before} the substitution allocator is loaded.
1670As a result, the dynamic loader uses a default allocator until the substitution allocator is loaded, after which all allocation operations are handled by the substitution allocator, including from the dynamic loader.
1671Hence, some part of the @sbrk@ area may be used by the default allocator and statistics about allocation operations cannot be correct.
1672Furthermore, dynamic linking goes through trampolines, so there is an additional cost along the allocator fastpath for all allocation operations.
1673Testing showed up to a 5\% performance decrease with dynamic linking as compared to static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.
1674
1675All allocator libraries need to perform startup code to initialize data structures, such as the heap array for llheap.
1676The problem is getting initialization done before the first allocator call.
1677However, there does not seem to be a mechanism to tell either the static or dynamic loader to first perform initialization code before any calls to a loaded library.
1678Also, initialization code of other libraries and the run-time environment may call memory allocation routines such as \lstinline{malloc}.
1679This compounds the situation as there is no mechanism to tell either the static or dynamic loader to first perform the initialization code of the memory allocator before any other initialization that may involve a dynamic memory allocation call.
1680As a result, calls to allocation routines occur without initialization.
1681To deal with this problem, it is necessary to put a conditional initialization check along the allocation fastpath to trigger initialization (singleton pattern).
1682
1683Two other important execution points are program startup and termination, which include prologue and epilogue code to bootstrap a program, of which programmers are unaware.
1684For example, dynamic-memory allocations before/after the application starts should not be considered in statistics because the application does not make these calls.
1685llheap establishes these two points using routines:
1686\begin{lstlisting}
1687__attribute__(( constructor( 100 ) )) static void startup( void ) {
1688        // clear statistic counters
1689        // reset allocUnfreed counter
1690}
1691__attribute__(( destructor( 100 ) )) static void shutdown( void ) {
1692        // sum allocUnfreed for all heaps
1693        // subtract global unfreed storage
1694        // if allocUnfreed > 0 then print warning message
1695}
1696\end{lstlisting}
1697which use global constructor/destructor priority 100, where the linker calls these routines at program prologue/epilogue in increasing/decreasing order of priority.
1698Application programs may only use global constructor/destructor priorities greater than 100.
1699Hence, @startup@ is called after the program prologue but before the application starts, and @shutdown@ is called after the program terminates but before the program epilogue.
1700By resetting counters in @startup@, prologue allocations are ignored, and checking unfreed storage in @shutdown@ checks only application memory management, ignoring the program epilogue.
1701
1702While @startup@/@shutdown@ apply to the program KT, a concurrent program creates additional KTs that do not trigger these routines.
1703However, it is essential for the allocator to know when each KT is started/terminated.
1704One approach is to create a thread-local object with a constructor/destructor, which is triggered after a new KT starts and before it terminates, respectively.
1705\begin{lstlisting}
1706struct ThreadManager {
1707        volatile bool pgm_thread;
1708        ThreadManager() {} // unusable
1709        ~ThreadManager() { if ( pgm_thread ) heapManagerDtor(); }
1710};
1711static thread_local ThreadManager threadManager;
1712\end{lstlisting}
1713Unfortunately, thread-local variables are created lazily, \ie on the first dereference of @threadManager@, which then triggers its constructor.
1714Therefore, the constructor is useless for knowing when a KT starts because the KT must reference it, and the allocator does not control the application KT.
1715Fortunately, the singleton pattern needed for initializing the program KT also triggers KT allocator initialization, which can then reference @pgm_thread@ to call @threadManager@'s constructor, otherwise its destructor is not called.
1716Now when a KT terminates, @~ThreadManager@ is called to chain it onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
1717The conditional destructor call prevents closing down the program heap, which must remain available because epilogue code may free more storage.
1718
1719Finally, there is a recursive problem when the singleton pattern dereferences @pgm_thread@ to initialize the thread-local object, because its initialization calls @atExit@, which immediately calls @malloc@ to obtain storage.
1720This recursion is handled with another thread-local flag to prevent double initialization.
1721A similar problem exists when the KT terminates and calls member @~ThreadManager@, because immediately afterwards, the terminating KT calls @free@ to deallocate the storage obtained from the @atExit@.
1722In the meantime, the terminated heap has been put on the global-heap free-stack, and may be in use by a new KT, so the @atExit@ free is handled as a free to another heap and put onto the away list using locking.
1723
1724For user threading systems, the KTs are controlled by the runtime, and hence, start/end pointers are known and interact directly with the llheap allocator for \uC and \CFA, which eliminates or simplifies several of these problems.
1725The following API was created to provide interaction between the language runtime and the allocator.
1726\begin{lstlisting}
1727void startThread();                     $\C{// KT starts}$
1728void finishThread();                    $\C{// KT ends}$
1729void startup();                         $\C{// when application code starts}$
1730void shutdown();                        $\C{// when application code ends}$
1731bool traceHeap();                       $\C{// enable allocation/free printing for debugging}$
1732bool traceHeapOn();                     $\C{// start printing allocation/free calls}$
1733bool traceHeapOff();                    $\C{// stop printing allocation/free calls}$
1734\end{lstlisting}
1735This kind of API is necessary to allow concurrent runtime systems to interact with different memory allocators in a consistent way.
1736
1737%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
1738
1739\subsection{Added Features and Methods}
1740
1741The C dynamic-allocation API (see Figure~\ref{f:CDynamicAllocationAPI}) is neither orthogonal nor complete.
1742For example,
1743\begin{itemize}
1744\item
1745It is possible to zero fill or align an allocation but not both.
1746\item
1747It is \emph{only} possible to zero fill an array allocation.
1748\item
1749It is not possible to resize a memory allocation without data copying.
1750\item
1751@realloc@ does not preserve initial allocation properties.
1752\end{itemize}
1753As a result, programmers must provide these options, which is error prone, resulting in blaming the entire programming language for a poor dynamic-allocation API.
1754Furthermore, newer programming languages have better type systems that can provide safer and more powerful APIs for memory allocation.
1755
1756\begin{figure}
1757\begin{lstlisting}
1758void * malloc( size_t size );
1759void * calloc( size_t nmemb, size_t size );
1760void * realloc( void * ptr, size_t size );
1761void * reallocarray( void * ptr, size_t nmemb, size_t size );
1762void free( void * ptr );
1763void * memalign( size_t alignment, size_t size );
1764void * aligned_alloc( size_t alignment, size_t size );
1765int posix_memalign( void ** memptr, size_t alignment, size_t size );
1766void * valloc( size_t size );
1767void * pvalloc( size_t size );
1768
1769struct mallinfo mallinfo( void );
1770int mallopt( int param, int val );
1771int malloc_trim( size_t pad );
1772size_t malloc_usable_size( void * ptr );
1773void malloc_stats( void );
1774int malloc_info( int options, FILE * fp );
1775\end{lstlisting}
1776\caption{C Dynamic-Allocation API}
1777\label{f:CDynamicAllocationAPI}
1778\end{figure}
1779
1780The following presents design and API changes for C, \CC (\uC), and \CFA, all of which are implemented in llheap.
1781
1782
1783\subsubsection{Out of Memory}
1784
1785Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory;
1786hence the need to return an alternate value for a zero-sized allocation.
1787A different approach allowed by the C API is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.
1788In theory, notifying the programmer of memory failure allows recovery;
1789in practice, it is almost impossible to gracefully recover when out of memory.
1790Hence, the cheaper approach of returning @nullptr@ for a zero-sized allocation is chosen because no pseudo allocation is necessary.
1791
1792
1793\subsubsection{C Interface}
1794
1795For C, it is possible to increase functionality and orthogonality of the dynamic-memory API to make allocation better for programmers.
1796
1797For existing C allocation routines:
1798\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
1799\item
1800@calloc@ sets the sticky zero-fill property.
1801\item
1802@memalign@, @aligned_alloc@, @posix_memalign@, @valloc@ and @pvalloc@ set the sticky alignment property.
1803\item
1804@realloc@ and @reallocarray@ preserve sticky properties.
1805\end{itemize}
1806
1807The C dynamic-memory API is extended with the following routines:
1808
1809\paragraph{\lstinline{void * aalloc( size_t dim, size_t elemSize )}}
1810extends @calloc@ for allocating a dynamic array of objects without calculating the total size of the array explicitly but \emph{without} zero-filling the memory.
1811@aalloc@ is significantly faster than @calloc@, which is the only alternative given by the standard memory-allocation routines.
1812
1813\noindent\textbf{Usage}
1814@aalloc@ takes two parameters.
1815\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
1816\item
1817@dim@: number of array objects
1818\item
1819@elemSize@: size of array object
1820\end{itemize}
1821It returns the address of the dynamic array or @NULL@ if either @dim@ or @elemSize@ are zero.
1822
1823\paragraph{\lstinline{void * resize( void * oaddr, size_t size )}}
1824extends @realloc@ for resizing an existing allocation \emph{without} copying previous data into the new allocation or preserving sticky properties.
1825@resize@ is significantly faster than @realloc@, which is the only alternative.
1826
1827\noindent\textbf{Usage}
1828@resize@ takes two parameters.
1829\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
1830\item
1831@oaddr@: address to be resized
1832\item
1833@size@: new allocation size (smaller or larger than previous)
1834\end{itemize}
1835It returns the address of the old or new storage with the specified new size or @NULL@ if @size@ is zero.
1836
1837\paragraph{\lstinline{void * amemalign( size_t alignment, size_t dim, size_t elemSize )}}
1838extends @aalloc@ and @memalign@ for allocating an aligned dynamic array of objects.
1839Sets sticky alignment property.
1840
1841\noindent\textbf{Usage}
1842@amemalign@ takes three parameters.
1843\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
1844\item
1845@alignment@: alignment requirement
1846\item
1847@dim@: number of array objects
1848\item
1849@elemSize@: size of array object
1850\end{itemize}
1851It returns the address of the aligned dynamic-array or @NULL@ if either @dim@ or @elemSize@ are zero.
1852
1853\paragraph{\lstinline{void * cmemalign( size_t alignment, size_t dim, size_t elemSize )}}
1854extends @amemalign@ with zero fill and has the same usage as @amemalign@.
1855Sets sticky zero-fill and alignment property.
1856It returns the address of the aligned, zero-filled dynamic-array or @NULL@ if either @dim@ or @elemSize@ are zero.
1857
1858\paragraph{\lstinline{size_t malloc_alignment( void * addr )}}
1859returns the alignment of the dynamic object for use in aligning similar allocations.
1860
1861\noindent\textbf{Usage}
1862@malloc_alignment@ takes one parameter.
1863\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
1864\item
1865@addr@: address of an allocated object.
1866\end{itemize}
1867It returns the alignment of the given object, where objects not allocated with alignment return the minimal allocation alignment.
1868
1869\paragraph{\lstinline{bool malloc_zero_fill( void * addr )}}
1870returns true if the object has the zero-fill sticky property for use in zero filling similar allocations.
1871
1872\noindent\textbf{Usage}
1873@malloc_zero_fill@ takes one parameter.
1874
1875\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
1876\item
1877@addr@: address of an allocated object.
1878\end{itemize}
1879It returns true if the zero-fill sticky property is set and false otherwise.
1880
1881\paragraph{\lstinline{size_t malloc_size( void * addr )}}
1882returns the request size of the dynamic object (updated when an object is resized) for use in similar allocations.
1883See also @malloc_usable_size@.
1884
1885\noindent\textbf{Usage}
1886@malloc_size@ takes one parameter.
1887\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
1888\item
1889@addr@: address of an allocated object.
1890\end{itemize}
1891It returns the request size or zero if @addr@ is @NULL@.
1892
1893\paragraph{\lstinline{int malloc_stats_fd( int fd )}}
1894changes the file descriptor where @malloc_stats@ writes statistics (default @stdout@).
1895
1896\noindent\textbf{Usage}
1897@malloc_stats_fd@ takes one parameter.
1898\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
1899\item
1900@fd@: file descriptor.
1901\end{itemize}
1902It returns the previous file descriptor.
1903
1904\paragraph{\lstinline{size_t malloc_expansion()}}
1905\label{p:malloc_expansion}
1906sets the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation request.
1907It returns the heap extension size used throughout a program when requesting more memory from the system using @sbrk@ system-call, \ie called once at heap initialization.
1908
1909\paragraph{\lstinline{size_t malloc_mmap_start()}}
1910sets the crossover between allocations occurring in the @sbrk@ area or separately mapped.
1911It returns the crossover point used throughout a program, \ie called once at heap initialization.
1912
1913\paragraph{\lstinline{size_t malloc_unfreed()}}
1914\label{p:malloc_unfreed}
1915amount subtracted to adjust for unfreed program storage (debug only).
1916It returns the new subtraction amount and is called by @malloc_stats@.
1917
1918
1919\subsubsection{\CC Interface}
1920
1921The following extensions take advantage of overload polymorphism in the \CC type-system.
1922
1923\paragraph{\lstinline{void * resize( void * oaddr, size_t nalign, size_t size )}}
1924extends @resize@ with an alignment re\-quirement.
1925
1926\noindent\textbf{Usage}
1927takes three parameters.
1928\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
1929\item
1930@oaddr@: address to be resized
1931\item
1932@nalign@: alignment requirement
1933\item
1934@size@: new allocation size (smaller or larger than previous)
1935\end{itemize}
1936It returns the address of the old or new storage with the specified new size and alignment, or @NULL@ if @size@ is zero.
1937
1938\paragraph{\lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}}
1939extends @realloc@ with an alignment re\-quirement and has the same usage as aligned @resize@.
1940
1941
1942\subsubsection{\CFA Interface}
1943
1944The following extensions take advantage of overload polymorphism in the \CFA type-system.
1945The key safety advantage of the \CFA type system is using the return type to select overloads;
1946hence, a polymorphic routine knows the returned type and its size.
1947This capability is used to remove the object size parameter and correctly cast the return storage to match the result type.
1948For example, the following is the \CFA wrapper for C @malloc@:
1949\begin{cfa}
1950forall( T & | sized(T) ) {
1951        T * malloc( void ) {
1952                if ( _Alignof(T) <= libAlign() ) return @(T *)@malloc( @sizeof(T)@ ); // C allocation
1953                else return @(T *)@memalign( @_Alignof(T)@, @sizeof(T)@ ); // C allocation
	} // malloc
} // distribution
\end{cfa}
1956and is used as follows:
1957\begin{lstlisting}
1958int * i = malloc();
1959double * d = malloc();
1960struct Spinlock { ... } __attribute__(( aligned(128) ));
1961Spinlock * sl = malloc();
1962\end{lstlisting}
1963where each @malloc@ call provides the return type as @T@, which is used with @sizeof@, @_Alignof@, and casting the storage to the correct type.
1964This interface removes many of the common allocation errors in C programs.
1965Figure~\ref{f:CFADynamicAllocationAPI} shows the \CFA wrappers for the equivalent C/\CC allocation routines with same semantic behaviour.
1966
1967\begin{figure}
1968\begin{lstlisting}
1969T * malloc( void );
1970T * aalloc( size_t dim );
1971T * calloc( size_t dim );
1972T * resize( T * ptr, size_t size );
1973T * realloc( T * ptr, size_t size );
1974T * memalign( size_t align );
1975T * amemalign( size_t align, size_t dim );
1976T * cmemalign( size_t align, size_t dim  );
1977T * aligned_alloc( size_t align );
1978int posix_memalign( T ** ptr, size_t align );
1979T * valloc( void );
1980T * pvalloc( void );
1981\end{lstlisting}
1982\caption{\CFA C-Style Dynamic-Allocation API}
1983\label{f:CFADynamicAllocationAPI}
1984\end{figure}
1985
1986In addition to the \CFA C-style allocator interface, a new allocator interface is provided to further increase orthogonality and usability of dynamic-memory allocation.
1987This interface helps programmers in three ways.
1988\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
1989\item
1990naming: \CFA regular and @ttype@ polymorphism (@ttype@ polymorphism in \CFA is similar to \CC variadic templates) is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
1991\item
1992named arguments: individual allocation properties are specified using postfix function call, so the programmers do not have to remember parameter positions in allocation calls.
1993\item
1994object size: like the \CFA's C-interface, programmers do not have to specify object size or cast allocation results.
1995\end{itemize}
1996Note, postfix function call is an alternative call syntax, using backtick @`@, where the argument appears before the function name, \eg
1997\begin{cfa}
1998duration ?@`@h( int h );                // ? denotes the position of the function operand
1999duration ?@`@m( int m );
2000duration ?@`@s( int s );
2001duration dur = 3@`@h + 42@`@m + 17@`@s;
2002\end{cfa}
2003
2004\paragraph{\lstinline{T * alloc( ... )} or \lstinline{T * alloc( size_t dim, ... )}}
2005is overloaded with a variable number of specific allocation operations, or an integer dimension parameter followed by a variable number of specific allocation operations.
2006These allocation operations can be passed as named arguments when calling the \lstinline{alloc} routine.
2007A call without parameters returns a dynamically allocated object of type @T@ (@malloc@).
2008A call with only the dimension (dim) parameter returns a dynamically allocated array of objects of type @T@ (@aalloc@).
2009The variable number of arguments consist of allocation properties, which can be combined to produce different kinds of allocations.
2010The only restriction is for properties @realloc@ and @resize@, which cannot be combined.
2011
2012The allocation property functions are:
2013\subparagraph{\lstinline{T_align ?`align( size_t alignment )}}
2014to align the allocation.
2015The alignment parameter must be $\ge$ the default alignment (@libAlign()@ in \CFA) and a power of two, \eg:
2016\begin{cfa}
2017int * i0 = alloc( @4096`align@ );  sout | i0 | nl;
2018int * i1 = alloc( 3, @4096`align@ );  sout | i1; for (i; 3 ) sout | &i1[i]; sout | nl;
2019
20200x555555572000
20210x555555574000 0x555555574000 0x555555574004 0x555555574008
2022\end{cfa}
2023returns a dynamic object and object array aligned on a 4096-byte boundary.
2024
2025\subparagraph{\lstinline{S_fill(T) ?`fill ( /* various types */ )}}
2026to initialize storage.
2027There are three ways to fill storage:
2028\begin{enumerate}
2029\item
2030A char fills each byte of each object.
2031\item
2032An object of the returned type fills each object.
2033\item
2034An object array pointer fills some or all of the corresponding object array.
2035\end{enumerate}
2036For example:
2037\begin{cfa}[numbers=left]
2038int * i0 = alloc( @0n`fill@ );  sout | *i0 | nl;  // disambiguate 0
2039int * i1 = alloc( @5`fill@ );  sout | *i1 | nl;
2040int * i2 = alloc( @'\xfe'`fill@ ); sout | hex( *i2 ) | nl;
2041int * i3 = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | i3[i]; sout | nl;
2042int * i4 = alloc( 5, @0xdeadbeefN`fill@ );  for ( i; 5 ) sout | hex( i4[i] ); sout | nl;
2043int * i5 = alloc( 5, @i3`fill@ );  for ( i; 5 ) sout | i5[i]; sout | nl;
2044int * i6 = alloc( 5, @[i3, 3]`fill@ );  for ( i; 5 ) sout | i6[i]; sout | nl;
2045\end{cfa}
2046\begin{lstlisting}[numbers=left]
20470
20485
20490xfefefefe
20505 5 5 5 5
20510xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef
20525 5 5 5 5
20535 5 5 -555819298 -555819298  // two undefined values
2054\end{lstlisting}
2055Examples 1 to 3 fill an object with a value or characters.
2056Examples 4 to 7 fill an array of objects with values, another array, or part of an array.
2057
2058\subparagraph{\lstinline{S_resize(T) ?`resize( void * oaddr )}}
2059used to resize, realign, and fill, where the old object data is not copied to the new object.
2060The old object type may be different from the new object type, since the values are not used.
2061For example:
2062\begin{cfa}[numbers=left]
2063int * i = alloc( @5`fill@ );  sout | i | *i;
2064i = alloc( @i`resize@, @256`align@, @7`fill@ );  sout | i | *i;
2065double * d = alloc( @i`resize@, @4096`align@, @13.5`fill@ );  sout | d | *d;
2066\end{cfa}
2067\begin{lstlisting}[numbers=left]
20680x55555556d5c0 5
20690x555555570000 7
20700x555555571000 13.5
2071\end{lstlisting}
2072Examples 2 to 3 change the alignment, fill, and size for the initial storage of @i@.
2073
2074\begin{cfa}[numbers=left]
2075int * ia = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | ia[i]; sout | nl;
2076ia = alloc( 10, @ia`resize@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl;
2077sout | ia; ia = alloc( 5, @ia`resize@, @512`align@, @13`fill@ ); sout | ia; for ( i; 5 ) sout | ia[i]; sout | nl;;
2078ia = alloc( 3, @ia`resize@, @4096`align@, @2`fill@ );  sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl;
2079\end{cfa}
2080\begin{lstlisting}[numbers=left]
20815 5 5 5 5
20827 7 7 7 7 7 7 7 7 7
20830x55555556d560 0x555555571a00 13 13 13 13 13
20840x555555572000 0x555555572000 2 0x555555572004 2 0x555555572008 2
2085\end{lstlisting}
2086Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
2087
2088\subparagraph{\lstinline{S_realloc(T) ?`realloc( T * a ))}}
2089used to resize, realign, and fill, where the old object data is copied to the new object.
2090The old object type must be the same as the new object type, since the value is used.
2091Note, for @fill@, only the extra space after copying the data from the old object is filled with the given parameter.
2092For example:
2093\begin{cfa}[numbers=left]
2094int * i = alloc( @5`fill@ );  sout | i | *i;
2095i = alloc( @i`realloc@, @256`align@ );  sout | i | *i;
2096i = alloc( @i`realloc@, @4096`align@, @13`fill@ );  sout | i | *i;
2097\end{cfa}
2098\begin{lstlisting}[numbers=left]
20990x55555556d5c0 5
21000x555555570000 5
21010x555555571000 5
2102\end{lstlisting}
2103Examples 2 to 3 change the alignment for the initial storage of @i@.
2104The @13`fill@ in example 3 does nothing because no extra space is added.
2105
2106\begin{cfa}[numbers=left]
2107int * ia = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | ia[i]; sout | nl;
2108ia = alloc( 10, @ia`realloc@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl;
2109sout | ia; ia = alloc( 1, @ia`realloc@, @512`align@, @13`fill@ ); sout | ia; for ( i; 1 ) sout | ia[i]; sout | nl;;
2110ia = alloc( 3, @ia`realloc@, @4096`align@, @2`fill@ );  sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl;
2111\end{cfa}
2112\begin{lstlisting}[numbers=left]
21135 5 5 5 5
21145 5 5 5 5 7 7 7 7 7
21150x55555556c560 0x555555570a00 5
21160x555555571000 0x555555571000 5 0x555555571004 2 0x555555571008 2
2117\end{lstlisting}
2118Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
2119The @13`fill@ in example 3 does nothing because no extra space is added.
2120
2121These \CFA allocation features are used extensively in the development of the \CFA runtime.
2122
2123
2124\section{Benchmarks}
2125\label{s:Benchmarks}
2126
2127%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2128%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2129%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Micro Benchmark Suite
2130%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2131%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2132
2133There are two basic approaches for evaluating computer software: benchmarks and micro-benchmarks.
2134\begin{description}
2135\item[Benchmarks]
2136are a suite of application programs (SPEC CPU/WEB) that are exercised in a common way (inputs) to find differences among underlying software implementations associated with an application (compiler, memory allocator, web server, \etc).
2137The applications are supposed to represent common execution patterns that need to perform well with respect to an underlying software implementation.
2138Benchmarks are often criticized for having overlapping patterns, insufficient patterns, or extraneous code that masks patterns.
2139\item[Micro-Benchmarks]
2140attempt to extract the common execution patterns associated with an application and run the pattern independently.
2141This approach removes any masking from extraneous application code, allows execution pattern to be very precise, and provides an opportunity for the execution pattern to have multiple independent tuning adjustments (knobs).
2142Micro-benchmarks are often criticized for inadequately representing real-world applications.
2143\end{description}
2144
2145While some crucial software components have standard benchmarks, no standard benchmark exists for testing and comparing memory allocators.
2146In the past, an assortment of applications have been used for benchmarking allocators~\cite{Detlefs93,Berger00,Berger01,berger02reconsidering}: P2C, GS, Espresso/Espresso-2, CFRAC/CFRAC-2, GMake, GCC, Perl/Perl-2, Gawk/Gawk-2, XPDF/XPDF-2, ROBOOP, Lindsay.
2147As well, an assortment of micro-benchmarks have been used for benchmarking allocators~\cite{larson99memory,Berger00,streamflow}: threadtest, shbench, Larson, consume, false sharing.
2148Many of these benchmark applications and micro-benchmarks are old and may not reflect current application allocation patterns.
2149
2150This work designs and examines a new set of micro-benchmarks for memory allocators that test a variety of allocation patterns, each with multiple tuning parameters.
2151The aim of the micro-benchmark suite is to create a set of programs that can evaluate a memory allocator based on the key performance metrics such as speed, memory overhead, and cache performance.
2152% These programs can be taken as a standard to benchmark an allocator's basic goals.
2153These programs give details of an allocator's memory overhead and speed under certain allocation patterns.
2154The allocation patterns are configurable (adjustment knobs) to observe an allocator's performance across a spectrum of allocation patterns, which is seldom possible with benchmark programs.
2155Each micro-benchmark program has multiple control knobs specified by command-line arguments.
2156
2157The new micro-benchmark suite measures performance by allocating dynamic objects and measuring specific metrics.
2158An allocator's speed is benchmarked in different ways, as are issues like false sharing.
2159
2160
2161\subsection{Prior Multi-Threaded Micro-Benchmarks}
2162
2163Modern memory allocators, such as llheap, must handle multi-threaded programs at the KT and UT level.
2164The following multi-threaded micro-benchmarks are presented to give a sense of prior work~\cite{Berger00} at the KT level.
2165None of the prior work addresses multi-threading at the UT level.
2166
2167
2168\subsubsection{threadtest}
2169
2170This benchmark stresses the ability of the allocator to handle different threads allocating and deallocating independently.
2171There is no interaction among threads, \ie no object sharing.
2172Each thread repeatedly allocates 100,000 \emph{8-byte} objects then deallocates them in the order they were allocated.
2173The execution time of the benchmark evaluates its efficiency.
2174
2175
2176\subsubsection{shbench}
2177
2178This benchmark is similar to threadtest but each thread randomly allocates and frees a number of \emph{random-sized} objects.
2179It is a stress test that also uses runtime to determine efficiency of the allocator.
2180
2181
2182\subsubsection{Larson}
2183
2184This benchmark simulates a server environment.
2185Multiple threads are created where each thread allocates and frees a number of random-sized objects within a size range.
2186Before the thread terminates, it passes its array of 10,000 objects to a new child thread to continue the process.
2187The number of thread generations varies depending on the thread speed.
2188It calculates memory operations per second as an indicator of the memory allocator's performance.
2189
2190
2191\subsection{New Multi-Threaded Micro-Benchmarks}
2192
2193The following new benchmarks were created to assess multi-threaded programs at the KT and UT level.
2194For generating random values, two generators are supported: uniform~\cite{uniformPRNG} and fisher~\cite{fisherPRNG}.
2195
2196
2197\subsubsection{Churn Benchmark}
2198\label{s:ChurnBenchmark}
2199
2200The churn benchmark measures the runtime speed of an allocator in a multi-threaded scenario, where each thread extensively allocates and frees dynamic memory.
2201Only @malloc@ and @free@ are used to eliminate any extra cost, such as @memcpy@ in @calloc@ or @realloc@.
2202Churn simulates a memory intensive program and can be tuned to create different scenarios.
2203
2204Figure~\ref{fig:ChurnBenchFig} shows the pseudo code for the churn micro-benchmark.
2205This benchmark creates a buffer with M spots and an allocation in each spot, and then starts K threads.
2206Each thread picks a random spot in M, frees the object currently at that spot, and allocates a new object for that spot.
2207Each thread repeats this cycle N times.
2208The main thread measures the total time taken for the whole benchmark and that time is used to evaluate the memory allocator's performance.
2209
2210\begin{figure}
2211\centering
2212\begin{lstlisting}
2213Main Thread
2214        create worker threads
2215        note time T1
2216        ...
2217        note time T2
2218        churn_speed = (T2 - T1)
2219Worker Thread
2220        initialize variables
2221        ...
2222        for ( N )
2223                R = random spot in array
2224                free R
2225                allocate new object at R
2226\end{lstlisting}
2227%\includegraphics[width=1\textwidth]{figures/bench-churn.eps}
2228\caption{Churn Benchmark}
2229\label{fig:ChurnBenchFig}
2230\end{figure}
2231
2232The adjustment knobs for churn are:
2233\begin{description}[itemsep=0pt,parsep=0pt]
2234\item[thread:]
2235number of threads (K).
2236\item[spots:]
2237number of spots for churn (M).
2238\item[obj:]
2239number of objects per thread (N).
2240\item[max:]
2241maximum object size.
2242\item[min:]
2243minimum object size.
2244\item[step:]
2245object size increment.
2246\item[distro:]
object size distribution.
2248\end{description}
2249
2250
2251\subsubsection{Cache Thrash}
2252\label{sec:benchThrashSec}
2253
2254The cache-thrash micro-benchmark measures allocator-induced active false-sharing as illustrated in Section~\ref{s:AllocatorInducedActiveFalseSharing}.
2255If memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
2256When threads share a cache line, frequent reads/writes to their cache-line object causes cache misses, which cause escalating delays as cache distance increases.
2257
2258Cache thrash tries to create a scenario that leads to false sharing, if the underlying memory allocator is allocating dynamic memory to multiple threads on the same cache lines.
2259Ideally, a memory allocator should distance the dynamic memory region of one thread from another.
Having multiple threads allocating small objects simultaneously can cause a memory allocator to allocate objects on the same cache line, if it is not distancing the memory among different threads.
2261
2262Figure~\ref{fig:benchThrashFig} shows the pseudo code for the cache-thrash micro-benchmark.
2263First, it creates K worker threads.
Each worker thread allocates an object and intensively reads/writes it M times to possibly invalidate cache lines that may interfere with other threads sharing the same cache line.
2265Each thread repeats this for N times.
2266The main thread measures the total time taken for all worker threads to complete.
2267Worker threads sharing cache lines with each other are expected to take longer.
2268
2269\begin{figure}
2270\centering
2271\input{AllocInducedActiveFalseSharing}
2272\medskip
2273\begin{lstlisting}
2274Main Thread
2275        create worker threads
2276        ...
2277        signal workers to allocate
2278        ...
2279        signal workers to free
2280        ...
2281Worker Thread$\(_1\)$
2282        warm up memory in chunks of 16 bytes
2283        ...
2284        For N
2285                malloc an object
2286                read/write the object M times
2287                free the object
2288        ...
2289Worker Thread$\(_2\)$
2290        // same as Worker Thread$\(_1\)$
2291\end{lstlisting}
2292%\input{MemoryOverhead}
2293%\includegraphics[width=1\textwidth]{figures/bench-cache-thrash.eps}
2294\caption{Allocator-Induced Active False-Sharing Benchmark}
2295\label{fig:benchThrashFig}
2296\end{figure}
2297
2298The adjustment knobs for cache access scenarios are:
2299\begin{description}[itemsep=0pt,parsep=0pt]
2300\item[thread:]
2301number of threads (K).
2302\item[iterations:]
2303iterations of cache benchmark (N).
2304\item[cacheRW:]
2305repetitions of reads/writes to object (M).
2306\item[size:]
2307object size.
2308\end{description}
2309
2310
2311\subsubsection{Cache Scratch}
2312\label{s:CacheScratch}
2313
2314The cache-scratch micro-benchmark measures allocator-induced passive false-sharing as illustrated in Section~\ref{s:AllocatorInducedPassiveFalseSharing}.
2315As with cache thrash, if memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
2316In this scenario, the false sharing is being caused by the memory allocator although it is started by the program sharing an object.
2317
2318% An allocator can unintentionally induce false sharing depending upon its management of the freed objects.
2319% If thread Thread$_1$ allocates multiple objects together, they may be allocated on the same cache line by the memory allocator.
2320% If Thread$_1$ passes these object to thread Thread$_2$, then both threads may share the same cache line but this scenario is not induced by the allocator;
2321% instead, the program induced this situation.
2322% Now if Thread$_2$ frees this object and then allocate an object of the same size, the allocator may return the same object, which is on a cache line shared with thread Thread$_1$.
2323
2324Cache scratch tries to create a scenario that leads to false sharing and should make the memory allocator preserve the program-induced false sharing, if it does not return a freed object to its owner thread and, instead, re-uses it instantly.
An allocator using object ownership, as described in Section~\ref{s:Ownership}, is less susceptible to allocator-induced passive false-sharing.
2326If the object is returned to the thread that owns it, then the new object that the thread gets is less likely to be on the same cache line.
2327
2328Figure~\ref{fig:benchScratchFig} shows the pseudo code for the cache-scratch micro-benchmark.
First, it allocates K dynamic objects together, one for each of the K worker threads, possibly causing the memory allocator to allocate these objects on the same cache line.
Then it creates K worker threads and passes an object from the K allocated objects to each of the K threads.
2331Each worker thread frees the object passed by the main thread.
2332Then, it allocates an object and reads/writes it repetitively for M times possibly causing frequent cache invalidations.
2333Each worker repeats this N times.
2334
2335\begin{figure}
2336\centering
2337\input{AllocInducedPassiveFalseSharing}
2338\medskip
2339\begin{lstlisting}
2340Main Thread
2341        malloc N objects $for$ each worker $thread$
2342        create worker threads and pass N objects to each worker
2343        ...
2344        signal workers to allocate
2345        ...
2346        signal workers to free
2347        ...
2348Worker Thread$\(_1\)$
2349        warmup memory in chunks of 16 bytes
2350        ...
2351        free the object passed by the Main Thread
2352        For N
2353                malloc new object
2354                read/write the object M times
2355                free the object
2356        ...
2357Worker Thread$\(_2\)$
2358        // same as Worker Thread$\(_1\)$
2359\end{lstlisting}
2360%\includegraphics[width=1\textwidth]{figures/bench-cache-scratch.eps}
2361\caption{Program-Induced Passive False-Sharing Benchmark}
2362\label{fig:benchScratchFig}
2363\end{figure}
2364
2365Each thread allocating an object after freeing the original object passed by the main thread should cause the memory allocator to return the same object that was initially allocated by the main thread if the allocator did not return the initial object back to its owner (main thread).
Then, intensive read/write on the shared cache line by multiple threads should slow down worker threads due to high cache invalidations and misses.
The main thread measures the total time taken for all the workers to complete.
2368
Similar to the cache-thrash benchmark in Section~\ref{sec:benchThrashSec}, different cache access scenarios can be created using the following command-line arguments.
2370\begin{description}[topsep=0pt,itemsep=0pt,parsep=0pt]
2371\item[threads:]
2372number of threads (K).
2373\item[iterations:]
2374iterations of cache benchmark (N).
2375\item[cacheRW:]
2376repetitions of reads/writes to object (M).
2377\item[size:]
2378object size.
2379\end{description}
2380
2381
2382\subsubsection{Speed Micro-Benchmark}
2383\label{s:SpeedMicroBenchmark}
2384\vspace*{-4pt}
2385
2386The speed benchmark measures the runtime speed of individual and sequences of memory allocation routines:
2387\begin{enumerate}[topsep=-5pt,itemsep=0pt,parsep=0pt]
2388\item malloc
2389\item realloc
2390\item free
2391\item calloc
2392\item malloc-free
2393\item realloc-free
2394\item calloc-free
2395\item malloc-realloc
2396\item calloc-realloc
2397\item malloc-realloc-free
2398\item calloc-realloc-free
2399\item malloc-realloc-free-calloc
2400\end{enumerate}
2401
2402Figure~\ref{fig:SpeedBenchFig} shows the pseudo code for the speed micro-benchmark.
2403Each routine in the chain is called for N objects and then those allocated objects are used when calling the next routine in the allocation chain.
2404This tests the latency of the memory allocator when multiple routines are chained together, \eg the call sequence malloc-realloc-free-calloc gives a complete picture of the major allocation routines when combined together.
2405For each chain, the time is recorded to visualize performance of a memory allocator against each chain.
2406
2407\begin{figure}
2408\centering
2409\begin{lstlisting}[morekeywords={foreach}]
2410Main Thread
2411        create worker threads
2412        foreach ( allocation chain )
2413                note time T1
2414                ...
2415                note time T2
                chain_speed = (T2 - T1) / ( number-of-worker-threads * N )
2417Worker Thread
2418        initialize variables
2419        ...
2420        foreach ( routine in allocation chain )
2421                call routine N times
2422\end{lstlisting}
2423%\includegraphics[width=1\textwidth]{figures/bench-speed.eps}
2424\caption{Speed Benchmark}
2425\label{fig:SpeedBenchFig}
2426\end{figure}
2427
2428The adjustment knobs for memory usage are:
2429\begin{description}[itemsep=0pt,parsep=0pt]
2430\item[max:]
2431maximum object size.
2432\item[min:]
2433minimum object size.
2434\item[step:]
2435object size increment.
2436\item[distro:]
2437object size distribution.
2438\item[objects:]
2439number of objects per thread.
2440\item[workers:]
2441number of worker threads.
2442\end{description}
2443
2444
2445\subsubsection{Memory Micro-Benchmark}
2446\label{s:MemoryMicroBenchmark}
2447
2448The memory micro-benchmark measures the memory overhead of an allocator.
It allocates a number of dynamic objects and reads @/proc/self/maps@ to get the total memory requested by the allocator from the OS.
2450It calculates the memory overhead by computing the difference between the memory the allocator requests from the OS and the memory that the program allocates.
2451This micro-benchmark is like Larson and stresses the ability of an allocator to deal with object sharing.
2452
2453Figure~\ref{fig:MemoryBenchFig} shows the pseudo code for the memory micro-benchmark.
2454It creates a producer-consumer scenario with K producer threads and each producer has M consumer threads.
2455A producer has a separate buffer for each consumer and allocates N objects of random sizes following a configurable distribution for each consumer.
2456A consumer frees these objects.
2457After every memory operation, program memory usage is recorded throughout the runtime.
2458This data is used to visualize the memory usage and consumption for the program.
2459
2460\begin{figure}
2461\centering
2462\begin{lstlisting}
2463Main Thread
2464        print memory snapshot
2465        create producer threads
2466Producer Thread (K)
2467        set free start
2468        create consumer threads
2469        for ( N )
2470                allocate memory
2471                print memory snapshot
2472Consumer Thread (M)
2473        wait while ( allocations < free start )
2474        for ( N )
2475                free memory
2476                print memory snapshot
2477\end{lstlisting}
2478%\includegraphics[width=1\textwidth]{figures/bench-memory.eps}
2479\caption{Memory Footprint Micro-Benchmark}
2480\label{fig:MemoryBenchFig}
2481\end{figure}
2482
2483The global adjustment knobs for this micro-benchmark are:
2484\begin{description}[itemsep=0pt,parsep=0pt]
2485\item[producer (K):]
2486sets the number of producer threads.
2487\item[consumer (M):]
sets the number of consumer threads for each producer.
2489\item[round:]
2490sets production and consumption round size.
2491\end{description}
2492
2493The adjustment knobs for object allocation are:
2494\begin{description}[itemsep=0pt,parsep=0pt]
2495\item[max:]
2496maximum object size.
2497\item[min:]
2498minimum object size.
2499\item[step:]
2500object size increment.
2501\item[distro:]
2502object size distribution.
2503\item[objects (N):]
2504number of objects per thread.
2505\end{description}
2506
2507
2508\section{Performance}
2509\label{c:Performance}
2510
2511This section uses the micro-benchmarks from Section~\ref{s:Benchmarks} to test a number of current memory allocators, including llheap.
2512The goal is to see if llheap is competitive with the currently popular memory allocators.
2513
2514
2515\subsection{Machine Specification}
2516
2517The performance experiments were run on two different multi-core architectures (x64 and ARM) to determine if there is consistency across platforms:
2518\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
2519\item
2520\textbf{Algol} Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz, GCC version 9.4.0
2521\item
2522\textbf{Nasus} AMD EPYC 7662, 64-core socket $\times$ 2, 2.0 GHz, GCC version 9.3.0
2523\end{itemize}
2524
2525
2526\subsection{Existing Memory Allocators}
2527\label{sec:curAllocatorSec}
2528
2529With dynamic allocation being an important feature of C, there are many stand-alone memory allocators that have been designed for different purposes.
2530For this work, 7 of the most popular and widely used memory allocators were selected for comparison, along with llheap.
2531
2532\paragraph{llheap (\textsf{llh})}
is the thread-safe allocator from Chapter~\ref{c:Allocator}.
\\
\textbf{Version:} 1.0\\
2536\textbf{Configuration:} Compiled with dynamic linking, but without statistics or debugging.\\
2537\textbf{Compilation command:} @make@
2538
2539\paragraph{glibc (\textsf{glc})}
2540\cite{glibc} is the default glibc thread-safe allocator.
2541\\
2542\textbf{Version:} Ubuntu GLIBC 2.31-0ubuntu9.7 2.31\\
2543\textbf{Configuration:} Compiled by Ubuntu 20.04.\\
2544\textbf{Compilation command:} N/A
2545
2546\paragraph{dlmalloc (\textsf{dl})}
2547\cite{dlmalloc} is a thread-safe allocator that is single threaded and single heap.
2548It maintains free-lists of different sizes to store freed dynamic memory.
2549\\
2550\textbf{Version:} 2.8.6\\
2551\textbf{Configuration:} Compiled with preprocessor @USE_LOCKS@.\\
2552\textbf{Compilation command:} @gcc -g3 -O3 -Wall -Wextra -fno-builtin-malloc -fno-builtin-calloc@ @-fno-builtin-realloc -fno-builtin-free -fPIC -shared -DUSE_LOCKS -o libdlmalloc.so malloc-2.8.6.c@
2553
2554\paragraph{hoard (\textsf{hrd})}
2555\cite{hoard} is a thread-safe allocator that is multi-threaded and uses a heap layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap.
2556\\
2557\textbf{Version:} 3.13\\
2558\textbf{Configuration:} Compiled with hoard's default configurations and @Makefile@.\\
2559\textbf{Compilation command:} @make all@
2560
2561\paragraph{jemalloc (\textsf{je})}
2562\cite{jemalloc} is a thread-safe allocator that uses multiple arenas. Each thread is assigned an arena.
Each arena has chunks that contain contiguous memory regions of the same size. An arena has multiple chunks that contain regions of multiple sizes.
2564\\
2565\textbf{Version:} 5.2.1\\
2566\textbf{Configuration:} Compiled with jemalloc's default configurations and @Makefile@.\\
2567\textbf{Compilation command:} @autogen.sh; configure; make; make install@
2568
2569\paragraph{ptmalloc3 (\textsf{pt3})}
2570\cite{ptmalloc3} is a modification of dlmalloc.
2571It is a thread-safe multi-threaded memory allocator that uses multiple heaps.
The ptmalloc3 heap has a similar design to dlmalloc's heap.
2573\\
2574\textbf{Version:} 1.8\\
2575\textbf{Configuration:} Compiled with ptmalloc3's @Makefile@ using option ``linux-shared''.\\
2576\textbf{Compilation command:} @make linux-shared@
2577
2578\paragraph{rpmalloc (\textsf{rp})}
\cite{rpmalloc} is a thread-safe allocator that is multi-threaded and uses per-thread heaps.
2580Each heap has multiple size-classes and each size-class contains memory regions of the relevant size.
2581\\
2582\textbf{Version:} 1.4.1\\
2583\textbf{Configuration:} Compiled with rpmalloc's default configurations and ninja build system.\\
2584\textbf{Compilation command:} @python3 configure.py; ninja@
2585
2586\paragraph{tbb malloc (\textsf{tbb})}
2587\cite{tbbmalloc} is a thread-safe allocator that is multi-threaded and uses a private heap for each thread.
2588Each private-heap has multiple bins of different sizes. Each bin contains free regions of the same size.
2589\\
2590\textbf{Version:} intel tbb 2020 update 2, tbb\_interface\_version == 11102\\
2591\textbf{Configuration:} Compiled with tbbmalloc's default configurations and @Makefile@.\\
2592\textbf{Compilation command:} @make@
2593
2594% \subsection{Experiment Environment}
2595% We used our micro benchmark suite (FIX ME: cite mbench) to evaluate these memory allocators Section~\ref{sec:curAllocatorSec} and our own memory allocator uHeap Section~\ref{sec:allocatorSec}.
2596
2597\subsection{Experiments}
2598
Each micro-benchmark is configured and run with each of the allocators.
The less time an allocator takes to complete a benchmark the better, so lower in the graphs is better, except for the Memory micro-benchmark graphs.
2601All graphs use log scale on the Y-axis, except for the Memory micro-benchmark (see Section~\ref{s:MemoryMicroBenchmark}).
2602
2603%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2604%% CHURN
2605%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2606
2607\subsubsection{Churn Micro-Benchmark}
2608
2609Churn tests allocators for speed under intensive dynamic memory usage (see Section~\ref{s:ChurnBenchmark}).
This experiment was run with the following configurations:
2611\begin{description}[itemsep=0pt,parsep=0pt]
2612\item[thread:]
26131, 2, 4, 8, 16, 32, 48
2614\item[spots:]
261516
2616\item[obj:]
2617100,000
2618\item[max:]
2619500
2620\item[min:]
262150
2622\item[step:]
262350
2624\item[distro:]
2625fisher
2626\end{description}
2627
2628% -maxS          : 500
2629% -minS          : 50
2630% -stepS                 : 50
2631% -distroS       : fisher
2632% -objN          : 100000
2633% -cSpots                : 16
2634% -threadN       : 1, 2, 4, 8, 16
2635
2636Figure~\ref{fig:churn} shows the results for algol and nasus.
2637The X-axis shows the number of threads;
2638the Y-axis shows the total experiment time.
2639Each allocator's performance for each thread is shown in different colors.
2640
2641\begin{figure}
2642\centering
2643    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/churn} } \\
2644    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/churn} }
2645\caption{Churn}
2646\label{fig:churn}
2647\end{figure}
2648
2649\paragraph{Assessment}
2650All allocators did well in this micro-benchmark, except for \textsf{dl} on the ARM.
\textsf{dl} is the slowest, indicating some small bottleneck with respect to the other allocators.
2652\textsf{je} is the fastest, with only a small benefit over the other allocators.
2653% llheap is slightly slower because it uses ownership, where many of the allocations have remote frees, which requires locking.
2654% When llheap is compiled without ownership, its performance is the same as the other allocators (not shown).
2655
2656%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2657%% THRASH
2658%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2659
2660\subsubsection{Cache Thrash}
2661\label{sec:cache-thrash-perf}
2662
2663Thrash tests memory allocators for active false sharing (see Section~\ref{sec:benchThrashSec}).
This experiment was run with the following configurations:
2665\begin{description}[itemsep=0pt,parsep=0pt]
2666\item[threads:]
26671, 2, 4, 8, 16, 32, 48
2668\item[iterations:]
26691,000
2670\item[cacheRW:]
26711,000,000
2672\item[size:]
26731
2674\end{description}
2675
2676% * Each allocator was tested for its performance across different number of threads.
2677% Experiment was repeated for each allocator for 1, 2, 4, 8, and 16 threads by setting the configuration -threadN.
2678
2679Figure~\ref{fig:cacheThrash} shows the results for algol and nasus.
2680The X-axis shows the number of threads;
2681the Y-axis shows the total experiment time.
2682Each allocator's performance for each thread is shown in different colors.
2683
2684\begin{figure}
2685\centering
2686    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/cache_thrash_0-thrash} } \\
2687    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/cache_thrash_0-thrash} }
2688\caption{Cache Thrash}
2689\label{fig:cacheThrash}
2690\end{figure}
2691
2692\paragraph{Assessment}
2693All allocators did well in this micro-benchmark, except for \textsf{dl} and \textsf{pt3}.
2694\textsf{dl} uses a single heap for all threads so it is understandable that it generates so much active false-sharing.
2695Requests from different threads are dealt with sequentially by the single heap (using a single lock), which can allocate objects to different threads on the same cache line.
2696\textsf{pt3} uses the T:H model, so multiple threads can use one heap, but the active false-sharing is less than \textsf{dl}.
2697The rest of the memory allocators generate little or no active false-sharing.
2698
2699%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2700%% SCRATCH
2701%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2702
2703\subsubsection{Cache Scratch}
2704
2705Scratch tests memory allocators for program-induced allocator-preserved passive false-sharing (see Section~\ref{s:CacheScratch}).
This experiment was run with the following configurations:
2707\begin{description}[itemsep=0pt,parsep=0pt]
2708\item[threads:]
27091, 2, 4, 8, 16, 32, 48
2710\item[iterations:]
27111,000
2712\item[cacheRW:]
27131,000,000
2714\item[size:]
27151
2716\end{description}
2717
2718% * Each allocator was tested for its performance across different number of threads.
2719% Experiment was repeated for each allocator for 1, 2, 4, 8, and 16 threads by setting the configuration -threadN.
2720
2721Figure~\ref{fig:cacheScratch} shows the results for algol and nasus.
2722The X-axis shows the number of threads;
2723the Y-axis shows the total experiment time.
2724Each allocator's performance for each thread is shown in different colors.
2725
2726\begin{figure}
2727\centering
2728    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/cache_scratch_0-scratch} } \\
2729    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/cache_scratch_0-scratch} }
2730\caption{Cache Scratch}
2731\label{fig:cacheScratch}
2732\end{figure}
2733
2734\paragraph{Assessment}
2735This micro-benchmark divides the allocators into two groups.
2736First is the high-performer group: \textsf{llh}, \textsf{je}, and \textsf{rp}.
2737These memory allocators generate little or no passive false-sharing and their performance difference is negligible.
2738Second is the low-performer group, which includes the rest of the memory allocators.
2739These memory allocators have significant program-induced passive false-sharing, where \textsf{hrd}'s is the worst performing allocator.
2740All of the allocators in this group are sharing heaps among threads at some level.
2741
Interestingly, allocators such as \textsf{hrd} and \textsf{glc} performed well in the cache-thrash micro-benchmark (see Section~\ref{sec:cache-thrash-perf}), but these allocators are among the low performers in cache scratch.
2743It suggests these allocators do not actively produce false-sharing, but preserve program-induced passive false sharing.
2744
2745%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2746%% SPEED
2747%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2748
2749\subsubsection{Speed Micro-Benchmark}
2750
2751Speed tests memory allocators for runtime latency (see Section~\ref{s:SpeedMicroBenchmark}).
This experiment was run with the following configurations:
2753\begin{description}
2754\item[max:]
2755500
2756\item[min:]
275750
2758\item[step:]
275950
2760\item[distro:]
2761fisher
2762\item[objects:]
2763100,000
2764\item[workers:]
27651, 2, 4, 8, 16, 32, 48
2766\end{description}
2767
2768% -maxS    :  500
2769% -minS    :  50
2770% -stepS   :  50
2771% -distroS :  fisher
2772% -objN    :  1000000
2773% -threadN    : \{ 1, 2, 4, 8, 16 \} *
2774
2775%* Each allocator was tested for its performance across different number of threads.
2776%Experiment was repeated for each allocator for 1, 2, 4, 8, and 16 threads by setting the configuration -threadN.
2777
2778Figures~\ref{fig:speed-3-malloc} to~\ref{fig:speed-14-malloc-calloc-realloc-free} show 12 figures, one figure for each chain of the speed benchmark.
2779The X-axis shows the number of threads;
2780the Y-axis shows the total experiment time.
2781Each allocator's performance for each thread is shown in different colors.
2782
2783\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
2784\item Figure~\ref{fig:speed-3-malloc} shows results for chain: malloc
2785\item Figure~\ref{fig:speed-4-realloc} shows results for chain: realloc
2786\item Figure~\ref{fig:speed-5-free} shows results for chain: free
2787\item Figure~\ref{fig:speed-6-calloc} shows results for chain: calloc
2788\item Figure~\ref{fig:speed-7-malloc-free} shows results for chain: malloc-free
2789\item Figure~\ref{fig:speed-8-realloc-free} shows results for chain: realloc-free
2790\item Figure~\ref{fig:speed-9-calloc-free} shows results for chain: calloc-free
2791\item Figure~\ref{fig:speed-10-malloc-realloc} shows results for chain: malloc-realloc
2792\item Figure~\ref{fig:speed-11-calloc-realloc} shows results for chain: calloc-realloc
2793\item Figure~\ref{fig:speed-12-malloc-realloc-free} shows results for chain: malloc-realloc-free
2794\item Figure~\ref{fig:speed-13-calloc-realloc-free} shows results for chain: calloc-realloc-free
2795\item Figure~\ref{fig:speed-14-malloc-calloc-realloc-free} shows results for chain: malloc-realloc-free-calloc
2796\end{itemize}
2797
2798\paragraph{Assessment}
2799This micro-benchmark divides the allocators into two groups: with and without @calloc@.
2800@calloc@ uses @memset@ to set the allocated memory to zero, which dominates the cost of the allocation chain (large performance increase) and levels performance across the allocators.
2801But the difference among the allocators in a @calloc@ chain still gives an idea of their relative performance.
2802
2803All allocators did well in this micro-benchmark across all allocation chains, except for \textsf{dl}, \textsf{pt3}, and \textsf{hrd}.
2804Again, the low-performing allocators are sharing heaps among threads, so the contention causes performance increases with increasing numbers of threads.
2805Furthermore, chains with @free@ can trigger coalescing, which slows the fast path.
The high-performing allocators all illustrate low latency across the allocation chains, \ie there are no performance spikes as the chain lengthens, which might be caused by contention and/or coalescing.
2807Low latency is important for applications that are sensitive to unknown execution delays.
2808
2809%speed-3-malloc.eps
2810\begin{figure}
2811\centering
2812    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-3-malloc} } \\
2813    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-3-malloc} }
2814\caption{Speed benchmark chain: malloc}
2815\label{fig:speed-3-malloc}
2816\end{figure}
2817
2818%speed-4-realloc.eps
2819\begin{figure}
2820\centering
2821    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-4-realloc} } \\
2822    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-4-realloc} }
2823\caption{Speed benchmark chain: realloc}
2824\label{fig:speed-4-realloc}
2825\end{figure}
2826
2827%speed-5-free.eps
2828\begin{figure}
2829\centering
2830    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-5-free} } \\
2831    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-5-free} }
2832\caption{Speed benchmark chain: free}
2833\label{fig:speed-5-free}
2834\end{figure}
2835
2836%speed-6-calloc.eps
2837\begin{figure}
2838\centering
2839    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-6-calloc} } \\
2840    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-6-calloc} }
2841\caption{Speed benchmark chain: calloc}
2842\label{fig:speed-6-calloc}
2843\end{figure}
2844
2845%speed-7-malloc-free.eps
2846\begin{figure}
2847\centering
2848    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-7-malloc-free} } \\
2849    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-7-malloc-free} }
2850\caption{Speed benchmark chain: malloc-free}
2851\label{fig:speed-7-malloc-free}
2852\end{figure}
2853
2854%speed-8-realloc-free.eps
2855\begin{figure}
2856\centering
2857    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-8-realloc-free} } \\
2858    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-8-realloc-free} }
2859\caption{Speed benchmark chain: realloc-free}
2860\label{fig:speed-8-realloc-free}
2861\end{figure}
2862
2863%speed-9-calloc-free.eps
2864\begin{figure}
2865\centering
2866    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-9-calloc-free} } \\
2867    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-9-calloc-free} }
2868\caption{Speed benchmark chain: calloc-free}
2869\label{fig:speed-9-calloc-free}
2870\end{figure}
2871
2872%speed-10-malloc-realloc.eps
2873\begin{figure}
2874\centering
2875    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-10-malloc-realloc} } \\
2876    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-10-malloc-realloc} }
2877\caption{Speed benchmark chain: malloc-realloc}
2878\label{fig:speed-10-malloc-realloc}
2879\end{figure}
2880
2881%speed-11-calloc-realloc.eps
2882\begin{figure}
2883\centering
2884    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-11-calloc-realloc} } \\
2885    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-11-calloc-realloc} }
2886\caption{Speed benchmark chain: calloc-realloc}
2887\label{fig:speed-11-calloc-realloc}
2888\end{figure}
2889
2890%speed-12-malloc-realloc-free.eps
2891\begin{figure}
2892\centering
2893    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-12-malloc-realloc-free} } \\
2894    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-12-malloc-realloc-free} }
2895\caption{Speed benchmark chain: malloc-realloc-free}
2896\label{fig:speed-12-malloc-realloc-free}
2897\end{figure}
2898
2899%speed-13-calloc-realloc-free.eps
2900\begin{figure}
2901\centering
2902    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-13-calloc-realloc-free} } \\
2903    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-13-calloc-realloc-free} }
2904\caption{Speed benchmark chain: calloc-realloc-free}
2905\label{fig:speed-13-calloc-realloc-free}
2906\end{figure}
2907
2908%speed-14-{m,c,re}alloc-free.eps
2909\begin{figure}
2910\centering
2911    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-14-m-c-re-alloc-free} } \\
2912    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-14-m-c-re-alloc-free} }
2913\caption{Speed benchmark chain: malloc-calloc-realloc-free}
2914\label{fig:speed-14-malloc-calloc-realloc-free}
2915\end{figure}
2916
2917%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2918%% MEMORY
2919%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2920
2921\newpage
2922\subsubsection{Memory Micro-Benchmark}
2923\label{s:MemoryMicroBenchmark}
2924
2925This experiment is run with the following two configurations for each allocator.
2926The difference between the two configurations is the number of producers and consumers.
2927Configuration 1 has one producer and one consumer, and configuration 2 has 4 producers, where each producer has 4 consumers.
2928
2929\noindent
2930Configuration 1:
2931\begin{description}[itemsep=0pt,parsep=0pt]
2932\item[producer (K):]
29331
2934\item[consumer (M):]
29351
2936\item[round:]
2937100,000
2938\item[max:]
2939500
2940\item[min:]
294150
2942\item[step:]
294350
2944\item[distro:]
2945fisher
2946\item[objects (N):]
2947100,000
2948\end{description}
2949
2950% -threadA :  1
2951% -threadF :  1
2952% -maxS    :  500
2953% -minS    :  50
2954% -stepS   :  50
2955% -distroS :  fisher
2956% -objN    :  100000
2957% -consumeS:  100000
2958
2959\noindent
2960Configuration 2:
2961\begin{description}[itemsep=0pt,parsep=0pt]
2962\item[producer (K):]
29634
2964\item[consumer (M):]
29654
2966\item[round:]
2967100,000
2968\item[max:]
2969500
2970\item[min:]
297150
2972\item[step:]
297350
2974\item[distro:]
2975fisher
2976\item[objects (N):]
2977100,000
2978\end{description}
2979
2980% -threadA :  4
2981% -threadF :  4
2982% -maxS    :  500
2983% -minS    :  50
2984% -stepS   :  50
2985% -distroS :  fisher
2986% -objN    :  100000
2987% -consumeS:  100000
2988
2989% \begin{table}[b]
2990% \centering
2991%     \begin{tabular}{ |c|c|c| }
2992%      \hline
2993%     Memory Allocator & Configuration 1 Result & Configuration 2 Result\\
2994%      \hline
2995%     llh & Figure~\ref{fig:mem-1-prod-1-cons-100-llh} & Figure~\ref{fig:mem-4-prod-4-cons-100-llh}\\
2996%      \hline
2997%     dl & Figure~\ref{fig:mem-1-prod-1-cons-100-dl} & Figure~\ref{fig:mem-4-prod-4-cons-100-dl}\\
2998%      \hline
2999%     glibc & Figure~\ref{fig:mem-1-prod-1-cons-100-glc} & Figure~\ref{fig:mem-4-prod-4-cons-100-glc}\\
3000%      \hline
3001%     hoard & Figure~\ref{fig:mem-1-prod-1-cons-100-hrd} & Figure~\ref{fig:mem-4-prod-4-cons-100-hrd}\\
3002%      \hline
3003%     je & Figure~\ref{fig:mem-1-prod-1-cons-100-je} & Figure~\ref{fig:mem-4-prod-4-cons-100-je}\\
3004%      \hline
3005%     pt3 & Figure~\ref{fig:mem-1-prod-1-cons-100-pt3} & Figure~\ref{fig:mem-4-prod-4-cons-100-pt3}\\
3006%      \hline
3007%     rp & Figure~\ref{fig:mem-1-prod-1-cons-100-rp} & Figure~\ref{fig:mem-4-prod-4-cons-100-rp}\\
3008%      \hline
3009%     tbb & Figure~\ref{fig:mem-1-prod-1-cons-100-tbb} & Figure~\ref{fig:mem-4-prod-4-cons-100-tbb}\\
3010%      \hline
3011%     \end{tabular}
3012% \caption{Memory benchmark results}
3013% \label{table:mem-benchmark-figs}
3014% \end{table}
3015% Table Section~\ref{table:mem-benchmark-figs} shows the list of figures that contain memory benchmark results.
3016
3017Figures~\ref{fig:mem-1-prod-1-cons-100-llh} to \ref{fig:mem-4-prod-4-cons-100-tbb} show 16 figures, two figures for each of the 8 allocators, one for each configuration.
3018Each figure has 2 graphs, one for each experiment environment.
3019Each graph has the following five subgraphs, which show memory usage and statistics throughout the micro-benchmark's lifetime.
3020\begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
3021\item \textit{\textbf{current\_req\_mem(B)}} shows the amount of dynamic memory requested and currently in use by the benchmark.
3022\item \textit{\textbf{heap}}* shows the memory requested by the program (allocator) from the system that lies in the heap (@sbrk@) area.
3023\item \textit{\textbf{mmap\_so}}* shows the memory requested by the program (allocator) from the system that lies in the @mmap@ area.
3024\item \textit{\textbf{mmap}}* shows the memory requested by the program (allocator or shared libraries) from the system that lies in the @mmap@ area.
3025\item \textit{\textbf{total\_dynamic}} shows the total usage of dynamic memory by the benchmark program, which is a sum of \textit{heap}, \textit{mmap}, and \textit{mmap\_so}.
3026\end{itemize}
3027* These statistics are gathered by monitoring a process's @/proc/self/maps@ file.
3028
3029The X-axis shows the time when the memory information is polled.
3030The Y-axis shows the memory usage in bytes.
3031
3032For this experiment, the difference between the memory requested by the benchmark (\textit{current\_req\_mem(B)}) and the memory that the process has received from the system (\textit{heap}, \textit{mmap}) should be minimal.
3033This difference is the memory overhead caused by the allocator and shows the level of fragmentation in the allocator.
3034
3035\paragraph{Assessment}
3036First, the differences in the shape of the curves between architectures (top ARM, bottom x64) are small, where the differences are in the amount of memory used.
3037Hence, it is possible to focus on either the top or bottom graph.
3038
3039Second, the heap curve is 0 for four memory allocators: \textsf{hrd}, \textsf{je}, \textsf{pt3}, and \textsf{rp}, indicating these memory allocators only use @mmap@ to get memory from the system and ignore the @sbrk@ area.
3040
3041The total dynamic memory is higher for \textsf{hrd} and \textsf{tbb} than the other allocators.
3042The main reason is the use of superblocks (see Section~\ref{s:ObjectContainers}) containing objects of the same size.
3043These superblocks are maintained throughout the life of the program.
3044
3045\textsf{pt3} is the only memory allocator where the total dynamic memory goes down in the second half of the program lifetime when the memory is freed by the benchmark program.
3046This behaviour makes \textsf{pt3} the only memory allocator that gives memory back to the operating system as it is freed by the program.
3047
3048% FOR 1 THREAD
3049
3050%mem-1-prod-1-cons-100-llh.eps
3051\begin{figure}
3052\centering
3053    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-llh} } \\
3054    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-llh} }
3055\caption{Memory benchmark results with Configuration-1 for llh memory allocator}
3056\label{fig:mem-1-prod-1-cons-100-llh}
3057\end{figure}
3058
3059%mem-1-prod-1-cons-100-dl.eps
3060\begin{figure}
3061\centering
3062    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-dl} } \\
3063    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-dl} }
3064\caption{Memory benchmark results with Configuration-1 for dl memory allocator}
3065\label{fig:mem-1-prod-1-cons-100-dl}
3066\end{figure}
3067
3068%mem-1-prod-1-cons-100-glc.eps
3069\begin{figure}
3070\centering
3071    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-glc} } \\
3072    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-glc} }
3073\caption{Memory benchmark results with Configuration-1 for glibc memory allocator}
3074\label{fig:mem-1-prod-1-cons-100-glc}
3075\end{figure}
3076
3077%mem-1-prod-1-cons-100-hrd.eps
3078\begin{figure}
3079\centering
3080    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-hrd} } \\
3081    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-hrd} }
3082\caption{Memory benchmark results with Configuration-1 for hoard memory allocator}
3083\label{fig:mem-1-prod-1-cons-100-hrd}
3084\end{figure}
3085
3086%mem-1-prod-1-cons-100-je.eps
3087\begin{figure}
3088\centering
3089    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-je} } \\
3090    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-je} }
3091\caption{Memory benchmark results with Configuration-1 for je memory allocator}
3092\label{fig:mem-1-prod-1-cons-100-je}
3093\end{figure}
3094
3095%mem-1-prod-1-cons-100-pt3.eps
3096\begin{figure}
3097\centering
3098    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-pt3} } \\
3099    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-pt3} }
3100\caption{Memory benchmark results with Configuration-1 for pt3 memory allocator}
3101\label{fig:mem-1-prod-1-cons-100-pt3}
3102\end{figure}
3103
3104%mem-1-prod-1-cons-100-rp.eps
3105\begin{figure}
3106\centering
3107    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-rp} } \\
3108    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-rp} }
3109\caption{Memory benchmark results with Configuration-1 for rp memory allocator}
3110\label{fig:mem-1-prod-1-cons-100-rp}
3111\end{figure}
3112
3113%mem-1-prod-1-cons-100-tbb.eps
3114\begin{figure}
3115\centering
3116    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-tbb} } \\
3117    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-tbb} }
3118\caption{Memory benchmark results with Configuration-1 for tbb memory allocator}
3119\label{fig:mem-1-prod-1-cons-100-tbb}
3120\end{figure}
3121
3122% FOR 4 THREADS
3123
3124%mem-4-prod-4-cons-100-llh.eps
3125\begin{figure}
3126\centering
3127    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-llh} } \\
3128    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-llh} }
3129\caption{Memory benchmark results with Configuration-2 for llh memory allocator}
3130\label{fig:mem-4-prod-4-cons-100-llh}
3131\end{figure}
3132
3133%mem-4-prod-4-cons-100-dl.eps
3134\begin{figure}
3135\centering
3136    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-dl} } \\
3137    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-dl} }
3138\caption{Memory benchmark results with Configuration-2 for dl memory allocator}
3139\label{fig:mem-4-prod-4-cons-100-dl}
3140\end{figure}
3141
3142%mem-4-prod-4-cons-100-glc.eps
3143\begin{figure}
3144\centering
3145    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-glc} } \\
3146    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-glc} }
3147\caption{Memory benchmark results with Configuration-2 for glibc memory allocator}
3148\label{fig:mem-4-prod-4-cons-100-glc}
3149\end{figure}
3150
3151%mem-4-prod-4-cons-100-hrd.eps
3152\begin{figure}
3153\centering
3154    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-hrd} } \\
3155    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-hrd} }
3156\caption{Memory benchmark results with Configuration-2 for hoard memory allocator}
3157\label{fig:mem-4-prod-4-cons-100-hrd}
3158\end{figure}
3159
3160%mem-4-prod-4-cons-100-je.eps
3161\begin{figure}
3162\centering
3163    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-je} } \\
3164    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-je} }
3165\caption{Memory benchmark results with Configuration-2 for je memory allocator}
3166\label{fig:mem-4-prod-4-cons-100-je}
3167\end{figure}
3168
3169%mem-4-prod-4-cons-100-pt3.eps
3170\begin{figure}
3171\centering
3172    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-pt3} } \\
3173    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-pt3} }
3174\caption{Memory benchmark results with Configuration-2 for pt3 memory allocator}
3175\label{fig:mem-4-prod-4-cons-100-pt3}
3176\end{figure}
3177
3178%mem-4-prod-4-cons-100-rp.eps
3179\begin{figure}
3180\centering
3181    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-rp} } \\
3182        %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-rp} }
3183\caption{Memory benchmark results with Configuration-2 for rp memory allocator}
3184\label{fig:mem-4-prod-4-cons-100-rp}
3185\end{figure}
3186
3187%mem-4-prod-4-cons-100-tbb.eps
3188\begin{figure}
3189\centering
3190    %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-tbb} } \\
3191    %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-tbb} }
3192\caption{Memory benchmark results with Configuration-2 for tbb memory allocator}
3193\label{fig:mem-4-prod-4-cons-100-tbb}
3194\end{figure}
3195
3196
3197\section{Conclusion}
3198
3199% \noindent
3200% ====================
3201%
3202% Writing Points:
3203% \begin{itemize}
3204% \item
3205% Summarize u-benchmark suite.
3206% \item
3207% Summarize @uHeapLmmm@.
3208% \item
3209% Make recommendations on memory allocator design.
3210% \end{itemize}
3211%
3212% \noindent
3213% ====================
3214
3215The goal of this work was to build a low-latency (or high bandwidth) memory allocator for both KT and UT multi-threading systems that is competitive with the best current memory allocators while extending the feature set of existing and new allocator routines.
3216The new llheap memory-allocator achieves all of these goals, while maintaining and managing sticky allocation information without a performance loss.
3217Hence, it becomes possible to use @realloc@ frequently as a safe operation, rather than just occasionally.
3218Furthermore, the ability to query sticky properties and information allows programmers to write safer programs, as it is possible to dynamically match allocation styles from unknown library routines that return allocations.
3219
3220Extending the C allocation API with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ means programmers do not have to do these useful allocation operations themselves.
3221The ability to use \CFA's advanced type-system (and possibly \CC's too) to have one allocation routine with completely orthogonal sticky properties shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation.
3222
3223Providing comprehensive statistics for all allocation operations is invaluable in understanding and debugging a program's dynamic behaviour.
3224No other memory allocator provides such comprehensive statistics gathering.
3225This capability was used extensively during the development of llheap to verify its behaviour.
3226As well, providing a debugging mode where allocations are checked, along with internal pre/post conditions and invariants, is extremely useful, especially for students.
3227While not as powerful as the @valgrind@ interpreter, a large number of allocation mistakes are detected.
3228Finally, contention-free statistics gathering and debugging have a low enough cost to be used in production code.
3229
3230The ability to compile llheap with static/dynamic linking and optional statistics/debugging provides programmers with multiple mechanisms to balance performance and safety.
3231These allocator versions are easy to use because they can be linked to an application without recompilation.
3232
3233Starting a micro-benchmark test-suite for comparing allocators, rather than relying on a suite of arbitrary programs, has been an interesting challenge.
3234The current micro-benchmarks allow some understanding of allocator implementation properties without actually looking at the implementation.
3235For example, the memory micro-benchmark quickly identified how several of the allocators work at the global level.
3236It was not possible to show how the micro-benchmarks adjustment knobs were used to tune to an interesting test point.
3237Many graphs were created and discarded until a few were selected for the work.
3238
3239
3240\subsection{Future Work}
3241
3242A careful walk-through of the allocator fastpath should yield additional optimizations for a slight performance gain.
3243In particular, analysing the implementation of rpmalloc, which is often the fastest allocator, may reveal further optimization opportunities.
3244
3245The micro-benchmark project requires more testing and analysis.
3246Additional allocation patterns are needed to extract meaningful information about allocators, and within allocation patterns, what are the most useful tuning knobs.
3247Also, identifying ways to visualize the results of the micro-benchmarks is a work in progress.
3248
3249After llheap is made available on GitHub, interacting with its users to locate problems and improvements will make llheap a more robust memory allocator.
3250As well, feedback from the \uC and \CFA projects, which have adopted llheap for their memory allocator, will provide additional information.
3251
3252
3253
3254\section{Acknowledgements}
3255
3256This research is funded by the NSERC/Waterloo-Huawei (\url{http://www.huawei.com}) Joint Innovation Lab. %, and Peter Buhr is partially funded by the Natural Sciences and Engineering Research Council of Canada.
3257
3258{%
3259\fontsize{9bp}{11.5bp}\selectfont%
3260\bibliography{pl,local}
3261}%
3262
3263\end{document}
3264
3265% Local Variables: %
3266% tab-width: 4 %
3267% fill-column: 120 %
3268% compile-command: "make" %
3269% End: %
Note: See TracBrowser for help on using the repository browser.