Changeset 92538ab for doc


Ignore:
Timestamp:
Apr 10, 2022, 2:53:18 PM (3 years ago)
Author:
JiadaL <j82liang@…>
Branches:
ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum
Children:
d8e2a09
Parents:
4559b34 (diff), 6256891 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Resolve conflict

Location:
doc
Files:
69 added
19 edited
2 moved

Legend:

Unmodified
Added
Removed
  • doc/LaTeXmacros/common.sty

    r4559b34 r92538ab  
    1111%% Created On       : Sat Apr  9 10:06:17 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Mon Feb  7 23:00:46 2022
    14 %% Update Count     : 569
     13%% Last Modified On : Sat Apr  2 17:35:23 2022
     14%% Update Count     : 570
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
     
    301301  {=>}{$\Rightarrow$}2
    302302  {->}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex\textgreater}2,
    303 defaultdialect={CFA},
    304303}% lstset
    305304}% CFAStyle
  • doc/LaTeXmacros/common.tex

    r4559b34 r92538ab  
    1111%% Created On       : Sat Apr  9 10:06:17 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Mon Feb  7 23:00:08 2022
    14 %% Update Count     : 552
     13%% Last Modified On : Sat Apr  2 16:42:31 2022
     14%% Update Count     : 553
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
     
    306306  {=>}{$\Rightarrow$}2
    307307  {->}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex\textgreater}2,
    308 defaultdialect={CFA},
    309308}% lstset
    310309}% CFAStyle
  • doc/theses/mubeen_zulfiqar_MMath/Makefile

    r4559b34 r92538ab  
    1 DOC = uw-ethesis.pdf
    2 BASE = ${DOC:%.pdf=%} # remove suffix
    3 # directory for latex clutter files
    4 BUILD = build
    5 TEXSRC = $(wildcard *.tex)
    6 FIGSRC = $(wildcard *.fig)
    7 BIBSRC = $(wildcard *.bib)
    8 TEXLIB = .:../../LaTeXmacros:${BUILD}: # common latex macros
    9 BIBLIB = .:../../bibliography # common citation repository
     1# Configuration variables
     2
     3Build = build
     4Figures = figures
     5Pictures = pictures
     6
     7TeXSRC = ${wildcard *.tex}
     8FigSRC = ${notdir ${wildcard ${Figures}/*.fig}}
     9PicSRC = ${notdir ${wildcard ${Pictures}/*.fig}}
     10BibSRC = ${wildcard *.bib}
     11
     12TeXLIB = .:../../LaTeXmacros:${Build}:          # common latex macros
     13BibLIB = .:../../bibliography                   # common citation repository
    1014
    1115MAKEFLAGS = --no-print-directory # --silent
    12 VPATH = ${BUILD}
     16VPATH = ${Build} ${Figures} ${Pictures} # extra search path for file names used in document
    1317
    14 ### Special Rules:
     18DOCUMENT = uw-ethesis.pdf
     19BASE = ${basename ${DOCUMENT}}                  # remove suffix
    1520
    16 .PHONY: all clean
     21# Commands
     22
     23LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
     24BibTeX = BIBINPUTS=${BibLIB} && export BIBINPUTS && bibtex
     25#Glossary = INDEXSTYLE=${Build} makeglossaries-lite
     26
     27# Rules and Recipes
     28
     29.PHONY : all clean                              # not file names
    1730.PRECIOUS: %.dvi %.ps # do not delete intermediate files
     31.ONESHELL :
    1832
    19 ### Commands:
    20 LATEX = TEXINPUTS=${TEXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${BUILD}
    21 BIBTEX = BIBINPUTS=${BIBLIB} bibtex
    22 #GLOSSARY = INDEXSTYLE=${BUILD} makeglossaries-lite
     33all : ${DOCUMENT}
    2334
    24 ### Rules and Recipes:
     35clean :
     36        @rm -frv ${DOCUMENT} ${Build}
    2537
    26 all: ${DOC}
     38# File Dependencies
    2739
    28 ${BUILD}/%.dvi: ${TEXSRC} ${FIGSRC:%.fig=%.tex} ${BIBSRC} Makefile | ${BUILD}
    29         ${LATEX} ${BASE}
    30         ${BIBTEX} ${BUILD}/${BASE}
    31         ${LATEX} ${BASE}
    32 #       ${GLOSSARY} ${BUILD}/${BASE}
    33 #       ${LATEX} ${BASE}
     40%.dvi : ${TeXSRC} ${FigSRC:%.fig=%.tex} ${PicSRC:%.fig=%.pstex} ${BibSRC} Makefile | ${Build}
     41        ${LaTeX} ${BASE}
     42        ${BibTeX} ${Build}/${BASE}
     43        ${LaTeX} ${BASE}
     44        # if needed, run latex again to get citations
     45        if fgrep -s "LaTeX Warning: Citation" ${basename $@}.log ; then ${LaTeX} ${BASE} ; fi
     46#       ${Glossary} ${Build}/${BASE}
     47#       ${LaTeX} ${BASE}
    3448
    35 ${BUILD}:
    36         mkdir $@
     49${Build}:
     50        mkdir -p $@
    3751
    38 %.pdf : ${BUILD}/%.ps | ${BUILD}
     52%.pdf : ${Build}/%.ps | ${Build}
    3953        ps2pdf $<
    4054
    41 %.ps : %.dvi | ${BUILD}
     55%.ps : %.dvi | ${Build}
    4256        dvips $< -o $@
    4357
    44 %.tex : %.fig | ${BUILD}
    45         fig2dev -L eepic $< > ${BUILD}/$@
     58%.tex : %.fig | ${Build}
     59        fig2dev -L eepic $< > ${Build}/$@
    4660
    47 %.ps : %.fig | ${BUILD}
    48         fig2dev -L ps $< > ${BUILD}/$@
     61%.ps : %.fig | ${Build}
     62        fig2dev -L ps $< > ${Build}/$@
    4963
    50 %.pstex : %.fig | ${BUILD}
    51         fig2dev -L pstex $< > ${BUILD}/$@
    52         fig2dev -L pstex_t -p ${BUILD}/$@ $< > ${BUILD}/$@_t
    53 
    54 clean:
    55         @rm -frv ${DOC} ${BUILD} *.fig.bak
     64%.pstex : %.fig | ${Build}
     65        fig2dev -L pstex $< > ${Build}/$@
     66        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
  • doc/theses/mubeen_zulfiqar_MMath/allocator.tex

    r4559b34 r92538ab  
    11\chapter{Allocator}
    22
    3 \noindent
    4 ====================
    5 
    6 Writing Points:
    7 \begin{itemize}
    8 \item
    9 Objective of uHeapLmmm.
    10 \item
    11 Design philosophy.
    12 \item
    13 Background and previous design of uHeapLmmm.
    14 \item
    15 Distributed design of uHeapLmmm.
    16 
    17 ----- SHOULD WE GIVE IMPLEMENTATION DETAILS HERE? -----
    18 
    19 \PAB{Maybe. There might be an Implementation chapter.}
    20 \item
    21 figure.
    22 \item
    23 Advantages of distributed design.
    24 \end{itemize}
    25 
    26 The new features added to uHeapLmmm (incl. @malloc\_size@ routine)
    27 \CFA alloc interface with examples.
    28 
    29 \begin{itemize}
    30 \item
    31 Why did we need it?
    32 \item
    33 The added benefits.
    34 \end{itemize}
    35 
    36 
    37 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    38 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    39 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% uHeapLmmm Design
    40 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    41 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    42 
    43 \section{Objective of uHeapLmmm}
    44 UHeapLmmm is a lightweight memory allocator. The objective behind uHeapLmmm is to design a minimal concurrent memory allocator that has new features and also fulfills GNU C Library requirements (FIX ME: cite requirements).
    45 
    46 \subsection{Design philosophy}
    47 The objective of uHeapLmmm's new design was to fulfill following requirements:
    48 \begin{itemize}
    49 \item It should be concurrent to be used in multi-threaded programs.
     3\section{uHeap}
     4uHeap is a lightweight memory allocator. The objective behind uHeap is to design a minimal concurrent memory allocator that has new features and also fulfills GNU C Library requirements (FIX ME: cite requirements).
     5
      6The objective of uHeap's new design was to fulfill the following requirements:
     7\begin{itemize}
     8\item It should be concurrent and thread-safe for multi-threaded programs.
    509\item It should avoid global locks, on resources shared across all threads, as much as possible.
     5110\item Its performance (FIX ME: cite performance benchmarks) should be comparable to the commonly used allocators (FIX ME: cite common allocators).
     
    5514%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    5615
    57 \section{Background and previous design of uHeapLmmm}
    58 uHeapLmmm was originally designed by X in X (FIX ME: add original author after confirming with Peter).
    59 (FIX ME: make and add figure of previous design with description)
    60 
    61 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    62 
    63 \section{Distributed design of uHeapLmmm}
    64 uHeapLmmm's design was reviewed and changed to fulfill new requirements (FIX ME: cite allocator philosophy). For this purpose, following two designs of uHeapLmm were proposed:
    65 
    66 \paragraph{Design 1: Decentralized}
     16\section{Design choices for uHeap}
      17uHeap's design was reviewed and changed to fulfill new requirements (FIX ME: cite allocator philosophy). For this purpose, the following four designs of uHeap were proposed:
     18
     19\paragraph{Design 1: Centralized}
     20One heap, but lower bucket sizes are N-shared across KTs.
     21This design leverages the fact that 95\% of allocation requests are less than 512 bytes and there are only 3--5 different request sizes.
      22When KTs $\le$ N, the important bucket sizes are uncontended.
      23When KTs $>$ N, the free buckets are contended.
     24Therefore, threads are only contending for a small number of buckets, which are distributed among them to reduce contention.
     25\begin{cquote}
     26\centering
     27\input{AllocDS2}
     28\end{cquote}
     29Problems: need to know when a kernel thread (KT) is created and destroyed to know when to assign a shared bucket-number.
      30When no thread is assigned a bucket number, its free storage is unavailable. All KTs will contend for one lock on sbrk for their initial allocations (before the free lists get populated).
     31
     32\paragraph{Design 2: Decentralized N Heaps}
    6733Fixed number of heaps: shard the heap into N heaps each with a bump-area allocated from the @sbrk@ area.
    6834Kernel threads (KT) are assigned to the N heaps.
     
    7743Problems: need to know when a KT is created and destroyed to know when to assign/un-assign a heap to the KT.
    7844
    79 \paragraph{Design 2: Centralized}
    80 One heap, but lower bucket sizes are N-shared across KTs.
    81 This design leverages the fact that 95\% of allocation requests are less than 512 bytes and there are only 3--5 different request sizes.
    82 When KTs $\le$ N, the important bucket sizes are uncontented.
    83 When KTs $>$ N, the free buckets are contented.
    84 Therefore, threads are only contending for a small number of buckets, which are distributed among them to reduce contention.
    85 \begin{cquote}
     45\paragraph{Design 3: Decentralized Per-thread Heaps}
      46Design 3 is similar to Design 2, but instead of an M:N model, it uses a 1:1 model. So, instead of having N heaps shared among M KTs, Design 3 has one heap for each KT.
     47Dynamic number of heaps: create a thread-local heap for each kernel thread (KT) with a bump-area allocated from the @sbrk@ area.
      48Each KT will have its own exclusive thread-local heap. A heap is uncontended between KTs regardless of how many KTs have been created.
      49Operations on the @sbrk@ area will still be protected by locks.
     50%\begin{cquote}
     51%\centering
     52%\input{AllocDS3} FIXME add figs
     53%\end{cquote}
      54Problems: We cannot destroy a heap when a KT exits because our dynamic objects have ownership and are returned to the heap that created them when the program frees a dynamic object. All dynamic objects point back to their owner heap. If a thread A creates an object O, passes it to another thread B, and A itself exits, then when B frees object O, O should return to A's heap; hence, A's heap must be preserved for the lifetime of the whole program, as there might be objects in use by other threads that were allocated by A. Also, we need to know when a KT is created and destroyed to know when to create/destroy a heap for the KT.
     55
     56\paragraph{Design 4: Decentralized Per-CPU Heaps}
     57Design 4 is similar to Design 3 but instead of having a heap for each thread, it creates a heap for each CPU.
     58Fixed number of heaps for a machine: create a heap for each CPU with a bump-area allocated from the @sbrk@ area.
      59Each CPU will have its own CPU-local heap. When the program performs a dynamic memory operation, it is serviced by the heap of the CPU on which the thread is currently running.
      60Each CPU will have its own exclusive heap. Just like Design 3 (FIXME cite), a heap is uncontended between KTs regardless of how many KTs have been created.
      61Operations on the @sbrk@ area will still be protected by locks.
      62To deal with preemption during a dynamic memory operation, librseq (FIXME cite) will be used to make sure that the whole dynamic memory operation completes on one CPU. librseq's restartable sequences make it possible to re-run a critical section and undo the current writes if a preemption happens during the critical section's execution.
     63%\begin{cquote}
     64%\centering
     65%\input{AllocDS4} FIXME add figs
     66%\end{cquote}
     67
      68Problems: This approach was slower than the per-thread model. Also, librseq does not provide restartable sequences that can detect preemptions in a user-level threading system, which is important to us as \CFA (FIXME cite) has its own threading system that we want to support.
     69
      70Out of the four designs, Design 3 was chosen for the following reasons.
     71\begin{itemize}
     72\item
      73Decentralized designs are better in general than the centralized design because their concurrency is better across all bucket sizes: Design 1 shards only a few buckets of selected sizes, while the other designs shard all the buckets. Decentralized designs shard the whole heap, which has all the buckets, with the addition of sharding the sbrk area. So Design 1 was eliminated.
     74\item
      75Design 2 was eliminated because it has a possibility of contention when KT > N, while Designs 3 and 4 have no contention in any scenario.
     76\item
      77Design 4 was eliminated because it was slower than Design 3 and provided no way to achieve user-threading safety using librseq. We had to use \CFA interruption handling to achieve user-threading safety, which has some cost. Design 4 was already slower than Design 3, and adding the cost of interruption handling on top of that would have made it even slower.
     78\end{itemize}
     79
     80
     81\subsection{Advantages of distributed design}
     82
      83The distributed design of uHeap is concurrent, allowing it to work well in multi-threaded applications.
     84
     85Some key benefits of the distributed design of uHeap are as follows:
     86
     87\begin{itemize}
     88\item
      89The bump allocation is concurrent, as memory taken from sbrk is sharded across all heaps as a bump-allocation reserve. The call to sbrk is protected by a lock, but bump allocation (on memory taken from sbrk) is not contended once the sbrk call has returned.
     90\item
     91Low or almost no contention on heap resources.
     92\item
     93It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty.
     94\item
      95The distributed design avoids unnecessary locks on resources shared across all KTs.
     96\end{itemize}
     97
     98%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
     99
     100\section{uHeap Structure}
     101
      102As described in (FIXME cite 2.4), uHeap uses the following features of multi-threaded memory allocators.
     103\begin{itemize}
     104\item
      105uHeap has multiple heaps without a global heap and uses a 1:1 model. (FIXME cite 2.5 1:1 model)
     106\item
     107uHeap uses object ownership. (FIXME cite 2.5.2)
     108\item
      109uHeap does not use object containers (FIXME cite 2.6) or any coalescing technique. Instead, each dynamic object allocated by uHeap has a header that contains bookkeeping information.
     110\item
      111Each thread-local heap in uHeap has its own allocation buffer that is taken from the system using the sbrk() call. (FIXME cite 2.7)
     112\item
      113Unless a heap is freeing an object that is owned by another thread's heap, or the heap is using the sbrk() system call, uHeap is mostly lock-free, which eliminates most of the contention on shared resources. (FIXME cite 2.8)
     114\end{itemize}
     115
      116As uHeap uses a heap-per-thread model to reduce contention on heap resources, we manage a list of heaps (heap-list) that can be used by threads. The list is empty at the start of the program. When a kernel thread (KT) is created, we check whether the heap-list is empty. If it is not, a heap is removed from the heap-list and given to the new KT to use exclusively; if it is empty, a new heap object is created in dynamic memory and given to the new KT to use exclusively. When a KT exits, its heap is not destroyed; instead, it is put on the heap-list, ready to be reused by new KTs.
     117
      118This reduces the memory footprint, as the objects on the free lists of a KT that has exited can be reused by a new KT. Also, we preserve all the heaps that were created during the lifetime of the program until the end of the program. uHeap uses object ownership, where an object is freed to the free-buckets of the heap that allocated it. Even after a KT A has exited, its heap has to be preserved, as there might be objects in use by other threads that were initially allocated by A and then passed to other threads.
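The following is a minimal sketch of the per-object bookkeeping implied by this ownership scheme; the type and field names are illustrative, not the actual uHeap declarations.
\begin{lstlisting}
#include <stddef.h>

struct Heap;                            // thread-local heap, fields elided

// Illustrative header prepended to each dynamic object.
struct Header {
        struct Heap * owner;            // heap that allocated the object; a free from any
                                        // thread routes the object back to this heap
        size_t size;                    // requested allocation size
};
\end{lstlisting}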
     119
     120\begin{figure}
    86121\centering
    87 \input{AllocDS2}
    88 \end{cquote}
    89 Problems: need to know when a kernel thread (KT) is created and destroyed to know when to assign a shared bucket-number.
    90 When no thread is assigned a bucket number, its free storage is unavailable. All KTs will be contended for one lock on sbrk for their initial allocations (before free-lists gets populated).
    91 
    92 Out of the two designs, Design 1 was chosen because it's concurrency is better across all bucket-sizes as design-2 shards a few buckets of selected sizes while design-1 shards all the buckets. Design-2 shards the whole heap which has all the buckets with the addition of sharding sbrk area.
    93 
    94 \subsection{Advantages of distributed design}
    95 The distributed design of uHeapLmmm is concurrent to work in multi-threaded applications.
    96 
    97 Some key benefits of the distributed design of uHeapLmmm are as follows:
    98 
    99 \begin{itemize}
    100 \item
    101 The bump allocation is concurrent as memory taken from sbrk is sharded across all heaps as bump allocation reserve. The lock on bump allocation (on memory taken from sbrk) will only be contended if KTs > N. The contention on sbrk area is less likely as it will only happen in the case if heaps assigned to two KTs get short of bump allocation reserve simultanously.
    102 \item
    103 N heaps are created at the start of the program and destroyed at the end of program. When a KT is created, we only assign it to one of the heaps. When a KT is destroyed, we only dissociate it from the assigned heap but we do not destroy that heap. That heap will go back to our pool-of-heaps, ready to be used by some new KT. And if that heap was shared among multiple KTs (like the case of KTs > N) then, on deletion of one KT, that heap will be still in-use of the other KTs. This will prevent creation and deletion of heaps during run-time as heaps are re-usable which helps in keeping low-memory footprint.
    104 \item
    105 It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty.
    106 \item
    107 Distributed design avoids unnecassry locks on resources shared across all KTs.
    108 \end{itemize}
    109 
    110 FIX ME: Cite performance comparison of the two heap designs if required
     122\includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps}
      123\caption{Heap Structure}
     124\label{fig:heapStructureFig}
     125\end{figure}
     126
      127Each heap uses segregated free-buckets that hold free objects of a specific size. Each free-bucket of a specific size has the following two lists in it (see the sketch after this list):
     128\begin{itemize}
     129\item
      130The free list is used when a thread is freeing an object that is owned by its own heap, so the free list does not use any locks or atomic operations, as it is only used by the owner KT.
     131\item
      132The away list is used when a thread A is freeing an object that is owned by another KT B's heap. This object should be freed to the owner heap (B's heap), so A places the object on B's away list. The away list is lock protected, as it is shared by all other threads.
     133\end{itemize}
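A minimal sketch of such a free-bucket follows; the names are illustrative and the actual uHeap declarations may differ.
\begin{lstlisting}
#include <pthread.h>
#include <stddef.h>

struct FreeObject {                     // link threaded through the freed object's own storage
        struct FreeObject * next;
};

struct Bucket {                         // one bucket per object size
        size_t objectSize;
        struct FreeObject * freeList;   // used only by the owner KT: no locks or atomics
        struct FreeObject * awayList;   // objects freed by other KTs
        pthread_mutex_t awayLock;       // protects awayList only
};
\end{lstlisting}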
     134
      135When a dynamic object of a size S is requested, the thread-local heap checks whether S is greater than or equal to the mmap threshold. Any such request is fulfilled by allocating an mmap area of that size; such requests are not allocated on the sbrk area. The value of this threshold can be changed using the mallopt routine, but the new value should not be larger than our biggest free-bucket size.
     136
     137Algorithm~\ref{alg:heapObjectAlloc} briefly shows how an allocation request is fulfilled.
     138
     139\begin{algorithm}
     140\caption{Dynamic object allocation of size S}\label{alg:heapObjectAlloc}
     141\begin{algorithmic}[1]
     142\State $\textit{O} \gets \text{NULL}$
      143\If {$S < \textit{mmap-threshold}$}
     144        \State $\textit{B} \gets (\text{smallest free-bucket} \geq S)$
     145        \If {$\textit{B's free-list is empty}$}
     146                \If {$\textit{B's away-list is empty}$}
     147                        \If {$\textit{heap's allocation buffer} < S$}
     148                                \State $\text{get allocation buffer using system call sbrk()}$
     149                        \EndIf
     150                        \State $\textit{O} \gets \text{bump allocate an object of size S from allocation buffer}$
     151                \Else
     152                        \State $\textit{merge B's away-list into free-list}$
     153                        \State $\textit{O} \gets \text{pop an object from B's free-list}$
     154                \EndIf
     155        \Else
     156                \State $\textit{O} \gets \text{pop an object from B's free-list}$
     157        \EndIf
     158        \State $\textit{O's owner} \gets \text{B}$
     159\Else
     160        \State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$
     161\EndIf
      162\State \Return $\textit{O}$
     163\end{algorithmic}
     164\end{algorithm}
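For concreteness, the following C-style sketch mirrors the control flow of Algorithm~\ref{alg:heapObjectAlloc}; all helper routines and the bucket type are hypothetical placeholders, not the actual uHeap implementation.
\begin{lstlisting}
#include <stddef.h>

struct Bucket;                                  // hypothetical bucket type
extern size_t mmapThreshold;                    // hypothetical helpers, declarations only
extern void * mmapAllocate( size_t size );
extern struct Bucket * selectBucket( size_t size );            // smallest free-bucket >= size
extern int freeListEmpty( struct Bucket * b );
extern int awayListEmpty( struct Bucket * b );
extern void mergeAwayIntoFree( struct Bucket * b );            // locks the away list
extern void * popFree( struct Bucket * b );
extern void * bumpAllocate( struct Bucket * b, size_t size );  // may call sbrk() under a lock
extern void setOwner( void * o, struct Bucket * b );           // record owner in the header

void * allocate( size_t S ) {
        if ( S >= mmapThreshold ) return mmapAllocate( S );    // large requests bypass the sbrk area
        struct Bucket * b = selectBucket( S );
        void * o;
        if ( freeListEmpty( b ) && awayListEmpty( b ) ) {
                o = bumpAllocate( b, S );                       // extend bump area if necessary
        } else {
                if ( freeListEmpty( b ) ) mergeAwayIntoFree( b );
                o = popFree( b );
        }
        setOwner( o, b );
        return o;
}
\end{lstlisting}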
     165
    111166
    112167%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    113168
    114169\section{Added Features and Methods}
    115 To improve the UHeapLmmm allocator (FIX ME: cite uHeapLmmm) interface and make it more user friendly, we added a few more routines to the C allocator. Also, we built a CFA (FIX ME: cite cforall) interface on top of C interface to increase the usability of the allocator.
      170To improve the uHeap allocator (FIX ME: cite uHeap) interface and make it more user friendly, we added a few more routines to the C allocator. Also, we built a \CFA (FIX ME: cite cforall) interface on top of the C interface to increase the usability of the allocator.
    116171
    117172\subsection{C Interface}
     118173We added a few more features and routines to the allocator's C interface that make the allocator more usable to programmers. These features give the programmer more control over dynamic memory allocation.
    119174
    120 \subsubsection void * aalloc( size\_t dim, size\_t elemSize )
    121 aalloc is an extension of malloc. It allows programmer to allocate a dynamic array of objects without calculating the total size of array explicitly. The only alternate of this routine in the other allocators is calloc but calloc also fills the dynamic memory with 0 which makes it slower for a programmer who only wants to dynamically allocate an array of objects without filling it with 0.
    122 \paragraph{Usage}
    123 aalloc takes two parameters.
    124 
    125 \begin{itemize}
    126 \item
    127 dim: number of objects in the array
    128 \item
    129 elemSize: size of the object in the array.
    130 \end{itemize}
    131 It returns address of dynamic object allocatoed on heap that can contain dim number of objects of the size elemSize. On failure, it returns NULL pointer.
    132 
    133 \subsubsection void * resize( void * oaddr, size\_t size )
    134 resize is an extension of relloc. It allows programmer to reuse a cuurently allocated dynamic object with a new size requirement. Its alternate in the other allocators is realloc but relloc also copy the data in old object to the new object which makes it slower for the programmer who only wants to reuse an old dynamic object for a new size requirement but does not want to preserve the data in the old object to the new object.
    135 \paragraph{Usage}
    136 resize takes two parameters.
    137 
    138 \begin{itemize}
    139 \item
    140 oaddr: the address of the old object that needs to be resized.
    141 \item
    142 size: the new size requirement of the to which the old object needs to be resized.
    143 \end{itemize}
    144 It returns an object that is of the size given but it does not preserve the data in the old object. On failure, it returns NULL pointer.
    145 
    146 \subsubsection void * resize( void * oaddr, size\_t nalign, size\_t size )
    147 This resize is an extension of the above resize (FIX ME: cite above resize). In addition to resizing the size of of an old object, it can also realign the old object to a new alignment requirement.
     175\subsection{Out of Memory}
     176
     177Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory;
     178hence the need to return an alternate value for a zero-sized allocation.
     179The alternative is to abort a program when out of memory.
     180In theory, notifying the programmer allows recovery;
      181in practice, it is almost impossible to recover gracefully when out of memory, so the cheaper approach of returning @nullptr@ for a zero-sized allocation is chosen.
     182
     183
     184\subsection{\lstinline{void * aalloc( size_t dim, size_t elemSize )}}
      185@aalloc@ is an extension of malloc. It allows the programmer to allocate a dynamic array of objects without calculating the total size of the array explicitly. The only alternative to this routine in other allocators is calloc, but calloc also fills the dynamic memory with 0, which makes it slower for a programmer who only wants to dynamically allocate an array of objects without filling it with 0.
     186\paragraph{Usage}
     187@aalloc@ takes two parameters.
     188
     189\begin{itemize}
     190\item
     191@dim@: number of objects in the array
     192\item
     193@elemSize@: size of the object in the array.
     194\end{itemize}
      195It returns the address of a dynamic object allocated on the heap that can contain @dim@ objects of size @elemSize@. On failure, it returns a @NULL@ pointer.
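A short usage sketch, assuming the prototype above is visible; the values are illustrative.
\begin{lstlisting}
#include <stdlib.h>
void * aalloc( size_t dim, size_t elemSize );   // prototype from the description above

int main( void ) {
        int * a = aalloc( 100, sizeof( int ) ); // array of 100 ints, not zero filled (unlike calloc)
        if ( a == NULL ) return 1;              // allocation failure
        a[0] = 42;
        free( a );
}
\end{lstlisting}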
     196
     197\subsection{\lstinline{void * resize( void * oaddr, size_t size )}}
      198@resize@ is an extension of @realloc@. It allows the programmer to reuse a currently allocated dynamic object with a new size requirement. Its alternative in other allocators is @realloc@, but @realloc@ also copies the data from the old object to the new object, which makes it slower for the programmer who only wants to reuse an old dynamic object for a new size requirement but does not need the old data preserved.
     199\paragraph{Usage}
     200@resize@ takes two parameters.
     201
     202\begin{itemize}
     203\item
     204@oaddr@: the address of the old object that needs to be resized.
     205\item
      206@size@: the new size to which the old object needs to be resized.
     207\end{itemize}
      208It returns an object of the given size, but it does not preserve the data in the old object. On failure, it returns a @NULL@ pointer.
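A short usage sketch, assuming the prototype above is visible.
\begin{lstlisting}
#include <stdlib.h>
void * resize( void * oaddr, size_t size );     // prototype from the description above

int main( void ) {
        char * buf = malloc( 64 );
        buf = resize( buf, 4096 );              // reuse the allocation; old contents are NOT preserved
        if ( buf == NULL ) return 1;
        free( buf );
}
\end{lstlisting}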
     209
     210\subsection{\lstinline{void * resize( void * oaddr, size_t nalign, size_t size )}}
      211This @resize@ is an extension of the above @resize@ (FIX ME: cite above resize). In addition to resizing an old object, it can also realign the old object to a new alignment requirement.
    148212\paragraph{Usage}
    149213This resize takes three parameters. It takes an additional parameter of nalign as compared to the above resize (FIX ME: cite above resize).
     
    151215\begin{itemize}
    152216\item
    153 oaddr: the address of the old object that needs to be resized.
    154 \item
    155 nalign: the new alignment to which the old object needs to be realigned.
    156 \item
    157 size: the new size requirement of the to which the old object needs to be resized.
    158 \end{itemize}
    159 It returns an object with the size and alignment given in the parameters. On failure, it returns a NULL pointer.
    160 
    161 \subsubsection void * amemalign( size\_t alignment, size\_t dim, size\_t elemSize )
     217@oaddr@: the address of the old object that needs to be resized.
     218\item
     219@nalign@: the new alignment to which the old object needs to be realigned.
     220\item
      221@size@: the new size to which the old object needs to be resized.
     222\end{itemize}
     223It returns an object with the size and alignment given in the parameters. On failure, it returns a @NULL@ pointer.
     224
     225\subsection{\lstinline{void * amemalign( size_t alignment, size_t dim, size_t elemSize )}}
     162226amemalign is a hybrid of memalign and aalloc. It allows the programmer to allocate an aligned dynamic array of objects without calculating the total size of the array explicitly.
    163227\paragraph{Usage}
     
    166230\begin{itemize}
    167231\item
    168 alignment: the alignment to which the dynamic array needs to be aligned.
    169 \item
    170 dim: number of objects in the array
    171 \item
    172 elemSize: size of the object in the array.
    173 \end{itemize}
    174 It returns a dynamic array of objects that has the capacity to contain dim number of objects of the size of elemSize. The returned dynamic array is aligned to the given alignment. On failure, it returns NULL pointer.
    175 
    176 \subsubsection void * cmemalign( size\_t alignment, size\_t dim, size\_t elemSize )
     232@alignment@: the alignment to which the dynamic array needs to be aligned.
     233\item
     234@dim@: number of objects in the array
     235\item
     236@elemSize@: size of the object in the array.
     237\end{itemize}
      238It returns a dynamic array that has the capacity to contain @dim@ objects of size @elemSize@. The returned dynamic array is aligned to the given alignment. On failure, it returns a @NULL@ pointer.
     239
     240\subsection{\lstinline{void * cmemalign( size_t alignment, size_t dim, size_t elemSize )}}
     177241cmemalign is a hybrid of amemalign and calloc. It allows the programmer to allocate an aligned dynamic array of objects that is 0 filled. The current way to do this in other allocators is to allocate an aligned object with memalign and then fill it with 0 explicitly. This routine provides both features, aligning and 0 filling, implicitly.
    178242\paragraph{Usage}
     
    181245\begin{itemize}
    182246\item
    183 alignment: the alignment to which the dynamic array needs to be aligned.
    184 \item
    185 dim: number of objects in the array
    186 \item
    187 elemSize: size of the object in the array.
    188 \end{itemize}
    189 It returns a dynamic array of objects that has the capacity to contain dim number of objects of the size of elemSize. The returned dynamic array is aligned to the given alignment and is 0 filled. On failure, it returns NULL pointer.
    190 
    191 \subsubsection size\_t malloc\_alignment( void * addr )
    192 malloc\_alignment returns the alignment of a currently allocated dynamic object. It allows the programmer in memory management and personal bookkeeping. It helps the programmer in verofying the alignment of a dynamic object especially in a scenerio similar to prudcer-consumer where a producer allocates a dynamic object and the consumer needs to assure that the dynamic object was allocated with the required alignment.
    193 \paragraph{Usage}
    194 malloc\_alignment takes one parameters.
    195 
    196 \begin{itemize}
    197 \item
    198 addr: the address of the currently allocated dynamic object.
    199 \end{itemize}
    200 malloc\_alignment returns the alignment of the given dynamic object. On failure, it return the value of default alignment of the uHeapLmmm allocator.
    201 
    202 \subsubsection bool malloc\_zero\_fill( void * addr )
    203 malloc\_zero\_fill returns whether a currently allocated dynamic object was initially zero filled at the time of allocation. It allows the programmer in memory management and personal bookkeeping. It helps the programmer in verifying the zero filled property of a dynamic object especially in a scenerio similar to prudcer-consumer where a producer allocates a dynamic object and the consumer needs to assure that the dynamic object was zero filled at the time of allocation.
    204 \paragraph{Usage}
    205 malloc\_zero\_fill takes one parameters.
    206 
    207 \begin{itemize}
    208 \item
    209 addr: the address of the currently allocated dynamic object.
    210 \end{itemize}
    211 malloc\_zero\_fill returns true if the dynamic object was initially zero filled and return false otherwise. On failure, it returns false.
    212 
    213 \subsubsection size\_t malloc\_size( void * addr )
    214 malloc\_size returns the allocation size of a currently allocated dynamic object. It allows the programmer in memory management and personal bookkeeping. It helps the programmer in verofying the alignment of a dynamic object especially in a scenerio similar to prudcer-consumer where a producer allocates a dynamic object and the consumer needs to assure that the dynamic object was allocated with the required size. Its current alternate in the other allocators is malloc\_usable\_size. But, malloc\_size is different from malloc\_usable\_size as malloc\_usabe\_size returns the total data capacity of dynamic object including the extra space at the end of the dynamic object. On the other hand, malloc\_size returns the size that was given to the allocator at the allocation of the dynamic object. This size is updated when an object is realloced, resized, or passed through a similar allocator routine.
    215 \paragraph{Usage}
    216 malloc\_size takes one parameters.
    217 
    218 \begin{itemize}
    219 \item
    220 addr: the address of the currently allocated dynamic object.
    221 \end{itemize}
    222 malloc\_size returns the allocation size of the given dynamic object. On failure, it return zero.
    223 
    224 \subsubsection void * realloc( void * oaddr, size\_t nalign, size\_t size )
    225 This realloc is an extension of the default realloc (FIX ME: cite default realloc). In addition to reallocating an old object and preserving the data in old object, it can also realign the old object to a new alignment requirement.
    226 \paragraph{Usage}
    227 This realloc takes three parameters. It takes an additional parameter of nalign as compared to the default realloc.
    228 
    229 \begin{itemize}
    230 \item
    231 oaddr: the address of the old object that needs to be reallocated.
    232 \item
    233 nalign: the new alignment to which the old object needs to be realigned.
    234 \item
    235 size: the new size requirement of the to which the old object needs to be resized.
    236 \end{itemize}
    237 It returns an object with the size and alignment given in the parameters that preserves the data in the old object. On failure, it returns a NULL pointer.
    238 
    239 \subsection{CFA Malloc Interface}
    240 We added some routines to the malloc interface of CFA. These routines can only be used in CFA and not in our standalone uHeapLmmm allocator as these routines use some features that are only provided by CFA and not by C. It makes the allocator even more usable to the programmers.
    241 CFA provides the liberty to know the returned type of a call to the allocator. So, mainly in these added routines, we removed the object size parameter from the routine as allocator can calculate the size of the object from the returned type.
    242 
    243 \subsubsection T * malloc( void )
     247@alignment@: the alignment to which the dynamic array needs to be aligned.
     248\item
     249@dim@: number of objects in the array
     250\item
     251@elemSize@: size of the object in the array.
     252\end{itemize}
      253It returns a dynamic array that has the capacity to contain @dim@ objects of size @elemSize@. The returned dynamic array is aligned to the given alignment and is 0 filled. On failure, it returns a @NULL@ pointer.
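A short usage sketch, assuming the prototype above is visible.
\begin{lstlisting}
#include <stdlib.h>
void * cmemalign( size_t alignment, size_t dim, size_t elemSize );      // prototype from above

int main( void ) {
        // 64-byte aligned, zero-filled array of 32 doubles in one call,
        // instead of memalign followed by an explicit memset.
        double * d = cmemalign( 64, 32, sizeof( double ) );
        if ( d == NULL ) return 1;
        free( d );
}
\end{lstlisting}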
     254
     255\subsection{\lstinline{size_t malloc_alignment( void * addr )}}
      256@malloc_alignment@ returns the alignment of a currently allocated dynamic object. It helps the programmer with memory management and personal bookkeeping. It helps the programmer verify the alignment of a dynamic object, especially in a producer-consumer scenario where a producer allocates a dynamic object and the consumer needs to ensure that the dynamic object was allocated with the required alignment.
     257\paragraph{Usage}
      258@malloc_alignment@ takes one parameter.
     259
     260\begin{itemize}
     261\item
     262@addr@: the address of the currently allocated dynamic object.
     263\end{itemize}
      264@malloc_alignment@ returns the alignment of the given dynamic object. On failure, it returns the default alignment of the uHeap allocator.
     265
     266\subsection{\lstinline{bool malloc_zero_fill( void * addr )}}
      267@malloc_zero_fill@ returns whether a currently allocated dynamic object was initially zero filled at the time of allocation. It helps the programmer with memory management and personal bookkeeping. It helps the programmer verify the zero-filled property of a dynamic object, especially in a producer-consumer scenario where a producer allocates a dynamic object and the consumer needs to ensure that the dynamic object was zero filled at the time of allocation.
     268\paragraph{Usage}
      269@malloc_zero_fill@ takes one parameter.
     270
     271\begin{itemize}
     272\item
     273@addr@: the address of the currently allocated dynamic object.
     274\end{itemize}
      275@malloc_zero_fill@ returns true if the dynamic object was initially zero filled and false otherwise. On failure, it returns false.
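A sketch of the producer-consumer check described above; the consumer routine and the 64-byte requirement are illustrative.
\begin{lstlisting}
#include <stdbool.h>
#include <stdlib.h>
size_t malloc_alignment( void * addr );         // prototypes from the descriptions above
bool malloc_zero_fill( void * addr );

void consume( void * buffer ) {                 // consumer side of a producer-consumer exchange
        if ( malloc_alignment( buffer ) < 64 || ! malloc_zero_fill( buffer ) ) {
                abort();                        // producer violated the agreed allocation properties
        }
        // ... use buffer ...
}
\end{lstlisting}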
     276
     277\subsection{\lstinline{size_t malloc_size( void * addr )}}
      278@malloc_size@ returns the allocation size of a currently allocated dynamic object. It helps the programmer with memory management and personal bookkeeping. It helps the programmer verify the size of a dynamic object, especially in a producer-consumer scenario where a producer allocates a dynamic object and the consumer needs to ensure that the dynamic object was allocated with the required size. Its current alternative in other allocators is @malloc_usable_size@. But @malloc_size@ is different from @malloc_usable_size@, as @malloc_usable_size@ returns the total data capacity of the dynamic object, including the extra space at the end of the dynamic object. On the other hand, @malloc_size@ returns the size that was given to the allocator at the allocation of the dynamic object. This size is updated when an object is realloced, resized, or passed through a similar allocator routine.
     279\paragraph{Usage}
      280@malloc_size@ takes one parameter.
     281
     282\begin{itemize}
     283\item
     284@addr@: the address of the currently allocated dynamic object.
     285\end{itemize}
      286@malloc_size@ returns the allocation size of the given dynamic object. On failure, it returns zero.
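A sketch of the difference from @malloc_usable_size@; the requested size of 100 bytes is illustrative.
\begin{lstlisting}
#include <malloc.h>                             // malloc_usable_size
#include <stdlib.h>
size_t malloc_size( void * addr );              // prototype from the description above

int main( void ) {
        char * p = malloc( 100 );
        size_t requested = malloc_size( p );            // 100: the size the program asked for
        size_t capacity  = malloc_usable_size( p );     // >= 100: the bucket's total data capacity
        free( p );
        return requested <= capacity ? 0 : 1;
}
\end{lstlisting}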
     287
     288\subsection{\lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}}
      289This @realloc@ is an extension of the default @realloc@ (FIX ME: cite default @realloc@). In addition to reallocating an old object and preserving the data in the old object, it can also realign the old object to a new alignment requirement.
     290\paragraph{Usage}
     291This @realloc@ takes three parameters. It takes an additional parameter of nalign as compared to the default @realloc@.
     292
     293\begin{itemize}
     294\item
     295@oaddr@: the address of the old object that needs to be reallocated.
     296\item
     297@nalign@: the new alignment to which the old object needs to be realigned.
     298\item
     299@size@: the new size requirement of the to which the old object needs to be resized.
     300\end{itemize}
      301It returns an object with the given size and alignment that preserves the data in the old object. On failure, it returns a @NULL@ pointer.
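A short sketch of this aligned @realloc@; because plain C has no overloading, the uHeap prototypes are declared directly rather than taken from the standard header.
\begin{lstlisting}
#include <stddef.h>
// uHeap C-interface prototypes as described above (declared directly, since the
// three-parameter realloc cannot coexist with the standard two-parameter prototype in plain C)
void * malloc( size_t size );
void   free( void * addr );
void * realloc( void * oaddr, size_t nalign, size_t size );

int main( void ) {
        int * a = malloc( 100 * sizeof( int ) );
        // grow the array to 200 ints and realign it to a 4096-byte boundary, preserving contents
        a = realloc( a, 4096, 200 * sizeof( int ) );
        if ( a == NULL ) return 1;
        free( a );
}
\end{lstlisting}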
     302
     303\subsection{\CFA Malloc Interface}
      304We added some routines to the malloc interface of \CFA. These routines can only be used in \CFA and not in our standalone uHeap allocator, as these routines use some features that are only provided by \CFA and not by C. This makes the allocator even more usable to programmers.
      305\CFA provides the liberty to know the returned type of a call to the allocator. So, mainly in these added routines, we removed the object size parameter from the routine, as the allocator can calculate the size of the object from the returned type.
     306
     307\subsection{\lstinline{T * malloc( void )}}
     244308This malloc is a simplified polymorphic form of the default malloc (FIX ME: cite malloc). It does not take any parameters, unlike the default malloc that takes one parameter.
    245309\paragraph{Usage}
    246310This malloc takes no parameters.
    247 It returns a dynamic object of the size of type T. On failure, it return NULL pointer.
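A usage sketch in \CFA, where the result type determines the allocation size; it assumes the \CFA standard allocator declarations are visible.
\begin{lstlisting}
struct S { int x, y; };

int main( void ) {
        // CFA: the object size is inferred from the result type, so malloc takes no arguments.
        int * ip = malloc();            // allocates sizeof(int)
        struct S * sp = malloc();       // allocates sizeof(struct S)
        free( ip );
        free( sp );
}
\end{lstlisting}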
    248 
    249 \subsubsection T * aalloc( size\_t dim )
     311It returns a dynamic object of the size of type @T@. On failure, it returns a @NULL@ pointer.
     312
     313\subsection{\lstinline{T * aalloc( size_t dim )}}
    250314This aalloc is a simplified polymorphic form of above aalloc (FIX ME: cite aalloc). It takes one parameter as compared to the above aalloc that takes two parameters.
    251315\paragraph{Usage}
     
    254318\begin{itemize}
    255319\item
    256 dim: required number of objects in the array.
    257 \end{itemize}
    258 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type T. On failure, it return NULL pointer.
    259 
    260 \subsubsection T * calloc( size\_t dim )
     320@dim@: required number of objects in the array.
     321\end{itemize}
     322It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer.
     323
     324\subsection{\lstinline{T * calloc( size_t dim )}}
     261325This calloc is a simplified polymorphic form of the default calloc (FIX ME: cite calloc). It takes one parameter as compared to the default calloc that takes two parameters.
    262326\paragraph{Usage}
     
    265329\begin{itemize}
    266330\item
    267 dim: required number of objects in the array.
    268 \end{itemize}
    269 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type T. On failure, it return NULL pointer.
    270 
    271 \subsubsection T * resize( T * ptr, size\_t size )
    272 This resize is a simplified polymorphic form of above resize (FIX ME: cite resize with alignment). It takes two parameters as compared to the above resize that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation as CFA provides gives allocator the liberty to get the alignment of the returned type.
     331@dim@: required number of objects in the array.
     332\end{itemize}
     333It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer.
     334
     335\subsection{\lstinline{T * resize( T * ptr, size_t size )}}
      336This resize is a simplified polymorphic form of the above resize (FIX ME: cite resize with alignment). It takes two parameters as compared to the above resize that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation, as \CFA gives the allocator the liberty to get the alignment from the returned type.
    273337\paragraph{Usage}
    274338This resize takes two parameters.
     
    276340\begin{itemize}
    277341\item
    278 ptr: address of the old object.
    279 \item
    280 size: the required size of the new object.
    281 \end{itemize}
    282 It returns a dynamic object of the size given in paramters. The returned object is aligned to the alignemtn of type T. On failure, it return NULL pointer.
    283 
    284 \subsubsection T * realloc( T * ptr, size\_t size )
    285 This realloc is a simplified polymorphic form of defualt realloc (FIX ME: cite realloc with align). It takes two parameters as compared to the above realloc that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation as CFA provides gives allocator the liberty to get the alignment of the returned type.
    286 \paragraph{Usage}
    287 This realloc takes two parameters.
    288 
    289 \begin{itemize}
    290 \item
    291 ptr: address of the old object.
    292 \item
    293 size: the required size of the new object.
    294 \end{itemize}
    295 It returns a dynamic object of the size given in paramters that preserves the data in the given object. The returned object is aligned to the alignemtn of type T. On failure, it return NULL pointer.
    296 
    297 \subsubsection T * memalign( size\_t align )
     342@ptr@: address of the old object.
     343\item
     344@size@: the required size of the new object.
     345\end{itemize}
      346It returns a dynamic object of the size given in the parameters. The returned object is aligned to the alignment of type @T@. On failure, it returns a @NULL@ pointer.
     347
     348\subsection{\lstinline{T * realloc( T * ptr, size_t size )}}
      349This @realloc@ is a simplified polymorphic form of the default @realloc@ (FIX ME: cite @realloc@ with align). It takes two parameters as compared to the above @realloc@ that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation, as \CFA gives the allocator the liberty to get the alignment from the returned type.
     350\paragraph{Usage}
     351This @realloc@ takes two parameters.
     352
     353\begin{itemize}
     354\item
     355@ptr@: address of the old object.
     356\item
     357@size@: the required size of the new object.
     358\end{itemize}
      359It returns a dynamic object of the size given in the parameters that preserves the data in the given object. The returned object is aligned to the alignment of type @T@. On failure, it returns a @NULL@ pointer.
     360
     361\subsection{\lstinline{T * memalign( size_t align )}}
     298362This memalign is a simplified polymorphic form of the default memalign (FIX ME: cite memalign). It takes one parameter as compared to the default memalign that takes two parameters.
    299363\paragraph{Usage}
     
    302366\begin{itemize}
    303367\item
    304 align: the required alignment of the dynamic object.
    305 \end{itemize}
    306 It returns a dynamic object of the size of type T that is aligned to given parameter align. On failure, it return NULL pointer.
    307 
    308 \subsubsection T * amemalign( size\_t align, size\_t dim )
     368@align@: the required alignment of the dynamic object.
     369\end{itemize}
      370It returns a dynamic object of the size of type @T@ that is aligned to the given parameter align. On failure, it returns a @NULL@ pointer.
     371
     372\subsection{\lstinline{T * amemalign( size_t align, size_t dim )}}
     309373This amemalign is a simplified polymorphic form of the above amemalign (FIX ME: cite amemalign). It takes two parameters as compared to the above amemalign that takes three parameters.
    310374\paragraph{Usage}
     
    313377\begin{itemize}
    314378\item
    315 align: required alignment of the dynamic array.
    316 \item
    317 dim: required number of objects in the array.
    318 \end{itemize}
    319 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type T. The returned object is aligned to the given parameter align. On failure, it return NULL pointer.
    320 
    321 \subsubsection T * cmemalign( size\_t align, size\_t dim  )
     379@align@: required alignment of the dynamic array.
     380\item
     381@dim@: required number of objects in the array.
     382\end{itemize}
     383It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. The returned object is aligned to the given parameter align. On failure, it returns a @NULL@ pointer.
     384
     385\subsection{\lstinline{T * cmemalign( size_t align, size_t dim  )}}
     322386This cmemalign is a simplified polymorphic form of the above cmemalign (FIX ME: cite cmemalign). It takes two parameters as compared to the above cmemalign that takes three parameters.
    323387\paragraph{Usage}
     
    326390\begin{itemize}
    327391\item
    328 align: required alignment of the dynamic array.
    329 \item
    330 dim: required number of objects in the array.
    331 \end{itemize}
    332 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type T. The returned object is aligned to the given parameter align and is zero filled. On failure, it return NULL pointer.
    333 
    334 \subsubsection T * aligned\_alloc( size\_t align )
    335 This aligned\_alloc is a simplified polymorphic form of defualt aligned\_alloc (FIX ME: cite aligned\_alloc). It takes one parameter as compared to the default aligned\_alloc that takes two parameters.
    336 \paragraph{Usage}
    337 This aligned\_alloc takes one parameter.
    338 
    339 \begin{itemize}
    340 \item
    341 align: required alignment of the dynamic object.
    342 \end{itemize}
    343 It returns a dynamic object of the size of type T that is aligned to the given parameter. On failure, it return NULL pointer.
    344 
    345 \subsubsection int posix\_memalign( T ** ptr, size\_t align )
    346 This posix\_memalign is a simplified polymorphic form of defualt posix\_memalign (FIX ME: cite posix\_memalign). It takes two parameters as compared to the default posix\_memalign that takes three parameters.
    347 \paragraph{Usage}
    348 This posix\_memalign takes two parameter.
    349 
    350 \begin{itemize}
    351 \item
    352 ptr: variable address to store the address of the allocated object.
    353 \item
    354 align: required alignment of the dynamic object.
    355 \end{itemize}
    356 
    357 It stores address of the dynamic object of the size of type T in given parameter ptr. This object is aligned to the given parameter. On failure, it return NULL pointer.
    358 
    359 \subsubsection T * valloc( void )
    360 This valloc is a simplified polymorphic form of defualt valloc (FIX ME: cite valloc). It takes no parameters as compared to the default valloc that takes one parameter.
    361 \paragraph{Usage}
    362 valloc takes no parameters.
    363 It returns a dynamic object of the size of type T that is aligned to the page size. On failure, it return NULL pointer.
    364 
    365 \subsubsection T * pvalloc( void )
    366 This pcvalloc is a simplified polymorphic form of defualt pcvalloc (FIX ME: cite pcvalloc). It takes no parameters as compared to the default pcvalloc that takes one parameter.
    367 \paragraph{Usage}
    368 pvalloc takes no parameters.
    369 It returns a dynamic object of the size that is calcutaed by rouding the size of type T. The returned object is also aligned to the page size. On failure, it return NULL pointer.
    370 
    371 \subsection Alloc Interface
    372 In addition to improve allocator interface both for CFA and our standalone allocator uHeapLmmm in C. We also added a new alloc interface in CFA that increases usability of dynamic memory allocation.
     392@align@: required alignment of the dynamic array.
     393\item
     394@dim@: required number of objects in the array.
     395\end{itemize}
     396It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. The returned object is aligned to the given parameter align and is zero filled. On failure, it returns a @NULL@ pointer.
     397
     398\subsection{\lstinline{T * aligned_alloc( size_t align )}}
      399This @aligned_alloc@ is a simplified polymorphic form of the default @aligned_alloc@ (FIX ME: cite @aligned_alloc@). It takes one parameter as compared to the default @aligned_alloc@ that takes two parameters.
     400\paragraph{Usage}
     401This @aligned_alloc@ takes one parameter.
     402
     403\begin{itemize}
     404\item
     405@align@: required alignment of the dynamic object.
     406\end{itemize}
     407It returns a dynamic object of the size of type @T@ that is aligned to the given parameter. On failure, it returns a @NULL@ pointer.
     408
     409\subsection{\lstinline{int posix_memalign( T ** ptr, size_t align )}}
      410This @posix_memalign@ is a simplified polymorphic form of the default @posix_memalign@ (FIX ME: cite @posix_memalign@). It takes two parameters as compared to the default @posix_memalign@ that takes three parameters.
     411\paragraph{Usage}
      412This @posix_memalign@ takes two parameters.
     413
     414\begin{itemize}
     415\item
     416@ptr@: variable address to store the address of the allocated object.
     417\item
     418@align@: required alignment of the dynamic object.
     419\end{itemize}
     420
      421It stores the address of a dynamic object of the size of type @T@ in the given parameter ptr. This object is aligned to the given alignment. On failure, it returns a nonzero error value.
     422
     423\subsection{\lstinline{T * valloc( void )}}
      424This @valloc@ is a simplified polymorphic form of the default @valloc@ (FIX ME: cite @valloc@). It takes no parameters as compared to the default @valloc@ that takes one parameter.
     425\paragraph{Usage}
     426@valloc@ takes no parameters.
     427It returns a dynamic object of the size of type @T@ that is aligned to the page size. On failure, it returns a @NULL@ pointer.
     428
      429\subsection{\lstinline{T * pvalloc( void )}}
This @pvalloc@ is a simplified polymorphic form of the default @pvalloc@ (FIX ME: cite @pvalloc@). It takes no parameters as compared to the default @pvalloc@ that takes one parameter.
      430\paragraph{Usage}
     431@pvalloc@ takes no parameters.
      432It returns a dynamic object whose size is calculated by rounding the size of type @T@ up to a multiple of the page size. The returned object is also aligned to the page size. On failure, it returns a @NULL@ pointer.
     433
     434\subsection{Alloc Interface}
      435In addition to improving the allocator interface both for \CFA and for our standalone allocator uHeap in C, we also added a new alloc interface in \CFA that increases the usability of dynamic memory allocation.
    373436This interface helps programmers in three major ways.
    374437
     
    379442Parameter Positions: the alloc interface frees programmers from remembering parameter positions in calls to routines.
    380443\item
    381 Object Size: alloc interface does not require programmer to mention the object size as CFA allows allocator to determince the object size from returned type of alloc call.
    382 \end{itemize}
    383 
    384 Alloc interface uses polymorphism, backtick routines (FIX ME: cite backtick) and ttype parameters of CFA (FIX ME: cite ttype) to provide a very simple dynamic memory allocation interface to the programmers. The new interfece has just one routine name alloc that can be used to perform a wide range of dynamic allocations. The parameters use backtick functions to provide a similar-to named parameters feature for our alloc interface so that programmers do not have to remember parameter positions in alloc call except the position of dimension (dim) parameter.
    385 
    386 \subsubsection{Routine: T * alloc( ... )}
    387 Call to alloc wihout any parameter returns one object of size of type T allocated dynamically.
      444Object Size: the alloc interface does not require the programmer to specify the object size, as \CFA allows the allocator to determine the object size from the return type of the alloc call.
     445\end{itemize}
     446
      447The alloc interface uses polymorphism, backtick routines (FIX ME: cite backtick) and ttype parameters of \CFA (FIX ME: cite ttype) to provide a very simple dynamic memory-allocation interface to programmers. The new interface has just one routine, named alloc, that can be used to perform a wide range of dynamic allocations. The parameters use backtick functions to provide a named-parameter-like feature for our alloc interface, so programmers do not have to remember parameter positions in an alloc call, except for the position of the dimension (dim) parameter.
     448
     449\subsection{Routine: \lstinline{T * alloc( ... )}}
      450A call to alloc without any parameters returns one dynamically allocated object of the size of type @T@.
    388451Only the dimension (dim) parameter for array allocation has a fixed position in the alloc routine. If the programmer wants to allocate an array of objects, the required number of members in the array has to be given as the first parameter to the alloc routine.
    389 alocc routine accepts six kinds of arguments. Using different combinations of tha parameters, different kind of allocations can be performed. Any combincation of parameters can be used together except `realloc and `resize that should not be used simultanously in one call to routine as it creates ambiguity about whether to reallocate or resize a currently allocated dynamic object. If both `resize and `realloc are used in a call to alloc then the latter one will take effect or unexpected resulted might be produced.
      452The alloc routine accepts six kinds of arguments. Using different combinations of these parameters, different kinds of allocations can be performed. Any combination of parameters can be used together except @`realloc@ and @`resize@, which should not be used simultaneously in one call to the routine, as that creates ambiguity about whether to reallocate or resize a currently allocated dynamic object. If both @`resize@ and @`realloc@ are used in a call to alloc, then the latter one takes effect or unexpected results may be produced.
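The following sketch combines several of the position-free parameters described below; the calls are illustrative only and assume the polymorphic return type is inferred from the target pointer.
\begin{lstlisting}
int * a = alloc();                                // one int
int * b = alloc( 10 );                            // array of 10 ints; dim must be first
int * c = alloc( 10 , 64`align , 0`fill );        // 64-byte aligned, filled with integer 0
c = alloc( 20 , c`realloc , 64`align );           // grow c, preserving its data
\end{lstlisting}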
    390453
    391454\paragraph{Dim}
    392 This is the only parameter in the alloc routine that has a fixed-position and it is also the only parameter that does not use a backtick function. It has to be passed at the first position to alloc call in-case of an array allocation of objects of type T.
    393 It represents the required number of members in the array allocation as in CFA's aalloc (FIX ME: cite aalloc).
    394 This parameter should be of type size\_t.
    395 
    396 Example: int a = alloc( 5 )
      455This is the only parameter in the alloc routine that has a fixed position, and it is also the only parameter that does not use a backtick function. It has to be passed in the first position of the alloc call in the case of an array allocation of objects of type @T@.
     456It represents the required number of members in the array allocation as in \CFA's aalloc (FIX ME: cite aalloc).
     457This parameter should be of type @size_t@.
     458
      459Example: @int * a = alloc( 5 )@
    397460This call will return a dynamic array of five integers.
    398461
    399462\paragraph{Align}
    400 This parameter is position-free and uses a backtick routine align (`align). The parameter passed with `align should be of type size\_t. If the alignment parameter is not a power of two or is less than the default alignment of the allocator (that can be found out using routine libAlign in CFA) then the passed alignment parameter will be rejected and the default alignment will be used.
    401 
    402 Example: int b = alloc( 5 , 64`align )
      463This parameter is position-free and uses a backtick routine align (@`align@). The parameter passed with @`align@ should be of type @size_t@. If the alignment parameter is not a power of two or is less than the default alignment of the allocator (which can be found using the routine @libAlign@ in \CFA), then the passed alignment parameter is rejected and the default alignment is used.
     464
      465Example: @int * b = alloc( 5 , 64`align )@
    403466This call will return a dynamic array of five integers. It will align the allocated object on a 64-byte boundary.
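The validity check described above might be sketched as follows; @validAlign@ is a hypothetical helper, and @libAlign@ is the \CFA library routine mentioned above.
\begin{lstlisting}
#include <stdbool.h>
#include <stddef.h>
static inline bool validAlign( size_t align ) {
	return align >= libAlign()                   // at least the allocator's default alignment
		&& ( align & ( align - 1 ) ) == 0;       // and a power of two
}
\end{lstlisting}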
    404467
    405468\paragraph{Fill}
    406 This parameter is position-free and uses a backtick routine fill (`fill). In case of realloc, only the extra space after copying the data in the old object will be filled with given parameter.
      469This parameter is position-free and uses a backtick routine fill (@`fill@). In the case of @`realloc@, only the extra space after copying the data from the old object is filled with the given parameter.
    407470Three types of parameters can be passed using @`fill@.
    408471
    409472\begin{itemize}
    410473\item
    411 char: A char can be passed with `fill to fill the whole dynamic allocation with the given char recursively till the end of required allocation.
    412 \item
    413 Object of returned type: An object of type of returned type can be passed with `fill to fill the whole dynamic allocation with the given object recursively till the end of required allocation.
    414 \item
    415 Dynamic object of returned type: A dynamic object of type of returned type can be passed with `fill to fill the dynamic allocation with the given dynamic object. In this case, the allocated memory is not filled recursively till the end of allocation. The filling happen untill the end object passed to `fill or the end of requested allocation reaches.
    416 \end{itemize}
    417 
    418 Example: int b = alloc( 5 , 'a'`fill )
      474@char@: a @char@ can be passed with @`fill@ to fill the whole dynamic allocation with the given character, repeated until the end of the requested allocation.
     475\item
      476Object of returned type: an object of the returned type can be passed with @`fill@ to fill the whole dynamic allocation with copies of the given object, repeated until the end of the requested allocation.
     477\item
      478Dynamic object of returned type: a dynamic object of the returned type can be passed with @`fill@ to fill the dynamic allocation with the given dynamic object. In this case, the allocated memory is not filled repeatedly to the end of the allocation; filling stops at the end of the object passed to @`fill@ or the end of the requested allocation, whichever comes first.
     479\end{itemize}
     480
      481Example: @int * b = alloc( 5 , 'a'`fill )@
    419482This call will return a dynamic array of five integers. It will fill the allocated object with the character 'a', repeated until the end of the requested allocation.
    420483
    421 Example: int b = alloc( 5 , 4`fill )
      484Example: @int * b = alloc( 5 , 4`fill )@
    422485This call will return a dynamic array of five integers. It will fill the allocated object with integer 4 recursively till the end of requested allocation size.
    423486
    424 Example: int b = alloc( 5 , a`fill ) where a is a pointer of int type
      487Example: @int * b = alloc( 5 , a`fill )@, where @a@ is an @int@ pointer
    425488This call will return a dynamic array of five integers. It will copy the data in @a@ to the returned object, without repetition, until the end of @a@ or the end of the newly allocated object is reached.
    426489
    427490\paragraph{Resize}
    428 This parameter is position-free and uses a backtick routine resize (`resize). It represents the old dynamic object (oaddr) that the programmer wants to
     491This parameter is position-free and uses a backtick routine resize (@`resize@). It represents the old dynamic object (oaddr) that the programmer wants to
    429492\begin{itemize}
    430493\item
     
    435498fill with something.
    436499\end{itemize}
    437 The data in old dynamic object will not be preserved in the new object. The type of object passed to `resize and the returned type of alloc call can be different.
    438 
    439 Example: int b = alloc( 5 , a`resize )
      500The data in the old dynamic object is not preserved in the new object. The type of the object passed to @`resize@ and the return type of the alloc call can differ.
     501
      502Example: @int * b = alloc( 5 , a`resize )@
    440503This call will resize object @a@ to a dynamic array that can contain five integers.
    441504
    442 Example: int b = alloc( 5 , a`resize , 32`align )
      505Example: @int * b = alloc( 5 , a`resize , 32`align )@
    443506This call will resize object @a@ to a dynamic array that can contain five integers. The returned object will also be aligned to 32.
    444507
    445 Example: int b = alloc( 5 , a`resize , 32`align , 2`fill)
      508Example: @int * b = alloc( 5 , a`resize , 32`align , 2`fill )@
    446509This call will resize object @a@ to a dynamic array that can contain five integers. The returned object will also be aligned to 32 and will be filled with 2.
    447510
    448511\paragraph{Realloc}
    449 This parameter is position-free and uses a backtick routine realloc (`realloc). It represents the old dynamic object (oaddr) that the programmer wants to
     512This parameter is position-free and uses a backtick routine @realloc@ (@`realloc@). It represents the old dynamic object (oaddr) that the programmer wants to
    450513\begin{itemize}
    451514\item
     
    456519fill with something.
    457520\end{itemize}
    458 The data in old dynamic object will be preserved in the new object. The type of object passed to `realloc and the returned type of alloc call cannot be different.
    459 
    460 Example: int b = alloc( 5 , a`realloc )
      521The data in the old dynamic object is preserved in the new object. The type of the object passed to @`realloc@ and the return type of the alloc call cannot differ.
     522
      523Example: @int * b = alloc( 5 , a`realloc )@
    461524This call will realloc object @a@ to a dynamic array that can contain five integers.
    462525
    463 Example: int b = alloc( 5 , a`realloc , 32`align )
      526Example: @int * b = alloc( 5 , a`realloc , 32`align )@
    464527This call will realloc object @a@ to a dynamic array that can contain five integers. The returned object will also be aligned to 32.
    465528
    466 Example: int b = alloc( 5 , a`realloc , 32`align , 2`fill)
      529Example: @int * b = alloc( 5 , a`realloc , 32`align , 2`fill )@
    467530This call will realloc object @a@ to a dynamic array that can contain five integers. The returned object will also be aligned to 32. The extra space after copying the data of @a@ to the returned object will be filled with 2.
  • doc/theses/mubeen_zulfiqar_MMath/background.tex

    r4559b34 r92538ab  
    1 \chapter{Background}
    2 
    3 \noindent
     1\begin{comment}
    42====================
    5 
    63Writing Points:
    74\begin{itemize}
     
    1916Features and limitations.
    2017\end{itemize}
    21 
    22 \noindent
    23 ====================
    24 
    25 \section{Background}
    26 
    27 % FIXME: cite wasik
    28 \cite{wasik.thesis}
    29 
    30 \subsection{Memory Allocation}
    31 With dynamic allocation being an important feature of C, there are many standalone memory allocators that have been designed for different purposes. For this thesis, we chose 7 of the most popular and widely used memory allocators.
    32 
    33 \paragraph{dlmalloc}
    34 dlmalloc (FIX ME: cite allocator) is a thread-safe allocator that is single threaded and single heap. dlmalloc maintains free-lists of different sizes to store freed dynamic memory. (FIX ME: cite wasik)
    35 
    36 \paragraph{hoard}
    37 Hoard (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and using a heap layer framework. It has per-thred heaps that have thread-local free-lists, and a gloabl shared heap. (FIX ME: cite wasik)
    38 
    39 \paragraph{jemalloc}
    40 jemalloc (FIX ME: cite allocator) is a thread-safe allocator that uses multiple arenas. Each thread is assigned an arena. Each arena has chunks that contain contagious memory regions of same size. An arena has multiple chunks that contain regions of multiple sizes.
    41 
    42 \paragraph{ptmalloc}
    43 ptmalloc (FIX ME: cite allocator) is a modification of dlmalloc. It is a thread-safe multi-threaded memory allocator that uses multiple heaps. ptmalloc heap has similar design to dlmalloc's heap.
    44 
    45 \paragraph{rpmalloc}
    46 rpmalloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses per-thread heap. Each heap has multiple size-classes and each size-calss contains memory regions of the relevant size.
    47 
    48 \paragraph{tbb malloc}
    49 tbb malloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses private heap for each thread. Each private-heap has multiple bins of different sizes. Each bin contains free regions of the same size.
    50 
    51 \paragraph{tc malloc}
    52 tcmalloc (FIX ME: cite allocator) is a thread-safe allocator. It uses per-thread cache to store free objects that prevents contention on shared resources in multi-threaded application. A central free-list is used to refill per-thread cache when it gets empty.
    53 
    54 \subsection{Benchmarks}
    55 There are multiple benchmarks that are built individually and evaluate different aspects of a memory allocator. But, there is not standard set of benchamrks that can be used to evaluate multiple aspects of memory allocators.
    56 
    57 \paragraph{threadtest}
    58 (FIX ME: cite benchmark and hoard) Each thread repeatedly allocates and then deallocates 100,000 objects. Runtime of the benchmark evaluates its efficiency.
    59 
    60 \paragraph{shbench}
    61 (FIX ME: cite benchmark and hoard) Each thread allocates and randomly frees a number of random-sized objects. It is a stress test that also uses runtime to determine efficiency of the allocator.
    62 
    63 \paragraph{larson}
    64 (FIX ME: cite benchmark and hoard) Larson simulates a server environment. Multiple threads are created where each thread allocator and free a number of objects within a size range. Some objects are passed from threads to the child threads to free. It caluculates memory operations per second as an indicator of memory allocator's performance.
     18\end{comment}
     19
     20\chapter[Background]{Background\footnote{Part of this chapter draws from similar background work in~\cite{wasik.thesis} with many updates.}}
     21
     22
     23A program dynamically allocates and deallocates the storage for a variable, referred to as an \newterm{object}, through calls such as @malloc@ and @free@ in C, and @new@ and @delete@ in \CC.
     24Space for each allocated object comes from the dynamic-allocation zone.
     25A \newterm{memory allocator} contains a complex data-structure and code that manages the layout of objects in the dynamic-allocation zone.
     26The management goals are to make allocation/deallocation operations as fast as possible while densely packing objects to make efficient use of memory.
     27Objects in C/\CC cannot be moved to aid the packing process, only adjacent free storage can be \newterm{coalesced} into larger free areas.
     28The allocator grows or shrinks the dynamic-allocation zone to obtain storage for objects and reduce memory usage via operating-system calls, such as @mmap@ or @sbrk@ in UNIX.
     29
     30
     31\section{Allocator Components}
     32\label{s:AllocatorComponents}
     33
     34\VRef[Figure]{f:AllocatorComponents} shows the two important data components for a memory allocator, management and storage, collectively called the \newterm{heap}.
     35The \newterm{management data} is a data structure located at a known memory address and contains all information necessary to manage the storage data.
     36The management data starts with fixed-sized information in the static-data memory that references components in the dynamic-allocation memory.
     37The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}.
     38Allocated objects (white) are variable sized, and allocated and maintained by the program;
     39\ie only the program knows the location of allocated storage, not the memory allocator.
     40\begin{figure}[h]
     41\centering
     42\input{AllocatorComponents}
     43\caption{Allocator Components (Heap)}
     44\label{f:AllocatorComponents}
     45\end{figure}
     46Freed objects (light grey) represent memory deallocated by the program, which are linked into one or more lists facilitating easy location of new allocations.
     47Often the free list is chained internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks.
     48Reserved memory (dark grey) is one or more blocks of memory obtained from the operating system but not yet allocated to the program;
     49if there are multiple reserved blocks, they are also chained together, usually internally.
     50
     51Allocated and freed objects typically have additional management data embedded within them.
     52\VRef[Figure]{f:AllocatedObject} shows an allocated object with a header, trailer, and alignment padding and spacing around the object.
     53The header contains information about the object, \eg size, type, etc.
     54The trailer may be used to simplify an allocation implementation, \eg coalescing, and/or for security purposes to mark the end of an object.
     55An object may be preceded by padding to ensure proper alignment.
     56Some algorithms quantize allocation requests into distinct sizes resulting in additional spacing after objects less than the quantized value.
     57When padding and spacing are necessary, neither can be used to satisfy a future allocation request while the current allocation exists.
     58A free object also contains management data, \eg size, chaining, etc.
     59The amount of management data for a free node defines the minimum allocation size, \eg if 16 bytes are needed for a free-list node, any allocation request less than 16 bytes must be rounded up, otherwise the free list cannot use internal chaining.
     60The information in an allocated or freed object is overwritten when it transitions from allocated to freed and vice-versa by new management information and possibly data.
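For instance, a free-list node overlaid on a freed block might be sketched as follows; on a 64-bit machine this node needs 16 bytes, which then becomes the minimum allocation size.
\begin{lstlisting}
#include <stddef.h>
typedef struct FreeNode {
	size_t size;                   // size of the freed block
	struct FreeNode * next;        // internal chaining through the unused storage
} FreeNode;                        // requests smaller than sizeof(FreeNode) must be rounded up
\end{lstlisting}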
     61
     62\begin{figure}
     63\centering
     64\input{AllocatedObject}
     65\caption{Allocated Object}
     66\label{f:AllocatedObject}
     67\end{figure}
     68
     69
     70\section{Single-Threaded Memory-Allocator}
     71\label{s:SingleThreadedMemoryAllocator}
     72
     73A single-threaded memory-allocator does not run any threads itself, but is used by a single-threaded program.
     74Because the memory allocator is only executed by a single thread, concurrency issues do not exist.
     75The primary issues in designing a single-threaded memory-allocator are fragmentation and locality.
     76
     77
     78\subsection{Fragmentation}
     79\label{s:Fragmentation}
     80
     81Fragmentation is memory requested from the operating system but not used by the program;
     82hence, allocated objects are not fragmentation.
     83\VRef[Figure]{f:InternalExternalFragmentation} shows fragmentation is divided into two forms: internal or external.
     84
     85\begin{figure}
     86\centering
     87\input{IntExtFragmentation}
     88\caption{Internal and External Fragmentation}
     89\label{f:InternalExternalFragmentation}
     90\end{figure}
     91
     92\newterm{Internal fragmentation} is memory space that is allocated to the program, but is not intended to be accessed by the program, such as headers, trailers, padding, and spacing around an allocated object.
     93This memory is typically used by the allocator for management purposes or required by the architecture for correctness, \eg alignment.
     94Internal fragmentation is problematic when management space is a significant proportion of an allocated object.
     95For example, if internal fragmentation is as large as the object being managed, then the memory usage for that object is doubled.
     96An allocator should strive to keep internal management information to a minimum.
     97
     98\newterm{External fragmentation} is all memory space reserved from the operating system but not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes all external management data, freed objects, and reserved memory.
     99This memory is problematic in two ways: heap blowup and highly fragmented memory.
     100\newterm{Heap blowup} occurs when memory freed by the program is not reused for future allocations leading to potentially unbounded external fragmentation growth~\cite{Berger00}.
     101Heap blowup can occur due to allocator policies that are too restrictive in reusing freed memory and/or no coalescing of free storage.
     102Memory can become \newterm{highly fragmented} after multiple allocations and deallocations of objects.
     103\VRef[Figure]{f:MemoryFragmentation} shows an example of how a small block of memory fragments as objects are allocated and deallocated over time.
     104Blocks of free memory become smaller and non-contiguous making them less useful in serving allocation requests.
     105Memory is highly fragmented when the sizes of most free blocks are unusable.
     106For example, \VRef[Figure]{f:Contiguous} and \VRef[Figure]{f:HighlyFragmented} have the same quantity of external fragmentation, but \VRef[Figure]{f:HighlyFragmented} is highly fragmented.
     107If there is a request to allocate a large object, \VRef[Figure]{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while \VRef[Figure]{f:HighlyFragmented} likely has to request more memory from the operating system.
     108
     109\begin{figure}
     110\centering
     111\input{MemoryFragmentation}
     112\caption{Memory Fragmentation}
     113\label{f:MemoryFragmentation}
     114\vspace{10pt}
     115\subfigure[Contiguous]{
     116        \input{ContigFragmentation}
     117        \label{f:Contiguous}
     118} % subfigure
     119        \subfigure[Highly Fragmented]{
     120        \input{NonContigFragmentation}
     121\label{f:HighlyFragmented}
     122} % subfigure
     123\caption{Fragmentation Quality}
     124\label{f:FragmentationQuality}
     125\end{figure}
     126
     127For a single-threaded memory allocator, three basic approaches for controlling fragmentation are identified~\cite{Johnstone99}.
     128The first approach is a \newterm{sequential-fit algorithm} with one list of free objects that is searched for a block large enough to fit a requested object size.
     129Different search policies determine the free object selected, \eg the first free object large enough or closest to the requested size.
     130Any storage larger than the request can become spacing after the object or be split into a smaller free object.
     131The cost of the search depends on the shape and quality of the free list, \eg a linear versus a binary-tree free-list, a sorted versus unsorted free-list.
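A first-fit variant of this search might be sketched as follows, where the free-node layout is an assumption.
\begin{lstlisting}
#include <stddef.h>
typedef struct FreeBlock {
	size_t size;
	struct FreeBlock * next;
} FreeBlock;

static FreeBlock * firstFit( FreeBlock ** head, size_t request ) {
	for ( FreeBlock ** curr = head; *curr != NULL; curr = &(*curr)->next ) {
		if ( (*curr)->size >= request ) {       // first block large enough
			FreeBlock * block = *curr;
			*curr = block->next;                // unlink from the free list
			return block;                       // remainder may be split off (not shown)
		}
	}
	return NULL;                                // no fit: more reserved memory is needed
}
\end{lstlisting}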
     132
     133The second approach is a \newterm{segregated} or \newterm{binning algorithm} with a set of lists for different sized freed objects.
     134When an object is allocated, the requested size is rounded up to the nearest bin-size, often leading to spacing after the object.
     135A binning algorithm is fast at finding free memory of the appropriate size and allocating it, since the first free object on the free list is used.
     136The fewer bin-sizes, the fewer lists need to be searched and maintained;
     137however, the bin sizes are less likely to closely fit the requested object size, leading to more internal fragmentation.
     138The more bin-sizes, the longer the search and the less likely free objects are to be reused, leading to more external fragmentation and potentially heap blowup.
     139A variation of the binning algorithm allows objects to be allocated to the requested size, but when an object is freed, it is placed on the free list of the next smallest or equal bin-size.
     140For example, with bin sizes of 8 and 16 bytes, a request for 12 bytes allocates only 12 bytes, but when the object is freed, it is placed on the 8-byte bin-list.
     141For subsequent requests, the bin free-lists contain objects of different sizes, ranging from one bin-size to the next (8-16 in this example), and a sequential-fit algorithm may be used to find an object large enough for the requested size on the associated bin list.
     142
     143The third approach is \newterm{splitting} and \newterm{coalescing algorithms}.
     144When an object is allocated, if there are no free objects of the requested size, a larger free object may be split into two smaller objects to satisfy the allocation request without obtaining more memory from the operating system.
     145For example, in the buddy system, a block of free memory is split into two equal chunks, one of those chunks is again split into two equal chunks, and so on until a block just large enough to fit the requested object is created.
     146When an object is deallocated it is coalesced with the objects immediately before and after it in memory, if they are free, turning them into one larger object.
     147Coalescing can be done eagerly at each deallocation or lazily when an allocation cannot be fulfilled.
     148In all cases, coalescing increases allocation latency, hence some allocations can cause unbounded delays during coalescing.
     149While coalescing does not reduce external fragmentation, the coalesced blocks improve fragmentation quality so future allocations are less likely to cause heap blowup.
     150Splitting and coalescing can be used with other algorithms to avoid highly fragmented memory.
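A size-only sketch of buddy-style splitting, ignoring the bookkeeping for the freed buddies, is shown below.
\begin{lstlisting}
#include <stddef.h>
static size_t buddySplit( size_t blockSize, size_t request ) {
	while ( blockSize / 2 >= request ) {
		blockSize /= 2;          // the other half becomes a new, smaller free block (not shown)
	}
	return blockSize;            // smallest chunk in the halving sequence still >= request
}
\end{lstlisting}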
     151
     152
     153\subsection{Locality}
     154\label{s:Locality}
     155
     156The principle of locality recognizes that programs tend to reference a small set of data, called a working set, for a certain period of time, where a working set is composed of temporal and spatial accesses~\cite{Denning05}.
     157Temporal clustering implies a group of objects are accessed repeatedly within a short time period, while spatial clustering implies a group of objects physically close together (nearby addresses) are accessed repeatedly within a short time period.
      158Temporal locality commonly occurs during an iterative computation with a fixed set of disjoint variables, while spatial locality commonly occurs when traversing an array.
     159
     160Hardware takes advantage of temporal and spatial locality through multiple levels of caching, \ie memory hierarchy.
     161When an object is accessed, the memory physically located around the object is also cached with the expectation that the current and nearby objects will be referenced within a short period of time.
     162For example, entire cache lines are transferred between memory and cache and entire virtual-memory pages are transferred between disk and memory.
     163A program exhibiting good locality has better performance due to fewer cache misses and page faults\footnote{With the advent of large RAM memory, paging is becoming less of an issue in modern programming.}.
     164
     165Temporal locality is largely controlled by how a program accesses its variables~\cite{Feng05}.
     166Nevertheless, a memory allocator can have some indirect influence on temporal locality and largely dictates spatial locality.
     167For temporal locality, an allocator can return storage for new allocations that was just freed as these memory locations are still \emph{warm} in the memory hierarchy.
     168For spatial locality, an allocator can place objects used together close together in memory, so the working set of the program fits into the fewest possible cache lines and pages.
     169However, usage patterns are different for every program as is the underlying hardware memory architecture;
     170hence, no general-purpose memory-allocator can provide ideal locality for every program on every computer.
     171
     172There are a number of ways a memory allocator can degrade locality by increasing the working set.
     173For example, a memory allocator may access multiple free objects before finding one to satisfy an allocation request, \eg sequential-fit algorithm.
     174If there are a (large) number of objects accessed in very different areas of memory, the allocator may perturb the program's memory hierarchy causing multiple cache or page misses~\cite{Grunwald93}.
     175Another way locality can be degraded is by spatially separating related data.
     176For example, in a binning allocator, objects of different sizes are allocated from different bins that may be located in different pages of memory.
     177
     178
     179\section{Multi-Threaded Memory-Allocator}
     180\label{s:MultiThreadedMemoryAllocator}
     181
     182A multi-threaded memory-allocator does not run any threads itself, but is used by a multi-threaded program.
     183In addition to single-threaded design issues of fragmentation and locality, a multi-threaded allocator is simultaneously accessed by multiple threads, and hence, must deal with concurrency issues such as mutual exclusion, false sharing, and additional forms of heap blowup.
     184
     185
     186\subsection{Mutual Exclusion}
     187\label{s:MutualExclusion}
     188
     189\newterm{Mutual exclusion} provides sequential access to the shared management data of the heap.
     190There are two performance issues for mutual exclusion.
     191First is the overhead necessary to perform (at least) a hardware atomic operation every time a shared resource is accessed.
     192Second is when multiple threads contend for a shared resource simultaneously, and hence, some threads must wait until the resource is released.
     193Contention can be reduced in a number of ways:
     194\begin{itemize}[itemsep=0pt]
     195\item
     196using multiple fine-grained locks versus a single lock, spreading the contention across a number of locks;
     197\item
      198using trylock and generating new storage if the lock is busy, yielding a classic space versus time tradeoff (see the sketch below);
     199\item
     200using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Oyama99}.
     201\end{itemize}
     202However, all of these approaches have degenerate cases where program contention is high, which occurs outside of the allocator.
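For example, the trylock approach might be sketched as follows, where @allocFromHeap@ and @allocFromNewStorage@ are hypothetical helpers standing in for the allocator's internal allocation paths.
\begin{lstlisting}
#include <pthread.h>
#include <stddef.h>
extern void * allocFromHeap( size_t size );          // hypothetical: allocate from the shared heap
extern void * allocFromNewStorage( size_t size );    // hypothetical: carve fresh reserved memory

void * contendedAlloc( pthread_mutex_t * heapLock, size_t size ) {
	void * obj;
	if ( pthread_mutex_trylock( heapLock ) == 0 ) {  // uncontended: use the shared heap
		obj = allocFromHeap( size );
		pthread_mutex_unlock( heapLock );
	} else {                                         // contended: avoid blocking
		obj = allocFromNewStorage( size );
	}
	return obj;
}
\end{lstlisting}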
     203
     204
     205\subsection{False Sharing}
     206\label{s:FalseSharing}
     207
     208False sharing is a dynamic phenomenon leading to cache thrashing.
     209When two or more threads on separate CPUs simultaneously change different objects sharing a cache line, the change invalidates the other thread's associated cache, even though these threads may be uninterested in the other modified object.
     210False sharing can occur in three different ways: program induced, allocator-induced active, and allocator-induced passive;
     211a memory allocator can only affect the latter two.
     212
     213\paragraph{\newterm{Program-induced false-sharing}} occurs when one thread passes an object sharing a cache line to another thread, and both threads modify the respective objects.
     214\VRef[Figure]{f:ProgramInducedFalseSharing} shows when Task$_1$ passes Object$_2$ to Task$_2$, a false-sharing situation forms when Task$_1$ modifies Object$_1$ and Task$_2$ modifies Object$_2$.
     215Changes to Object$_1$ invalidate CPU$_2$'s cache line, and changes to Object$_2$ invalidate CPU$_1$'s cache line.
     216
     217\begin{figure}
     218\centering
     219\subfigure[Program-Induced False-Sharing]{
     220        \input{ProgramFalseSharing}
     221        \label{f:ProgramInducedFalseSharing}
     222} \\
     223\vspace{5pt}
     224\subfigure[Allocator-Induced Active False-Sharing]{
     225        \input{AllocInducedActiveFalseSharing}
     226        \label{f:AllocatorInducedActiveFalseSharing}
     227} \\
     228\vspace{5pt}
     229\subfigure[Allocator-Induced Passive False-Sharing]{
     230        \input{AllocInducedPassiveFalseSharing}
     231        \label{f:AllocatorInducedPassiveFalseSharing}
     232} % subfigure
     233\caption{False Sharing}
     234\label{f:FalseSharing}
     235\end{figure}
     236
     237\paragraph{\newterm{Allocator-induced active false-sharing}} occurs when objects are allocated within the same cache line but to different threads.
     238For example, in \VRef[Figure]{f:AllocatorInducedActiveFalseSharing}, each task allocates an object and loads a cache-line of memory into its associated cache.
     239Again, changes to Object$_1$ invalidate CPU$_2$'s cache line, and changes to Object$_2$ invalidate CPU$_1$'s cache line.
     240
     241\paragraph{\newterm{Allocator-induced passive false-sharing}} is another form of allocator-induced false-sharing caused by program-induced false-sharing.
     242When an object in a program-induced false-sharing situation is deallocated, a future allocation of that object may cause passive false-sharing.
     243For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, Task$_1$ passes Object$_2$ to Task$_2$, and Task$_2$ subsequently deallocates Object$_2$.
     244Allocator-induced passive false-sharing occurs when Object$_2$ is reallocated to Task$_2$ while Task$_1$ is still using Object$_1$.
     245
     246
     247\subsection{Heap Blowup}
     248\label{s:HeapBlowup}
     249
     250In a multi-threaded program, heap blowup can occur when memory freed by one thread is inaccessible to other threads due to the allocation strategy.
     251Specific examples are presented in later sections.
     252
     253
     254\section{Multi-Threaded Memory-Allocator Features}
     255\label{s:MultiThreadedMemoryAllocatorFeatures}
     256
     257The following features are used in the construction of multi-threaded memory-allocators:
     258\begin{list}{\arabic{enumi}.}{\usecounter{enumi}\topsep=0.5ex\parsep=0pt\itemsep=0pt}
     259\item multiple heaps
     260\begin{list}{\alph{enumii})}{\usecounter{enumii}\topsep=0.5ex\parsep=0pt\itemsep=0pt}
     261\item with or without a global heap
     262\item with or without ownership
     263\end{list}
     264\item object containers
     265\begin{list}{\alph{enumii})}{\usecounter{enumii}\topsep=0.5ex\parsep=0pt\itemsep=0pt}
     266\item with or without ownership
     267\item fixed or variable sized
     268\item global or local free-lists
     269\end{list}
     270\item hybrid private/public heap
     271\item allocation buffer
     272\item lock-free operations
     273\end{list}
     274The first feature, multiple heaps, pertains to different kinds of heaps.
     275The second feature, object containers, pertains to the organization of objects within the storage area.
     276The remaining features apply to different parts of the allocator design or implementation.
     277
     278
     279\section{Multiple Heaps}
     280\label{s:MultipleHeaps}
     281
     282A multi-threaded allocator has potentially multiple threads and heaps.
     283The multiple threads cause complexity, and multiple heaps are a mechanism for dealing with the complexity.
     284The spectrum ranges from multiple threads using a single heap, denoted as T:1 (see \VRef[Figure]{f:SingleHeap}), to multiple threads sharing multiple heaps, denoted as T:H (see \VRef[Figure]{f:SharedHeaps}), to one thread per heap, denoted as 1:1 (see \VRef[Figure]{f:PerThreadHeap}), which is almost back to a single-threaded allocator.
     285
     286
     287\paragraph{T:1 model} where all threads allocate and deallocate objects from one heap.
     288Memory is obtained from the freed objects, or reserved memory in the heap, or from the operating system (OS);
     289the heap may also return freed memory to the operating system.
     290The arrows indicate the direction memory conceptually moves for each kind of operation: allocation moves memory along the path from the heap/operating-system to the user application, while deallocation moves memory along the path from the application back to the heap/operating-system.
     291To safely handle concurrency, a single heap uses locking to provide mutual exclusion.
     292Whether using a single lock for all heap operations or fine-grained locking for different operations, a single heap may be a significant source of contention for programs with a large amount of memory allocation.
     293
     294\begin{figure}
     295\centering
     296\subfigure[T:1]{
     297%       \input{SingleHeap.pstex_t}
     298        \input{SingleHeap}
     299        \label{f:SingleHeap}
     300} % subfigure
     301\vrule
     302\subfigure[T:H]{
     303%       \input{MultipleHeaps.pstex_t}
     304        \input{SharedHeaps}
     305        \label{f:SharedHeaps}
     306} % subfigure
     307\vrule
     308\subfigure[1:1]{
     309%       \input{MultipleHeapsGlobal.pstex_t}
     310        \input{PerThreadHeap}
     311        \label{f:PerThreadHeap}
     312} % subfigure
     313\caption{Multiple Heaps, Thread:Heap Relationship}
     314\end{figure}
     315
     316
     317\paragraph{T:H model} where each thread allocates storage from several heaps depending on certain criteria, with the goal of reducing contention by spreading allocations/deallocations across the heaps.
     318The decision on when to create a new heap and which heap a thread allocates from depends on the allocator design.
     319The performance goal is to reduce the ratio of heaps to threads.
     320In general, locking is required, since more than one thread may concurrently access a heap during its lifetime, but contention is reduced because fewer threads access a specific heap.
     321
     322For example, multiple heaps are managed in a pool, starting with a single or a fixed number of heaps that increase\-/decrease depending on contention\-/space issues.
     323At creation, a thread is associated with a heap from the pool.
     324When the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.
     325If an unlocked heap is found, the thread changes its association and uses that heap.
     326If all heaps are locked, the thread may create a new heap, use it, and then place the new heap into the pool;
     327or the thread can block waiting for a heap to become available.
      328While the heap-pool approach often minimizes the number of extant heaps, the worst case can result in more heaps than threads;
     329\eg if the number of threads is large at startup with many allocations creating a large number of heaps and then the number of threads reduces.
     330
     331Threads using multiple heaps need to determine the specific heap to access for an allocation/deallocation, \ie association of thread to heap.
     332A number of techniques are used to establish this association.
     333The simplest approach is for each thread to have a pointer to its associated heap (or to administrative information that points to the heap), and this pointer changes if the association changes.
     334For threading systems with thread-local storage, the heap pointer is created using this mechanism;
     335otherwise, the heap routines must simulate thread-local storage using approaches like hashing the thread's stack-pointer or thread-id to find its associated heap.
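A thread-local pointer association might be sketched as follows; the names are assumptions, and @associateHeap@ stands in for whatever pool or association policy the allocator uses.
\begin{lstlisting}
typedef struct Heap Heap;                  // layout not shown
extern Heap * associateHeap( void );       // hypothetical: pick or create a heap for this thread

static __thread Heap * myHeap = NULL;      // per-thread pointer to its associated heap

static Heap * threadHeap( void ) {
	if ( myHeap == NULL ) myHeap = associateHeap();  // first use: establish the association
	return myHeap;
}
\end{lstlisting}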
     336
     337The storage management for multiple heaps is more complex than for a single heap (see \VRef[Figure]{f:AllocatorComponents}).
     338\VRef[Figure]{f:MultipleHeapStorage} illustrates the general storage layout for multiple heaps.
     339Allocated and free objects are labelled by the thread or heap they are associated with.
     340(Links between free objects are removed for simplicity.)
     341The management information in the static zone must be able to locate all heaps in the dynamic zone.
      342The management information for the heaps must reside in the dynamic-allocation zone if there is a variable number of heaps.
      343Each heap in the dynamic zone is composed of a list of free objects and a pointer to its reserved memory.
     344An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory.
     345Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur.
     346Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area, pushing part of the storage management complexity back to the operating system.
     347
     348\begin{figure}
     349\centering
     350\input{MultipleHeapsStorage}
     351\caption{Multiple-Heap Storage}
     352\label{f:MultipleHeapStorage}
     353\end{figure}
     354
     355Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup.
     356The external fragmentation experienced by a program with a single heap is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory.
     357Additionally, objects freed by one heap cannot be reused by other threads, except indirectly by returning free memory to the operating system, which can be expensive.
     358(Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.)
     359In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused.
     360
     361Adding a \newterm{global heap} (G) attempts to reduce the cost of obtaining/returning memory among heaps (sharing) by buffering storage within the application address-space.
     362Now, each heap obtains and returns storage to/from the global heap rather than the operating system.
     363Storage is obtained from the global heap only when a heap allocation cannot be fulfilled, and returned to the global heap when a heap's free memory exceeds some threshold.
     364Similarly, the global heap buffers this memory, obtaining and returning storage to/from the operating system as necessary.
     365The global heap does not have its own thread and makes no internal allocation requests;
     366instead, it uses the application thread, which called one of the multiple heaps and then the global heap, to perform operations.
     367Hence, the worst-case cost of a memory operation includes all these steps.
     368With respect to heap blowup, the global heap provides an indirect mechanism to move free memory among heaps, which usually has a much lower cost than interacting with the operating system to achieve the same goal and is independent of the mechanism used by the operating system to present dynamic memory to an address space.
     369
     370However, since any thread may indirectly perform a memory operation on the global heap, it is a shared resource that requires locking.
     371A single lock can be used to protect the global heap or fine-grained locking can be used to reduce contention.
     372In general, the cost is minimal since the majority of memory operations are completed without the use of the global heap.
     373
     374
     375\paragraph{1:1 model (thread heaps)} where each thread has its own heap eliminating most contention and locking because threads seldom access another thread's heap (see ownership in \VRef{s:Ownership}).
     376An additional benefit of thread heaps is improved locality due to better memory layout.
      377As each thread only allocates from its heap, all objects for a thread are consolidated in the storage area for that heap, better utilizing each CPU's cache and accessing fewer pages.
     378In contrast, the T:H model spreads each thread's objects over a larger area in different heaps.
     379Thread heaps can also eliminate allocator-induced active false-sharing, if memory is acquired so it does not overlap at crucial boundaries with memory for another thread's heap.
     380For example, assume page boundaries coincide with cache line boundaries, then if a thread heap always acquires pages of memory, no two threads share a page or cache line unless pointers are passed among them.
     381Hence, allocator-induced active false-sharing in \VRef[Figure]{f:AllocatorInducedActiveFalseSharing} cannot occur because the memory for thread heaps never overlaps.
     382
     383When a thread terminates, there are two options for handling its heap.
     384First is to free all objects in the heap to the global heap and destroy the thread heap.
     385Second is to place the thread heap on a list of available heaps and reuse it for a new thread in the future.
     386Destroying the thread heap immediately may reduce external fragmentation sooner, since all free objects are freed to the global heap and may be reused by other threads.
      387Alternatively, reusing thread heaps may improve performance if the inheriting thread makes allocation requests similar to those of the thread that previously held the thread heap, because any unfreed storage is immediately accessible.
     388
     389
     390\subsection{User-Level Threading}
     391
     392It is possible to use any of the heap models with user-level (M:N) threading.
      393However, an important goal of user-level threading is fast operations (creation/termination/context-switching) achieved by not interacting with the operating system, which allows creating large numbers of high-performance interacting threads ($>$ 10,000).
     394It is difficult to retain this goal, if the user-threading model is directly involved with the heap model.
     395\VRef[Figure]{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model is provided by the language runtime.
     396Hence, a user thread allocates/deallocates from/to the heap of the kernel thread on which it is currently executing.
     397
     398\begin{figure}
     399\centering
     400\input{UserKernelHeaps}
     401\caption{User-Level Kernel Heaps}
     402\label{f:UserLevelKernelHeaps}
     403\end{figure}
     404
     405Adopting this model results in a subtle problem with shared heaps.
     406With kernel threading, an operation that is started by a kernel thread is always completed by that thread.
     407For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap even if preempted, \ie any locking correctness associated with the shared heap is preserved across preemption.
     408
     409However, this correctness property is not preserved for user-level threading.
     410A user thread can start an allocation/deallocation on one kernel thread, be preempted (time slice), and continue running on a different kernel thread to complete the operation~\cite{Dice02}.
     411When the user thread continues on the new kernel thread, it may have pointers into the previous kernel-thread's heap and hold locks associated with it.
     412To get the same kernel-thread safety, time slicing must be disabled/\-enabled around these operations, so the user thread cannot jump to another kernel thread.
     413However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is rare (10--100 milliseconds).
     414Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically.
      415Occasionally ignoring a preemption should be benign, but a persistent lack of preemption can result in both short- and long-term starvation.
     416
     417
     418\begin{figure}
     419\centering
     420\subfigure[Ownership]{
     421        \input{MultipleHeapsOwnership}
     422} % subfigure
     423\hspace{0.25in}
     424\subfigure[No Ownership]{
     425        \input{MultipleHeapsNoOwnership}
     426} % subfigure
     427\caption{Heap Ownership}
     428\label{f:HeapsOwnership}
     429\end{figure}
     430
     431
     432\subsection{Ownership}
     433\label{s:Ownership}
     434
     435\newterm{Ownership} defines which heap an object is returned-to on deallocation.
     436If a thread returns an object to the heap it was originally allocated from, a heap has ownership of its objects.
     437Alternatively, a thread can return an object to the heap it is currently associated with, which can be any heap accessible during a thread's lifetime.
     438\VRef[Figure]{f:HeapsOwnership} shows an example of multiple heaps (minus the global heap) with and without ownership.
     439Again, the arrows indicate the direction memory conceptually moves for each kind of operation.
     440For the 1:1 thread:heap relationship, a thread only allocates from its own heap, and without ownership, a thread only frees objects to its own heap, which means the heap is private to its owner thread and does not require any locking, called a \newterm{private heap}.
     441For the T:1/T:H models with or without ownership or the 1:1 model with ownership, a thread may free objects to different heaps, which makes each heap publicly accessible to all threads, called a \newterm{public heap}.
     442
     443\VRef[Figure]{f:MultipleHeapStorageOwnership} shows the effect of ownership on storage layout.
      444(For simplicity assume the heaps all use the same amount of reserved storage.)
     445In contrast to \VRef[Figure]{f:MultipleHeapStorage}, each reserved area used by a heap only contains free storage for that particular heap because threads must return free objects back to the owner heap.
     446Again, because multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
     447The exception is for the 1:1 model if reserved memory does not overlap a cache-line because all allocated storage within a used area is associated with a single thread.
     448In this case, there is no allocator-induced active false-sharing (see \VRef[Figure]{f:AllocatorInducedActiveFalseSharing}) because two adjacent allocated objects used by different threads cannot share a cache-line.
      449As well, there is no allocator-induced passive false-sharing (see \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}) because free objects are returned to the owner heap, so two adjacent objects cannot end up being used by different threads.
     450% Passive false-sharing may still occur, if delayed ownership is used (see below).
     451
     452\begin{figure}
     453\centering
     454\input{MultipleHeapsOwnershipStorage.pstex_t}
     455\caption{Multiple-Heap Storage with Ownership}
     456\label{f:MultipleHeapStorageOwnership}
     457\end{figure}
     458
     459The main advantage of ownership is preventing heap blowup by returning storage for reuse by the owner heap.
     460Ownership prevents the classical problem where one thread performs allocations from one heap, passes the object to another thread, and the receiving thread deallocates the object to another heap, hence draining the initial heap of storage.
     461As well, allocator-induced passive false-sharing is eliminated because returning an object to its owner heap means it can never be allocated to another thread.
     462For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, the deallocation by Task$_2$ returns Object$_2$ back to Task$_1$'s heap;
     463hence a subsequent allocation by Task$_2$ cannot return this storage.
     464The disadvantage of ownership is deallocating to another task's heap so heaps are no longer private and require locks to provide safe concurrent access.
     465
     466Object ownership can be immediate or delayed, meaning free objects may be batched on a separate free list either by the returning or receiving thread.
     467While the returning thread can batch objects, batching across multiple heaps is complex and there is no obvious time when to push back to the owner heap.
      468It is better for returning threads to immediately return objects to the receiving thread's batch list, as the receiving thread has better knowledge of when to incorporate the batch list into its free pool.
     469Batching leverages the fact that most allocation patterns use the contention-free fast-path so locking on the batch list is rare for both the returning and receiving threads.
     470
      471It is possible for heaps to steal objects rather than return them, reallocating these objects when storage runs out on a heap.
     472However, stealing can result in passive false-sharing.
     473For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, Object$_2$ may be deallocated to Task$_2$'s heap initially.
     474If Task$_2$ reallocates Object$_2$ before it is returned to its owner heap, then passive false-sharing may occur.
     475
     476
     477\section{Object Containers}
     478\label{s:ObjectContainers}
     479
      480Bracketing every allocation with headers/trailers can result in significant internal fragmentation, as shown in \VRef[Figure]{f:ObjectHeaders},
      481especially if the headers contain redundant management information, \eg the object size may be the same for many objects because programs only allocate a small set of object sizes.
     482As well, it can result in poor cache usage, since only a portion of the cache line is holding useful information from the program's perspective.
     483Spatial locality can also be negatively affected leading to poor cache locality~\cite{Feng05}:
     484while the header and object are together in memory, they are generally not accessed together;
     485\eg the object is accessed by the program when it is allocated, while the header is accessed by the allocator when the object is free.
     486
     487\begin{figure}
     488\centering
     489\subfigure[Object Headers]{
     490        \input{ObjectHeaders}
     491        \label{f:ObjectHeaders}
     492} % subfigure
     493\subfigure[Object Container]{
     494        \input{Container}
     495        \label{f:ObjectContainer}
     496} % subfigure
     497\caption{Header Placement}
     498\label{f:HeaderPlacement}
     499\end{figure}
     500
     501An alternative approach factors common header/trailer information to a separate location in memory and organizes associated free storage into blocks called \newterm{object containers} (\newterm{superblocks} in~\cite{Berger00}), as in \VRef[Figure]{f:ObjectContainer}.
     502The header for the container holds information necessary for all objects in the container;
     503a trailer may also be used at the end of the container.
     504Similar to the approach described for thread heaps in \VRef{s:MultipleHeaps}, if container boundaries do not overlap with memory of another container at crucial boundaries and all objects in a container are allocated to the same thread, allocator-induced active false-sharing is avoided.
     505
     506The difficulty with object containers lies in finding the object header/trailer given only the object address, since that is normally the only information passed to the deallocation operation.
     507One way to do this is to start containers on aligned addresses in memory, then truncate the lower bits of the object address to obtain the header address (or round up and subtract the trailer size to obtain the trailer address).
     508For example, if an object at address 0xFC28\,EF08 is freed and containers are aligned on 64\,KB (0x0001\,0000) addresses, then the container header is at 0xFC28\,0000.
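The following sketch shows one way to implement this lookup in C; the @Container@ structure, its field names, and the 64\,KB container size are illustrative assumptions rather than a specific allocator's layout.
\begin{lstlisting}[language=C]
#include <stdint.h>
#include <stddef.h>

#define CONTAINER_SIZE 0x10000UL        // 64 KB, power-of-two container alignment

struct Heap;                            // opaque owning heap

typedef struct Container {              // hypothetical container header
    size_t objectSize;                  // homogeneous object size
    struct Heap * owner;                // owning heap, when using ownership
} Container;

// Truncate the low-order bits of an object address to recover the header of its
// enclosing container, e.g., 0xFC28EF08 & ~0xFFFF == 0xFC280000.
static inline Container * containerOf( void * addr ) {
    return (Container *)( (uintptr_t)addr & ~(CONTAINER_SIZE - 1) );
}
\end{lstlisting}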
     509
     510Normally, a container has homogeneous objects of fixed size, with fixed information in the header that applies to all container objects (\eg object size and ownership).
      511This approach greatly reduces internal fragmentation since far fewer headers are required, and potentially increases spatial locality because a cache line or page holds more objects, as the objects are closer together without intervening headers.
     512However, although similar objects are close spatially within the same container, different sized objects are further apart in separate containers.
     513Depending on the program, this may or may not improve locality.
     514If the program uses several objects from a small number of containers in its working set, then locality is improved since fewer cache lines and pages are required.
      515If the program uses many containers, there is poor locality, as more cache lines and pages are required.
      516Another drawback is that external fragmentation may be increased since containers reserve space for objects that may never be allocated by the program, \ie there are often multiple containers for each size that are only partially full.
     517However, external fragmentation can be reduced by using small containers.
     518
      519Containers with heterogeneous objects imply different headers describing them, which complicates the problem of locating a specific header solely by an address.
      520The problem with allowing objects of different sizes is that the number of objects, and therefore headers, in a single container is unpredictable.
      521A couple of solutions can be used to implement containers with heterogeneous objects.
     522One solution allocates headers at one end of the container, while allocating objects from the other end of the container;
     523when the headers meet the objects, the container is full.
     524Freed objects cannot be split or coalesced since this causes the number of headers to change.
     525The difficulty in this strategy remains in finding the header for a specific object;
     526in general, a search is necessary to find the object's header among the container headers.
     527A second solution combines the use of container headers and individual object headers.
     528Each object header stores the object's heterogeneous information, such as its size, while the container header stores the homogeneous information, such as the owner when using ownership.
     529This approach allows containers to hold different types of objects, but does not completely separate headers from objects.
     530The benefit of the container in this case is to reduce some redundant information that is factored into the container header.
     531
     532In summary, object containers trade off internal fragmentation for external fragmentation by isolating common administration information to remove/reduce internal fragmentation, but at the cost of external fragmentation as some portion of a container may not be used and this portion is unusable for other kinds of allocations.
     533A consequence of this tradeoff is its effect on spatial locality, which can produce positive or negative results depending on program access-patterns.
     534
     535
     536\subsection{Container Ownership}
     537\label{s:ContainerOwnership}
     538
     539Without ownership, objects in a container are deallocated to the heap currently associated with the thread that frees the object.
     540Thus, different objects in a container may be on different heap free-lists (see \VRef[Figure]{f:ContainerNoOwnershipFreelist}).
     541With ownership, all objects in a container belong to the same heap (see \VRef[Figure]{f:ContainerOwnershipFreelist}), so ownership of an object is determined by the container owner.
     542If multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
      543Only with the 1:1 model and ownership are active and passive false-sharing avoided (see \VRef{s:Ownership}).
      544Passive false-sharing may still occur if delayed ownership is used.
     545Finally, a completely free container can become reserved storage and be reset to allocate objects of a new size or freed to the global heap.
     546
     547\begin{figure}
     548\centering
     549\subfigure[No Ownership]{
     550        \input{ContainerNoOwnershipFreelist}
     551        \label{f:ContainerNoOwnershipFreelist}
     552} % subfigure
     553\vrule
     554\subfigure[Ownership]{
     555        \input{ContainerOwnershipFreelist}
     556        \label{f:ContainerOwnershipFreelist}
     557} % subfigure
     558\caption{Free-list Structure with Container Ownership}
     559\end{figure}
     560
      561When a container changes ownership, the ownership of all objects within it changes as well.
     562Moving a container involves moving all objects on the heap's free-list in that container to the new owner.
     563This approach can reduce contention for the global heap, since each request for objects from the global heap returns a container rather than individual objects.
     564
     565Additional restrictions may be applied to the movement of containers to prevent active false-sharing.
      566For example, in \VRef[Figure]{f:ContainerFalseSharing1}, a container being used by Task$_1$ changes ownership through the global heap.
     567In \VRef[Figure]{f:ContainerFalseSharing2}, when Task$_2$ allocates an object from the newly acquired container it is actively false-sharing even though no objects are passed among threads.
     568Note, once the object is freed by Task$_1$, no more false sharing can occur until the container changes ownership again.
     569To prevent this form of false sharing, container movement may be restricted to when all objects in the container are free.
     570One implementation approach that increases the freedom to return a free container to the operating system involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area, again pushing storage management complexity back to the operating system.
     571
     572\begin{figure}
     573\centering
     574\subfigure[]{
     575        \input{ContainerFalseSharing1}
     576        \label{f:ContainerFalseSharing1}
     577} % subfigure
     578\subfigure[]{
     579        \input{ContainerFalseSharing2}
     580        \label{f:ContainerFalseSharing2}
     581} % subfigure
     582\caption{Active False-Sharing using Containers}
     583\label{f:ActiveFalseSharingContainers}
     584\end{figure}
     585
     586Using containers with ownership increases external fragmentation since a new container for a requested object size must be allocated separately for each thread requesting it.
     587In \VRef[Figure]{f:ExternalFragmentationContainerOwnership}, using object ownership allocates 80\% more space than without ownership.
     588
     589\begin{figure}
     590\centering
     591\subfigure[No Ownership]{
     592        \input{ContainerNoOwnership}
     593} % subfigure
     594\\
     595\subfigure[Ownership]{
     596        \input{ContainerOwnership}
     597} % subfigure
     598\caption{External Fragmentation with Container Ownership}
     599\label{f:ExternalFragmentationContainerOwnership}
     600\end{figure}
     601
     602
     603\subsection{Container Size}
     604\label{s:ContainerSize}
     605
     606One way to control the external fragmentation caused by allocating a large container for a small number of requested objects is to vary the size of the container.
     607As described earlier, container boundaries need to be aligned on addresses that are a power of two to allow easy location of the header (by truncating lower bits).
     608Aligning containers in this manner also determines the size of the container.
      609However, the choice of container size involves several trade-offs for the allocator.
     610
      611The larger the container, the fewer containers are needed, and hence, the fewer headers need to be maintained in memory, reducing internal fragmentation and potentially improving performance.
     612However, with more objects in a container, there may be more objects that are unallocated, increasing external fragmentation.
      613With smaller containers, not only are there more containers, but a second problem arises when objects are larger than the container.
     614In general, large objects, \eg greater than 64\,KB, are allocated directly from the operating system and are returned immediately to the operating system to reduce long-term external fragmentation.
     615If the container size is small, \eg 1\,KB, then a 1.5\,KB object is treated as a large object, which is likely to be inappropriate.
     616Ideally, it is best to use smaller containers for smaller objects, and larger containers for medium objects, which leads to the issue of locating the container header.
     617
     618In order to find the container header when using different sized containers, a super container is used (see~\VRef[Figure]{f:SuperContainers}).
     619The super container spans several containers, contains a header with information for finding each container header, and starts on an aligned address.
     620Super-container headers are found using the same method used to find container headers by dropping the lower bits of an object address.
     621The containers within a super container may be different sizes or all the same size.
     622If the containers in the super container are different sizes, then the super-container header must be searched to determine the specific container for an object given its address.
      623If all containers in the super container are the same size, \eg 16\,KB, then a specific container header can be found by a simple calculation.
     624The free space at the end of a super container is used to allocate new containers.
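As a sketch of the equal-sized-container case, the following C fragment locates the super-container header by truncating address bits and then computes the enclosing container header arithmetically; the structure, field names, and sizes are assumptions for illustration only.
\begin{lstlisting}[language=C]
#include <stdint.h>
#include <stddef.h>

#define SUPER_ALIGN 0x100000UL          // super containers aligned on 1 MB (illustrative)

typedef struct SuperContainer {         // hypothetical super-container header
    size_t containerSize;               // uniform container size, e.g., 16 KB
    size_t firstOffset;                 // offset of the first container within the super container
} SuperContainer;

// Drop the low-order address bits to find the super-container header, then round
// the remaining offset down to a container boundary (all containers equal sized).
static inline void * containerHeaderOf( void * addr ) {
    SuperContainer * super = (SuperContainer *)( (uintptr_t)addr & ~(SUPER_ALIGN - 1) );
    size_t offset = (uintptr_t)addr - (uintptr_t)super - super->firstOffset;
    return (char *)super + super->firstOffset + offset / super->containerSize * super->containerSize;
}
\end{lstlisting}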
     625
     626\begin{figure}
     627\centering
     628\input{SuperContainers}
     629% \includegraphics{diagrams/supercontainer.eps}
     630\caption{Super Containers}
     631\label{f:SuperContainers}
     632\end{figure}
     633
     634Minimal internal and external fragmentation is achieved by having as few containers as possible, each being as full as possible.
     635It is also possible to achieve additional benefit by using larger containers for popular small sizes, as it reduces the number of containers with associated headers.
     636However, this approach assumes it is possible for an allocator to determine in advance which sizes are popular.
     637Keeping statistics on requested sizes allows the allocator to make a dynamic decision about which sizes are popular.
     638For example, after receiving a number of allocation requests for a particular size, that size is considered a popular request size and larger containers are allocated for that size.
     639If the decision is incorrect, larger containers than necessary are allocated that remain mostly unused.
     640A programmer may be able to inform the allocator about popular object sizes, using a mechanism like @mallopt@, in order to select an appropriate container size for each object size.
     641
     642
     643\subsection{Container Free-Lists}
     644\label{s:containersfreelists}
     645
     646The container header allows an alternate approach for managing the heap's free-list.
     647Rather than maintain a global free-list throughout the heap (see~\VRef[Figure]{f:GlobalFreeListAmongContainers}), the containers are linked through their headers and only the local free objects within a container are linked together (see~\VRef[Figure]{f:LocalFreeListWithinContainers}).
     648Note, maintaining free lists within a container assumes all free objects in the container are associated with the same heap;
     649thus, this approach only applies to containers with ownership.
     650
     651This alternate free-list approach can greatly reduce the complexity of moving all freed objects belonging to a container to another heap.
     652To move a container using a global free-list, as in \VRef[Figure]{f:GlobalFreeListAmongContainers}, the free list is first searched to find all objects within the container.
      653The objects are then removed from the free list and linked together to form a local free-list for the move to the new heap.
     654With local free-lists in containers, as in \VRef[Figure]{f:LocalFreeListWithinContainers}, the container is simply removed from one heap's free list and placed on the new heap's free list.
     655Thus, when using local free-lists, the operation of moving containers is reduced from $O(N)$ to $O(1)$.
     656The cost is adding information to a header, which increases the header size, and therefore internal fragmentation.
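A minimal sketch of this organization in C, assuming doubly-linked container headers and a hypothetical @moveContainer@ helper, shows why the move is $O(1)$.
\begin{lstlisting}[language=C]
#include <stddef.h>

typedef struct Object { struct Object * next; } Object;    // link through free objects

typedef struct Container {              // hypothetical container header
    struct Container * prev, * next;    // links in the owning heap's container list
    Object * freeList;                  // local free-list of objects in this container
} Container;

typedef struct Heap {
    Container * containers;             // head of the heap's doubly-linked container list
} Heap;

// Move a container, and implicitly its local free-list, to another heap in O(1).
static void moveContainer( Heap * from, Heap * to, Container * c ) {
    if ( c->prev ) c->prev->next = c->next; else from->containers = c->next;   // unlink
    if ( c->next ) c->next->prev = c->prev;
    c->prev = NULL;  c->next = to->containers;                                 // push onto new owner
    if ( to->containers ) to->containers->prev = c;
    to->containers = c;
}
\end{lstlisting}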
     657
     658\begin{figure}
     659\centering
     660\subfigure[Global Free-List Among Containers]{
     661        \input{FreeListAmongContainers}
     662        \label{f:GlobalFreeListAmongContainers}
     663} % subfigure
     664\hspace{0.25in}
     665\subfigure[Local Free-List Within Containers]{
     666        \input{FreeListWithinContainers}
     667        \label{f:LocalFreeListWithinContainers}
     668} % subfigure
     669\caption{Container Free-List Structure}
     670\label{f:ContainerFreeListStructure}
     671\end{figure}
     672
     673When all objects in the container are the same size, a single free-list is sufficient.
      674However, when objects in the container are of different sizes, the header needs a free list for each size class when using a binning allocation algorithm, which can be a significant increase in the container-header size.
     675The alternative is to use a different allocation algorithm with a single free-list, such as a sequential-fit allocation-algorithm.
     676
     677
     678\subsection{Hybrid Private/Public Heap}
     679\label{s:HybridPrivatePublicHeap}
     680
      681\VRef{s:Ownership} discusses advantages and disadvantages of public heaps (T:H model and with ownership) and private heaps (thread heaps with ownership).
     682For thread heaps with ownership, it is possible to combine these approaches into a hybrid approach with both private and public heaps (see~\VRef[Figure]{f:HybridPrivatePublicHeap}).
     683The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup.
     684In the hybrid approach, a task first allocates from its private heap and second from its public heap if no free memory exists in the private heap.
      685Similarly, a task first deallocates an object to its private heap, and second to the public heap.
     686Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps.
      687Note, deallocation from the private to the public (dashed line) is unlikely because there is no obvious advantage unless the public heap provides the only interface to the global heap.
      688Finally, when a task frees an object it does not own, the object is either freed immediately to its owner's public heap or put in the freeing task's private heap for delayed ownership, which allows the freeing task to temporarily reuse an object before returning it to its owner or to batch objects for an owner heap into a single return.
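The allocation/deallocation policy can be summarized by the following C sketch; the helper routines (@privateAlloc@, @publicAlloc@, @globalAlloc@, @remoteFree@, and so on) are hypothetical names for the steps described above, not an existing interface.
\begin{lstlisting}[language=C]
#include <stddef.h>

typedef struct Heap Heap;               // opaque heap type (illustrative)

// Hypothetical helpers naming the steps described above.
extern void * privateAlloc( size_t );   // this thread's private heap, no locking
extern void * publicAlloc( size_t );    // this thread's public heap, locked
extern void * globalAlloc( size_t );    // global heap or operating system
extern void privateFree( void * );
extern void remoteFree( Heap *, void * );
extern Heap * ownerOf( void * );        // e.g., from a container header
extern Heap * myHeap( void );

void * hybridMalloc( size_t size ) {
    void * obj = privateAlloc( size );          // fast path: thread-local, contention free
    if ( ! obj ) obj = publicAlloc( size );     // slower: may hold objects freed by other threads
    if ( ! obj ) obj = globalAlloc( size );     // slowest: global heap / operating system
    return obj;
}

void hybridFree( void * obj ) {
    Heap * owner = ownerOf( obj );
    if ( owner == myHeap() ) privateFree( obj );    // owned: lock-free local free
    else remoteFree( owner, obj );                  // unowned: push to owner's public heap
}
\end{lstlisting}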
     689
     690\begin{figure}
     691\centering
     692\input{PrivatePublicHeaps.pstex_t}
     693\caption{Hybrid Private/Public Heap for Per-thread Heaps}
     694\label{f:HybridPrivatePublicHeap}
     695% \vspace{10pt}
     696% \input{RemoteFreeList.pstex_t}
     697% \caption{Remote Free-List}
     698% \label{f:RemoteFreeList}
     699\end{figure}
     700
     701As mentioned, an implementation may have only one heap interact with the global heap, so the other heap can be simplified.
     702For example, if only the private heap interacts with the global heap, the public heap can be reduced to a lock-protected free-list of objects deallocated by other threads due to ownership, called a \newterm{remote free-list}.
      703To avoid heap blowup, the private heap allocates from the remote free-list when the list reaches some threshold or the private heap has no free storage.
      704Since the remote free-list is occasionally cleared during an allocation, clearing adds to the allocation cost.
     705Clearing the remote free-list is $O(1)$ if the list can simply be added to the end of the private-heap's free-list, or $O(N)$ if some action must be performed for each freed object.
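A sketch of the $O(1)$ case, assuming singly-linked free-lists with head and tail pointers (illustrative names only), is:
\begin{lstlisting}[language=C]
#include <stddef.h>

typedef struct Object { struct Object * next; } Object;

typedef struct FreeList {               // singly-linked list with head/tail for O(1) splicing
    Object * head, * tail;
} FreeList;

// Clear an already-detached remote free-list into the private free-list in O(1)
// by splicing it onto the end; any per-object work makes the clear O(N).
static void spliceRemote( FreeList * priv, FreeList * remote ) {
    if ( ! remote->head ) return;               // nothing to clear
    if ( priv->tail ) priv->tail->next = remote->head;
    else priv->head = remote->head;
    priv->tail = remote->tail;
    remote->head = remote->tail = NULL;
}
\end{lstlisting}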
     706
     707If only the public heap interacts with other threads and the global heap, the private heap can handle thread-local allocations and deallocations without locking.
     708In this scenario, the private heap must deallocate storage after reaching a certain threshold to the public heap (and then eventually to the global heap from the public heap) or heap blowup can occur.
     709If the public heap does the major management, the private heap can be simplified to provide high-performance thread-local allocations and deallocations.
     710
     711The main disadvantage of each thread having both a private and public heap is the complexity of managing two heaps and their interactions in an allocator.
     712Interestingly, heap implementations often focus on either a private or public heap, giving the impression a single versus a hybrid approach is being used.
      713In many cases, the hybrid approach is actually being used, but the simpler heap is just folded into the complex heap, even though the operations logically belong in separate heaps.
     714For example, a remote free-list is actually a simple public-heap, but may be implemented as an integral component of the complex private-heap in an allocator, masking the presence of a hybrid approach.
     715
     716
     717\section{Allocation Buffer}
     718\label{s:AllocationBuffer}
     719
     720An allocation buffer is reserved memory (see~\VRef{s:AllocatorComponents}) not yet allocated to the program, and is used for allocating objects when the free list is empty.
     721That is, rather than requesting new storage for a single object, an entire buffer is requested from which multiple objects are allocated later.
     722Any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or operating system, respectively.
     723The allocation buffer reduces contention and the number of global/operating-system calls.
     724For coalescing, a buffer is split into smaller objects by allocations, and recomposed into larger buffer areas during deallocations.
     725
     726Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts (simple bump allocation).
     727Furthermore, to prevent heap blowup, objects should be reused before allocating a new allocation buffer.
     728Thus, allocation buffers are often allocated more frequently at program/thread start, and then allocations often diminish.
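A minimal bump-allocation sketch in C, with hypothetical names and a fixed 16-byte alignment assumption, illustrates this fast path.
\begin{lstlisting}[language=C]
#include <stddef.h>

typedef struct Buffer {                 // hypothetical allocation buffer
    char * next;                        // next free byte in the buffer
    char * end;                         // one past the end of the buffer
} Buffer;

// Bump allocation: carve the next object out of the reserved buffer; return NULL
// when the buffer is exhausted so the caller falls back to the heap's normal path.
static void * bumpAlloc( Buffer * buf, size_t size ) {
    size = ( size + 15 ) & ~(size_t)15;                     // keep objects 16-byte aligned
    if ( (size_t)( buf->end - buf->next ) < size ) return NULL;
    void * obj = buf->next;
    buf->next += size;
    return obj;
}
\end{lstlisting}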
     729
     730Using an allocation buffer with a thread heap avoids active false-sharing, since all objects in the allocation buffer are allocated to the same thread.
     731For example, if all objects sharing a cache line come from the same allocation buffer, then these objects are allocated to the same thread, avoiding active false-sharing.
     732Active false-sharing may still occur if objects are freed to the global heap and reused by another heap.
     733
     734Allocation buffers may increase external fragmentation, since some memory in the allocation buffer may never be allocated.
     735A smaller allocation buffer reduces the amount of external fragmentation, but increases the number of calls to the global heap or operating system.
     736The allocation buffer also slightly increases internal fragmentation, since a pointer is necessary to locate the next free object in the buffer.
     737
      738The unused part of a container, neither allocated nor freed, is an allocation buffer.
     739For example, when a container is created, rather than placing all objects within the container on the free list, the objects form an allocation buffer and are allocated from the buffer as allocation requests are made.
     740This lazy method of constructing objects is beneficial in terms of paging and caching.
     741For example, although an entire container, possibly spanning several pages, is allocated from the operating system, only a small part of the container is used in the working set of the allocator, reducing the number of pages and cache lines that are brought into higher levels of cache.
     742
     743
     744\section{Lock-Free Operations}
     745\label{s:LockFreeOperations}
     746
     747A \newterm{lock-free algorithm} guarantees safe concurrent-access to a data structure, so that at least one thread makes progress, but an individual task has no execution bound and may starve~\cite[pp.~745--746]{Herlihy93}.
     748(A \newterm{wait-free algorithm} puts a bound on the number of steps any thread takes to complete an operation to prevent starvation.)
     749Lock-free operations can be used in an allocator to reduce or eliminate the use of locks.
     750While locks and lock-free data-structures often have equal performance, lock-free has the advantage of not holding a lock across preemption so other threads can continue to make progress.
     751With respect to the heap, these situations are unlikely unless all threads make extremely high use of dynamic-memory allocation, which can be an indication of poor design.
     752Nevertheless, lock-free algorithms can reduce the number of context switches, since a thread does not yield/block while waiting for a lock;
     753on the other hand, a thread may busy-wait for an unbounded period holding a processor.
     754Finally, lock-free implementations have greater complexity and hardware dependency.
     755Lock-free algorithms can be applied most easily to simple free-lists, \eg remote free-list, to allow lock-free insertion and removal from the head of a stack.
     756Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is correspondingly more complex.
     757Michael~\cite{Michael04} and Gidenstam \etal \cite{Gidenstam05} have created lock-free variations of the Hoard allocator.
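As a sketch of the simple free-list case, the following C11 fragment pushes onto a lock-free stack with a compare-and-swap and removes the entire list with an atomic exchange, which sidesteps the ABA problem a node-at-a-time pop would have to address (\eg with tagged pointers); the types and names are illustrative.
\begin{lstlisting}[language=C]
#include <stdatomic.h>
#include <stddef.h>

typedef struct Node { struct Node * next; } Node;
typedef struct { _Atomic(Node *) top; } LockFreeStack;  // e.g., a remote free-list

// Lock-free push of a freed object onto the head of the stack.
static void push( LockFreeStack * s, Node * n ) {
    Node * old = atomic_load_explicit( &s->top, memory_order_relaxed );
    do {
        n->next = old;
    } while ( ! atomic_compare_exchange_weak_explicit( &s->top, &old, n,
                memory_order_release, memory_order_relaxed ) );
}

// Remove all nodes at once, e.g., when the owner clears its remote free-list.
static Node * popAll( LockFreeStack * s ) {
    return atomic_exchange_explicit( &s->top, NULL, memory_order_acquire );
}
\end{lstlisting}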
  • doc/theses/mubeen_zulfiqar_MMath/benchmarks.tex

    r4559b34 r92538ab  
    4141%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    4242
     43
     44\section{Benchmarks}
      45There are multiple benchmarks that are built individually and evaluate different aspects of a memory allocator, but there is no standard set of benchmarks that can be used to evaluate multiple aspects of memory allocators.
     46
     47\paragraph{threadtest}
      48(FIX ME: cite benchmark and hoard) Each thread repeatedly allocates and then deallocates 100,000 objects. The benchmark's runtime is used to evaluate the allocator's efficiency.
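A minimal C sketch of the threadtest pattern follows; the parameters and object size are illustrative, not the benchmark's actual values.
\begin{lstlisting}[language=C]
#include <stdlib.h>
#include <pthread.h>

enum { Objs = 100000, Iters = 100, ObjSize = 8, Threads = 4 };  // illustrative parameters

// Each thread repeatedly allocates and then deallocates a batch of objects;
// elapsed time is the measure of allocator efficiency.
static void * worker( void * arg ) {
    (void)arg;
    void ** objs = malloc( sizeof(void *) * Objs );
    for ( int i = 0; i < Iters; i += 1 ) {
        for ( int o = 0; o < Objs; o += 1 ) objs[o] = malloc( ObjSize );
        for ( int o = 0; o < Objs; o += 1 ) free( objs[o] );
    }
    free( objs );
    return NULL;
}

int main( void ) {
    pthread_t workers[Threads];
    for ( int t = 0; t < Threads; t += 1 ) pthread_create( &workers[t], NULL, worker, NULL );
    for ( int t = 0; t < Threads; t += 1 ) pthread_join( workers[t], NULL );
}
\end{lstlisting}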
     49
     50\paragraph{shbench}
      51(FIX ME: cite benchmark and hoard) Each thread allocates and randomly frees a number of random-sized objects. It is a stress test that also uses runtime to determine the efficiency of the allocator.
     52
     53\paragraph{larson}
      54(FIX ME: cite benchmark and hoard) Larson simulates a server environment. Multiple threads are created, where each thread allocates and frees a number of objects within a size range. Some objects are passed from threads to child threads to free. It calculates memory operations per second as an indicator of the memory allocator's performance.
     55
     56
     4357\section{Performance Metrics of Memory Allocators}
    4458
  • doc/theses/mubeen_zulfiqar_MMath/intro.tex

    r4559b34 r92538ab  
    11\chapter{Introduction}
    22
     3% Shared-memory multi-processor computers are ubiquitous and important for improving application performance.
     4% However, writing programs that take advantage of multiple processors is not an easy task~\cite{Alexandrescu01b}, \eg shared resources can become a bottleneck when increasing (scaling) threads.
     5% One crucial shared resource is program memory, since it is used by all threads in a shared-memory concurrent-program~\cite{Berger00}.
     6% Therefore, providing high-performance, scalable memory-management is important for virtually all shared-memory multi-threaded programs.
     7
     8\vspace*{-23pt}
     9Memory management takes a sequence of program generated allocation/deallocation requests and attempts to satisfy them within a fixed-sized block of memory while minimizing the total amount of memory used.
     10A general-purpose dynamic-allocation algorithm cannot anticipate future allocation requests so its output is rarely optimal.
     11However, memory allocators do take advantage of regularities in allocation patterns for typical programs to produce excellent results, both in time and space (similar to LRU paging).
     12In general, allocators use a number of similar techniques, each optimizing specific allocation patterns.
     13Nevertheless, memory allocators are a series of compromises, occasionally with some static or dynamic tuning parameters to optimize specific program-request patterns.
     14
     15
     16\section{Memory Structure}
     17\label{s:MemoryStructure}
     18
     19\VRef[Figure]{f:ProgramAddressSpace} shows the typical layout of a program's address space divided into the following zones (right to left): static code/data, dynamic allocation, dynamic code/data, and stack, with free memory surrounding the dynamic code/data~\cite{memlayout}.
     20Static code and data are placed into memory at load time from the executable and are fixed-sized at runtime.
     21Dynamic-allocation memory starts empty and grows/shrinks as the program dynamically creates/deletes variables with independent lifetime.
     22The programming-language's runtime manages this area, where management complexity is a function of the mechanism for deleting variables.
     23Dynamic code/data memory is managed by the dynamic loader for libraries loaded at runtime, which is complex especially in a multi-threaded program~\cite{Huang06}.
     24However, changes to the dynamic code/data space are typically infrequent, many occurring at program startup, and are largely outside of a program's control.
     25Stack memory is managed by the program call-mechanism using a simple LIFO technique, which works well for sequential programs.
     26For multi-threaded programs (and coroutines), a new stack is created for each thread;
     27these thread stacks are commonly created in dynamic-allocation memory.
     28This thesis focuses on management of the dynamic-allocation memory.
     29
     30\begin{figure}
     31\centering
     32\input{AddressSpace}
     33\vspace{-5pt}
     34\caption{Program Address Space Divided into Zones}
     35\label{f:ProgramAddressSpace}
     36\end{figure}
     37
     38
     39\section{Dynamic Memory-Management}
     40\label{s:DynamicMemoryManagement}
     41
     42Modern programming languages manage dynamic-allocation memory in different ways.
     43Some languages, such as Lisp~\cite{CommonLisp}, Java~\cite{Java}, Haskell~\cite{Haskell}, Go~\cite{Go}, provide explicit allocation but \emph{implicit} deallocation of data through garbage collection~\cite{Wilson92}.
     44In general, garbage collection supports memory compaction, where dynamic (live) data is moved during runtime to better utilize space.
     45However, moving data requires finding pointers to it and updating them to reflect new data locations.
     46Programming languages such as C~\cite{C}, \CC~\cite{C++}, and Rust~\cite{Rust} provide the programmer with explicit allocation \emph{and} deallocation of data.
     47These languages cannot find and subsequently move live data because pointers can be created to any storage zone, including internal components of allocated objects, and may contain temporary invalid values generated by pointer arithmetic.
     48Attempts have been made to perform quasi garbage collection in C/\CC~\cite{Boehm88}, but it is a compromise.
     49This thesis only examines dynamic memory-management with \emph{explicit} deallocation.
     50While garbage collection and compaction are not part this work, many of the work's results are applicable to the allocation phase in any memory-management approach.
     51
     52Most programs use a general-purpose allocator, often the one provided implicitly by the programming-language's runtime.
     53When this allocator proves inadequate, programmers often write specialize allocators for specific needs.
     54C and \CC allow easy replacement of the default memory allocator with an alternative specialized or general-purpose memory-allocator.
     55(Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.)
     56However, high-performance memory-allocators for kernel and user multi-threaded programs are still being designed and improved.
     57For this reason, several alternative general-purpose allocators have been written for C/\CC with the goal of scaling in a multi-threaded program~\cite{Berger00,mtmalloc,streamflow,tcmalloc}.
     58This thesis examines the design of high-performance allocators for use by kernel and user multi-threaded applications written in C/\CC.
     59
     60
     61\section{Contributions}
     62\label{s:Contributions}
     63
     64This work provides the following contributions in the area of concurrent dynamic allocation:
     65\begin{enumerate}[leftmargin=*]
     66\item
      67Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
     68
     69\item
     70Adopt @nullptr@ return for a zero-sized allocation, rather than an actual memory address, which can be passed to @free@.
     71
     72\item
     73Extend the standard C heap functionality by preserving with each allocation:
     74\begin{itemize}[itemsep=0pt]
     75\item
     76its request size plus the amount allocated,
     77\item
     78whether an allocation is zero fill,
     79\item
     80and allocation alignment.
     81\end{itemize}
     82
     83\item
     84Use the preserved zero fill and alignment as \emph{sticky} properties for @realloc@ to zero-fill and align when storage is extended or copied.
     85Without this extension, it is unsafe to @realloc@ storage initially allocated with zero-fill/alignment as these properties are not preserved when copying.
     86This silent generation of a problem is unintuitive to programmers and difficult to locate because it is transient.
     87
     88\item
      89Provide additional heap operations to complete programmer expectations with respect to accessing different allocation properties (see the usage sketch after this list).
     90\begin{itemize}
     91\item
     92@resize( oaddr, size )@ re-purpose an old allocation for a new type \emph{without} preserving fill or alignment.
     93\item
     94@resize( oaddr, alignment, size )@ re-purpose an old allocation with new alignment but \emph{without} preserving fill.
     95\item
     96@realloc( oaddr, alignment, size )@ same as @realloc@ but adding or changing alignment.
     97\item
     98@aalloc( dim, elemSize )@ same as @calloc@ except memory is \emph{not} zero filled.
     99\item
     100@amemalign( alignment, dim, elemSize )@ same as @aalloc@ with memory alignment.
     101\item
     102@cmemalign( alignment, dim, elemSize )@ same as @calloc@ with memory alignment.
     103\end{itemize}
     104
     105\item
     106Provide additional heap wrapper functions in \CFA creating an orthogonal set of allocation operations and properties.
     107
     108\item
     109Provide additional query operations to access information about an allocation:
     110\begin{itemize}
     111\item
     112@malloc_alignment( addr )@ returns the alignment of the allocation pointed-to by @addr@.
     113If the allocation is not aligned or @addr@ is the @nulladdr@, the minimal alignment is returned.
     114\item
     115@malloc_zero_fill( addr )@ returns a boolean result indicating if the memory pointed-to by @addr@ is allocated with zero fill, e.g., by @calloc@/@cmemalign@.
     116\item
     117@malloc_size( addr )@ returns the size of the memory allocation pointed-to by @addr@.
     118\item
     119@malloc_usable_size( addr )@ returns the usable (total) size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.
     120\end{itemize}
     121
     122\item
     123Provide mostly contention-free allocation and free operations via a heap-per-kernel-thread implementation.
     124
     125\item
     126Provide complete, fast, and contention-free allocation statistics to help understand program behaviour:
     127\begin{itemize}
     128\item
     129@malloc_stats()@ print memory-allocation statistics on the file-descriptor set by @malloc_stats_fd@.
     130\item
     131@malloc_info( options, stream )@ print memory-allocation statistics as an XML string on the specified file-descriptor set by @malloc_stats_fd@.
     132\item
     133@malloc_stats_fd( fd )@ set file-descriptor number for printing memory-allocation statistics (default @STDERR_FILENO@).
     134This file descriptor is used implicitly by @malloc_stats@ and @malloc_info@.
     135\end{itemize}
     136
     137\item
      138Provide extensive runtime checks to validate allocation operations and identify the amount of unfreed storage at program termination.
     139
     140\item
     141Build 4 different versions of the allocator:
     142\begin{itemize}
     143\item
     144static or dynamic linking
     145\item
     146statistic/debugging (testing) or no statistic/debugging (performance)
     147\end{itemize}
     148A program may link to any of these 4 versions of the allocator often without recompilation.
     149(It is possible to separate statistics and debugging, giving 8 different versions.)
     150
     151\item
     152A micro-benchmark test-suite for comparing allocators rather than relying on a suite of arbitrary programs.
      153These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs.
     154\end{enumerate}
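The following C sketch illustrates how a program might exercise some of the extended operations above; the prototypes are assumed to come from the allocator's header, and the exact signatures may differ.
\begin{lstlisting}[language=C]
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>

// Assumed prototypes for the extended interface described above.
extern void * aalloc( size_t dim, size_t elemSize );
extern void * cmemalign( size_t alignment, size_t dim, size_t elemSize );
extern void * resize( void * oaddr, size_t size );
extern size_t malloc_size( void * addr );
extern size_t malloc_usable_size( void * addr );
extern size_t malloc_alignment( void * addr );
extern bool malloc_zero_fill( void * addr );

int main( void ) {
    int * a = aalloc( 100, sizeof(int) );               // like calloc, but not zero filled
    double * c = cmemalign( 64, 50, sizeof(double) );   // zero filled and 64-byte aligned
    a = resize( a, 200 * sizeof(int) );                 // re-purpose storage; fill/alignment not preserved
    printf( "%zu <= %zu, alignment %zu, zero fill %d\n",
            malloc_size( c ), malloc_usable_size( c ), malloc_alignment( c ), malloc_zero_fill( c ) );
    free( a );  free( c );
}
\end{lstlisting}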
     155
     156\begin{comment}
    3157\noindent
    4158====================
     
    26180
    27181\section{Introduction}
    28 Dynamic memory allocation and management is one of the core features of C. It gives programmer the freedom to allocate, free, use, and manage dynamic memory himself. The programmer is not given the complete control of the dynamic memory management instead an interface of memory allocator is given to the progrmmer that can be used to allocate/free dynamic memory for the application's use.
    29 
    30 Memory allocator is a layer between thr programmer and the system. Allocator gets dynamic memory from the system in heap/mmap area of application storage and manages it for programmer's use.
    31 
    32 GNU C Library (FIX ME: cite this) provides an interchangeable memory allocator that can be replaced with a custom memory allocator that supports required features and fulfills application's custom needs. It also allows others to innovate in memory allocation and design their own memory allocator. GNU C Library has set guidelines that should be followed when designing a standalone memory allocator. GNU C Library requires new memory allocators to have atlease following set of functions in their allocator's interface:
     182Dynamic memory allocation and management is one of the core features of C. It gives programmer the freedom to allocate, free, use, and manage dynamic memory himself. The programmer is not given the complete control of the dynamic memory management instead an interface of memory allocator is given to the programmer that can be used to allocate/free dynamic memory for the application's use.
     183
     184Memory allocator is a layer between the programmer and the system. Allocator gets dynamic memory from the system in heap/mmap area of application storage and manages it for programmer's use.
     185
     186GNU C Library (FIX ME: cite this) provides an interchangeable memory allocator that can be replaced with a custom memory allocator that supports required features and fulfills application's custom needs. It also allows others to innovate in memory allocation and design their own memory allocator. GNU C Library has set guidelines that should be followed when designing a stand-alone memory allocator. GNU C Library requires new memory allocators to have at lease following set of functions in their allocator's interface:
    33187
    34188\begin{itemize}
     
    43197\end{itemize}
    44198
    45 In addition to the above functions, GNU C Library also provides some more functions to increase the usability of the dynamic memory allocator. Most standalone allocators also provide all or some of the above additional functions.
     199In addition to the above functions, GNU C Library also provides some more functions to increase the usability of the dynamic memory allocator. Most stand-alone allocators also provide all or some of the above additional functions.
    46200
    47201\begin{itemize}
     
    60214\end{itemize}
    61215
    62 With the rise of concurrent applications, memory allocators should be able to fulfill dynamic memory requests from multiple threads in parallel without causing contention on shared resources. There needs to be a set of a standard benchmarks that can be used to evaluate an allocator's performance in different scenerios.
     216With the rise of concurrent applications, memory allocators should be able to fulfill dynamic memory requests from multiple threads in parallel without causing contention on shared resources. There needs to be a set of a standard benchmarks that can be used to evaluate an allocator's performance in different scenarios.
    63217
    64218\section{Research Objectives}
     
    69223Design a lightweight concurrent memory allocator with added features and usability that are currently not present in the other memory allocators.
    70224\item
    71 Design a suite of benchmarks to evalute multiple aspects of a memory allocator.
     225Design a suite of benchmarks to evaluate multiple aspects of a memory allocator.
    72226\end{itemize}
    73227
    74228\section{An outline of the thesis}
    75229LAST FIX ME: add outline at the end
     230\end{comment}
  • doc/theses/mubeen_zulfiqar_MMath/performance.tex

    r4559b34 r92538ab  
    1818\noindent
    1919====================
     20
     21\section{Machine Specification}
     22
     23The performance experiments were run on three different multicore systems to determine if there is consistency across platforms:
     24\begin{itemize}
     25\item
     26AMD EPYC 7662, 64-core socket $\times$ 2, 2.0 GHz
     27\item
     28Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz
     29\item
     30Intel Xeon Gold 5220R, 48-core socket $\times$ 2, 2.20GHz
     31\end{itemize}
     32
     33
     34\section{Existing Memory Allocators}
     35With dynamic allocation being an important feature of C, there are many stand-alone memory allocators that have been designed for different purposes. For this thesis, we chose 7 of the most popular and widely used memory allocators.
     36
     37\paragraph{dlmalloc}
      38dlmalloc (FIX ME: cite allocator) is a thread-safe allocator that is single-threaded and single-heap. dlmalloc maintains free-lists of different sizes to store freed dynamic memory. (FIX ME: cite wasik)
     39
     40\paragraph{hoard}
      41Hoard (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses a heap-layer framework. It has per-thread heaps with thread-local free-lists, and a global shared heap. (FIX ME: cite wasik)
     42
     43\paragraph{jemalloc}
      44jemalloc (FIX ME: cite allocator) is a thread-safe allocator that uses multiple arenas. Each thread is assigned an arena. Each arena has chunks that contain contiguous memory regions of the same size. An arena has multiple chunks that contain regions of multiple sizes.
     45
     46\paragraph{ptmalloc}
      47ptmalloc (FIX ME: cite allocator) is a modification of dlmalloc. It is a thread-safe multi-threaded memory allocator that uses multiple heaps. ptmalloc's heap has a similar design to dlmalloc's heap.
     48
     49\paragraph{rpmalloc}
      50rpmalloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses per-thread heaps. Each heap has multiple size-classes and each size-class contains memory regions of the relevant size.
     51
     52\paragraph{tbb malloc}
      53tbb malloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses a private heap for each thread. Each private heap has multiple bins of different sizes. Each bin contains free regions of the same size.
     54
      55\paragraph{tcmalloc}
      56tcmalloc (FIX ME: cite allocator) is a thread-safe allocator. It uses a per-thread cache to store free objects, which prevents contention on shared resources in multi-threaded applications. A central free-list is used to refill the per-thread cache when it becomes empty.
     57
    2058
    2159\section{Memory Allocators}
  • doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.bib

    r4559b34 r92538ab  
    3434    year          = "2008"
    3535}
     36
     37@article{Sleator85,
     38    author      = {Sleator, Daniel Dominic and Tarjan, Robert Endre},
     39    title       = {Self-Adjusting Binary Search Trees},
     40    journal     = jacm,
     41    volume      = 32,
     42    number      = 3,
     43    year        = 1985,
     44    issn        = {0004-5411},
     45    pages       = {652-686},
     46    doi         = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/3828.3835},
     47    address     = {New York, NY, USA},
     48}
     49
     50@article{Berger00,
     51    author      = {Emery D. Berger and Kathryn S. McKinley and Robert D. Blumofe and Paul R. Wilson},
     52    title       = {Hoard: A Scalable Memory Allocator for Multithreaded Applications},
     53    booktitle   = {International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX)},
     54    journal     = sigplan,
     55    volume      = 35,
     56    number      = 11,
     57    month       = nov,
     58    year        = 2000,
     59    pages       = {117-128},
     60    note        = {International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX)},
     61}
     62
     63@inproceedings{berger02reconsidering,
     64    author      = {Emery D. Berger and Benjamin G. Zorn and Kathryn S. McKinley},
     65    title       = {Reconsidering Custom Memory Allocation},
     66    booktitle   = {Proceedings of the 17th ACM SIGPLAN Conference on Object-Oriented Programming: Systems, Languages, and Applications (OOPSLA) 2002},
     67    month       = nov,
     68    year        = 2002,
     69    location    = {Seattle, Washington, USA},
     70    publisher   = {ACM},
     71    address     = {New York, NY, USA},
     72}
     73
     74@article{larson99memory,
     75    author      = {Per-{\AA}ke Larson and Murali Krishnan},
     76    title       = {Memory Allocation for Long-Running Server Applications},
     77    journal     = sigplan,
     78    volume      = 34,
     79    number      = 3,
     80    pages       = {176-185},
     81    year        = 1999,
     82    url         = {http://citeseer.ist.psu.edu/article/larson98memory.html}
     83}
     84
     85@techreport{gidpt04,
     86    author      = {Anders Gidenstam and Marina Papatriantafilou and Philippas Tsigas},
     87    title       = {Allocating Memory in a Lock-Free Manner},
     88    number      = {2004-04},
     89    institution = {Computing Science},
     90    address     = {Chalmers University of Technology},
     91    year        = 2004,
     92    url         = {http://citeseer.ist.psu.edu/gidenstam04allocating.html}
     93}
     94
     95@phdthesis{berger02thesis,
     96    author      = {Emery Berger},
     97    title       = {Memory Management for High-Performance Applications},
     98    school      = {The University of Texas at Austin},
     99    year        = 2002,
     100    month       = aug,
     101    url         = {http://citeseer.ist.psu.edu/article/berger02memory.html}
     102}
     103
     104@misc{sgimisc,
     105    author      = {SGI},
     106    title       = {The Standard Template Library for {C++}},
     107    note        = {\textsf{www.sgi.com/\-tech/\-stl/\-Allocators.html}},
     108}
     109
     110@misc{dlmalloc,
     111    author      = {Doug Lea},
     112    title       = {dlmalloc version 2.8.4},
     113    month       = may,
     114    year        = 2009,
     115    note        = {\textsf{ftp://g.oswego.edu/\-pub/\-misc/\-malloc.c}},
     116}
     117
     118@misc{ptmalloc2,
     119    author      = {Wolfram Gloger},
     120    title       = {ptmalloc version 2},
     121    month       = jun,
     122    year        = 2006,
     123    note        = {\textsf{http://www.malloc.de/\-malloc/\-ptmalloc2-current.tar.gz}},
     124}
     125
     126@misc{nedmalloc,
     127    author      = {Niall Douglas},
     128    title       = {nedmalloc version 1.06 Beta},
     129    month       = jan,
     130    year        = 2010,
     131    note        = {\textsf{http://\-prdownloads.\-sourceforge.\-net/\-nedmalloc/\-nedmalloc\_v1.06beta1\_svn1151.zip}},
     132}
     133
     134@misc{hoard,
     135    author      = {Emery D. Berger},
     136    title       = {hoard version 3.8},
     137    month       = nov,
     138    year        = 2009,
     139    note        = {\textsf{http://www.cs.umass.edu/\-$\sim$emery/\-hoard/\-hoard-3.8/\-source/hoard-38.tar.gz}},
     140}
     141
     142@comment{mtmalloc,
     143    author      = {Greg Nakhimovsky},
     144    title       = {Improving Scalability of Multithreaded Dynamic Memory Allocation},
     145    journal     = {Dr. Dobb's},
     146    month       = jul,
     147    year        = 2001,
     148    url         = {http://www.ddj.com/mobile/184404685?pgno=1}
     149}
     150
     151@misc{mtmalloc,
     152    key         = {mtmalloc},
     153    title       = {mtmalloc.c},
     154    year        = 2009,
     155    note        = {\textsf{http://src.opensolaris.org/\-source/\-xref/\-onnv/\-onnv-gate/\-usr/\-src/\-lib/\-libmtmalloc/\-common/\-mtmalloc.c}},
     156}
     157
     158@misc{tcmalloc,
     159    author      = {Sanjay Ghemawat and Paul Menage},
     160    title       = {tcmalloc version 1.5},
     161    month       = jan,
     162    year        = 2010,
     163    note        = {\textsf{http://google-perftools.\-googlecode.\-com/\-files/\-google-perftools-1.5.tar.gz}},
     164}
     165
     166@inproceedings{streamflow,
     167    author      = {Scott Schneider and Christos D. Antonopoulos and Dimitrios S. Nikolopoulos},
     168    title       = {Scalable Locality-Conscious Multithreaded Memory Allocation},
     169    booktitle   = {International Symposium on Memory Management (ISSM'06)},
     170    month       = jun,
     171    year        = 2006,
     172    pages       = {84-94},
     173    location    = {Ottawa, Ontario, Canada},
     174    publisher   = {ACM},
     175    address     = {New York, NY, USA},
     176}
     177
     178@misc{streamflowweb,
     179    author      = {Scott Schneider and Christos Antonopoulos and Dimitrios Nikolopoulos},
     180    title       = {Streamflow},
     181    note        = {\textsf{http://people.cs.vt.edu/\-\char`\~scschnei/\-streamflow}},
     182}
     183
     184@inproceedings{Blumofe94,
     185    author      = {R. Blumofe and C. Leiserson},
     186    title       = {Scheduling Multithreaded Computations by Work Stealing},
     187    booktitle   = {Proceedings of the 35th Annual Symposium on Foundations of Computer Science, Santa Fe, New Mexico.},
     188    pages       = {356-368},
     189    year        = 1994,
     190    month       = nov,
     191    url         = {http://citeseer.ist.psu.edu/article/blumofe94scheduling.html}
     192}
     193
     194@article{Johnstone99,
     195    author      = {Mark S. Johnstone and Paul R. Wilson},
     196    title       = {The Memory Fragmentation Problem: Solved?},
     197    journal     = sigplan,
     198    volume      = 34,
     199    number      = 3,
     200    pages       = {26-36},
     201    year        = 1999,
     202}
     203
     204@inproceedings{Grunwald93,
     205    author      = {Dirk Grunwald and Benjamin G. Zorn and Robert Henderson},
     206    title       = {Improving the Cache Locality of Memory Allocation},
     207    booktitle   = {{SIGPLAN} Conference on Programming Language Design and Implementation},
     208    pages       = {177-186},
     209    year        = 1993,
     210    url         = {http://citeseer.ist.psu.edu/grunwald93improving.html}
     211}
     212
     213@inproceedings{Wilson95,
     214    author      = {Wilson, Paul R. and Johnstone, Mark S. and Neely, Michael and Boles, David},
     215    title       = {Dynamic Storage Allocation: A Survey and Critical Review},
     216    booktitle   = {Proc. Int. Workshop on Memory Management},
     217    address     = {Kinross Scotland, UK},
     218    year        = 1995,
     219    url         = {http://citeseer.ist.psu.edu/wilson95dynamic.html}
     220}
     221
     222@inproceedings{Siebert00,
     223    author      = {Fridtjof Siebert},
     224    title       = {Eliminating External Fragmentation in a Non-moving Garbage Collector for Java},
     225    booktitle   = {CASES '00: Proceedings of the 2000 international conference on Compilers, architecture, and synthesis for embedded systems},
     226    year        = 2000,
     227    isbn        = {1-58113-338-3},
     228    pages       = {9-17},
     229    location    = {San Jose, California, United States},
     230    doi         = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/354880.354883},
     231    publisher   = {ACM Press},
     232    address     = {New York, NY, USA}
     233}
     234
     235@inproceedings{Lim98,
     236   author       = {Tian F. Lim and Przemyslaw Pardyak and Brian N. Bershad},
     237   title        = {A Memory-Efficient Real-Time Non-copying Garbage Collector},
     238   booktitle    = {ISMM '98: Proceedings of the 1st international symposium on Memory management},
     239   year         = 1998,
     240   isbn         = {1-58113-114-3},
     241   pages        = {118-129},
     242   location     = {Vancouver, British Columbia, Canada},
     243   doi          = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/286860.286873},
     244   publisher    = {ACM Press},
     245   address      = {New York, NY, USA}
     246}
     247
     248@article{Chang01,
     249    author      = {J. Morris Chang and Woo Hyong Lee and Witawas Srisa-an},
     250    title       = {A Study of the Allocation Behavior of {C++} Programs},
     251    journal     = {J. Syst. Softw.},
     252    volume      = 57,
     253    number      = 2,
     254    year        = 2001,
     255    issn        = {0164-1212},
     256    pages       = {107-118},
     257    doi         = {http://dx.doi.org/10.1016/S0164-1212(00)00122-9},
     258    publisher   = {Elsevier Science Inc.},
     259    address     = {New York, NY, USA}
     260}
     261
     262@article{Herlihy93,
     263    author      = {Maurice Herlihy},
     264    title       = {A Methodology for Implementing Highly Concurrent Data Objects},
     265    journal     = toplas,
     266    volume      = 15,
     267    number      = 5,
     268    year        = 1993,
     269    issn        = {0164-0925},
     270    pages       = {745-770},
     271    doi         = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/161468.161469},
     272    publisher   = {ACM Press},
     273    address     = {New York, NY, USA}
     274}
     275
     276@article{Denning05,
     277    author      = {Peter J. Denning},
     278    title       = {The Locality Principle},
     279    journal     = cacm,
     280    volume      = 48,
     281    number      = 7,
     282    year        = 2005,
     283    issn        = {0001-0782},
     284    pages       = {19-24},
     285    doi         = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/1070838.1070856},
     286    publisher   = {ACM Press},
     287    address     = {New York, NY, USA}
     288}
     289
     290@misc{wilson-locality,
     291    author      = {Paul R. Wilson},
     292    title       = {Locality of Reference, Patterns in Program Behavior, Memory Management, and Memory Hierarchies},
     293    url         = {http://citeseer.ist.psu.edu/337869.html}
     294}
     295
     296@inproceedings{Feng05,
     297    author      = {Yi Feng and Emery D. Berger},
     298    title       = {A Locality-Improving Dynamic Memory Allocator},
     299    booktitle   = {Proceedings of the 2005 Workshop on Memory System Performance},
     300    location    = {Chicago, Illinois},
     301    publisher   = {ACM},
     302    address     = {New York, NY, USA},
     303    month       = jun,
     304    year        = 2005,
     305    pages       = {68-77},
     306}
     307
     308@inproceedings{grunwald-locality,
     309    author      = {Dirk Grunwald and Benjamin Zorn and Robert Henderson},
     310    title       = {Improving the Cache Locality of Memory Allocation},
     311    booktitle   = {PLDI '93: Proceedings of the ACM SIGPLAN 1993 conference on Programming language design and implementation},
     312    year        = 1993,
     313    isbn        = {0-89791-598-4},
     314    pages       = {177-186},
     315    location    = {Albuquerque, New Mexico, United States},
     316    doi         = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/155090.155107},
     317    publisher   = {ACM Press},
     318    address     = {New York, NY, USA}
     319}
     320
     321@article{Alexandrescu01b,
     322    author      = {Andrei Alexandrescu},
     323    title       = {{volatile} -- Multithreaded Programmer's Best Friend},
     324    journal     = {Dr. Dobb's},
     325    month       = feb,
     326    year        = 2001,
     327    url         = {http://www.ddj.com/cpp/184403766}
     328}
     329
     330@article{Attardi03,
     331    author      = {Joseph Attardi and Neelakanth Nadgir},
     332    title       = {A Comparison of Memory Allocators in Multiprocessors},
     333    journal     = {Sun Developer Network},
     334    month       = jun,
     335    year        = 2003,
     336    note        = {\textsf{http://developers.sun.com/\-solaris/\-articles/\-multiproc/\-multiproc.html}},
     337}
     338
     339@unpublished{memlayout,
     340    author      = {Peter Jay Salzman},
     341    title       = {Memory Layout and the Stack},
     342    journal     = {Using GNU's GDB Debugger},
     343    note        = {\textsf{http://dirac.org/\-linux/\-gdb/\-02a-Memory\_Layout\_And\_The\_Stack.php}},
     344}
     345
     346@unpublished{Ferguson07,
     347    author      = {Justin N. Ferguson},
     348    title       = {Understanding the Heap by Breaking It},
     349    note        = {\textsf{https://www.blackhat.com/\-presentations/\-bh-usa-07/Ferguson/\-Whitepaper/\-bh-usa-07-ferguson-WP.pdf}},
     350}
     351
     352@inproceedings{Huang06,
     353    author      = {Xianglong Huang and Brian T Lewis and Kathryn S McKinley},
     354    title       = {Dynamic Code Management: Improving Whole Program Code Locality in Managed Runtimes},
     355    booktitle   = {VEE '06: Proceedings of the 2nd international conference on Virtual execution environments},
     356    year        = 2006,
     357    isbn        = {1-59593-332-6},
     358    pages       = {133-143},
     359    location    = {Ottawa, Ontario, Canada},
     360    doi         = {http://doi.acm.org/10.1145/1134760.1134779},
     361    publisher   = {ACM Press},
     362    address     = {New York, NY, USA}
     363 }
     364
     365@inproceedings{Herlihy03,
     366    author      = {M. Herlihy and V. Luchangco and M. Moir},
     367    title       = {Obstruction-free Synchronization: Double-ended Queues as an Example},
     368    booktitle   = {Proceedings of the 23rd IEEE International Conference on Distributed Computing Systems},
     369    year        = 2003,
     370    month       = may,
     371    url         = {http://www.cs.brown.edu/~mph/publications.html}
     372}
     373
     374@techreport{Detlefs93,
     375    author      = {David L. Detlefs and Al Dosser and Benjamin Zorn},
     376    title       = {Memory Allocation Costs in Large {C} and {C++} Programs},
     377    number      = {CU-CS-665-93},
     378    institution = {University of Colorado},
     379    address     = {130 Lytton Avenue, Palo Alto, CA 94301 and Campus Box 430, Boulder, CO 80309},
     380    year        = 1993,
     381    url         = {http://citeseer.ist.psu.edu/detlefs93memory.html}
     382}
     383
     384@inproceedings{Oyama99,
     385    author      = {Y. Oyama and K. Taura and A. Yonezawa},
     386    title       = {Executing Parallel Programs With Synchronization Bottlenecks Efficiently},
     387    booktitle   = {Proceedings of International Workshop on Parallel and Distributed Computing for Symbolic and Irregular Applications (PDSIA '99)},
     388    year        = {1999},
     389    pages       = {182--204},
     390    publisher   = {World Scientific},
     391    address     = {Sendai, Japan},
     392}
     393
     394@inproceedings{Dice02,
     395    author      = {Dave Dice and Alex Garthwaite},
     396    title       = {Mostly Lock-Free Malloc},
     397    booktitle   = {Proceedings of the 3rd international symposium on Memory management (ISMM'02)},
     398    month       = jun,
     399    year        = 2002,
     400    pages       = {163-174},
     401    location    = {Berlin, Germany},
     402    publisher   = {ACM},
     403    address     = {New York, NY, USA},
     404}
  • doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex

    r4559b34 r92538ab  
    6060% For hyperlinked PDF, suitable for viewing on a computer, use this:
    6161\documentclass[letterpaper,12pt,titlepage,oneside,final]{book}
     62\usepackage[T1]{fontenc}        % Latin-1 => 256 characters, => | not dash, <> not Spanish question marks
    6263
    6364% For PDF, suitable for double-sided printing, change the PrintVersion variable below to "true" and use this \documentclass line instead of the one above:
     
    8586\usepackage{comment} % Removes large sections of the document.
    8687\usepackage{tabularx}
     88\usepackage{subfigure}
     89
     90\usepackage{algorithm}
     91\usepackage{algpseudocode}
    8792
    8893% Hyperlinks make it very easy to navigate an electronic document.
     
    167172\input{common}
    168173%\usepackageinput{common}
    169 \CFAStyle                                               % CFA code-style for all languages
    170 \lstset{basicstyle=\linespread{0.9}\tt}                 % CFA typewriter font
     174\CFAStyle                                               % CFA code-style
     175\lstset{language=CFA}                                   % default language
     176\lstset{basicstyle=\linespread{0.9}\sf}                 % CFA typewriter font
     177\newcommand{\uC}{$\mu$\CC}
    171178\newcommand{\PAB}[1]{{\color{red}PAB: #1}}
    172179
     
    224231\addcontentsline{toc}{chapter}{\textbf{References}}
    225232
    226 \bibliography{uw-ethesis,pl}
     233\bibliography{pl,uw-ethesis}
    227234% Tip: You can create multiple .bib files to organize your references.
    228235% Just list them all in the \bibliogaphy command, separated by commas (no spaces).
  • doc/theses/thierry_delisle_PhD/thesis/Makefile

    r4559b34 r92538ab  
    2929PICTURES = ${addsuffix .pstex, \
    3030        base \
     31        base_avg \
    3132        empty \
    3233        emptybit \
     
    3839        system \
    3940        cycle \
     41        result.cycle.jax.ops \
    4042}
    4143
     
    112114        python3 $< $@
    113115
     116build/result.%.ns.svg : data/% | ${Build}
     117        ../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops"
     118
     119build/result.%.ops.svg : data/% | ${Build}
     120        ../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second"
     121
    114122## pstex with inverted colors
    115123%.dark.pstex : fig/%.fig Makefile | ${Build}
  • doc/theses/thierry_delisle_PhD/thesis/fig/base.fig

    r4559b34 r92538ab  
    8989         5700 5210 5550 4950 5250 4950 5100 5210 5250 5470 5550 5470
    9090         5700 5210
     912 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     92         3600 5700 3600 1200
     932 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     94         4800 5700 4800 1200
     952 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     96         6000 5700 6000 1200
    91974 2 -1 50 -1 0 12 0.0000 2 135 630 2100 3075 Threads\001
    92984 2 -1 50 -1 0 12 0.0000 2 165 450 2100 2850 Ready\001
  • doc/theses/thierry_delisle_PhD/thesis/glossary.tex

    r4559b34 r92538ab  
    101101
    102102\longnewglossaryentry{at}
    103 {name={fred}}
     103{name={task}}
    104104{
    105105Abstract object representing a unit of work. Systems will offer one or more concrete implementations of this concept (\eg \gls{kthrd}, \gls{job}); however, most scheduling concepts are independent of the particular implementation of the work representation. For this reason, this document uses the term \Gls{at} to mean any representation and not one in particular.
  • doc/theses/thierry_delisle_PhD/thesis/text/core.tex

    r4559b34 r92538ab  
    33Before discussing scheduling in general, where it is important to address systems that are changing states, this document discusses scheduling in a somewhat ideal scenario, where the system has reached a steady state. For this purpose, a steady state is loosely defined as a state where there are always \glspl{thrd} ready to run and the system has the resources necessary to accomplish the work, \eg, enough workers. In short, the system is neither overloaded nor underloaded.
    44
    5 I believe it is important to discuss the steady state first because it is the easiest case to handle and, relatedly, the case in which the best performance is to be expected. As such, when the system is either overloaded or underloaded, a common approach is to try to adapt the system to this new load and return to the steady state, \eg, by adding or removing workers. Therefore, flaws in scheduling the steady state can to be pervasive in all states.
     5It is important to discuss the steady state first because it is the easiest case to handle and, relatedly, the case in which the best performance is to be expected. As such, when the system is either overloaded or underloaded, a common approach is to try to adapt the system to this new load and return to the steady state, \eg, by adding or removing workers. Therefore, flaws in scheduling the steady state tend to be pervasive in all states.
    66
    77\section{Design Goals}
     
    2525It is important to note that these guarantees are expected only up to a point. \Glspl{thrd} that are ready to run should not be prevented from doing so, but they still share the limited hardware resources. Therefore, the guarantee is considered respected if a \gls{thrd} gets access to a \emph{fair share} of the hardware resources, even if that share is very small.
    2626
    27 Similarly the performance guarantee, the lack of interference among threads, is only relevant up to a point. Ideally, the cost of running and blocking should be constant regardless of contention, but the guarantee is considered satisfied if the cost is not \emph{too high} with or without contention. How much is an acceptable cost is obviously highly variable. For this document, the performance experimentation attempts to show the cost of scheduling is at worst equivalent to existing algorithms used in popular languages. This demonstration can be made by comparing applications built in \CFA to applications built with other languages or other models. Recall programmer expectation is that the impact of the scheduler can be ignored. Therefore, if the cost of scheduling is equivalent to or lower than other popular languages, I consider the guarantee achieved.
     27Similarly, the performance guarantee, the lack of interference among threads, is only relevant up to a point. Ideally, the cost of running and blocking should be constant regardless of contention, but the guarantee is considered satisfied if the cost is not \emph{too high} with or without contention. How much is an acceptable cost is obviously highly variable. For this document, the performance experimentation attempts to show the cost of scheduling is at worst equivalent to existing algorithms used in popular languages. This demonstration can be made by comparing applications built in \CFA to applications built with other languages or other models. Recall programmer expectation is that the impact of the scheduler can be ignored. Therefore, if the cost of scheduling is competitive with other popular languages, the guarantee is considered achieved.
    2828
    2929More precisely the scheduler should be:
     
    3333\end{itemize}
    3434
    35 \subsection{Fairness vs Scheduler Locality}
     35\subsection{Fairness vs Scheduler Locality} \label{fairnessvlocal}
    3636An important performance factor in modern architectures is cache locality. Waiting for data at lower levels or not present in the cache can have a major impact on performance. Having multiple \glspl{hthrd} writing to the same cache lines also leads to cache lines that must be waited on. It is therefore preferable to divide data among each \gls{hthrd}\footnote{This partitioning can be an explicit division up front or using data structures where different \glspl{hthrd} are naturally routed to different cache lines.}.
    3737
    38 For a scheduler, having good locality\footnote{This section discusses \emph{internal locality}, \ie, the locality of the data used by the scheduler versus \emph{external locality}, \ie, how the data used by the application is affected by scheduling. External locality is a much more complicated subject and is discussed in part~\ref{Evaluation} on evaluation.}, \ie, having the data local to each \gls{hthrd}, generally conflicts with fairness. Indeed, good locality often requires avoiding the movement of cache lines, while fairness requires dynamically moving a \gls{thrd}, and as consequence cache lines, to a \gls{hthrd} that is currently available.
     38For a scheduler, having good locality\footnote{This section discusses \emph{internal locality}, \ie, the locality of the data used by the scheduler versus \emph{external locality}, \ie, how the data used by the application is affected by scheduling. External locality is a much more complicated subject and is discussed in the next section.}, \ie, having the data local to each \gls{hthrd}, generally conflicts with fairness. Indeed, good locality often requires avoiding the movement of cache lines, while fairness requires dynamically moving a \gls{thrd}, and as consequence cache lines, to a \gls{hthrd} that is currently available.
    3939
    4040However, I claim that in practice it is possible to strike a balance between fairness and performance because these goals do not necessarily overlap temporally, where Figure~\ref{fig:fair} shows a visual representation of this behaviour. As mentioned, some unfairness is acceptable; therefore it is desirable to have an algorithm that prioritizes cache locality as long as thread delay does not exceed the execution mental-model.
     
    4848\end{figure}
    4949
    50 \section{Design}
     50\subsection{Performance Challenges}\label{pref:challenge}
     51While there exists a multitude of potential scheduling algorithms, they generally always have to contend with the same performance challenges. Since these challenges are recurring themes in the design of a scheduler it is relevant to describe the central ones here before looking at the design.
     52
     53\subsubsection{Scalability}
     54The most basic performance challenge of a scheduler is scalability.
     55Given a large number of \procs and an even larger number of \ats, scalability measures how fast \procs can enqueue and dequeue \ats.
     56One could expect that doubling the number of \procs would double the rate at which \ats are dequeued, but contention on the internal data structures of the scheduler can lead to much worse improvements.
     57While the ready-queue itself can be sharded to alleviate the main source of contention, auxiliary scheduling features, \eg counting ready \ats, can also be sources of contention.
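To make the point concrete, the following C sketch (illustrative only, not code from the \CFA runtime) shows how such an auxiliary counter of ready \ats can itself be sharded: each \proc updates its own cache-line-padded counter without contention, at the price of an approximate, linear-time read.
\begin{lstlisting}
#include <stdatomic.h>

#define NPROCS 64                       // illustrative number of processors

// One padded counter per processor: updates never contend on a shared cache
// line, unlike a single global atomic counter which serializes every update.
static struct {
        _Atomic long cnt;
        char pad[64 - sizeof(_Atomic long)];
} ready[NPROCS];

static inline void ready_inc( unsigned proc ) {        // contention-free update
        atomic_fetch_add_explicit( &ready[proc].cnt, 1, memory_order_relaxed );
}

static inline long ready_total( void ) {                // O(NPROCS) approximate read
        long sum = 0;
        for ( unsigned i = 0; i < NPROCS; i += 1 )
                sum += atomic_load_explicit( &ready[i].cnt, memory_order_relaxed );
        return sum;
}
\end{lstlisting}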
     58
     59\subsubsection{Migration Cost}
     60Another important source of latency in scheduling is migration.
     60An \at is said to have migrated if it is executed by two different \procs consecutively, which is the process discussed in \ref{fairnessvlocal}.
     61Migrations can have many different causes, but in certain programs it can be all but impossible to limit them.
     62Chapter~\ref{microbench}, for example, has a benchmark where any \at can potentially unblock any other \at, which can lead to \ats migrating more often than not.
     64Because of this it is important to design the internal data structures of the scheduler to limit the latency penalty from migrations.
     65
     66
     67\section{Inspirations}
    5168In general, a na\"{i}ve \glsxtrshort{fifo} ready-queue does not scale with increased parallelism from \glspl{hthrd}, resulting in decreased performance. The problem is adding/removing \glspl{thrd} is a single point of contention. As shown in the evaluation sections, most production schedulers do scale when adding \glspl{hthrd}. The solution to this problem is to shard the ready-queue : create multiple sub-ready-queues that multiple \glspl{hthrd} can access and modify without interfering.
    5269
    53 Before going into the design of \CFA's scheduler proper, I want to discuss two sharding solutions which served as the inspiration scheduler in this thesis.
     70Before going into the design of \CFA's scheduler proper, it is relevant to discuss two sharding solutions which served as the inspiration for the scheduler in this thesis.
    5471
    5572\subsection{Work-Stealing}
    5673
    57 As I mentioned in \ref{existing:workstealing}, a popular pattern shard the ready-queue is work-stealing. As mentionned, in this pattern each \gls{proc} has its own ready-queue and \glspl{proc} only access each other's ready-queue if they run out of work.
    58 The interesting aspect of workstealing happen in easier scheduling cases, \ie enough work for everyone but no more and no load balancing needed. In these cases, work-stealing is close to optimal scheduling: it can achieve perfect locality and have no contention.
     74As mentioned in \ref{existing:workstealing}, a popular pattern for sharding the ready-queue is work-stealing.
     75In this pattern each \gls{proc} has its own local ready-queue and \glspl{proc} only access each other's ready-queue if they run out of work on their local ready-queue.
     76The interesting aspect of work-stealing happens in the easier scheduling cases, \ie enough work for everyone but no more and no load balancing needed.
     77In these cases, work-stealing is close to optimal scheduling: it can achieve perfect locality and have no contention.
    5978On the other hand, work-stealing schedulers only attempt to do load-balancing when a \gls{proc} runs out of work.
    60 This means that the scheduler may never balance unfairness that does not result in a \gls{proc} running out of work.
     79This means that the scheduler never balances unfair loads unless they result in a \gls{proc} running out of work.
    6180Chapter~\ref{microbench} shows that in pathological cases this problem can lead to indefinite starvation.
    6281
    6382
     83Based on these observations, the conclusion is that a \emph{perfect} scheduler should behave very similarly to work-stealing in the easy cases, but should have more proactive load-balancing if the need arises.
     83Based on these observation, the conclusion is that a \emph{perfect} scheduler should behave very similarly to work-stealing in the easy cases, but should have more proactive load-balancing if the need arises.
    6584
    6685\subsection{Relaxed-Fifo}
    6786An entirely different scheme is to create a ``relaxed-FIFO'' queue as in \todo{cite Trevor's paper}. This approach forgoes any ownership between \gls{proc} and ready-queue, and simply creates a pool of ready-queues from which the \glspl{proc} can pick.
    6887\Glspl{proc} choose ready-queues at random, but timestamps are added to all elements of the queue and dequeues are done by picking two queues and dequeuing the oldest element.
     88All subqueues are protected by TryLocks and \procs simply pick a different subqueue if they fail to acquire the TryLock.
    6989The result is a queue that has both decent scalability and sufficient fairness.
    7090The lack of ownership means that as long as one \gls{proc} is still able to repeatedly dequeue elements, it is unlikely that any element will stay on the queue for much longer than any other element.
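The pop operation just described can be sketched in C as follows; this is a minimal illustration under assumed names (\texttt{subqueue}, \texttt{pick\_random}, \dots) rather than the implementation from the cited work, and it simply spins if every sub-queue is empty: two sub-queues are chosen at random, the one whose head carries the oldest timestamp is try-locked, and the operation retries if the lock is busy.
\begin{lstlisting}
#include <pthread.h>
#include <stddef.h>

struct node { unsigned long long ts; struct node * next; };   // ts set at enqueue
struct subqueue { pthread_mutex_t lock; struct node * head, ** tail; };

extern struct subqueue queues[];        // pool of sub-queues
extern unsigned pick_random( void );    // uniform pick over the sub-queue indices

static struct node * try_pop( struct subqueue * q ) {
        if ( pthread_mutex_trylock( &q->lock ) != 0 ) return NULL;   // busy => pick again
        struct node * n = q->head;
        if ( n ) { q->head = n->next; if ( ! q->head ) q->tail = &q->head; }
        pthread_mutex_unlock( &q->lock );
        return n;
}

struct node * relaxed_pop( void ) {
        for ( ;; ) {
                struct subqueue * a = &queues[ pick_random() ];
                struct subqueue * b = &queues[ pick_random() ];
                // unsynchronized peek at the head timestamps, good enough as a heuristic
                struct node * ha = a->head, * hb = b->head;
                struct subqueue * victim = ( ha && hb ) ? ( ha->ts <= hb->ts ? a : b )
                                                        : ( ha ? a : b );
                if ( victim->head == NULL ) continue;    // both empty => pick two new queues
                struct node * n = try_pop( victim );
                if ( n ) return n;                       // otherwise lost the race, retry
        }
}
\end{lstlisting}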
     
    7595
    7696While the fairness of this scheme is good, it does suffer in terms of performance.
    77 It requires very wide sharding, \eg at least 4 queues per \gls{hthrd}, and the randomness means locality can suffer significantly and finding non-empty queues can be difficult.
    78 
    79 \section{\CFA}
    80 The \CFA is effectively attempting to merge these two approaches, keeping the best of both.
    81 It is based on the
     97It requires very wide sharding, \eg at least 4 queues per \gls{hthrd}, and finding non-empty queues can be difficult if there are too few ready \ats.
     98
     99\section{Relaxed-FIFO++}
     100Since it has inherent fairness qualities and decent performance in the presence of many \ats, the relaxed-FIFO queue appears to be a good candidate to form the basis of a scheduler.
     101The most obvious problem is for workloads where the number of \ats is barely greater than the number of \procs.
     102In these situations, the wide sharding means most of the sub-queues from which the relaxed queue is formed will be empty.
     103The consequence is that when a dequeue operation attempts to pick a sub-queue at random, it is likely to pick an empty sub-queue and have to pick again.
     104This problem can repeat an unbounded number of times.
     105
     106As this is the most obvious challenge, it is worth addressing first.
     107The obvious solution is to supplement each subqueue with some sharded data structure that keeps track of which subqueues are empty.
     108This data structure can take many forms, for example a simple bitmask or a binary tree that tracks which branches are empty.
     109Following a binary tree on each pick has fairly good big-O complexity, and many modern architectures have powerful bitmask-manipulation instructions.
     110However, precisely tracking which sub-queues are empty is actually fundamentally problematic.
     111The reason is that each sub-queue is already a form of sharding and the sharding width has presumably already been chosen to avoid contention.
     112However, tracking which ready queues are empty is only useful if the tracking mechanism uses denser sharding than the sub-queues, in which case it invariably creates a new source of contention.
     113But if the tracking mechanism is not denser than the sub-queues, then it generally does not provide useful information because reading this new data structure risks being as costly as simply picking a sub-queue at random.
     114Early experiments with this approach have shown that even with low success rates, randomly picking a sub-queue can be faster than a simple tree walk.
     115
     116The exception to this rule is using local tracking.
     117If each \proc keeps track locally of which sub-queues are empty, then this can be done with a very dense data structure without introducing a new source of contention.
     118The consequence of local tracking, however, is that the information is not complete.
     119Each \proc is only aware of the last state it saw for each sub-queue but does not have any information about freshness.
     120Even on systems with low \gls{hthrd} count, \eg 4 or 8, this can quickly lead to the local information being no better than the random pick.
     121This is due in part to the cost of maintaining this information and its poor quality.
     122
     123However, using a very low cost approach to local tracking may actually be beneficial.
     124If the local tracking is no more costly than the random pick, then \emph{any} improvement to the success rate, however small, leads to a performance benefit.
     125This leads to the following approach:
     126
     127\subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/}
     128The Relaxed-FIFO approach can be made to handle the case of mostly empty sub-queues by tweaking the \glsxtrlong{prng}.
     129The \glsxtrshort{prng} state can be seen as containing a list of all the future sub-queues that will be accessed.
     130While this is not particularly useful on its own, the consequence is that if the \glsxtrshort{prng} algorithm can be run \emph{backwards}, then the state also contains a list of all the subqueues that were accessed.
     131Luckily, bidirectional \glsxtrshort{prng} algorithms do exist, for example some Linear Congruential Generators\cit{https://en.wikipedia.org/wiki/Linear\_congruential\_generator} support running the algorithm backwards while offering good quality and performance.
     132This particular \glsxtrshort{prng} can be used as follows:
     133
     134Each \proc maintains two \glsxtrshort{prng} states, which will be referred to as \texttt{F} and \texttt{B}.
     135
     136When a \proc attempts to dequeue a \at, it picks the sub-queues by running \texttt{B} backwards.
     137When a \proc attempts to enqueue a \at, it runs \texttt{F} forward to pick the sub-queue to enqueue to.
     138If the enqueue is successful, the state \texttt{B} is overwritten with the content of \texttt{F}.
     139
     140The result is that each \proc will tend to dequeue \ats that it has itself enqueued.
     141When most sub-queues are empty, this technique increases the odds of finding \ats at very low cost, while also offering an improvement on locality in many cases.
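As a concrete illustration of this scheme, the following C sketch uses the well-known MMIX linear-congruential constants and the fact that an odd multiplier is invertible modulo $2^{64}$ to step the generator both forwards and backwards; the per-\proc protocol for \texttt{F} and \texttt{B} follows the description above. The constants and helper names are illustrative, not the actual \CFA runtime code.
\begin{lstlisting}
#include <stdint.h>

#define LCG_A 6364136223846793005ULL    // MMIX multiplier (odd => invertible mod 2^64)
#define LCG_C 1442695040888963407ULL    // MMIX increment

static uint64_t lcg_inv_a( void ) {     // multiplicative inverse of LCG_A mod 2^64
        uint64_t inv = LCG_A;           // Newton iteration doubles the correct bits each step
        for ( int i = 0; i < 5; i += 1 ) inv *= 2 - LCG_A * inv;
        return inv;
}

static uint64_t lcg_fwd ( uint64_t * s ) { *s = *s * LCG_A + LCG_C; return *s; }
static uint64_t lcg_back( uint64_t * s ) { *s = ( *s - LCG_C ) * lcg_inv_a(); return *s; }

struct proc_rng { uint64_t F, B; };     // per-processor forward and backward states

// enqueue: advance F to pick a sub-queue; on success remember it by copying F into B
static unsigned pick_push( struct proc_rng * r, unsigned nqueues ) {
        return (unsigned)( lcg_fwd( &r->F ) % nqueues );
}
static void push_success( struct proc_rng * r ) { r->B = r->F; }

// dequeue: replay the most recently used sub-queues by running B backwards
static unsigned pick_pop( struct proc_rng * r, unsigned nqueues ) {
        unsigned idx = (unsigned)( r->B % nqueues );    // current B is the last successful push
        lcg_back( &r->B );                              // step back for the next attempt
        return idx;
}
\end{lstlisting}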
     142
     143However, while this approach does notably improve performance in many cases, this algorithm is still not competitive with work-stealing algorithms.
     144The fundamental problem is that the constant randomness limits how much locality the scheduler offers.
     145This becomes problematic both because the scheduler is likely to get cache misses on internal data structures and because migrations become very frequent.
     146Therefore, since modifying the relaxed-FIFO algorithm to behave more like work stealing does not seem to pan out, the alternative is to do it the other way around.
     147
     148\section{Work Stealing++}
     149Adding stronger fairness guarantees to work-stealing requires a few changes.
     150First, the relaxed-FIFO algorithm has fundamentally better fairness because each \proc always monitors all subqueues.
     151Therefore the workstealing algorithm must be prepended with some monitoring.
     152Before attempting to dequeue from a \proc's local queue, the \proc must make some effort to make sure remote queues are not being neglected.
     153To make this possible, \procs must be able to determine which \at has been on the ready-queue the longest.
     154This is the second aspect that must be added.
     155The relaxed-FIFO approach uses timestamps for each \at and this is also what is done here.
     156
    82157\begin{figure}
    83158        \centering
    84159        \input{base.pstex_t}
    85         \caption[Base \CFA design]{Base \CFA design \smallskip\newline A list of sub-ready queues offers the sharding, two per \glspl{proc}. However, \glspl{proc} can access any of the sub-queues.}
     160        \caption[Base \CFA design]{Base \CFA design \smallskip\newline A pool of sub-ready-queues offers the sharding, two per \gls{proc}. Each \gls{proc} has local sub-queues; however, \glspl{proc} can access any of the sub-queues. Each \at is timestamped when enqueued.}
    86161        \label{fig:base}
    87162\end{figure}
    88 
    89 
    90 
    91 % The common solution to the single point of contention is to shard the ready-queue so each \gls{hthrd} can access the ready-queue without contention, increasing performance.
    92 
    93 % \subsection{Sharding} \label{sec:sharding}
    94 % An interesting approach to sharding a queue is presented in \cit{Trevors paper}. This algorithm presents a queue with a relaxed \glsxtrshort{fifo} guarantee using an array of strictly \glsxtrshort{fifo} sublists as shown in Figure~\ref{fig:base}. Each \emph{cell} of the array has a timestamp for the last operation and a pointer to a linked-list with a lock. Each node in the list is marked with a timestamp indicating when it is added to the list. A push operation is done by picking a random cell, acquiring the list lock, and pushing to the list. If the cell is locked, the operation is simply retried on another random cell until a lock is acquired. A pop operation is done in a similar fashion except two random cells are picked. If both cells are unlocked with non-empty lists, the operation pops the node with the oldest timestamp. If one of the cells is unlocked and non-empty, the operation pops from that cell. If both cells are either locked or empty, the operation picks two new random cells and tries again.
    95 
    96 % \begin{figure}
    97 %       \centering
    98 %       \input{base.pstex_t}
    99 %       \caption[Relaxed FIFO list]{Relaxed FIFO list \smallskip\newline List at the base of the scheduler: an array of strictly FIFO lists. The timestamp is in all nodes and cell arrays.}
    100 %       \label{fig:base}
    101 % \end{figure}
    102 
    103 % \subsection{Finding threads}
    104 % Once threads have been distributed onto multiple queues, identifying empty queues becomes a problem. Indeed, if the number of \glspl{thrd} does not far exceed the number of queues, it is probable that several of the cell queues are empty. Figure~\ref{fig:empty} shows an example with 2 \glspl{thrd} running on 8 queues, where the chances of getting an empty queue is 75\% per pick, meaning two random picks yield a \gls{thrd} only half the time. This scenario leads to performance problems since picks that do not yield a \gls{thrd} are not useful and do not necessarily help make more informed guesses.
    105 
    106 % \begin{figure}
    107 %       \centering
    108 %       \input{empty.pstex_t}
    109 %       \caption[``More empty'' Relaxed FIFO list]{``More empty'' Relaxed FIFO list \smallskip\newline Emptier state of the queue: the array contains many empty cells, that is strictly FIFO lists containing no elements.}
    110 %       \label{fig:empty}
    111 % \end{figure}
    112 
    113 % There are several solutions to this problem, but they ultimately all have to encode if a cell has an empty list. My results show the density and locality of this encoding is generally the dominating factor in these scheme. Classic solutions to this problem use one of three techniques to encode the information:
    114 
    115 % \paragraph{Dense Information} Figure~\ref{fig:emptybit} shows a dense bitmask to identify the cell queues currently in use. This approach means processors can often find \glspl{thrd} in constant time, regardless of how many underlying queues are empty. Furthermore, modern x86 CPUs have extended bit manipulation instructions (BMI2) that allow searching the bitmask with very little overhead compared to the randomized selection approach for a filled ready queue, offering good performance even in cases with many empty inner queues. However, this technique has its limits: with a single word\footnote{Word refers here to however many bits can be written atomically.} bitmask, the total amount of ready-queue sharding is limited to the number of bits in the word. With a multi-word bitmask, this maximum limit can be increased arbitrarily, but the look-up time increases. Finally, a dense bitmap, either single or multi-word, causes additional contention problems that reduces performance because of cache misses after updates. This central update bottleneck also means the information in the bitmask is more often stale before a processor can use it to find an item, \ie mask read says there are available \glspl{thrd} but none on queue when the subsequent atomic check is done.
    116 
    117 % \begin{figure}
    118 %       \centering
    119 %       \vspace*{-5pt}
    120 %       {\resizebox{0.75\textwidth}{!}{\input{emptybit.pstex_t}}}
    121 %       \vspace*{-5pt}
    122 %       \caption[Underloaded queue with bitmask]{Underloaded queue with bitmask indicating array cells with items.}
    123 %       \label{fig:emptybit}
    124 
    125 %       \vspace*{10pt}
    126 %       {\resizebox{0.75\textwidth}{!}{\input{emptytree.pstex_t}}}
    127 %       \vspace*{-5pt}
    128 %       \caption[Underloaded queue with binary search-tree]{Underloaded queue with binary search-tree indicating array cells with items.}
    129 %       \label{fig:emptytree}
    130 
    131 %       \vspace*{10pt}
    132 %       {\resizebox{0.95\textwidth}{!}{\input{emptytls.pstex_t}}}
    133 %       \vspace*{-5pt}
    134 %       \caption[Underloaded queue with per processor bitmask]{Underloaded queue with per processor bitmask indicating array cells with items.}
    135 %       \label{fig:emptytls}
    136 % \end{figure}
    137 
    138 % \paragraph{Sparse Information} Figure~\ref{fig:emptytree} shows an approach using a hierarchical tree data-structure to reduce contention and has been shown to work in similar cases~\cite{ellen2007snzi}. However, this approach may lead to poorer performance due to the inherent pointer chasing cost while still allowing significant contention on the nodes of the tree if the tree is shallow.
    139 
    140 % \paragraph{Local Information} Figure~\ref{fig:emptytls} shows an approach using dense information, similar to the bitmap, but each \gls{hthrd} keeps its own independent copy. While this approach can offer good scalability \emph{and} low latency, the liveliness and discovery of the information can become a problem. This case is made worst in systems with few processors where even blind random picks can find \glspl{thrd} in a few tries.
    141 
    142 % I built a prototype of these approaches and none of these techniques offer satisfying performance when few threads are present. All of these approach hit the same 2 problems. First, randomly picking sub-queues is very fast. That speed means any improvement to the hit rate can easily be countered by a slow-down in look-up speed, whether or not there are empty lists. Second, the array is already sharded to avoid contention bottlenecks, so any denser data structure tends to become a bottleneck. In all cases, these factors meant the best cases scenario, \ie many threads, would get worst throughput, and the worst-case scenario, few threads, would get a better hit rate, but an equivalent poor throughput. As a result I tried an entirely different approach.
    143 
    144 % \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/}
    145 % In the worst-case scenario there are only few \glspl{thrd} ready to run, or more precisely given $P$ \glspl{proc}\footnote{For simplicity, this assumes there is a one-to-one match between \glspl{proc} and \glspl{hthrd}.}, $T$ \glspl{thrd} and $\epsilon$ a very small number, than the worst case scenario can be represented by $T = P + \epsilon$, with $\epsilon \ll P$. It is important to note in this case that fairness is effectively irrelevant. Indeed, this case is close to \emph{actually matching} the model of the ``Ideal multi-tasking CPU'' on page \pageref{q:LinuxCFS}. In this context, it is possible to use a purely internal-locality based approach and still meet the fairness requirements. This approach simply has each \gls{proc} running a single \gls{thrd} repeatedly. Or from the shared ready-queue viewpoint, each \gls{proc} pushes to a given sub-queue and then pops from the \emph{same} subqueue. The challenge is for the the scheduler to achieve good performance in both the $T = P + \epsilon$ case and the $T \gg P$ case, without affecting the fairness guarantees in the later.
    146 
    147 % To handle this case, I use a \glsxtrshort{prng}\todo{Fix missing long form} in a novel way. There exist \glsxtrshort{prng}s that are fast, compact and can be run forward \emph{and} backwards.  Linear congruential generators~\cite{wiki:lcg} are an example of \glsxtrshort{prng}s of such \glsxtrshort{prng}s. The novel approach is to use the ability to run backwards to ``replay'' the \glsxtrshort{prng}. The scheduler uses an exclusive \glsxtrshort{prng} instance per \gls{proc}, the random-number seed effectively starts an encoding that produces a list of all accessed subqueues, from latest to oldest. Replaying the \glsxtrshort{prng} to identify cells accessed recently and which probably have data still cached.
    148 
    149 % The algorithm works as follows:
    150 % \begin{itemize}
    151 %       \item Each \gls{proc} has two \glsxtrshort{prng} instances, $F$ and $B$.
    152 %       \item Push and Pop operations occur as discussed in Section~\ref{sec:sharding} with the following exceptions:
    153 %       \begin{itemize}
    154 %               \item Push operations use $F$ going forward on each try and on success $F$ is copied into $B$.
    155 %               \item Pop operations use $B$ going backwards on each try.
    156 %       \end{itemize}
    157 % \end{itemize}
    158 
    159 % The main benefit of this technique is that it basically respects the desired properties of Figure~\ref{fig:fair}. When looking for work, a \gls{proc} first looks at the last cell they pushed to, if any, and then move backwards through its accessed cells. As the \gls{proc} continues looking for work, $F$ moves backwards and $B$ stays in place. As a result, the relation between the two becomes weaker, which means that the probablisitic fairness of the algorithm reverts to normal. Chapter~\ref{proofs} discusses more formally the fairness guarantees of this algorithm.
    160 
    161 % \section{Details}
     163The algorithm is structured as shown in Figure~\ref{fig:base}.
     164This is very similar to classic work-stealing except the local queues are placed in an array so \procs can access each other's queues in constant time.
     165The sharding width can be adjusted based on need.
     166When a \proc attempts to dequeue a \at, it first picks a random remote queue and compares its timestamp to the timestamps of the local queue(s), dequeuing from the remote queue if needed.
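A minimal C sketch of this dequeue policy is given below; the names (\texttt{head\_timestamp}, \texttt{try\_pop}, \texttt{get\_queue}, \dots) are assumptions made for the illustration rather than the actual runtime interface: the oldest (head) timestamp of one randomly chosen remote sub-queue is compared against the local one, and the \proc helps the remote sub-queue only when its head has waited longer.
\begin{lstlisting}
#include <stdint.h>
#include <stddef.h>

struct task;                     // a ready task, timestamped when enqueued
struct subqueue;                 // strictly FIFO sub-queue, contents opaque here

extern unsigned my_queue;                               // index of this processor's local sub-queue
extern unsigned pick_random( void );                    // uniform pick over all sub-queues
extern struct subqueue * get_queue( unsigned idx );
extern uint64_t head_timestamp( struct subqueue * );    // UINT64_MAX if empty
extern struct task * try_pop( struct subqueue * );      // NULL if empty or locked

struct task * dequeue( void ) {
        struct subqueue * local  = get_queue( my_queue );
        struct subqueue * remote = get_queue( pick_random() );
        // help the remote sub-queue only if its oldest task has waited longer
        struct subqueue * victim =
                head_timestamp( remote ) < head_timestamp( local ) ? remote : local;
        struct task * t = try_pop( victim );
        if ( t == NULL && victim != local ) t = try_pop( local );  // fall back on the local queue
        return t;                                                   // NULL means retry or idle
}
\end{lstlisting}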
     167
     168Implemented as naively stated above, this approach has some obvious performance problems.
     169First, it is necessary to have some damping effect on helping.
     170Random effects like cache misses and preemption can add spurious but short bursts of latency for which helping is not helpful, pun intended.
     171The effect of these bursts would be to cause more migrations than needed and make this work-stealing approach slow down to match the relaxed-FIFO approach.
     172
     173\begin{figure}
     174        \centering
     175        \input{base_avg.pstex_t}
     176        \caption[\CFA design with Moving Average]{\CFA design with Moving Average \smallskip\newline A moving average is added to each subqueue.}
     177        \label{fig:base-ma}
     178\end{figure}
     179
     180A simple solution to this problem is to compare an exponential moving average\cit{https://en.wikipedia.org/wiki/Moving\_average\#Exponential\_moving\_average} instead of the raw timestamps, as shown in Figure~\ref{fig:base-ma}.
     181Note that this is slightly more complex than it sounds: since the \at at the head of a subqueue is still waiting, its wait time has not ended.
     182Therefore the exponential moving average is actually an exponential moving average of how long each already-dequeued \at waited.
     183To compare subqueues, the timestamp at the head must be compared to the current time, yielding the best-case wait time for the \at at the head of the queue.
     184This new wait time is averaged with the stored average.
     185To further limit unnecessary migration, a bias can be added in favour of the local queue, where a remote queue is helped only if its moving average is more than \emph{X} times the local queue's average.
     186None of the experimentation run with these schedulers seems to indicate that the choice of the weight for the moving average or the choice of bias is particularly important.
     187Weights and biases of similar \emph{magnitudes} have similar effects.
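The helping decision can therefore be sketched as follows in C, with illustrative names and constants (\texttt{EMA\_WEIGHT}, \texttt{HELP\_BIAS}) standing in for whatever weight and bias a concrete implementation chooses:
\begin{lstlisting}
#include <stdbool.h>
#include <stdint.h>

#define EMA_WEIGHT 0.125   // weight of the newest sample (illustrative)
#define HELP_BIAS  2.0     // help only if the remote average is twice the local one (illustrative)

struct subqueue_stats { double avg_wait; uint64_t head_ts; };

// called when a task is dequeued: fold its completed wait time into the average
static void update_avg( struct subqueue_stats * s, uint64_t now, uint64_t enqueue_ts ) {
        double wait = (double)( now - enqueue_ts );
        s->avg_wait = EMA_WEIGHT * wait + ( 1.0 - EMA_WEIGHT ) * s->avg_wait;
}

// the head task is still waiting, so its best-case wait (now - head_ts) is
// folded into the stored average before comparing sub-queues
static double current_avg( const struct subqueue_stats * s, uint64_t now ) {
        double pending = (double)( now - s->head_ts );
        return EMA_WEIGHT * pending + ( 1.0 - EMA_WEIGHT ) * s->avg_wait;
}

static bool should_help( const struct subqueue_stats * remote,
                         const struct subqueue_stats * local, uint64_t now ) {
        return current_avg( remote, now ) > HELP_BIAS * current_avg( local, now );
}
\end{lstlisting}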
     188
     189With these additions to work-stealing, scheduling can be made as fair as the relaxed-FIFO approach, while avoiding the majority of unnecessary migrations.
     190Unfortunately, the performance of this approach does suffer in cases with no risk of starvation.
     191The problem is that the constant polling of remote subqueues generally entails a cache miss.
     192To make things worse, the more active a remote subqueue is, \ie the more frequently \ats are enqueued and dequeued from it, the higher the chance that polling it will incur a cache miss.
     193Conversely, the active subqueues do not benefit much from helping since starvation is already a non-issue.
     194This puts the algorithm in an awkward situation where it pays a cost, but the cost itself suggests the operation was unnecessary.
     195The good news is that this problem can be mitigated.
     196
     197\subsection{Redundant Timestamps}
     198The problem with polling remote queues comes from a tension between the strong consistency requirements on the subqueues themselves and the much weaker requirements of the polling decision.
     199For the subqueues, correctness is critical. There must be a consensus among \procs on which subqueues hold which \ats.
     200Since the timestamps are used for fairness, it is also important to have consensus on which \at is the oldest.
     201However, when deciding if a remote subqueue is worth polling, correctness is much less of a problem.
     202Since the only need is that a subqueue will eventually be polled, some data staleness can be acceptable.
     203This leads to a tension where stale timestamps are only problematic in some cases.
     204Furthermore, stale timestamps can be somewhat desirable since lower freshness requirements mean less pressure on the cache-coherence protocol.
     205
     206
     207\begin{figure}
     208        \centering
     209        % \input{base_ts2.pstex_t}
     210        \caption[\CFA design with Redundant Timestamps]{\CFA design with Redundant Timestamps \smallskip\newline An array is added containing a copy of the timestamps. These timestamps are written with relaxed atomics, without fencing, leading to fewer cache invalidations.}
     211        \label{fig:base-ts2}
     212\end{figure}
     213A solution to this is to create a second array containing a copy of the timestamps and average.
     214This copy is updated \emph{after} the subqueue's critical sections using relaxed atomics.
     215\Glspl{proc} now check if polling is needed by comparing the copy of the remote timestamp instead of the actual timestamp.
     216The result is that since there is no fencing, the writes can be buffered and cause fewer cache invalidations.
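A C sketch of this idea is given below; the field and function names are assumptions for illustration only: the authoritative timestamp and average are updated under the sub-queue's lock, the shadow copy is published afterwards with relaxed atomic stores, and remote \procs read only the shadow copy when deciding whether a sub-queue is worth polling.
\begin{lstlisting}
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

struct shadow { _Atomic uint64_t ts; _Atomic uint64_t avg_bits; };
extern struct shadow shadows[];              // one shadow entry per sub-queue

// called after the sub-queue's critical section: no fences, stale values only delay helping
static void publish( unsigned idx, uint64_t head_ts, double avg ) {
        atomic_store_explicit( &shadows[idx].ts, head_ts, memory_order_relaxed );
        uint64_t bits; memcpy( &bits, &avg, sizeof(bits) );   // store the double as raw bits
        atomic_store_explicit( &shadows[idx].avg_bits, bits, memory_order_relaxed );
}

// used by remote processors to decide whether a sub-queue is worth polling
static uint64_t peek_ts( unsigned idx ) {
        return atomic_load_explicit( &shadows[idx].ts, memory_order_relaxed );
}
\end{lstlisting}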
     217
     218The correctness argument here is somewhat subtle.
     219The data used for deciding whether or not to poll a queue can be stale as long as it does not cause starvation.
     220Therefore, it is acceptable if stale data makes queues appear older than they really are, but not fresher.
     221For the timestamps, this means that missing writes to the timestamp are acceptable since they make the head \at look older.
     222For the moving average, as long as the operations are RW-safe, the average is guaranteed to yield a value that is between the oldest and newest values written.
     223Therefore these unprotected reads of the timestamp and average satisfy the limited correctness that is required.
     224
     225\subsection{Per CPU Sharding}
     226
     227\subsection{Topological Work Stealing}
     228
     229
  • doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex

    r4559b34 r92538ab  
    33The first step of evaluation is always to test-out small controlled cases, to ensure that the basics are working properly.
    44This section presents five different experimental setups, evaluating some of the basic features of \CFA's scheduler.
     5
     6\section{Benchmark Environment}
     7All of these benchmarks are run on two distinct hardware environments, an AMD and an Intel machine.
     8
     9\paragraph{AMD} The AMD machine is a server with two AMD EPYC 7662 CPUs and 256GB of DDR4 RAM.
     10The server runs Ubuntu 20.04.2 LTS on top of Linux Kernel 5.8.0-55.
     11These EPYCs have 64 cores per CPU and 2 \glspl{hthrd} per core, for a total of 256 \glspl{hthrd}.
     12The CPUs each have 4 MB, 64 MB and 512 MB of L1, L2 and L3 caches respectively.
     13Each L1 and L2 instance is only shared by the \glspl{hthrd} on a given core, but each L3 instance is shared by 4 cores, therefore 8 \glspl{hthrd}.
     14
     15\paragraph{Intel} The Intel machine is a server with four Intel Xeon Platinum 8160 CPUs and 384GB of DDR4 RAM.
     16The server runs Ubuntu 20.04.2 LTS on top of Linux Kernel 5.8.0-55.
     17These Xeon Platinums have 24 cores per CPU and 2 \glspl{hthrd} per core, for a total of 192 \glspl{hthrd}.
     18The CPUs each have 3 MB, 96 MB and 132 MB of L1, L2 and L3 caches respectively.
     19Each L1 and L2 instance is only shared by the \glspl{hthrd} on a given core, but each L3 instance is shared across the entire CPU, therefore 48 \glspl{hthrd}.
     20
     21This limited sharing of the last-level cache on the AMD machine is markedly different from the Intel machine. Indeed, while on both architectures L2 cache misses that are served by L3 caches on a different CPU incur a significant latency, on AMD it is also the case that cache misses served by a different L3 instance on the same CPU still incur high latency.
     22
    523
    624\section{Cycling latency}
     
    3149\end{figure}
    3250
    33 \todo{check term ``idle sleep handling''}
    3451To prevent this benchmark from being dominated by the idle-sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available.
    3552Beyond this point, adding more rings serves to further mitigate the idle-sleep handling.
    36 This is to avoid the case where one of the worker \glspl{at} runs out of work because of the variation on the number of ready \glspl{at} mentionned above.
     53This is to avoid the case where one of the \glspl{proc} runs out of work because of the variation in the number of ready \glspl{at} mentioned above.
    3754
    3855The actual benchmark is more complicated to handle termination, but that simply requires using a binary semaphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition.
    3956
    40 \todo{code, setup, results}
    4157\begin{lstlisting}
    4258        Thread.main() {
     
    5268\end{lstlisting}
    5369
     70\begin{figure}
     71        \centering
     72        \input{result.cycle.jax.ops.pstex_t}
     73        \vspace*{-10pt}
     74        \label{fig:cycle:ns:jax}
     75\end{figure}
    5476
    5577\section{Yield}
  • doc/theses/thierry_delisle_PhD/thesis/text/existing.tex

    r4559b34 r92538ab  
    11\chapter{Previous Work}\label{existing}
    2 Scheduling is a topic with a very long history, predating its use in computer science. As such, early work in computed science was inspired from other fields and focused principally on solving scheduling upfront rather that as the system is running.
     2Scheduling is the process of assigning resources to incoming requests.
     3A very common form of this is assigning available workers to work-requests.
     4The need for scheduling is very common in Computer Science, \eg Operating Systems and Hypervisors schedule available CPUs, NICs schedule available bandwidth, but scheduling is also common in other fields.
     5For example, in assembly lines, assigning parts in need of assembly to line workers is a form of scheduling.
     6
     7In all these cases, the choice of a scheduling algorithm generally depends first and foremost on how much information is available to the scheduler.
     8Workloads that are well-known, consistent and homogeneous can benefit from a scheduler that is optimized to use this information, while ill-defined, inconsistent, heterogeneous workloads require more general algorithms.
     9A secondary aspect is how much information can be gathered versus how much information must be given as part of the input.
     10There is therefore a spectrum of scheduling algorithms, going from static schedulers that are well informed from the start, to schedulers that gather most of the information needed, to schedulers that can only rely on very limited information.
     11Note that this description includes both information about each request, \eg time to complete or resources needed, and information about the relationships between requests, \eg whether or not some request must be completed before another request starts.
     12
     13Scheduling physical resources, for example in assembly lines, is generally amenable to very well-informed scheduling since information can be gathered much faster than the physical resources can be assigned and workloads are likely to stay stable for long periods of time.
     14When a faster pace is needed and changes are much more frequent, gathering information on workloads, up-front or live, can become much more limiting, and more general schedulers are needed.
    315
    416\section{Naming Convention}
    5 Scheduling has been studied by various different communities concentrating on different incarnation of the same problems. As a result, their is no real naming convention for scheduling that is respected across these communities. For this document, I will use the term \newterm{task} to refer to the abstract objects being scheduled and the term \newterm{worker} to refer to the objects which will execute these tasks.
     17Scheduling has been studied by various different communities concentrating on different incarnations of the same problem. As a result, there is no real naming convention for scheduling that is respected across these communities. For this document, I will use the term \newterm{\Gls{at}} to refer to the abstract objects being scheduled and the term \newterm{\Gls{proc}} to refer to the objects that execute these \glspl{at}.
    618
    719\section{Static Scheduling}
    8 Static schedulers require that programmers explicitly and exhaustively specify dependencies among tasks in order to schedule them. The scheduler then processes this input ahead of time and producess a \newterm{schedule} to which the system can later adhere. An example application for these schedulers
    9 
     20Static schedulers require that \glspl{at} have their dependencies and costs explicitly and exhaustively specified prior to scheduling.
     21The scheduler then processes this input ahead of time and produces a \newterm{schedule} to which the system can later adhere.
     22This approach is generally popular in real-time systems since the need for strong guarantees justifies the cost of supplying this information.
    1023In general, static schedulers are less relevant to this project since they require input from the programmers that \CFA does not have as part of its concurrency semantics.
    11 \todo{Rate-monotonic scheduling}
     24Specifying this information explicitly can add a significant burden on the programmers and reduces flexibility; for this reason, the \CFA scheduler does not require this information.
    1225
    1326
    1427\section{Dynamic Scheduling}
    15 It may be difficult to fulfill the requirements of static scheduler if dependencies are be conditionnal. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of halting or suspending a task with unfulfilled dependencies and adding one or more new task(s) to the system. The new task(s) have the responsability of adding the dependent task back in the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only tasks we no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}.
     28It may be difficult to fulfill the requirements of a static scheduler if dependencies are conditional. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of adding one or more new \glspl{at} to the system as their dependencies are resolved, as well as potentially halting or suspending a \gls{at} that dynamically detects unfulfilled dependencies. Each \gls{at} has the responsibility of adding the dependent \glspl{at} back into the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only \glspl{at} with no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}.
    1629
    1730\subsection{Explicitly Informed Dynamic Schedulers}
    18 While dynamic schedulers do not have access to an exhaustive list of dependencies for a task, they may require to provide more or less information about each task, including for example: expected duration, required ressources, relative importance, etc. The scheduler can then use this information to direct the scheduling decisions. \cit{Examples of schedulers with more information} Precisely providing this information can be difficult for programmers, especially \emph{predicted} behaviour, and the scheduler may need to support some amount of imprecision in the provided information. For example, specifying that a tasks takes approximately 5 seconds to complete, rather than exactly 5 seconds. User provided information can also become a significant burden depending how the effort to provide the information scales with the number of tasks and there complexity. For example, providing an exhaustive list of files read by 5 tasks is an easier requirement the providing an exhaustive list of memory addresses accessed by 10'000 distinct tasks.
     31While dynamic schedulers do not have access to an exhaustive list of dependencies for a \gls{at}, they may require programmers to provide more or less information about each \gls{at}, including for example: expected duration, required resources, relative importance, etc. The scheduler can then use this information to direct the scheduling decisions. \cit{Examples of schedulers with more information} Precisely providing this information can be difficult for programmers, especially \emph{predicted} behaviour, and the scheduler may need to support some amount of imprecision in the provided information. For example, specifying that a \gls{at} takes approximately 5 seconds to complete, rather than exactly 5 seconds. User-provided information can also become a significant burden depending on how the effort to provide the information scales with the number of \glspl{at} and their complexity. For example, providing an exhaustive list of files read by 5 \glspl{at} is an easier requirement than providing an exhaustive list of memory addresses accessed by 10'000 distinct \glspl{at}.
    1932
    2033Since the goal of this thesis is to provide a scheduler as a replacement for \CFA's existing \emph{uninformed} scheduler, Explicitly Informed schedulers are less relevant to this project. Nevertheless, some strategies are worth mentioning.
    2134
    2235\subsubsection{Priority Scheduling}
    23 A commonly used information that schedulers used to direct the algorithm is priorities. Each Task is given a priority and higher-priority tasks are preferred to lower-priority ones. The simplest priority scheduling algorithm is to simply require that every task have a distinct pre-established priority and always run the available task with the highest priority. Asking programmers to provide an exhaustive set of unique priorities can be prohibitive when the system has a large number of tasks. It can therefore be diserable for schedulers to support tasks with identical priorities and/or automatically setting and adjusting priorites for tasks.
     36A commonly used piece of information that schedulers use to direct the algorithm is priorities. Each \gls{at} is given a priority and higher-priority \glspl{at} are preferred to lower-priority ones. The simplest priority scheduling algorithm is to require that every \gls{at} have a distinct pre-established priority and always run the available \gls{at} with the highest priority. Asking programmers to provide an exhaustive set of unique priorities can be prohibitive when the system has a large number of \glspl{at}. It can therefore be desirable for schedulers to support \glspl{at} with identical priorities and/or automatically setting and adjusting priorities for \glspl{at}. Most common operating systems use some variation on priorities with overlaps and dynamic priority adjustments. For example, Microsoft Windows uses a pair of priorities
     37\cit{https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities,https://docs.microsoft.com/en-us/windows/win32/taskschd/taskschedulerschema-priority-settingstype-element}, one specified by users out of ten possible options and one adjusted by the system.
    2438
    2539\subsection{Uninformed and Self-Informed Dynamic Schedulers}
    26 Several scheduling algorithms do not require programmers to provide additionnal information on each task, and instead make scheduling decisions based solely on internal state and/or information implicitly gathered by the scheduler.
     40Several scheduling algorithms do not require programmers to provide additional information on each \gls{at}, and instead make scheduling decisions based solely on internal state and/or information implicitly gathered by the scheduler.
    2741
    2842
    2943\subsubsection{Feedback Scheduling}
    30 As mentionned, Schedulers may also gather information about each tasks to direct their decisions. This design effectively moves the scheduler to some extent into the realm of \newterm{Control Theory}\cite{wiki:controltheory}. This gathering does not generally involve programmers and as such does not increase programmer burden the same way explicitly provided information may. However, some feedback schedulers do offer the option to programmers to offer additionnal information on certain tasks, in order to direct scheduling decision. The important distinction being whether or not the scheduler can function without this additionnal information.
    31 
    32 Feedback scheduler
     44As mentioned, schedulers may also gather information about each \gls{at} to direct their decisions. This design effectively moves the scheduler to some extent into the realm of \newterm{Control Theory}\cite{wiki:controltheory}. This gathering does not generally involve programmers and as such does not increase programmer burden the same way explicitly provided information may. However, some feedback schedulers do offer programmers the option to provide additional information on certain \glspl{at}, in order to direct scheduling decisions. The important distinction is whether or not the scheduler can function without this additional information.
    3345
    3446
    3547\section{Work Stealing}\label{existing:workstealing}
     36 One of the most popular scheduling algorithms in practice (see~\ref{existing:prod}) is work-stealing. This idea, introduced by \cite{DBLP:conf/fpca/BurtonS81}, effectively has each worker work on its local tasks first, but allows the possibility for other workers to steal local tasks if they run out of tasks. \cite{DBLP:conf/focs/Blumofe94} introduced the more familiar incarnation of this, where each worker has a queue of tasks to accomplish and workers without tasks steal tasks from random workers. (The Burton and Sleep algorithm had trees of tasks and stole only among neighbours.) Blumofe and Leiserson also prove worst-case space and time requirements for well-structured computations.
      48One of the most popular scheduling algorithms in practice (see~\ref{existing:prod}) is work-stealing. This idea, introduced by \cite{DBLP:conf/fpca/BurtonS81}, effectively has each worker work on its local \glspl{at} first, but allows the possibility for other workers to steal local \glspl{at} if they run out of \glspl{at}. \cite{DBLP:conf/focs/Blumofe94} introduced the more familiar incarnation of this, where each worker has a queue of \glspl{at} to accomplish and workers without \glspl{at} steal \glspl{at} from random workers. (The Burton and Sleep algorithm had trees of \glspl{at} and stole only among neighbours.) Blumofe and Leiserson also prove worst-case space and time requirements for well-structured computations.
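The core loop of such a scheduler can be sketched as follows (the types and queue helpers are hypothetical and all synchronization is omitted; this is not the \CFA runtime): each worker drains its own queue first and only steals from a randomly chosen victim when that queue is empty.
\begin{cfa}
// Illustrative work-stealing loop; Worker, Task and the queue helpers are hypothetical.
#include <stdlib.h>                        // rand

typedef struct Task Task;
typedef struct Worker Worker;
Task * pop_local( Worker * );              // pop from the worker's own queue
Task * steal( Worker * victim );           // steal one task from a victim's queue
void run( Task * );
void idle( Worker * );

void worker_loop( Worker * self, Worker * workers, unsigned nworkers ) {
	for ( ;; ) {
		Task * t = pop_local( self );                        // local work first
		if ( t == 0 ) {
			Worker * victim = &workers[ rand() % nworkers ]; // pick a random victim
			t = steal( victim );                             // steal when out of local work
		}
		if ( t != 0 ) run( t );
		else idle( self );                                   // nothing to run or steal
	}
}
\end{cfa}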
    3749
     3850Many variations of this algorithm have been proposed over the years\cite{DBLP:journals/ijpp/YangH18}, both optimizations of existing implementations and approaches that account for new metrics.
     
     4052\paragraph{Granularity} A significant portion of early Work Stealing research concentrated on \newterm{Implicit Parallelism}\cite{wiki:implicitpar}. Since the system is responsible for splitting the work, granularity is a challenge that cannot be left to programmers (as opposed to \newterm{Explicit Parallelism}\cite{wiki:explicitpar}, where the burden can be left to programmers). In general, fine granularity is better for load balancing and coarse granularity reduces communication overhead. The best performance generally means finding a middle ground between the two. Several methods can be employed, but I believe these are less relevant for threads, which are generally explicit and more coarse-grained.
    4153
     42 \paragraph{Task Placement} Since modern computers rely heavily on cache hierarchies\cit{Do I need a citation for this}, migrating tasks from one core to another can be costly.  \cite{DBLP:journals/tpds/SquillanteL93}
      54\paragraph{Task Placement} Since modern computers rely heavily on cache hierarchies\cit{Do I need a citation for this}, migrating \glspl{at} from one core to another can be costly.  \cite{DBLP:journals/tpds/SquillanteL93}
    4355
    4456\todo{The survey is not great on this subject}
     
    4759
    4860\subsection{Theoretical Results}
     49 There is also a large body of research on the theoretical aspects of work stealing. These evaluate, for example, the cost of migration\cite{DBLP:conf/sigmetrics/SquillanteN91,DBLP:journals/pe/EagerLZ86}, how affinity affects performance\cite{DBLP:journals/tpds/SquillanteL93,DBLP:journals/mst/AcarBB02,DBLP:journals/ipl/SuksompongLS16} and theoretical models for heterogeneous systems\cite{DBLP:journals/jpdc/MirchandaneyTS90,DBLP:journals/mst/BenderR02,DBLP:conf/sigmetrics/GastG10}. \cite{DBLP:journals/jacm/BlellochGM99} examine the space bounds of Work Stealing and \cite{DBLP:journals/siamcomp/BerenbrinkFG03} show that for underloaded systems, the scheduler will complete computations in finite time, \ie is \newterm{stable}. Others show that Work-Stealing is applicable to various scheduling contexts\cite{DBLP:journals/mst/AroraBP01,DBLP:journals/anor/TchiboukdjianGT13,DBLP:conf/isaac/TchiboukdjianGTRB10,DBLP:conf/ppopp/AgrawalLS10,DBLP:conf/spaa/AgrawalFLSSU14}. \cite{DBLP:conf/ipps/ColeR13} also studied how Randomized Work Stealing affects false sharing among tasks.
      61There is also a large body of research on the theoretical aspects of work stealing. These evaluate, for example, the cost of migration\cite{DBLP:conf/sigmetrics/SquillanteN91,DBLP:journals/pe/EagerLZ86}, how affinity affects performance\cite{DBLP:journals/tpds/SquillanteL93,DBLP:journals/mst/AcarBB02,DBLP:journals/ipl/SuksompongLS16} and theoretical models for heterogeneous systems\cite{DBLP:journals/jpdc/MirchandaneyTS90,DBLP:journals/mst/BenderR02,DBLP:conf/sigmetrics/GastG10}. \cite{DBLP:journals/jacm/BlellochGM99} examine the space bounds of Work Stealing and \cite{DBLP:journals/siamcomp/BerenbrinkFG03} show that for underloaded systems, the scheduler will complete computations in finite time, \ie is \newterm{stable}. Others show that Work-Stealing is applicable to various scheduling contexts\cite{DBLP:journals/mst/AroraBP01,DBLP:journals/anor/TchiboukdjianGT13,DBLP:conf/isaac/TchiboukdjianGTRB10,DBLP:conf/ppopp/AgrawalLS10,DBLP:conf/spaa/AgrawalFLSSU14}. \cite{DBLP:conf/ipps/ColeR13} also studied how Randomized Work Stealing affects false sharing among \glspl{at}.
    5062
     5163However, as \cite{DBLP:journals/ijpp/YangH18} highlights, it is worth mentioning that this theoretical research has mainly focused on ``fully-strict'' computations, \ie workloads that can be fully represented with a Directed Acyclic Graph. It is unclear how well such workloads represent real-world scenarios.
    5264
    5365\section{Preemption}
     54 One last aspect of scheduling worth mentioning is preemption, since many schedulers rely on it for some of their guarantees. Preemption is the idea of interrupting tasks that have been running for too long, effectively injecting suspend points into the application. There are multiple techniques to achieve this, but they all aim to guarantee that suspend points in a task are never further apart than some fixed duration. While this helps schedulers guarantee that no tasks unfairly monopolize a worker, preemption can effectively be added to any scheduler. Therefore, the only interesting aspect of preemption for the design of scheduling is whether or not to require it.
      66One last aspect of scheduling worth mentioning is preemption, since many schedulers rely on it for some of their guarantees. Preemption is the idea of interrupting \glspl{at} that have been running for too long, effectively injecting suspend points into the application. There are multiple techniques to achieve this, but they all aim to guarantee that suspend points in a \gls{at} are never further apart than some fixed duration. While this helps schedulers guarantee that no \glspl{at} unfairly monopolize a worker, preemption can effectively be added to any scheduler. Therefore, the only interesting aspect of preemption for the design of scheduling is whether or not to require it.
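On UNIX-like systems, one common way to obtain such periodic interruptions is an interval timer whose signal handler requests a suspend point; the following sketch only illustrates the idea and is not the \CFA implementation.
\begin{cfa}
// Illustrative preemption tick using a POSIX interval timer; not the actual runtime code.
#include <signal.h>
#include <sys/time.h>

static volatile sig_atomic_t preempt_pending = 0;

static void on_tick( int sig ) {
	preempt_pending = 1;                   // ask the running task to yield at the next safe point
}

void start_preemption( long period_us ) {
	signal( SIGALRM, on_tick );
	struct itimerval itv = { { 0, period_us }, { 0, period_us } };  // interval and first expiry
	setitimer( ITIMER_REAL, &itv, 0 );     // deliver SIGALRM every period_us microseconds
}
\end{cfa}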
    5567
    5668\section{Schedulers in Production}\label{existing:prod}
     
    5870
    5971\subsection{Operating System Schedulers}
     60 Operating System Schedulers tend to be fairly complex, as they generally support some amount of real-time scheduling, aim to balance interactive and non-interactive tasks, and support multiple users sharing hardware without requiring these users to cooperate. Here are more details on a few schedulers used in common operating systems: Linux, FreeBSD, Microsoft Windows and Apple's OS X. The information is less complete for closed-source operating systems.
      72Operating System Schedulers tend to be fairly complex, as they generally support some amount of real-time scheduling, aim to balance interactive and non-interactive \glspl{at}, and support multiple users sharing hardware without requiring these users to cooperate. Here are more details on a few schedulers used in common operating systems: Linux, FreeBSD, Microsoft Windows and Apple's OS X. The information is less complete for closed-source operating systems.
    6173
    6274\paragraph{Linux's CFS}
     63 The default scheduler used by Linux (the Completely Fair Scheduler)\cite{MAN:linux/cfs,MAN:linux/cfs2} is a feedback scheduler based on CPU time. For each processor, it constructs a Red-Black tree of tasks waiting to run, ordering them by the amount of CPU time spent. The scheduler schedules the task that has spent the least CPU time. It also supports the concept of \newterm{Nice values}, which are effectively multiplicative factors on the CPU time spent. The ordering of tasks is also impacted by a group-based notion of fairness, where tasks belonging to groups having spent less CPU time are preferred to tasks belonging to groups having spent more CPU time. Linux achieves load-balancing by regularly monitoring the system state\cite{MAN:linux/cfs/balancing} and using some heuristic on the load (currently the CPU time spent in the last millisecond plus a decayed version of the previous time slots\cite{MAN:linux/cfs/pelt}).
      75The default scheduler used by Linux (the Completely Fair Scheduler)\cite{MAN:linux/cfs,MAN:linux/cfs2} is a feedback scheduler based on CPU time. For each processor, it constructs a Red-Black tree of \glspl{at} waiting to run, ordering them by the amount of CPU time spent. The scheduler schedules the \gls{at} that has spent the least CPU time. It also supports the concept of \newterm{Nice values}, which are effectively multiplicative factors on the CPU time spent. The ordering of \glspl{at} is also impacted by a group-based notion of fairness, where \glspl{at} belonging to groups having spent less CPU time are preferred to \glspl{at} belonging to groups having spent more CPU time. Linux achieves load-balancing by regularly monitoring the system state\cite{MAN:linux/cfs/balancing} and using some heuristic on the load (currently the CPU time spent in the last millisecond plus a decayed version of the previous time slots\cite{MAN:linux/cfs/pelt}).
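A simplified sketch of this ordering idea follows (names are hypothetical and the real implementation is considerably more involved): each \gls{at} accumulates CPU time scaled by its nice value, and the \gls{at} with the smallest accumulated time runs next.
\begin{cfa}
// Simplified sketch of ordering by weighted CPU time; not the Linux implementation.
struct rbtree;                             // hypothetical red-black tree of ready tasks

typedef struct task {
	unsigned long long vruntime;           // accumulated, weighted CPU time
	int nice;                              // user-supplied niceness
} task;

unsigned long long weight_of( int nice );  // hypothetical: maps a nice value to a factor
task * leftmost( struct rbtree * ready );  // hypothetical: node with the smallest vruntime

void charge( task * t, unsigned long long ran_ns ) {
	t->vruntime += ran_ns * weight_of( t->nice );   // nice acts as a multiplicative factor
}

task * pick_next( struct rbtree * ready ) {
	return leftmost( ready );              // the task that has spent the least weighted CPU time
}
\end{cfa}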
    6476
     65 \cite{DBLP:conf/eurosys/LoziLFGQF16} shows that Linux's CFS also does work-stealing to balance the workload of each processor, but the paper argues this aspect can be improved significantly. The issues highlighted seem to stem from Linux's need to support fairness across tasks \emph{and} across users\footnote{Enforcing fairness across users means, for example, that given two users: one with a single task and the other with one thousand tasks, the user with a single task does not receive one one-thousandth of the CPU time.}, increasing the complexity.
      77\cite{DBLP:conf/eurosys/LoziLFGQF16} shows that Linux's CFS also does work-stealing to balance the workload of each processor, but the paper argues this aspect can be improved significantly. The issues highlighted seem to stem from Linux's need to support fairness across \glspl{at} \emph{and} across users\footnote{Enforcing fairness across users means, for example, that given two users: one with a single \gls{at} and the other with one thousand \glspl{at}, the user with a single \gls{at} does not receive one one-thousandth of the CPU time.}, increasing the complexity.
    6678
     67 Linux also offers a FIFO scheduler, a real-time scheduler which runs the highest-priority task, and a round-robin scheduler, which is an extension of the FIFO scheduler that adds fixed time slices. \cite{MAN:linux/sched}
      79Linux also offers a FIFO scheduler, a real-time scheduler which runs the highest-priority \gls{at}, and a round-robin scheduler, which is an extension of the FIFO scheduler that adds fixed time slices. \cite{MAN:linux/sched}
    6880
    6981\paragraph{FreeBSD}
     
    7183
    7284\paragraph{Windows(OS)}
     73 Microsoft's Operating System's Scheduler\cite{MAN:windows/scheduler} is a feedback scheduler with priorities. It supports 32 levels of priorities, some of which are reserved for real-time and privileged applications. It schedules tasks based on the highest priority (highest number) and how much CPU time each task has used. The scheduler may also temporarily adjust priorities after certain events, such as the completion of I/O requests.
      85Microsoft's Operating System's Scheduler\cite{MAN:windows/scheduler} is a feedback scheduler with priorities. It supports 32 levels of priorities, some of which are reserved for real-time and privileged applications. It schedules \glspl{at} based on the highest priority (highest number) and how much CPU time each \gls{at} has used. The scheduler may also temporarily adjust priorities after certain events, such as the completion of I/O requests.
    7486
    7587\todo{load balancing}
     
    88100
    89101\subsection{User-Level Schedulers}
     90 By comparison, user-level schedulers tend to be simpler, gathering fewer metrics and avoiding complex notions of fairness. Part of the simplicity is due to the fact that all tasks belong to the same user, and therefore cooperation is both feasible and probable.
      102By comparison, user-level schedulers tend to be simpler, gathering fewer metrics and avoiding complex notions of fairness. Part of the simplicity is due to the fact that all \glspl{at} belong to the same user, and therefore cooperation is both feasible and probable.
    91103\paragraph{Go}
     92104Go's scheduler uses a Randomized Work Stealing algorithm that has a global runqueue (\emph{GRQ}) and each processor (\emph{P}) has both a fixed-size runqueue (\emph{LRQ}) and a high-priority next ``chair'' holding a single element.\cite{GITHUB:go,YTUBE:go} Preemption is present, but only at function call boundaries.
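A simplified sketch of the resulting lookup order is shown below (names are illustrative only and the actual Go runtime adds fairness checks, \eg periodically polling the GRQ): the single-element ``chair'' is consulted first, then the local runqueue, then the global runqueue, and finally stealing is attempted.
\begin{cfa}
// Illustrative pick order for a per-processor "next" slot, a local runqueue and a
// global runqueue; names are hypothetical, not the actual Go scheduler code.
typedef struct Task Task;
typedef struct P P;                        // processor
typedef struct GRQ GRQ;                    // global runqueue

Task * take_next_slot( P * );              // the single-element, high-priority slot
Task * pop_local( P * );                   // fixed-size local runqueue
Task * pop_global( GRQ * );                // shared global runqueue
Task * steal( P * all, unsigned np );      // steal from another processor

Task * find_task( P * p, GRQ * grq, P * all, unsigned np ) {
	Task * t = take_next_slot( p );
	if ( t == 0 ) t = pop_local( p );
	if ( t == 0 ) t = pop_global( grq );
	if ( t == 0 ) t = steal( all, np );
	return t;                              // 0 when nothing is runnable
}
\end{cfa}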
     
    105117
    106118\paragraph{Intel\textregistered ~Threading Building Blocks}
     107 \newterm{Thread Building Blocks} (TBB) is Intel's task parallelism\cite{wiki:taskparallel} framework. It runs tasks or \newterm{jobs}, schedulable objects that must always run to completion, on a pool of worker threads. TBB's scheduler is a variation of Randomized Work Stealing that also supports higher-priority graph-like dependencies\cite{MAN:tbb/scheduler}. It schedules tasks as follows (where \textit{t} is the last task completed):
      119\newterm{Threading Building Blocks} (TBB) is Intel's task parallelism\cite{wiki:taskparallel} framework. It runs \newterm{jobs}, \ie uninterruptible \glspl{at} that must always run to completion, on a pool of worker threads. TBB's scheduler is a variation of Randomized Work Stealing that also supports higher-priority graph-like dependencies\cite{MAN:tbb/scheduler}. It schedules \glspl{at} as follows (where \textit{t} is the last \gls{at} completed):
    108120\begin{displayquote}
    109121        \begin{enumerate}
     
    125137
    126138\paragraph{Grand Central Dispatch}
     127 This is an API produced by Apple\cit{Official GCD source} that offers task parallelism\cite{wiki:taskparallel}. Its distinctive aspect is that it uses multiple ``Dispatch Queues'', some of which are created by programmers. These queues each have their own local ordering guarantees, \eg tasks on queue $A$ are executed in \emph{FIFO} order.
      139This is an API produced by Apple\cit{Official GCD source} that offers task parallelism\cite{wiki:taskparallel}. Its distinctive aspect is that it uses multiple ``Dispatch Queues'', some of which are created by programmers. These queues each have their own local ordering guarantees, \eg \glspl{at} on queue $A$ are executed in \emph{FIFO} order.
    128140
    129141\todo{load balancing and scheduling}
  • doc/theses/thierry_delisle_PhD/thesis/text/io.tex

    r4559b34 r92538ab  
    11\chapter{User Level \io}
    22As mentioned in Section~\ref{prev:io}, User-Level \io requires multiplexing the \io operations of many \glspl{thrd} onto fewer \glspl{proc} using asynchronous \io operations.
    3 Different operating systems offer various forms of asynchronous operations and as mentioned in Chapter~\ref{intro}, this work is exclusively focused on the Linux operating-system.
     3Different operating systems offer various forms of asynchronous operations and, as mentioned in Chapter~\ref{intro}, this work is exclusively focused on the Linux operating-system.
    44
    55\section{Kernel Interface}
     
    173173The consequence is that the amount of parallelism used to prepare submissions for the next system call is limited.
    174174Beyond this limit, the length of the system call is the throughput limiting factor.
    175 I concluded from early experiments that preparing submissions seems to take about as long as the system call itself, which means that with a single @io_uring@ instance, there is no benefit in terms of \io throughput to having more than two \glspl{hthrd}.
     175I concluded from early experiments that preparing submissions seems to take at most as long as the system call itself, which means that with a single @io_uring@ instance, there is no benefit in terms of \io throughput to having more than two \glspl{hthrd}.
     176176Therefore, the design of the submission engine must manage multiple instances of @io_uring@ running in parallel, effectively sharding @io_uring@ instances.
     177177Similarly to scheduling, this sharding can be done privately, \ie, one instance per \gls{proc}, in decoupled pools, \ie, a pool of \glspl{proc} using a pool of @io_uring@ instances without one-to-one coupling between any given instance and any given \gls{proc}, or some mix of the two.
     178178Since completions are sent to the instance where requests were submitted, all instances with pending operations must be polled continuously
    179179\footnote{As will be described in Chapter~\ref{practice}, this does not translate into constant cpu usage.}.
     180Note that once an operation completes, there is nothing that ties it to the @io_uring@ instance that handled it.
      181There is nothing preventing a new operation with, for example, the same file descriptors from being submitted to a different @io_uring@ instance.
    180182
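For reference, the following minimal sketch shows one submission/completion cycle through @io_uring@ using the liburing helper library (error handling elided, and the fresh ring per call is purely illustrative); it is only meant to make the SQE/CQE flow concrete and is not the \CFA runtime code.
\begin{cfa}
// Minimal liburing example of a single read: allocate an SQE, fill it, submit it,
// then wait for and consume the matching CQE. Error handling is elided.
#include <liburing.h>

int read_with_uring( int fd, void * buf, unsigned len, off_t off ) {
	struct io_uring ring;
	io_uring_queue_init( 32, &ring, 0 );                    // create SQ/CQ rings with 32 entries
	struct io_uring_sqe * sqe = io_uring_get_sqe( &ring );  // allocate an SQE from the ring
	io_uring_prep_read( sqe, fd, buf, len, off );           // fill the SQE with a read request
	io_uring_submit( &ring );                               // io_uring_enter: submit pending SQEs
	struct io_uring_cqe * cqe;
	io_uring_wait_cqe( &ring, &cqe );                       // wait for the completion
	int res = cqe->res;                                     // bytes read or negative errno
	io_uring_cqe_seen( &ring, cqe );                        // mark the CQE as consumed
	io_uring_queue_exit( &ring );
	return res;
}
\end{cfa}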
    181183A complicating aspect of submission is @io_uring@'s support for chains of operations, where the completion of an operation triggers the submission of the next operation on the link.
     
    198200The only added complexity is that the number of SQEs is fixed, which means allocation can fail.
    199201
    200 Allocation failures need to be pushed up to the routing algorithm: \glspl{thrd} attempting \io operations must not be directed to @io_uring@ instances without sufficient SQEs available.
     202Allocation failures need to be pushed up to a routing algorithm: \glspl{thrd} attempting \io operations must not be directed to @io_uring@ instances without sufficient SQEs available.
    201203Furthermore, the routing algorithm should block operations up-front if none of the instances have available SQEs.
    202204
     
    212214
    213215In the case of designating a \gls{thrd}, ideally, when multiple \glspl{thrd} attempt to submit operations to the same @io_uring@ instance, all requests would be batched together and one of the \glspl{thrd} would do the system call on behalf of the others, referred to as the \newterm{submitter}.
    214 In practice however, it is important that the \io requests are not left pending indefinitely and as such, it may be required to have a current submitter and a next submitter.
      216In practice, however, it is important that the \io requests are not left pending indefinitely and, as such, it may be required to have a ``next submitter'' that guarantees everything missed by the current submitter is seen by the next one.
    215217Indeed, as long as there is a ``next'' submitter, \glspl{thrd} submitting new \io requests can move on, knowing that some future system call will include their request.
     216218Once the system call is done, the submitter must also free SQEs so that the allocator can reuse them.
     
    221223If the submission side does not designate submitters, polling can also submit all SQEs as it is polling events.
    222224A simple approach to polling is to allocate a \gls{thrd} per @io_uring@ instance and simply let the poller \glspl{thrd} poll their respective instances when scheduled.
    223 This design is especially convenient for reasons explained in Chapter~\ref{practice}.
    224225
     225226The big advantage of this pool-of-instances approach is that it is fairly flexible.
    226227It does not impose restrictions on what \glspl{thrd} submitting \io operations can and cannot do between allocations and submissions.
    227 It also can gracefully handles running out of ressources, SQEs or the kernel returning @EBUSY@.
      228It also can gracefully handle running out of resources, SQEs, or the kernel returning @EBUSY@.
     228229The downside to this is that many of the steps used for submitting need complex synchronization to work properly.
     229230The routing and allocation algorithm needs to keep track of which ring instances have available SQEs, block incoming requests if no instance is available, prevent barging if \glspl{thrd} are already queued up waiting for SQEs, and handle SQEs being freed.
     230231The submission side needs to safely append SQEs to the ring buffer, correctly handle chains, make sure no SQE is dropped or left pending forever, notify the allocation side when SQEs can be reused, and handle the kernel returning @EBUSY@.
    231 All this synchronization may have a significant cost and, compare to the next approach presented, this synchronization is entirely overhead.
     232All this synchronization may have a significant cost and, compared to the next approach presented, this synchronization is entirely overhead.
    232233
    233234\subsubsection{Private Instances}
    234235Another approach is to simply create one ring instance per \gls{proc}.
    235 This alleviate the need for synchronization on the submissions, requiring only that \glspl{thrd} are not interrupted in between two submission steps.
     236This alleviates the need for synchronization on the submissions, requiring only that \glspl{thrd} are not interrupted in between two submission steps.
    236237This is effectively the same requirement as using @thread_local@ variables.
    237238Since SQEs that are allocated must be submitted to the same ring, on the same \gls{proc}, this effectively forces the application to submit SQEs in allocation order
     
    240241To remove this requirement, a \gls{thrd} would need the ability to ``yield to a specific \gls{proc}'', \ie, park with the promise that it will be run next on a specific \gls{proc}, the \gls{proc} attached to the correct ring.}
    241242, greatly simplifying both allocation and submission.
    242 In this design, allocation and submission form a ring partitionned ring buffer as shown in Figure~\ref{fig:pring}.
      243In this design, allocation and submission form a partitioned ring buffer as shown in Figure~\ref{fig:pring}.
     243244Once SQEs are added to the ring buffer, the attached \gls{proc} has a significant amount of flexibility with regard to when to do the system call.
    244 Possible options are: when the \gls{proc} runs out of \glspl{thrd} to run, after running a given number of threads \glspl{thrd}, etc.
     245Possible options are: when the \gls{proc} runs out of \glspl{thrd} to run, after running a given number of \glspl{thrd}, etc.
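For example, a simple flush policy (sketched below with hypothetical names) is to do the system call when the \gls{proc} has no ready \glspl{thrd} left or after a fixed number of \glspl{thrd} have run since the last flush:
\begin{cfa}
// Illustrative flush policy for a per-processor ring; the processor fields and
// ready_queue_empty are hypothetical, io_uring_submit is from liburing.
#define FLUSH_THRESHOLD 32

void maybe_flush( struct processor * proc ) {
	if ( ready_queue_empty( proc ) || proc->ran_since_flush >= FLUSH_THRESHOLD ) {
		io_uring_submit( &proc->ring );    // one system call submits all pending SQEs
		proc->ran_since_flush = 0;
	}
}
\end{cfa}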
    245246
    246247\begin{figure}
     
    329330\paragraph{Pending Allocations} can be more complicated to handle.
    330331If the arbiter has available instances, the arbiter can attempt to directly hand over the instance and satisfy the request.
    331 Otherwise
     332Otherwise it must hold onto the list of threads until SQEs are made available again.
      333This handling becomes that much more complex if pending allocations require more than one SQE, since the arbiter must decide between satisfying requests in FIFO order or satisfying requests for fewer SQEs first.
     334
      335While this arbiter has the potential to solve many of the problems mentioned above, it also introduces a significant amount of complexity.
      336Tracking which processors are borrowing which instances and which instances have SQEs available ends up adding a significant synchronization prelude to any I/O operation.
     337Any submission must start with a handshake that pins the currently borrowed instance, if available.
      338An attempt to allocate is then made, but the arbiter can concurrently be attempting to allocate from the same instance on a different \gls{hthrd}.
      339Once the allocation is completed, the submission must check that the instance is still borrowed before attempting to flush.
      340These extra synchronization steps end up having a similar cost to the multiple shared instances approach.
      341Furthermore, if the number of instances does not match the number of processors actively submitting I/O, the system can fall into a state where instances are constantly being revoked and end up cycling among the processors, which leads to significant cache deterioration.
      342For these reasons, this approach, which sounds promising on paper, does not improve on the private instance approach in practice.
     343
     344\subsubsection{Private Instances V2}
     345
    332346
    333347
  • doc/theses/thierry_delisle_PhD/thesis/thesis.tex

    r4559b34 r92538ab  
    202202
    203203\newcommand\io{\glsxtrshort{io}\xspace}%
     204\newcommand\at{\gls{at}\xspace}%
     205\newcommand\ats{\glspl{at}\xspace}%
     206\newcommand\proc{\gls{proc}\xspace}%
     207\newcommand\procs{\glspl{proc}\xspace}%
    204208
    205209%======================================================================
  • doc/user/user.tex

    r4559b34 r92538ab  
    1111%% Created On       : Wed Apr  6 14:53:29 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Sun Oct 10 12:45:00 2021
    14 %% Update Count     : 5095
     13%% Last Modified On : Mon Feb 14 17:20:39 2022
     14%% Update Count     : 5382
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
    1717% requires tex packages: texlive-base texlive-latex-base tex-common texlive-humanities texlive-latex-extra texlive-fonts-recommended
    1818
    19 \documentclass[twoside,11pt]{article}
     19\documentclass[twoside]{article}
    2020
    2121%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
     
    4040% blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_
    4141% green highlighting ¢...¢ (cent symbol) emacs: C-q M-"
    42 % LaTex escape §...§ (section symbol) emacs: C-q M-'
      42% LaTex escape §...§ (section symbol) emacs: C-q M-'
    4343% keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^
    4444% math escape $...$ (dollar symbol)
     
    8585\newcommand{\B}[1]{{\Textbf[blue]{#1}}}
    8686\newcommand{\G}[1]{{\Textbf[OliveGreen]{#1}}}
     87\newcommand{\Sp}{\R{\textvisiblespace}}
    8788\newcommand{\KWC}{K-W C\xspace}
    8889
     
    156157One of the main design philosophies of \CFA is to ``\Index{describe not prescribe}'', which means \CFA tries to provide a pathway from low-level C programming to high-level \CFA programming, but it does not force programmers to ``do the right thing''.
    157158Programmers can cautiously add \CFA extensions to their C programs in any order and at any time to incrementally move towards safer, higher-level programming.
    158 A programmer is always free to reach back to C from \CFA, for any reason, and in many cases, new \CFA features can be locally switched back to there C counterpart.
    159 There is no notion or requirement for \emph{rewriting} a legacy C program in \CFA;
     159A programmer is always free to reach back to C from \CFA, for any reason, and in many cases, new \CFA features can be locally switched back to their C counterpart.
     160There is no notion or requirement for \emph{rewriting} a legacy C program to \CFA;
    160161instead, a programmer evolves a legacy program into \CFA by incrementally incorporating \CFA features.
    161162As well, new programs can be written in \CFA using a combination of C and \CFA features.
     
    163164
    164165\Index*[C++]{\CC{}}~\cite{c++:v1} had a similar goal 30 years ago, allowing object-oriented programming to be incrementally added to C.
    165 However, \CC currently has the disadvantages of a strong object-oriented bias, multiple legacy design-choices that cannot be updated, and active divergence of the language model from C, requiring significant effort and training to incrementally add \CC to a C-based project.
     166However, \CC currently has the disadvantages of a strong object-oriented bias, multiple legacy design-choices that are difficult to update, and active divergence of the language model from C, requiring significant effort and training to incrementally add \CC to a C-based project.
    166167In contrast, \CFA has 30 years of hindsight and a clean starting point.
    167168
    168169Like \Index*[C++]{\CC{}}, there may be both old and new ways to achieve the same effect.
    169170For example, the following programs compare the C, \CFA, and \CC I/O mechanisms, where the programs output the same result.
    170 \begin{center}
     171\begin{flushleft}
    171172\begin{tabular}{@{}l@{\hspace{1em}}l@{\hspace{1em}}l@{}}
    172 \multicolumn{1}{c@{\hspace{1em}}}{\textbf{C}}   & \multicolumn{1}{c}{\textbf{\CFA}}     & \multicolumn{1}{c}{\textbf{\CC}}      \\
    173 \begin{cfa}
     173\multicolumn{1}{@{}c@{\hspace{1em}}}{\textbf{C}}        & \multicolumn{1}{c}{\textbf{\CFA}}     & \multicolumn{1}{c@{}}{\textbf{\CC}}   \\
     174\begin{cfa}[tabsize=3]
    174175#include <stdio.h>$\indexc{stdio.h}$
    175176
     
    180181\end{cfa}
    181182&
    182 \begin{cfa}
     183\begin{cfa}[tabsize=3]
    183184#include <fstream>$\indexc{fstream}$
    184185
     
    189190\end{cfa}
    190191&
    191 \begin{cfa}
     192\begin{cfa}[tabsize=3]
    192193#include <iostream>$\indexc{iostream}$
    193194using namespace std;
    194195int main() {
    195196        int x = 0, y = 1, z = 2;
    196         ®cout<<x<<" "<<y<<" "<<z<<endl;®
     197        ®cout << x << ' ' << y << ' ' << z << endl;®
    197198}
    198199\end{cfa}
    199200\end{tabular}
    200 \end{center}
     201\end{flushleft}
    201202While \CFA I/O \see{\VRef{s:StreamIOLibrary}} looks similar to \Index*[C++]{\CC{}}, there are important differences, such as automatic spacing between variables and an implicit newline at the end of the expression list, similar to \Index*{Python}~\cite{Python}.
    202203
     
    238239however, it largely extended the C language, and did not address many of C's existing problems.\footnote{%
    239240Two important existing problems addressed were changing the type of character literals from ©int© to ©char© and enumerator from ©int© to the type of its enumerators.}
    240 \Index*{Fortran}~\cite{Fortran08}, \Index*{Ada}~\cite{Ada12}, and \Index*{Cobol}~\cite{Cobol14} are examples of programming languages that took an evolutionary approach, where modern language-features (\eg objects, concurrency) are added and problems fixed within the framework of the existing language.
     241\Index*{Fortran}~\cite{Fortran08}, \Index*{Cobol}~\cite{Cobol14}, and \Index*{Ada}~\cite{Ada12} are examples of programming languages that took an evolutionary approach, where modern language-features (\eg objects, concurrency) are added and problems fixed within the framework of the existing language.
    241242\Index*{Java}~\cite{Java8}, \Index*{Go}~\cite{Go}, \Index*{Rust}~\cite{Rust} and \Index*{D}~\cite{D} are examples of the revolutionary approach for modernizing C/\CC, resulting in a new language rather than an extension of the descendent.
    242243These languages have different syntax and semantics from C, do not interoperate directly with C, and are not systems languages because of restrictive memory-management or garbage collection.
     
    333334long double _Complex ®abs®( long double _Complex );
    334335\end{cfa}
    335 The problem is \Index{name clash} between the C name ©abs© and the \CFA names ©abs©, resulting in two name linkages\index{C linkage}: ©extern "C"© and ©extern "Cforall"© (default).
     336The problem is a \Index{name clash} between the C name ©abs© and the \CFA names ©abs©, resulting in two name linkages\index{C linkage}: ©extern "C"© and ©extern "Cforall"© (default).
    336337Overloaded names must use \newterm{name mangling}\index{mangling!name} to create unique names that are different from unmangled C names.
    337338Hence, there is the same need as in \CC to know if a name is a C or \CFA name, so it can be correctly formed.
     
    377378The program is linked with the debugging version of the runtime system.
    378379The debug version performs runtime checks to aid the debugging phase of a \CFA program, but can substantially slow program execution.
    379 The runtime checks should only be removed after the program is completely debugged.
     380The runtime checks should only be removed after a program is completely debugged.
    380381\textbf{This option is the default.}
    381382
     
    452453cfa $test$.cfa -XCFA -P -XCFA parse -XCFA -n # show program parse without prelude
    453454\end{lstlisting}
      455Alternatively, multiple flags can be specified, separated with commas and \emph{without} spaces.
     456\begin{lstlisting}[language=sh,{moredelim=**[is][\protect\color{red}]{®}{®}}]
     457cfa $test$.cfa -XCFA®,®-Pparse®,®-n # show program parse without prelude
     458\end{lstlisting}
    454459\begin{description}[topsep=5pt,itemsep=0pt,parsep=0pt]
    455460\item
     
    533538double ®``®forall = 3.5;
    534539\end{cfa}
    535 
    536 Existing C programs with keyword clashes can be converted by enclosing keyword identifiers in backquotes, and eventually the identifier name can be changed to a non-keyword name.
     540Existing C programs with keyword clashes can be converted by prefixing the keyword identifiers with double backquotes, and eventually the identifier name can be changed to a non-keyword name.
    537541\VRef[Figure]{f:HeaderFileInterposition} shows how clashes in existing C header-files \see{\VRef{s:StandardHeaders}} can be handled using preprocessor \newterm{interposition}: ©#include_next© and ©-I filename©.
    538542Several common C header-files with keyword clashes are fixed in the standard \CFA header-library, so there is a seamless programming-experience.
     
    627631\subsection{\texorpdfstring{\LstKeywordStyle{if} / \LstKeywordStyle{while} Statement}{if / while Statement}}
    628632
    629 The ©if©/©while© expression allows declarations, similar to ©for© declaration expression.\footnote{
    630 Declarations in the ©do©-©while© condition are not useful because they appear after the loop body.}
     633The \Indexc{if}/\Indexc{while} expression allows declarations, similar to \Indexc{for} declaration expression.\footnote{
     634Declarations in the \Indexc{do}-©while© condition are not useful because they appear after the loop body.}
    631635\begin{cfa}
    632636if ( ®int x = f()® ) ... $\C{// x != 0}$
     
    640644while ( ®struct S { int i; } x = { f() }; x.i < 4® ) ... $\C{// relational expression}$
    641645\end{cfa}
    642 Unless a relational expression is specified, each variable is compared not equal to 0, which is the standard semantics for the ©if©/©while© expression, and the results are combined using the logical ©&&© operator.
    643 The scope of the declaration(s) is local to the ©if© statement but exist within both the \emph{then} and \emph{else} clauses.
     646Unless a relational expression is specified, each variable is compared not equal to 0, which is the standard semantics for the ©if©/©while© expression, and the results are combined using the logical \Indexc{&&} operator.
     647The scope of the declaration(s) is local to the ©if©/©while© statement, \ie in both \emph{then} and \emph{else} clauses for ©if©, and loop body for ©while©.
    644648\CC only provides a single declaration always compared ©!=© to 0.
    645649
     
    649653\label{s:caseClause}
    650654
    651 C restricts the ©case© clause of a ©switch© statement to a single value.
     655C restricts the \Indexc{case} clause of a \Indexc{switch} statement to a single value.
    652656For multiple ©case© clauses associated with the same statement, it is necessary to have multiple ©case© clauses rather than multiple values.
    653 Requiring a ©case© clause for each value does not seem to be in the spirit of brevity normally associated with C.
    654 Therefore, the ©case© clause is extended with a list of values, as in:
     657Requiring a ©case© clause for each value is not in the spirit of brevity normally associated with C.
     658Therefore, the ©case© clause is extended with a list of values.
    655659\begin{cquote}
    656660\begin{tabular}{@{}l@{\hspace{3em}}l@{\hspace{2em}}l@{}}
     
    703707\subsection{\texorpdfstring{\LstKeywordStyle{switch} Statement}{switch Statement}}
    704708
    705 C allows a number of questionable forms for the ©switch© statement:
     709C allows a number of questionable forms for the \Indexc{switch} statement:
    706710\begin{enumerate}
    707711\item
    708 By default, the end of a ©case© clause\footnote{
     712By default, the end of a \Indexc{case} clause\footnote{
    709713In this section, the term \emph{case clause} refers to either a ©case© or ©default© clause.}
    710714\emph{falls through} to the next ©case© clause in the ©switch© statement;
    711 to exit a ©switch© statement from a ©case© clause requires explicitly terminating the clause with a transfer statement, most commonly ©break©:
     715to exit a ©switch© statement from a ©case© clause requires explicitly terminating the clause with a transfer statement, most commonly \Indexc{break}:
    712716\begin{cfa}
    713717switch ( i ) {
    714718  case 1:
    715719        ...
    716         // fall-through
     720        $\R{\LstCommentStyle{// fall-through}}$
    717721  case 2:
    718722        ...
    719         break;  // exit switch statement
     723        ®break;®        // exit switch statement
    720724}
    721725\end{cfa}
     
    763767}
    764768\end{cfa}
    765 This situation better handled without fall-through by allowing a list of case values \see{\VRef{s:caseClause}}.
     769This situation is better handled by a list of case values \see{\VRef{s:caseClause}}.
    766770While fall-through itself is not a problem, the problem occurs when fall-through is the default, as this semantics is unintuitive to many programmers and is different from most programming languages with a ©switch© statement.
    767771Hence, default fall-through semantics results in a large number of programming errors as programmers often \emph{forget} the ©break© statement at the end of a ©case© clause, resulting in inadvertent fall-through.
     
    777781                ...
    778782        } // if
    779   case 2:
    780         while ( j < 5 ) {
    781                 ...
    782           ®case 3:®             // transfer into "while" statement
    783                 ...
    784         } // while
    785 } // switch
    786783\end{cfa}
    787784This usage branches into control structures, which is known to cause both comprehension and technical difficulties.
     
    789786The technical problem results from the inability to ensure declaration and initialization of variables when blocks are not entered at the beginning.
    790787There are few arguments for this kind of control flow, and therefore, there is a strong impetus to eliminate it.
    791 Nevertheless, C does have an idiom where this capability is used, known as ``\Index*{Duff's device}''~\cite{Duff83}:
     788
     789This C idiom is known as ``\Index*{Duff's device}''~\cite{Duff83}, from this example:
    792790\begin{cfa}
    793791register int n = (count + 7) / 8;
     
    858856still works.
    859857Nevertheless, reversing the default action would have a non-trivial effect on case actions that compound, such as the above example of processing shell arguments.
    860 Therefore, to preserve backwards compatibility, it is necessary to introduce a new kind of ©switch© statement, called ©choose©, with no implicit fall-through semantics and an explicit fall-through if the last statement of a case-clause ends with the new keyword ©fallthrough©/©fallthru©, \eg:
     858Therefore, to preserve backwards compatibility, it is necessary to introduce a new kind of ©switch© statement, called \Indexc{choose}, with no implicit fall-through semantics and an explicit fall-through if the last statement of a case-clause ends with the new keyword \Indexc{fallthrough}/\Indexc{fallthru}, \eg:
    861859\begin{cfa}
    862860®choose® ( i ) {
     
    885883Therefore, no change is made for this issue.
    886884\item
    887 Dealing with unreachable code in a ©switch©/©choose© body is solved by restricting declarations and associated initialization to the start of statement body, which is executed \emph{before} the transfer to the appropriate ©case© clause\footnote{
      885Dealing with unreachable code in a ©switch©/©choose© body is solved by restricting declarations and initialization to the start of the statement body, which is executed \emph{before} the transfer to the appropriate ©case© clause\footnote{
    888886Essentially, these declarations are hoisted before the ©switch©/©choose© statement and both declarations and statement are surrounded by a compound statement.} and precluding statements before the first ©case© clause.
    889887Further declarations at the same nesting level as the statement body are disallowed to ensure every transfer into the body is sound.
     
    908906\subsection{Non-terminating and Labelled \texorpdfstring{\LstKeywordStyle{fallthrough}}{Non-terminating and Labelled fallthrough}}
    909907
    910 The ©fallthrough© clause may be non-terminating within a ©case© clause or have a target label to common code from multiple case clauses.
     908The \Indexc{fallthrough} clause may be non-terminating within a \Indexc{case} clause or have a target label to common code from multiple case clauses.
    911909\begin{center}
    912910\begin{tabular}{@{}lll@{}}
     
    960958\end{tabular}
    961959\end{center}
    962 The target label must be below the ©fallthrough© and may not be nested in a control structure, and
    963 the target label must be at the same or higher level as the containing ©case© clause and located at
    964 the same level as a ©case© clause; the target label may be case ©default©, but only associated
    965 with the current ©switch©/©choose© statement.
     960The target label must be below the \Indexc{fallthrough} and may not be nested in a control structure, and
     961the target label must be at the same or higher level as the containing \Indexc{case} clause and located at
     962the same level as a ©case© clause; the target label may be case \Indexc{default}, but only associated
     963with the current \Indexc{switch}/\Indexc{choose} statement.
    966964
    967965\begin{figure}
     
    10761074Looping a fixed number of times, possibly with a loop index, occurs frequently.
    10771075\CFA condenses simply looping to facilitate coding speed and safety.
    1078 The ©for©/©while©/©do-while© loop-control is augmented as follows \see{examples in \VRef[Figure]{f:LoopControlExamples}}:
     1076The \Indexc{for}, \Indexc{while}, and \Indexc{do} loop-control is augmented as follows \see{examples in \VRef[Figure]{f:LoopControlExamples}}:
    10791077\begin{itemize}[itemsep=0pt]
    10801078\item
     
    11451143\subsection{\texorpdfstring{Labelled \LstKeywordStyle{continue} / \LstKeywordStyle{break} Statement}{Labelled continue / break Statement}}
    11461144
    1147 C ©continue© and ©break© statements, for altering control flow, are restricted to one level of nesting for a particular control structure.
     1145C \Indexc{continue} and \Indexc{break} statements, for altering control flow, are restricted to one level of nesting for a particular control structure.
    11481146This restriction forces programmers to use \Indexc{goto} to achieve the equivalent control-flow for more than one level of nesting.
    11491147To prevent having to switch to the ©goto©, \CFA extends the \Indexc{continue}\index{continue@©continue©!labelled}\index{labelled!continue@©continue©} and \Indexc{break}\index{break@©break©!labelled}\index{labelled!break@©break©} with a target label to support static multi-level exit\index{multi-level exit}\index{static multi-level exit}~\cite{Buhr85}, as in Java.
    1150 For both ©continue© and ©break©, the target label must be directly associated with a ©for©, ©while© or ©do© statement;
    1151 for ©break©, the target label can also be associated with a ©switch©, ©if© or compound (©{}©) statement.
     1148For both ©continue© and ©break©, the target label must be directly associated with a \Indexc{for}, \Indexc{while} or \Indexc{do} statement;
     1149for ©break©, the target label can also be associated with a \Indexc{switch}, \Indexc{if} or compound (©{}©) statement.
    11521150\VRef[Figure]{f:MultiLevelExit} shows a comparison between labelled ©continue© and ©break© and the corresponding C equivalent using ©goto© and labels.
    11531151The innermost loop has 8 exit points, which cause continuation or termination of one or more of the 7 \Index{nested control-structure}s.
     
    12241222\end{figure}
    12251223
    1226 Both labelled ©continue© and ©break© are a ©goto©\index{goto@©goto©!restricted} restricted in the following ways:
     1224Both labelled \Indexc{continue} and \Indexc{break} are a \Indexc{goto}\index{goto@©goto©!restricted} restricted in the following ways:
    12271225\begin{itemize}
    12281226\item
     
    12401238
    12411239
     1240\subsection{\texorpdfstring{Extended \LstKeywordStyle{else}}{Extended else}}
     1241\label{s:ExtendedElse}
     1242\index{extended ©else©}
     1243
     1244The ©if© statement has an optional ©else© clause executed if the conditional is false.
     1245This concept is extended to the \Indexc{while}, \Indexc{for}, and \Indexc{do} looping constructs (like Python).
     1246Hence, if the loop conditional becomes false, looping stops and the corresponding ©else© clause is executed, if present.
     1247
     1248The following example is a linear search for the key 3 in an array, where finding the key is handled with a ©break© and not finding with the ©else© clause on the loop construct.
     1249\begin{cquote}
     1250\begin{cfa}
     1251int a[10];
     1252\end{cfa}
     1253\begin{tabular}{@{}lll@{}}
     1254\begin{cfa}
     1255
     1256while ( int i = 0; i < 10 ) {
     1257  if ( a[i] == 3 ) break; // found
     1258        i += 1;
     1259} ®else® { // i == 10
     1260        sout | "not found";
     1261}
     1262\end{cfa}
     1263&
     1264\begin{cfa}
     1265
     1266for ( i; 10 ) {
     1267  if ( a[i] == 3 ) break; // found
     1268
     1269} ®else® { // i == 10
     1270        sout | "not found";
     1271}
     1272\end{cfa}
     1273&
     1274\begin{cfa}
     1275int i = 0;
     1276do {
     1277  if ( a[i] == 3 ) break; // found
     1278        i += 1;
     1279} while( i < 10 ) ®else® { // i == 10
     1280        sout | "not found";
     1281}
     1282\end{cfa}
     1283\end{tabular}
     1284\end{cquote}
     1285Note, \Index{dangling else} now occurs with \Indexc{if}, \Indexc{while}, \Indexc{for}, \Indexc{do}, and \Indexc{waitfor}.
     1286
     1287
    12421288%\subsection{\texorpdfstring{\protect\lstinline{with} Statement}{with Statement}}
    12431289\subsection{\texorpdfstring{\LstKeywordStyle{with} Statement}{with Statement}}
     
    12661312Therefore, reducing aggregate qualification is a useful language design goal.
    12671313
    1268 C allows unnamed nested aggregates that open their scope into the containing aggregate.
     1314C partially addresses the problem by eliminating qualification for enumerated types and unnamed \emph{nested} aggregates, which open their scope into the containing aggregate.
    12691315This feature is used to group fields for attributes and/or with ©union© aggregates.
    12701316\begin{cfa}
    12711317struct S {
    1272         struct { int g,  h; } __attribute__(( aligned(64) ));
     1318        struct $\R{\LstCommentStyle{/* unnamed */}}$ { int g,  h; } __attribute__(( aligned(64) ));
    12731319        int tag;
    1274         union {
     1320        union $\R{\LstCommentStyle{/* unnamed */}}$ {
    12751321                struct { char c1,  c2; } __attribute__(( aligned(128) ));
    12761322                struct { int i1,  i2; };
    12771323                struct { double d1,  d2; };
    12781324        };
    1279 };
    1280 s.g; s.h; s.tag; s.c1; s.c2; s.i1; s.i2; s.d1; s.d2;
     1325} s;
     1326enum { R, G, B };
     1327s.g; s.h;   s.tag = R;   s.c1; s.c2;   s.i1 = G; s.i2 = B;   s.d1; s.d2;
    12811328\end{cfa}
    12821329
     
    13231370\end{cfa}
    13241371where qualification is only necessary to disambiguate the shadowed variable ©i©.
    1325 
    1326 In detail, the ©with© statement may appear as the body of a function or nested within a function body.
     1372In detail, the ©with© statement may form a function body or be nested within a function body.
     1373
    13271374The ©with© clause takes a list of expressions, where each expression provides an aggregate type and object.
    13281375(Enumerations are already opened.)
     
    13331380\end{cfa}
    13341381The expression object is the implicit qualifier for the open structure-fields.
     1382
    13351383\CFA's ability to overload variables \see{\VRef{s:VariableOverload}} and use the left-side of assignment in type resolution means most fields with the same name but different types are automatically disambiguated, eliminating qualification.
    13361384All expressions in the expression list are open in parallel within the compound statement.
     
    13621410\end{cfa}
    13631411A cast or qualification can be used to disambiguate variables within a ©with© \emph{statement}.
    1364 A cast can be used to disambiguate among overload variables in a ©with© \emph{expression}:
     1412A cast can also be used to disambiguate among overload variables in a ©with© \emph{expression}:
    13651413\begin{cfa}
    13661414with ( w ) { ... }                                                      $\C{// ambiguous, same name and no context}$
     
    13711419Finally, there is an interesting problem between parameters and the function-body ©with©, \eg:
    13721420\begin{cfa}
    1373 void ?{}( S & s, int i ) with ( s ) { $\C{// constructor}$
    1374         ®s.i = i;®  j = 3;  m = 5.5; $\C{// initialize fields}$
    1375 }
    1376 \end{cfa}
    1377 Here, the assignment ©s.i = i© means ©s.i = s.i©, which is meaningless, and there is no mechanism to qualify the parameter ©i©, making the assignment impossible using the function-body ©with©.
    1378 To solve this problem, parameters are treated like an initialized aggregate:
    1379 \begin{cfa}
    1380 struct Params {
    1381         S & s;
    1382         int i;
     1421void f( S & s, char c ) with ( s ) {
     1422        ®s.c = c;®  i = 3;  d = 5.5;                    $\C{// initialize fields}$
     1423}
     1424\end{cfa}
     1425Here, the assignment ©s.c = c© means ©s.c = s.c©, which is meaningless, and there is no mechanism to qualify the parameter ©c©, making the assignment impossible using the function-body ©with©.
     1426To solve this problem, parameters \emph{not} explicitly opened are treated like an initialized aggregate:
     1427\begin{cfa}
     1428struct Params {                                                         $\C{// s explicitly opened so S \& s elided}$
     1429        char c;
    13831430} params;
    13841431\end{cfa}
    13851432and implicitly opened \emph{after} a function-body open, to give them higher priority:
    13861433\begin{cfa}
    1387 void ?{}( S & s, int ®i® ) with ( s ) ®with( $\emph{\R{params}}$ )® { // syntax not allowed, illustration only
    1388         s.i = ®i®; j = 3; m = 5.5;
     1434void f( S & s, char ®c® ) with ( s ) ®with( $\emph{\R{params}}$ )® { // syntax not allowed, illustration only
     1435        s.c = ®c;®  i = 3;  d = 5.5;
    13891436}
    13901437\end{cfa}
    13911438This implicit semantic matches with programmer expectation.
    1392 
    13931439
    13941440
     
    33973443This requirement is the same as for comma expressions in argument lists.
    33983444
    3399 Type qualifiers, \ie const and volatile, may modify a tuple type.
    3400 The meaning is the same as for a type qualifier modifying an aggregate type [Int99, x 6.5.2.3(7),x 6.7.3(11)], \ie the qualifier is distributed across all of the types in the tuple, \eg:
     3445Type qualifiers, \ie ©const© and ©volatile©, may modify a tuple type.
     3446The meaning is to distribute the qualifier across all of the types in the tuple, \eg:
    34013447\begin{cfa}
    34023448const volatile [ int, float, const int ] x;
     
    35973643Stream ©exit© implicitly returns ©EXIT_FAILURE© to the shell.
    35983644\begin{cfa}
    3599 ®exit®   | "x (" | x | ") negative value."; // terminate and return EXIT_FAILURE to shell
    3600 ®abort® | "x (" | x | ") negative value."; // terminate and generate stack trace and core file
     3645®exit®   | "x (" | x | ") negative value.";   // terminate and return EXIT_FAILURE to shell
     3646®abort® | "x (" | x | ") negative value.";   // terminate and generate stack trace and core file
    36013647\end{cfa}
    36023648Note, \CFA stream variables ©stdin©, ©stdout©, ©stderr©, ©exit©, and ©abort© overload C variables ©stdin©, ©stdout©, ©stderr©, and functions ©exit© and ©abort©, respectively.
     
    42674313        sout | '1' | '2' | '3';
    42684314        sout | 1 | "" | 2 | "" | 3;
    4269         sout | "x (" | 1 | "x [" | 2 | "x {" | 3 | "x =" | 4 | "x $" | 5 | "x £" | 6 | "x ¥"
    4270                 | 7 | "x ¡" | 8 | "x ¿" | 9 | "x «" | 10;
      4315	sout | "x (" | 1 | "x [" | 2 | "x {" | 3 | "x =" | 4 | "x $" | 5 | "x £" | 6 | "x ¥"
     4316                | 7 | "x ¡" | 8 | "x ¿" | 9 | "x «" | 10;
    42714317        sout | 1 | ", x" | 2 | ". x" | 3 | "; x" | 4 | "! x" | 5 | "? x" | 6 | "% x"
    4272                 | 7 | "¢ x" | 8 | "» x" | 9 | ") x" | 10 | "] x" | 11 | "} x";
     4318                | 7 | "¢ x" | 8 | "» x" | 9 | ") x" | 10 | "] x" | 11 | "} x";
    42734319        sout | "x`" | 1 | "`x'" | 2 | "'x\"" | 3 | "\"x:" | 4 | ":x " | 5 | " x\t" | 6 | "\tx";
    42744320        sout | "x ( " | 1 | " ) x" | 2 | " , x" | 3 | " :x: " | 4;
     
    44464492The common usage is the short form of the mutex statement\index{ostream@©ostream©!mutex@©mutex©} to lock a stream during a single cascaded I/O expression, \eg:
    44474493\begin{cfa}
    4448 $\emph{thread\(_1\)}$ : ®mutex()® sout | "abc " | "def ";
    4449 $\emph{thread\(_2\)}$ : ®mutex()® sout | "uvw " | "xyz ";
     4494$\emph{thread\(_1\)}$ : ®mutex( sout )® sout | "abc " | "def ";
     4495$\emph{thread\(_2\)}$ : ®mutex( sout )® sout | "uvw " | "xyz ";
    44504496\end{cfa}
    44514497Now, the order of the thread execution is still non-deterministic, but the output is constrained to two possible lines in either order.
     
    44704516®mutex( sout )® {
    44714517        sout | 1;
    4472         ®mutex() sout® | 2 | 3;                         $\C{// unnecessary, but ok because of recursive lock}$
     4518        ®mutex( sout ) sout® | 2 | 3;                           $\C{// unnecessary, but ok because of recursive lock}$
    44734519        sout | 4;
    44744520} // implicitly release sout lock
     
    44824528        int x, y, z, w;
    44834529        sin | x;
    4484         ®mutex() sin® | y | z;                          $\C{// unnecessary, but ok because of recursive lock}$
     4530        ®mutex( sin )® sin | y | z;                                     $\C{// unnecessary, but ok because of recursive lock}$
    44854531        sin | w;
    44864532} // implicitly release sin lock
     
    44914537\Textbf{WARNING:} The general problem of \Index{nested locking} can occur if routines are called in an I/O sequence that block, \eg:
    44924538\begin{cfa}
    4493 ®mutex() sout® | "data:" | rtn( mon );  $\C{// mutex call on monitor}$
     4539®mutex( sout )® sout | "data:" | rtn( mon );    $\C{// mutex call on monitor}$
    44944540\end{cfa}
    44954541If the thread executing the I/O expression blocks in the monitor with the ©sout© lock, other threads writing to ©sout© also block until the thread holding the lock is unblocked and releases it.
     
    44984544\begin{cfa}
    44994545int ®data® = rtn( mon );
    4500 mutex() sout | "data:" | ®data®;
    4501 \end{cfa}
     4546mutex( sout ) sout | "data:" | ®data®;
     4547\end{cfa}
     4548
     4549
     4550\subsection{Locale}
     4551\index{stream!locale}
     4552\index{locale!stream}
     4553
     4554Cultures use different syntax, called a \newterm{locale}, for printing numbers so they are easier to read, \eg:
     4555\begin{cfa}
     455612®,®345®.®123          $\C[1.25in]{// comma separator, period decimal-point}$
     455712®.®345®,®123          $\C{// period separator, comma decimal-point}$
     455812$\Sp$345®,®123®.®     $\C{// space separator, comma decimal-point, period terminator}\CRT$
     4559\end{cfa}
     4560A locale is selected with function ©setlocale©, and the corresponding locale package \emph{must} be installed on the underlying system;
     4561©setlocale© returns ©0p© if the requested locale is unavailable.
     4562Furthermore, a locale covers the syntax for many cultural items, \eg address, measurement, money, etc.
     4563This discussion applies to item ©LC_NUMERIC© for formatting non-monetary integral and floating-point values.
     4564\VRef[Figure]{f:StreamLocale} shows selecting different cultural syntax, which may be associated with one or more countries.
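Because a requested locale may not be installed, a defensive program can check the ©setlocale© return value; the following minimal sketch (with a hypothetical fallback message) reverts to the default ©"C"© locale when German formatting is unavailable:
\begin{cfa}
#include <fstream.hfa>
#include <locale.h>                                     $\C{// setlocale}$

int main() {
	if ( setlocale( LC_NUMERIC, "de_DE.UTF-8" ) == 0p ) {  $\C{// locale installed?}$
		sout | "de_DE.UTF-8 unavailable, using default \"C\" locale";
		setlocale( LC_NUMERIC, "C" );
	}
	sout | 1234567.89;                              $\C{// 1.234.567,89 when the German locale is active}$
}
\end{cfa}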
     4565
     4566\begin{figure}
     4567\begin{cfa}
     4568#include <fstream.hfa>
     4569#include <locale.h>                                                     $\C{// setlocale}$
     4570#include <stdlib.h>                                                     $\C{// getenv}$
     4571
     4572int main() {
     4573        void print() {
     4574                sout | 12 | 123 | 1234 | 12345 | 123456 | 1234567;
     4575                sout | 12. | 123.1 | 1234.12 | 12345.123 | 123456.1234 | 1234567.12345;
     4576                sout | nl;
     4577        }
     4578        sout | "Default locale off";
     4579        print();
     4580        sout | "Locale on" | ®setlocale( LC_NUMERIC, getenv( "LANG" ) )®;  // enable local locale
     4581        print();
     4582        sout | "German" | ®setlocale( LC_NUMERIC, "de_DE.UTF-8" )®;  // enable German locale
     4583        print();
     4584        sout | "Ukraine" | ®setlocale( LC_NUMERIC, "uk_UA.utf8" )®;  // enable Ukraine locale
     4585        print();
     4586        sout | "Default locale off" | ®setlocale( LC_NUMERIC, "C" )®;  // disable locale
     4587        print();
     4588}
     4589
     4590Default locale off
     459112 123 1234 12345 123456 1234567
     459212. 123.1 1234.12 12345.123 123456.1234 1234567.12345
     4593
     4594Locale on en_US.UTF-8
     459512 123 1®,®234 12®,®345 123®,®456 1®,®234®,®567
     459612®.® 123®.®1 1®,®234®.®12 12®,®345®.®123 123®,®456®.®1234 1®,®234®,®567®.®12345
     4597
     4598German de_DE.UTF-8
     459912 123 1®.®234 12®.®345 123®.®456 1®.®234®.®567
     460012®.® 123®,®1®.® 1®.®234®,®12 12®.®345®,®123 123®.®456®,®1234 1®.®234®.®567®,®12345
     4601
     4602Ukraine uk_UA.utf8
     460312 123 1 234 12 345 123 456 1 234 567
      460412®.® 123®,®1®.® 1$\Sp$234®,®12®.® 12$\Sp$345®,®123®.® 123$\Sp$456®,®1234®.® 1$\Sp$234$\Sp$567®,®12345®.®
     4605
     4606Default locale off C
     460712 123 1234 12345 123456 1234567
     460812. 123.1 1234.12 12345.123 123456.1234 1234567.12345
     4609\end{cfa}
     4610\caption{Stream Locale}
     4611\label{f:StreamLocale}
     4612\end{figure}
    45024613
    45034614
     
    45554666\end{figure}
    45564667
     4668
    45574669\begin{comment}
    45584670\section{Types}
     
    46374749
    46384750
    4639 \subsection{Structures}
     4751\section{Structures}
    46404752
    46414753Structures in \CFA are basically the same as structures in C.
     
    52705382\subsection{Coroutine}
    52715383
    5272 \Index{Coroutines} are the precursor to tasks.
     5384\Index{Coroutines} are the precursor to threads.
    52735385\VRef[Figure]{f:FibonacciCoroutine} shows a coroutine that computes the \Index*{Fibonacci} numbers.
    52745386
     
    53725484
    53735485
    5374 \subsection{Tasks}
     5486\subsection{Threads}
    53755487
     53765488\CFA also provides a simple mechanism for creating and utilizing user-level threads.
    5377 A task provides mutual exclusion like a monitor, and also has its own execution state and a thread of control.
    5378 Similar to a monitor, a task is defined like a structure:
     5489A thread provides mutual exclusion like a monitor, and also has its own execution state and a thread of control.
     5490Similar to a monitor, a thread is defined like a structure:
    53795491
    53805492\begin{figure}
     
    54205532}
    54215533\end{cfa}
    5422 \caption{Simple Tasks}
    5423 \label{f:SimpleTasks}
     5534\caption{Simple Threads}
     5535\label{f:SimpleThreads}
    54245536\end{figure}
    54255537
     
    67886900In \CFA, there are ambiguous cases with dereference and operator identifiers, \eg ©int *?*?()©, where the string ©*?*?© can be interpreted as:
    67896901\begin{cfa}
    6790 *?$\R{\textvisiblespace}$*? $\C{// dereference operator, dereference operator}$
    6791 *$\R{\textvisiblespace}$?*? $\C{// dereference, multiplication operator}$
     6902*?$\Sp$*? $\C{// dereference operator, dereference operator}$
     6903*$\Sp$?*? $\C{// dereference, multiplication operator}$
    67926904\end{cfa}
    67936905By default, the first interpretation is selected, which does not yield a meaningful parse.
     
    68136925Therefore, it is necessary to disambiguate these cases with a space:
    68146926\begin{cfa}
    6815 i++$\R{\textvisiblespace}$? i : 0;
    6816 i?$\R{\textvisiblespace}$++i : 0;
     6927i++$\Sp$? i : 0;
     6928i?$\Sp$++i : 0;
    68176929\end{cfa}
    68186930
     
    74307542char random( void );$\indexc{random}$
    74317543char random( char u ); $\C{// [0,u)}$
    7432 char random( char l, char u ); $\C{// [l,u)}$
     7544char random( char l, char u ); $\C{// [l,u]}$
    74337545int random( void );
    74347546int random( int u ); $\C{// [0,u)}$
    7435 int random( int l, int u ); $\C{// [l,u)}$
     7547int random( int l, int u ); $\C{// [l,u]}$
    74367548unsigned int random( void );
    74377549unsigned int random( unsigned int u ); $\C{// [0,u)}$
    7438 unsigned int random( unsigned int l, unsigned int u ); $\C{// [l,u)}$
     7550unsigned int random( unsigned int l, unsigned int u ); $\C{// [l,u]}$
    74397551long int random( void );
    74407552long int random( long int u ); $\C{// [0,u)}$
    7441 long int random( long int l, long int u ); $\C{// [l,u)}$
     7553long int random( long int l, long int u ); $\C{// [l,u]}$
    74427554unsigned long int random( void );
    74437555unsigned long int random( unsigned long int u ); $\C{// [0,u)}$
    7444 unsigned long int random( unsigned long int l, unsigned long int u ); $\C{// [l,u)}$
     7556unsigned long int random( unsigned long int l, unsigned long int u ); $\C{// [l,u]}$
    74457557float random( void );                                            $\C{// [0.0, 1.0)}$
    74467558double random( void );                                           $\C{// [0.0, 1.0)}$
     
    81068218
    81078219
     8220\section{Pseudo Random Number Generator}
     8221\label{s:PRNG}
     8222
      8223Random numbers are values generated independently, i.e., new values do not depend on previous values (independent trials), \eg lottery numbers, shuffled cards, dice rolls, coin flips.
     8224While a primary goal of programming is computing values that are \emph{not} random, random values are useful in simulation, cryptography, games, etc.
     8225A random-number generator is an algorithm that computes independent values.
     8226If the algorithm uses deterministic computation (a predictable sequence of values), it generates \emph{pseudo} random numbers versus \emph{true} random numbers.
     8227
     8228All \newterm{pseudo random-number generators} (\newterm{PRNG}) involve some technique to scramble bits of a value, \eg multiplicative recurrence:
     8229\begin{cfa}
     8230rand = 36973 * (rand & 65535) + (rand >> 16); // scramble bits
     8231\end{cfa}
     8232Multiplication of large values adds new least-significant bits and drops most-significant bits.
     8233\begin{quote}
     8234\begin{tabular}{@{}r|l@{}}
     8235bits 63--32 (most)      & bits 31--0 (least)    \\
     8236\hline
     82370x0                                     & 0x3e8e36                              \\
     82380x5f                            & 0x718c25e1                    \\
     82390xad3e                          & 0x7b5f1dbe                    \\
     82400xbc3b                          & 0xac69ff19                    \\
     82410x1070f                         & 0x2d258dc6                    \\
     8242\end{tabular}
     8243\end{quote}
      8244Dropping bits 63--32 after each multiply leaves bits 31--0 scrambled.
     8245The least-significant bits \emph{appear} random but the same bits are always generated given a fixed starting value, called the \newterm{seed} (value 0x3e8e36 above).
     8246Hence, if a program uses the same seed, the same sequence of pseudo-random values is generated from the PRNG.
      8247Often the seed is set to another random value like a program's process identifier (©getpid©\index{getpid@©getpid©}) or the time when the program is run;
     8248hence, one random value bootstraps another.
     8249Finally, a PRNG usually generates a range of large values, \eg ©[0, UINT_MAX]©, which are scaled using the modulus operator, \eg ©prng() % 5© produces random values in the range 0--4.
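As a small sketch (not part of the library interface), the seed can be bootstrapped from the process identifier and the large result scaled to a die roll; ©set_seed© and ©prng© are the concurrent PRNG routines presented below, and ©getpid© requires ©<unistd.h>©:
\begin{cfa}
#include <unistd.h>                                     $\C{// getpid}$

set_seed( getpid() );                                   $\C{// bootstrap seed from the process id}$
unsigned int roll = prng() % 6 + 1;                     $\C{// scale [0,UINT\_MAX] to a die roll in 1--6}$
\end{cfa}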
     8250
     8251\CFA provides a sequential PRNG type only accessible by a single thread (not thread-safe) and a set of global and companion thread PRNG functions accessible by multiple threads without contention.
     8252\begin{itemize}
     8253\item
     8254The ©PRNG© type is for sequential programs, like coroutining:
     8255\begin{cfa}
     8256struct PRNG { ... }; $\C[3.75in]{// opaque type}$
     8257void ?{}( PRNG & prng ); $\C{// random seed}$
     8258void ?{}( PRNG & prng, uint32_t seed ); $\C{// fixed seed}$
     8259void set_seed( PRNG & prng, uint32_t seed ); $\C{// set seed}$
     8260uint32_t get_seed( PRNG & prng ); $\C{// get seed}$
     8261uint32_t prng( PRNG & prng ); $\C{// [0,UINT\_MAX]}$
     8262uint32_t prng( PRNG & prng, uint32_t u ); $\C{// [0,u)}$
     8263uint32_t prng( PRNG & prng, uint32_t l, uint32_t u ); $\C{// [l,u]}$
     8264uint32_t calls( PRNG & prng ); $\C{// number of calls}\CRT$
     8265\end{cfa}
     8266A ©PRNG© object is used to randomize behaviour or values during execution, \eg in games, a character makes a random move or an object takes on a random value.
     8267In this scenario, it is useful to have multiple ©PRNG© objects, \eg one per player or object.
     8268However, sequential execution is still repeatable given the same starting seeds for all ©PRNG©s.
     8269\VRef[Figure]{f:SequentialPRNG} shows an example that creates two sequential ©PRNG©s, sets both to the same seed (1009), and illustrates the three forms for generating random values, where both ©PRNG©s generate the same sequence of values.
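A smaller sketch of the one-©PRNG©-per-object idiom (hypothetical player names) gives each player its own generator, so a game is replayable from the same starting seeds:
\begin{cfa}
PRNG alice, bob;                                        $\C{// one generator per player}$
set_seed( alice, 1009 );  set_seed( bob, 42 );          $\C{// fixed seeds make the game repeatable}$
unsigned int aliceMove = prng( alice, 4 );              $\C{// random move in [0,4)}$
unsigned int bobMove = prng( bob, 4 );
\end{cfa}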
     8270
     8271\begin{figure}
     8272\begin{cfa}
     8273PRNG prng1, prng2;
     8274®set_seed( prng1, 1009 )®;   ®set_seed( prng2, 1009 )®;
     8275for ( 10 ) {
      8276        // Do not cascade prng calls because side-effect functions are called in arbitrary order.
     8277        sout | nlOff | ®prng( prng1 )®;  sout | ®prng( prng1, 5 )®;  sout | ®prng( prng1, 0, 5 )® | '\t';
     8278        sout | ®prng( prng2 )®;  sout | ®prng( prng2, 5 )®;  sout | ®prng( prng2, 0, 5 )® | nlOn;
     8279}
     8280\end{cfa}
     8281\begin{cquote}
     8282\begin{tabular}{@{}ll@{}}
     8283\begin{cfa}
     828437301721 2 2
     82851681308562 1 3
     8286290112364 3 2
     82871852700364 4 3
     8288733221210 1 3
     82891775396023 2 3
     8290123981445 2 3
     82912062557687 2 0
     8292283934808 1 0
     8293672325890 1 3
     8294\end{cfa}
     8295&
     8296\begin{cfa}
     829737301721 2 2
     82981681308562 1 3
     8299290112364 3 2
     83001852700364 4 3
     8301733221210 1 3
     83021775396023 2 3
     8303123981445 2 3
     83042062557687 2 0
     8305283934808 1 0
     8306672325890 1 3
     8307\end{cfa}
     8308\end{tabular}
     8309\end{cquote}
     8310\caption{Sequential PRNG}
     8311\label{f:SequentialPRNG}
     8312\end{figure}
     8313
     8314\item
     8315The PRNG global and companion thread functions are for concurrent programming, such as randomizing execution in short-running programs, \eg ©yield( prng() % 5 )©.
     8316\begin{cfa}
     8317void set_seed( uint32_t seed ); $\C[3.75in]{// set global seed}$
     8318uint32_t get_seed(); $\C{// get global seed}$
     8319// SLOWER
     8320uint32_t prng(); $\C{// [0,UINT\_MAX]}$
     8321uint32_t prng( uint32_t u ); $\C{// [0,u)}$
     8322uint32_t prng( uint32_t l, uint32_t u ); $\C{// [l,u]}$
     8323// FASTER
     8324uint32_t prng( $thread\LstStringStyle{\textdollar}$ & th );     $\C{// [0,UINT\_MAX]}$
     8325uint32_t prng( $thread\LstStringStyle{\textdollar}$ & th, uint32_t u ); $\C{// [0,u)}$
     8326uint32_t prng( $thread\LstStringStyle{\textdollar}$ & th, uint32_t l, uint32_t u );     $\C{// [l,u]}\CRT$
     8327\end{cfa}
     8328The only difference between the two sets of ©prng© routines is performance.
     8329
     8330Because concurrent execution is non-deterministic, seeding the concurrent PRNG is less important, as repeatable execution is impossible.
     8331Hence, there is one system-wide PRNG (global seed) but each \CFA thread has its own non-contended PRNG state.
      8332If the global seed is set, threads start with this seed until it is reset, after which threads start with the reset seed.
     8333Hence, these threads generate the same sequence of random numbers from their specific starting seed.
     8334If the global seed is \emph{not} set, threads start with a random seed, until the global seed is set.
     8335Hence, these threads generate different sequences of random numbers.
     8336If each thread needs its own seed, use a sequential ©PRNG© in each thread.
     8337The slower ©prng© functions \emph{without} a thread argument call ©active_thread© internally to indirectly access the current thread's PRNG state, while the faster ©prng© functions \emph{with} a thread argument directly access the thread through the thread parameter.
     8338If a thread pointer is available, \eg in thread main, eliminating the call to ©active_thread© significantly reduces the cost of accessing the thread's PRNG state.
     8339\VRef[Figure]{f:ConcurrentPRNG} shows an example using the slower/faster concurrent PRNG in the program main and a thread.
     8340
     8341\begin{figure}
     8342\begin{cfa}
     8343thread T {};
     8344void main( ®T & th® ) {  // thread address
     8345        for ( i; 10 ) {
     8346                sout | nlOff | ®prng()®;  sout | ®prng( 5 )®;  sout | ®prng( 0, 5 )® | '\t';  // SLOWER
     8347                sout | nlOff | ®prng( th )®;  sout | ®prng( th, 5 )®;  sout | ®prng( th, 0, 5 )® | nlOn;  // FASTER
     8348        }
     8349}
     8350int main() {
     8351        set_seed( 1009 );
     8352        $\R{thread\LstStringStyle{\textdollar}}$ ®& th = *active_thread()®;  // program-main thread-address
     8353        for ( i; 10 ) {
     8354                sout | nlOff | ®prng()®; sout | ®prng( 5 )®; sout | ®prng( 0, 5 )® | '\t';  // SLOWER
     8355                sout | nlOff | ®prng( th )®; sout | ®prng( th, 5 )®; sout | ®prng( th, 0, 5 )® | nlOn;  // FASTER
     8356        }
     8357        sout | nl;
     8358        T t; // run thread
     8359}
     8360\end{cfa}
     8361\begin{cquote}
     8362\begin{tabular}{@{}ll@{}}
     8363\begin{cfa}
     836437301721 2 2
     8365290112364 3 2
     8366733221210 1 3
     8367123981445 2 3
     8368283934808 1 0
     83691414344101 1 3
     8370871831898 3 4
     83712142057611 4 4
     8372802117363 0 4
     83732346353643 1 3
     8374\end{cfa}
     8375&
     8376\begin{cfa}
     83771681308562 1 3
     83781852700364 4 3
     83791775396023 2 3
     83802062557687 2 0
     8381672325890 1 3
     8382873424536 3 4
     8383866783532 0 1
     838417310256 2 5
     8385492964499 0 0
     83862143013105 3 2
     8387\end{cfa}
     8388\end{tabular}
     8389\begin{cfa}
     8390// same output as above from thread t
     8391\end{cfa}
     8392\end{cquote}
     8393\caption{Concurrent PRNG}
     8394\label{f:ConcurrentPRNG}
     8395\end{figure}
     8396\end{itemize}
     8397
     8398
    81088399\section{Multi-precision Integers}
    81098400\label{s:MultiPrecisionIntegers}
     
    83108601\end{tabular}
    83118602\end{cquote}
    8312 \small
     8603
    83138604\begin{cfa}
    83148605Factorial Numbers