Changeset 1dec8f3


Timestamp:
Sep 22, 2025, 2:33:42 PM (5 months ago)
Author:
Michael Brooks <mlbrooks@…>
Branches:
master
Children:
bb5b866
Parents:
7ca6bf1 (diff), 295ed2d1 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge remote-tracking branch 'refs/remotes/origin/master'

Files:
11 added
46 edited
1 moved

Legend:

(no prefix) unchanged context   + added in 1dec8f3   - removed in 1dec8f3
  • Jenkins/Distribute

    r7ca6bf1 → r1dec8f3

      final commit, build
      node {
    -
          //Wrap build to add timestamp to command line
          wrap([$class: 'TimestamperBuildWrapper']) {
    ...

              Tools.Clean()
    -
              Tools.Checkout( commit )
    -
              Version = GetVersion( build )
    -
              Configure()
    -
              Package()
    -
              Test()
    -
              Archive()
          }
    ...
          echo "Build   Version: ${build}"
          echo "Long    Version: ${version}"
    -
          return version
      }
    ...
      def prepare_build() {
          // prepare the properties
    -     properties ([                                                             \
    -             buildDiscarder(logRotator(                                        \
    -                     artifactDaysToKeepStr: '',                                \
    -                     artifactNumToKeepStr: '',                                 \
    -                     daysToKeepStr: '730',                                     \
    -                     numToKeepStr: '1000'                                      \
    -             )),                                                               \
    -             [$class: 'ParametersDefinitionProperty',                          \
    -                     parameterDefinitions: [                                   \
    -                             [$class: 'StringParameterDefinition',             \
    -                                     description: 'The git commit to checkout', \
    -                                     name: 'GitRef',                           \
    -                                     defaultValue: '',                         \
    -                             ],                                                \
    -                             [$class: 'StringParameterDefinition',             \
    -                                     description: 'Build Number to put into the version', \
    -                                     name: 'Build',                            \
    -                                     defaultValue: '0',                        \
    -                             ],                                                \
    +     properties ([                                                     \
    +             buildDiscarder(logRotator(                                \
    +                     artifactDaysToKeepStr: '',                        \
    +                     artifactNumToKeepStr: '',                         \
    +                     daysToKeepStr: '730',                             \
    +                     numToKeepStr: '1000'                              \
    +             )),                                                       \
    +             [$class: 'ParametersDefinitionProperty',                  \
    +                     parameterDefinitions: [                           \
    +                             [$class: 'StringParameterDefinition',     \
    +                                     description: 'The git commit to checkout', \
    +                                     name: 'GitRef',                   \
    +                                     defaultValue: '',                 \
    +                             ],                                        \
    +                             [$class: 'StringParameterDefinition',     \
    +                                     description: 'Build Number to put into the version', \
    +                                     name: 'Build',                    \
    +                                     defaultValue: '0',                \
    +                             ],                                        \
                          ],
                  ]])
  • Jenkins/Promote

    r7ca6bf1 → r1dec8f3

      #!groovy

    + import groovy.transform.Field
    +
    + // Globals
    + @Field def BuildDir  = null
    + @Field def SrcDir    = null
    + @Field def RemoteRepo = ''
    + @Field def ArchiveUrl = ''
    +
    + // Local variables
    + def err = null
    + def log_needed = false
    +
      node {
    -     // Globals
          BuildDir   = pwd tmp: true
          SrcDir     = pwd tmp: false
          RemoteRepo = 'git@github.com:cforall/cforall.git'
          ArchiveUrl = 'https://cforall.uwaterloo.ca/jenkins/job/Cforall_Distribute_Ref/lastSuccessfulBuild/artifact/*zip*/archive.zip'
    -
    -     // Local variables
    -     def err = null
    -     def log_needed = false
    -
          currentBuild.result = "SUCCESS"

    -     //Wrap build to add timestamp to command line
    +     // Wrap build to add timestamp to command line
          wrap([$class: 'TimestamperBuildWrapper']) {
    -
              PrepRepo();
    -
              def name = GetArchive();
    -
              PushRepo(name);
          }
    -
      }

    ...
              dir (BuildDir) {
                  sh 'rm -rf *'
    -             sshagent (credentials: ['git_key_mar27']) {
    +             sshagent (credentials: ['github_sep2025']) {
                      sh "git clone --bare ${RemoteRepo} repo"
                  }
    ...
              }
          }
    -
          return tarball
      }
    ...
                  sh "git status"
                  sh "git diff-index --quiet HEAD || git commit -m 'Push from build machine: ${name}'"
    -             sshagent (credentials: ['git_key_mar27']) {
    +             sshagent (credentials: ['github_sep2025']) {
                      sh "git push origin master"
                  }
  • Jenkins/tools.groovy

    r7ca6bf1 → r1dec8f3

    ...
                  userRemoteConfigs: [[
                          url: 'cforall@plg.uwaterloo.ca:software/cfa/cfa-cc',
    -                     credentialsId: 'git_key_aug20']]
    +                     credentialsId: 'git_key_aug2025']]
                  ])
                  echo GitLogMessage(scmVars.GIT_COMMIT, scmVars.GIT_PREVIOUS_COMMIT)
  • benchmark/io/http/channel.hfa

    r7ca6bf1 → r1dec8f3

    ...
          int size;
          mutex_lock lock;
    -     condition_variable prods;
    -     condition_variable cons;
    +     cond_lock prods;
    +     cond_lock cons;
      };

  • benchmark/io/http/printer.hfa

    r7ca6bf1 → r1dec8f3

    ...
              acceptor_stats_t accpt;
          } stats;
    -     condition_variable(fast_block_lock) var;
    +     cond_lock(fast_block_lock) var;
          ServerCluster * cl;
      };
  • doc/papers/llheap/Makefile

    r7ca6bf1 → r1dec8f3

      FakeHeader \
      Header \
    + decreasing \
    + increasing \
      }

    ...

      GRAPHS = ${addsuffix .tex, \
    + prolog \
    + swift \
    + java \
      }
    +
    + #prolog \

      ## Define the documents that need to be made.
    ...

      clean :
    -     @rm -frv ${DOCUMENT} ${BASE}.ps WileyNJD-AMA.bst ${BASE}.out.ps ${Build}
    +     @rm -frv ${DOCUMENT} testgenfmt testgenfmt2 ${BASE}.ps WileyNJD-AMA.bst ${BASE}.out.ps ${Build}

      # File Dependencies #
    ...
          dvips ${Build}/$< -o $@

    - ${BASE}.dvi : Makefile ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
    + ${BASE}.dvi : Makefile ${BASE}.out.ps WileyNJD-AMA.bst testgenfmt testgenfmt2 ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
              local.bib ../../bibliography/pl.bib | ${Build}
          # Must have *.aux file containing citations for bibtex
    ...
          fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t

    + testgenfmt : testgenfmt.cc
    +     g++ testgenfmt.cc -o $@
    +
    + testgenfmt2 : testgenfmt2.cc
    +     g++ testgenfmt2.cc -o $@
    +
    + #${addsuffix /testdata, ${basename ${GRAPHS}}} : ${addsuffix /testgen, ${basename ${GRAPHS}}}
    + #     echo ${addsuffix /testdata, ${basename ${GRAPHS}}}
    + #     echo ${addsuffix /testgen, ${basename ${GRAPHS}}}
    + #     testgenfmt < $< > $@
    +
    + #swift/testdata.lexp : swift/testgen.ldata testgenfmt.cc
    + #     ./testgenfmt < $<
    +
    + swift/testdata.exp : swift/testgen.data testgenfmt2.cc
    +     ./testgenfmt2 < $<
    +
    + #prolog/testdata.lexp : prolog/testgen.ldata testgenfmt.cc
    + #     ./testgenfmt < $<
    +
    + prolog/testdata.exp : prolog/testgen.data testgenfmt2.cc
    +     ./testgenfmt2 < $<
    +
    + #java/testdata.lexp : java/testgen.ldata testgenfmt.cc
    + #     ./testgenfmt < $<
    +
    + java/testdata.exp : java/testgen.data testgenfmt2.cc
    +     ./testgenfmt2 < $<
    +
    + ${GRAPHS} : Makefile plotexp.gp plotres.gp ${addsuffix /testdata.exp, ${basename ${GRAPHS}}}
    +     gnuplot -e GRAPH="'${basename $@}'" plotexp.gp
    +     gnuplot -e GRAPH="'${basename $@}'" plotres.gp
    +
      # Local Variables: #
      # compile-command: "make" #
  • doc/papers/llheap/Paper.tex

    r7ca6bf1 r1dec8f3  
    - \documentclass[AMA,STIX1COL]{WileyNJD-v2}
    + % Type: Paper
    + %
    + % Abstract
    + %
    + % A new C-based concurrent memory-allocator is presented, called llheap (ll => low latency). It supports C/C++ applications with multiple kernel threads, or it can be embedded into user-threading runtime-systems. llheap extends the C allocation API with new functions providing orthogonal access to allocation features; hence, programmers do not have to code missing combinations. llheap also extends the C allocation semantics by remembering multiple aspects of the initial allocation. These properties can be queried, allowing programmers to write safer programs by preserving these properties in future allocations. As well, realloc/reallocarray preserve initial zero-fill and alignment properties when adjusting storage size, again increasing future allocation safety. The allocator provides a contention-free statistics gathering mode, and a debugging mode for dynamically checking allocation pre/post conditions and invariants. These modes are invaluable for understanding and debugging a program's dynamic allocation behaviour, with low enough cost to be used in production code. An example is presented for condensing the allocation API using advanced type-systems, providing a single type-safe allocation routine using named arguments. Finally, performance results across a number of benchmarks show llheap is competitive with other modern memory allocators.
    + %
    + % Upload: llheap.pdf
    + %
    + % Computing Classification Systems
    + %
    + % Add
    + % 500 Software and its engineering > Software libraries and repositories
    + % Add
    + % 300 Computing methodologies > Concurrent programming languages
    + %
    + % Authors, submitter has to have an orcid
    + %
    + % Details & Comments
    + %
    + % cover letter
    + %
    + % Funding
    + %  yes
    + %  Government of Canada >
    + %  Natural Sciences and Engineering Research Council of Canada
    + %
    + % Electronic Supplementary Materials: No
    + % Are you submitting a conference paper extension: No
    + % X  ACM uses CrossCheck, an automated service that checks for plagiarism. Any submission to ACM is subject to such a check. Confirm that you are familiar with the ACM Plagiarism Policy.
    + % To confirm that you have reviewed all title, author, and affiliation information in the submission form and the manuscript for accuracy, and approve its exact use in the final, published article, please check the box to the right. X
    +
    + \documentclass[manuscript,screen,review]{acmart}

      % Latex packages used in the document.
    ...
      \usepackage{relsize}
      \usepackage{xspace}
    + \usepackage{xcolor}
      \usepackage{calc}
    + \usepackage{algorithm}
    + \usepackage{algorithmic}
    + \usepackage{enumitem}
    + \usepackage{tabularx}                            % allows \lstMakeShortInline@
      \usepackage[scaled=0.88]{helvet}                 % decent Helvetica font, scaled to Times size
      \usepackage[T1]{fontenc}
      \usepackage{listings}                            % format program code
    - \usepackage[labelformat=simple,aboveskip=0pt,farskip=0pt]{subfig}
    + \usepackage[labelformat=simple,aboveskip=0pt,farskip=0pt,font={rm,md,up}]{subfig}
      \renewcommand{\thesubfigure}{(\alph{subfigure})}
    - \usepackage{enumitem}

      \hypersetup{breaklinks=true}
    -
    - \usepackage[pagewise]{lineno}
    - \renewcommand{\linenumberfont}{\scriptsize\sffamily}
    + \usepackage{breakurl}
    +
    + % \usepackage[pagewise]{lineno}
    + % \renewcommand{\linenumberfont}{\scriptsize\sffamily}

      \usepackage{varioref}                            % extended references
    ...
      \setlength{\gcolumnposn}{3.25in}
      \setlength{\columnposn}{\gcolumnposn}
    - \newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}}
    + \renewcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}}
      \newcommand{\CRT}{\global\columnposn=\gcolumnposn}
      \makeatother
    ...
      columns=fullflexible,
      basicstyle=\linespread{0.9}\sf,                  % reduce line spacing and use sans-serif font
    - stringstyle=\small\tt,                           % use typewriter font
    + stringstyle=\fontsize{9}{9}\selectfont\tt,       % use typewriter font
      tabsize=5,                                       % N-space tabbing
      xleftmargin=\parindentlnth,                      % indent code to paragraph indentation
    ...
      literate=
      %  {-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.75ex}{0.1ex}}}}1
    -   {-}{\raisebox{-1pt}{\ttfamily-}}1
    +   {-}{\raisebox{0pt}{\ttfamily-}}1
        {^}{\raisebox{0.6ex}{\(\scriptstyle\land\,\)}}1
        {~}{\raisebox{0.3ex}{\(\scriptstyle\sim\,\)}}1
    -   {'}{\ttfamily'\hspace*{-0.4ex}}1
    -   {`}{\ttfamily\upshape\hspace*{-0.3ex}`}1
    + %  {'}{\ttfamily'\hspace*{-0.4ex}}1
    +   {`}{\raisebox{-2pt}{\large\textasciigrave\hspace{-1pt}}}1
        {<-}{$\leftarrow$}2
        {=>}{$\Rightarrow$}2
    ...
      \lstnewenvironment{java}[1][]{\lstset{language=java,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}{}

    - % inline code @...@
    - \lstMakeShortInline@%
    -
    - % \let\OLDthebibliography\thebibliography
    - % \renewcommand\thebibliography[1]{
    - %   \OLDthebibliography{#1}
    - %   \setlength{\parskip}{0pt}
    - %   \setlength{\itemsep}{4pt plus 0.3ex}
    - % }
    -
      \newsavebox{\myboxA}
      \newsavebox{\myboxB}
    ...
      %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

    - \articletype{RESEARCH ARTICLE}%
    -
    - % Referees
    - % Doug Lea, dl@cs.oswego.edu, SUNY Oswego
    - % Herb Sutter, hsutter@microsoft.com, Microsoft Corp
    - % Gor Nishanov, gorn@microsoft.com, Microsoft Corp
    - % James Noble, kjx@ecs.vuw.ac.nz, Victoria University of Wellington, School of Engineering and Computer Science
    -
    - \received{XXXXX}
    - \revised{XXXXX}
    - \accepted{XXXXX}
    -
    - \raggedbottom
    -
      \title{High-Performance Concurrent Memory Allocation}

    - \author[1]{Mubeen Zulfiqar}
    - \author[1]{Ayelet Wasik}
    - \author[1]{Peter A. Buhr*}
    - \author[2]{Bryan Chan}
    - \author[3]{Dave Dice}
    - \authormark{ZULFIQAR \textsc{et al.}}
    -
    - \address[1]{\orgdiv{Cheriton School of Computer Science}, \orgname{University of Waterloo}, \orgaddress{\state{Waterloo, ON}, \country{Canada}}}
    - \address[2]{\orgdiv{Huawei Compiler Lab}, \orgname{Huawei}, \orgaddress{\state{Markham, ON}, \country{Canada}}}
    - \address[3]{\orgdiv{Oracle Labs}, \orgname{Oracle}, \orgaddress{\state{Burlington, MA}, \country{USA}}}
    -
    -
    - \corres{*Peter A. Buhr, Cheriton School of Computer Science, University of Waterloo, 200 University Avenue West, Waterloo, ON N2L 3G1, Canada. \email{pabuhr{\char`\@}uwaterloo.ca}}
    -
    - % \fundingInfo{Natural Sciences and Engineering Research Council of Canada}
    -
    - \abstract[Summary]{%
    - A new C-based concurrent memory-allocator is presented, called llheap (low latency).
    - It can be used standalone in C/\CC applications with multiple kernel threads, or embedded into high-performance user-threading programming languages.
    - llheap extends the feature set of existing C allocation by remembering zero-filled (\lstinline{calloc}) and aligned properties (\lstinline{memalign}) in an allocation.
    + \author{Mubeen Zulfiqar}
    + \email{m3zulfiq@uwaterloo.ca}
    + \author{Ayelet Wasik}
    + \email{aisraeli@plg.uwaterloo.ca}
    + \author{Peter A. Buhr}
    + \email{pabuhr@uwaterloo.ca}
    + \orcid{0000-0003-3747-9281}
    + \affiliation{%
    +   \institution{University of Waterloo}
    +   \city{Waterloo}
    +   \state{Ontario}
    +   \country{Canada}
    + }
    + \author{Dave Dice}
    + \email{dave.dice@oracle.com}
    + \orcid{0000-0001-9164-7747}
    + \affiliation{%
    +   \institution{Oracle Labs}
    +   \city{Burlington}
    +   \state{Massachusetts}
    +   \country{USA}
    + }
    + \author{Bryan Chan}
    + \email{bryan.chan@huawei.com}
    + \affiliation{%
    +   \institution{Huawei Compiler Lab}
    +   \city{Markham}
    +   \state{Ontario}
    +   \country{Canada}
    + }
    +
    + \renewcommand{\shortauthors}{Zulfiqar et al.}
    +
    + % inline code @...@
    + \lstMakeShortInline@%
    +
    + \begin{document}
    +
    + \begin{abstract}
    + A new C-based concurrent memory-allocator is presented, called llheap (ll $\Rightarrow$ low latency).
    + It supports C/\CC applications with multiple kernel threads, or it can be embedded into user-threading runtime-systems.
    + llheap extends the C allocation API with new functions providing orthogonal access to allocation features;
    + hence, programmers do not have to code missing combinations.
    + llheap also extends the C allocation semantics by remembering multiple aspects of the initial allocation.
      These properties can be queried, allowing programmers to write safer programs by preserving these properties in future allocations.
    - As well, \lstinline{realloc}/\lstinline{reallocarray} preserve these properties when adjusting storage size, again increasing future allocation safety.
    - llheap also extends the C allocation API with \lstinline{aalloc}, \lstinline{amemalign}, \lstinline{cmemalign}, \lstinline{resize}, and extended \lstinline{realloc}, providing orthogonal access to allocation features;
    - hence, programmers do have to code missing combinations.
    - The llheap allocator also provides a contention-free statistics gathering mode, and a debugging mode for dynamically checking allocation pre/post conditions and invariants.
    + As well, \lstinline{realloc}/\lstinline{reallocarray} preserve initial zero-fill and alignment properties when adjusting storage size, again increasing future allocation safety.
    + The allocator provides a contention-free statistics gathering mode, and a debugging mode for dynamically checking allocation pre/post conditions and invariants.
      These modes are invaluable for understanding and debugging a program's dynamic allocation behaviour, with low enough cost to be used in production code.
    - The llheap API is further extended with the \CFA advanced type-system, providing a single type-safe allocation routine using named arguments, increasing safety and simplifying usage.
    - Finally, performance results across a number of benchmarks show llheap is competitive with the best memory allocators.
    - }% abstract
    -
    - % While not as powerful as the \lstinline{valgrind} interpreter, a large number of allocations mistakes are detected.
    - % A micro-benchmark test-suite is started for comparing allocators, rather than relying on a suite of arbitrary programs. It has been an interesting challenge.
    - % These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs.
    - % Existing memory allocators, glibc, dlmalloc, hoard, jemalloc, ptmalloc3, rpmalloc, tbmalloc, and the new allocator llheap are all compared using the new micro-benchmark test-suite.
    + An example is presented for condensing the allocation API using advanced type-systems, providing a single type-safe allocation routine using named arguments.
    + Finally, performance results across a number of benchmarks show llheap is competitive with other modern memory allocators.
    + \end{abstract}
    +
    + \begin{CCSXML}
    + <ccs2012>
    + <concept>
    + <concept_id>10011007.10011006.10011072</concept_id>
    + <concept_desc>Software and its engineering~Software libraries and repositories</concept_desc>
    + <concept_significance>500</concept_significance>
    + </concept>
    + <concept>
    + <concept_id>10010147.10011777.10011014</concept_id>
    + <concept_desc>Computing methodologies~Concurrent programming languages</concept_desc>
    + <concept_significance>300</concept_significance>
    + </concept>
    + </ccs2012>
    + \end{CCSXML}
    +
    + \ccsdesc[500]{Software and its engineering~Software libraries and repositories}
    + \ccsdesc[300]{Computing methodologies~Concurrent programming languages}

      \keywords{memory allocation, (user-level) concurrency, type-safety, statistics, debugging, high performance}

    -
    - \begin{document}
    - %\linenumbers                           % comment out to turn off line numbering
    + \received{20 February 2007}
    + \received[revised]{12 March 2009}
    + \received[accepted]{5 June 2009}
    +

      \maketitle

    -
      \section{Introduction}

    - Memory management services a series of program allocation/deallocation requests and attempts to satisfy them from a variable-sized block(s) of memory, while minimizing total memory usage.
    - A general-purpose dynamic-allocation algorithm cannot anticipate allocation requests so its time and space performance is rarely optimal (bin packing).
    - However, allocators take advantage of allocation patterns in typical programs (heuristics) to produce excellent results, both in time and space (similar to LRU paging).
    - Allocators use similar techniques, but each optimizes specific allocation patterns.
    - Nevertheless, allocators are a series of compromises, occasionally with some static or dynamic tuning parameters to optimize specific request patterns.
    + Memory management services a series of program allocation/deallocation requests and attempts to satisfy them from variable-sized blocks of memory while minimizing total memory usage.
    + A general-purpose memory allocator cannot anticipate storage requests, so its time and space performance cannot be optimal (bin packing).
    + Each allocator takes advantage of a subset of typical allocation patterns (heuristics) to produce excellent results, both in time and space (similar to LRU paging).
    + Nevertheless, allocators are a series of compromises, possibly with static or dynamic tuning parameters to optimize specific request patterns.


    ...
      \label{s:MemoryStructure}

    - Figure~\ref{f:ProgramAddressSpace} shows the typical layout of a program's address space (high to low) divided into a number of zones, with free memory surrounding the dynamic code/data~\cite{memlayout}.
    + Figure~\ref{f:ProgramAddressSpace} shows the typical layout of a program's address space (high addresses to low) divided into a number of zones, with free memory surrounding the dynamic code/data~\cite{memlayout}.
      Static code and data are placed into memory at load time from the executable and are fixed-sized at runtime.
      Dynamic code/data memory is managed by the dynamic loader for libraries loaded at runtime, which is complex especially in a multi-threaded program~\cite{Huang06}.
    - However, changes to the dynamic code/data space are typically infrequent, many occurring at program startup, and are largely outside of a program's control.
    - Stack memory is managed by the program call/return-mechanism using a LIFO technique, which works well for sequential programs.
    - For stackful coroutines and user threads, a new stack is commonly created in the dynamic-allocation memory.
    - The dynamic-allocation memory is often a contiguous area (can be memory mapped as multiple areas), which starts empty and grows/shrinks as the program creates/deletes variables with independent lifetime.
    - The programming-language's runtime manages this area, where management complexity is a function of the mechanism for deleting variables.
    - This work focuses solely on management of the dynamic-allocation memory.
    + However, changes to the dynamic code/data space are typically infrequent, most occurring at program startup, and are largely outside of a program's control.
    + Stack memory is managed by the program call/return mechanism using a LIFO technique.
    + For stackful coroutines and user threads, new stacks are commonly created in the dynamic-allocation memory.
    + The dynamic-allocation memory is often a contiguous area, which starts empty and grows/shrinks as the program creates/deletes variables with independent lifetime.
    + The language's runtime manages this area, where management complexity is a function of the mechanism for deleting variables.

      \begin{figure}
    ...
      \label{s:DynamicMemoryManagement}

    - Modern programming languages manage dynamic memory in different ways.
    - Some languages, such as Lisp~\cite{CommonLisp}, Java~\cite{Java}, Haskell~\cite{Haskell}, Go~\cite{Go}, provide explicit allocation but \emph{implicit} deallocation of data through garbage collection~\cite{Wilson92}.
    - In general, garbage collection supports memory compaction, where dynamic (live) data is moved during runtime to better utilize space.
    - However, moving data requires finding and updating pointers to it to reflect the new data locations.
    - Programming languages such as C~\cite{C}, \CC~\cite{C++}, and Rust~\cite{Rust} provide the programmer with explicit allocation \emph{and} deallocation of data.
    - These languages cannot find and subsequently move live data because pointers can be created to any storage zone, including internal components of allocated objects, and may contain temporary invalid values generated by pointer arithmetic.
    - Attempts have been made to perform quasi garbage collection in C/\CC~\cite{Boehm88}, but it is a compromise.
    - This work only examines dynamic management with \emph{explicit} deallocation.
    - While garbage collection and compaction are not part this work, many of the results are applicable to the allocation phase in any memory-management approach.
    + Modern programming languages provide two forms of storage management: managed or unmanaged.
    + Both forms have explicit allocation, but managed memory has implicit deallocation (garbage collection~\cite{Wilson92}, GC) and unmanaged memory has some form of explicit deallocation.
    + Sometimes there are explicit deallocation hints in managed languages.
    + Both forms attempt to reuse freed storage in the heap for new allocations.
    + Unmanaged languages have no information about allocated \newterm{objects}, and hence, use techniques during freeing to detect adjacent unused storage when coalescing.
    + Conservative GC attempts to find free objects in an unmanaged system by scanning memory and marking anything that \emph{looks} like a live object.
    + However, \emph{conservative} means some non-objects might be marked as live;
    + the goal is not to miss any live objects.
    + Managed languages maintain sufficient information to locate all live objects.
    + Precise GC is then able to mark just the live objects.
    + Both approaches then sweep through the unmarked objects looking for adjacent free storage to coalesce.
    + Precise GC has a further coalescing option of compacting used objects and adjusting the pointers used to find them to the new locations, resulting in a large area of contiguous free storage.
    + Languages such as Lisp~\cite{CommonLisp}, Java~\cite{Java}, Haskell~\cite{Haskell}, and Go~\cite{Go} are managed and normally implemented using precise GC.
    + (Both Go~\cite{Go1.3} and Netscape JavaScript~\cite{JavaScriptGC} switched from conservative to precise GC.)
    + Languages such as C~\cite{C}, \CC~\cite{C++}, Rust~\cite{Rust}, and Swift~\cite{swift} (because of explicit management of weak references) are unmanaged but could be used with conservative GC~\cite{Boehm88}.
    + This work only examines unmanaged memory with \emph{explicit} deallocation.
    + % While GC is not part of this work, some of the results are applicable to the allocation phase in any memory-management approach.

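To make the no-moving argument concrete, here is a short editor's sketch (not from the paper) of why a C/C++ runtime cannot safely relocate objects: the program may hold interior pointers and pointer values disguised as integers, which no collector can reliably find and update.

        #include <stdint.h>
        #include <stdlib.h>

        int main( void ) {
                int * a = malloc( 100 * sizeof( int ) );
                int * interior = a + 50;                    // pointer into the middle of the object
                uintptr_t hidden = (uintptr_t)a ^ 0x5a5a;   // pointer temporarily hidden in an integer
                // If the runtime moved *a to compact the heap, both values below
                // would silently refer to the old location.
                interior[0] = 42;
                ((int *)(hidden ^ 0x5a5a))[1] = 42;         // pointer recovered by arithmetic
                free( a );
                return 0;
        }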
      Most programs use a general-purpose allocator, usually the one provided by the programming-language's runtime.
      In certain languages, programmers can write specialized allocators for specific needs.
    - C and \CC allow easy replacement of the default memory allocator through a standard API.
    - Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.
    - As well, new languages support concurrency (kernel and/or user threading), which must be safely handled by the allocator.
    - Hence, several alternative allocators exist for C/\CC with the goal of scaling in a multi-threaded program~\cite{Berger00,mtmalloc,streamflow,tcmalloc}.
    + POSIX~\cite{POSIX17} provides for replacement of the default memory allocator in C and \CC through a standard API.
    + Most industry JVMs provide multiple GCs, from which a user selects one for their workload.
    + %Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.
    + As well, new languages support concurrency (kernel/user threading), which must be safely handled by the allocator.
    + Hence, alternative allocators exist for C/\CC with the goal of scaling in multi-threaded programs~\cite{Berger00,mtmalloc,streamflow,tcmalloc}.
      This work examines the design of high-performance allocators for use by kernel and user multi-threaded applications written in C/\CC.
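The replacement mechanism just mentioned can be tried without recompiling a program. Below is an editor's toy sketch of allocator interposition (a single-threaded, never-freeing bump allocator, nothing like llheap's design); built as a shared library it is activated with LD_PRELOAD. A real replacement must also interpose calloc, realloc, posix_memalign, and friends.

        // build: gcc -shared -fPIC bump.c -o bump.so
        // run:   LD_PRELOAD=./bump.so ./a.out
        #include <stddef.h>
        #include <sys/mman.h>

        void * malloc( size_t size ) {                  // strong definition overrides libc's
                static char * next = NULL, * limit = NULL;
                size = ( size + 15 ) & ~(size_t)15;     // preserve 16-byte alignment
                if ( next == NULL || next + size > limit ) {
                        size_t region = size > ((size_t)1 << 20) ? size : ((size_t)1 << 20);
                        char * base = (char *)mmap( NULL, region, PROT_READ | PROT_WRITE,
                                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
                        if ( base == MAP_FAILED ) return NULL;
                        next = base;  limit = base + region;
                }
                void * p = next;
                next += size;
                return p;
        }

        void free( void * p ) { (void)p; }              // sketch never reclaims storage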
    280335
     
    283338\label{s:Contributions}
    284339
    285 This work provides the following contributions in the area of explicit concurrent dynamic-allocation:
    286 \begin{enumerate}[leftmargin=*,itemsep=0pt]
    287 \item
    288 Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,400 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions for the concurrent languages \uC~\cite{uC++} and \CFA~\cite{Moss18,Delisle21} using user-level threads running on multiple kernel threads (M:N threading).
    289 
    290 \item
    291 Extend the standard C heap functionality by preserving with each allocation its request size, the amount allocated, whether it is zero fill, and its alignment.
     340This work provides the following contributions to the area of explicit concurrent dynamic-allocation.
     341\begin{enumerate}[leftmargin=18pt,topsep=3pt,itemsep=0pt]
     342\item
     343Implementation of a new stand-alone concurrent low-latency memory-allocator, called llheap~\cite{llheap}, ($\approx$1,500 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions for the concurrent languages \uC~\cite{uC++} and \CFA~\cite{Moss18,Delisle21} using user-level threads running on multiple kernel threads (M:N threading).
     344
     345\item
     346Extend the C allocation API with new functions @aalloc@, @amemalign@, @cmemalign@, @resize@, @aligned_resize@, @aligned_realloc@, and @aligned_reallocarray@ to make allocation properties orthogonally accessible.
     347
     348\item
     349Extend the C allocation semantics by preserving with each allocation its request size, the amount allocated, whether it is zero fill, and its alignment.
     350
     351\item
     352Provide additional query operations @malloc_alignment@, @malloc_zero_fill@, and @malloc_size@ to access allocation information.
    292353
    293354\item
    294355Use the preserved zero fill and alignment as \emph{sticky} properties for @realloc@ and @reallocarray@ to zero-fill and align when storage is extended or copied.
    295 Without this extension, it is unsafe to @realloc@ storage these allocations if the properties are not preserved when copying.
     356Without this extension, it is unsafe to @realloc@ storage if the properties are not preserved when copying.
    296357This silent problem is unintuitive to programmers and difficult to locate because it is transient.
    297358
    298359\item
    299 Provide additional heap operations to make allocation properties orthogonally accessible.
    300 \begin{itemize}[topsep=0pt,itemsep=0pt,parsep=0pt]
    301 \item
    302 @aalloc( dimension, elemSize )@ same as @calloc@ except memory is \emph{not} zero filled, which is significantly faster than @calloc@.
    303 \item
    304 @amemalign( alignment, dimension, elemSize )@ same as @aalloc@ with memory alignment.
    305 \item
    306 @cmemalign( alignment, dimension, elemSize )@ same as @calloc@ with memory alignment.
    307 \item
    308 @resize( oaddr, size )@ re-purpose an old allocation for a new type \emph{without} preserving fill or alignment.
    309 \item
    310 @aligned_resize( oaddr, alignment, size )@ re-purpose an old allocation with new alignment but \emph{without} preserving fill.
    311 \item
    312 @aligned_realloc( oaddr, alignment, size )@ same as @realloc@ but adding or changing alignment.
    313 \item
    314 @aligned_reallocarray( oaddr, alignment, dimension, elemSize )@ same as @reallocarray@ but adding or changing alignment.
    315 \end{itemize}
    316 
    317 \item
    318 Provide additional query operations to access information about an allocation:
    319 \begin{itemize}[topsep=0pt,itemsep=0pt,parsep=0pt]
    320 \item
    321 @malloc_alignment( addr )@ returns the alignment of the allocation.
    322 If the allocation is not aligned or @addr@ is @NULL@, the minimal alignment is returned.
    323 \item
    324 @malloc_zero_fill( addr )@ returns a boolean result indicating if the memory is allocated with zero fill, \eg by @calloc@/@cmemalign@.
    325 \item
    326 @malloc_size( addr )@ returns the size of the memory allocation.
    327 \item
    328 @malloc_usable_size( addr )@ returns the usable (total) size of the memory, \ie the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.
    329 \end{itemize}
    330 
    331 \item
    332 Provide optional extensive, fast, and contention-free allocation statistics to understand allocation behaviour, accessed by:
    333 \begin{itemize}[topsep=0pt,itemsep=0pt,parsep=0pt]
    334 \item
    335 @malloc_stats()@ print memory-allocation statistics on the file-descriptor set by @malloc_stats_fd@ (default @stderr@).
    336 \item
    337 @malloc_info( options, stream )@ print memory-allocation statistics as an XML string on the specified file-descriptor set by @malloc_stats_fd@ (default @stderr@).
    338 \item
    339 @malloc_stats_fd( fd )@ set file-descriptor number for printing memory-allocation statistics (default @stderr@).
    340 This file descriptor is used implicitly by @malloc_stats@ and @malloc_info@.
    341 \end{itemize}
    342 
    343 \item
    344 Provide extensive runtime checks to validate allocation operations and identify the amount of unfreed storage at program termination.
     360Provide optional extensive, fast, and contention-free allocation statistics to understand allocation behaviour.
     361
     362\item
     363Provide runtime checks to validate allocation operations and identify the amount of unfreed storage at program termination.
    345364
    346365\item
    347366Build 8 different versions of the allocator: static or dynamic linking, with or without statistics or debugging.
    348 A program may link to any of these 8 versions of the allocator often without recompilation (@LD_PRELOAD@).
    349 
    350 \item
    351 Provide additional heap wrapper functions in \CFA creating a more usable set of allocation operations and properties.
    352 
    353 \item
    354 A micro-benchmark test-suite for comparing allocators rather than relying on a suite of arbitrary programs.
    355 These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs
     367A program may link to any of these 8 versions of the allocator often without recompilation (linking or @LD_PRELOAD@).
     368
     369\item
     370Demonstrate how advanced programming-language type-systems can condense the allocation API providing a single type-safe allocation function using named arguments.
     371
     372\item
     373Create a benchmark test-suite for comparing allocators, rather than relying on a suite of arbitrary programs.
     374
     375\item
     376Run performance experiments using the new benchmark test-suite comparing llheap with six of the best allocators in use today.
     377The goal is to demonstrate that llheap's performance, both in time and space, is comparable to the best allocators in use today.
    356378\end{enumerate}
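Putting the extended API from the contribution list to work, here is a minimal usage sketch (editor's illustration; the prototypes are inferred from the names and argument lists above, so llheap's actual header may differ slightly):

        #include <stdbool.h>
        #include <stdio.h>
        #include <stdlib.h>

        // Inferred prototypes -- see the llheap documentation for the real header.
        extern void * aalloc( size_t dimension, size_t elemSize );
        extern void * cmemalign( size_t alignment, size_t dimension, size_t elemSize );
        extern size_t malloc_alignment( void * addr );
        extern bool malloc_zero_fill( void * addr );

        int main( void ) {
                int * a = aalloc( 100, sizeof( int ) );        // calloc without the zero fill => faster
                int * c = cmemalign( 64, 100, sizeof( int ) ); // calloc + 64-byte alignment

                // query the properties remembered with the allocation
                printf( "alignment %zu, zero fill %d\n",
                        malloc_alignment( c ), malloc_zero_fill( c ) );

                // sticky properties: the grown storage stays 64-byte aligned and zero filled
                c = realloc( c, 200 * sizeof( int ) );

                free( a );
                free( c );
                return 0;
        }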

    ...
      \section{Background}

    - The following is a quick overview of allocator design options that affect memory usage and performance (see~\cite{Zulfiqar22} for more details).
    - Dynamic acquires and releases obtain storage for a program variable, called an \newterm{object}, through calls such as @malloc@/@new@ and @free@/@delete@ in C/\CC.
    + The following is an overview of allocator design options that affect memory usage and performance (see~\cite{Zulfiqar22} for more details).
    + Dynamic acquires and releases obtain \newterm{object} storage via calls such as @malloc@/@new@ and @free@/@delete@ in C/\CC, respectively.
      A \newterm{memory allocator} contains a complex data-structure and code that manages the layout of objects in the dynamic-allocation zone.
    - The management goals are to make allocation/deallocation operations as fast as possible while densely packing objects to make efficient use of memory.
    - Since objects in C/\CC cannot be moved to aid the packing process, only adjacent free storage can be \newterm{coalesced} into larger free areas.
    - The allocator grows or shrinks the dynamic-allocation zone to obtain storage for objects and reduce memory usage via OS calls, such as @mmap@ or @sbrk@ in UNIX.
    -
    -
    + % The management goals are to make allocation/deallocation operations as fast as possible while densely packing objects to make efficient use of memory.
    + Since objects in C/\CC cannot be moved, only adjacent free storage can be \newterm{coalesced} into larger free areas.
    + The allocator grows or shrinks the dynamic-allocation zone to obtain storage for objects and reduce memory usage using \newterm{operating system} (OS) calls, such as @mmap@ or @sbrk@ in UNIX.
    +
    +
    + \vspace*{-7pt}
      \subsection{Allocator Components}
      \label{s:AllocatorComponents}
    ...
      The \newterm{management data} is a data structure located at a known memory address and contains fixed-sized information in the static-data memory that references components in the dynamic-allocation memory.
      For multi-threaded programs, additional management data may exist in \newterm{thread-local storage} (TLS) for each kernel thread executing the program.
    - The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}.
    - Allocated objects (light grey) are variable sized, and are allocated and maintained by the program;
    + The \newterm{storage data} is composed of allocated/freed objects, and \newterm{reserved memory}.
    + Allocated objects (white) are variable sized, and are allocated and maintained by the program;
      \ie only the program knows the location of allocated storage.
    - Freed objects (white) represent memory deallocated by the program, which are linked into one or more lists facilitating location of new allocations.
    - Reserved memory (dark grey) is one or more blocks of memory obtained from the \newterm{operating system} (OS) but not yet allocated to the program;
    - if there are multiple reserved blocks, they are also chained together.
    + Freed objects (light grey) represent memory deallocated by the program, which are linked into one or more lists facilitating location for new allocations.
    + Reserved memory (dark grey) is one or more blocks of memory obtained from the OS but not yet used by the program;
    + if there are multiple reserved blocks, they are normally linked together.


    ...
      In many allocator designs, allocated objects and reserved blocks have management data embedded within them (see also Section~\ref{s:ObjectContainers}).
      Figure~\ref{f:AllocatedObject} shows an allocated object with a header, trailer, and optional spacing around the object.
    - The header contains information about the object, \eg size, type, etc.
    - The trailer may be used to simplify coalescing and/or for security purposes to mark the end of an object.
    + The header contains information about the object, \eg size, type, \etc.
    + The trailer may be used to simplify coalescing and/or for safety purposes to mark the end of an object.
      An object may be preceded by padding to ensure proper alignment.
    - Some algorithms quantize allocation requests, resulting in additional space after an object less than the quantized value.
    + Some algorithms quantize allocation requests, resulting in additional space after an object.
      When padding and spacing are necessary, neither can be used to satisfy a future allocation request while the current allocation exists.

    - A free object often contains management data, \eg size, pointers, etc.
    - Often the free list is chained internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks.
    - For internal chaining, the amount of management data for a free node defines the minimum allocation size, \eg if 16 bytes are needed for a free-list node, allocation requests less than 16 bytes are rounded up.
    + A free object often contains management data, \eg size, pointers, \etc.
    + Often the free list is linked internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks.
    + For internal linking, the amount of management data for a free node defines the minimum allocation size, \eg if 16 bytes are needed for a free-list node, allocation requests less than 16 bytes are rounded up.
      Often the minimum storage alignment and free-node size are the same.
    - The information in an allocated or freed object is overwritten when it transitions from allocated to freed and vice-versa by new program data and/or management information.
    + The information in an allocated or freed object is overwritten when it transitions from allocated to freed and vice-versa by new program data and/or management information, respectively.
    + For safety purposes, freed storage may be scrubbed (overwritten) to expose inadvertent bugs, such as assuming variables are zero initialized.

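The header/trailer and internally linked free-list layout just described can be pictured with a small editor's sketch (field choices are illustrative assumptions, not llheap's actual layout):

        #include <stddef.h>

        // One possible header preceding each allocated object, carrying the
        // information the text mentions: sizes and sticky properties.
        typedef struct Header {
                size_t size;                    // requested or bucket size
                unsigned int zeroFill : 1;      // allocated by calloc/cmemalign?
                unsigned int alignment : 31;    // requested alignment, if any
        } Header;

        // A freed object reuses its own storage for the list link, so the free
        // list consumes no extra memory ("internal linking").
        typedef struct FreeNode {
                struct FreeNode * next;         // link at a known offset in the freed block
                size_t size;                    // size of this free block
        } FreeNode;

        // The free node bounds the minimum allocation: requests smaller than
        // sizeof(FreeNode) must be rounded up (16 bytes on an LP64 machine).
        _Static_assert( sizeof( FreeNode ) == 16, "minimum allocation size" );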
    403427\begin{figure}
     
    406430\caption{Allocated Object}
    407431\label{f:AllocatedObject}
    408 \end{figure}
    409 
    410 
    411 \subsection{Single-Threaded Memory-Allocator}
    412 \label{s:SingleThreadedMemoryAllocator}
    413 
    414 In a sequential (single threaded) program, the program thread performs all allocation operations and concurrency issues do not exist.
    415 However, interrupts logically introduce concurrency, if the signal handler performs allocation/deallocation (serially reusable problem~\cite{SeriallyReusable}).
    416 In general, the primary issues in a single-threaded allocator are fragmentation and locality.
    417 
    418 \subsubsection{Fragmentation}
    419 \label{s:Fragmentation}
    420 
    421 Fragmentation is memory requested from the OS but not used allocated objects in by the program.
    422 Figure~\ref{f:InternalExternalFragmentation} shows fragmentation is divided into two forms: \emph{internal} or \emph{external}.
    423 
    424 \begin{figure}
    425 \centering
     432
     433\bigskip
     434
    426435\input{IntExtFragmentation}
    427436\caption{Internal and External Fragmentation}
     
    429438\end{figure}
    430439
    431 \newterm{Internal fragmentation} is unaccessible allocated memory, such as headers, trailers, padding, and spacing around an allocated object.
    432 Internal fragmentation is problematic when management space becomes a significant proportion of an allocated object, \eg for objects $<$16 bytes, memory usage doubles.
    433 An allocator strives to keep internal management information to a minimum.
     440
     441\subsection{Single-Threaded Memory-Allocator}
     442\label{s:SingleThreadedMemoryAllocator}
     443
     444In a sequential (single threaded) program, the program thread performs all allocation operations without direct concurrency issues.
     445However, interrupts introduce indirect concurrency, if the signal handler performs allocation/deallocation (serially reusable problem~\cite{SeriallyReusable}).
     446In general, the primary issues in a single-threaded allocator are fragmentation and locality.
     447
     448
     449\subsubsection{Fragmentation}
     450\label{s:Fragmentation}
     451
     452Fragmentation is unused memory requested from the OS.
     453Figure~\ref{f:InternalExternalFragmentation} shows fragmentation has two forms: \emph{internal} or \emph{external}.
     454
     455\newterm{Internal fragmentation} is inaccessible \emph{allocated} memory, such as headers, trailers, \etc.
     456Internal fragmentation is problematic when management space approaches the object size, \eg for objects $<$16 bytes, memory usage doubles.
    434457
    435458\newterm{External fragmentation} is memory not allocated in the program~\cite{Wilson95,Lim98,Siebert00}, which includes all external management data, freed objects, and reserved memory.
    436 This memory is problematic in two ways: heap blowup and highly fragmented memory.
    437 \newterm{Heap blowup} occurs when freed memory cannot be reused for future allocations leading to potentially unbounded external fragmentation growth~\cite{Berger00}.
    438 Memory can become \newterm{highly fragmented} after multiple allocations and deallocations of objects, resulting in a checkerboard of adjacent allocated and free areas, where the free blocks are to small to service requests.
    439 % Figure~\ref{f:MemoryFragmentation} shows an example of how a small block of memory fragments as objects are allocated and deallocated over time.
    440 Heap blowup occurs with allocator policies that are too restrictive in reusing freed memory, \eg the allocated size cannot use a larger free block and/or no coalescing of free storage.
    441 % Blocks of free memory become smaller and non-contiguous making them less useful in serving allocation requests.
    442 % Memory is highly fragmented when most free blocks are unusable because of their sizes.
    443 % For example, Figure~\ref{f:Contiguous} and Figure~\ref{f:HighlyFragmented} have the same quantity of external fragmentation, but Figure~\ref{f:HighlyFragmented} is highly fragmented.
    444 % If there is a request to allocate a large object, Figure~\ref{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while Figure~\ref{f:HighlyFragmented} likely has to request more memory from the OS.
    445 
    446 % \begin{figure}
    447 % \centering
    448 % \input{MemoryFragmentation}
    449 % \caption{Memory Fragmentation}
    450 % \label{f:MemoryFragmentation}
    451 % \vspace{10pt}
    452 % \subfloat[Contiguous]{
    453 %       \input{ContigFragmentation}
    454 %       \label{f:Contiguous}
    455 % } % subfloat
    456 %       \subfloat[Highly Fragmented]{
    457 %       \input{NonContigFragmentation}
    458 % \label{f:HighlyFragmented}
    459 % } % subfloat
    460 % \caption{Fragmentation Quality}
    461 % \label{f:FragmentationQuality}
    462 % \end{figure}
    463 
    464 For a single-threaded memory allocator, three basic approaches for controlling fragmentation are identified~\cite{Johnstone99}.
    465 The first approach is a \newterm{sequential-fit algorithm} with one list of free objects that is searched for a block large enough to fit a requested object size.
    466 Different search policies determine the free object selected, \eg the first free object large enough or closest to the requested size.
     459This memory is problematic resulting in heap blowup and fragmented memory.
     460\newterm{Blowup} occurs when freed memory becomes a checkerboard of adjacent allocated and free areas, where the free blocks are too small to service requests, leading to unbounded external fragmentation growth~\cite{Berger00}.
     461Heap blowup is a fundamental problem in unmanaged languages without compaction.
     462
     463Three basic approaches for controlling fragmentation are identified~\cite{Johnstone99}.
     464The first approach is \newterm{sequential-fit} with a list of free objects (possibly ordered by size) that is searched for a block large enough to fit a requested object.
     465Different search policies determine the free object selected, \eg the first free object large enough (first fit) or closest to the requested size (best fit).
    467466Any storage larger than the request can become spacing after the object or split into a smaller free object.
    468 % The cost of the search depends on the shape and quality of the free list, \eg a linear versus a binary-tree free-list, a sorted versus unsorted free-list.
    469 
    470 The second approach is a \newterm{segregated} or \newterm{binning algorithm} with a set of lists for different sized freed objects.
    471 When an object is allocated, the requested size is rounded up to the nearest bin-size, often leading to space after the object.
    472 A binning algorithm is fast at finding free memory of the appropriate size and allocating it, since the first free object on the free list is used.
    473 Fewer bin sizes means a faster search to find a matching bin, but larger differences between allocation and bin size, which increases unusable space after objects (internal fragmentation).
    474 More bin sizes means a slower search but smaller differences matching between allocation and bin size resulting in less internal fragmentation but more external fragmentation if larger bins cannot service smaller requests.
    475 Allowing larger bins to service smaller allocations when the matching bin is empty means the freed object can be returned to the matching or larger bin (some advantages to either scheme).
    476 % For example, with bin sizes of 8 and 16 bytes, a request for 12 bytes allocates only 12 bytes, but when the object is freed, it is placed on the 8-byte bin-list.
    477 % For subsequent requests, the bin free-lists contain objects of different sizes, ranging from one bin-size to the next (8-16 in this example), and a sequential-fit algorithm may be used to find an object large enough for the requested size on the associated bin list.
    478 
    479 The third approach is a \newterm{splitting} and \newterm{coalescing} algorithms.
    480 When an object is allocated, if there is no matching free storage, a larger free object is split into two smaller objects, one matching the allocation size.
     467
     468The second approach is \newterm{segregation} or \newterm{binning} with a set of lists for different sized freed objects.
     469The request size is rounded up to the nearest bin size, often leading to internal fragmentation after the object.
     470A binning algorithm searches for the smallest bin that covers the request, and selects the first free object, if available.
     471Fewer bin sizes means more internal fragmentation but increased reuse as more request sizes match the bin size.
     472More bin sizes has less internal fragmentation size but more external fragmentation as larger bins cannot service smaller requests.
     473Allowing larger bins to service smaller allocations means the freed object can be returned to the matching or larger bin (some advantages to either scheme).
     474
     475The third approach is \newterm{splitting} and \newterm{coalescing}.
     476If there is no matching free storage for allocation, a larger free object is split to get the allocation and the smaller object is put back on the free list.
     481477For example, in the \newterm{buddy system}, a block of free memory is split into equal chunks; splitting continues until a minimal block is created that fits the allocation.
    482 When an object is deallocated, it is coalesced with the objects immediately before/after it in memory, if they are free, turning them into a larger block.
     478When an object is deallocated, it is coalesced with the objects immediately before/after it in memory, if they are free, creating a larger block.
    483479Coalescing can be done eagerly at each deallocation or lazily when an allocation cannot be fulfilled.
     484480However, coalescing increases latency (unbounded delays) for both allocation and deallocation.
    485481While coalescing does not reduce external fragmentation, the coalesced blocks improve fragmentation quality so future allocations are less likely to cause heap blowup.
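In a binary buddy-system, the coalescing partner is computed rather than searched for; the following sketch assumes blocks are powers of 2 carved from an arena aligned to its own size.
\begin{lstlisting}[language=C]
#include <stdint.h>
#include <stddef.h>

// A block of size 2^k at offset o within the arena has its buddy at
// offset o ^ 2^k, so the coalescing candidate is one exclusive-or away.
static void * buddy_of( void * arena, void * blk, size_t size /* power of 2 */ ) {
	uintptr_t offset = (uintptr_t)blk - (uintptr_t)arena;
	return (char *)arena + (offset ^ size);
}
\end{lstlisting}
If the buddy is also free, the two blocks merge into one of twice the size, and the check repeats at the next level.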
    486 % Splitting and coalescing can be used with other algorithms to avoid highly fragmented memory.
    487482
    488483
     
    495490Hardware takes advantage of the working set through multiple levels of caching and paging, \ie memory hierarchy.
    496491% When an object is accessed, the memory physically located around the object is also cached with the expectation that the current and nearby objects will be referenced within a short period of time.
    497 For example, entire cache lines are transferred between cache and memory, and entire virtual-memory pages are transferred between memory and disk.
     492% For example, entire cache lines are transferred between cache and memory, and entire virtual-memory pages are transferred between memory and disk.
    498493% A program exhibiting good locality has better performance due to fewer cache misses and page faults\footnote{With the advent of large RAM memory, paging is becoming less of an issue in modern programming.}.
    499494
    500 Temporal locality is largely controlled by program accesses to its variables~\cite{Feng05}.
     495Temporal locality is largely controlled by program accesses to variables~\cite{Feng05}.
    501496An allocator has only indirect influence on temporal locality but largely dictates spatial locality.
    502497For temporal locality, an allocator tries to return recently freed storage for new allocations, as this memory is still \emph{warm} in the memory hierarchy.
     
    506501
    507502An allocator can easily degrade locality by increasing the working set.
    508 An allocator can access an unbounded number of free objects when matching an allocation or coalescing, causing multiple cache or page misses~\cite{Grunwald93}.
     503For example, it can access an unbounded number of free objects when matching an allocation or coalescing, causing multiple cache or page misses~\cite{Grunwald93}.
     509504An allocator can spatially separate related data by binning free storage anywhere in memory, so related objects become widely separated.
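A common mechanism for exploiting temporal locality is a LIFO free list, where the most recently freed object, still warm in the memory hierarchy, is the first reused; a minimal sketch:
\begin{lstlisting}[language=C]
#include <stddef.h>

typedef struct node { struct node * next; } node;       // overlaid on free storage

static node * free_list = NULL;

static void free_push( node * n ) {     // deallocate: push most recent
	n->next = free_list;
	free_list = n;
}
static node * alloc_pop( void ) {       // allocate: reuse warmest object
	node * n = free_list;
	if ( n != NULL ) free_list = n->next;
	return n;                       // NULL: fall back to reserved memory
}
\end{lstlisting}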
    510505
     
    513508\label{s:MultiThreadedMemoryAllocator}
    514509
    515 In a concurrent (multi-threaded) program, multiple program threads performs allocation operations and all concurrency issues arise.
    516 Along with fragmentation and locality issues, a multi-threaded allocator must deal with mutual exclusion, false sharing, and additional forms of heap blowup.
     510In a concurrent program, multiple kernel threads (KT) perform allocations, requiring some form of mutual exclusion.
     511Along with fragmentation and locality issues, a multi-threaded allocator must deal with false sharing and additional forms of heap blowup.
    517512
    518513
     
    520515\label{s:MutualExclusion}
    521516
    522 \newterm{Mutual exclusion} provides sequential access to the shared-management data of the heap.
     517% \newterm{Mutual exclusion} provides sequential access to the shared-management data of the heap.
    523518There are two performance issues for mutual exclusion.
    524 First is the cost of performing at least one hardware atomic operation every time a shared resource is accessed.
    525 Second is \emph{contention} on simultaneous access, so some threads must wait until the resource is released.
    526 Contention can be reduced in a number of ways:
    527 1) Using multiple fine-grained locks versus a single lock to spread the contention across the locks.
      519First is the cost of performing atomic instructions to provide mutual exclusion every time a shared resource is accessed.
      520Any solution using an atomic fence, atomic instruction (lock free), or lock along the fast path results in a significant slowdown, even with zero contention.
      521Second is \newterm{contention} on simultaneous access, where threads must wait until the resource is released.
      522Contention can be reduced by:
      5231) Using multiple fine-grained locks versus a few coarse-grained locks to spread the contention.
     5285242) Using trylock and generating new storage if the lock is busy (classic space versus time tradeoff); a trylock sketch appears below.
    529 3) Using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Oyama99}.
    530 However, all approaches have degenerate cases where program contention to the heap is high, which is beyond the allocator's control.
    531 
    532 
    533 \subsubsection{False Sharing}
    534 \label{s:FalseSharing}
    535 
    536 False sharing occurs when two or more threads simultaneously modify different objects sharing a cache line.
    537 Changes now invalidate each thread's cache, even though the threads may be uninterested in the other modified object.
    538 False sharing can occur three ways:
    539 1) Thread T$_1$ allocates objects O$_1$ and O$_2$ on the same cache line and passes O$_2$'s reference to thread T$_2$;
    540 both threads now simultaneously modifying the objects on the same cache line.
    541 2) Objects O$_1$ and O$_2$ are allocated on the same cache line by thread T$_3$ and their references are passed to T$_1$ and T$_2$, which simultaneously modify the objects.
    542 3) T$_2$ deallocates O$_2$, T$_1$ allocates O$_1$ on the same cache line as O$_2$, and T$_2$ reallocated O$_2$ while T$_1$ is using O$_1$.
    543 In all three cases, the allocator performs a hidden and possibly transient (non-determinism) operation, making it extremely difficult to find and fix the issue.
    544 
    545 
    546 \subsubsection{Heap Blowup}
    547 \label{s:HeapBlowup}
    548 
    549 In a multi-threaded program, heap blowup occurs when memory freed by one thread is inaccessible to other threads due to the allocation strategy.
    550 Specific examples are presented in later subsections.
    551 
    552 
    553 \subsection{Multi-Threaded Allocator Features}
    554 \label{s:MultiThreadedAllocatorFeatures}
    555 
    556 The following features are used in the construction of multi-threaded allocators.
    557 
    558 \subsubsection{Multiple Heaps}
    559 \label{s:MultipleHeaps}
    560 
    561 Figure~\ref{f:ThreadHeapRelationship} shows how a multi-threaded allocator reduced contention by subdividing a single heap into multiple heaps.
     525% 3) Using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Fatourou12}.
     526% However, all approaches have degenerate cases where program contention to the heap is high, which is beyond the allocator's control.
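The following trylock sketch illustrates option 2); the helpers @heap_alloc_locked@ and @bump_alloc@ are hypothetical placeholders for the locked free-list path and for carving new storage.
\begin{lstlisting}[language=C]
#include <pthread.h>
#include <stddef.h>

extern void * heap_alloc_locked( size_t size ); // assumes heap lock is held
extern void * bump_alloc( size_t size );        // carve fresh storage

static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;

void * alloc( size_t size ) {
	if ( pthread_mutex_trylock( &heap_lock ) == 0 ) {       // uncontended?
		void * p = heap_alloc_locked( size );
		pthread_mutex_unlock( &heap_lock );
		return p;
	}
	return bump_alloc( size );      // lock busy: trade space for time
}
\end{lstlisting}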
     527Figure~\ref{f:ThreadHeapRelationship} shows how a multi-threaded allocator reduces contention by subdividing a single heap into multiple heaps.
    562528
    563529\begin{figure}
    564530\centering
    565531\subfloat[T:1]{
    566 %       \input{SingleHeap.pstex_t}
    567532        \input{SingleHeap}
    568533        \label{f:SingleHeap}
     
    570535\vrule
    571536\subfloat[T:H]{
    572 %       \input{MultipleHeaps.pstex_t}
    573537        \input{SharedHeaps}
    574538        \label{f:SharedHeaps}
     
    576540\vrule
    577541\subfloat[1:1]{
    578 %       \input{MultipleHeapsGlobal.pstex_t}
    579542        \input{PerThreadHeap}
    580543        \label{f:PerThreadHeap}
     
    586549\begin{description}[leftmargin=*]
    587550\item[T:1 model (Figure~\ref{f:SingleHeap})] is all threads (T) sharing a single heap (1).
    588 The arrows indicate memory movement for allocation/deallocation operations.
    589 Memory is obtained from freed objects, reserved memory, or the OS;
    590 freed memory can be returned to the OS.
    591 To handle concurrency, a single lock is used for all heap operations or fine-grained locking if operations can be made independent.
     551% The arrows indicate memory movement for allocation/deallocation operations.
     552% Memory is obtained from freed objects, reserved memory, or the OS;
     553% freed memory can be returned to the OS.
     554To handle concurrency, a single lock is used for all heap operations or fine-grained (lock-free) locking if operations can be made independent.
    592555As threads perform large numbers of allocations, a single heap becomes a significant source of contention.
    593556
    594557\item[T:H model (Figure~\ref{f:SharedHeaps})] is multiple threads (T) sharing multiple heaps (H).
    595 The allocator independently allocates/deallocates heaps and assigns threads to heaps based on dynamic contention pressure.
    596 Locking is required within each heap, but contention is reduced because fewer threads access a specific heap.
    597 The goal is minimal heaps (storage) and contention per heap (time).
    598 A worst case is more heaps than threads, \eg many threads at startup create a large number of heaps and then the threads reduce.
    599 
    600 % For example, multiple heaps are managed in a pool, starting with a single or a fixed number of heaps that increase\-/decrease depending on contention\-/space issues.
    601 % At creation, a thread is associated with a heap from the pool.
    602 % In some implementations of this model, when the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.
    603 % If an unlocked heap is found, the thread changes its association and uses that heap.
    604 % If all heaps are locked, the thread may create a new heap, use it, and then place the new heap into the pool;
    605 % or the thread can block waiting for a heap to become available.
    606 % While the heap-pool approach often minimizes the number of extant heaps, the worse case can result in more heaps than threads;
    607 % \eg if the number of threads is large at startup with many allocations creating a large number of heaps and then the number of threads reduces.
    608 
    609 % Threads using multiple heaps need to determine the specific heap to access for an allocation/deallocation, \ie association of thread to heap.
    610 % A number of techniques are used to establish this association.
    611 % The simplest approach is for each thread to have a pointer to its associated heap (or to administrative information that points to the heap), and this pointer changes if the association changes.
    612 % For threading systems with thread-local storage, the heap pointer is created using this mechanism;
    613 % otherwise, the heap routines must simulate thread-local storage using approaches like hashing the thread's stack-pointer or thread-id to find its associated heap.
    614 
    615 % The storage management for multiple heaps is more complex than for a single heap (see Figure~\ref{f:AllocatorComponents}).
    616 % Figure~\ref{f:MultipleHeapStorage} illustrates the general storage layout for multiple heaps.
    617 % Allocated and free objects are labelled by the thread or heap they are associated with.
    618 % (Links between free objects are removed for simplicity.)
    619 % The management information for multiple heaps in the static zone must be able to locate all heaps.
    620 % The management information for the heaps must reside in the dynamic-allocation zone if there are a variable number.
    621 % Each heap in the dynamic zone is composed of a list of free objects and a pointer to its reserved memory.
    622 % An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory.
    623 % Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur.
    624 % Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area, pushing part of the storage management complexity back to the OS.
    625 
    626 % \begin{figure}
    627 % \centering
    628 % \input{MultipleHeapsStorage}
    629 % \caption{Multiple-Heap Storage}
    630 % \label{f:MultipleHeapStorage}
    631 % \end{figure}
    632 
    633 Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup.
    634 The external fragmentation experienced by a program with a single heap is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory.
    635 Additionally, objects freed by one heap cannot be reused by other threads without increasing the cost of the memory operations, except indirectly by returning free memory to the OS (see Section~\ref{s:Ownership}).
    636 Returning storage to the OS may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.
      558The allocator allocates/deallocates heaps and assigns threads to heaps, often based on dynamic contention pressure.
     559While locking is required for heap access, contention is (normally) reduced as access is spread across the heaps.
      560Locking can be reduced (eliminated) using the T:C variant, where each CPU has a heap and a thread cannot migrate from its CPU while executing an allocator critical-section, implemented with restartable critical-sections~\cite{Desnoyers19,Dice02} (see also Section~\ref{s:UserlevelThreadingSupport}).
     561% The goal is minimal heaps (storage) and contention per heap (time).
     562Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup, where the worst-case scenario is more heaps than threads.
     563The external fragmentation is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory.
     564When freeing, objects normally need to be returned to their original heap (see Section~\ref{s:Ownership}).
     565% Returning storage to the OS may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.
    637566% In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused.
    638567
    639 Adding a \newterm{global heap} (G) attempts to reduce the cost of obtaining/returning memory among heaps (sharing) by buffering storage within the application address-space.
    640 Now, each heap obtains and returns storage to/from the global heap rather than the OS.
    641 Storage is obtained from the global heap only when a heap allocation cannot be fulfilled, and returned to the global heap when a heap's free memory exceeds some threshold.
    642 Similarly, the global heap buffers this memory, obtaining and returning storage to/from the OS as necessary.
    643 The global heap does not have its own thread and makes no internal allocation requests;
    644 instead, it uses the application thread, which called one of the multiple heaps and then the global heap, to perform operations.
    645 Hence, the worst-case cost of a memory operation includes all these steps.
    646 With respect to heap blowup, the global heap provides an indirect mechanism to move free memory among heaps, which usually has a much lower cost than interacting with the OS to achieve the same goal and is independent of the mechanism used by the OS to present dynamic memory to an address space.
    647 However, since any thread may indirectly perform a memory operation on the global heap, it is a shared resource that requires locking.
    648 A single lock can be used to protect the global heap or fine-grained locking can be used to reduce contention.
    649 In general, the cost is minimal since the majority of memory operations are completed without the use of the global heap.
    650 
    651 \item[1:1 model (Figure~\ref{f:PerThreadHeap})] is each thread (1) has a heap (1), eliminating most contention and locking if threads seldom access another thread's heap (see Section~\ref{s:Ownership}).
      568A shared \newterm{global heap} (G) is often introduced to manage the reserved memory among heaps and centralize interactions with the OS.
      569Instead of heaps making individual object allocations/deallocations through the global heap, resulting in locking and high contention, the global heap partitions the reserved memory into heap (allocation) buffers, which are given out to heaps for their own suballocations.
      570Hence, a heap's allocations are accessed densely in time and space within a small set of buffers, rather than spread sparsely across the entire reserved memory.
     571Buffers are allocated at heap startup, after which allocation often reaches a steady state through free lists.
     572Allocation buffers may increase external fragmentation, since some memory may never be used.
     573
      574\item[1:1 model (Figure~\ref{f:PerThreadHeap})] is each thread (1) having its own heap (1), eliminating most contention and locking if threads seldom access another thread's heap (see Section~\ref{s:Ownership}); a sketch of the thread-to-heap association follows this list.
    652575A thread's objects are consolidated in its heap, better utilizing the cache and paging during thread execution.
    653576In contrast, the T:H model can spread thread objects over a larger area in different heaps.
    654 Thread heaps can also reduces false-sharing, unless there are overlapping memory boundaries from another thread's heap.
    655577%For example, assume page boundaries coincide with cache line boundaries, if a thread heap always acquires pages of memory then no two threads share a page or cache line unless pointers are passed among them.
    656 
    657578When a thread terminates, it can free its heap objects to the global heap, or the thread heap is retained as-is and reused for a new thread in the future.
    658579Destroying a heap can reduce external fragmentation sooner, since all free objects in the global heap are available for immediate reuse.
    659 Alternatively, reusing a heap can aid the inheriting thread, if it has a similar allocation pattern because the heap in primed with unfreed storage of the right sizes.
      580Alternatively, reusing a heap can aid the inheriting thread, if it has a similar allocation pattern, because the heap is primed with freed storage of the right sizes.
    660581\end{description}
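A minimal sketch of the thread-to-heap association for the 1:1 model, assuming thread-local storage and a hypothetical @heap_create@ routine:
\begin{lstlisting}[language=C]
#include <stddef.h>

typedef struct heap heap;               // per-thread heap (opaque here)
extern heap * heap_create( void );      // hypothetical: obtain or reuse a heap

static _Thread_local heap * my_heap = NULL;

// The common allocation path dereferences one thread-local pointer and
// touches no shared data, so no locking is required.
static heap * get_heap( void ) {
	if ( my_heap == NULL ) my_heap = heap_create(); // first allocation
	return my_heap;
}
\end{lstlisting}
Without thread-local storage, the association can be simulated, \eg by hashing the thread id to a heap, at some cost on every allocation.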
    661582
    662583
    663 \subsubsection{User-Level Threading}
    664 
    665 It is possible to use any of the heap models with user-level (M:N) threading.
    666 However, an important goal of user-level threading is for fast operations (creation/termination/context-switching) by not interacting with the OS, which allows the ability to create large numbers of high-performance interacting threads ($>$ 10,000).
    667 It is difficult to retain this goal, if the user-threading model is directly involved with the heap model.
    668 Figure~\ref{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model is provided by the language runtime.
    669 Hence, a user thread allocates/deallocates from/to the heap of the kernel thread on which it is currently executing.
     584\subsubsection{False Sharing}
     585\label{s:FalseSharing}
     586
      587False sharing occurs when threads perform read/write or write/write accesses to different memory locations sharing a cache line~\cite{Bolosky93}.
      588A write invalidates the cache line in the other threads' caches, even though each thread may be uninterested in the other's modified object.
     589False sharing can occur three ways:
     5901) Thread T$_1$ allocates objects O$_1$ and O$_2$ on the same cache line and passes O$_2$'s reference to thread T$_2$.
     5912) Thread T$_1$ allocates object O$_1$ and thread T$_2$ allocates O$_2$, where objects O$_1$ and O$_2$ are on the same cache line.
      5923) T$_2$ deallocates O$_2$, T$_1$ allocates O$_1$ on the same cache line as O$_2$, and T$_2$ reallocates O$_2$ while T$_1$ is using O$_1$.
     593In all three cases, the false sharing is hidden and possibly transient (non-deterministic), making it extremely difficult to find and fix.
     594Case 1) occurs in all three allocator models, and is induced by program behaviour, not the allocator.
      595Cases 2) and 3) are allocator induced, and occur in the T:1 and T:H models due to heap sharing, but not in the 1:1 model with private heaps, except possibly at boundary points among heaps.
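The effect is easily reproduced; in the following sketch (using static storage for brevity, though an allocator placing two threads' objects on one cache line induces the same behaviour), the adjacent counters normally share a 64-byte cache line and the threads repeatedly invalidate each other's caches.
\begin{lstlisting}[language=C]
#include <pthread.h>

static long counters[2];                // adjacent: typically one cache line

static void * work( void * arg ) {
	long * c = &counters[(long)arg];
	for ( long i = 0; i < 100000000; i += 1 ) *c += 1;      // write/write
	return NULL;
}

int main( void ) {
	pthread_t t1, t2;
	pthread_create( &t1, NULL, work, (void *)0 );
	pthread_create( &t2, NULL, work, (void *)1 );
	pthread_join( t1, NULL );
	pthread_join( t2, NULL );
}
\end{lstlisting}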
     596
     597
     598\subsubsection{Object Containers}
     599\label{s:ObjectContainers}
     600
     601Associating header data with every allocation can result in significant internal fragmentation, as shown in Figure~\ref{f:AllocatedObject}.
     602While the header and object are spatially together in memory, they are generally not accessed temporally together~\cite{Feng05}.
     603The result is poor cache usage, since only a portion of the cache line is holding useful data from the program's perspective.
     604% \eg an object is accessed by the program after it is allocated, while the header is accessed by the allocator after it is free.
    670605
    671606\begin{figure}
    672607\centering
    673 \input{UserKernelHeaps}
    674 \caption{User-Level Kernel Heaps}
    675 \label{f:UserLevelKernelHeaps}
     608\input{Container}
     609\caption{Object Container}
     610\label{f:ObjectContainer}
    676611\end{figure}
    677612
    678 Adopting user threading results in a subtle problem with shared heaps.
    679 With kernel threading, an operation started by a kernel thread is always completed by that thread.
    680 For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap, even if preempted, \ie any locking correctness associated with the shared heap is preserved across preemption.
    681 However, this correctness property is not preserved for user-level threading.
    682 A user thread can start an allocation/deallocation on one kernel thread, be preempted (time slice), and continue running on a different kernel thread to complete the operation~\cite{Dice02}.
    683 When the user thread continues on the new kernel thread, it may have pointers into the previous kernel-thread's heap and hold locks associated with it.
    684 To get the same kernel-thread safety, time slicing must be disabled/\-enabled around these operations, so the user thread cannot jump to another kernel thread.
    685 However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is infrequent (milliseconds).
    686 Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically.
    687 Occasional ignoring of a preemption should be benign, but a persistent lack of preemption can result in starvation;
    688 techniques like rolling forward the preemption to the next context switch can be used.
     613The alternative approach factors common header data to a separate location in memory and organizes associated free storage into blocks called \newterm{object containers} (\newterm{superblocks}~\cite[\S~3]{Berger00}) suballocated from a heap's allocation buffers, as in Figure~\ref{f:ObjectContainer}.
     614A trailer may also be used at the end of the container.
     615To find the header from an allocation within the container, the container is aligned on a power of 2 boundary and the lower bits of the object address are truncated (or rounded up, minus the trailer size, to obtain the trailer address).
     616Container size is a tradeoff between internal and external fragmentation as some portion of a container may not be used and this portion is unusable for other kinds of allocations.
     617A consequence of this tradeoff is its effect on spatial locality, which can produce positive or negative results depending on the program's access patterns.
     618Normally, heap ownership applies to its containers.
     619Without ownership, different objects in a container may be on different heap free-lists.
     620Finally, containers are linked together for management purposes, and should all objects in a container become free, the container can be repurposed for different sized objects or given to another heap through a global heap.
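A sketch of the header lookup, assuming a hypothetical 64\,KB container size: because containers are aligned on a power of 2 boundary, masking the low bits of any interior object address yields the container header, \eg an object at 0xFC28\,EF08 maps to the header at 0xFC28\,0000.
\begin{lstlisting}[language=C]
#include <stdint.h>
#include <stddef.h>

#define CONTAINER_SIZE 0x10000U         // 64 KB, must be a power of 2

typedef struct container_header {       // factored, common object data
	size_t object_size;             // same for every object in container
	void * owner_heap;              // ownership information
} container_header;

static container_header * header_of( void * obj ) {
	return (container_header *)
		((uintptr_t)obj & ~(uintptr_t)(CONTAINER_SIZE - 1));
}
\end{lstlisting}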
    689621
    690622
     
    692624\label{s:Ownership}
    693625
    694 \newterm{Ownership} defines which heap an object is returned-to on deallocation.
    695 If a thread returns an object to the heap it was originally allocated from, a heap has ownership of its objects.
     626Object \newterm{ownership} is defined as the heap to which an object is returned upon deallocation~\cite[\S~6.1]{Berger00}.
      627If a thread returns an object to its originating heap, the heap has ownership of its objects.
     628Containers force ownership of internal contiguous objects, unless the entire container changes ownership after it becomes empty.
    696629Alternatively, a thread can return an object to the heap it is currently associated with, which can be any heap accessible during a thread's lifetime.
    697 Figure~\ref{f:HeapsOwnership} shows an example of multiple heaps (minus the global heap) with and without ownership.
    698 Again, the arrows indicate the direction memory conceptually moves for each kind of operation.
    699 For the 1:1 thread:heap relationship, a thread only allocates from its own heap, and without ownership, a thread only frees objects to its own heap, which means the heap is private to its owner thread and does not require any locking, called a \newterm{private heap}.
    700 For the T:1/T:H models with or without ownership or the 1:1 model with ownership, a thread may free objects to different heaps, which makes each heap publicly accessible to all threads, called a \newterm{public heap}.
    701 
    702 \begin{figure}
    703 \centering
    704 \subfloat[Ownership]{
    705         \input{MultipleHeapsOwnership}
    706 } % subfloat
    707 \hspace{0.25in}
    708 \subfloat[No Ownership]{
    709         \input{MultipleHeapsNoOwnership}
    710 } % subfloat
    711 \caption{Heap Ownership}
    712 \label{f:HeapsOwnership}
    713 \end{figure}
    714 
    715 % Figure~\ref{f:MultipleHeapStorageOwnership} shows the effect of ownership on storage layout.
    716 % (For simplicity, assume the heaps all use the same size of reserves storage.)
    717 % In contrast to Figure~\ref{f:MultipleHeapStorage}, each reserved area used by a heap only contains free storage for that particular heap because threads must return free objects back to the owner heap.
    718 % Passive false-sharing may still occur, if delayed ownership is used (see below).
    719 
    720 % \begin{figure}
    721 % \centering
    722 % \input{MultipleHeapsOwnershipStorage.pstex_t}
    723 % \caption{Multiple-Heap Storage with Ownership}
    724 % \label{f:MultipleHeapStorageOwnership}
    725 % \end{figure}
    726 
    727 The main advantage of ownership is preventing heap blowup by returning storage for reuse by the owner heap.
    728 Ownership prevents the classical problem where one thread performs allocations from one heap, passes the object to another thread, and the receiving thread deallocates the object to another heap, hence draining the initial heap of storage.
    729 Because multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
    730 The exception is for the 1:1 model if reserved memory does not overlap a cache-line because all allocated storage within a used area is associated with a single thread.
    731 In this case, there is no allocator-induced active false-sharing because two adjacent allocated objects used by different threads cannot share a cache-line.
    732 Finally, there is no allocator-induced passive false-sharing because two adjacent allocated objects used by different threads cannot occur as free objects are returned to the owner heap.
    733 % For example, in Figure~\ref{f:AllocatorInducedPassiveFalseSharing}, the deallocation by Thread$_2$ returns Object$_2$ back to Thread$_1$'s heap;
    734 % hence a subsequent allocation by Thread$_2$ cannot return this storage.
    735 The disadvantage of ownership is deallocating to another thread's heap so heaps are no longer private and require locks to provide safe concurrent access.
     630The advantage of ownership is preventing heap blowup by returning storage for reuse by the owner heap.
     631Ownership prevents the problem of a producer thread allocating from one heap, passing the object to a consumer thread, and the consumer deallocates the object to another heap, hence draining the producer heap of storage.
      632The disadvantage of ownership is that deallocating to another thread's heap requires an atomic operation.
    736633
    737634Object ownership can be immediate or delayed, meaning free objects may be batched on a separate free list either by the returning or receiving thread.
    738 While the returning thread can batch objects, batching across multiple heaps is complex and there is no obvious time when to push back to the owner heap.
    739 It is better for returning threads to immediately return to the receiving thread's batch list as the receiving thread has better knowledge when to incorporate the batch list into its free pool.
    740 Batching leverages the fact that most allocation patterns use the contention-free fast-path, so locking on the batch list is rare for both the returning and receiving threads.
    741 Finally, it is possible for heaps to temporarily steal owned objects rather than return them immediately and then reallocate these objects again.
    742 It is unclear whether the complexity of this approach is worthwhile.
    743 % However, stealing can result in passive false-sharing.
    744 % For example, in Figure~\ref{f:AllocatorInducedPassiveFalseSharing}, Object$_2$ may be deallocated to Thread$_2$'s heap initially.
    745 % If Thread$_2$ reallocates Object$_2$ before it is returned to its owner heap, then passive false-sharing may occur.
    746 
    747 For thread heaps with ownership, it is possible to combine these approaches into a hybrid approach with both private and public heaps.% (see~Figure~\ref{f:HybridPrivatePublicHeap}).
    748 The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup.
    749 In the hybrid approach, a thread first allocates from its private heap and second from its public heap if no free memory exists in the private heap.
    750 Similarly, a thread first deallocates an object to its private heap, and second to the public heap.
    751 Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps.
    752 % Note, deallocation from the private to the public (dashed line) is unlikely because there is no obvious advantages unless the public heap provides the only interface to the global heap.
    753 Finally, when a thread frees an object it does not own, the object is either freed immediately to its owner's public heap or put in the freeing thread's private heap for delayed ownership, which does allows the freeing thread to temporarily reuse an object before returning it to its owner or batch objects for an owner heap into a single return.
    754 
    755 % \begin{figure}
    756 % \centering
    757 % \input{PrivatePublicHeaps.pstex_t}
    758 % \caption{Hybrid Private/Public Heap for Per-thread Heaps}
    759 % \label{f:HybridPrivatePublicHeap}
    760 % \vspace{10pt}
    761 % \input{RemoteFreeList.pstex_t}
    762 % \caption{Remote Free-List}
    763 % \label{f:RemoteFreeList}
    764 % \end{figure}
    765 
    766 % As mentioned, an implementation may have only one heap interact with the global heap, so the other heap can be simplified.
    767 % For example, if only the private heap interacts with the global heap, the public heap can be reduced to a lock-protected free-list of objects deallocated by other threads due to ownership, called a \newterm{remote free-list}.
    768 % To avoid heap blowup, the private heap allocates from the remote free-list when it reaches some threshold or it has no free storage.
    769 % Since the remote free-list is occasionally cleared during an allocation, this adds to that cost.
    770 % Clearing the remote free-list is $O(1)$ if the list can simply be added to the end of the private-heap's free-list, or $O(N)$ if some action must be performed for each freed object.
    771  
    772 % If only the public heap interacts with other threads and the global heap, the private heap can handle thread-local allocations and deallocations without locking.
    773 % In this scenario, the private heap must deallocate storage after reaching a certain threshold to the public heap (and then eventually to the global heap from the public heap) or heap blowup can occur.
    774 % If the public heap does the major management, the private heap can be simplified to provide high-performance thread-local allocations and deallocations.
    775  
    776 % The main disadvantage of each thread having both a private and public heap is the complexity of managing two heaps and their interactions in an allocator.
    777 % Interestingly, heap implementations often focus on either a private or public heap, giving the impression a single versus a hybrid approach is being used.
    778 % In many case, the hybrid approach is actually being used, but the simpler heap is just folded into the complex heap, even though the operations logically belong in separate heaps.
    779 % For example, a remote free-list is actually a simple public-heap, but may be implemented as an integral component of the complex private-heap in an allocator, masking the presence of a hybrid approach.
    780 
    781 
    782 \begin{figure}
    783 \centering
    784 \subfloat[Object Headers]{
    785         \input{ObjectHeaders}
    786         \label{f:ObjectHeaders}
    787 } % subfloat
    788 \subfloat[Object Container]{
    789         \input{Container}
    790         \label{f:ObjectContainer}
    791 } % subfloat
    792 \caption{Header Placement}
    793 \label{f:HeaderPlacement}
    794 \end{figure}
    795 
    796 
    797 \subsubsection{Object Containers}
    798 \label{s:ObjectContainers}
    799 
    800 Associating header data with every allocation can result in significant internal fragmentation, as shown in Figure~\ref{f:ObjectHeaders}.
    801 Especially if the headers contain redundant data, \eg object size may be the same for many objects because programs only allocate a small set of object sizes.
    802 As well, the redundant data can result in poor cache usage, since only a portion of the cache line is holding useful data from the program's perspective.
    803 Spatial locality can also be negatively affected leading to poor cache locality~\cite{Feng05}.
    804 While the header and object are spatially together in memory, they are generally not accessed temporarily together;
    805 \eg an object is accessed by the program after it is allocated, while the header is accessed by the allocator after it is free.
    806 
    807 An alternative approach factors common header data to a separate location in memory and organizes associated free storage into blocks called \newterm{object containers} (\newterm{superblocks}~\cite{Berger00}), as in Figure~\ref{f:ObjectContainer}.
    808 The header for the container holds information necessary for all objects in the container;
    809 a trailer may also be used at the end of the container.
    810 Similar to the approach described for thread heaps in Section~\ref{s:MultipleHeaps}, if container boundaries do not overlap with memory of another container at crucial boundaries and all objects in a container are allocated to the same thread, allocator-induced active false-sharing is avoided.
    811 
    812 The difficulty with object containers lies in finding the object header/trailer given only the object address, since that is normally the only information passed to the deallocation operation.
    813 One way is to start containers on aligned addresses in memory, then truncate the lower bits of the object address to obtain the header address (or round up and subtract the trailer size to obtain the trailer address).
    814 For example, if an object at address 0xFC28\,EF08 is freed and containers are aligned on 64\,KB (0x0001\,0000) addresses, then the container header is at 0xFC28\,0000.
    815 
    816 Normally, a container has homogeneous objects, \eg object size and ownership.
    817 This approach greatly reduces internal fragmentation since far fewer headers are required, and potentially increases spatial locality as a cache line or page holds more objects since the objects are closer together.
    818 However, different sized objects are further apart in separate containers.
    819 Depending on the program, this may or may not improve locality.
    820 If the program uses several objects from a small number of containers in its working set, then locality is improved since fewer cache lines and pages are required.
    821 If the program uses many containers, there is poor locality, as both caching and paging increase.
    822 Another drawback is that external fragmentation may be increased since containers reserve space for objects that may never be allocated, \ie there are often multiple containers for each size only partially full.
    823 However, external fragmentation can be reduced by using small containers.
    824 
    825 Containers with heterogeneous objects implies different headers describing them, which complicates the problem of locating a specific header solely by an address.
    826 A couple of solutions can be used to implement containers with heterogeneous objects.
    827 However, the problem with allowing objects of different sizes is that the number of objects, and therefore headers, in a single container is unpredictable.
    828 One solution allocates headers at one end of the container, while allocating objects from the other end of the container;
    829 when the headers meet the objects, the container is full.
    830 Freed objects cannot be split or coalesced since this causes the number of headers to change.
    831 The difficulty in this strategy remains in finding the header for a specific object;
    832 in general, a search is necessary to find the object's header among the container headers.
    833 A second solution combines the use of container headers and individual object headers.
    834 Each object header stores the object's heterogeneous information, such as its size, while the container header stores the homogeneous information, such as the owner when using ownership.
    835 This approach allows containers to hold different types of objects, but does not completely separate headers from objects.
    836 % The benefit of the container in this case is to reduce some redundant information that is factored into the container header.
    837 
    838 % In summary, object containers trade off internal fragmentation for external fragmentation by isolating common administration information to remove/reduce internal fragmentation, but at the cost of external fragmentation as some portion of a container may not be used and this portion is unusable for other kinds of allocations.
    839 % A consequence of this tradeoff is its effect on spatial locality, which can produce positive or negative results depending on program access-patterns.
    840 
    841 
    842 \paragraph{Container Ownership}
    843 \label{s:ContainerOwnership}
    844 
    845 Without ownership, objects in a container are deallocated to the heap currently associated with the thread that frees the object.
    846 Thus, different objects in a container may be on different heap free-lists. % (see Figure~\ref{f:ContainerNoOwnershipFreelist}).
    847 With ownership, all objects in a container belong to the same heap,
    848 % (see Figure~\ref{f:ContainerOwnershipFreelist}),
    849 so ownership of an object is determined by the container owner.
    850 If multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
    851 Only with the 1:1 model and ownership is active and passive false-sharing avoided (see Section~\ref{s:Ownership}).
    852 Passive false-sharing may still occur, if delayed ownership is used.
    853 Finally, a completely free container can become reserved storage and be reset to allocate objects of a new size or freed to the global heap.
    854 
    855 % \begin{figure}
    856 % \centering
    857 % \subfloat[No Ownership]{
    858 %       \input{ContainerNoOwnershipFreelist}
    859 %       \label{f:ContainerNoOwnershipFreelist}
    860 % } % subfloat
    861 % \vrule
    862 % \subfloat[Ownership]{
    863 %       \input{ContainerOwnershipFreelist}
    864 %       \label{f:ContainerOwnershipFreelist}
    865 % } % subfloat
    866 % \caption{Free-list Structure with Container Ownership}
    867 % \end{figure}
    868 
    869 When a container changes ownership, the ownership of all objects within it change as well.
    870 Moving a container involves moving all objects on the heap's free-list in that container to the new owner.
    871 This approach can reduce contention for the global heap, since each request for objects from the global heap returns a container rather than individual objects.
    872 
    873 Additional restrictions may be applied to the movement of containers to prevent active false-sharing.
    874 For example, if a container changes ownership through the global heap, then a thread allocating from the newly acquired container is actively false-sharing even though no objects are passed among threads.
    875 Note, once the thread frees the object, no more false sharing can occur until the container changes ownership again.
    876 To prevent this form of false sharing, container movement may be restricted to when all objects in the container are free.
    877 One implementation approach that increases the freedom to return a free container to the OS involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area, again pushing storage management complexity back to the OS.
    878 
    879 % \begin{figure}
    880 % \centering
    881 % \subfloat[]{
    882 %       \input{ContainerFalseSharing1}
    883 %       \label{f:ContainerFalseSharing1}
    884 % } % subfloat
    885 % \subfloat[]{
    886 %       \input{ContainerFalseSharing2}
    887 %       \label{f:ContainerFalseSharing2}
    888 % } % subfloat
    889 % \caption{Active False-Sharing using Containers}
    890 % \label{f:ActiveFalseSharingContainers}
    891 % \end{figure}
    892 
    893 Using containers with ownership increases external fragmentation since a new container for a requested object size must be allocated separately for each thread requesting it.
    894 % In Figure~\ref{f:ExternalFragmentationContainerOwnership}, using object ownership allocates 80\% more space than without ownership.
    895 
    896 % \begin{figure}
    897 % \centering
    898 % \subfloat[No Ownership]{
    899 %       \input{ContainerNoOwnership}
    900 % } % subfloat
    901 % \\
    902 % \subfloat[Ownership]{
    903 %       \input{ContainerOwnership}
    904 % } % subfloat
    905 % \caption{External Fragmentation with Container Ownership}
    906 % \label{f:ExternalFragmentationContainerOwnership}
    907 % \end{figure}
    908 
    909 
    910 \paragraph{Container Size}
    911 \label{s:ContainerSize}
    912 
    913 One way to control the external fragmentation caused by allocating a large container for a small number of requested objects is to vary the size of the container.
    914 As described earlier, container boundaries need to be aligned on addresses that are a power of two to allow easy location of the header (by truncating lower bits).
    915 Aligning containers in this manner also determines the size of the container.
    916 However, the size of the container has different implications for the allocator.
    917 
    918 The larger the container, the fewer containers are needed, and hence, the fewer headers need to be maintained in memory, improving both internal fragmentation and potentially performance.
    919 However, with more objects in a container, there may be more objects that are unallocated, increasing external fragmentation.
    920 With smaller containers, not only are there more containers, but a second new problem arises where objects are larger than the container.
    921 In general, large objects, \eg greater than 64\,KB, are allocated directly from the OS and are returned immediately to the OS to reduce long-term external fragmentation.
    922 If the container size is small, \eg 1\,KB, then a 1.5\,KB object is treated as a large object, which is likely to be inappropriate.
    923 Ideally, it is best to use smaller containers for smaller objects, and larger containers for medium objects, which leads to the issue of locating the container header.
    924 
    925 In order to find the container header when using different sized containers, a super container is used (see~Figure~\ref{f:SuperContainers}).
    926 The super container spans several containers, contains a header with information for finding each container header, and starts on an aligned address.
    927 Super-container headers are found using the same method used to find container headers by dropping the lower bits of an object address.
    928 The containers within a super container may be different sizes or all the same size.
    929 If the containers in the super container are different sizes, then the super-container header must be searched to determine the specific container for an object given its address.
    930 If all containers in the super container are the same size, \eg 16KB, then a specific container header can be found by a simple calculation.
    931 The free space at the end of a super container is used to allocate new containers.
    932 
    933 \begin{figure}
    934 \centering
    935 \input{SuperContainers}
    936 % \includegraphics{diagrams/supercontainer.eps}
    937 \caption{Super Containers}
    938 \label{f:SuperContainers}
    939 \end{figure}
    940 
    941 Minimal internal and external fragmentation is achieved by having as few containers as possible, each being as full as possible.
    942 It is also possible to achieve additional benefit by using larger containers for popular small sizes, as it reduces the number of containers with associated headers.
    943 However, this approach assumes it is possible for an allocator to determine in advance which sizes are popular.
    944 Keeping statistics on requested sizes allows the allocator to make a dynamic decision about which sizes are popular.
    945 For example, after receiving a number of allocation requests for a particular size, that size is considered a popular request size and larger containers are allocated for that size.
    946 If the decision is incorrect, larger containers than necessary are allocated that remain mostly unused.
    947 A programmer may be able to inform the allocator about popular object sizes, using a mechanism like @mallopt@, in order to select an appropriate container size for each object size.
    948 
    949 
    950 \paragraph{Container Free-Lists}
    951 \label{s:containersfreelists}
    952 
    953 The container header allows an alternate approach for managing the heap's free-list.
    954 Rather than maintain a global free-list throughout the heap the containers are linked through their headers and only the local free objects within a container are linked together.
    955 Note, maintaining free lists within a container assumes all free objects in the container are associated with the same heap;
    956 thus, this approach only applies to containers with ownership.
    957 
    958 This alternate free-list approach can greatly reduce the complexity of moving all freed objects belonging to a container to another heap.
    959 To move a container using a global free-list, the free list is first searched to find all objects within the container.
    960 Each object is then removed from the free list and linked together to form a local free-list for the move to the new heap.
    961 With local free-lists in containers, the container is simply removed from one heap's free list and placed on the new heap's free list.
    962 Thus, when using local free-lists, the operation of moving containers is reduced from $O(N)$ to $O(1)$.
    963 However, there is the additional storage cost in the header, which increases the header size, and therefore internal fragmentation.
    964 
    965 % \begin{figure}
    966 % \centering
    967 % \subfloat[Global Free-List Among Containers]{
    968 %       \input{FreeListAmongContainers}
    969 %       \label{f:GlobalFreeListAmongContainers}
    970 % } % subfloat
    971 % \hspace{0.25in}
    972 % \subfloat[Local Free-List Within Containers]{
    973 %       \input{FreeListWithinContainers}
    974 %       \label{f:LocalFreeListWithinContainers}
    975 % } % subfloat
    976 % \caption{Container Free-List Structure}
    977 % \label{f:ContainerFreeListStructure}
    978 % \end{figure}
    979 
    980 When all objects in the container are the same size, a single free-list is sufficient.
    981 However, when objects in the container are different size, the header needs a free list for each size class when using a binning allocation algorithm, which can be a significant increase in the container-header size.
    982 The alternative is to use a different allocation algorithm with a single free-list, such as a sequential-fit allocation-algorithm.
    983 
    984 
    985 \subsubsection{Allocation Buffer}
    986 \label{s:AllocationBuffer}
    987 
    988 An allocation buffer is reserved memory (see Section~\ref{s:AllocatorComponents}) not yet allocated to the program, and is used for allocating objects when the free list is empty.
    989 That is, rather than requesting new storage for a single object, an entire buffer is requested from which multiple objects are allocated later.
    990 Any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or OS, respectively.
    991 The allocation buffer reduces contention and the number of global/OS calls.
    992 For coalescing, a buffer is split into smaller objects by allocations, and recomposed into larger buffer areas during deallocations.
    993 
    994 Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts (simple bump allocation).
    995 Furthermore, to prevent heap blowup, objects should be reused before allocating a new allocation buffer.
    996 Thus, allocation buffers are often allocated more frequently at program/thread start, and then allocations often diminish.
    997 
    998 Using an allocation buffer with a thread heap avoids active false-sharing, since all objects in the allocation buffer are allocated to the same thread.
    999 For example, if all objects sharing a cache line come from the same allocation buffer, then these objects are allocated to the same thread, avoiding active false-sharing.
    1000 Active false-sharing may still occur if objects are freed to the global heap and reused by another heap.
    1001 
    1002 Allocation buffers may increase external fragmentation, since some memory in the allocation buffer may never be allocated.
    1003 A smaller allocation buffer reduces the amount of external fragmentation, but increases the number of calls to the global heap or OS.
    1004 The allocation buffer also slightly increases internal fragmentation, since a pointer is necessary to locate the next free object in the buffer.
    1005 
    1006 The unused part of a container, neither allocated or freed, is an allocation buffer.
    1007 For example, when a container is created, rather than placing all objects within the container on the free list, the objects form an allocation buffer and are allocated from the buffer as allocation requests are made.
    1008 This lazy method of constructing objects is beneficial in terms of paging and caching.
    1009 For example, although an entire container, possibly spanning several pages, is allocated from the OS, only a small part of the container is used in the working set of the allocator, reducing the number of pages and cache lines that are brought into higher levels of cache.
    1010 
    1011 
    1012 \subsubsection{Lock-Free Operations}
    1013 \label{s:LockFreeOperations}
    1014 
    1015 A \newterm{lock-free algorithm} guarantees safe concurrent-access to a data structure, so that at least one thread makes progress, but an individual thread has no execution bound and may starve~\cite[pp.~745--746]{Herlihy93}.
    1016 (A \newterm{wait-free algorithm} puts a bound on the number of steps any thread takes to complete an operation to prevent starvation.)
    1017 Lock-free operations can be used in an allocator to reduce or eliminate the use of locks.
    1018 While locks and lock-free data-structures often have equal performance, lock-free has the advantage of not holding a lock across preemption so other threads can continue to make progress.
    1019 With respect to the heap, these situations are unlikely unless all threads make extremely high use of dynamic-memory allocation, which can be an indication of poor design.
    1020 Nevertheless, lock-free algorithms can reduce the number of context switches, since a thread does not yield/block while waiting for a lock;
    1021 on the other hand, a thread may busy-wait for an unbounded period holding a processor.
    1022 Finally, lock-free implementations have greater complexity and hardware dependency.
    1023 Lock-free algorithms can be applied most easily to simple free-lists, \eg remote free-list, to allow lock-free insertion and removal from the head of a stack.
    1024 Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is correspondingly more complex.
    1025 Michael~\cite{Michael04} and Gidenstam \etal \cite{Gidenstam05} have created lock-free variations of the Hoard allocator.
     635The returning thread batches objects to reduce contention by passing multiple objects at once;
     636however, batching across multiple allocation sizes and heaps is complex and there is no obvious time when to push back to the owner heap.
      637It is simpler for a returning thread to immediately push objects onto the receiving thread's batch list, as the receiving thread has better knowledge of when to incorporate the batch list into its free pool.
      638The receiving thread often delays incorporating returned storage until its local storage is drained.
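A sketch of the batch list as a lock-free stack, assuming C11 atomics: returning threads push objects one at a time, and the owner drains the entire batch with a single exchange when its local free storage is exhausted (push-only producers plus whole-list draining sidestep the ABA problem).
\begin{lstlisting}[language=C]
#include <stdatomic.h>
#include <stddef.h>

typedef struct fnode { struct fnode * next; } fnode;    // overlaid on object

// Returning (non-owner) thread: one atomic push per freed object.
static void remote_free( _Atomic(fnode *) * batch, fnode * obj ) {
	fnode * head = atomic_load( batch );
	do {
		obj->next = head;
	} while ( ! atomic_compare_exchange_weak( batch, &head, obj ) );
}

// Owner thread: take the whole batch at once for its free pool.
static fnode * drain( _Atomic(fnode *) * batch ) {
	return atomic_exchange( batch, NULL );
}
\end{lstlisting}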
     639
     640
     641\subsubsection{User-Level Threading}
     642
     643Any heap model can be used with user-level (M:N) threading.
      644However, an important goal of user threads (UT) is fast operations (creation/termination/context-switching) by not interacting with the OS, allowing large numbers of high-performance interacting threads ($>$ 10,000).
     645In general, UTs use whatever kernel-level heap-model is provided by the language runtime.
     646Hence, a UT allocates/deallocates from/to the heap of the KT on which it is executing.
     647
     648However, there is a subtle concurrency problem with user threading and shared heaps.
     649With kernel threading, an operation started by a KT is always completed by that thread, even if preempted;
     650hence, any locking correctness associated with the shared heap is preserved.
     651However, this correctness property is not preserved for user-level threading.
     652A UT can start an allocation/deallocation on one KT, be preempted by user-level time slicing, and continue running on a different KT to complete the operation~\cite{Dice02}.
     653When the UT continues on the new KT, it may have pointers into the previous KT's heap and hold locks associated with it.
     654To get the same KT safety, time slicing must be disabled/\-enabled around these operations to prevent movement.
     655However, eagerly disabling time slicing on the allocation/deallocation fast path is expensive, especially as preemption is infrequent (millisecond intervals).
     656Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it completes atomically.
     657Occasionally ignoring a preemption is normally benign;
     658in the worst case, ignoring preemption results in starvation.
     659To mitigate starvation, techniques like rolling the preemption forward at the next context switch can be used.
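
A hedged sketch of this lazy detection, assuming Linux/x86-64 and hypothetical linker symbols @__alloc_start@/@__alloc_end@ bounding the allocation code: the time-slice signal handler checks whether the interrupted program counter lies inside that code and, if so, defers the preemption, which is rolled forward at the next allocation boundary.
\begin{C++}
#define _GNU_SOURCE
#include <signal.h>
#include <stdbool.h>
#include <stdint.h>
#include <ucontext.h>

extern char __alloc_start[], __alloc_end[];		// hypothetical bounds of allocation code
static __thread volatile bool rollforward = false;

static void timeslice_handler( int sig, siginfo_t * info, void * uctx ) {
	char * pc = (char *)(uintptr_t)((ucontext_t *)uctx)->uc_mcontext.gregs[REG_RIP];
	if ( pc >= __alloc_start && pc < __alloc_end ) {
		rollforward = true;						// remember the deferred time slice
		return;									// resume so the allocation completes atomically
	}
	// otherwise, perform the normal user-level context switch (not shown)
}
\end{C++}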
    1026660
    1027661
    1028662\section{llheap}
    1029663
    1030 This section presents our new stand-alone, concurrent, low-latency memory-allocator, called llheap (low-latency heap), fulfilling the GNU C Library allocator API~\cite{GNUallocAPI} for C/\CC programs using kernel threads (1:1 threading), with specialized versions for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
    1031 The primary design objective for llheap is low-latency across all allocator calls independent of application access-patterns and/or number of threads, \ie very seldom does the allocator delay during an allocator call.
    1032 Excluded from the low-latency objective are (large) allocations requiring initialization, \eg zero fill, and/or data copying, which are outside the allocator's purview.
     664This section presents our new stand-alone, concurrent, low-latency memory allocator, called llheap (low-latency heap), fulfilling the GNU C Library allocator API~\cite{GNUallocAPI} for C/\CC programs using KTs, with specialized versions for the programming languages \uC and \CFA using user-level threads running over multiple KTs (M:N threading).
     665The primary design objective for llheap is low latency across all allocator calls independent of application access-patterns and/or number of threads, \ie very seldom does the allocator delay during an allocator call.
     666Excluded from the low-latency objective are (large) allocations requiring initialization, \eg zero fill, and/or data copying, along with unbounded delays to acquire storage from the OS or OS scheduling, all of which are outside the allocator's purview.
    1033667A direct consequence of this objective is very simple or no storage coalescing;
    1034668hence, llheap's design is willing to use more storage to lower latency.
    1035 This objective is apropos because systems research and industrial applications are striving for low latency and modern computers have huge amounts of RAM memory.
    1036 Finally, llheap's performance should be comparable with the current best allocators, both in space and time (see performance comparison in Section~\ref{c:Performance}).
    1037 
    1038 
    1039 \subsection{Design Choices}
    1040 
    1041 llheap's design was reviewed and changed multiple times during its development, with the final choices discussed here.
    1042 All designs focused on the allocation/free \newterm{fastpath}, \ie the shortest code path for the most common operations, \eg when an allocation can immediately return free storage or returned storage is not coalesced.
    1043 The model chosen is 1:1, so there is one thread-local heap for each KT.
    1044 (See Figure~\ref{f:THSharedHeaps} but with a heap bucket per KT and no bucket or local-pool lock.)
    1045 Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted.
    1046 Therefore, heaps are uncontended for a KT's memory operations as every KT has its own thread-local heap, modulo operations on the global pool and ownership.
    1047 
    1048 Problems:
    1049 \begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
    1050 \item
    1051 Need to know when a KT starts/terminates to create/delete its heap.
    1052 
    1053 \noindent
    1054 It is possible to leverage constructors/destructors for thread-local objects to get a general handle on when a KT starts/terminates.
    1055 \item
    1056 There is a classic \newterm{memory-reclamation} problem for ownership because storage passed to another thread can be returned to a terminated heap.
    1057 
    1058 \noindent
    1059 The classic solution only deletes a heap after all referents are returned, which is complex.
    1060 The cheap alternative is for heaps to persist for program duration to handle outstanding referent frees.
    1061 If old referents return storage to a terminated heap, it is handled in the same way as an active heap.
    1062 To prevent heap blowup, terminated heaps can be reused by new KTs, where a reused heap may be populated with free storage from a prior KT (external fragmentation).
    1063 In most cases, heap blowup is not a problem because programs have a small allocation set-size, so the free storage from a prior KT is apropos for a new KT.
    1064 \item
    1065 There can be significant external fragmentation as the number of KTs increases.
    1066 
    1067 \noindent
    1068 In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs.
    1069 Since the number of CPUs is relatively small, and a heap is also relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
    1070 \item
    1071 Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}.
    1072 \begin{quote}
    1073 A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable}
    1074 \end{quote}
    1075 If a KT is preempted during an allocation operation, the OS can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.
    1076 Note, the serially-reusable problem can occur in sequential programs with preemption, if the signal handler calls the preempted function, unless the function is serially reusable.
    1077 Essentially, the serially-reusable problem is a race condition on an unprotected critical subsection, where the OS is providing the second thread via the signal handler.
    1078 
    1079 Library @librseq@~\cite{librseq} was used to perform a fast determination of the CPU and to ensure all memory operations complete on one CPU using @librseq@'s restartable sequences, which restart the critical subsection after undoing its writes, if the critical subsection is preempted.
    1080 
    1081 %There is the same serially-reusable problem with UTs migrating across KTs.
    1082 \end{itemize}
    1083 Tests showed this design produced the closest performance match with the best current allocators, and code inspection showed most of these allocators use different variations of this approach.
    1084 
    1085 
    1086 \vspace{5pt}
    1087 \noindent
    1088 The conclusion from this design exercise is: any atomic fence, atomic instruction (lock free), or lock along the allocation fastpath produces significant slowdown.
    1089 For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps might be shared by multiple threads, even when KTs $\le$ N.
    1090 For the T:H=CPU and 1:1 models, locking is eliminated along the allocation fastpath.
    1091 However, T:H=CPU has poor OS support to determine the CPU id (heap id) and prevent the serially-reusable problem for KTs.
    1092 More OS support is required to make this model viable, but there is still the serially-reusable problem with user-level threading.
    1093 So the 1:1 model had no atomic actions along the fastpath and no special OS support requirements.
    1094 The 1:1 model still has the serially-reusable problem with user-level threading, which is addressed in Section~\ref{s:UserlevelThreadingSupport}, and the greatest potential for heap blowup for certain allocation patterns.
    1095 
    1096 
    1097 % \begin{itemize}
    1098 % \item
    1099 % A decentralized design is better to centralized design because their concurrency is better across all bucket-sizes as design 1 shards a few buckets of selected sizes while other designs shards all the buckets. Decentralized designs shard the whole heap which has all the buckets with the addition of sharding @sbrk@ area. So Design 1 was eliminated.
    1100 % \item
    1101 % Design 2 was eliminated because it has a possibility of contention in-case of KT > N while Design 3 and 4 have no contention in any scenario.
    1102 % \item
    1103 % Design 3 was eliminated because it was slower than Design 4 and it provided no way to achieve user-threading safety using librseq. We had to use CFA interruption handling to achieve user-threading safety which has some cost to it.
    1104 % that  because of 4 was already slower than Design 3, adding cost of interruption handling on top of that would have made it even slower.
    1105 % \end{itemize}
    1106 % Of the four designs for a low-latency memory allocator, the 1:1 model was chosen for the following reasons:
    1107 
    1108 % \subsubsection{Advantages of distributed design}
    1109 %
    1110 % The distributed design of llheap is concurrent to work in multi-threaded applications.
    1111 % Some key benefits of the distributed design of llheap are as follows:
    1112 % \begin{itemize}
    1113 % \item
    1114 % The bump allocation is concurrent as memory taken from @sbrk@ is sharded across all heaps as bump allocation reserve. The call to @sbrk@ will be protected using locks but bump allocation (on memory taken from @sbrk@) will not be contended once the @sbrk@ call has returned.
    1115 % \item
    1116 % Low or almost no contention on heap resources.
    1117 % \item
    1118 % It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty.
    1119 % \item
    1120 % Distributed design avoids unnecessary locks on resources shared across all KTs.
    1121 % \end{itemize}
    1122 
    1123 \subsubsection{Allocation Latency}
    1124 
    1125 A primary goal of llheap is low latency, hence the name low-latency heap (llheap).
    1126 Two forms of latency are internal and external.
    1127 Internal latency is the time to perform an allocation, while external latency is the time to obtain or return storage from or to the OS.
    1128 Ideally latency is $O(1)$ with a small constant.
    1129 
    1130 $O(1)$ internal latency means no open searching on the allocation fastpath, which largely prohibits coalescing.
    1131 The mitigating factor is that most programs have a small, fixed allocation pattern, where the majority of allocation operations can be $O(1)$ and heap blowup does not occur without coalescing (although the allocation footprint may be slightly larger).
    1132 Modern computers have large memories so a slight increase in program footprint is not a problem.
    1133 
    1134 $O(1)$ external latency means obtaining one large storage area from the OS and subdividing it across all program allocations, which requires a good guess at the program storage high-watermark and entails potentially large external fragmentation.
    1135 Excluding real-time OSs, OS operations are unbounded, and hence some external latency is unavoidable.
    1136 The mitigating factor is that OS calls can often be reduced if a programmer has a sense of the storage high-watermark and the allocator is capable of using this information (see @malloc_expansion@ \pageref{p:malloc_expansion}).
    1137 Furthermore, while OS calls are unbounded, many are now reasonably fast, so their latency is tolerable because it occurs infrequently.
    1138 
    1139 
    1140 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1141 
    1142 \subsection{llheap Structure}
    1143 
    1144 Figure~\ref{f:llheapStructure} shows the design of llheap, which uses the following features:
    1145 1:1 multiple-heap model to minimize the fastpath,
    1146 can be built with or without heap ownership,
     669This objective is apropos because systems research and industrial applications are striving for low latency and modern computers have huge amounts of RAM.
     670Finally, llheap's performance must be comparable with current allocators, both in space and time (see performance comparison in Section~\ref{c:Performance}).
     671
     672
     673\subsection{llheap Design}
     674
     675Figure~\ref{f:llheapDesign} shows the design of llheap, which uses the following features:
     6761:1 allocator model eliminating locking on the fast path,
     677separate small (@sbrk@) and large object management (@mmap@),
    1147678headers per allocation versus containers,
     679small object binning (buckets) forming lists for different sized freed objects,
     680optional fast-lookup table for converting allocation requests into bucket sizes,
    1148681no coalescing to minimize latency,
    1149 global heap memory (pool) obtained from the OS using @mmap@ to create and reuse heaps needed by threads,
    1150 local reserved memory (pool) per heap obtained from global pool,
    1151 global reserved memory (pool) obtained from the OS using @sbrk@ call,
    1152 optional fast-lookup table for converting allocation requests into bucket sizes,
    1153 optional statistic-counters table for accumulating counts of allocation operations.
     682optional heap ownership (build time),
     683reserved memory (buffer pool) per heap obtained from a global pool,
     684global heap managing freed thread heaps and interacting with the OS to obtain storage,
     685optional statistic-counters table for accumulating counts of allocation operations and a debugging version for testing (build time).
    1154686
    1155687\begin{figure}
     
    1157689% \includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps}
    1158690\input{llheap}
    1159 \caption{llheap Structure}
    1160 \label{f:llheapStructure}
     691\caption{llheap Design}
     692\label{f:llheapDesign}
    1161693\end{figure}
    1162694
    1163 llheap starts by creating an array of $N$ global heaps from storage obtained using @mmap@, where $N$ is the number of computer cores, that persists for program duration.
    1164 There is a global bump-pointer to the next free heap in the array.
    1165 When this array is exhausted, another array of heaps is allocated.
    1166 There is a global top pointer for an intrusive linked-list to chain free heaps from terminated threads.
    1167 When statistics are turned on, there is a global top pointer for an intrusive linked-list to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.
    1168 
    1169 When a KT starts, a heap is allocated from the current array for exclusive use by the KT.
    1170 When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of number of heaps.
    1171 The free heaps are stored on stack so hot storage is reused first.
    1172 Preserving all heaps, created during the program lifetime, solves the storage lifetime problem when ownership is used.
    1173 This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially.
     695llheap starts by creating an empty array for $N$ global heaps from storage obtained using @mmap@ that persists for program duration, where $N$ is the number of computer cores.
     696There is a global last-array pointer and bump-pointer within this array to locate the next free heap storage.
     697When an array's storage is exhausted, another empty array is allocated.
     698Terminated threads push their heap onto a global stack via its top pointer, where free heaps are intrusively linked.
     699When statistics are turned on, there is a global top pointer for a intrusive linked-list to link \emph{all} the heaps (not shown), which is traversed to accumulate statistics counters across heaps when @malloc_stats@ is called.
     700
     701When a KT starts, it pops heap storage from the heap free-list, or if empty, gets the next free heap-storage.
     702When a KT terminates, its heap is pushed onto the heap free-list for reuse by a new KT, which prevents unbounded heap growth.
     703The free heaps are stored in a stack so hot storage is reused first.
     704Preserving all heaps created during the program lifetime solves the storage lifetime problem when ownership is used.
     705This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially, which is rare.
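
A minimal sketch of this lifecycle using POSIX threads, where @pushHeap@/@popHeap@ are hypothetical operations on the global free-heap stack and a thread-specific-data destructor recycles the heap at KT termination:
\begin{C++}
#include <pthread.h>
#include <stddef.h>

typedef struct Heap Heap;
extern void pushHeap( Heap * h );				// hypothetical: push onto global free-heap stack
extern Heap * popHeap( void );					// hypothetical: reuse a freed heap or carve a new one

static pthread_key_t heapKey;
static __thread Heap * myHeap = NULL;

static void heapRelease( void * h ) { pushHeap( (Heap *)h ); }	// runs at KT exit
static void heapKeyInit( void ) { pthread_key_create( &heapKey, heapRelease ); }

Heap * getHeap( void ) {						// fast path: one thread-local load
	if ( myHeap == NULL ) {						// first allocation by this KT?
		static pthread_once_t once = PTHREAD_ONCE_INIT;
		pthread_once( &once, heapKeyInit );
		myHeap = popHeap();
		pthread_setspecific( heapKey, myHeap );	// register heap for cleanup at KT exit
	}
	return myHeap;
}
\end{C++}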
     706
     707Each heap uses segregated free-buckets that have free objects distributed across 60 different sizes from 16 to 16M.
     708All objects in a bucket are the same size.
     709The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation, which is specified by calling @mallopt( M_MMAP_THRESHOLD )@, where the crossover must be $\ge$ the page size and $\le$ the largest bucket (16M).
     710Each cache-aligned bucket has a stack of the same-sized freed objects, where a stack ensures hot storage is reused first.
    1174711llheap can be configured with object ownership, where an object is freed to the heap from which it is allocated, or object no-ownership, where an object is freed to the KT's current heap.
    1175 
    1176 Each heap uses segregated free-buckets that have free objects distributed across 91 different sizes from 16 to 4M.
    1177 All objects in a bucket are of the same size.
    1178 The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the OS.
    1179 Each free bucket of a specific size has two lists.
    1180 1) A free stack used solely by the KT heap-owner, so push/pop operations do not require locking.
    1181 The free objects are a stack so hot storage is reused first.
    1182 2) For ownership, a shared away-stack for KTs to return storage allocated by other KTs, so push/pop operations require locking.
    1183 When the free stack is empty, the entire ownership stack is removed and becomes the head of the corresponding free stack.
     712For ownership, a shared remote stack is added to the freelist structure, so push/pop operations require locking.
     713Pushes are eager on each remote free \vs batching, and pops are lazy: when no local storage is available, the entire remote stack is gulped and added to the bucket's free list.
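
Putting these pieces together, a hedged sketch of the per-heap data structures (names and layout are illustrative, not llheap's actual declarations):
\begin{C++}
#include <stdatomic.h>
#include <stddef.h>

enum { NBUCKETS = 60 };							// bucket sizes from 16 to 16M

typedef struct Free { struct Free * next; } Free;	// intrusive link through a freed object

typedef struct {								// cache aligned in the real layout
	Free * freeList;							// private stack: owner KT only, no locking
	_Atomic(Free *) remoteList;					// shared stack: frees from other KTs
	size_t blockSize;							// fixed object size of this bucket
} Bucket;

typedef struct Heap {
	Bucket buckets[NBUCKETS];					// segregated fixed-size free lists
	char * bufStart, * bufEnd;					// bump-allocation buffer from the sbrk area
	struct Heap * next;							// intrusive link on the global free-heap stack
} Heap;
\end{C++}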
     714
     715Initial threads are assigned empty heaps from the heap array.
     716The first thread allocation causes a request for storage from the shared @sbrk@ area.
     717The size of this request is the maximum of the request size and the @sbrk@-extension-size / 16.
     718This heuristic means the @sbrk@ area is subdivided into separate heap buffers (HB) per thread, providing no contention and data locality.
     719A thread does bump allocation in its current buffer until it starts reusing freed storage or the buffer has insufficient storage, at which point it obtains another buffer.
     720Thread buffers are not linked;
     721they are only logically connected to the thread through its allocated and deallocated storage.
     722When a thread ends, its heap is returned to the heap array but no storage is released.
     723A new thread receiving a freed heap starts with it fully populated with freed storage.
     724The heuristic is that threads often do similar work, so the free storage in the heap is reusable, resulting in less internal fragmentation.
     725%The heuristic is that threads often do similar work so the free storage in the heap is immediately available.
     726%The downside is the risk of more external fragmentation, if the freed storage is never reused.
     727The downside is that, if the freed storage is never reused, it creates external fragmentation.
    1184728
    1185729Algorithm~\ref{alg:heapObjectAlloc} shows the allocation outline for an object of size $S$.
    1186 First, the allocation is divided into small (@sbrk@) or large (@mmap@).
    1187 For large allocations, the storage is mapped directly from the OS.
     730The allocation is classified as small (@sbrk@) or large (@mmap@).
    1188731For small allocations, $S$ is quantized into a bucket size.
    1189 Quantizing is performed using a binary search over the ordered bucket array.
    1190 An optional optimization is fast lookup $O(1)$ for sizes < 64K from a 64K array of type @char@, where each element has an index to the corresponding bucket.
    1191 The @char@ type restricts the number of bucket sizes to 256.
    1192 For $S$ > 64K, a binary search is used.
    1193 Then, the allocation storage is obtained from the following locations (in order), with increasing latency:
    1194 bucket's free stack,
    1195 bucket's away stack,
    1196 heap's local pool,
    1197 global pool,
    1198 OS (@sbrk@).
    1199 
    1200 \begin{algorithm}
    1201 \caption{Dynamic object allocation of size $S$}\label{alg:heapObjectAlloc}
    1202 \begin{algorithmic}[1]
    1203 \State $\textit{O} \gets \text{NULL}$
    1204 \If {$S >= \textit{mmap-threshold}$}
    1205         \State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$
    1206 \Else
    1207         \State $\textit{B} \gets \text{smallest free-bucket} \geq S$
    1208         \If {$\textit{B's free-list is empty}$}
    1209                 \If {$\textit{B's away-list is empty}$}
    1210                         \If {$\textit{heap's allocation buffer} < S$}
    1211                                 \State $\text{get allocation from global pool (which might call \lstinline{sbrk})}$
    1212                         \EndIf
    1213                         \State $\textit{O} \gets \text{bump allocate an object of size S from allocation buffer}$
    1214                 \Else
    1215                         \State $\textit{merge B's away-list into free-list}$
    1216                         \State $\textit{O} \gets \text{pop an object from B's free-list}$
    1217                 \EndIf
    1218         \Else
    1219                 \State $\textit{O} \gets \text{pop an object from B's free-list}$
    1220         \EndIf
    1221         \State $\textit{O's owner} \gets \text{B}$
    1222 \EndIf
    1223 \State $\Return \textit{ O}$
     732Quantizing is performed using a direct lookup for sizes < 64K or a binary search over the ordered bucket array for $S$ $\ge$ 64K.
     733Then, the allocation storage is obtained from the following locations, in order of increasing latency: the bucket's free stack, the heap's local buffer, the bucket's remote stack, the global buffer, the OS (@sbrk@).
     734For large allocations, the storage is directly allocated from the OS using @mmap@.
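
A hedged sketch of the quantization step, assuming a hypothetical sorted @bucketSizes@ array and a byte-indexed table for small requests:
\begin{C++}
#include <stddef.h>

enum { NBUCKETS = 60, LOOKUP_MAX = 64 * 1024 };
extern const size_t bucketSizes[NBUCKETS];		// hypothetical: 16, 32, 48, ..., 16M (sorted)
extern unsigned char lookup[LOOKUP_MAX];		// lookup[s] = index of smallest bucket >= s

unsigned size2bucket( size_t size ) {			// size includes the header
	if ( size < LOOKUP_MAX ) return lookup[size];	// O(1) fast path
	unsigned lo = 0, hi = NBUCKETS - 1;			// binary search over the ordered bucket sizes
	while ( lo < hi ) {
		unsigned mid = (lo + hi) / 2;
		if ( bucketSizes[mid] < size ) lo = mid + 1; else hi = mid;
	}
	return lo;									// smallest bucket >= size
}
\end{C++}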
     735
     736\begin{algorithm}[t]
     737\caption{Dynamic object allocation of size $S$}
     738\label{alg:heapObjectAlloc}
     739\begin{algorithmic}
     740\STATE $S \gets S + \text{header-size}$
     741\IF {$S < \textit{mmap-threshold}$}
     742        \STATE $\textit{B} \gets \text{smallest free-bucket} \geq S$
     743        \IF {$\textit{B's free-list \(\neg\)empty}$}
     744                \STATE $\textit{O} \gets \text{pop an object from B's free-list}$
     745        \ELSIF {$\textit{heap's allocation buffer} \ge \textit{B's size}$}
     746                \STATE $\textit{O} \gets \text{bump allocate object of size B from allocation buffer}$
     747        \ELSIF {$\textit{heap's remote-list \(\neg\)empty}$}
     748                \STATE $\textit{merge heap's remote-list into free-list}$
     749                \STATE $\textit{O} \gets \text{pop an object from B's free-list}$
     750        \ELSE
     751                \STATE $\textit{O} \gets \text{allocate an object of size B from global pool}$
     752        \ENDIF
     753\ELSE
     754        \STATE $\textit{O} \gets \text{allocate an object of size S using \lstinline{mmap} system-call}$
     755\ENDIF
     756\RETURN $\textit{O}$
    1224757\end{algorithmic}
    1225758\end{algorithm}
    1226759
    1227 \begin{algorithm}
    1228 \caption{Dynamic object free at address $A$ with object ownership}\label{alg:heapObjectFreeOwn}
    1229 \begin{algorithmic}[1]
    1230 \If {$\textit{A mapped allocation}$}
    1231         \State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
    1232 \Else
    1233         \State $\text{B} \gets \textit{O's owner}$
    1234         \If {$\textit{B is thread-local heap's bucket}$}
    1235                 \State $\text{push A to B's free-list}$
    1236         \Else
    1237                 \State $\text{push A to B's away-list}$
    1238         \EndIf
    1239 \EndIf
     760Algorithm~\ref{alg:heapObjectFreeOwn} shows the deallocation (free) outline for an object at address $A$ with ownership.
     761First, the address is divided into small (@sbrk@) or large (@mmap@).
     762For small allocations, the bucket associated with the request size is retrieved from the allocation header.
     763If the bucket is local to the thread, the allocation is pushed onto the thread's associated bucket.
     764If the bucket is not local to the thread, the allocation is pushed onto the owning thread's remote stack.
     765For large allocations, the storage is unmapped back to the OS.
     766Without object ownership, the algorithm is the same as for ownership except when the bucket is not local to the thread.
     767In that case, the deallocating thread computes its own bucket with the same size as the owner's bucket, and pushes the allocation onto that bucket, \ie no search is required.
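
For concreteness, a hedged sketch of this dispatch, reusing the illustrative types and @myHeap@ from the sketches above, and assuming a header whose first field is a union of the bucket pointer and the mapped size, with the low-order bit marking a mapped allocation:
\begin{C++}
#include <stdatomic.h>
#include <stdint.h>
#include <sys/mman.h>

typedef struct {								// illustrative header, not llheap's exact layout
	union { Bucket * bucket; size_t mappedSize; };	// low-order bit: mapped-allocation flag
	size_t requestSize;
} Header;
enum { MAPPED = 1 };

void free_sketch( void * addr ) {
	if ( addr == NULL ) return;					// free( NULL ) is a no-op
	Header * h = (Header *)addr - 1;			// header immediately precedes the data
	if ( (uintptr_t)h->bucket & MAPPED ) {		// large allocation?
		munmap( h, h->mappedSize & ~(size_t)MAPPED );	// return storage directly to the OS
	} else {
		Bucket * b = h->bucket;
		Free * f = (Free *)addr;				// reuse the object storage as the link
		if ( b >= myHeap->buckets && b < myHeap->buckets + NBUCKETS ) {
			f->next = b->freeList;  b->freeList = f;	// local bucket: no locking
		} else {								// remote bucket: lock-free push
			Free * old = atomic_load( &b->remoteList );
			do { f->next = old; } while ( ! atomic_compare_exchange_weak( &b->remoteList, &old, f ) );
		}
	}
}
\end{C++}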
     768
     769\begin{algorithm}[t]
     770\caption{Dynamic object free at address $A$ with object ownership}
     771\label{alg:heapObjectFreeOwn}
     772\begin{algorithmic}
     773\IF {$\textit{A heap allocation}$}
     774        \STATE $\text{B} \gets \textit{A's owner bucket}$
     775        \IF {$\textit{B's thread = current heap thread}$}
     776                \STATE $\text{push A to B's free-list}$
     777        \ELSE
     778                \STATE $\text{push A to B's remote-list}$
     779        \ENDIF
     780\ELSE
     781        \STATE $\text{return A to system using system call \lstinline{munmap}}$
     782\ENDIF
    1240783\end{algorithmic}
    1241784\end{algorithm}
    1242785
     786\begin{comment}
    1243787\begin{algorithm}
    1244 \caption{Dynamic object free at address $A$ without object ownership}\label{alg:heapObjectFreeNoOwn}
     788\caption{Dynamic object free at address $A$ without object ownership}
     789\label{alg:heapObjectFreeNoOwn}
    1245790\begin{algorithmic}[1]
    1246 \If {$\textit{A mapped allocation}$}
    1247         \State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
    1248 \Else
    1249         \State $\text{B} \gets \textit{O's owner}$
    1250         \If {$\textit{B is thread-local heap's bucket}$}
    1251                 \State $\text{push A to B's free-list}$
    1252         \Else
    1253                 \State $\text{C} \gets \textit{thread local heap's bucket with same size as B}$
    1254                 \State $\text{push A to C's free-list}$
    1255         \EndIf
    1256 \EndIf
     791\IF {$\textit{A mapped allocation}$}
     792        \STATE $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
     793\ELSE
     794        \STATE $\text{B} \gets \textit{O's owner}$
     795        \IF {$\textit{B is thread-local heap's bucket}$}
     796                \STATE $\text{push A to B's free-list}$
     797        \ELSE
     798                \STATE $\text{C} \gets \textit{thread local heap's bucket with same size as B}$
     799                \STATE $\text{push A to C's free-list}$
     800        \ENDIF
     801\ENDIF
    1257802\end{algorithmic}
    1258803\end{algorithm}
    1259 
    1260 
    1261 Algorithm~\ref{alg:heapObjectFreeOwn} shows the deallocation (free) outline for an object at address $A$ with ownership.
    1262 First, the address is divided into small (@sbrk@) or large (@mmap@).
    1263 For large allocations, the storage is unmapped back to the OS.
    1264 For small allocations, the bucket associated with the request size is retrieved.
    1265 If the bucket is local to the thread, the allocation is pushed onto the thread's associated bucket.
    1266 If the bucket is not local to the thread, the allocation is pushed onto the owning thread's associated away stack.
    1267 
    1268 Algorithm~\ref{alg:heapObjectFreeNoOwn} shows the deallocation (free) outline for an object at address $A$ without ownership.
    1269 The algorithm is the same as for ownership except if the bucket is not local to the thread.
    1270 Then the corresponding bucket of the owner thread is computed for the deallocating thread, and the allocation is pushed onto the deallocating thread's bucket.
    1271 
    1272 Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through the @malloc@ and @free@ routines, which are the only routines to directly access and manage the internal data structures of the heap.
     804\end{comment}
     805
     806Finally, the llheap design funnels all allocation/deallocation operations through the @malloc@ and @free@ routines, which are the only routines to directly access and manage the internal data structures of the heap.
    1273807Other allocation operations, \eg @calloc@, @memalign@, and @realloc@, are composed of calls to @malloc@ and possibly @free@, and may manipulate header information after storage is allocated.
    1274808This design simplifies heap-management code during development and maintenance.
    1275809
    1276810
     811\subsubsection{Bounded Allocation}
     812
     813The llheap design results in bounded allocation.
     814For small allocations, once all the buckets have freed objects, storage is recycled.
     815For large allocations, the storage is directly recycled back to the OS.
     816When a thread terminates, its heap is recycled to the next new thread and the above process begins for that thread.
     817The pathological case is threads allocating a large amount of storage, freeing it, and then quiescing, which demonstrates that the bound constant can be large.
     818This pathological pattern occurs for \emph{immortal} threads, \eg I/O threads with program lifetime and bursts of activity performing many allocations/deallocations.
     819Hence, independent of external fragmentation in thread heaps, storage cannot grow unbounded unless the program does not free.
     820
     821
    1277822\subsubsection{Alignment}
    1278823
    1279 Allocators have a different minimum storage alignment from the hardware's basic types.
    1280 Often the minimum allocator alignment, $M$, is the bus width (32 or 64-bit), the largest register (double, long double), largest atomic instruction (DCAS), or vector data (MMMX).
    1281 The reason for this larger requirement is the lack of knowledge about the data type occupying the allocation.
    1282 Hence, an allocator assumes the worst-case scenario for the start of data and the compiler correctly aligns items within this data because it knows their types.
    1283 Often the minimum storage alignment is an 8/16-byte boundary on a 32/64-bit computer.
    1284 Alignments larger than $M$ are normally a power of 2, such as page alignment (4/8K).
     824The minimum storage alignment $M$ comes from the architecture application-binary-interface (ABI) based on hardware factors: bus width (32 or 64-bit), largest register (double, long double), largest atomic instruction (double compare-and-swap), or vector data (Intel MMX).
     825An access with a nonaligned address may be slow or cause an error.
     826A memory allocator must assume the largest hardware requirement because it is unaware of the data type occupying the allocation.
     827Often the minimum storage alignment is an 8/16-byte boundary on a 32/64-bit computer, respectively.
     828Alignments larger than $M$ are powers of 2, such as page alignment (4/8K).
    1285829Any alignment less than $M$ is raised to the minimal alignment.
    1286830
    1287 llheap aligns its header at the $M$ boundary and its size is $M$;
    1288 hence, data following the header is aligned at $M$.
    1289 This pattern means there is no minimal alignment computation along the allocation fastpath, \ie new storage and reused storage is always correctly aligned.
    1290 An alignment $N$ greater than $M$ is accomplished with a \emph{pessimistic} request for storage that ensures \emph{both} the alignment and size request are satisfied, \eg:
     831llheap aligns its allocation header on an $M$ boundary and its size is $M$, making the following data $M$ aligned.
     832This pattern means there is no minimal alignment computation along the allocation fast path, \ie new storage and reused storage is always correctly aligned.
     833An alignment $N$ greater than $M$ is accomplished with a \emph{pessimistic} request for storage that ensures \emph{both} the alignment and size request are satisfied.
    1291834\begin{center}
    1292835\input{Alignment2}
     
    1295838The approach is pessimistic because $P$ may happen to have the correct alignment $N$, even though the initial allocation requested sufficient space to move to the next multiple of $N$.
    1296839In this case, there is $alignment - M$ bytes of unused storage after the data object, which could be used by @realloc@.
    1297 Note, the address returned by the allocation is $A$, which is subsequently returned to @free@.
    1298 To correctly free the object, the value $P$ must be computable from $A$, since that is the actual start of the allocation, from which $H$ can be computed $P - M$.
    1299 Hence, there must be a mechanism to detect when $P$ $\neq$ $A$ and then compute $P$ from $A$.
     840Note, the address returned by the allocation is $A$, which is subsequently returned for deallocation.
     841However, the deallocation requires the value $P$, which must be computable from $A$, from which $H$ is computed as $P - M$.
     842Hence, there must be a mechanism to detect $P$ $\neq$ $A$ and compute $P$ from $A$.
    1300843
    1301844To detect and perform this computation, llheap uses two headers:
    1302 the \emph{original} header $H$ associated with the allocation, and a \emph{fake} header $F$ within this storage before the alignment boundary $A$, e.g.:
     845the \emph{original} header $H$ associated with the allocation, and a \emph{fake} header $F$ within this storage before the alignment boundary $A$.
    1303846\begin{center}
    1304847\input{Alignment2Impl}
    1305848\end{center}
    1306849Since every allocation is aligned at $M$, $P$ $\neq$ $A$ only holds for alignments greater than $M$.
    1307 When $P$ $\neq$ $A$, the minimum distance between $P$ and $A$ is $M$ bytes, due to the pessimistic storage-allocation.
     850When $P$ $\neq$ $A$, the minimum distance between $P$ and $A$ is $M$ bytes, due to the pessimistic storage allocation.
    1308851Therefore, there is always room for an $M$-byte fake header before $A$.
    1309852The fake header must supply an indicator to distinguish it from a normal header and the location of address $P$ generated by the allocation.
    1310 This information is encoded as an offset from A to P and the initialize alignment (discussed in Section~\ref{s:ReallocStickyProperties}).
    1311 To distinguish a fake header from a normal header, the least-significant bit of the alignment is used because the offset participates in multiple calculations, while the alignment is just remembered data.
     853This information is encoded as an offset from A to P and the initial alignment (discussed in Section~\ref{s:ReallocStickyProperties}).
     854To distinguish a fake header from a normal header, the least-significant bit of the alignment is set to 1 because the offset participates in multiple calculations, while the alignment is just remembered data.
    1312855\begin{center}
    1313856\input{FakeHeader}
    1314857\end{center}
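
A hedged sketch of the pessimistic computation for an aligned allocation with alignment $N > M$, where @FakeHeader@ is an illustrative type, not llheap's declaration:
\begin{C++}
#include <stdint.h>
#include <stdlib.h>

enum { M = 16 };								// assumed minimum allocator alignment
typedef struct { uint32_t offset, alignment; } FakeHeader;	// illustrative layout

void * memalign_sketch( size_t N, size_t size ) {	// N: power of 2 greater than M
	char * P = malloc( size + N - M );			// pessimistic: room to reach the next N boundary
	uintptr_t A = ((uintptr_t)P + N - 1) & ~(uintptr_t)(N - 1);	// round up to N
	if ( (char *)A != P ) {						// A - P >= M, so a fake header fits before A
		FakeHeader * F = (FakeHeader *)A - 1;
		F->offset = (char *)A - P;				// recover P (and H = P - M) at deallocation
		F->alignment = N | 1;					// low bit marks this as a fake header
	}
	return (void *)A;
}
\end{C++}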
    1315858
     859Note, doing alignment with containers requires a separate container for the aligned fixed-sized objects, so there are more kinds of containers that must be managed.
     860
    1316861
    1317862\subsubsection{\lstinline{realloc} and Sticky Properties}
    1318863\label{s:ReallocStickyProperties}
    1319864
    1320 The allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data.
    1321 The realloc pattern is simpler than the suboptimal manually steps.
     865The allocation routine @realloc@ provides a memory management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data.
     866The realloc pattern is simpler than the suboptimal manual steps.
    1322867\begin{flushleft}
     868\setlength{\tabcolsep}{10pt}
    1323869\begin{tabular}{ll}
    1324 \multicolumn{1}{c}{\textbf{realloc pattern}} & \multicolumn{1}{c}{\textbf{manually}} \\
    1325 \begin{lstlisting}
     870\multicolumn{1}{c}{\textbf{realloc pattern}} & \multicolumn{1}{c}{\textbf{manual}} \\
     871\begin{C++}
    1326872T * naddr = realloc( oaddr, newSize );
    1327873
    1328874
    1329875
    1330 \end{lstlisting}
     876\end{C++}
    1331877&
    1332 \begin{lstlisting}
     878\begin{C++}
    1333879T * naddr = (T *)malloc( newSize ); $\C[2in]{// new storage}$
    1334880memcpy( naddr, addr, oldSize );  $\C{// copy old bytes}$
    1335881free( addr );                           $\C{// free old storage}$
    1336882addr = naddr;                           $\C{// change pointer}\CRT$
    1337 \end{lstlisting}
     883\end{C++}
    1338884\end{tabular}
    1339885\end{flushleft}
    1340 The manual steps are suboptimal because there may be sufficient internal fragmentation at the end of the allocation due to bucket sizes.
    1341 If this storage is large enough, it eliminates a new allocation and copying.
     886The manual steps are suboptimal because there may be internal fragmentation at the end of the allocation due to bucket sizes.
     887If this storage is sufficiently large, it eliminates a new allocation and copying.
    1342888Alternatively, if the storage is made smaller, there may be a reasonable crossover point, where just increasing the internal fragmentation eliminates a new allocation and copying.
    1343 This pattern should be used more frequently to reduce storage management costs.
     889Hence, using @realloc@ as often as possible can reduce storage management costs.
    1344890In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc( newSize )@, and if @newSize@ is 0, @realloc@ does a @free( oaddr )@, so all allocation/deallocation can be done with @realloc@.
    1345891
    1346892The hidden problem with this pattern is the effect of zero fill and alignment with respect to reallocation.
    1347 For safety, we argue these properties should be persistent (``sticky'') and not transient.
    1348 For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, any subsequent reallocations of this storage must preserve these properties.
    1349 Currently, allocation properties are not preserved nor is it possible to query an allocation to maintain these properties manually.
    1350 Hence, subsequent use of @realloc@ storage that assumes any initially properties may cause errors.
     893For safety, these properties must persist (be ``sticky'') when storage size changes.
     894Prior to llheap, allocation properties are not preserved across reallocation nor is it possible to query an allocation to maintain these properties manually.
     895Hence, any call to @realloc@ that moves the storage may cause downstream errors, if the allocation properties are needed.
    1351896This silent problem is unintuitive to programmers, can cause catastrophic failure, and is difficult to debug because it is transient.
    1352897To prevent these problems, llheap preserves initial allocation properties within an allocation, allowing them to be queried, and the semantics of @realloc@ preserve these properties on any storage change.
    1353898As a result, the realloc pattern is efficient and safe.
    1354899
     900Note, @realloc@ has a compile-time disadvantage \vs @malloc@, because @malloc@ simplifies optimization opportunities.
     901For @malloc@ the compiler knows the new storage address is not aliased, which is not true for @realloc@: the same storage can be returned.
     902The compiler uses this knowledge to optimize the region of code between the @malloc@ call and the point where the pointer escapes or it finds the matching @free@.
     903For @realloc@, the compiler must also analyse the code \emph{before} the call and this analysis may fail.
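
A small illustration of the difference (our example, assuming a compiler performing standard alias analysis):
\begin{C++}
#include <stdlib.h>

int f( int * p ) {
	int * q = malloc( sizeof(int) );			// fresh storage: *q cannot alias *p
	*p = 1;  *q = 2;
	return *p;									// foldable to 1 without rereading memory
}
int g( int * p, size_t n ) {
	int * q = realloc( p, n );					// q may equal p (same storage returned)
	*q = 2;										// may overwrite data reachable before the call,
	return *q;									// so folding requires analysing earlier code
}
\end{C++}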
     904
     905Finally, there is a flaw in @realloc@'s definition: if there is no memory to allocate new storage for an expansion, the original allocation is not freed or moved, @errno@ is set to @ENOMEM@, and a null pointer is returned.
     906This semantics preserves the original allocation so the data is not lost in a failure case.
     907However, most calls to @realloc@ are written: @p = realloc( p, size )@, so the original storage is leaked when pointer @p@ is overwritten with null, negating the benefit of not freeing the storage for recovery purposes.
     908Programmers can follow a coding pattern of:
     909\begin{C++}
     910char * p;
     911...
     912void * p1 = realloc( p, size );
     913if ( p1 ) p = (char *)p1;
     914else // release some storage
     915\end{C++}
     916However, most programmers ignore return codes.
     917A better alternative is to change @realloc@'s interface to be like @posix_memalign@, which returns two results, a return code and a storage address, so the error code is separate from the returned storage.
     918\begin{C++}
     919int retcode = realloc( (void **)&p, size );
     920\end{C++}
     921which returns 0 or @ENOMEM@, only changes @p@ for expansion, but requires an ugly cast on the call.
     922
     923
     924\subsubsection{Sticky Test}
     925
     926Since sticky properties are an important safety feature for @realloc@, an ad-hoc @realloc@ test was created (not shown) to test whether a memory allocator preserves zero-fill from @calloc@ and/or alignment from @memalign@.
     927The first test @calloc@s a large array (zero fill), sets the array to 42, shortens it, and then enlarges it to the original size.
     928It does these steps 100 times, attempting to obtain a reused large block of memory still set to 42, which would show the new storage does not preserve zero fill.
     929The second test @memalign@s storage and @realloc@s it multiple times making it larger until the current storage must be copied into new storage.
     930The alignment of each storage address returned from @realloc@ is verified with the original alignment.
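
The zero-fill half of this test might look like the following reconstruction (ours, not the authors' code; error checking elided):
\begin{C++}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { N = 1024 * 1024 };

int main( void ) {
	for ( int t = 0; t < 100; t += 1 ) {
		char * p = calloc( N, 1 );				// zero-filled allocation
		memset( p, 42, N );						// dirty the storage
		p = realloc( p, 64 );					// shrink
		p = realloc( p, N );					// re-enlarge: is zero fill sticky?
		for ( size_t i = 64; i < N; i += 1 )
			if ( p[i] != 0 ) { printf( "zero fill not sticky\n" ); return 1; }
		free( p );
	}
	printf( "zero fill appears sticky\n" );
}
\end{C++}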
     931
     932If a test fails, that sticky property is not provided;
     933if the test passes, that sticky property is provided in some form but not necessarily in all forms (the test may just have gotten lucky).
     934If an allocator fails these tests, it is unnecessary to perform a manual inspection of the @realloc@ code for sticky properties.
     935Only llheap passes the test, as its @realloc@ applies sticky properties.
     936
    1355937
    1356938\subsubsection{Header}
    1357939
    1358940To preserve allocation properties requires storing additional information about an allocation.
    1359 Figure~\ref{f:llheapHeader} shows llheap captures this information in the header, which has two fields (left/right) sized appropriately for 32/64-bit alignment requirements.
     941Figure~\ref{f:llheapHeader} shows llheap captures this information in the per object header, which has two fields (left/right) sized appropriately for 32/64-bit alignment requirements.
    1360942
    1361943\begin{figure}
     
    1367949
    1368950The left field is a union of three values:
    1369 \begin{description}
     951\begin{description}[leftmargin=*,topsep=2pt,itemsep=2pt,parsep=0pt]
    1370952\item[bucket pointer]
    1371 is for deallocated of heap storage and points back to the bucket associated with this storage requests (see Figure~\ref{f:llheapStructure} for the fields accessible in a bucket).
     953is for deallocation and points back to the bucket associated with this storage request (see Figure~\ref{f:llheapDesign} for the fields accessible in a bucket).
    1372954\item[mapped size]
    1373955is for deallocation of mapped storage and is the storage size for unmapping.
    1374956\item[next free block]
    1375 is for freed storage and is an intrusive pointer chaining same-size free blocks onto a bucket's stack of free objects.
     957is an intrusive pointer linking same-size free blocks onto a bucket's stack of free objects.
    1376958\end{description}
    1377 The low-order 3-bits of this field are unused for any stored values as these values are at least 8-byte aligned.
     959The low-order 3 bits of the left field are unused for any stored values, due to the minimum alignment of 8 bytes (even for 32-bit addressing).
    1378960The 3 unused bits are used to represent mapped allocation, zero filled, and alignment, respectively.
    1379961Note, the zero-filled/mapped bits are only used in the normal header and the alignment bit in the fake header.
    1380962This implementation allows a fast test if any of the lower 3-bits are on (@&@ and compare).
    1381 If no bits are on, it implies a basic allocation, which is handled quickly in the fastpath for allocation and free;
     963If no bits are on, it implies a basic allocation, which is handled quickly in the fast path for allocation and free;
    1382964otherwise, the bits are analysed and appropriate actions are taken for the complex cases.
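
A sketch of this test, with illustrative flag names (the actual bit assignments are internal to llheap):
\begin{C++}
#include <stdbool.h>
#include <stdint.h>

enum { MAPPED = 1, ZEROFILL = 2, ALIGNED = 4 };	// illustrative low-order flag bits

static bool isBasic( void * header ) {			// fast test on the left header field
	uintptr_t left = *(uintptr_t *)header;		// read the union as an integer
	return (left & (MAPPED | ZEROFILL | ALIGNED)) == 0;	// no special properties?
}
\end{C++}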
    1383965
    1384 The right field remembers the request size versus the allocation (bucket) size, \eg request of 42 bytes is rounded up to 64 bytes.
    1385 Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors and also helps in memory management.
     966The right field remembers the allocation request size versus the allocation (bucket) size, \eg request of 42 bytes is rounded up to 64 bytes.
     967Since programmers think in request size rather than allocation size, the request size allows better generation of statistics or errors and also helps in memory management.
    1386968
    1387969
    1388970\subsection{Statistics and Debugging}
    1389971
    1390 llheap can be built to accumulate fast and largely contention-free allocation statistics to help understand dynamic-memory behaviour.
    1391 Incrementing statistic counters must appear on the allocation fastpath.
    1392 As noted, any atomic operation along the fastpath produces a significant increase in allocation costs.
    1393 To make statistics performant enough for use on running systems, each heap has its own set of statistic counters, so heap operations do not require atomic operations.
     972llheap can be built to accumulate fast and largely contention-free allocation statistics to help understand dynamic memory behaviour.
     973Incrementing statistic counters must appear on the allocation fast path.
     974To make statistics performant enough for use on running systems, each heap has its own set of statistic counters, so updating a counter does not require slow atomic operations.
    1394975
    1395976To locate all statistic counters, heaps are linked together in statistics mode, and this list is locked and traversed to sum all counters across heaps.
    1396 Note, the list is locked to prevent errors traversing an active list;
     977Note, the list is locked to prevent errors traversing an active list, which may have nodes added or removed dynamically;
    1397978the statistics counters are not locked and can flicker during accumulation.
     979Hence, printing statistics during program execution is an approximation.
    1398980Figure~\ref{f:StatiticsOutput} shows an example of statistics output, which covers all allocation operations and information about deallocating storage not owned by a thread.
    1399 No other memory allocator studied provides as comprehensive statistical information.
    1400 Finally, these statistics were invaluable during the development of this work for debugging and verifying correctness and should be equally valuable to application developers.
     981No other memory allocator provides as comprehensive statistical information.
     982These statistics were invaluable during the development of llheap for debugging and verifying correctness, and should be equally valuable to application developers.
    1401983
    1402984\begin{figure}
    1403 \begin{lstlisting}
    1404 Heap statistics: (storage request / allocation)
    1405   malloc >0 calls 2,766; 0 calls 2,064; storage 12,715 / 13,367 bytes
    1406   aalloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    1407   calloc >0 calls 6; 0 calls 0; storage 1,008 / 1,104 bytes
    1408   memalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
     985\begin{C++}
     986PID: 2167216 Heap statistics: (storage request / allocation)
     987  malloc    >0 calls 19,938,000,110; 0 calls 2,064,000,000; storage 4,812,152,081,688 / 5,487,040,092,624 bytes
     988  aalloc    >0 calls 0; 0 calls 0; storage 0 / 0 bytes
     989  calloc    >0 calls 7; 0 calls 0; storage 1,040 / 1,152 bytes
     990  memalign  >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    1409991  amemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    1410992  cmemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    1411   resize >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    1412   realloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    1413   free !null calls 2,766; null calls 4,064; storage 12,715 / 13,367 bytes
    1414   away pulls 0; pushes 0; storage 0 / 0 bytes
    1415   sbrk calls 1; storage 10,485,760 bytes
    1416   mmap calls 10,000; storage 10,000 / 10,035 bytes
    1417   munmap calls 10,000; storage 10,000 / 10,035 bytes
    1418   threads started 4; exited 3
    1419   heaps new 4; reused 0
    1420 \end{lstlisting}
     993  resize    >0 calls 0; 0 calls 0; storage 0 / 0 bytes
     994  realloc   >0 calls 0; 0 calls 0; storage 0 / 0 bytes
     995            copies 0; smaller 0; alignment 0; 0 fill 0
     996  free      !null calls 19,938,000,092; null / 0 calls 4,064,000,004; storage 4,812,152,003,021 / 5,487,040,005,152 bytes
     997  remote    pushes 4; pulls 0; storage 0 / 0 bytes
     998  sbrk      calls 1; storage 8,388,608 bytes
     999  mmap      calls 2,000,000; storage 2,097,152,000,000 / 2,105,344,000,000 bytes
     1000  munmap    calls 2,000,000; storage 2,097,152,000,000 / 2,105,344,000,000 bytes
     1001  remainder calls 0; storage 0 bytes
     1002  threads   started 4; exited 4
     1003  heaps     $new$ 4; reused 0
     1004\end{C++}
    14211005\caption{Statistics Output}
    14221006\label{f:StatiticsOutput}
     
    14241008
    14251009llheap can also be built with debug checking, which inserts many asserts along all allocation paths.
    1426 These assertions detect incorrect allocation usage, like double frees, unfreed storage, or memory corruptions because internal values (like header fields) are overwritten.
    1427 These checks are best effort as opposed to complete allocation checking as in @valgrind@.
     1010These assertions detect incorrect allocation usage, like double frees, unfreed storage, or memory corruption because internal values (like header fields) are overwritten.
     1011These checks are best effort as opposed to complete allocation checking as in @valgrind@~\cite{valgind}.
    14281012Nevertheless, the checks detect many allocation problems.
    1429 There is an unfortunate problem in detecting unfreed storage because some library routines assume their allocations have life-time duration, and hence, do not free their storage.
    1430 For example, @printf@ allocates a 1024-byte buffer on the first call and never deletes this buffer.
    1431 To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ \pageref{p:malloc_unfreed}), and it is subtracted from the total allocate/free difference.
     1013There is a problem in detecting unfreed storage because some library routines assume their allocations have life-time duration, and hence, do not free their storage.
     1014For example, @printf@ might allocate a 1024-byte buffer on the first call and never delete this buffer.
     1015To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ in Section~\ref{s:ExtendedCAPI}), and it is subtracted from the total allocate/free difference.
    14321016Determining the amount of never-freed storage is annoying, but once done, any warnings of unfreed storage are application related.
    1433 
    1434 Tests indicate only a 30\% performance decrease when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistic is mitigated by limited calls, often only one at the end of the program.
    1435 
    1436 
    1437 \subsection{User-level Threading Support}
    1438 \label{s:UserlevelThreadingSupport}
    1439 
    1440 The serially-reusable problem (see \pageref{p:SeriallyReusable}) occurs for kernel threads in the ``T:H, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
    1441 The solution is to prevent interrupts that can result in a CPU or KT change during operations that are logically critical subsections such as starting a memory operation on one KT and completing it on another.
    1442 Locking these critical subsections negates any attempt for a quick fastpath and results in high contention.
    1443 For user-level threading, the serially-reusable problem appears with time slicing for preemptable scheduling, as the signal handler context switches to another user-level thread.
    1444 Without time slicing, a user thread performing a long computation can prevent the execution of (starve) other threads.
    1445 To prevent starvation for a memory-allocation-intensive thread, \ie the time slice always triggers in an allocation critical-subsection for one thread so the thread never gets time sliced, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
    1446 The rollforward flag is tested at the end of each allocation funnel routine (see \pageref{p:FunnelRoutine}), and if set, it is reset and a voluntary yield (context switch) is performed to allow other threads to execute.
    1447 
    1448 llheap uses two techniques to detect when execution is in an allocation operation or routine called from allocation operation, to abort any time slice during this period.
    1449 On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting kernel-thread-local flags so the signal handler aborts immediately.
    1450 On the fastpath, disabling/enabling interrupts is too expensive as accessing kernel-thread-local storage can be expensive and not user-thread-safe.
    1451 For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing.
    1452 Hence, there is a window between loading the kernel-thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.
    1453 
    1454 The fast technique (with lower run time cost) is to define a special code subsection and places all non-interruptible routines in this subsection.
    1455 The linker places all code in this subsection into a contiguous block of memory, but the order of routines within the block is unspecified.
    1456 Then, the signal handler compares the program counter at the point of interrupt with the the start and end address of the non-interruptible subsection, and aborts if executing within this subsection and sets the rollforward flag.
    1457 This technique is fragile because any calls in the non-interruptible code outside of the non-interruptible subsection (like @sbrk@) must be bracketed with disable/enable interrupts and these calls must be along the slowpath.
    1458 Hence, for correctness, this approach requires inspection of generated assembler code for routines placed in the non-interruptible subsection.
    1459 This issue is mitigated by the llheap funnel design so only funnel routines and a few statistics routines are placed in the non-interruptible subsection and their assembler code examined.
    1460 These techniques are used in both the \uC and \CFA versions of llheap as both of these systems have user-level threading.
     1017Debugging mode also scrubs each allocation with @0xff@, so assumptions about zero-filled objects generate errors.
      1018Finally, if a program segment-faults in debug mode, a stack backtrace is printed to help in debugging.
     1019
      1020Tests indicate only a 30\% performance decrease when statistics \emph{and} debugging are enabled in programs with 10\% to 15\% allocation cost, and the latency cost for accumulating statistics from each heap is mitigated by limited calls, often only one at the end of the program.
     1021
     1022
     1023% \subsection{Design Choices}
     1024%
     1025% llheap's design was reviewed and changed multiple times during its development.
     1026% All designs focused on the allocation/free \newterm{fast path}, \ie the shortest code path for the most common operations.
     1027% The model chosen is 1:1, giving one heap per thread for each kernel thread (KT).
     1028% Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted.
     1029% Therefore, the majority of heap operations are uncontended, modulo operations on the global heap and ownership.
     1030%
     1031% Problems:
     1032% \begin{itemize}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt]
     1033% \item
     1034% Need to know when a KT starts/terminates to create/delete its heap.
     1035%
     1036% \noindent
     1037% It is possible to leverage constructors/destructors for thread-local objects to get a general handle on when a KT starts/terminates.
     1038% \item
     1039% There is a classic \newterm{memory-reclamation} problem for ownership because storage passed to another thread can be returned to a terminated heap.
     1040%
     1041% \noindent
     1042% The classic solution only deletes a heap after all referents are returned, which is complex.
     1043% The cheap alternative is for heaps to persist for program duration to handle outstanding referent frees.
     1044% If old referents return storage to a terminated heap, it is handled in the same way as an active heap.
     1045% To prevent heap blowup, terminated heaps can be reused by new KTs, where a reused heap may be populated with free storage from a prior KT (external fragmentation).
     1046% In most cases, heap blowup is not a problem because programs have a small allocation set-size, so the free storage from a prior KT is apropos for a new KT.
     1047% \item
     1048% There can be significant external fragmentation as the number of KTs increases.
     1049%
     1050% \noindent
     1051% In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs.
     1052% Since the number of CPUs is relatively small, and a heap is also relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
     1053% \item
     1054% Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}.
     1055% \begin{quote}
     1056% A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable}
     1057% \end{quote}
     1058% If a KT is preempted during an allocation operation, the OS can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.
     1059% Note, the serially-reusable problem can occur in sequential programs with preemption, if the signal handler calls the preempted function, unless the function is serially reusable.
     1060% Essentially, the serially-reusable problem is a race condition on an unprotected critical subsection, where the OS is providing the second thread via the signal handler.
     1061
     1062% There is the same serially-reusable problem with UTs migrating across KTs.
     1063% \end{itemize}
     1064% Tests showed this design produced the closest performance match with the best current allocators, and code inspection showed most of these allocators use different variations of this approach.
    14611065
    14621066
     14631067\subsection{Bootstrapping}
    14641068
    14651069There are problems bootstrapping a memory allocator.
    1466 \begin{enumerate}
    1467 \item
    14681070Programs can be statically or dynamically linked.
    1469 \item
     14701071The order in which the linker schedules startup code is poorly supported, so the startup sequence cannot be controlled entirely.
    1471 \item
    1472 Knowing a KT's start and end independently from the KT code is difficult.
    1473 \end{enumerate}
     1072Knowing a KT's start and end independently from the KT code is also difficult.
    14741073
    14751074For static linking, the allocator is loaded with the program.
     
     14781077This approach allows allocator substitution by placing an allocation library before any other in the link/load path.
    14781077
    1479 Allocator substitution is similar for dynamic linking, but the problem is that the dynamic loader starts first and needs to perform dynamic allocations \emph{before} the substitution allocator is loaded.
    1480 As a result, the dynamic loader uses a default allocator until the substitution allocator is loaded, after which all allocation operations are handled by the substitution allocator, including from the dynamic loader.
    1481 Hence, some part of the @sbrk@ area may be used by the default allocator and statistics about allocation operations cannot be correct.
    1482 Furthermore, dynamic linking goes through trampolines, so there is an additional cost along the allocator fastpath for all allocation operations.
    1483 Testing showed up to a 5\% performance decrease with dynamic linking as compared to static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.
    1484 
    1485 All allocator libraries need to perform startup code to initialize data structures, such as the heap array for llheap.
    1486 The problem is getting initialization done before the first allocator call.
    1487 However, there does not seem to be mechanism to tell either the static or dynamic loader to first perform initialization code before any calls to a loaded library.
    1488 Also, initialization code of other libraries and the run-time environment may call memory allocation routines such as \lstinline{malloc}.
    1489 This compounds the situation as there is no mechanism to tell either the static or dynamic loader to first perform the initialization code of the memory allocator before any other initialization that may involve a dynamic memory allocation call.
    1490 As a result, calls to allocation routines occur without initialization.
    1491 To deal with this problem, it is necessary to put a conditional initialization check along the allocation fastpath to trigger initialization (singleton pattern).
    1492 
    1493 Two other important execution points are program startup and termination, which include prologue or epilogue code to bootstrap a program, which programmers are unaware of.
    1494 For example, dynamic-memory allocations before/after the application starts should not be considered in statistics because the application does not make these calls.
    1495 llheap establishes these two points using routines:
    1496 \begin{lstlisting}
    1497 __attribute__(( constructor( 100 ) )) static void startup( void ) {
     1078Allocator substitution is similar for dynamic linking.
     1079However, the dynamic loader starts first and needs to perform dynamic allocations \emph{before} the substitution allocator is loaded.
     1080As a result, the dynamic loader uses a default allocator until the substitution allocator is loaded, after which all allocation operations are handled by the substitution allocator, including those from the dynamic loader.
     1081Hence, some part of the @sbrk@ area may be used by the default allocator and substitution allocator statistics cannot be correct.
     1082Furthermore, dynamic linking uses an assembler trampoline to call the procedure linkage table resolver, so there is an additional cost along the allocator fast path for all allocation operations.
     1083Testing showed up to a 5\% performance decrease with dynamic linking as compared to static linking, even when using @tls_model( "initial-exec" )@ to obtain tighter binding.
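For reference, the tighter binding is requested by annotating the allocator's thread-local variables with the @tls_model@ attribute; a one-line sketch, where @Heap@ and @heapManager@ are illustrative names:
\begin{C++}
static __thread Heap * heapManager __attribute__(( tls_model( "initial-exec" ) ));
\end{C++}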
     1084
     1085After the allocator is loaded, it needs to be initialized before the first allocation request.
     1086Currently, the only mechanism to control initialization is via constructor routines (see below), each with an integer priority, where the linker calls the constructors in increasing order of priority.
     1087However, there are few conventions for priorities amongst libraries, where constructors with equal priorities are called in arbitrary order.
     1088(Only a transitive closure of references amongst library calls can establish an absolute initialization order.)
     1089As a result, the first call to an allocation routine can occur without initialization.
     1090To deal with this problem, it is necessary to have a global flag that is checked along the allocation fast path to trigger initialization (singleton pattern).
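A minimal sketch of this check, where @heapManagerBootFlag@, @heapManagerCtor@, and @doMalloc@ are illustrative names rather than actual llheap identifiers:
\begin{C++}
#include <stddef.h>
#include <stdbool.h>

static bool heapManagerBootFlag = false;	// true once initialization completes
static void * doMalloc( size_t size );		// fast-path allocation (assumed)

static void heapManagerCtor( void ) {		// one-time initialization
	// create heap structures, statistics counters, ...
	heapManagerBootFlag = true;
}

void * malloc( size_t size ) {
	if ( ! heapManagerBootFlag ) heapManagerCtor();	// singleton check on the fast path
	return doMalloc( size );
}
\end{C++}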
     1091
      1092Along these lines, there is a subtle problem in defining when a program starts and ends.
      1093For example, prologue/epilogue code outside of the program should not be considered in statistics as the application does not make these calls.
     1094llheap establishes these two points using constructor/destructor routines with initialization priority 100, where system libraries use priorities $\le$ 100 and application programs have priorities $>$ 100.
     1095\begin{flushleft}
     1096\hspace*{\parindentlnth}
     1097\setlength{\tabcolsep}{20pt}
     1098\begin{tabular}{@{}ll@{}}
     1099\begin{C++}
     1100@__attribute__(( constructor( 100 ) ))@
     1101static void startup( void ) {
    14981102        // clear statistic counters
    14991103        // reset allocUnfreed counter
    15001104}
    1501 __attribute__(( destructor( 100 ) )) static void shutdown( void ) {
     1105
     1106\end{C++}
     1107&
     1108\begin{C++}
     1109@__attribute__(( destructor( 100 ) ))@
     1110static void shutdown( void ) {
    15021111        // sum allocUnfreed for all heaps
    15031112        // subtract global unfreed storage
    15041113        // if allocUnfreed > 0 then print warning message
    15051114}
    1506 \end{lstlisting}
    1507 which use global constructor/destructor priority 100, where the linker calls these routines at program prologue/epilogue in increasing/decreasing order of priority.
    1508 Application programs may only use global constructor/destructor priorities greater than 100.
     1115\end{C++}
     1116\end{tabular}
     1117\end{flushleft}
    15091118Hence, @startup@ is called after the program prologue but before the application starts, and @shutdown@ is called after the program terminates but before the program epilogue.
    15101119By resetting counters in @startup@, prologue allocations are ignored, and checking unfreed storage in @shutdown@ checks only application memory management, ignoring the program epilogue.
    15111120
    1512 While @startup@/@shutdown@ apply to the program KT, a concurrent program creates additional KTs that do not trigger these routines.
    1513 However, it is essential for the allocator to know when each KT is started/terminated.
    1514 One approach is to create a thread-local object with a construct/destructor, which is triggered after a new KT starts and before it terminates, respectively.
    1515 \begin{lstlisting}
    1516 struct ThreadManager {
    1517         volatile bool pgm_thread;
    1518         ThreadManager() {} // unusable
    1519         ~ThreadManager() { if ( pgm_thread ) heapManagerDtor(); }
    1520 };
    1521 static thread_local ThreadManager threadManager;
    1522 \end{lstlisting}
    1523 Unfortunately, thread-local variables are created lazily, \ie on the first dereference of @threadManager@, which then triggers its constructor.
    1524 Therefore, the constructor is useless for knowing when a KT starts because the KT must reference it, and the allocator does not control the application KT.
    1525 Fortunately, the singleton pattern needed for initializing the program KT also triggers KT allocator initialization, which can then reference @pgm_thread@ to call @threadManager@'s constructor, otherwise its destructor is not called.
    1526 Now when a KT terminates, @~ThreadManager@ is called to chain it onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
    1527 The conditional destructor call prevents closing down the program heap, which must remain available because epilogue code may free more storage.
    1528 
    1529 Finally, there is a recursive problem when the singleton pattern dereferences @pgm_thread@ to initialize the thread-local object, because its initialization calls @atExit@, which immediately calls @malloc@ to obtain storage.
    1530 This recursion is handled with another thread-local flag to prevent double initialization.
    1531 A similar problem exists when the KT terminates and calls member @~ThreadManager@, because immediately afterwards, the terminating KT calls @free@ to deallocate the storage obtained from the @atExit@.
    1532 In the meantime, the terminated heap has been put on the global-heap free-stack, and may be active by a new KT, so the @atExit@ free is handled as a free to another heap and put onto the away list using locking.
    1533 
    1534 For user threading systems, the KTs are controlled by the runtime, and hence, start/end pointers are known and interact directly with the llheap allocator for \uC and \CFA, which eliminates or simplifies several of these problems.
    1535 The following API was created to provide interaction between the language runtime and the allocator.
    1536 \begin{lstlisting}
    1537 void startThread();                     $\C{// KT starts}$
    1538 void finishThread();                    $\C{// KT ends}$
    1539 void startup();                         $\C{// when application code starts}$
    1540 void shutdown();                        $\C{// when application code ends}$
    1541 bool traceHeap();                       $\C{// enable allocation/free printing for debugging}$
    1542 bool traceHeapOn();                     $\C{// start printing allocation/free calls}$
    1543 bool traceHeapOff();                    $\C{// stop printing allocation/free calls}$
    1544 \end{lstlisting}
    1545 This kind of API is necessary to allow concurrent runtime systems to interact with different memory allocators in a consistent way.
    1546 
    1547 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1548 
    1549 \subsection{Added Features and Methods}
    1550 
    1551 The C dynamic-allocation API (see Figure~\ref{f:CDynamicAllocationAPI}) is neither orthogonal nor complete.
    1552 For example,
    1553 \begin{itemize}
    1554 \item
    1555 It is possible to zero fill or align an allocation but not both.
    1556 \item
    1557 It is \emph{only} possible to zero fill an array allocation.
    1558 \item
    1559 It is not possible to resize a memory allocation without data copying.
    1560 \item
    1561 @realloc@ does not preserve initial allocation properties.
    1562 \end{itemize}
    1563 As a result, programmers must provide these options, which is error prone, resulting in blaming the entire programming language for a poor dynamic-allocation API.
     1121Unfortunately, @startup@/@shutdown@ only apply to the program KT, not to any additional KTs created by the program.
     1122However, it is essential for the allocator to know when each KT is started/terminated to initialize/de-initialize the KT's heap.
      1123Initialization can be handled by making the global flag (above) thread-local, so the initialization check along the fast path covers the first allocation by a newly created thread.
      1124De-initialization is handled by registering a destructor routine using @pthread_key_create@ in the initialization code triggered along the fast path; pthreads subsequently calls this destructor at thread termination.
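A sketch of this mechanism, assuming an opaque per-KT @Heap@ and illustrative routine names:
\begin{C++}
#include <pthread.h>

typedef struct Heap Heap;			// opaque per-KT heap
static pthread_key_t heapKey;

static void heapManagerDtor( void * heap ) {	// called by pthreads at KT termination
	// chain this KT's heap onto the global free-stack for reuse
}

__attribute__(( constructor( 100 ) )) static void createHeapKey( void ) {
	pthread_key_create( &heapKey, heapManagerDtor );
}

static void threadStartup( Heap * heap ) {	// run from the fast-path initialization check
	pthread_setspecific( heapKey, heap );	// non-NULL value triggers dtor at termination
}
\end{C++}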
     1125
     1126
     1127\subsection{User-level Threading Support}
     1128\label{s:UserlevelThreadingSupport}
     1129
     1130llheap is the underlying allocator in the user-threading programming languages \uC and \CFA.
      1131These systems have preemptive scheduling, which requires management of timing events through a signal handler (@SIGALRM@).
      1132The complexity in these systems is the serially-reusable problem (see Section~\ref{s:SingleThreadedMemoryAllocator}) when UTs are time sliced (language level) independently from KTs (OS level).
     1133The solution is to prevent interrupts resulting in a CPU or KT change during critical operations, eliminating problems like starting a memory operation on one KT and completing it on another when the underlying heaps are different.
     1134% For user-level threading, the serially-reusable problem occurs with time slicing for preemptable user-level scheduling, as the interrupted UT is unlikely to be restarted on the same KT.
      1135However, without time slicing, a long-running UT prevents the execution of other UTs (starvation).
     1136
     1137The languages modify llheap using two techniques to prevent time slicing during non-interruptible allocation operations.
     1138On the slow path, when executing expensive operations, time-slicing interrupts are disabled/enabled, so the operation completes atomically on the KT.
     1139On the fast path, all non-interruptible allocation/deallocation routines are placed in a separate code segment.
     1140The linker places this segment into a contiguous block of memory. %, but the order of routines within the block is unspecified.
     1141Then the time-slice signal handler compares the program counter at the point of interrupt with the start/end address of the non-interruptible segment, and if executing within the segment, the signal handler returns without context switching.
     1142The llheap funnel design simplifies this implementation so only a few funnel and statistics routines are located in the non-interruptible section.
     1143% This technique is fragile as no mechanism exists to ensure all crucial code along the fast path is placed into the non-interruptible segment.
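A sketch of the segment check for x86-64 Linux, where @nopreempt@ is an illustrative section name and GNU ld automatically provides @__start_@/@__stop_@ bounds for any section whose name is a valid C identifier:
\begin{C++}
#define _GNU_SOURCE
#include <signal.h>
#include <stdint.h>
#include <ucontext.h>

#define NOPREEMPT __attribute__(( section( "nopreempt" ) ))	// non-interruptible segment

extern char __start_nopreempt[], __stop_nopreempt[];	// segment bounds from GNU ld

NOPREEMPT void * doMalloc( size_t size ) { /* ... fast path ... */ return 0; } // stub

static void alarmHandler( int sig, siginfo_t * si, void * uctx ) {
	char * pc = (char *)(uintptr_t)((ucontext_t *)uctx)->uc_mcontext.gregs[ REG_RIP ];
	if ( __start_nopreempt <= pc && pc < __stop_nopreempt ) return; // defer time slice
	// otherwise, perform the user-level context switch
}
\end{C++}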
     1144
      1145Interestingly, marking non-interruptible operations by bracketing them with a set/reset of a thread-local flag fails, as reading/writing the flag is not atomic on some machines.
     1146For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing.
      1147Hence, there is a window between loading the kernel-thread-local pointer from the coprocessor register into a normal register and adding the displacement, during which a time slice can move a UT.
     1148As well, switching to a T:C model with restartable critical sections using @librseq@~\cite{Desnoyers19} was examined (see Section~\ref{s:MutualExclusion}).
      1149However, tests showed that while @librseq@ can determine the particular CPU quickly, setting up the restartable critical-section along the allocation fast-path produced a significant decrease in performance.
     1150Also, the number of undoable writes in @librseq@ is limited and restartable sequences cannot deal with UT migration across KTs.
     1151For example, UT$_1$ is executing an allocation by KT$_1$ on CPU$_1$ and a time-slice preemption occurs.
     1152The signal handler context switches UT$_1$ onto the user-level ready-queue and starts running UT$_2$ on KT$_1$, which immediately performs an allocation.
     1153Since KT$_1$ is still executing on CPU$_1$, @librseq@ takes no action because it assumes KT$_1$ is still executing the same critical section.
     1154Then UT$_1$ is scheduled onto KT$_2$ by the user-level scheduler, and its allocation operation continues in parallel with UT$_2$ using references into the heap associated with CPU$_1$, which corrupts CPU$_1$'s heap.
     1155If @librseq@ had an @rseq_abort@ which:
     1156\begin{enumerate}[leftmargin=*,topsep=2pt,itemsep=0pt,parsep=0pt]
     1157\item
     1158marks the current restartable critical-section as cancelled so it restarts when attempting to commit.
     1159\item
     1160does nothing if there is no current restartable critical section in progress.
     1161\end{enumerate}
      1162Then @rseq_abort@ could be called on the back side of a user-level context switch.
     1163A feature similar to this idea might exist for hardware transactional memory.
     1164A significant effort was made to make this approach work but its complexity, lack of robustness, and performance costs resulted in its rejection.
     1165
     1166
     1167\subsection{C API}
     1168
     1169Figure~\ref{f:CDynamicAllocationAPI} shows the C dynamic allocation API, which is neither orthogonal nor complete.
     1170For example, it is possible to zero fill or align an allocation but not both, it is only possible to zero fill an array allocation, and it is not possible to resize a memory allocation without data copying.
      1171As a result, programmers must provide missing alternatives, which is error prone, and the C programming language is rightly blamed for its poor allocation API.
    15641172Furthermore, newer programming languages have better type systems that can provide safer and more powerful APIs for memory allocation.
     1173The following presents llheap API changes.
    15651174
    15661175\begin{figure}
    1567 \begin{lstlisting}
     1176\hspace*{\parindentlnth}
     1177\begin{tabular}{@{}l|l@{}}
     1178\begin{C++}
    15681179void * malloc( size_t size );
    1569 void * calloc( size_t nmemb, size_t size );
    1570 void * realloc( void * ptr, size_t size );
    1571 void * reallocarray( void * ptr, size_t nmemb, size_t size );
    1572 void free( void * ptr );
     1180void * calloc( size_t dimension, size_t size );
     1181void * realloc( void * oaddr, size_t size );
     1182void * reallocarray( void * oaddr, size_t dimension, size_t size );
     1183void free( void * addr );
    15731184void * memalign( size_t alignment, size_t size );
    15741185void * aligned_alloc( size_t alignment, size_t size );
     15751186int posix_memalign( void ** memptr, size_t alignment, size_t size );
    15761187void * valloc( size_t size );
    15771188void * pvalloc( size_t size );
    1578 
    1579 struct mallinfo mallinfo( void );
    1580 int mallopt( int param, int val );
    1581 int malloc_trim( size_t pad );
    1582 size_t malloc_usable_size( void * ptr );
     1189\end{C++}
     1190&
     1191\begin{C++}
     1192int mallopt( int option, int value );
     1193size_t malloc_usable_size( void * addr );
    15831194void malloc_stats( void );
    15841195int malloc_info( int options, FILE * fp );
    1585 \end{lstlisting}
    1586 \caption{C Dynamic-Allocation API}
     1196
     1197// Unsupported
     1198struct mallinfo mallinfo( void );
     1199int malloc_trim( size_t );
     1200void * malloc_get_state( void );
     1201int malloc_set_state( void * );
     1202\end{C++}
     1203\end{tabular}
     1204\caption{llheap support of C dynamic-allocation API}
    15871205\label{f:CDynamicAllocationAPI}
    15881206\end{figure}
    15891207
    1590 The following presents design and API changes for C, \CC (\uC), and \CFA, all of which are implemented in llheap.
    1591 
    1592 
    1593 \subsubsection{Out of Memory}
    1594 
    1595 Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory;
    1596 hence the need to return an alternate value for a zero-sized allocation.
    1597 A different approach allowed by @C API@ is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.
    1598 In theory, notifying the programmer of memory failure allows recovery;
    1599 in practice, it is almost impossible to gracefully recover when out of memory.
    1600 Hence, the cheaper approach of returning @nullptr@ for a zero-sized allocation is chosen because no pseudo allocation is necessary.
    1601 
    1602 
    1603 \subsubsection{C Interface}
    1604 
    1605 For C, it is possible to increase functionality and orthogonality of the dynamic-memory API to make allocation better for programmers.
    1606 
    1607 For existing C allocation routines:
    1608 \begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
     1208
     1209\subsubsection{Extended C API}
     1210\label{s:ExtendedCAPI}
     1211
     1212llheap transparently augments the C dynamic memory API to increase functionality, orthogonality, and safety.
     1213\begin{itemize}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt]
     1214\item
      1215@malloc@ remembers the original allocation size separately from the actual allocation size.
    16091216\item
    16101217@calloc@ sets the sticky zero-fill property.
    16111218\item
    1612 @memalign@, @aligned_alloc@, @posix_memalign@, @valloc@ and @pvalloc@ set the sticky alignment property.
    1613 \item
    1614 @realloc@ and @reallocarray@ preserve sticky properties.
     1219@memalign@, @aligned_alloc@, @posix_memalign@, @valloc@ and @pvalloc@ set the sticky alignment property, remembering the specified alignment size.
     1220\item
      1221@realloc@ and @reallocarray@ preserve sticky properties across copying (see the sketch after this list).
     1222\item
      1223@malloc_stats@ prints detailed statistics of allocation/free operations when linked with a statistics version of llheap.
     1224\item
      1225The existence of the shell variable @MALLOC_STATS@ implicitly calls @malloc_stats@ at program termination, so precompiled programs do not have to be modified.
    16151226\end{itemize}
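For example, under llheap's sticky semantics (a sketch; glibc makes no such guarantee):
\begin{C++}
double * v = (double *)memalign( 4096, 100 * sizeof(double) );	// sets sticky 4096 alignment
v = (double *)realloc( v, 200 * sizeof(double) );	// new storage is still 4096-aligned
free( v );
\end{C++}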
    16161227
    1617 The C dynamic-memory API is extended with the following routines:
    1618 
    1619 \medskip\noindent
    1620 \lstinline{void * aalloc( size_t dimension, size_t elemSize )}
    1621 extends @calloc@ for allocating a dynamic array of objects with total size @dim@ $\times$ @elemSize@ but \emph{without} zero-filling the memory.
    1622 @aalloc@ is significantly faster than @calloc@, which is the only alternative given by the standard memory-allocation routines for array allocation.
    1623 It returns the address of the dynamic array or @NULL@ if either @dim@ or @elemSize@ are zero.
    1624 
    1625 \medskip\noindent
    1626 \lstinline{void * resize( void * oaddr, size_t size )}
    1627 extends @realloc@ for resizing an existing allocation, @oaddr@, to the new @size@ (smaller or larger than previous) \emph{without} copying previous data into the new allocation or preserving sticky properties.
    1628 @resize@ is significantly faster than @realloc@, which is the only alternative.
    1629 It returns the address of the old or new storage with the specified new size or @NULL@ if @size@ is zero.
    1630 
    1631 \medskip\noindent
    1632 \lstinline{void * amemalign( size_t alignment, size_t dimension, size_t elemSize )}
    1633 extends @aalloc@ and @memalign@ for allocating a dynamic array of objects with the starting address on the @alignment@ boundary.
    1634 Sets sticky alignment property.
    1635 It returns the address of the aligned dynamic-array or @NULL@ if either @dim@ or @elemSize@ are zero.
    1636 
    1637 \medskip\noindent
    1638 \lstinline{void * cmemalign( size_t alignment, size_t dimension, size_t elemSize )}
    1639 extends @amemalign@ with zero fill and has the same usage as @amemalign@.
    1640 Sets sticky zero-fill and alignment property.
    1641 It returns the address of the aligned, zero-filled dynamic-array or @NULL@ if either @dim@ or @elemSize@ are zero.
    1642 
    1643 \medskip\noindent
    1644 \lstinline{size_t malloc_alignment( void * addr )}
    1645 returns the object alignment, where objects not allocated with alignment return the minimal allocation alignment.
    1646 For use in aligning similar allocations.
    1647 
    1648 \medskip\noindent
    1649 \lstinline{bool malloc_zero_fill( void * addr )}
    1650 returns true if the objects zero-fill sticky property is set and false otherwise.
    1651 For use in zero filling similar allocations.
    1652 
    1653 \medskip\noindent
    1654 \lstinline{size_t malloc_size( void * addr )}
    1655 returns the object's request size, which is updated when an object is resized or zero if @addr@ is @NULL@ (see also @malloc_usable_size@).
    1656 For use in similar allocations.
    1657 
    1658 \medskip\noindent
    1659 \lstinline{int malloc_stats_fd( int fd )}
    1660 changes the file descriptor where @malloc_stats@ writes statistics (default @stdout@) and returns the previous file descriptor.
    1661 
    1662 \medskip\noindent
    1663 \lstinline{size_t malloc_expansion()}
    1664 \label{p:malloc_expansion}
    1665 set the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation request.
    1666 It returns the heap extension size used throughout a program when requesting more memory from the system using @sbrk@ system-call, \ie called once at heap initialization.
    1667 
    1668 \medskip\noindent
    1669 \lstinline{size_t malloc_mmap_start()}
    1670 set the crossover between allocations occurring in the @sbrk@ area or separately mapped.
    1671 It returns the crossover point used throughout a program, \ie called once at heap initialization.
    1672 
    1673 \medskip\noindent
    1674 \lstinline{size_t malloc_unfreed()}
    1675 \label{p:malloc_unfreed}
    1676 amount subtracted to adjust for unfreed program storage (debug only).
    1677 It returns the new subtraction amount and called by @malloc_stats@ (discussed in Section~\ref{}).
    1678 
    1679 
    1680 \subsubsection{\CC Interface}
    1681 
    1682 The following extensions take advantage of overload polymorphism in the \CC type-system.
    1683 
    1684 \medskip\noindent
    1685 \lstinline{void * resize( void * oaddr, size_t nalign, size_t size )}
    1686 extends @resize@ with an alignment requirement, @nalign@.
    1687 It returns the address of the old or new storage with the specified new size and alignment, or @NULL@ if @size@ is zero.
    1688 
    1689 \medskip\noindent
    1690 \lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}
    1691 extends @realloc@ with an alignment requirement, @nalign@.
    1692 It returns the address of the old or new storage with the specified new size and alignment, or @NULL@ if @size@ is zero.
    1693 
    1694 
    1695 \subsubsection{\CFA Interface}
    1696 
    1697 The following extensions take advantage of overload polymorphism in the \CFA type-system.
    1698 The key safety advantage of the \CFA type system is using the return type to select overloads;
    1699 hence, a polymorphic routine knows the returned type and its size.
    1700 This capability is used to remove the object size parameter and correctly cast the return storage to match the result type.
    1701 For example, the following is the \CFA wrapper for C @malloc@:
      1228llheap extends the C dynamic-memory API with new allocation operations whose APIs match their existing C counterparts.
     1229\begin{itemize}[leftmargin=*,topsep=3pt,itemsep=1pt,parsep=0pt]
     1230\item
     1231@aalloc@ extends @calloc@ for dynamic array allocation \emph{without} zero-filling the memory (faster than @calloc@).
     1232\item
      1233@resize@ extends @realloc@ for resizing an allocation \emph{without} copying previous data or preserving sticky properties (faster than @realloc@; see the sketch after this list).
     1234\item
     1235@resizearray@ extends @resize@ for an array allocation (faster than @reallocarray@).
     1236\item
     1237@amemalign@ extends @aalloc@ with alignment and sets sticky alignment property.
     1238\item
     1239@cmemalign@ extends @amemalign@ with zero fill and sets sticky zero-fill and alignment property.
     1240\item
     1241@aligned_resize@ extends @resize@ with an alignment.
     1242\item
     1243@aligned_resizearray@ extends @resizearray@ with alignment.
     1244\item
     1245@aligned_realloc@ extends @realloc@ with alignment.
     1246\item
      1247@aligned_reallocarray@ extends @reallocarray@ with alignment.
     1248\end{itemize}
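For example, a sketch contrasting the copying and non-copying operations:
\begin{C++}
int * p = (int *)malloc( 100 * sizeof(int) );
p = (int *)realloc( p, 200 * sizeof(int) );	// grow: old values copied into new storage
int * q = (int *)aalloc( 50, sizeof(int) );	// array allocation without zero fill
q = (int *)resize( q, 500 * sizeof(int) );	// grow: prior contents discarded, no copy
\end{C++}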
     1249
     1250llheap extends the C dynamic memory API with new control operations.
     1251The following routines are called \emph{once} during llheap startup to set specific limits \emph{before} an application starts.
      1252Setting these values early is essential because allocations can occur from the dynamic loader and other libraries before application code executes.
     1253To set a value, define a specific routine in an application and return the desired value, \eg
     1254\begin{C++}
     1255size_t malloc_extend() { return 16 * 1024 * 1024; }
     1256\end{C++}
     1257\begin{itemize}[leftmargin=*,topsep=0pt,itemsep=1pt,parsep=0pt]
     1258\item
     1259@malloc_extend@ returns the number of bytes to extend the @sbrk@ area when there is insufficient free storage to service an allocation request.
     1260\item
     1261@malloc_mmap_start@ returns the crossover allocation size from the @sbrk@ area to separate mapped areas, see also @mallopt( M_MMAP_THRESHOLD )@.
     1262\item
     1263@malloc_unfreed@ returns the amount subtracted from the global unfreed program storage to adjust for unreleased storage from routines like @printf@ (debug only).
     1264\end{itemize}
     1265
     1266llheap extends the C dynamic-memory API with functions to query object properties.
     1267\begin{itemize}[leftmargin=*,topsep=3pt,itemsep=1pt,parsep=0pt]
     1268\item
      1269@malloc_size@ returns the requested size of a dynamic object, which is updated when an object is resized (compare @malloc_usable_size@); see the sketch after this list.
     1270\item
     1271@malloc_alignment@ returns the object alignment, where the minimal alignment is 16 bytes.
     1272\item
     1273@malloc_zero_fill@ returns true if the object is zero filled.
     1274\item
     1275@malloc_remote@ returns true if the object is from a remote heap (@OWNERSHIP@ only).
     1276\end{itemize}
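A sketch using these query routines, assuming a 4-byte @int@:
\begin{C++}
int * p = (int *)memalign( 64, 13 * sizeof(int) );
size_t a = malloc_alignment( p );	// 64
size_t r = malloc_size( p );		// 52, the requested bytes
size_t u = malloc_usable_size( p );	// >= 52, the actual usable (bucket) size
bool z = malloc_zero_fill( p );		// false, memalign does not zero fill
\end{C++}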
     1277
     1278llheap extends the C dynamic-memory API with new statistics control.
     1279\begin{itemize}[leftmargin=*,topsep=3pt,itemsep=1pt,parsep=0pt]
     1280\item
      1281@malloc_stats_fd@ sets the file descriptor for @malloc_stats@ writes (default @stdout@), as shown after this list.
     1282\item
     1283@malloc_stats_clear@ clears the statistics counters for all thread heaps.
     1284\item
     1285@heap_stats@ extends @malloc_stats@ to only print statistics for the heap associated with the executing thread.
     1286\end{itemize}
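For example, statistics can be redirected and scoped to a region of interest (a sketch; @STDERR_FILENO@ is from @<unistd.h>@):
\begin{C++}
int prev = malloc_stats_fd( STDERR_FILENO );	// redirect statistics to stderr
malloc_stats_clear();			// zero counters for all thread heaps
// ... region of interest ...
malloc_stats();				// print statistics accumulated since the clear
\end{C++}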
     1287
     1288
     1289\subsubsection{Modern Allocation API}
     1290
     1291Modern programming languages have complex type systems that can be used to consolidate the panoply of memory allocation routines and features, providing a simpler programming experience and safety.
     1292The \CFA language is used to demonstrate this capability, because llheap forms the memory allocator for this C variant, but other languages can provide similar APIs.
     1293
     1294\CFA polymorphism reduces the allocation API to two overloaded routines allocating a single object or an array of objects.
    17021295\begin{cfa}
    1703 forall( T & | sized(T) ) {
    1704         T * malloc( void ) {
    1705                 if ( _Alignof(T) <= libAlign() ) return @(T *)@malloc( @sizeof(T)@ ); // C allocation
    1706                 else return @(T *)@memalign( @_Alignof(T)@, @sizeof(T)@ ); // C allocation
    1707         } // malloc
     1296forall( T & ) {
     1297        T * alloc( /* list of property functions ... */  ) { ... } // singleton allocation
     1298        T * alloc( size_t @dimension@, /* list of property functions ... */  ) { ... } // array allocation
     1299}
    17081300\end{cfa}
    1709 and is used as follows:
    1710 \begin{lstlisting}
    1711 int * i = malloc();
    1712 double * d = malloc();
    1713 struct Spinlock { ... } __attribute__(( aligned(128) ));
    1714 Spinlock * sl = malloc();
    1715 \end{lstlisting}
    1716 where each @malloc@ call provides the return type as @T@, which is used with @sizeof@, @_Alignof@, and casting the storage to the correct type.
    1717 This interface removes many of the common allocation errors in C programs.
    1718 Figure~\ref{f:CFADynamicAllocationAPI} show the \CFA wrappers for the equivalent C/\CC allocation routines with same semantic behaviour.
    1719 
    1720 \begin{figure}
    1721 \begin{lstlisting}
    1722 T * malloc( void );
    1723 T * aalloc( size_t dim );
    1724 T * calloc( size_t dim );
    1725 T * resize( T * ptr, size_t size );
    1726 T * realloc( T * ptr, size_t size );
    1727 T * memalign( size_t align );
    1728 T * amemalign( size_t align, size_t dim );
    1729 T * cmemalign( size_t align, size_t dim  );
    1730 T * aligned_alloc( size_t align );
    1731 int posix_memalign( T ** ptr, size_t align );
    1732 T * valloc( void );
    1733 T * pvalloc( void );
    1734 \end{lstlisting}
    1735 \caption{\CFA C-Style Dynamic-Allocation API}
    1736 \label{f:CFADynamicAllocationAPI}
    1737 \end{figure}
    1738 
    1739 In addition to the \CFA C-style allocator interface, a new allocator interface is provided to further increase orthogonality and usability of dynamic-memory allocation.
    1740 This interface helps programmers in three ways.
    1741 \begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
    1742 \item
    1743 naming: \CFA regular and @ttype@ polymorphism (@ttype@ polymorphism in \CFA is similar to \CC variadic templates) is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
    1744 \item
    1745 named arguments: individual allocation properties are specified using postfix function call, so the programmers do not have to remember parameter positions in allocation calls.
    1746 \item
    1747 object size: like the \CFA's C-interface, programmers do not have to specify object size or cast allocation results.
    1748 \end{itemize}
    1749 Note, postfix function call is an alternative call syntax, using backtick @`@, so the argument appears before the function name, \eg
      1301Because the \CFA type system uses the return type to select overloads (like Ada), this capability is leveraged to remove the object-size parameter and the return cast for regular calls to C @malloc@ or @memalign@.
    17501302\begin{cfa}
    1751 duration ?@`@h( int h );                // ? denote the position of the function operand
    1752 duration ?@`@m( int m );
    1753 duration ?@`@s( int s );
    1754 duration dur = 3@`@h + 42@`@m + 17@`@s;
     1303inline T * alloc( ... ) {
     1304        if ( _Alignof(T) <= defaultAlign() ) return @(T *)@malloc( @sizeof(T)@ ); // C allocation
     1305        else return @(T *)@memalign( @_Alignof(T)@, @sizeof(T)@ ); // C allocation
     1306}
    17551307\end{cfa}
    1756 
    1757 The following extensions take advantage of overload polymorphism in the \CC type-system.
    1758 
    1759 \medskip\noindent
    1760 \lstinline{T * alloc( ... )} or \lstinline{T * alloc( size_t dimension, ... )}
    1761 is overloaded with a variable number of specific allocation operations, or an integer dimension parameter followed by a variable number of specific allocation operations.
    1762 These allocation operations can be passed as named arguments when calling the \lstinline{alloc} routine.
    1763 A call without parameters returns a dynamically allocated object of type @T@ (@malloc@).
    1764 A call with only the dimension (dim) parameter returns a dynamically allocated array of objects of type @T@ (@aalloc@).
    1765 The variable number of arguments consist of allocation properties, which can be combined to produce different kinds of allocations.
    1766 The only restriction is for properties @realloc@ and @resize@, which cannot be combined.
    1767 
    1768 The allocation property functions are:
    1769 
    1770 \medskip\noindent
    1771 \lstinline{T_align ?`align( size_t alignment )}
    1772 to align the allocation.
    1773 The alignment parameter must be $\ge$ the default alignment (@libAlign()@ in \CFA) and a power of two.
    1774 The following example returns a dynamic object and object array aligned on a 4096-byte boundary.
      1308The calls to these two routines are now much safer than the C equivalents.
     1309\begin{C++}
     1310int * ip = alloc(); $\C[2.75in]{// T => int, sizeof => 4/8, alignment => default}$
     1311double * dp = alloc(); $\C{// T => double, sizeof => 8, alignment => default}$
      1312struct Spinlock { ... } __attribute__(( aligned(128) ));  Spinlock * sp = alloc(); $\C{// T => Spinlock, sizeof => ..., alignment => 128}$
     1313int * ia = alloc( 10 ); $\C{// T => int, sizeof => 4/8, alignment => default, dimension => 10}\CRT$
     1314\end{C++}
     1315At compile time, each call to @alloc@ extracts the return type @T@ from the left-hand side of the assignment, which is then used in @sizeof@, @_Alignof@, and casting the storage to the correct type.
      1316The @inline@ qualifier and the compile-time constant condition allow the compiler to remove the @if@ statement.
      1317This interface removes all the common allocation-call errors in C and provides a uniform name covering all allocations, reducing the cognitive burden.
     1318
     1319The property functions are a variable number of routines providing @alloc@ with management details and actions.
      1320The functions are @align@, @fill@, @resize@, and @realloc@, written in postfix versus prefix notation solely for aesthetic reasons, \eg @3`fill@ $\equiv$ @fill( 3 )@.
     1321The examples are arrays but apply equally to singleton allocations.
    17751322\begin{cfa}
    1776 int * i0 = alloc( @4096`align@ );  sout | i0 | nl;
    1777 int * i1 = alloc( 3, @4096`align@ );  sout | i1; for (i; 3 ) sout | &i1[i]; sout | nl;
    1778 
    1779 0x555555572000
    1780 0x555555574000 0x555555574000 0x555555574004 0x555555574008
     1323int * ip = alloc( 5, @4096`align@, @5`fill@ ); $\C[3in]{// start array on 4096 boundary and initialize elements with 5}$
     1324int * ip2 = alloc( 10, @ip`fill@, @(malloc_alignment( ip ))`align@ ); $\C{// first 5 elements same as ip, same alignment as ip}$
     1325_Complex double * cdp = alloc( 5, @(3.5+4.1i)`fill@ ); $\C{// initialize complex elements with 3.5+4.1i}$
     1326struct S { int i, j; };
     1327S * sp = alloc( 10, @((S){3, 4})`fill@ ); $\C{// initialize structure elements with {3, 4}}$
     1328ip = alloc( 10, @ip`realloc@, @10`fill@ ); $\C{// make array ip larger and initialize new elements with 10}$
     1329double * dp = alloc( 5, @ip2`resize@, @256`align@, @13.5`fill@ ); $\C{// reuse ip2 storage for something else}\CRT$
    17811330\end{cfa}
    1782 
    1783 \medskip\noindent
    1784 \lstinline{S_fill(T) ?`fill ( /* various types */ )}
    1785 to initialize storage.
    1786 There are three ways to fill storage:
    1787 \begin{enumerate}[itemsep=0pt,parsep=0pt]
    1788 \item
    1789 A char fills each byte of each object.
    1790 \item
    1791 An object of the returned type fills each object.
    1792 \item
    1793 An object array pointer fills some or all of the corresponding object array.
    1794 \end{enumerate}
    1795 For example:
    1796 \begin{cfa}[numbers=left,xleftmargin=2.5\parindentlnth]
    1797 int * i0 = alloc( @0n`fill@ );  sout | *i0 | nl;  // disambiguate 0
    1798 int * i1 = alloc( @5`fill@ );  sout | *i1 | nl;
    1799 int * i2 = alloc( @'\xfe'`fill@ ); sout | hex( *i2 ) | nl;
    1800 int * i3 = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | i3[i]; sout | nl;
    1801 int * i4 = alloc( 5, @0xdeadbeefN`fill@ );  for ( i; 5 ) sout | hex( i4[i] ); sout | nl;
    1802 int * i5 = alloc( 5, @i3`fill@ );  for ( i; 5 ) sout | i5[i]; sout | nl;
    1803 int * i6 = alloc( 5, @[i3, 3]`fill@ );  for ( i; 5 ) sout | i6[i]; sout | nl;
      1331Finally, \CFA has constructors and destructors, like \CC, which are invoked when allocating/deallocating with @new@/@delete@.
     1332\begin{cfa}
     1333T * t = new( 3, 4, 5 ); $\C[3in]{// allocate T and call constructor T\{ 3, 4, 5 \}}$
      1334W * w = new( 3.5 ); $\C{// allocate W and call constructor W\{ 3.5 \}}$
     1335delete( t, w ); $\C{// call destructors and free t and w}\CRT$
    18041336\end{cfa}
    1805 \begin{lstlisting}[numbers=left,xleftmargin=2.5\parindentlnth]
    1806 0
    1807 5
    1808 0xfefefefe
    1809 5 5 5 5 5
    1810 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef
    1811 5 5 5 5 5
    1812 5 5 5 -555819298 -555819298  // two undefined values
    1813 \end{lstlisting}
    1814 Examples 1 to 3 fill an object with a value or characters.
    1815 Examples 4 to 7 fill an array of objects with values, another array, or part of an array.
    1816 
    1817 \medskip\noindent
    1818 \lstinline{S_resize(T) ?`resize( void * oaddr )}
    1819 used to resize, realign, and fill, where the old object data is not copied to the new object.
    1820 The old object type may be different from the new object type, since the values are not used.
    1821 For example:
    1822 \begin{cfa}[numbers=left,xleftmargin=2.5\parindentlnth]
    1823 int * i = alloc( @5`fill@ );  sout | i | *i;
    1824 i = alloc( @i`resize@, @256`align@, @7`fill@ );  sout | i | *i;
    1825 double * d = alloc( @i`resize@, @4096`align@, @13.5`fill@ );  sout | d | *d;
    1826 \end{cfa}
    1827 \begin{lstlisting}[numbers=left,xleftmargin=2.5\parindentlnth]
    1828 0x55555556d5c0 5
    1829 0x555555570000 7
    1830 0x555555571000 13.5
    1831 \end{lstlisting}
    1832 Examples 2 to 3 change the alignment, fill, and size for the initial storage of @i@.
    1833 
    1834 \begin{cfa}[numbers=left,xleftmargin=2.5\parindentlnth]
    1835 int * ia = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | ia[i]; sout | nl;
    1836 ia = alloc( 10, @ia`resize@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl;
    1837 sout | ia; ia = alloc( 5, @ia`resize@, @512`align@, @13`fill@ ); sout | ia; for ( i; 5 ) sout | ia[i]; sout | nl;;
    1838 ia = alloc( 3, @ia`resize@, @4096`align@, @2`fill@ );  sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl;
    1839 \end{cfa}
    1840 \begin{lstlisting}[numbers=left,xleftmargin=2.5\parindentlnth]
    1841 5 5 5 5 5
    1842 7 7 7 7 7 7 7 7 7 7
    1843 0x55555556d560 0x555555571a00 13 13 13 13 13
    1844 0x555555572000 0x555555572000 2 0x555555572004 2 0x555555572008 2
    1845 \end{lstlisting}
    1846 Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
    1847 
    1848 \medskip\noindent
    1849 \lstinline{S_realloc(T) ?`realloc( T * a ))}
    1850 used to resize, realign, and fill, where the old object data is copied to the new object.
    1851 The old object type must be the same as the new object type, since the value is used.
    1852 Note, for @fill@, only the extra space after copying the data from the old object is filled with the given parameter.
    1853 For example:
    1854 \begin{cfa}[numbers=left,xleftmargin=2.5\parindentlnth]
    1855 int * i = alloc( @5`fill@ );  sout | i | *i;
    1856 i = alloc( @i`realloc@, @256`align@ );  sout | i | *i;
    1857 i = alloc( @i`realloc@, @4096`align@, @13`fill@ );  sout | i | *i;
    1858 \end{cfa}
    1859 \begin{lstlisting}[numbers=left,xleftmargin=2.5\parindentlnth]
    1860 0x55555556d5c0 5
    1861 0x555555570000 5
    1862 0x555555571000 5
    1863 \end{lstlisting}
    1864 Examples 2 to 3 change the alignment for the initial storage of @i@.
    1865 The @13`fill@ in example 3 does nothing because no extra space is added.
    1866 
    1867 \begin{cfa}[numbers=left,xleftmargin=2.5\parindentlnth]
    1868 int * ia = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | ia[i]; sout | nl;
    1869 ia = alloc( 10, @ia`realloc@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl;
    1870 sout | ia; ia = alloc( 1, @ia`realloc@, @512`align@, @13`fill@ ); sout | ia; for ( i; 1 ) sout | ia[i]; sout | nl;;
    1871 ia = alloc( 3, @ia`realloc@, @4096`align@, @2`fill@ );  sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl;
    1872 \end{cfa}
    1873 \begin{lstlisting}[numbers=left,xleftmargin=2.5\parindentlnth]
    1874 5 5 5 5 5
    1875 5 5 5 5 5 7 7 7 7 7
    1876 0x55555556c560 0x555555570a00 5
    1877 0x555555571000 0x555555571000 5 0x555555571004 2 0x555555571008 2
    1878 \end{lstlisting}
    1879 Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
    1880 The @13`fill@ in example 3 does nothing because no extra space is added.
    1881 
    1882 These \CFA allocation features are used extensively in the development of the \CFA runtime.
     1337The benefits of high-level API simplifications should not be underestimated with respect to programmer productivity and safety.
     1338
     1339
     1340\section{Performance}
     1341\label{c:Performance}
     1342
     1343This section uses a number of benchmarks to compare the behaviour of currently popular memory allocators with llheap.
     1344The goal is to see if llheap is a competitive memory allocator;
     1345no attempt is made to select a performance winner.
     1346
     1347
     1348\subsection{Experimental Environment}
     1349\label{s:ExperimentalEnvironment}
     1350
     1351The performance experiments are run on three different multi-core architectures, ARM, AMD, and Intel, covering memory models weak order (WO) and total store order (TSO), to determine if there is consistency across architectures:
     1352\begin{description}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt]
     1353\item[ARM]
     1354Gigabyte E252-P31 128-core socket 3.0 GHz, WO memory model
     1355\item[AMD]
     1356Supermicro AS--1125HS--TNR EPYC 9754 128--core socket, hyper-threading $\times$ 2 sockets (512 processing units) 2.25 GHz, TSO memory model
     1357\item[Intel]
     1358Supermicro SYS-121H-TNR Xeon Gold 6530 32--core, hyper-threading $\times$ 2 sockets (128 processing units) 2.1 GHz, TSO memory model
     1359\end{description}
     1360For the parallel experiments, threads are pinned to cores in a linear fashion, \ie from core $N$ to $N+M$, where $N$ is the start of a socket boundary.
     1361This layout produces the best throughput, as there is little or no communication among threads in the benchmarks, so binding tightly to the cache layout is unnecessary;
      1362hence, there are almost no OS or NUMA effects perturbing the benchmarks.
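A sketch of this linear pinning using the standard Linux affinity call:
\begin{C++}
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

static void pin( pthread_t thread, int core ) {	// pin a thread to a single core
	cpu_set_t mask;
	CPU_ZERO( &mask );
	CPU_SET( core, &mask );
	pthread_setaffinity_np( thread, sizeof(mask), &mask );
}
// benchmark threads 0..M are pinned to cores N..N+M, where N starts a socket
\end{C++}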
     1363
     1364The compilers are gcc/g++-14.2.0 and gfortran-14.2.0 running on the Linux v6.8.0-52-generic OS, with @LD_PRELOAD@ used to override the default allocator.
     1365To prevent eliding certain code patterns, crucial parts of a test are wrapped by the function @pass@
     1366\begin{uC++}
     1367static inline void * pass( void * v ) {         $\C[2.5in]{// prevent eliding, cheaper than volatile}$
     1368        __asm__  __volatile__( "" : "+r"(v) );  return v;
     1369}
     1370void * vp = pass( malloc( 0 ) );                        $\C{// wrap malloc call to prevent elision}\CRT$
     1371\end{uC++}
     1372The call to @pass@ can prevent a small number of compiler optimizations but this cost is the same for all allocators.
     1373
     1374
     1375\subsection{Memory Allocators}
     1376\label{s:MemoryAllocators}
     1377
     1378Historically, a number of C/\CC, stand-alone, general-purpose memory-allocators, \eg dlmalloc~\cite{dlmalloc}, have been written for use by programming languages providing unmanaged memory.
      1379For this work, six popular, thread-safe memory allocators are selected for comparison, along with llheap.
     1380
     1381\begin{description}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt,listparindent=\parindent]
     1382\item[glibc~\cite{glibc}] % https://sourceware.org/glibc/wiki/MallocInternals
     1383is the default glibc allocator, derived from ptmalloc, derived from dlmalloc.
     1384glibc has multiple threads sharing multiple heaps with a global shared heap, header per allocation, free-lists with different organizational criteria and searching, and coalescing of certain adjacent free-areas.
     1385Version Ubuntu GLIBC 2.31-0ubuntu9.7 2.31 compiled by Ubuntu 24.04.
     1386
     1387\item[hoard~\cite{hoard}]
     1388has multiple threads sharing multiple heaps with a global shared heap, where each heap is composed of superblocks containing fixed-sized objects, with each super-block having a single header for its objects and reuse of superblocks if empty.
     1389Version 3.13.0, compiled with gcc-14.2.0, default configuration, using command @make@.
     1390Over the past 5 years, hoard development has stopped;
     1391it fails on the ARM architecture, possibly because of the WO memory model.
     1392
     1393\item[jemalloc~\cite{Evans06}]
     1394has multiple threads sharing multiple heaps (arenas) composed of same-sized chunks subdivided into regions composed of pages where each page is a container of same-sized objects.
     1395The components are organized into a number of data structures to facilitate allocations, freeing, and coalescing.
     1396Large objects are allocated using @mmap@.
     1397Version jemalloc-5.3.0~\cite{jemalloc}, built with the default configuration, using commands: @autogen.sh; configure; make; make install@.
     1398
     1399\item[mimalloc~\cite{Leijen19}]
      1400has a heap per thread composed of a reserved area subdivided into three sizes of page buffers, where each page is a container of same-sized objects.
      1401Each page manages its own internal free list, and the free list is built when a page is created so there is no initial bump pointer.
     1402Empty pages are coalesced for reuse.
     1403Uses a fast freelist search for small allocation sizes.
      1404Ownership is handled with a separate remote free-list, and remote frees are batched before pushing to the owner heap.
     1405Version mimalloc-v2.1.2, built with the default configuration, using commands @cmake . ; make@.
     1406
     1407\item[tbbmalloc~{\cite[pp.~314--315]{Kukanov07}}] is the allocator shipped with Intel's Threading Building Blocks (TBB).
      1408tbbmalloc has a heap per thread for small allocations, with large allocations handled using a single request.
     1409There is a global heap to acquire and reuse space obtained from the OS;
     1410its reserved space is divided into thread buffers (containers).
     1411A thread heap is composed of linked containers, with binning used to manage the allocations/deallocations within the containers.
     1412Small object space is not returned to the OS.
     1413An allocation has to search its container list to find a partially filled one.
     1414The search is mitigated by moving mostly-free containers to the start of the container list;
     1415free containers are returned to the global heap.
     1416Ownership is handled with a separate remote free-list.
     1417Version @libtbbmalloc.so.2.11@, installed using @apt-get install libtbb-dev@.
     1418
     1419\item[tcmalloc~\cite{tcmalloc}] is the allocator shipped with Google's perftools.\footnote{
     1420Currently, there are two versions of tcmalloc: Google's perftools and one experimental version available on GitHub, which is not an officially supported Google product.
     1421We selected the perftools version because it is the most likely choice for users as it installs directly onto multiple OSs.}
      1422tcmalloc has per-CPU heaps for small allocations, with large allocations handled with a single request.
     1423CPU heaps require a rollback mechanism, @rseq@, to prevent the serially-reusable problem.
     1424There is a global heap to acquire and reuse space obtained from the OS;
     1425its reserved space is divided into multi-page spans (containers) of fixed sized objects.
     1426A CPU heap uses binning to manage the allocations/deallocations within the containers.
     1427Free containers are returned to the OS.
     1428Version @libtcmalloc_minimal.so.4@, installed using @apt-get install google-perftools@.
     1429\end{description}
     1430
     1431Untested allocators:
     1432\begin{description}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt]
     1433\item[ptmalloc3]
     1434is 8 years old and already integrated into glibc.
     1435\item[rpmalloc]
     1436requires explicit insertion of initialization/finalization calls for handling concurrent kernel threads.
     1437Having to augment programs, like SPEC CPU benchmarks, is deemed outside of normal programmer expectations.
     1438% An allocator should just plugin and work.
      1439\item[lock free] allocators guarantee allocation progress, even when threads are delayed or killed, by using atomic instructions, often CAS.
     1440The original lock-free allocator~\cite{Michael04} is completely lock-free.
     1441As stated, atomic instructions on the fast path result in a significant performance penalty.
     1442Hence, new allocators are not completely lock free, switching to a combination of synchronization-free, \ie 1:1 allocator model, on the fast path and lock-free on the slow path(s) to manipulate shared data structures~\cite{rpmalloc}.
      1443These allocators are better labelled as \newterm{hybrid locking} rather than lock free, as the lock-free aspect is not contributing to performance (see the sketch after this list).
     1444
     1445% We observe that none of the pre-built standard malloc replacement libraries for ubuntu \url{https://launchpad.net/ubuntu/+search?text=malloc} are completely lock-free.
     1446% 1:1 allocators can avoid synchronization (locks, or lock-free techniques with atomic instructions as well as cache coherence overheads) in their critical fast paths, but care must be taken to ensure the the amount of free memory captured in thread-local structures is bounded.
     1447
     1448% Another approach to synchronization for allocators is \newterm{Restartable Critical Sections} ~\cite {https://dl.acm.org/doi/10.1145/512429.512451, https://dl.acm.org/doi/pdf/10.5555/1698184, https://doi.org/10.1145/1064979.1064985}, which are available in linux as the \newterm{RSEQ} facility ~\cite{https://www.gnu.org/software/libc/manual/html_node/Restartable-Sequences.html}.
     1449% Restartable Critical Sections  provide obstruction-free progress by means of specially crafted transactions that will be rolled back if they happen to be interrupted by the kernel.
     1450% Restartable Critical Sections transactions can only operate on CPU-specific data, however, which forces a T:C allocator configuration.
     1451% Google's experimental tcmalloc \url{https://google.github.io/tcmalloc/rseq.html} uses RSEQ. 
     1452% SuperMalloc \url{ACM DL is dead at the moment, but it's in ISMM 2015} attempts to use hardware transactional memory for lock elision, but falls back to classic locking if the hardware facility is not present or when a given transactional attempt encounters repeated progress failures. 
     1453
     1454
     1455\end{description}
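For reference, the lock-free slow path typically reduces to a CAS loop pushing a freed block onto a shared free-list; a minimal C++ sketch of this classic pattern (the names are illustrative, not any allocator's actual code):
\begin{C++}
#include <atomic>

struct Block { Block * next; };                 // freed storage reused as a link node
std::atomic<Block *> freelist{ nullptr };       // shared free-list head

void push( Block * b ) {                        // lock-free push (Treiber stack)
        Block * head = freelist.load( std::memory_order_relaxed );
        do {
                b->next = head;                 // link to the current head
        } while ( ! freelist.compare_exchange_weak( head, b,
                        std::memory_order_release, std::memory_order_relaxed ) );
}
\end{C++}
The atomic instruction in this loop is exactly the fast-path cost the hybrid designs avoid.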
     1456
     1457Allocator size is an indirect indicator of complexity.
     1458Lines-of-code are computed with command @cloc *.{h,c,cc,cpp}@, except for hoard:
     1459@cloc --exclude-lang="Bourne Shell",SKILL,Markdown,Bazel  Heap-Layers source include@.
     1460\begin{center}
     1461\setlength{\tabcolsep}{13pt}
      1462\begin{tabular}{@{}rrrrrrr@{}}
     1463llheap & glibc & hoard & jemalloc & mimalloc & tbbmalloc & tcmalloc \\
     14641,450 & 3,807 & 11,932 & 24,512 & 6,887 & 6,256 & 33,963 \\
     1465\end{tabular}
     1466\end{center}
    18831467
    18841468
     18851469\section{Benchmarks}
    18861470\label{s:Benchmarks}
    18871471
    18941472There are two basic approaches for evaluating computer software: benchmarks and micro-benchmarks.
    1895 \begin{description}
     1473\begin{description}[leftmargin=*,topsep=3pt,itemsep=2pt,parsep=0pt]
    18961474\item[Benchmarks]
    18971475are a suite of application programs (SPEC CPU/WEB) that are exercised in a common way (inputs) to find differences among underlying software implementations associated with an application (compiler, memory allocator, web server, \etc).
    18981476The applications are supposed to represent common execution patterns that need to perform well with respect to an underlying software implementation.
    1899 Benchmarks are often criticized for having overlapping patterns, insufficient patterns, or extraneous code that masks patterns.
      1477Benchmarks are often criticized for having overlapping patterns, insufficient patterns, or extraneous code that masks patterns, resulting in little or no information about why an application did or did not perform well for the tested software.
    19001478\item[Micro-Benchmarks]
    19011479attempt to extract the common execution patterns associated with an application and run the pattern independently.
     19021480This approach removes any masking from extraneous application code, allows the execution pattern to be very precise, and provides an opportunity for the execution pattern to have multiple independent tuning adjustments (knobs).
    1903 Micro-benchmarks are often criticized for inadequately representing real-world applications.
     1481Micro-benchmarks are often criticized for inadequately representing real-world applications, but that is not their purpose.
    19041482\end{description}
    19051483
     
     19071485In the past, an assortment of applications has been used for benchmarking allocators~\cite{Detlefs93,Berger00,Berger01,berger02reconsidering}: P2C, GS, Espresso/Espresso-2, CFRAC/CFRAC-2, GMake, GCC, Perl/Perl-2, Gawk/Gawk-2, XPDF/XPDF-2, ROBOOP, Lindsay.
     19081486As well, an assortment of micro-benchmarks has been used for benchmarking allocators~\cite{larson99memory,Berger00,streamflow}: threadtest, shbench, Larson, consume, false sharing.
    1909 Many of these benchmark applications and micro-benchmarks are old and may not reflect current application allocation patterns.
    1910 
    1911 This work designs and examines a new set of micro-benchmarks for memory allocators that test a variety of allocation patterns, each with multiple tuning parameters.
    1912 The aim of the micro-benchmark suite is to create a set of programs that can evaluate a memory allocator based on the key performance metrics such as speed, memory overhead, and cache performance.
    1913 % These programs can be taken as a standard to benchmark an allocator's basic goals.
    1914 These programs give details of an allocator's memory overhead and speed under certain allocation patterns.
     1915 The allocation patterns are configurable (adjustment knobs) to observe an allocator's performance across a spectrum of allocation patterns, which is seldom possible with benchmark programs.
    1916 Each micro-benchmark program has multiple control knobs specified by command-line arguments.
    1917 
    1918 The new micro-benchmark suite measures performance by allocating dynamic objects and measuring specific metrics.
    1919 An allocator's speed is benchmarked in different ways, as are issues like false sharing.
    1920 
    1921 
    1922 \subsection{Prior Multi-Threaded Micro-Benchmarks}
    1923 
    1924 Modern memory allocators, such as llheap, must handle multi-threaded programs at the KT and UT level.
    1925 The following multi-threaded micro-benchmarks are presented to give a sense of prior work~\cite{Berger00} at the KT level.
    1926 None of the prior work addresses multi-threading at the UT level.
    1927 
    1928 
    1929 \subsubsection{threadtest}
    1930 
    1931 This benchmark stresses the ability of the allocator to handle different threads allocating and deallocating independently.
    1932 There is no interaction among threads, \ie no object sharing.
    1933 Each thread repeatedly allocates 100,000 \emph{8-byte} objects then deallocates them in the order they were allocated.
    1934 The execution time of the benchmark evaluates its efficiency.
    1935 
    1936 
    1937 \subsubsection{shbench}
    1938 
     1939 This benchmark is similar to threadtest, but each thread randomly allocates and frees a number of \emph{random-sized} objects.
    1940 It is a stress test that also uses runtime to determine efficiency of the allocator.
    1941 
    1942 
    1943 \subsubsection{Larson}
    1944 
    1945 This benchmark simulates a server environment.
    1946 Multiple threads are created where each thread allocates and frees a number of random-sized objects within a size range.
    1947 Before the thread terminates, it passes its array of 10,000 objects to a new child thread to continue the process.
    1948 The number of thread generations varies depending on the thread speed.
    1949 It calculates memory operations per second as an indicator of the memory allocator's performance.
    1950 
    1951 
    1952 \subsection{New Multi-Threaded Micro-Benchmarks}
    1953 
    1954 The following new benchmarks were created to assess multi-threaded programs at the KT and UT level.
    1955 For generating random values, two generators are supported: uniform~\cite{uniformPRNG} and fisher~\cite{fisherPRNG}.
    1956 
    1957 
    1958 \subsubsection{Churn Benchmark}
    1959 \label{s:ChurnBenchmark}
    1960 
    1961 The churn benchmark measures the runtime speed of an allocator in a multi-threaded scenario, where each thread extensively allocates and frees dynamic memory.
    1962 Only @malloc@ and @free@ are used to eliminate any extra cost, such as @memcpy@ in @calloc@ or @realloc@.
    1963 Churn simulates a memory intensive program and can be tuned to create different scenarios.
    1964 
    1965 Figure~\ref{fig:ChurnBenchFig} shows the pseudo code for the churn micro-benchmark.
    1966 This benchmark creates a buffer with M spots and an allocation in each spot, and then starts K threads.
    1967 Each thread picks a random spot in M, frees the object currently at that spot, and allocates a new object for that spot.
    1968 Each thread repeats this cycle N times.
    1969 The main thread measures the total time taken for the whole benchmark and that time is used to evaluate the memory allocator's performance.
     1487Many of these benchmark applications and micro-benchmarks are old and do not reflect current application allocation patterns.
     1488
     1489Except for the SPEC CPU benchmark, the other performance benchmarks used for testing are micro-benchmarks created for this paper.
     1490All the benchmarks are used solely to extract differences among memory allocators.
     1491The term benchmark in the following discussion means benchmark or micro-benchmark.
     1492
     1493
     1494\subsection{SPEC CPU 2017}
     1495
     1496SPEC CPU 2017 is an industry-standardized suite for measuring and comparing performance of compute-intensive programs.
     1497It contains integer and floating-point tests written in C, \CC, and Fortran, covering throughput and speed, where each test contains multiple benchmarks~\cite{SPECCPU2017}.
     1498All the benchmarks perform dynamic allocation, from light to heavy.
     1499However, the dynamic allocation is relatively small in comparison to the benchmark computation.
     1500Therefore, differences among allocators should be small, unless a particular access pattern triggers a pathological case.
      1501The reason for performing SPEC CPU across the allocators is to test this hypothesis.
      1502For allocator comparisons, we consider SPEC CPU differences within 5\% as equal and undetectable in general workloads and computing environments.
     1503For compiler comparisons, small differences of 1\% or 2\% are considered significant.
     1504
     1505Table~\ref{t:SPEC-CPU-benchmark} shows the elapsed time (inverted throughput) of the SPEC CPU tests condensed to the geomean across the benchmarks for each of the four SPEC tests, intrate, intspeed, fprate, and fpspeed, covering integer and floating-point operations.
     1506The tests are configured with size = ref, intrate/fprate: copies = 1, intspeed: threads = 1, fpspeed: threads = 16;
     1507only fpspeed is concurrent using OpenMP.
     1508Rigorous testing of SPEC CPU often runs many benchmark copies in parallel to completely load all computer cores.
     1509However, these tests quickly run into architectural bottlenecks having little to do with an allocator's behaviour.
      1510Running a single program bound to one core means the focus is strictly on allocator differences, rather than conflating them with transient OS and hardware differences.
     1511The throughputs are ranked with {\color{red}red} lowest time and {\color{blue}blue} highest, where lower is best.
      1512Hoard failed in multiple experiments on the ARM architecture, marked with {\color{violet}*Err*}, making it impossible to report its successful tests.
     1513
     1514The results show all allocators do well;
      1515the average, median, and relative standard deviation (right column)\footnote{$rstd = \sigma / \mu \times 100$, where $\sigma =$ standard deviation and $\mu =$ average} support our hypothesis that the performance difference, 0.6\% to 2.3\%, across allocators is small.
     1516One implementation trend we observed is that two of the integer tests, @omnetpp@ and @xalancbmk@, had an execution pattern that exercised the cache.
      1517For the three allocators using headers-per-allocation, glibc, llheap, and tbbmalloc, performance could be up to 40\% slower between the best and worst allocator results.
     1518The reason is that the headers consumed part of the cache line, resulting in more cache misses.
      1519These two experiments disproportionately increased the geomean for these allocators in both integer experiments on all architectures.
     1520Hence, headers-per-allocation are disadvantaged for this specific execution pattern.
     1521The floating-point tests show no trends among the allocators.
      1522The goal for llheap in this experiment is to do well, which is established by its closeness to the median result, \ie it normally sits in the middle of the allocator results.
     1523
     1524\begin{table}
     1525\centering
     1526\caption{SPEC CPU benchmark, 3 hardware architectures, geomean per test in seconds, lower is better}
     1527\label{t:SPEC-CPU-benchmark}
     1528%\setlength{\tabcolsep}{6pt}
     1529\begin{tabular}{@{}p{15pt}@{\hspace{15pt}}r|*{7}{r}|*{3}{r}@{}}
     1530                &       bench/alloc. & glibc & hoard & jemalloc & llheap & mimalloc & tbbmalloc & tcmalloc & avg & med & rstd \\
     1531\cline{2-12}
     1532                &       intrate & {\color{blue}314.4} & {\color{violet}*Err*} & 300.3 & 309.9 & 302.6 & 313 & {\color{red}298.7} & 306.5 & 309.9 & 2\% \\
     1533ARM             &       intspeed & {\color{blue}439.1} & {\color{violet}*Err*} & 417.6 & 431.1 & 419.9 & 436.2 & {\color{red}415.5} & 426.6 & 431.1 & 2.2\% \\
     1534                &       fprate & 347.6 & {\color{violet}*Err*} & {\color{red}333.9} & 352.2 & {\color{blue}356.6} & 345.9 & 344.5 & 346.8 & 347.6 & 2\% \\
     1535                &       fpspeed & 248.4 & {\color{violet}*Err*} & 245.3 & 245.7 & {\color{blue}250.9} & 246.6 & {\color{red}243.8} & 246.8 & 246.6 & 0.93\%
     1536\end{tabular}
     1537
     1538\begin{comment}
     1539\bigskip
     1540\begin{tabular}{@{}p{15pt}@{\hspace{15pt}}r|*{7}{r}|*{3}{r}@{}}
     1541                &       bench/alloc. & glibc & hoard & jemalloc & llheap & mimalloc & tbbmalloc & tcmalloc & avg & med & rstd \\
     1542\cline{2-12}
     1543                &       intrate & 251 & 242 & 239 & 249 & 240 & {\color{blue}251} & {\color{red}237} & 244 & 242 & 2.3\% \\
     1544AMD             &       intspeed & 356 & 337 & 335 & 351 & 339 & {\color{blue}356} & {\color{red}333} & 344 & 339 & 2.7\% \\
     1545                &       fprate & 256 & 261 & {\color{red}250} & 257 & {\color{blue}270} & 256 & 254 & 258 & 256 & 2.3\% \\
     1546                &       fpspeed & 340 & {\color{blue}353} & {\color{red}326} & 338 & 348 & 341 & 328 & 339 & 340 & 2.7\%
     1547\end{tabular}
     1548\end{comment}
     1549
     1550\bigskip
     1551\begin{tabular}{@{}p{15pt}@{\hspace{15pt}}r|*{7}{r}|*{3}{r}@{}}
     1552                &       bench/alloc. & glibc & hoard & jemalloc & llheap & mimalloc & tbbmalloc & tcmalloc & avg & med & rstd \\
     1553\cline{2-12}
     1554                &       intrate & 251.2 & {\color{red}241.1} & 251.9 & 249.3 & 251.6 & 251.5 & {\color{blue}252.3} & 249.9 & 251.5 & 1.5\% \\
     1555AMD             &       intspeed & {\color{blue}356.1} & {\color{red}337.1} & 355.4 & 351.7 & 355.5 & 355.8 & 355.9 & 352.5 & 355.5 & 1.8\% \\
     1556                &       fprate & {\color{red}253.9} & {\color{blue}259.9} & 254.4 & 255.8 & 254.5 & 254.4 & 254.7 & 255.4 & 254.5 & 0.75\% \\
     1557                &       fpspeed & 329.9 & {\color{blue}339.6} & 330.6 & {\color{red}327.2} & 329.9 & 329.8 & 329.5 & 330.9 & 329.9 & 1.1\%
     1558\end{tabular}
     1559
     1560\bigskip
     1561\begin{tabular}{@{}p{15pt}@{\hspace{15pt}}r|*{7}{r}|*{3}{r}@{}}
     1562                &       bench./alloc. & glibc & hoard & jemalloc & llheap & mimalloc & tbbmalloc & tcmalloc & avg & med & rstd \\
     1563\cline{2-12}
     1564                &       intrate & 188.6 & 185.1 & 183.1 & 188.6 & 181.5 & {\color{blue}189.4} & {\color{red}181.2} & 185.4 & 185.1 & 1.8\% \\
     1565Intel   &       intspeed & 271.6 & 264.6 & 263.5 & 270.2 & 261.2 & {\color{blue}272.1} & {\color{red}260.3} & 266.2 & 264.6 & 1.7\% \\
     1566                &       fprate & 202.7 & {\color{red}201.8} & 204.4 & 205.1 & {\color{blue}205.3} & 204.7 & 203.7 & 204 & 204.4 & 0.59\% \\
     1567                &       fpspeed & 237.3 & 235.3 & 234.5 & 235.6 & {\color{blue}244.5} & 236.1 & {\color{red}233.6} & 236.7 & 235.6 & 1.4\%
     1568\end{tabular}
     1569\end{table}
     1570
     1571
     1572\subsection{Realloc Benchmark}
     1573
     1574Some examination of @realloc@ is necessary to encourage its use.
      1575Reallocation can be very efficient (both in space and time) when manipulating variable-sized objects, like strings, multi-precision numbers, or dynamic-sized arrays.
      1576Both X11 (500+ calls) and glibc (300+ calls) use @realloc@ for various purposes.
     1577For example, in \CC:
     1578\begin{C++}
     1579string s = "abc"; // initial allocation and copy new value
     1580s = "gh"; // change size and copy new value
     1581s = "l" + s + "r"; // change size and copy new value
     1582s = s.substr(0,2); // reduce size
     1583\end{C++}
      1584variable @s@ changes size and value multiple times, plus temporary strings are created implicitly, \eg multiple concatenations, all of which require multiple allocations, copies, and deallocations.
     1585@realloc@ can optimize some of these operations in two ways:
     1586\begin{enumerate}[leftmargin=*]
     1587\item
      1588For decreasing size, Figure~\ref{f:ReallocOptDecreasing} shows a logical truncation of the existing object rather than creating a new object, \ie a heuristic decides whether to perform the 3-step procedure (allocate, copy, and free), or to pretend the storage is decreased and return the old storage and value, performing zero work but increasing internal fragmentation.
      1589For example, a request to decrease size from 96 to 75 bytes can be implemented either way.
      1590The 21 bytes of internal fragmentation at the end of the logical reallocation may be unavailable, directly available if the allocator supports @malloc_usable_size@, or indirectly available if put back on the allocator free list.
     1591\item
     1592For increasing size, Figure~\ref{f:ReallocOptIncreasing} takes advantage of the fact that many memory allocators quantize request sizes (binning), often returning slightly more storage than requested (internal fragmentation).
      1593For example, an initial request for 75 bytes may return 96 bytes of storage, giving 21 bytes of internal fragmentation.
      1594For size increases of up to 21 bytes, @realloc@ can take advantage of this unused space rather than performing the 3-step procedure, which can also result in unused storage (see the sketch after this list).
     1595\end{enumerate}
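The quantized slack described above can be observed directly with the GNU extension @malloc_usable_size@; a minimal sketch, where the 96-byte bucket is an assumption about a particular allocator's binning:
\begin{C++}
#include <cstdlib>
#include <malloc.h>                             // malloc_usable_size, GNU extension

int main() {
        char * p = (char *)malloc( 75 );        // request 75 bytes
        size_t usable = malloc_usable_size( p ); // e.g., 96 on a binning allocator
        p = (char *)realloc( p, usable );       // grow within the bucket: no copy expected
        free( p );
}
\end{C++}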
    19701596
    19711597\begin{figure}
    19721598\centering
    1973 \begin{lstlisting}
    1974 Main Thread
    1975         create worker threads
    1976         note time T1
    1977         ...
    1978         note time T2
    1979         churn_speed = (T2 - T1)
    1980 Worker Thread
    1981         initialize variables
    1982         ...
    1983         for ( N )
    1984                 R = random spot in array
    1985                 free R
    1986                 allocate new object at R
    1987 \end{lstlisting}
    1988 %\includegraphics[width=1\textwidth]{figures/bench-churn.eps}
    1989 \caption{Churn Benchmark}
    1990 \label{fig:ChurnBenchFig}
     1599\subfloat[Decreasing]{\label{f:ReallocOptDecreasing}\input{decreasing}}
     1600\hspace*{5pt}
     1601\vrule
     1602\hspace*{5pt}
     1603\subfloat[Increasing]{\label{f:ReallocOptIncreasing}\raisebox{0.38\totalheight}{\input{increasing}}}
     1604\caption{Realloc Optimizations}
     1605\label{f:ReallocOptimizations}
    19911606\end{figure}
    19921607
    1993 The adjustment knobs for churn are:
    1994 \begin{description}[itemsep=0pt,parsep=0pt]
    1995 \item[thread:]
    1996 number of threads (K).
    1997 \item[spots:]
    1998 number of spots for churn (M).
    1999 \item[obj:]
    2000 number of objects per thread (N).
    2001 \item[max:]
    2002 maximum object size.
    2003 \item[min:]
    2004 minimum object size.
    2005 \item[step:]
    2006 object size increment.
    2007 \item[distro:]
    2008 object size distribution
    2009 \end{description}
    2010 
    2011 
    2012 \subsubsection{Cache Thrash}
    2013 \label{sec:benchThrashSec}
    2014 
    2015 The cache-thrash micro-benchmark measures allocator-induced active false-sharing as illustrated in Section~\ref{s:AllocatorInducedActiveFalseSharing}.
    2016 If memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
    2017 When threads share a cache line, frequent reads/writes to their cache-line object causes cache misses, which cause escalating delays as cache distance increases.
    2018 
    2019 Cache thrash tries to create a scenario that leads to false sharing, if the underlying memory allocator is allocating dynamic memory to multiple threads on the same cache lines.
    2020 Ideally, a memory allocator should distance the dynamic memory region of one thread from another.
     2021 Having multiple threads allocating small objects simultaneously can cause a memory allocator to allocate objects on the same cache line, if it is not distancing the memory among different threads.
    2022 
    2023 Figure~\ref{fig:benchThrashFig} shows the pseudo code for the cache-thrash micro-benchmark.
    2024 First, it creates K worker threads.
     2025 Each worker thread allocates an object and intensively reads/writes it M times to possibly invalidate cache lines that may interfere with other threads sharing the same cache line.
    2026 Each thread repeats this for N times.
    2027 The main thread measures the total time taken for all worker threads to complete.
    2028 Worker threads sharing cache lines with each other are expected to take longer.
     1608Figure~\ref{f:reallocShrinkBenchmark} shows a benchmark to determine if an allocator takes advantage of the first optimization.
      1609The benchmark takes a fixed-size allocation and reduces it by 10\%--90\% in steps of 10\%, checking the storage addresses at each reduction step to see whether the same or new storage is returned.
     1610The fixed-sized allocation is varied between sizes 64--16K in powers of 2.
     1611Hence, both small and large sized storage are reduced.
     1612The following table shows the approximate percentage point where storage is retained on shrinkage, \eg the storage reduction must be greater than 50\% of the prior allocation before a new allocation is performed for the smaller size, data is copied, and prior storage released.
     1613\begin{center}
     1614\setlength{\tabcolsep}{15pt}
     1615\begin{tabular}{@{}ccccccc@{}}
     1616glibc   & hoard & jemalloc      & llheap        & mimalloc      & tbbmalloc & tcmalloc \\
     161790\%    & 50\%  & 20\%          & 50\%          & 50\%          & 90\%          & 50\%
     1618\end{tabular}
     1619\end{center}
      1620The results show glibc and tbbmalloc do not perform this optimization, while the other allocators do, with 50\% as the most popular crossover point.
     1621
     1622Figure~\ref{f:reallocGrowBenchmark} shows a benchmark to determine if an allocator takes advantage of the second optimization.
      1623This benchmark creates an array of fixed-sized elements, increasing the array size by one element from 1 to 10,000 elements.
      1624The element size is varied among 32, 64, 128, and 256 bytes.
     1625To prevent allocators from doing a bump allocation across the entire benchmark, a small perturbation is introduced where storage is allocated, held, and then released at infrequent points across the experiment.
     1626A companion experiment is a manual simulation of the @realloc@: @malloc@ new storage, copy old data, and free old storage.
      1627Note, the @realloc@ simulation performs an equivalent perturbation to the @realloc@ benchmark each time through the loop.
     1628The experiment is repeated 10,000 times for @realloc@ and 100 times for the simulation to obtain similar timing ranges.
     1629The performance difference between the @realloc@ and @realloc@-simulation experiments shows if @realloc@ is optimizing unused internal fragmentation at the end of its quantized bucket.
     1630
     1631Figure~\ref{f:reallocGrowResults} shows the results for the @realloc@ and @realloc@ simulation benchmarks.
     1632The difference between the benchmarks is two orders of magnitude, \ie all allocators are reusing some internal fragmentation to prevent a reallocation and copy as the array grows.
     1633The large difference is the extra copying in the simulation case, which is expensive.
     1634Within the @realloc@ benchmark, allocators glibc, hoard, jemalloc, and tbbmalloc have higher cost, while the remaining allocators have almost identical results.
     1635Within the @realloc@ simulation benchmark, allocators glibc and tbbmalloc have higher cost, while the remaining allocators have almost identical results.
     1636This benchmark confirms that @realloc@ can provide some level of performance benefit for dynamically growing data structures, \eg strings or arrays.
      1637Therefore, encouraging its use is reasonable if, and only if, it is safe to do so.
     1638Note, this encouragement is apt for container developers, where low-level storage management is performed internally for the benefit of application users.
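Safe use includes the standard @realloc@ pitfall: assigning the result directly to the only pointer loses the original storage if @realloc@ fails. A minimal sketch of the safe idiom (the helper name is illustrative):
\begin{C++}
#include <cstdlib>

// grow a buffer without leaking it when realloc fails
char * grow( char * buf, size_t newsize ) {
        char * tmp = (char *)realloc( buf, newsize ); // may move the data
        if ( tmp == nullptr ) {                 // on failure, the old buffer is still valid
                free( buf );
                return nullptr;
        }
        return tmp;                             // possibly a new address
}
\end{C++}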
    20291639
    20301640\begin{figure}
    2031 \centering
    2032 \input{AllocInducedActiveFalseSharing}
    2033 \medskip
    2034 \begin{lstlisting}
    2035 Main Thread
    2036         create worker threads
    2037         ...
    2038         signal workers to allocate
    2039         ...
    2040         signal workers to free
    2041         ...
    2042 Worker Thread$\(_1\)$
    2043         warm up memory in chunks of 16 bytes
    2044         ...
    2045         For N
    2046                 malloc an object
    2047                 read/write the object M times
    2048                 free the object
    2049         ...
    2050 Worker Thread$\(_2\)$
    2051         // same as Worker Thread$\(_1\)$
    2052 \end{lstlisting}
    2053 %\input{MemoryOverhead}
    2054 %\includegraphics[width=1\textwidth]{figures/bench-cache-thrash.eps}
    2055 \caption{Allocator-Induced Active False-Sharing Benchmark}
    2056 \label{fig:benchThrashFig}
     1641\begin{C++}
      1642for ( size_t p = 10; p <= 100; p += 10 ) {      /* resize to p percent of original */
      1643        for ( size_t s = 64; s <= 16 * 1024; s <<= 1 ) { /* sizes 64--16K in powers of 2 */
      1645                void * prev = pass( malloc( s ) );
      1646                void * curr = pass( realloc( prev, s * p / 100 ) );
      1647                if ( prev == curr ) {  /*  same storage reused: print  */  }
      1648                free( curr );
      1649        }
      1650}
     1651\end{C++}
     1652\vspace*{-10pt}
     1653\caption{\lstinline{realloc} Shrink Benchmark}
     1654\label{f:reallocShrinkBenchmark}
     1655
     1656\vspace*{10pt}
     1657
     1658%\setlength{\tabcolsep}{15pt}
     1659\begin{tabular}{@{}ll@{}}
     1660\multicolumn{1}{c}{\lstinline{realloc}} & \multicolumn{1}{c}{\lstinline{realloc} simulation} \\
     1661\begin{C++}
     1662struct S { size_t ca[DIM]; }; // varied 32, 64, 128, 256
     1663enum { Ssize = sizeof( S ) };
     1664for ( size_t t = 0; t < @10$'$000@; t += 1 ) {
     1665        S * sa = nullptr, * perturb = nullptr;
     1666        for ( size_t i = 0, s = Ssize; i < 10$'$000; i += 1, s += Ssize ) {
     1667                sa = (S *)@realloc( sa, s );@
     1668
     1669                sa[i].ca[0] = i;
     1670                if ( i % 1024 == 0 ) perturb = (S *)realloc( perturb, s );
     1671        }
     1672        free( sa );
     1673        free( perturb );
     1674}
     1675\end{C++}
     1676&
     1677\begin{C++}
     1678struct S { size_t ca[DIM]; }; // varied 32, 64, 128, 256
     1679enum { Ssize = sizeof( S ) };
     1680for ( size_t t = 0; t < @100@; t += 1 ) {
     1681        S * sa = nullptr, * so = (S *)malloc( Ssize );
     1682        for ( size_t i = 0, s = Ssize; i < 10$'$000; i += 1, s += Ssize ) {
     1683                sa = (S *)@malloc( s )@;                        // simulate realloc
     1684                memcpy( sa, so, s - Ssize );    // so one smaller
     1685                sa[i].ca[0] = i;
     1686                free( so );
     1687                so = sa;
     1688        }
     1689        free( sa );
     1690}
     1691\end{C++}
     1692\end{tabular}
     1693\caption{\lstinline{realloc} Grow Benchmark}
     1694\label{f:reallocGrowBenchmark}
     1695
     1696\vspace*{20pt}
     1697
     1698\hspace*{-17pt}
     1699\setlength{\tabcolsep}{-13pt}
     1700\begin{tabular}{@{}l@{\hspace*{-5pt}{\vrule height 1.05in}\hspace*{-5pt}}l@{}}
     1701\begin{tabular}{@{}lll@{}}
     1702\input{prolog.realloc.tex} & \input{swift.realloc.tex} & \input{java.realloc.tex}
     1703\\
     1704\multicolumn{3}{@{}c@{}}{\lstinline{realloc}, 10,000 repetitions}
     1705\end{tabular}
     1706&
     1707\setlength{\tabcolsep}{-10pt}
     1708\begin{tabular}{@{}lll@{}}
     1709\input{prolog.reallocsim.tex} & \input{swift.reallocsim.tex} & \input{java.reallocsim.tex}
     1710\\
     1711\multicolumn{3}{@{}c@{}}{\lstinline{realloc} simulation, 100 repetitions}
     1712\end{tabular}
     1713\end{tabular}
     1714
     1715\caption{\lstinline{realloc} Grow Results, x-axis in bytes, lower is better}
     1716\label{f:reallocGrowResults}
    20571717\end{figure}
    20581718
    2059 The adjustment knobs for cache access scenarios are:
    2060 \begin{description}[itemsep=0pt,parsep=0pt]
    2061 \item[thread:]
    2062 number of threads (K).
    2063 \item[iterations:]
    2064 iterations of cache benchmark (N).
    2065 \item[cacheRW:]
    2066 repetitions of reads/writes to object (M).
    2067 \item[size:]
    2068 object size.
    2069 \end{description}
    2070 
    2071 
    2072 \subsubsection{Cache Scratch}
    2073 \label{s:CacheScratch}
    2074 
    2075 The cache-scratch micro-benchmark measures allocator-induced passive false-sharing as illustrated in Section~\ref{s:AllocatorInducedPassiveFalseSharing}.
    2076 As with cache thrash, if memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
    2077 In this scenario, the false sharing is being caused by the memory allocator although it is started by the program sharing an object.
    2078 
    2079 % An allocator can unintentionally induce false sharing depending upon its management of the freed objects.
    2080 % If thread Thread$_1$ allocates multiple objects together, they may be allocated on the same cache line by the memory allocator.
    2081 % If Thread$_1$ passes these object to thread Thread$_2$, then both threads may share the same cache line but this scenario is not induced by the allocator;
    2082 % instead, the program induced this situation.
    2083 % Now if Thread$_2$ frees this object and then allocate an object of the same size, the allocator may return the same object, which is on a cache line shared with thread Thread$_1$.
    2084 
    2085 Cache scratch tries to create a scenario that leads to false sharing and should make the memory allocator preserve the program-induced false sharing, if it does not return a freed object to its owner thread and, instead, re-uses it instantly.
     2086 An allocator using object ownership, as described in Section~\ref{s:Ownership}, is less susceptible to allocator-induced passive false-sharing.
    2087 If the object is returned to the thread that owns it, then the new object that the thread gets is less likely to be on the same cache line.
    2088 
    2089 Figure~\ref{fig:benchScratchFig} shows the pseudo code for the cache-scratch micro-benchmark.
     2090 First, it allocates K dynamic objects together, one for each of the K worker threads, possibly causing the memory allocator to allocate these objects on the same cache line.
     2091 Then it creates K worker threads and passes an object from the K allocated objects to each of the K threads.
    2092 Each worker thread frees the object passed by the main thread.
    2093 Then, it allocates an object and reads/writes it repetitively for M times possibly causing frequent cache invalidations.
    2094 Each worker repeats this N times.
     1719
     1720\subsubsection{Cache Benchmark}
     1721\label{s:CacheBenchmark}
     1722
     1723The cache benchmarks attempt to look for false sharing (see Section~\ref{s:FalseSharing}).
     1724Unfortunately, testing for allocator-induced false-sharing is difficult, because it is equivalent to searching for randomly conjoined allocations within a large storage space.
     1725Figure~\ref{f:CacheBenchmark} shows a benchmark for program induced false-sharing, where pointers are passed among threads.
     1726As a side effect, this benchmark is indirectly checking which allocator model is being used.
      1727The program main runs the benchmark with 4, 8, 16, and 32 threads, passing each thread a separate dynamically-allocated array of @ASIZE@ elements from its common heap.
      1728Each thread then traverses the array, adding a value to each element (read and write).
     1729The traversal is repeated T times.
     1730Each thread frees the array at the end.
     1731The experiment is run with a small and medium sized array.
      1732If there is any heap sharing, the small array has a higher probability of false sharing, \eg the first and last elements of different arrays can be juxtaposed in memory, and hence appear in the same cache line.
    20951733
    20961734\begin{figure}
    2097 \centering
    2098 \input{AllocInducedPassiveFalseSharing}
    2099 \medskip
    2100 \begin{lstlisting}
    2101 Main Thread
    2102         malloc N objects $for$ each worker $thread$
    2103         create worker threads and pass N objects to each worker
    2104         ...
    2105         signal workers to allocate
    2106         ...
    2107         signal workers to free
    2108         ...
    2109 Worker Thread$\(_1\)$
    2110         warmup memory in chunks of 16 bytes
    2111         ...
    2112         free the object passed by the Main Thread
    2113         For N
    2114                 malloc new object
    2115                 read/write the object M times
    2116                 free the object
    2117         ...
    2118 Worker Thread$\(_2\)$
    2119         // same as Worker Thread$\(_1\)$
    2120 \end{lstlisting}
    2121 %\includegraphics[width=1\textwidth]{figures/bench-cache-scratch.eps}
    2122 \caption{Program-Induced Passive False-Sharing Benchmark}
    2123 \label{fig:benchScratchFig}
     1735\begin{C++}
     1736enum { TIMES = 10$'$000$'$000$'$000, ASIZE = 3 }; $\C{// repetitions, array size 3 or 30}$
     1737void * worker( void * arg ) {           $\C{// array passed from program main}$
     1738        volatile size_t * arr = (size_t *)arg; $\C{// volatile prevents code elision}$
     1739        for ( size_t  t = 0; t < TIMES / ASIZE; t += 1 ) $\C{// repeat experiment N times}$
     1740                for ( size_t r = 0; r < ASIZE; r += 1 ) $\C{// iterate through array}$
     1741                        arr[r] += r;                    $\C{// read/write array elements}$
     1742        free( (void *)arr );                    $\C{// cast away volatile}$
        return nullptr;                         $\C{// satisfy the pthread worker signature}$
      1743}
     1744\end{C++}
     1745\vspace*{-5pt}
     1746\caption{Cache False-Sharing Benchmark}
     1747\label{f:CacheBenchmark}
    21241748\end{figure}
    21251749
    2126 Each thread allocating an object after freeing the original object passed by the main thread should cause the memory allocator to return the same object that was initially allocated by the main thread if the allocator did not return the initial object back to its owner (main thread).
     2127 Then, intensive read/write on the shared cache line by multiple threads should slow down worker threads due to high cache invalidations and misses.
     2128 The main thread measures the total time taken for all the workers to complete.
    2129 
     2130 Similar to the cache-thrash benchmark in Section~\ref{sec:benchThrashSec}, different cache access scenarios can be created using the following command-line arguments.
    2131 \begin{description}[topsep=0pt,itemsep=0pt,parsep=0pt]
    2132 \item[threads:]
    2133 number of threads (K).
    2134 \item[iterations:]
    2135 iterations of cache benchmark (N).
    2136 \item[cacheRW:]
    2137 repetitions of reads/writes to object (M).
    2138 \item[size:]
    2139 object size.
    2140 \end{description}
    2141 
    2142 
    2143 \subsubsection{Speed Micro-Benchmark}
    2144 \label{s:SpeedMicroBenchmark}
    2145 \vspace*{-4pt}
    2146 
    2147 The speed benchmark measures the runtime speed of individual and sequences of memory allocation routines:
    2148 \begin{enumerate}[topsep=-5pt,itemsep=0pt,parsep=0pt]
    2149 \item malloc
    2150 \item realloc
    2151 \item free
    2152 \item calloc
    2153 \item malloc-free
    2154 \item realloc-free
    2155 \item calloc-free
    2156 \item malloc-realloc
    2157 \item calloc-realloc
    2158 \item malloc-realloc-free
    2159 \item calloc-realloc-free
    2160 \item malloc-realloc-free-calloc
     1750Figure~\ref{f:cacheResults} shows the results for the cache benchmark run with array sizes 3 and 30.
     1751Allocators glibc, llheap, mimalloc, and tbbmalloc show little or no false-sharing issues at both 3 and 30 array sizes, \ie all generate virtually the same result.
      1752Note, on the Intel, there is a rise at 32 cores: the L3 cache shifts at 16 cores, and stepping to 32 cores introduces NUMA effects.
     1753This result correlates with these allocators using a 1:1 allocator model.
      1754Allocators hoard, jemalloc, and tcmalloc show false-sharing issues at both the 3 and 30 array sizes, halving performance at size 3.
     1755The @perf@ performance analyzer shows a large number of cache misses for these allocators, indicating false sharing.
     1756This result correlates with these allocators using some form of heap sharing.
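For comparison, a program can defend itself against allocator placement by padding and aligning each thread's array to the cache-line size; a minimal sketch, assuming a 64-byte line:
\begin{C++}
#include <cstdlib>                              // aligned_alloc (C++17)

enum { LINE = 64 };                             // assumed cache-line size

size_t * alloc_padded( size_t n ) {             // round the request up to whole cache lines
        size_t bytes = ( n * sizeof( size_t ) + LINE - 1 ) / LINE * LINE;
        return (size_t *)aligned_alloc( LINE, bytes ); // size is a multiple of the alignment
}
\end{C++}
With such padding, the 3- and 30-element arrays should perform identically under every allocator, at the cost of internal fragmentation.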
     1757
     1758\begin{figure}
     1759\setlength{\tabcolsep}{-8pt}
     1760\begin{tabular}{@{}l@{\hspace*{-5pt}{\vrule height 1.05in}\hspace*{-5pt}}l@{}}
     1761\begin{tabular}{@{}lll@{}}
     1762\input{prolog.cacheS.tex} & \input{swift.cacheS.tex} & \input{java.cacheS.tex}
     1763\\
     1764\multicolumn{3}{@{}c@{}}{3 Element Array}
     1765\end{tabular}
     1766&
     1767\begin{tabular}{@{}lll@{}}
     1768\input{prolog.cacheL.tex} & \input{swift.cacheL.tex} & \input{java.cacheL.tex}
     1769\\
     1770\multicolumn{3}{@{}c@{}}{30 Element Array}
     1771\end{tabular}
     1772\end{tabular}
     1773\caption{Cache False-Sharing Results, x-axis in cores, lower is better}
     1774\label{f:cacheResults}
     1775\end{figure}
     1776
     1777
     1778\subsection{Ownership Benchmark}
     1779
     1780% In multi-threaded allocators with H:T or 1:1 structure, one thread can allocation storage, send it to another thread, and the receiving thread deallocates it.
     1781% This raises the question of where the storage is returned: the heap (area) from which it was allocated or a different heap;
     1782% in some cases there is no choice, when storage is bound to its allocation area.
     1783% If storage is returned to its allocation heap, there are concurrency issues if the allocation area is shared.
     1784% If the storage is returned to another heap, there can still be concurrency issues, but the real problem is storage drain in the allocation heap and storage bloat in the deallocation heap, without a secondary mechanism to redistribute storage.
     1785% This choice is the \newterm{ownership problem}.
     1786
      1787Historically, the Larson benchmark~\cite{larson99memory} is purported to test for ownership issues, but in actuality, the benchmark is a complex simulation of a server environment.
     1788Multiple threads allocate and free a number of random-sized objects within a size range.
     1789Each thread runs for a time period, and at termination, creates a child thread and passes its array of objects as an argument, which does not require synchronization.
     1790The number of thread generations varies with thread speed.
     1791% It calculates memory operations per second as an indicator of the memory allocator's performance.
      1792Because the benchmark performs multiple kinds of tests, it is impossible to extract just the remote-free rate.
     1793
     1794Therefore, a new benchmark is created to measure the asynchronous transfer cost from the deallocating to the allocating thread (remote free).
      1795However, the allocating thread must first asynchronously transfer the allocations to the deallocating thread.
     1796This cost needs to be mitigated so it does not mask the remote-free measurement.
     1797To accomplish this, a thread batches its allocations (lots of 100), and atomically exchanges this batch with a freeing thread, which then individually frees the batch components.
     1798Hence, the cost of the asynchronous allocation transfer is much less than the individual cost of the remote free.
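The batch hand-off itself needs only a single atomic swap; a minimal C++ sketch of the idea (the types and sizes are illustrative, not the benchmark's actual code):
\begin{C++}
#include <atomic>

enum { BatchSize = 100, MaxThread = 32 };
struct BatchArray { void * slots[BatchSize]; }; // one batch of allocations
std::atomic<BatchArray *> mailbox[MaxThread];   // one exchange slot per thread

// swap a full batch for whatever the victim thread last posted:
// another full batch, or nullptr if its slot is empty
BatchArray * exchange( int victim, BatchArray * mine ) {
        return mailbox[victim].exchange( mine, std::memory_order_acq_rel );
}
\end{C++}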
     1799
     1800Figure~\ref{f:OwnershipBenchmark} shows the pseudo-code for the benchmark.
     1801There is a global matrix of allocation addresses: one row for each thread and one column for each batch.
     1802Each thread starts at a specific row and fills that row with two different sized allocations.
     1803A thread then loops until it atomically exchanges its row pointer with another thread's row pointer.
     1804The storage in the received batch is then remote freed, the batch row is reset with new allocations, and the process repeats for a timed duration.
     1805As well, after each allocation, an integer is written into the storage, and that integer is read before the deallocation.
     1806
     1807Figure~\ref{f:Ownership} (a)--(c) shows the throughput of the ownership benchmark.
     1808The results are divided into three groups.
     1809glibc and tbbmalloc are slowest because of many system calls to @futex@. % and @nano_sleep@.
     1810Figure~\ref{f:Ownership}~(d) shows the system time climbing during scaling on the AMD;
     1811the other architectures are similar.
     1812llheap and mimalloc are next, as these allocators do not batch remote frees, so every free requires locking.
     1813jemalloc, hoard, and tcmalloc are fastest, as these allocators batch remote frees, reducing locking.
     1814For 1:1 allocators, eager remote return makes sense as the returned storage can be reused during the owning thread's lifetime.
     1815For N:T allocators, lazy remote return using batching makes sense as heaps outlive threads so eventually returned storage can be used by any existing or new thread.
     1816Batching is possible for 1:1 allocators, but results in complexity and external fragmentation, which is only warranted in certain cases.
     1817
     1818\begin{figure}
     1819\begin{cfa}
     1820void * batches[MaxThread][MaxBatch];                            $\C{// thread global}$
     1821struct Aligned { CALIGN void * * col; };
     1822volatile Aligned allocations[MaxThread];
     1823
     1824Aligned batch = { batches[id] };                                        $\C{// thread local}$
     1825size_t cnt = 0, a = 0;
     1826for ( ; ! stop; ) {                                                                     $\C{// loop for T second}$
     1827        for ( ssize_t i = Batch - 1; i >= 0; i -= 1 ) { $\C{// allocations, oppose order from frees}$
     1828                batch.col[i] = malloc( i & 1 ? 42 : 192 );      $\C{// two allocation sizes}$
     1829                *(int *)batch.col[i] = 42;                                      $\C{// write storage}$
     1830        }
     1831        Aligned obatch = batch;
     1832        while ( (batch.col = Fas( allocations[a].col, batch.col )) == obatch.col || batch.col == nullptr ) { // atomic exchange
     1833                if ( stop ) goto fini;
     1834                a = (a + 1) % Threads;                                          $\C{// try another batch}$
     1835        }
     1836        for ( size_t i = 0; i < Batch; i += 1 ) {               $\C{// deallocations}$
     1837                if ( *(int *)batch.col[i] != 42 ) abort();      $\C{// read storage check}$
     1838                free( batch.col[i] );                                           $\C{// remote free}$
     1839        }
     1840        cnt += Batch;                                                                   $\C{// sum allocations/frees}$
     1841        a = (a + 1) % Threads;                                                  $\C{// try another batch}$
     1842}  fini: ;
     1843\end{cfa}
     1844\caption{Ownership Benchmark Outline}
     1845\label{f:OwnershipBenchmark}
     1846\end{figure}
     1847
     1848\begin{figure}
     1849\hspace*{-14pt}
     1850\setlength{\tabcolsep}{-13pt}
     1851\begin{tabular}{@{}lll@{\hspace*{-6pt}{\vrule height 2.05in}\hspace*{-6pt}}l@{}}
     1852\input{prolog.ownership.tex}
     1853&
     1854\input{swift.ownership.tex}
     1855&
     1856\input{java.ownership.tex}
     1857&
     1858\input{swift.ownershipres.tex}
     1859\end{tabular}
     1860\caption{Ownership Results, x-axis is cores, (a)--(c) higher is better, (d) lower is better}
     1861\label{f:Ownership}
     1862\end{figure}
     1863
     1864
     1865\subsection{Delay Benchmark}
     1866
     1867The delay benchmark is a torture test of abrupt allocation patterns looking for delays that increase latency.
     1868A flat response across the tests means there are few or no allocator-induced pauses.
     1869The test examines small and large requests, where small requests are handled by the heap (@sbrk@) and large requests are handled by the OS (@mmap@).
      1870Putting large requests in the heap causes external fragmentation when freed, unless an allocator subdivides the space, leading to pauses.
     1871The @mallopt@ function provides the option @M_MMAP_THRESHOLD@ to set the division point in bytes for requests that cannot be satisfied by an allocator's free list.
     1872Each @sbrk@ test in this benchmark is repeated 5,000,000,000 times and each @mmap@ test is performed 1,000,000 times;
     1873the different repetitions result from the high cost of the OS calls making the experiment run too long.
      1874A \emph{long running} experiment, rather than short experiments with averaged results, searches for blowup scenarios in time and/or space.
     1875Finally, scaling is tested with 4, 8, 16, and 32 pinned threads, where the threads synchronize between tests using a @pthread@ barrier.
     1876In all experiments, allocated storage has its first and last byte assigned a character to simulate usage.
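The division point is set once at startup through the glibc @mallopt@ interface; a minimal sketch matching the benchmark's 512K threshold:
\begin{C++}
#include <cstdlib>
#include <malloc.h>                             // mallopt, M_MMAP_THRESHOLD (glibc)

int main() {
        mallopt( M_MMAP_THRESHOLD, 524288 );    // requests >= 512K are satisfied by mmap
        char * p = (char *)malloc( 1048576 );   // expected to be mmap-backed
        p[0] = p[1048576 - 1] = 1;              // touch the first and last byte
        free( p );
}
\end{C++}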
     1877
     1878The tests are performed in this order:
     1879\begin{enumerate}[leftmargin=18pt,topsep=3pt,itemsep=2pt,parsep=0pt]
     1880\item
     1881@x = malloc( 0 ) / free( x )@:
      1882handles the pathological case of a zero-sized allocation and free.
     1883The POSIX standard allows two meanings for this case: return @NULL@ or a unique pointer, where both can be freed.
     1884The fastest implementation is to return @NULL@, rather than create a fictitious allocation.
     1885However, this overloads the @malloc@ return-value to mean error or a zero-sized allocation.
     1886To comply with the POSIX standard, the check for running out of memory is:
     1887\begin{uC++}
     1888if ( malloc( 0 ) == NULL && errno == ENOMEM ) ... // no memory
     1889\end{uC++}
     1890Unfortunately, most programmers assume @NULL@ means an error, \eg two tests in the SPEC CPU benchmark fail if @NULL@ is returned for a zero-sized allocation.
      1891Hence, returning @NULL@ for a zero-sized allocation is an impractical allocator option (see the sketch following this list).
     1892
     1893\item
     1894@free( NULL )@: handles the pathological case of freeing a non-existent or zero-byte allocation.
     1895Non-existent allocations occur as algorithm base-cases, such as an unused pointer set to @NULL@.
     1896Having the allocator ignore this case eliminates checking for an erroneous @free@ call on a @NULL@ value.
     1897This call should be fast.
     1898
     1899\item
     1900\label{expS}
     1901@x = malloc( 42 ) / free( x )@:
     1902handles a fixed-sized allocation and free.
     1903
     1904\item
     1905@x[0..100) = malloc( 42 ) / free( x[0..100) )@:
     1906handles a group of fixed-sized allocations and group free.
     1907
     1908\item
     1909@x[0..1000) = malloc( 42 ) / free( x[0..1000) )@:
     1910handles a larger group of fixed-sized allocations and group free.
     1911
     1912\item
     1913@x[0..100) = malloc( 42 ) / free( x(100..0] )@:
     1914handles a group of fixed-sized allocations and group free in reverse order.
     1915
     1916\item
     1917\label{expE}
     1918@x[0..1000) = malloc( 42 ) / free( x(1000..0] )@:
     1919handles a larger group of fixed-sized allocations and group free in reverse order.
     1920
     1921\item
     1922@x = malloc( [0..100) ) / free( x )@:
     1923handles a variable-sized allocation and free.
     1924
     1925\item
     1926@x[0..100) = malloc( [0..100) ) / free( x[0..100) )@:
     1927handles a group of variable-sized allocations and group free.
     1928
     1929\item
     1930@x[0..1000) = malloc( [0..1000) ) / free( x[0..1000) )@:
     1931handles a larger group of variable-sized allocations and group free.
     1932
     1933\item
     1934@x[0..100) = malloc( [0..100) ) / free( x(100..0] )@:
     1935handles a group of variable-sized allocations and group free in reverse order.
     1936
     1937\item
     1938@x[0..1000) = malloc( [0..1000) ) / free( x(1000..0] )@:
     1939handles a larger group of variable-sized allocations and group free in reverse order.
    21611940\end{enumerate}
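The zero-sized case from test 1 can be probed in isolation; a minimal sketch of the POSIX-conforming check:
\begin{C++}
#include <cerrno>
#include <cstdlib>

int main() {
        errno = 0;
        void * p = malloc( 0 );                 // NULL or a unique pointer, per POSIX
        bool nomem = p == NULL && errno == ENOMEM; // true only when actually out of memory
        free( p );                              // legal for either result
        return nomem;
}
\end{C++}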
    2162 
    2163 Figure~\ref{fig:SpeedBenchFig} shows the pseudo code for the speed micro-benchmark.
    2164 Each routine in the chain is called for N objects and then those allocated objects are used when calling the next routine in the allocation chain.
    2165 This tests the latency of the memory allocator when multiple routines are chained together, \eg the call sequence malloc-realloc-free-calloc gives a complete picture of the major allocation routines when combined together.
    2166 For each chain, the time is recorded to visualize performance of a memory allocator against each chain.
     1941Experiments \ref{expS}--\ref{expE} are repeated with a fixed-sized allocation of 1,048,576, where @M_MMAP_THRESHOLD@ is set to 524,288 to force the use of @mmap@, resulting in 17 experiments.
     1942Because the @mmap@ experiments test the operating-system memory-management not the allocators, the variable-sized @mmap@ experiments are deemed unnecessary.
      1943A test with random-sized @sbrk@ allocations, @malloc( [0..N) random )@, was performed, but the results are the same as fixed-sized because all the allocation sizes are quickly accessed over the large number of experiment repetitions.
     1944That is, once the buckets or superblocks for the allocation sizes are created, access order is irrelevant.
     1945
     1946Figures~\ref{f:LatencyExpARM}--\ref{f:LatencyExpIntel} show the results of the @sbrk@ and @mmap@ experiments across the seven allocators with parallel scaling.
     1947The average of the N threads is graphed for each experiment and the standard deviation is the error bar.
     1948For the @sbrk@ graphs, a good allocator result should be low (smaller is better), flat across scaling (cores), with no error bars (STD $\approx$ 0) indicating no jitter (pauses) among the threads.
     1949The result patterns across the three hardware architectures are similar, with differences correlating to CPU speed and cache differences.
     1950
     1951The key observation across the @sbrk@ graphs is that llheap and mimalloc are always at the bottom (lower is better) and flat with respect to scaling.
     1952The only exception is on the Intel, where all allocators experienced similar non-flat behaviour, because of the L3 cache shift at 16 cores.
     1953Some anomalies are tcmalloc and hoard experiencing large jitter (see error bars) and scaling issues in some experiments, which is correlated with poorer results;
     1954jemalloc has significant scaling issues for experiments 5, 7, 10, and 12, resulting from large numbers of @futex@ calls, possibly related to @madvise@ for returning storage to the OS;
     1955and glibc and tbbmalloc are often slower than the other allocators (symbols are on top of each other).
     1956
      1957The key observation across the @mmap@ graphs is that only three allocators, glibc, llheap, and tbbmalloc, honoured the @mmap@ threshold request (symbols are on top of each other).
     1958The other allocators made no @mmap@ calls, so their results are extremely low.
     1959The exception is hoard, which did make @mmap@ calls that were uncorrelated with @M_MMAP_THRESHOLD@, and had significant jitter due to a large number of @futex@ calls.
     1960For the allocators using @mmap@, there should be some scaling effect as more threads make more system calls.
    21671961
    21681962\begin{figure}
    2169 \centering
    2170 \begin{lstlisting}[morekeywords={foreach}]
    2171 Main Thread
    2172         create worker threads
    2173         foreach ( allocation chain )
    2174                 note time T1
    2175                 ...
    2176                 note time T2
     2177 		chain_speed = (T2 - T1) / ( number-of-worker-threads * N )
    2178 Worker Thread
    2179         initialize variables
    2180         ...
    2181         foreach ( routine in allocation chain )
    2182                 call routine N times
    2183 \end{lstlisting}
    2184 %\includegraphics[width=1\textwidth]{figures/bench-speed.eps}
    2185 \caption{Speed Benchmark}
    2186 \label{fig:SpeedBenchFig}
     1963\input{prolog.tex}
     1964\vspace*{-20pt}
     1965\caption{Delay Results, ARM, x-axis is cores, lower is better}
     1966\label{f:LatencyExpARM}
    21871967\end{figure}
    21881968
    2189 The adjustment knobs for memory usage are:
    2190 \begin{description}[itemsep=0pt,parsep=0pt]
    2191 \item[max:]
    2192 maximum object size.
    2193 \item[min:]
    2194 minimum object size.
    2195 \item[step:]
    2196 object size increment.
    2197 \item[distro:]
    2198 object size distribution.
    2199 \item[objects:]
    2200 number of objects per thread.
    2201 \item[workers:]
    2202 number of worker threads.
    2203 \end{description}
    2204 
    2205 
    2206 \subsubsection{Memory Micro-Benchmark}
    2207 \label{s:MemoryMicroBenchmark}
    2208 
    2209 The memory micro-benchmark measures the memory overhead of an allocator.
     2210 It allocates a number of dynamic objects and reads @/proc/self/maps@ to get the total memory requested by the allocator from the OS.
     2211 It calculates the memory overhead by computing the difference between the memory the allocator requests from the OS and the memory that the program allocates.
     2212 This micro-benchmark is like the Larson benchmark and stresses the ability of an allocator to deal with object sharing.
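
A minimal sketch of the measurement (hypothetical harness code): sum the extents of every region listed in @/proc/self/maps@; subtracting the benchmark's requested memory gives the allocator overhead.
\begin{cfa}
#include <stdio.h>	// fopen, fgets, sscanf, fclose
size_t mapped_bytes( void ) {	// total bytes currently mapped into the process
	FILE * maps = fopen( "/proc/self/maps", "r" );
	size_t total = 0, start, end;
	char line[512];
	while ( fgets( line, sizeof(line), maps ) )
		if ( sscanf( line, "%zx-%zx", &start, &end ) == 2 ) total += end - start;
	fclose( maps );
	return total;
}
\end{cfa}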
    2213 
    2214 Figure~\ref{fig:MemoryBenchFig} shows the pseudo code for the memory micro-benchmark.
    2215 It creates a producer-consumer scenario with K producer threads and each producer has M consumer threads.
    2216 A producer has a separate buffer for each consumer and allocates N objects of random sizes following a configurable distribution for each consumer.
    2217 A consumer frees these objects.
    2218 After every memory operation, program memory usage is recorded throughout the runtime.
    2219 This data is used to visualize the memory usage and consumption for the program.
    2220 
    22211969\begin{figure}
    2222 \centering
    2223 \begin{lstlisting}
    2224 Main Thread
    2225         print memory snapshot
    2226         create producer threads
    2227 Producer Thread (K)
    2228         set free start
    2229         create consumer threads
    2230         for ( N )
    2231                 allocate memory
    2232                 print memory snapshot
    2233 Consumer Thread (M)
    2234         wait while ( allocations < free start )
    2235         for ( N )
    2236                 free memory
    2237                 print memory snapshot
    2238 \end{lstlisting}
    2239 %\includegraphics[width=1\textwidth]{figures/bench-memory.eps}
    2240 \caption{Memory Footprint Micro-Benchmark}
    2241 \label{fig:MemoryBenchFig}
     1970\input{swift.tex}
     1971\vspace*{-20pt}
     1972\caption{Delay Results, AMD, x-axis is cores, lower is better}
     1973\label{f:LatencyExpAMD}
    22421974\end{figure}
    22431975
    2244 The global adjustment knobs for this micro-benchmark are:
    2245 \begin{description}[itemsep=0pt,parsep=0pt]
    2246 \item[producer (K):]
    2247 sets the number of producer threads.
    2248 \item[consumer (M):]
     2249 sets the number of consumer threads for each producer.
    2250 \item[round:]
    2251 sets production and consumption round size.
    2252 \end{description}
    2253 
    2254 The adjustment knobs for object allocation are:
    2255 \begin{description}[itemsep=0pt,parsep=0pt]
    2256 \item[max:]
    2257 maximum object size.
    2258 \item[min:]
    2259 minimum object size.
    2260 \item[step:]
    2261 object size increment.
    2262 \item[distro:]
    2263 object size distribution.
    2264 \item[objects (N):]
    2265 number of objects per thread.
    2266 \end{description}
    2267 
    2268 
    2269 \section{Performance}
    2270 \label{c:Performance}
    2271 
    2272 This section uses the micro-benchmarks from Section~\ref{s:Benchmarks} to test a number of current memory allocators, including llheap.
    2273 The goal is to see if llheap is competitive with the currently popular memory allocators.
    2274 
    2275 
    2276 \subsection{Machine Specification}
    2277 
    2278 The performance experiments were run on two different multi-core architectures (x64 and ARM) to determine if there is consistency across platforms:
    2279 \begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
    2280 \item
    2281 \textbf{Algol} Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz, GCC version 9.4.0
    2282 \item
    2283 \textbf{Nasus} AMD EPYC 7662, 64-core socket $\times$ 2, 2.0 GHz, GCC version 9.3.0
    2284 \end{itemize}
    2285 
    2286 
    2287 \subsection{Existing Memory Allocators}
    2288 \label{sec:curAllocatorSec}
    2289 
    2290 With dynamic allocation being an important feature of C, there are many stand-alone memory allocators that have been designed for different purposes.
    2291 For this work, 7 of the most popular and widely used memory allocators were selected for comparison, along with llheap.
    2292 
    2293 \paragraph{llheap (\textsf{llh})}
     2294 is the thread-safe allocator from Chapter~\ref{c:Allocator}.
    2295 \\
    2296 \textbf{Version:} 1.0
    2297 \textbf{Configuration:} Compiled with dynamic linking, but without statistics or debugging.\\
    2298 \textbf{Compilation command:} @make@
    2299 
    2300 \paragraph{glibc (\textsf{glc})}
    2301 \cite{glibc} is the default glibc thread-safe allocator.
    2302 \\
    2303 \textbf{Version:} Ubuntu GLIBC 2.31-0ubuntu9.7 2.31\\
    2304 \textbf{Configuration:} Compiled by Ubuntu 20.04.\\
    2305 \textbf{Compilation command:} N/A
    2306 
    2307 \paragraph{dlmalloc (\textsf{dl})}
     2308 \cite{dlmalloc} is a thread-safe allocator with a single heap shared by all threads.
    2309 It maintains free-lists of different sizes to store freed dynamic memory.
    2310 \\
    2311 \textbf{Version:} 2.8.6\\
    2312 \textbf{Configuration:} Compiled with preprocessor @USE_LOCKS@.\\
    2313 \textbf{Compilation command:} @gcc -g3 -O3 -Wall -Wextra -fno-builtin-malloc -fno-builtin-calloc@ @-fno-builtin-realloc -fno-builtin-free -fPIC -shared -DUSE_LOCKS -o libdlmalloc.so malloc-2.8.6.c@
    2314 
    2315 \paragraph{hoard (\textsf{hrd})}
    2316 \cite{hoard} is a thread-safe allocator that is multi-threaded and uses a heap layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap.
    2317 \\
    2318 \textbf{Version:} 3.13\\
    2319 \textbf{Configuration:} Compiled with hoard's default configurations and @Makefile@.\\
    2320 \textbf{Compilation command:} @make all@
    2321 
    2322 \paragraph{jemalloc (\textsf{je})}
    2323 \cite{jemalloc} is a thread-safe allocator that uses multiple arenas. Each thread is assigned an arena.
     2324 Each arena has chunks that contain contiguous memory regions of the same size; an arena has multiple chunks containing regions of multiple sizes.
    2325 \\
    2326 \textbf{Version:} 5.2.1\\
    2327 \textbf{Configuration:} Compiled with jemalloc's default configurations and @Makefile@.\\
    2328 \textbf{Compilation command:} @autogen.sh; configure; make; make install@
    2329 
    2330 \paragraph{ptmalloc3 (\textsf{pt3})}
    2331 \cite{ptmalloc3} is a modification of dlmalloc.
    2332 It is a thread-safe multi-threaded memory allocator that uses multiple heaps.
     2333 A ptmalloc3 heap has a design similar to dlmalloc's heap.
    2334 \\
    2335 \textbf{Version:} 1.8\\
    2336 \textbf{Configuration:} Compiled with ptmalloc3's @Makefile@ using option ``linux-shared''.\\
    2337 \textbf{Compilation command:} @make linux-shared@
    2338 
    2339 \paragraph{rpmalloc (\textsf{rp})}
     2340 \cite{rpmalloc} is a thread-safe allocator that is multi-threaded and uses per-thread heaps.
    2341 Each heap has multiple size-classes and each size-class contains memory regions of the relevant size.
    2342 \\
    2343 \textbf{Version:} 1.4.1\\
    2344 \textbf{Configuration:} Compiled with rpmalloc's default configurations and ninja build system.\\
    2345 \textbf{Compilation command:} @python3 configure.py; ninja@
    2346 
    2347 \paragraph{tbb malloc (\textsf{tbb})}
    2348 \cite{tbbmalloc} is a thread-safe allocator that is multi-threaded and uses a private heap for each thread.
    2349 Each private-heap has multiple bins of different sizes. Each bin contains free regions of the same size.
    2350 \\
    2351 \textbf{Version:} intel tbb 2020 update 2, tbb\_interface\_version == 11102\\
    2352 \textbf{Configuration:} Compiled with tbbmalloc's default configurations and @Makefile@.\\
    2353 \textbf{Compilation command:} @make@
    2354 
    2355 % \subsection{Experiment Environment}
    2356 % We used our micro benchmark suite (FIX ME: cite mbench) to evaluate these memory allocators Section~\ref{sec:curAllocatorSec} and our own memory allocator uHeap Section~\ref{sec:allocatorSec}.
    2357 
    2358 \subsection{Experiments}
    2359 
     2360 Each micro-benchmark is configured and run with each of the allocators.
     2361 The less time an allocator takes to complete a benchmark the better, so lower in the graphs is better, except for the Memory micro-benchmark graphs.
    2362 All graphs use log scale on the Y-axis, except for the Memory micro-benchmark (see Section~\ref{s:MemoryMicroBenchmark}).
    2363 
    2364 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    2365 %% CHURN
    2366 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    2367 
    2368 \subsubsection{Churn Micro-Benchmark}
    2369 
    2370 Churn tests allocators for speed under intensive dynamic memory usage (see Section~\ref{s:ChurnBenchmark}).
     2371 This experiment was run with the following configurations:
    2372 \begin{description}[itemsep=0pt,parsep=0pt]
    2373 \item[thread:]
    2374 1, 2, 4, 8, 16, 32, 48
    2375 \item[spots:]
    2376 16
    2377 \item[obj:]
    2378 100,000
    2379 \item[max:]
    2380 500
    2381 \item[min:]
    2382 50
    2383 \item[step:]
    2384 50
    2385 \item[distro:]
    2386 fisher
    2387 \end{description}
    2388 
    2389 % -maxS          : 500
    2390 % -minS          : 50
    2391 % -stepS                 : 50
    2392 % -distroS       : fisher
    2393 % -objN          : 100000
    2394 % -cSpots                : 16
    2395 % -threadN       : 1, 2, 4, 8, 16
    2396 
     2397 Figure~\ref{fig:churn} shows the results for Algol and Nasus.
     2398 The X-axis shows the number of threads;
     2399 the Y-axis shows the total experiment time.
     2400 Each allocator's performance is shown in a different color.
    2401 
    24021976\begin{figure}
    2403 \centering
    2404     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/churn} } \\
    2405     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/churn} }
    2406 \caption{Churn}
    2407 \label{fig:churn}
     1977\input{java.tex}
     1978\vspace*{-20pt}
     1979\caption{Delay Results, Intel, x-axis is cores, lower is better}
     1980\label{f:LatencyExpIntel}
    24081981\end{figure}
    24091982
    2410 \paragraph{Assessment}
    2411 All allocators did well in this micro-benchmark, except for \textsf{dl} on the ARM.
     2412 \textsf{dl} is the slowest, indicating some small bottleneck with respect to the other allocators.
    2413 \textsf{je} is the fastest, with only a small benefit over the other allocators.
    2414 % llheap is slightly slower because it uses ownership, where many of the allocations have remote frees, which requires locking.
    2415 % When llheap is compiled without ownership, its performance is the same as the other allocators (not shown).
    2416 
    2417 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    2418 %% THRASH
    2419 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    2420 
    2421 \subsubsection{Cache Thrash}
    2422 \label{sec:cache-thrash-perf}
    2423 
    2424 Thrash tests memory allocators for active false sharing (see Section~\ref{sec:benchThrashSec}).
     2425 This experiment was run with the following configurations:
    2426 \begin{description}[itemsep=0pt,parsep=0pt]
    2427 \item[threads:]
    2428 1, 2, 4, 8, 16, 32, 48
    2429 \item[iterations:]
    2430 1,000
    2431 \item[cacheRW:]
    2432 1,000,000
    2433 \item[size:]
    2434 1
    2435 \end{description}
    2436 
    2437 % * Each allocator was tested for its performance across different number of threads.
    2438 % Experiment was repeated for each allocator for 1, 2, 4, 8, and 16 threads by setting the configuration -threadN.
    2439 
     2440 Figure~\ref{fig:cacheThrash} shows the results for Algol and Nasus.
     2441 The X-axis shows the number of threads;
     2442 the Y-axis shows the total experiment time.
     2443 Each allocator's performance is shown in a different color.
     1983Figures~\ref{f:LatencyResARM}--\ref{f:LatencyResIntel} show a time/space perspective across the entire experiment.
      1984The user, system, and real times, along with the maximum memory usage, are presented for the @sbrk@ and @mmap@ experiments.
     1985The result patterns across the three hardware architectures are similar.
     1986If an allocator disappears in a graph, its result is less than 1 on a logarithmic scale.
     1987Surprisingly, there are large (2 orders of magnitude) time differences among the allocators.
    24441988
    24451989\begin{figure}
    2446 \centering
    2447     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/cache_thrash_0-thrash} } \\
    2448     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/cache_thrash_0-thrash} }
    2449 \caption{Cache Thrash}
    2450 \label{fig:cacheThrash}
     1990\hspace*{15pt}
     1991\input{prolog2.tex}
     1992\vspace*{-20pt}
     1993\caption{Delay Results, ARM, x-axis is cores, lower is better}
     1994\label{f:LatencyResARM}
     1995
     1996\hspace*{15pt}
     1997\input{swift2.tex}
     1998\vspace*{-20pt}
     1999\caption{Delay Results, AMD, x-axis is cores, lower is better}
     2000\label{f:LatencyResAMD}
     2001
     2002\hspace*{15pt}
     2003\input{java2.tex}
     2004\vspace*{-20pt}
     2005\caption{Delay Results, Intel, x-axis is cores, lower is better}
     2006\label{f:LatencyResIntel}
    24512007\end{figure}
    24522008
    2453 \paragraph{Assessment}
    2454 All allocators did well in this micro-benchmark, except for \textsf{dl} and \textsf{pt3}.
    2455 \textsf{dl} uses a single heap for all threads so it is understandable that it generates so much active false-sharing.
    2456 Requests from different threads are dealt with sequentially by the single heap (using a single lock), which can allocate objects to different threads on the same cache line.
     2457 \textsf{pt3} uses the T:H model, so multiple threads can use one heap, but its active false-sharing is less than \textsf{dl}'s.
    2458 The rest of the memory allocators generate little or no active false-sharing.
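
A hedged sketch (hypothetical code, not the benchmark) of the contention when one serial heap places two threads' objects on the same cache line:
\begin{cfa}
#include <pthread.h>
#include <stdlib.h>
static char * obj[2];	// one small object per thread
void * writer( void * arg ) {	// each thread writes only its own object
	char * p = obj[(long)arg];
	for ( long i = 0; i < 100000000; i += 1 ) *p += 1;	// dirties the cache line
	return NULL;
}
int main() {	// sequential allocations from one heap are often adjacent,
	obj[0] = malloc( 8 );  obj[1] = malloc( 8 );	// possibly sharing a 64-byte line
	pthread_t t0, t1;
	pthread_create( &t0, NULL, writer, (void *)0 );
	pthread_create( &t1, NULL, writer, (void *)1 );
	pthread_join( t0, NULL );  pthread_join( t1, NULL );
}
\end{cfa}
Although the threads never logically share data, each write invalidates the other core's copy of the line, serializing the threads through cache coherence.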
    2459 
    2460 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    2461 %% SCRATCH
    2462 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    2463 
    2464 \subsubsection{Cache Scratch}
    2465 
    2466 Scratch tests memory allocators for program-induced allocator-preserved passive false-sharing (see Section~\ref{s:CacheScratch}).
     2467 This experiment was run with the following configurations:
    2468 \begin{description}[itemsep=0pt,parsep=0pt]
    2469 \item[threads:]
    2470 1, 2, 4, 8, 16, 32, 48
    2471 \item[iterations:]
    2472 1,000
    2473 \item[cacheRW:]
    2474 1,000,000
    2475 \item[size:]
    2476 1
    2477 \end{description}
    2478 
    2479 % * Each allocator was tested for its performance across different number of threads.
    2480 % Experiment was repeated for each allocator for 1, 2, 4, 8, and 16 threads by setting the configuration -threadN.
    2481 
     2482 Figure~\ref{fig:cacheScratch} shows the results for Algol and Nasus.
     2483 The X-axis shows the number of threads;
     2484 the Y-axis shows the total experiment time.
     2485 Each allocator's performance is shown in a different color.
      2009For @sbrk@ graphs, the user time should be high and scale with cores, the system time should be very low, the real time should be constant, and the maximum memory should scale with cores.
      2010For user time, llheap and mimalloc are at the bottom (lower is better), and all allocators have linear scaling as cores increase.
      2011The remaining allocators are slower by one to two orders of magnitude, which correlates with high results in the experiments.
      2012For system time, jemalloc has a non-trivial amount that scales with cores, caused by a large number of @futex@ calls.
      2013The remaining allocators have virtually zero system time (not on graph).
      2014The exception is a random anomaly where allocators had small amounts of system time, which appeared/disappeared on different experiment runs, as if something (the OS?) slightly perturbs the experiment over its 20-hour run.
      2015For real time, llheap and mimalloc take the least overall time, and all allocators except jemalloc have flat performance.
      2016For maximum memory, all allocators scale with cores, and there is a rough inverse correlation between user time and memory usage, \ie a time \vs space tradeoff.
     2017
      2018For @mmap@ graphs, where @mmap@ is used only by glibc, llheap, and tbbmalloc, the user time should be low and scale with cores, the system time should be high and scale with cores, the real time should be constant, and the maximum memory should scale with cores.
      2019For user time, glibc, llheap, and tbbmalloc are at the bottom because there are no @sbrk@ requests.
      2020The remaining allocators all use a non-trivial amount of time handling the large requests, except mimalloc, which handles a large request identically to a small one.
      2021Interestingly, the amount of time varies by one to two orders of magnitude.
      2022For system time, glibc, llheap, and tbbmalloc are at the top because of the OS calls to @mmap@.
      2023Interestingly, the remaining allocators still incur system time spanning orders of magnitude, except mimalloc ($<$ 1, so invisible).
      2024For real time, all allocators scale linearly with cores, except mimalloc, which is flat.
      2025For maximum memory, all allocators scale with cores, and there is a rough inverse correlation between user time and memory usage, \ie a time \vs space tradeoff.
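
A hedged sketch of how such figures are typically gathered (hypothetical harness code, in the style of @/usr/bin/time@): user/system time and maximum resident set size via @getrusage@.
\begin{cfa}
#include <stdio.h>	// printf
#include <sys/resource.h>	// getrusage, RUSAGE_SELF
int main() {
	// ... run the experiment ...
	struct rusage ru;
	getrusage( RUSAGE_SELF, &ru );	// accumulated resource usage for this process
	printf( "user %ld.%06lds system %ld.%06lds maxrss %ld KB\n",
		(long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
		(long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec, ru.ru_maxrss );
}
\end{cfa}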
     2026
     2027
     2028\subsection{Out of Memory Benchmark}
     2029
      2030Figure~\ref{f:OutMemoryBenchmark} shows a \CC program with unbounded memory allocation.
      2031The program is run in a shell with a restricted data size.
      2032Hence, it quickly runs out of memory, causing @malloc@, which is called by \CC @new@, to return @nullptr@ with @errno@ set to @ENOMEM@.
      2033Routine @new@ sees the @nullptr@ and calls the handler routine set by @set_new_handler@, which prints a message and resets the default handler, so the next failure raises the @bad_alloc@ exception caught in the program main.
      2034Note, raising an exception normally requires dynamic allocation, but \CC preallocates a few exceptions, like @bad_alloc@, for such cases.
     2035
     2036All allocators printed the correct output except hoard, mimalloc, and tcmalloc.
     2037Hoard prints @MAP_FAILED@ and hangs spinning on a spinlock in a complex call chain.
      2038mimalloc aborts the program because it incorrectly attempts to raise the @bad_alloc@ exception itself, which works if and only if it is compiled with \CC, whereas it is compiled with C.
     2039The correct design is to return a @nullptr@ with @errno@ set to @ENOMEM@ to \CC @new@, which then raises the exception;
     2040hence, the allocator can be compiled with C or \CC.
     2041tcmalloc prints the correct output but adds ``allocation failed'' messages.
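
For reference, the required interaction is roughly the following loop inside @operator new@ (a sketch of the standard-mandated behaviour, not any particular library's source):
\begin{cfa}
#include <cstdlib>	// std::malloc
#include <new>	// std::get_new_handler, std::bad_alloc
void * operator new( std::size_t size ) {	// simplified sketch
	for ( ;; ) {	// retry after each handler invocation
		if ( void * p = std::malloc( size ) ) return p;	// allocation succeeded
		if ( std::new_handler h = std::get_new_handler() ) h();	// handler may free memory or throw
		else throw std::bad_alloc{};	// no handler => raise the exception
	}
}
\end{cfa}
Hence, an allocator only needs to return @nullptr@; raising the exception is @operator new@'s responsibility.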
    24862042
    24872043\begin{figure}
    2488 \centering
    2489     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/cache_scratch_0-scratch} } \\
    2490     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/cache_scratch_0-scratch} }
    2491 \caption{Cache Scratch}
    2492 \label{fig:cacheScratch}
     2044\begin{tabular}{@{\hspace*{\parindentlnth}}l@{\hspace*{2\parindentlnth}}l@{}@{}}
     2045\begin{cfa}
      2046#include <iostream>	// cout
#include <new>	// set_new_handler, bad_alloc
using namespace std;
static void handler() {	// called when an allocation fails
      2047	cout << "Memory allocation failed\n";
      2048	set_new_handler( nullptr );	// restore default: next failure raises bad_alloc
      2049}
      2050
      2051
     2052\end{cfa}
     2053&
     2054\begin{cfa}
     2055int main() {
     2056        set_new_handler( handler );
     2057        try {
      2058		for ( ;; ) pass( new char[50] );	// unbounded allocation; pass (defined elsewhere) inhibits optimizing the allocation away
     2059        } catch( const bad_alloc & e ) { cout << e.what() << endl; }
     2060}
     2061\end{cfa}
     2062\end{tabular}
     2063\caption{Out of Memory Benchmark}
     2064\label{f:OutMemoryBenchmark}
    24932065\end{figure}
    24942066
    2495 \paragraph{Assessment}
    2496 This micro-benchmark divides the allocators into two groups.
    2497 First is the high-performer group: \textsf{llh}, \textsf{je}, and \textsf{rp}.
    2498 These memory allocators generate little or no passive false-sharing and their performance difference is negligible.
    2499 Second is the low-performer group, which includes the rest of the memory allocators.
     2500 These memory allocators have significant program-induced passive false-sharing, where \textsf{hrd} is the worst-performing allocator.
    2501 All of the allocators in this group are sharing heaps among threads at some level.
    2502 
     2503 Interestingly, allocators such as \textsf{hrd} and \textsf{glc} performed well in the cache-thrash micro-benchmark (see Section~\ref{sec:cache-thrash-perf}), but are among the low performers in cache scratch.
     2504 This result suggests these allocators do not actively produce false-sharing, but do preserve program-induced passive false-sharing.
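
A hedged sketch (hypothetical code) of how an allocator preserves program-induced passive false sharing:
\begin{cfa}
// Thread 1 allocates two adjacent objects, likely on one cache line.
char * a = malloc( 8 );	// kept and written by thread 1
char * b = malloc( 8 );	// passed to thread 2 (program-induced sharing)
// Thread 2 later frees b into the heap it is currently using:
free( b );
b = malloc( 8 );	// without ownership, thread 2 may receive b's old storage,
	// so threads 1 and 2 keep writing the same cache line
\end{cfa}
With heap ownership, @b@'s storage is returned to thread 1's heap, breaking the false sharing on the next allocation.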
    2505 
    2506 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    2507 %% SPEED
    2508 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    2509 
    2510 \subsubsection{Speed Micro-Benchmark}
    2511 
    2512 Speed tests memory allocators for runtime latency (see Section~\ref{s:SpeedMicroBenchmark}).
     2513 This experiment was run with the following configurations:
    2514 \begin{description}
    2515 \item[max:]
    2516 500
    2517 \item[min:]
    2518 50
    2519 \item[step:]
    2520 50
    2521 \item[distro:]
    2522 fisher
    2523 \item[objects:]
    2524 100,000
    2525 \item[workers:]
    2526 1, 2, 4, 8, 16, 32, 48
    2527 \end{description}
    2528 
    2529 % -maxS    :  500
    2530 % -minS    :  50
    2531 % -stepS   :  50
    2532 % -distroS :  fisher
    2533 % -objN    :  1000000
    2534 % -threadN    : \{ 1, 2, 4, 8, 16 \} *
    2535 
    2536 %* Each allocator was tested for its performance across different number of threads.
    2537 %Experiment was repeated for each allocator for 1, 2, 4, 8, and 16 threads by setting the configuration -threadN.
    2538 
    2539 Figures~\ref{fig:speed-3-malloc} to~\ref{fig:speed-14-malloc-calloc-realloc-free} show 12 figures, one figure for each chain of the speed benchmark.
    2540 The X-axis shows the number of threads;
    2541 the Y-axis shows the total experiment time.
    2542 Each allocator's performance for each thread is shown in different colors.
    2543 
    2544 \begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
    2545 \item Figure~\ref{fig:speed-3-malloc} shows results for chain: malloc
    2546 \item Figure~\ref{fig:speed-4-realloc} shows results for chain: realloc
    2547 \item Figure~\ref{fig:speed-5-free} shows results for chain: free
    2548 \item Figure~\ref{fig:speed-6-calloc} shows results for chain: calloc
    2549 \item Figure~\ref{fig:speed-7-malloc-free} shows results for chain: malloc-free
    2550 \item Figure~\ref{fig:speed-8-realloc-free} shows results for chain: realloc-free
    2551 \item Figure~\ref{fig:speed-9-calloc-free} shows results for chain: calloc-free
    2552 \item Figure~\ref{fig:speed-10-malloc-realloc} shows results for chain: malloc-realloc
    2553 \item Figure~\ref{fig:speed-11-calloc-realloc} shows results for chain: calloc-realloc
    2554 \item Figure~\ref{fig:speed-12-malloc-realloc-free} shows results for chain: malloc-realloc-free
    2555 \item Figure~\ref{fig:speed-13-calloc-realloc-free} shows results for chain: calloc-realloc-free
    2556 \item Figure~\ref{fig:speed-14-malloc-calloc-realloc-free} shows results for chain: malloc-realloc-free-calloc
    2557 \end{itemize}
    2558 
    2559 \paragraph{Assessment}
    2560 This micro-benchmark divides the allocators into two groups: with and without @calloc@.
     2561 @calloc@ uses @memset@ to set the allocated memory to zero, which dominates the cost of the allocation chain (large time increase) and levels performance across the allocators.
    2562 But the difference among the allocators in a @calloc@ chain still gives an idea of their relative performance.
    2563 
    2564 All allocators did well in this micro-benchmark across all allocation chains, except for \textsf{dl}, \textsf{pt3}, and \textsf{hrd}.
     2565 Again, the low-performing allocators are sharing heaps among threads, so contention causes the time to increase with the number of threads.
    2566 Furthermore, chains with @free@ can trigger coalescing, which slows the fast path.
     2567 The high-performing allocators all illustrate low latency across the allocation chains, \ie there are no performance spikes as the chain lengthens, which might be caused by contention and/or coalescing.
    2568 Low latency is important for applications that are sensitive to unknown execution delays.
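
A hedged sketch of why zeroing dominates (hypothetical code; real implementations must also check for multiplication overflow and may obtain pre-zeroed pages from the OS):
\begin{cfa}
#include <stdlib.h>	// malloc
#include <string.h>	// memset
void * naive_calloc( size_t dim, size_t elemSize ) {
	size_t bytes = dim * elemSize;	// overflow check elided
	void * p = malloc( bytes );	// fast path
	return p ? memset( p, 0, bytes ) : NULL;	// O(bytes) zeroing dominates
}
\end{cfa}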
    2569 
    2570 %speed-3-malloc.eps
    2571 \begin{figure}
    2572 \centering
    2573     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-3-malloc} } \\
    2574     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-3-malloc} }
    2575 \caption{Speed benchmark chain: malloc}
    2576 \label{fig:speed-3-malloc}
    2577 \end{figure}
    2578 
    2579 %speed-4-realloc.eps
    2580 \begin{figure}
    2581 \centering
    2582     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-4-realloc} } \\
    2583     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-4-realloc} }
    2584 \caption{Speed benchmark chain: realloc}
    2585 \label{fig:speed-4-realloc}
    2586 \end{figure}
    2587 
    2588 %speed-5-free.eps
    2589 \begin{figure}
    2590 \centering
    2591     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-5-free} } \\
    2592     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-5-free} }
    2593 \caption{Speed benchmark chain: free}
    2594 \label{fig:speed-5-free}
    2595 \end{figure}
    2596 
    2597 %speed-6-calloc.eps
    2598 \begin{figure}
    2599 \centering
    2600     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-6-calloc} } \\
    2601     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-6-calloc} }
    2602 \caption{Speed benchmark chain: calloc}
    2603 \label{fig:speed-6-calloc}
    2604 \end{figure}
    2605 
    2606 %speed-7-malloc-free.eps
    2607 \begin{figure}
    2608 \centering
    2609     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-7-malloc-free} } \\
    2610     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-7-malloc-free} }
    2611 \caption{Speed benchmark chain: malloc-free}
    2612 \label{fig:speed-7-malloc-free}
    2613 \end{figure}
    2614 
    2615 %speed-8-realloc-free.eps
    2616 \begin{figure}
    2617 \centering
    2618     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-8-realloc-free} } \\
    2619     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-8-realloc-free} }
    2620 \caption{Speed benchmark chain: realloc-free}
    2621 \label{fig:speed-8-realloc-free}
    2622 \end{figure}
    2623 
    2624 %speed-9-calloc-free.eps
    2625 \begin{figure}
    2626 \centering
    2627     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-9-calloc-free} } \\
    2628     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-9-calloc-free} }
    2629 \caption{Speed benchmark chain: calloc-free}
    2630 \label{fig:speed-9-calloc-free}
    2631 \end{figure}
    2632 
    2633 %speed-10-malloc-realloc.eps
    2634 \begin{figure}
    2635 \centering
    2636     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-10-malloc-realloc} } \\
    2637     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-10-malloc-realloc} }
    2638 \caption{Speed benchmark chain: malloc-realloc}
    2639 \label{fig:speed-10-malloc-realloc}
    2640 \end{figure}
    2641 
    2642 %speed-11-calloc-realloc.eps
    2643 \begin{figure}
    2644 \centering
    2645     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-11-calloc-realloc} } \\
    2646     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-11-calloc-realloc} }
    2647 \caption{Speed benchmark chain: calloc-realloc}
    2648 \label{fig:speed-11-calloc-realloc}
    2649 \end{figure}
    2650 
    2651 %speed-12-malloc-realloc-free.eps
    2652 \begin{figure}
    2653 \centering
    2654     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-12-malloc-realloc-free} } \\
    2655     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-12-malloc-realloc-free} }
    2656 \caption{Speed benchmark chain: malloc-realloc-free}
    2657 \label{fig:speed-12-malloc-realloc-free}
    2658 \end{figure}
    2659 
    2660 %speed-13-calloc-realloc-free.eps
    2661 \begin{figure}
    2662 \centering
    2663     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-13-calloc-realloc-free} } \\
    2664     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-13-calloc-realloc-free} }
    2665 \caption{Speed benchmark chain: calloc-realloc-free}
    2666 \label{fig:speed-13-calloc-realloc-free}
    2667 \end{figure}
    2668 
    2669 %speed-14-{m,c,re}alloc-free.eps
    2670 \begin{figure}
    2671 \centering
    2672     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/speed-14-m-c-re-alloc-free} } \\
    2673     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/speed-14-m-c-re-alloc-free} }
    2674 \caption{Speed benchmark chain: malloc-calloc-realloc-free}
    2675 \label{fig:speed-14-malloc-calloc-realloc-free}
    2676 \end{figure}
    2677 
    2678 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    2679 %% MEMORY
    2680 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    2681 
    2682 \newpage
    2683 \subsubsection{Memory Micro-Benchmark}
    2684 \label{s:MemoryMicroBenchmark}
    2685 
    2686 This experiment is run with the following two configurations for each allocator.
    2687 The difference between the two configurations is the number of producers and consumers.
    2688 Configuration 1 has one producer and one consumer, and configuration 2 has 4 producers, where each producer has 4 consumers.
    2689 
    2690 \noindent
    2691 Configuration 1:
    2692 \begin{description}[itemsep=0pt,parsep=0pt]
    2693 \item[producer (K):]
    2694 1
    2695 \item[consumer (M):]
    2696 1
    2697 \item[round:]
    2698 100,000
    2699 \item[max:]
    2700 500
    2701 \item[min:]
    2702 50
    2703 \item[step:]
    2704 50
    2705 \item[distro:]
    2706 fisher
    2707 \item[objects (N):]
    2708 100,000
    2709 \end{description}
    2710 
    2711 % -threadA :  1
    2712 % -threadF :  1
    2713 % -maxS    :  500
    2714 % -minS    :  50
    2715 % -stepS   :  50
    2716 % -distroS :  fisher
    2717 % -objN    :  100000
    2718 % -consumeS:  100000
    2719 
    2720 \noindent
    2721 Configuration 2:
    2722 \begin{description}[itemsep=0pt,parsep=0pt]
    2723 \item[producer (K):]
    2724 4
    2725 \item[consumer (M):]
    2726 4
    2727 \item[round:]
    2728 100,000
    2729 \item[max:]
    2730 500
    2731 \item[min:]
    2732 50
    2733 \item[step:]
    2734 50
    2735 \item[distro:]
    2736 fisher
    2737 \item[objects (N):]
    2738 100,000
    2739 \end{description}
    2740 
    2741 % -threadA :  4
    2742 % -threadF :  4
    2743 % -maxS    :  500
    2744 % -minS    :  50
    2745 % -stepS   :  50
    2746 % -distroS :  fisher
    2747 % -objN    :  100000
    2748 % -consumeS:  100000
    2749 
    2750 % \begin{table}[b]
    2751 % \centering
    2752 %     \begin{tabular}{ |c|c|c| }
    2753 %      \hline
    2754 %     Memory Allocator & Configuration 1 Result & Configuration 2 Result\\
    2755 %      \hline
    2756 %     llh & Figure~\ref{fig:mem-1-prod-1-cons-100-llh} & Figure~\ref{fig:mem-4-prod-4-cons-100-llh}\\
    2757 %      \hline
    2758 %     dl & Figure~\ref{fig:mem-1-prod-1-cons-100-dl} & Figure~\ref{fig:mem-4-prod-4-cons-100-dl}\\
    2759 %      \hline
    2760 %     glibc & Figure~\ref{fig:mem-1-prod-1-cons-100-glc} & Figure~\ref{fig:mem-4-prod-4-cons-100-glc}\\
    2761 %      \hline
    2762 %     hoard & Figure~\ref{fig:mem-1-prod-1-cons-100-hrd} & Figure~\ref{fig:mem-4-prod-4-cons-100-hrd}\\
    2763 %      \hline
    2764 %     je & Figure~\ref{fig:mem-1-prod-1-cons-100-je} & Figure~\ref{fig:mem-4-prod-4-cons-100-je}\\
    2765 %      \hline
    2766 %     pt3 & Figure~\ref{fig:mem-1-prod-1-cons-100-pt3} & Figure~\ref{fig:mem-4-prod-4-cons-100-pt3}\\
    2767 %      \hline
    2768 %     rp & Figure~\ref{fig:mem-1-prod-1-cons-100-rp} & Figure~\ref{fig:mem-4-prod-4-cons-100-rp}\\
    2769 %      \hline
    2770 %     tbb & Figure~\ref{fig:mem-1-prod-1-cons-100-tbb} & Figure~\ref{fig:mem-4-prod-4-cons-100-tbb}\\
    2771 %      \hline
    2772 %     \end{tabular}
    2773 % \caption{Memory benchmark results}
    2774 % \label{table:mem-benchmark-figs}
    2775 % \end{table}
    2776 % Table Section~\ref{table:mem-benchmark-figs} shows the list of figures that contain memory benchmark results.
    2777 
     2778 Figures~\ref{fig:mem-1-prod-1-cons-100-llh}--\ref{fig:mem-4-prod-4-cons-100-tbb} show 16 figures, two for each of the 8 allocators, one for each configuration.
     2779 Each figure has 2 graphs, one for each experiment environment.
     2780 Each graph has the following 5 subgraphs that show memory usage and statistics throughout the micro-benchmark's lifetime.
    2781 \begin{itemize}[topsep=3pt,itemsep=2pt,parsep=0pt]
     2782 \item \textit{\textbf{current\_req\_mem(B)}} shows the amount of dynamic memory requested and currently in use by the benchmark.
    2783 \item \textit{\textbf{heap}}* shows the memory requested by the program (allocator) from the system that lies in the heap (@sbrk@) area.
    2784 \item \textit{\textbf{mmap\_so}}* shows the memory requested by the program (allocator) from the system that lies in the @mmap@ area.
    2785 \item \textit{\textbf{mmap}}* shows the memory requested by the program (allocator or shared libraries) from the system that lies in the @mmap@ area.
    2786 \item \textit{\textbf{total\_dynamic}} shows the total usage of dynamic memory by the benchmark program, which is a sum of \textit{heap}, \textit{mmap}, and \textit{mmap\_so}.
    2787 \end{itemize}
    2788 * These statistics are gathered by monitoring a process's @/proc/self/maps@ file.
    2789 
    2790 The X-axis shows the time when the memory information is polled.
    2791 The Y-axis shows the memory usage in bytes.
    2792 
     2793 For this experiment, the difference between the memory requested by the benchmark (\textit{current\_req\_mem(B)}) and the memory that the process has received from the system (\textit{heap}, \textit{mmap}) should be minimal.
    2794 This difference is the memory overhead caused by the allocator and shows the level of fragmentation in the allocator.
    2795 
    2796 \paragraph{Assessment}
     2797 First, the differences in the shape of the curves between architectures (top ARM, bottom x64) are small; the main difference is in the amount of memory used.
    2798 Hence, it is possible to focus on either the top or bottom graph.
    2799 
    2800 Second, the heap curve is 0 for four memory allocators: \textsf{hrd}, \textsf{je}, \textsf{pt3}, and \textsf{rp}, indicating these memory allocators only use @mmap@ to get memory from the system and ignore the @sbrk@ area.
    2801 
    2802 The total dynamic memory is higher for \textsf{hrd} and \textsf{tbb} than the other allocators.
    2803 The main reason is the use of superblocks (see Section~\ref{s:ObjectContainers}) containing objects of the same size.
    2804 These superblocks are maintained throughout the life of the program.
    2805 
    2806 \textsf{pt3} is the only memory allocator where the total dynamic memory goes down in the second half of the program lifetime when the memory is freed by the benchmark program.
     2807 This makes \textsf{pt3} the only memory allocator that gives memory back to the OS as it is freed by the program.
    2808 
    2809 % FOR 1 THREAD
    2810 
    2811 %mem-1-prod-1-cons-100-llh.eps
    2812 \begin{figure}
    2813 \centering
    2814     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-llh} } \\
    2815     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-llh} }
    2816 \caption{Memory benchmark results with Configuration-1 for llh memory allocator}
    2817 \label{fig:mem-1-prod-1-cons-100-llh}
    2818 \end{figure}
    2819 
    2820 %mem-1-prod-1-cons-100-dl.eps
    2821 \begin{figure}
    2822 \centering
    2823     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-dl} } \\
    2824     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-dl} }
    2825 \caption{Memory benchmark results with Configuration-1 for dl memory allocator}
    2826 \label{fig:mem-1-prod-1-cons-100-dl}
    2827 \end{figure}
    2828 
    2829 %mem-1-prod-1-cons-100-glc.eps
    2830 \begin{figure}
    2831 \centering
    2832     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-glc} } \\
    2833     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-glc} }
    2834 \caption{Memory benchmark results with Configuration-1 for glibc memory allocator}
    2835 \label{fig:mem-1-prod-1-cons-100-glc}
    2836 \end{figure}
    2837 
    2838 %mem-1-prod-1-cons-100-hrd.eps
    2839 \begin{figure}
    2840 \centering
    2841     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-hrd} } \\
    2842     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-hrd} }
    2843 \caption{Memory benchmark results with Configuration-1 for hoard memory allocator}
    2844 \label{fig:mem-1-prod-1-cons-100-hrd}
    2845 \end{figure}
    2846 
    2847 %mem-1-prod-1-cons-100-je.eps
    2848 \begin{figure}
    2849 \centering
    2850     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-je} } \\
    2851     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-je} }
    2852 \caption{Memory benchmark results with Configuration-1 for je memory allocator}
    2853 \label{fig:mem-1-prod-1-cons-100-je}
    2854 \end{figure}
    2855 
    2856 %mem-1-prod-1-cons-100-pt3.eps
    2857 \begin{figure}
    2858 \centering
    2859     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-pt3} } \\
    2860     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-pt3} }
    2861 \caption{Memory benchmark results with Configuration-1 for pt3 memory allocator}
    2862 \label{fig:mem-1-prod-1-cons-100-pt3}
    2863 \end{figure}
    2864 
    2865 %mem-1-prod-1-cons-100-rp.eps
    2866 \begin{figure}
    2867 \centering
    2868     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-rp} } \\
    2869     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-rp} }
    2870 \caption{Memory benchmark results with Configuration-1 for rp memory allocator}
    2871 \label{fig:mem-1-prod-1-cons-100-rp}
    2872 \end{figure}
    2873 
    2874 %mem-1-prod-1-cons-100-tbb.eps
    2875 \begin{figure}
    2876 \centering
    2877     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-1-prod-1-cons-100-tbb} } \\
    2878     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-1-prod-1-cons-100-tbb} }
    2879 \caption{Memory benchmark results with Configuration-1 for tbb memory allocator}
    2880 \label{fig:mem-1-prod-1-cons-100-tbb}
    2881 \end{figure}
    2882 
    2883 % FOR 4 THREADS
    2884 
    2885 %mem-4-prod-4-cons-100-llh.eps
    2886 \begin{figure}
    2887 \centering
    2888     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-llh} } \\
    2889     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-llh} }
    2890 \caption{Memory benchmark results with Configuration-2 for llh memory allocator}
    2891 \label{fig:mem-4-prod-4-cons-100-llh}
    2892 \end{figure}
    2893 
    2894 %mem-4-prod-4-cons-100-dl.eps
    2895 \begin{figure}
    2896 \centering
    2897     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-dl} } \\
    2898     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-dl} }
    2899 \caption{Memory benchmark results with Configuration-2 for dl memory allocator}
    2900 \label{fig:mem-4-prod-4-cons-100-dl}
    2901 \end{figure}
    2902 
    2903 %mem-4-prod-4-cons-100-glc.eps
    2904 \begin{figure}
    2905 \centering
    2906     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-glc} } \\
    2907     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-glc} }
    2908 \caption{Memory benchmark results with Configuration-2 for glibc memory allocator}
    2909 \label{fig:mem-4-prod-4-cons-100-glc}
    2910 \end{figure}
    2911 
    2912 %mem-4-prod-4-cons-100-hrd.eps
    2913 \begin{figure}
    2914 \centering
    2915     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-hrd} } \\
    2916     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-hrd} }
    2917 \caption{Memory benchmark results with Configuration-2 for hoard memory allocator}
    2918 \label{fig:mem-4-prod-4-cons-100-hrd}
    2919 \end{figure}
    2920 
    2921 %mem-4-prod-4-cons-100-je.eps
    2922 \begin{figure}
    2923 \centering
    2924     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-je} } \\
    2925     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-je} }
    2926 \caption{Memory benchmark results with Configuration-2 for je memory allocator}
    2927 \label{fig:mem-4-prod-4-cons-100-je}
    2928 \end{figure}
    2929 
    2930 %mem-4-prod-4-cons-100-pt3.eps
    2931 \begin{figure}
    2932 \centering
    2933     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-pt3} } \\
    2934     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-pt3} }
    2935 \caption{Memory benchmark results with Configuration-2 for pt3 memory allocator}
    2936 \label{fig:mem-4-prod-4-cons-100-pt3}
    2937 \end{figure}
    2938 
    2939 %mem-4-prod-4-cons-100-rp.eps
    2940 \begin{figure}
    2941 \centering
    2942     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-rp} } \\
    2943         %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-rp} }
    2944 \caption{Memory benchmark results with Configuration-2 for rp memory allocator}
    2945 \label{fig:mem-4-prod-4-cons-100-rp}
    2946 \end{figure}
    2947 
    2948 %mem-4-prod-4-cons-100-tbb.eps
    2949 \begin{figure}
    2950 \centering
    2951     %\subfloat[Algol]{ \includegraphics[width=0.9\textwidth]{evaluations/algol-perf-eps/mem-4-prod-4-cons-100-tbb} } \\
    2952     %\subfloat[Nasus]{ \includegraphics[width=0.9\textwidth]{evaluations/nasus-perf-eps/mem-4-prod-4-cons-100-tbb} }
    2953 \caption{Memory benchmark results with Configuration-2 for tbb memory allocator}
    2954 \label{fig:mem-4-prod-4-cons-100-tbb}
    2955 \end{figure}
    2956 
    29572067
    29582068\section{Conclusion}
    29592069
    2960 % \noindent
    2961 % ====================
    2962 %
    2963 % Writing Points:
    2964 % \begin{itemize}
    2965 % \item
    2966 % Summarize u-benchmark suite.
    2967 % \item
    2968 % Summarize @uHeapLmmm@.
    2969 % \item
    2970 % Make recommendations on memory allocator design.
    2971 % \end{itemize}
    2972 %
    2973 % \noindent
    2974 % ====================
    2975 
    2976 The goal of this work was to build a low-latency (or high bandwidth) memory allocator for both KT and UT multi-threading systems that is competitive with the best current memory allocators while extending the feature set of existing and new allocator routines.
    2977 The new llheap memory-allocator achieves all of these goals, while maintaining and managing sticky allocation information without a performance loss.
    2978 Hence, it becomes possible to use @realloc@ frequently as a safe operation, rather than just occasionally.
    2979 Furthermore, the ability to query sticky properties and information allows programmers to write safer programs, as it is possible to dynamically match allocation styles from unknown library routines that return allocations.
    2980 
    2981 Extending the C allocation API with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ means programmers do not have to do these useful allocation operations themselves.
    2982 The ability to use \CFA's advanced type-system (and possibly \CC's too) to have one allocation routine with completely orthogonal sticky properties shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation.
    2983 
     2070The goal of this work is to build a full-featured, low-latency (or high bandwidth) memory allocator for both KT and UT multi-threading systems that is competitive with the best current memory allocators while extending the feature set of existing and new allocator routines.
     2071The new llheap allocator achieves all of these goals, while maintaining and managing sticky allocation information \emph{without a performance loss}.
     2072Hence, it is possible to use @realloc@ frequently as a safe operation, rather than just occasionally or not at all.
     2073Furthermore, the ability to query sticky properties and other information allows programmers to write safer programs, as it is possible to dynamically match allocation styles from unknown library routines that return allocations.
     2074
      2075Extending the C allocation API with @resize@, advanced @realloc@, @aalloc@, @amemalign@, @cmemalign@, and other alignment variations means programmers do not have to generate these allocation operations themselves.
     2076The ability of the type systems in modern languages, \eg \CFA, to condense the allocation API to one routine with completely orthogonal allocation properties shows how far the allocation API can be advanced.
      2077The result is increased safety and a reduced cognitive load when performing dynamic allocation.
     2078All of these extensions should eliminate common reasons for C programmers to roll their own memory allocator and/or allocation function, which is a huge safety advantage.
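
A brief usage sketch (signatures as introduced for llheap earlier in the paper; shown here purely as an illustration):
\begin{cfa}
int * ia = aalloc( 10, sizeof( int ) );	// array allocation, no zero fill
double * da = cmemalign( 64, 10, sizeof( double ) );	// zero filled, 64-byte aligned
da = realloc( da, 20 * sizeof( double ) );	// sticky zero fill and alignment preserved
free( ia );  free( da );
\end{cfa}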
     2079
     2080The ability to compile llheap with static/dynamic linking and optional statistics/debugging provides programmers with multiple mechanisms to balance performance and safety.
     2081These allocator versions are easy to use because they can be linked to an application without recompilation.
    29842082Providing comprehensive statistics for all allocation operations is invaluable in understanding and debugging a program's dynamic behaviour.
    29852083No other memory allocator provides such comprehensive statistics gathering.
    2986 This capability was used extensively during the development of llheap to verify its behaviour.
    2987 As well, providing a debugging mode where allocations are checked, along with internal pre/post conditions and invariants, is extremely useful, especially for students.
    2988 While not as powerful as the @valgrind@ interpreter, a large number of allocation mistakes are detected.
    2989 Finally, contention-free statistics gathering and debugging have a low enough cost to be used in production code.
    2990 
     2991 The ability to compile llheap with static/dynamic linking and optional statistics/debugging provides programmers with multiple mechanisms to balance performance and safety.
    2992 These allocator versions are easy to use because they can be linked to an application without recompilation.
    2993 
    2994 Starting a micro-benchmark test-suite for comparing allocators, rather than relying on a suite of arbitrary programs, has been an interesting challenge.
    2995 The current micro-benchmarks allow some understanding of allocator implementation properties without actually looking at the implementation.
    2996 For example, the memory micro-benchmark quickly identified how several of the allocators work at the global level.
    2997 It was not possible to show how the micro-benchmarks adjustment knobs were used to tune to an interesting test point.
    2998 Many graphs were created and discarded until a few were selected for the work.
    2999 
    3000 
    3001 \subsection{Future Work}
    3002 
     3003 A careful walk-through of the allocator fastpath should yield additional optimizations for a slight performance gain.
     3004 In particular, analysing the implementation of rpmalloc, which is often the fastest allocator, may suggest further improvements.
    3005 
    3006 The micro-benchmark project requires more testing and analysis.
    3007 Additional allocation patterns are needed to extract meaningful information about allocators, and within allocation patterns, what are the most useful tuning knobs.
    3008 Also, identifying ways to visualize the results of the micro-benchmarks is a work in progress.
    3009 
    3010 After llheap is made available on GitHub, interacting with its users to locate problems and improvements will make llbench a more robust memory allocator.
    3011 As well, feedback from the \uC and \CFA projects, which have adopted llheap for their memory allocator, will provide additional information.
    3012 
     2084This capability was used extensively during the development of llheap to verify its behaviour, and to verify the benchmarks developed for the paper.
     2085As well, the debugging mode, where allocations are checked along with internal pre/post-conditions and invariants, is extremely useful especially for students ($\approx$1,000 students have tested the \uC version of llheap).
      2086While not as powerful as the @valgrind@ interpreter, llheap's debugging mode can detect a large number of allocation mistakes.
     2087The contention-free statistics gathering and debugging have a low enough cost to be used in production code.
     2088Finally, no other memory allocator addresses the needs of user-level threading, which are now available in many modern languages.
     2089
     2090Creating a benchmark test-suite for comparing allocators, rather than relying on a suite of arbitrary programs, has been an interesting challenge.
     2091The purpose of these performance tests is not to pick winners and losers among the allocators, because each allocator optimizes a particular set of allocation patterns: there is no optimal memory-allocator.
     2092The goal is to demonstrate that llheap's performance, both in time and space, across some interesting allocation patterns, is comparable to the best allocators in use today.
     2093Admittedly, there are pathological cases where llheap might use significant amounts of memory because it never coalesces or returns storage to the OS.
      2094These pathological cases do not correspond to long-running applications, where llheap can perform very well.
     2095In the small set of tested benchmarks, no heap blowup was observed, while some tests caused time blowups in other allocators.
     2096Therefore, llheap is a viable drop-in replacement for many applications and its ancillary features make it safer and more informative.
     2097
     2098
     2099\subsection{Recommendations}
     2100
      2101Substantial work has been put into building a new allocator and benchmarks, and into comprehensive performance tests among allocators.
     2102Based on this work, we make two recommendations:
     2103\begin{enumerate}[leftmargin=*, topsep=0pt,itemsep=0pt,parsep=0pt]
     2104\item
     2105Hoard is no longer maintained and did not do well (even broke) in some performance experiments.
      2106We recommend that those doing memory-allocation research not use it.
     2107\item
     2108glibc did not perform as well as other allocators.
      2109Given it is the default memory allocator for many academic and industry applications, this is unfortunate and skews performance results, so developers may draw incorrect conclusions.
     2110As such, we recommend the adoption of a newer memory allocator for glibc.
     2111We offer llheap for the reasons given above, but most importantly, its small code base.
     2112glibc maintainers come and go.
     2113Therefore, it is crucial for a new maintainer to onboard quickly and have a thorough understanding of the code base within a month.
     2114The llheap code base is small and can be learned quickly because of its simple design, making it an ideal choice as a substitute allocator.
     2115\end{enumerate}
    30132116
    30142117
     
    30162119
    30172120This research is funded by the NSERC/Waterloo-Huawei (\url{http://www.huawei.com}) Joint Innovation Lab. %, and Peter Buhr is partially funded by the Natural Sciences and Engineering Research Council of Canada.
    3018 
    3019 {%
    3020 \fontsize{9bp}{11.5bp}\selectfont%
     2121% Special thanks to Trevor Brown for many helpful discussions.
     2122
     2123\bibliographystyle{ACM-Reference-Format}
    30212124\bibliography{pl,local}
    3022 }%
    30232125
    30242126\end{document}
     2127\endinput
    30252128
    30262129% Local Variables: %
  • doc/papers/llheap/figures/AddressSpace.fig

    r7ca6bf1 r1dec8f3  
    88-2
    991200 2
     106 5700 1200 6600 1800
    10112 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    11          1200 1200 2100 1200 2100 1800 1200 1800 1200 1200
    12 2 2 0 1 0 7 60 -1 17 0.000 0 0 -1 0 0 5
    13          2100 1200 3000 1200 3000 1800 2100 1800 2100 1200
     12         5700 1250 6600 1250 6600 1750 5700 1750 5700 1250
     134 1 0 50 -1 0 9 0.0000 2 120 660 6150 1575 Code and\001
     144 1 0 50 -1 0 9 0.0000 2 120 375 6150 1400 Static\001
     154 1 0 50 -1 0 9 0.0000 2 120 315 6150 1725 Data\001
     16-6
     176 3000 1200 3900 1800
     182 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     19         3000 1250 3900 1250 3900 1750 3000 1750 3000 1250
     20-6
     216 1200 1200 2100 1800
     222 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     23         1200 1250 2100 1250 2100 1750 1200 1750 1200 1250
     24-6
     256 4800 1200 5700 1800
     262 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     27         4800 1250 5700 1250 5700 1750 4800 1750 4800 1250
     28-6
     296 2100 1200 3000 1800
    14302 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    1531        1 1 1.00 45.00 90.00
     
    1834        1 1 1.00 45.00 90.00
    1935         3000 1500 2700 1500
    20 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    21          3000 1200 3900 1200 3900 1800 3000 1800 3000 1200
    22362 2 0 1 0 7 60 -1 17 0.000 0 0 -1 0 0 5
    23          3900 1200 4800 1200 4800 1800 3900 1800 3900 1200
     37         2100 1250 3000 1250 3000 1750 2100 1750 2100 1250
     384 1 0 50 -1 0 9 0.0000 2 150 600 2550 1700 Memory\001
     394 1 0 50 -1 0 9 0.0000 2 120 300 2550 1450 Free\001
     40-6
     416 3900 1200 4800 1800
    24422 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    2543        1 1 1.00 45.00 90.00
     
    2846        1 1 1.00 45.00 90.00
    2947         4800 1500 4500 1500
    30 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    31          4800 1200 5700 1200 5700 1800 4800 1800 4800 1200
    32 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    33          5700 1200 6600 1200 6600 1800 5700 1800 5700 1200
    34 4 0 0 50 -1 0 10 0.0000 2 165 870 1200 2025 high address\001
    35 4 2 0 50 -1 0 10 0.0000 2 120 810 6600 2025 low address\001
    36 4 1 0 50 -1 0 10 0.0000 2 120 375 1650 1575 Stack\001
    37 4 1 0 50 -1 0 10 0.0000 2 150 600 2550 1725 Memory\001
    38 4 1 0 50 -1 0 10 0.0000 2 120 300 2550 1425 Free\001
    39 4 1 0 50 -1 0 10 0.0000 2 120 660 3450 1575 Code and\001
    40 4 1 0 50 -1 0 10 0.0000 2 150 630 3450 1350 Dynamic\001
    41 4 1 0 50 -1 0 10 0.0000 2 120 315 3450 1775 Data\001
    42 4 1 0 50 -1 0 10 0.0000 2 120 300 4350 1425 Free\001
    43 4 1 0 50 -1 0 10 0.0000 2 150 600 4350 1725 Memory\001
    44 4 1 4 50 -1 0 10 0.0000 2 150 630 5250 1425 Dynamic\001
    45 4 1 0 50 -1 0 10 0.0000 2 120 315 6150 1775 Data\001
    46 4 1 0 50 -1 0 10 0.0000 2 120 660 6150 1575 Code and\001
    47 4 1 0 50 -1 0 10 0.0000 2 120 375 6150 1350 Static\001
    48 4 1 4 50 -1 0 10 0.0000 2 120 720 5250 1725 Allocation\001
     482 2 0 1 0 7 60 -1 17 0.000 0 0 -1 0 0 5
     49         3900 1250 4800 1250 4800 1750 3900 1750 3900 1250
     504 1 0 50 -1 0 9 0.0000 2 150 600 4350 1700 Memory\001
     514 1 0 50 -1 0 9 0.0000 2 120 300 4350 1450 Free\001
     52-6
     534 1 0 50 -1 0 9 0.0000 2 120 375 1650 1575 Stack\001
     544 1 0 50 -1 0 9 0.0000 2 120 660 3450 1575 Code and\001
     554 1 0 50 -1 0 9 0.0000 2 120 315 3450 1725 Data\001
     564 1 0 50 -1 0 9 0.0000 2 150 630 3450 1400 Dynamic\001
     574 1 4 50 -1 0 9 0.0000 2 150 630 5250 1450 Dynamic\001
     584 1 4 50 -1 0 9 0.0000 2 120 720 5250 1700 Allocation\001
     594 0 0 50 -1 0 9 0.0000 2 165 870 1200 1950 high address\001
     604 2 0 50 -1 0 9 0.0000 2 120 810 6600 1950 low address\001
  • doc/papers/llheap/figures/Alignment2.fig

    r7ca6bf1 r1dec8f3  
    88-2
    991200 2
    10 2 1 1 1 0 7 25 -1 -1 4.000 0 0 -1 0 0 2
    11          2100 1500 2100 1800
    12 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
    13          5700 1500 5700 1800
     102 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     11         5700 1575 5700 1800
     122 1 0 1 0 7 25 -1 -1 0.000 0 0 -1 0 0 2
     13         2400 1575 2400 1800
     142 1 0 1 0 7 25 -1 -1 0.000 0 0 -1 0 0 2
     15         4200 1575 4200 1800
     162 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
     17         1200 1575 6600 1575 6600 1800 1200 1800 1200 1575
    14182 2 0 0 0 7 60 -1 18 0.000 0 0 -1 0 0 5
    15          2100 1500 4200 1500 4200 1800 2100 1800 2100 1500
    16 2 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
    17          1200 1500 6600 1500 6600 1800 1200 1800 1200 1500
    18 2 1 1 1 0 7 25 -1 -1 4.000 0 0 -1 0 0 2
    19          4200 1500 4200 1800
     19         2400 1575 4200 1575 4200 1800 2400 1800 2400 1575
    20202 2 0 0 0 7 60 -1 18 0.000 0 0 -1 0 0 5
    21          5700 1500 6600 1500 6600 1800 5700 1800 5700 1500
    22 4 1 0 50 -1 0 10 0.0000 2 135 540 1650 1725 header\001
    23 4 1 0 50 -1 4 10 0.0000 2 150 135 1200 2025 H\001
    24 4 1 0 50 -1 4 10 0.0000 2 150 135 2100 2025 P\001
    25 4 0 0 50 -1 0 10 0.0000 2 180 1575 2175 2025 (min. alignment M)\001
    26 4 1 0 50 -1 0 10 0.0000 2 180 510 4950 1725 object\001
    27 4 1 0 50 -1 0 10 0.0000 2 135 315 4950 1425 size\001
    28 4 1 0 50 -1 0 10 0.0000 2 180 1815 3150 1425 internal fragmentation\001
    29 4 1 0 50 -1 0 10 0.0000 2 135 585 6150 1725 unused\001
    30 4 1 0 50 -1 4 10 0.0000 2 150 135 4200 2025 A\001
    31 4 0 0 50 -1 0 10 0.0000 2 180 1200 4275 2025 (multiple of N)\001
     21         5700 1575 6600 1575 6600 1800 5700 1800 5700 1575
     224 1 0 50 -1 0 9 0.0000 2 135 360 4950 1725 object\001
     234 1 0 50 -1 0 9 0.0000 2 105 420 6150 1725 unused\001
     244 1 0 50 -1 0 9 0.0000 2 105 375 1800 1725 header\001
     254 1 0 50 -1 0 9 0.0000 2 135 1320 3300 1500 internal fragmentation\001
     264 1 0 50 -1 0 9 0.0000 2 105 225 4950 1500 size\001
     274 0 0 50 -1 0 9 0.0000 2 135 1140 2400 1950 $P$ (aligned $M$)\001
     284 0 0 50 -1 0 9 0.0000 2 135 1155 1200 1950 $H$ (aligned $M$)\001
     294 0 0 50 -1 0 9 0.0000 2 135 1365 4200 1950 $A$ (multiple of $N$)\001
  • doc/papers/llheap/figures/Alignment2Impl.fig

    r7ca6bf1 r1dec8f3  
    991200 2
    10102 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
    11          2100 1500 2100 1875
    12 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
    1311         4200 1500 4200 1875
    14122 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     
    1715        1 1 1.00 45.00 90.00
    1816         3300 1725 2100 1725
     172 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     18         2100 1500 2100 1875
    19192 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
    2020         1200 1500 5700 1500 5700 1875 1200 1875 1200 1500
    21212 2 0 0 0 7 60 -1 18 0.000 0 0 -1 0 0 5
    2222         2100 1500 3300 1500 3300 1875 2100 1875 2100 1500
    23 4 1 0 50 -1 0 10 0.0000 2 180 1815 2550 1425 internal fragmentation\001
    24 4 1 0 50 -1 0 10 0.0000 2 180 510 4950 1725 object\001
    25 4 1 0 50 -1 0 10 0.0000 2 135 315 4950 1425 size\001
    26 4 1 0 50 -1 4 10 0.0000 2 150 135 1200 2100 H\001
    27 4 1 0 50 -1 4 10 0.0000 2 150 135 2100 2100 P\001
    28 4 0 0 50 -1 0 10 0.0000 2 180 1575 2175 2100 (min. alignment M)\001
    29 4 1 0 50 -1 4 10 0.0000 2 150 135 4200 2100 A\001
    30 4 0 0 50 -1 0 10 0.0000 2 180 1200 4275 2100 (multiple of N)\001
    31 4 1 0 50 -1 0 10 0.0000 2 135 540 3750 1850 header\001
    32 4 1 0 50 -1 0 10 0.0000 2 135 345 3750 1700 fake\001
    33 4 1 0 50 -1 0 10 0.0000 2 135 450 2700 1700 offset\001
    34 4 1 0 50 -1 0 10 0.0000 2 135 540 1650 1850 header\001
    35 4 1 0 50 -1 0 10 0.0000 2 135 570 1650 1675 normal\001
     234 1 0 50 -1 0 9 0.0000 2 135 1320 2550 1425 internal fragmentation\001
     244 1 0 50 -1 0 9 0.0000 2 135 360 4950 1725 object\001
     254 1 0 50 -1 0 9 0.0000 2 105 225 4950 1425 size\001
     264 1 0 50 -1 0 9 0.0000 2 105 330 2700 1700 offset\001
     274 1 0 50 -1 0 9 0.0000 2 105 420 1650 1650 normal\001
     284 1 0 50 -1 0 9 0.0000 2 105 240 3750 1650 fake\001
     294 1 0 50 -1 0 9 0.0000 2 105 375 1650 1800 header\001
     304 1 0 50 -1 0 9 0.0000 2 105 375 3750 1800 header\001
     314 0 0 50 -1 0 9 0.0000 2 120 255 1125 2025 $H$\001
     324 0 0 50 -1 0 9 0.0000 2 120 240 2025 2025 $P$\001
     334 0 0 50 -1 0 9 0.0000 2 120 240 3225 2025 $F$\001
     344 0 0 50 -1 0 9 0.0000 2 120 255 4125 2025 $A$\001
  • doc/papers/llheap/figures/AllocatedObject.fig

    r7ca6bf1 r1dec8f3  
    1 #FIG 3.2  Produced by xfig version 3.2.5
     1#FIG 3.2  Produced by xfig version 3.2.7b
    22Landscape
    33Center
    44Inches
    5 Letter 
     5Letter
    66100.00
    77Single
    88-2
    991200 2
     102 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     11         2100 1275 2100 1500
     122 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     13         3000 1275 3000 1500
     142 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     15         3900 1275 3900 1500
     162 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     17         4800 1275 4800 1500
     182 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     19         1200 1275 5700 1275 5700 1500 1200 1500 1200 1275
    10202 2 0 0 0 7 60 -1 17 0.000 0 0 -1 0 0 5
    11          3900 1200 4800 1200 4800 1500 3900 1500 3900 1200
     21         2100 1275 3000 1275 3000 1500 2100 1500 2100 1275
    12222 2 0 0 0 7 60 -1 17 0.000 0 0 -1 0 0 5
    13          2100 1200 3000 1200 3000 1500 2100 1500 2100 1200
    14 2 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    15          1200 1200 5700 1200 5700 1500 1200 1500 1200 1200
    16 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    17          2100 1200 2100 1500
    18 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    19          3000 1200 3000 1500
    20 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    21          3900 1200 3900 1500
    22 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    23          4800 1200 4800 1500
    24 4 1 0 50 -1 0 10 0.0000 2 135 555 1650 1425 Header\001
    25 4 1 0 50 -1 0 10 0.0000 2 180 600 2550 1425 Padding\001
    26 4 1 0 50 -1 0 10 0.0000 2 180 510 3450 1425 Object\001
    27 4 1 0 50 -1 0 10 0.0000 2 180 600 4350 1425 Spacing\001
    28 4 1 0 50 -1 0 10 0.0000 2 135 495 5250 1425 Trailer\001
     23         3900 1275 4800 1275 4800 1500 3900 1500 3900 1275
     244 1 0 50 -1 0 9 0.0000 2 105 405 1650 1425 Header\001
     254 1 0 50 -1 0 9 0.0000 2 135 495 2550 1425 Padding\001
     264 1 0 50 -1 0 9 0.0000 2 135 390 3450 1425 Object\001
     274 1 0 50 -1 0 9 0.0000 2 135 480 4350 1425 Spacing\001
     284 1 0 50 -1 0 9 0.0000 2 105 390 5250 1425 Trailer\001
  • doc/papers/llheap/figures/AllocatorComponents.fig

    r7ca6bf1 r1dec8f3  
    1717         4200 1800 4800 1800 4800 2100 4200 2100 4200 1800
    18182 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    19          4200 2100 5100 2100 5100 2400 4200 2400 4200 2100
    20 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    2119         5100 2100 6300 2100 6300 2400 5100 2400 5100 2100
    22 2 2 0 1 0 7 50 -1 17 0.000 0 0 -1 0 0 5
     202 2 0 1 0 7 50 -1 18 0.000 0 0 -1 0 0 5
    2321         3300 1800 4200 1800 4200 2100 3300 2100 3300 1800
    24 2 2 0 1 0 7 50 -1 17 0.000 0 0 -1 0 0 5
    25          5400 1800 6300 1800 6300 2100 5400 2100 5400 1800
    26 2 2 0 1 0 7 50 -1 17 0.000 0 0 -1 0 0 5
     222 2 0 1 0 7 50 -1 18 0.000 0 0 -1 0 0 5
    2723         3300 2100 3600 2100 3600 2400 3300 2400 3300 2100
    28242 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     
    30262 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    3127         3900 2400 4800 2400 4800 2700 3900 2700 3900 2400
    32 2 2 0 1 0 7 50 -1 17 0.000 0 0 -1 0 0 5
     282 2 0 1 0 7 50 -1 18 0.000 0 0 -1 0 0 5
    3329         4800 2400 5400 2400 5400 2700 4800 2700 4800 2400
    34 2 2 0 1 0 7 50 -1 17 0.000 0 0 -1 0 0 5
    35          4800 1800 5400 1800 5400 2100 4800 2100 4800 1800
    36 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    37          5400 2400 6300 2400 6300 2700 5400 2700 5400 2400
    38302 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    3931        1 1 1.00 45.00 90.00
     
    58502 2 0 1 0 7 60 -1 13 0.000 0 0 -1 0 0 5
    5951         3300 2700 6300 2700 6300 3000 3300 3000 3300 2700
    60 4 0 0 50 -1 2 10 0.0000 2 165 1005 3300 1725 Storage Data\001
    61 4 2 0 50 -1 0 10 0.0000 2 165 810 3000 1875 free objects\001
    62 4 2 0 50 -1 0 10 0.0000 2 135 1140 3000 2850 reserve memory\001
    63 4 1 0 50 -1 0 10 0.0000 2 120 795 2325 1500 Static Zone\001
    64 4 1 0 50 -1 0 10 0.0000 2 165 1845 4800 1500 Dynamic-Allocation Zone\001
    65 4 2 0 50 -1 2 10 0.0000 2 165 1005 2325 2325 Management\001
    66 4 2 0 50 -1 2 10 0.0000 2 135 375 2325 2525 Data\001
     522 2 0 1 0 7 50 -1 18 0.000 0 0 -1 0 0 5
     53         5400 1800 6300 1800 6300 2100 5400 2100 5400 1800
     542 2 0 1 0 7 50 -1 18 0.000 0 0 -1 0 0 5
     55         4800 1800 5400 1800 5400 2100 4800 2100 4800 1800
     562 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     57         4200 2100 5100 2100 5100 2400 4200 2400 4200 2100
     582 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     59         5400 2400 6300 2400 6300 2700 5400 2700 5400 2400
     604 0 0 50 -1 2 9 0.0000 2 150 975 3300 1725 Storage Data\001
     614 2 0 50 -1 0 9 0.0000 2 150 795 3000 1875 free objects\001
     624 2 0 50 -1 0 9 0.0000 2 135 1215 3000 2850 reserved memory\001
     634 1 0 50 -1 0 9 0.0000 2 120 780 2325 1500 Static Zone\001
     644 1 0 50 -1 0 9 0.0000 2 150 1815 4800 1500 Dynamic-Allocation Zone\001
     654 2 0 50 -1 2 9 0.0000 2 150 945 2325 2325 Management\001
     664 2 0 50 -1 2 9 0.0000 2 120 360 2325 2525 Data\001
  • doc/papers/llheap/figures/Container.fig

    r7ca6bf1 r1dec8f3  
    1 #FIG 3.2  Produced by xfig version 3.2.5-alpha5
     1#FIG 3.2  Produced by xfig version 3.2.7b
    22Landscape
    33Center
    44Inches
    5 Letter 
     5Letter
    66100.00
    77Single
    88-2
    991200 2
    10 6 1200 1125 2100 1575
     106 4630 1380 4970 1420
     111 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4650 1400 20 20 4650 1400 4670 1400
     121 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4950 1400 20 20 4950 1400 4970 1400
     131 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4800 1400 20 20 4800 1400 4820 1400
     14-6
    11152 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    12          1275 1200 2025 1200 2025 1500 1275 1500 1275 1200
    13 4 1 0 50 -1 0 10 0.0000 2 135 555 1650 1425 Header\001
    14 -6
    15 6 1950 1125 2850 1575
     16         1275 1275 2025 1275 2025 1500 1275 1500 1275 1275
    16172 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    17          2025 1200 2775 1200 2775 1500 2025 1500 2025 1200
    18 4 1 0 50 -1 0 10 0.0000 2 195 870 2400 1425 Object$_1$\001
    19 -6
    20 6 2700 1125 3600 1575
     18         2025 1275 2775 1275 2775 1500 2025 1500 2025 1275
    21192 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    22          2775 1200 3525 1200 3525 1500 2775 1500 2775 1200
    23 4 1 0 50 -1 0 10 0.0000 2 195 870 3150 1425 Object$_2$\001
    24 -6
    25 6 3450 1125 4350 1575
     20         2775 1275 3525 1275 3525 1500 2775 1500 2775 1275
    26212 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    27          3525 1200 4275 1200 4275 1500 3525 1500 3525 1200
    28 4 1 0 50 -1 0 10 0.0000 2 195 870 3900 1425 Object$_3$\001
    29 -6
     22         3525 1275 4275 1275 4275 1500 3525 1500 3525 1275
     232 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     24         4275 1275 5400 1275 5400 1500 4275 1500 4275 1275
     254 1 0 50 -1 0 9 0.0000 2 105 405 1650 1425 Header\001
     264 1 0 50 -1 0 9 0.0000 2 135 690 2400 1425 Object$_1$\001
     274 1 0 50 -1 0 9 0.0000 2 135 690 3150 1425 Object$_2$\001
     284 1 0 50 -1 0 9 0.0000 2 135 690 3900 1425 Object$_3$\001
  • doc/papers/llheap/figures/FakeHeader.fig

    r7ca6bf1 r1dec8f3  
    991200 2
    10102 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    11          2700 1500 2700 1800
    12 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    13          1200 1500 4200 1500 4200 1800 1200 1800 1200 1500
     11         3600 1575 3600 1800
    14122 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
    15          2550 1500 2550 1800
     13         3450 1575 3450 1800
    16142 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
    1715        1 1 1.00 45.00 90.00
    18          2925 1950 2625 1950 2625 1800
    19 4 1 0 50 -1 0 10 0.0000 2 135 450 3450 1725 offset\001
    20 4 1 0 50 -1 0 10 0.0000 2 180 825 1950 1725 alignment\001
    21 4 1 0 50 -1 0 10 0.0000 2 135 105 2625 1725 1\001
    22 4 0 0 50 -1 0 10 0.0000 2 180 1920 3000 2025 alignment (fake header)\001
    23 4 1 0 50 -1 0 10 0.0000 2 180 765 1950 1425 4/8-bytes\001
    24 4 1 0 50 -1 0 10 0.0000 2 180 765 3450 1425 4/8-bytes\001
     16         3825 1950 3525 1950 3525 1800
     172 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     18         2100 1575 5100 1575 5100 1800 2100 1800 2100 1575
     194 1 0 50 -1 0 9 0.0000 2 105 330 4350 1725 offset\001
     204 1 0 50 -1 0 9 0.0000 2 135 600 2850 1725 alignment\001
     214 1 0 50 -1 0 9 0.0000 2 105 75 3525 1725 1\001
     224 0 0 50 -1 0 9 0.0000 2 135 1575 3900 2025 $\\Rightarrow$ fake header\001
     234 2 0 50 -1 0 9 0.0000 2 105 660 2025 1725 fake header\001
     244 1 0 50 -1 0 9 0.0000 2 135 555 2850 1500 4/8-bytes\001
     254 1 0 50 -1 0 9 0.0000 2 135 555 4350 1500 4/8-bytes\001
  • doc/papers/llheap/figures/Header.fig

    r7ca6bf1 r1dec8f3  
    1111         1800 1800 4200 1800
    12122 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
    13          1800 2100 4200 2100
     13         3900 1575 3900 2250
     142 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     15         3600 1575 3600 2250
     162 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     17         3300 1575 3300 2250
     182 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
     19        1 1 1.00 45.00 90.00
     20         4200 2400 4050 2400 4050 1725
     212 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
     22        1 1 1.00 45.00 90.00
     23         4200 2550 3750 2550 3750 1725
     242 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
     25        1 1 1.00 45.00 90.00
     26         4200 2700 3450 2700 3450 1950
     272 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     28         1800 2025 4200 2025
    14292 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    15          1800 1500 4200 1500 4200 2400 1800 2400 1800 1500
    16 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
    17          3900 1500 3900 2400
    18 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
    19          3600 1500 3600 2400
    20 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
    21          3300 1500 3300 2400
     30         1800 1575 4200 1575 4200 2250 1800 2250 1800 1575
    22312 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    23          4200 1800 6600 1800 6600 2100 4200 2100 4200 1800
    24 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
    25         1 1 1.00 45.00 90.00
    26          4200 2775 3750 2775 3750 1725
    27 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
    28         1 1 1.00 45.00 90.00
    29          4200 2550 4050 2550 4050 1725
    30 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
    31         1 1 1.00 45.00 90.00
    32          4200 3000 3450 3000 3450 2025
    33 4 0 0 50 -1 0 10 0.0000 2 180 1185 1875 1725 bucket pointer\001
    34 4 0 0 50 -1 0 10 0.0000 2 180 1005 1875 2025 mapped size\001
    35 4 0 0 50 -1 0 10 0.0000 2 135 1215 1875 2325 next free block\001
    36 4 2 0 50 -1 0 10 0.0000 2 135 480 1725 2025 union\001
    37 4 1 0 50 -1 0 10 0.0000 2 180 945 5400 2025 request size\001
    38 4 1 0 50 -1 0 10 0.0000 2 180 765 5400 1425 4/8-bytes\001
    39 4 1 0 50 -1 0 10 0.0000 2 180 765 3000 1425 4/8-bytes\001
    40 4 1 0 50 -1 0 10 0.0000 2 135 270 3475 2025 0/1\001
    41 4 1 0 50 -1 0 10 0.0000 2 135 270 3775 1725 0/1\001
    42 4 1 0 50 -1 0 10 0.0000 2 135 270 4075 1725 0/1\001
    43 4 0 0 50 -1 0 10 0.0000 2 180 1515 4275 3075 mapped allocation\001
    44 4 0 0 50 -1 0 10 0.0000 2 135 825 4275 2850 zero filled\001
    45 4 0 0 50 -1 0 10 0.0000 2 180 1920 4275 2625 alignment (fake header)\001
     32         4200 1800 6600 1800 6600 2025 4200 2025 4200 1800
     334 0 0 50 -1 0 9 0.0000 2 135 855 1875 1725 bucket pointer\001
     344 1 0 50 -1 0 9 0.0000 2 105 195 3775 1725 0/1\001
     354 1 0 50 -1 0 9 0.0000 2 105 195 4075 1725 0/1\001
     364 2 0 50 -1 0 9 0.0000 2 105 345 1725 1950 union\001
     374 0 0 50 -1 0 9 0.0000 2 135 735 1875 1950 mapped size\001
     384 1 0 50 -1 0 9 0.0000 2 105 195 3450 1950 0/1\001
     394 1 0 50 -1 0 9 0.0000 2 135 690 5400 1950 request size\001
     404 1 0 50 -1 0 9 0.0000 2 135 555 3000 1500 4/8-bytes\001
     414 1 0 50 -1 0 9 0.0000 2 135 555 5400 1500 4/8-bytes\001
     424 0 0 50 -1 0 9 0.0000 2 105 885 1875 2175 next free block\001
     434 0 0 50 -1 0 9 0.0000 2 105 600 4275 2600 zero filled\001
     444 0 0 50 -1 0 9 0.0000 2 135 1095 4275 2750 mapped allocation\001
     454 0 0 50 -1 0 9 0.0000 2 135 1395 4275 2450 alignment (fake header)\001
  • doc/papers/llheap/figures/IntExtFragmentation.fig

    r7ca6bf1 r1dec8f3  
    1 #FIG 3.2  Produced by xfig version 3.2.5
     1#FIG 3.2  Produced by xfig version 3.2.7b
    22Landscape
    33Center
    44Inches
    5 Letter 
     5Letter
    66100.00
    77Single
    88-2
    991200 2
    10 6 3150 1200 3900 1500
    11 2 2 0 0 0 7 60 -1 17 0.000 0 0 -1 0 0 5
    12          3150 1200 3900 1200 3900 1500 3150 1500 3150 1200
    13 4 1 0 50 -1 0 10 0.0000 2 180 600 3525 1425 Spacing\001
    14 -6
    15 6 4425 1125 5775 1575
    16 2 2 0 2 0 7 60 -1 17 0.000 0 0 -1 0 0 5
    17          4500 1200 5700 1200 5700 1500 4500 1500 4500 1200
    18 4 1 0 50 -1 0 10 0.0000 2 180 1020 5100 1425 Free Memory\001
    19 -6
    20106 1200 1575 2550 1725
    21112 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     
    29192 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    3020         2550 1575 2550 1725
    31 4 1 0 50 -1 0 10 0.0000 2 135 570 1875 1725 internal\001
     214 1 0 50 -1 0 9 0.0000 2 120 525 1875 1725 internal\001
    3222-6
    33236 3150 1575 4500 1725
     
    42322 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    4333         4500 1575 4500 1725
    44 4 1 0 50 -1 0 10 0.0000 2 135 570 3825 1725 internal\001
     344 1 0 50 -1 0 9 0.0000 2 120 525 3825 1725 internal\001
    4535-6
    46366 4500 1575 5700 1725
     
    55452 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    5646         5700 1575 5700 1725
    57 4 1 0 50 -1 0 10 0.0000 2 135 615 5100 1725 external\001
     474 1 0 50 -1 0 9 0.0000 2 120 555 5100 1725 external\001
    5848-6
    59492 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    60          2550 1200 2550 1500
     50         2550 1275 2550 1500
     512 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     52         3150 1275 3150 1500
     532 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     54         3900 1275 3900 1500
     552 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     56         1800 1275 1800 1500
    61572 2 0 0 0 7 60 -1 17 0.000 0 0 -1 0 0 5
    62          1800 1200 2550 1200 2550 1500 1800 1500 1800 1200
    63 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    64          3150 1200 3150 1500
    65 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    66          3900 1200 3900 1500
    67 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    68          1800 1200 1800 1500
     58         1800 1275 2550 1275 2550 1500 1800 1500 1800 1275
    69592 2 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    70          1200 1200 4500 1200 4500 1500 1200 1500 1200 1200
    71 4 1 0 50 -1 0 10 0.0000 2 135 555 1500 1425 Header\001
    72 4 1 0 50 -1 0 10 0.0000 2 180 600 2175 1425 Padding\001
    73 4 1 0 50 -1 0 10 0.0000 2 180 510 2850 1425 Object\001
    74 4 1 0 50 -1 0 10 0.0000 2 135 495 4200 1425 Trailer\001
     60         1200 1275 4500 1275 4500 1500 1200 1500 1200 1275
     612 2 0 0 0 7 60 -1 17 0.000 0 0 -1 0 0 5
     62         3150 1275 3900 1275 3900 1500 3150 1500 3150 1275
     632 2 0 2 0 7 60 -1 17 0.000 0 0 -1 0 0 5
     64         4500 1275 5700 1275 5700 1500 4500 1500 4500 1275
     654 1 0 50 -1 0 9 0.0000 2 120 495 1500 1425 Header\001
     664 1 0 50 -1 0 9 0.0000 2 165 570 2175 1425 Padding\001
     674 1 0 50 -1 0 9 0.0000 2 150 450 2850 1425 Object\001
     684 1 0 50 -1 0 9 0.0000 2 120 465 4200 1425 Trailer\001
     694 1 0 50 -1 0 9 0.0000 2 150 945 5100 1425 Free Memory\001
     704 1 0 50 -1 0 9 0.0000 2 165 555 3525 1425 Spacing\001
  • doc/papers/llheap/figures/PerThreadHeap.fig

    r7ca6bf1 r1dec8f3  
    11112 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    1212         2700 1800 3000 1800 3000 2100 2700 2100 2700 1800
    13 4 1 0 50 -1 0 10 0.0000 2 120 135 2850 2025 G\001
     134 1 0 50 -1 0 9 0.0000 2 120 135 2850 2025 G\001
    1414-6
    15151 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
     
    3434        1 1 1.00 45.00 90.00
    3535         2250 1500 2250 1800
    36 4 1 0 50 -1 0 10 0.0000 2 180 1260 2550 2025 $\\Leftrightarrow$\001
    37 4 1 0 50 -1 0 10 0.0000 2 180 1260 3150 2025 $\\Leftrightarrow$\001
    38 4 0 0 50 -1 0 10 0.0000 2 120 240 3300 2025 OS\001
    39 4 1 0 50 -1 0 10 0.0000 2 165 495 1350 2025 H$_1$\001
    40 4 1 0 50 -1 0 10 0.0000 2 165 465 1350 1425 T$_1$\001
    41 4 1 0 50 -1 0 10 0.0000 2 165 495 1800 2025 H$_2$\001
    42 4 1 0 50 -1 0 10 0.0000 2 165 465 1800 1425 T$_2$\001
    43 4 1 0 50 -1 0 10 0.0000 2 165 495 2250 2025 H$_3$\001
    44 4 1 0 50 -1 0 10 0.0000 2 165 465 2250 1425 T$_3$\001
     364 1 0 50 -1 0 9 0.0000 2 180 1260 2550 2025 $\\Leftrightarrow$\001
     374 1 0 50 -1 0 9 0.0000 2 180 1260 3150 2025 $\\Leftrightarrow$\001
     384 0 0 50 -1 0 9 0.0000 2 120 240 3300 2025 OS\001
     394 1 0 50 -1 0 9 0.0000 2 165 495 1350 2025 H$_1$\001
     404 1 0 50 -1 0 9 0.0000 2 165 465 1350 1425 T$_1$\001
     414 1 0 50 -1 0 9 0.0000 2 165 495 1800 2025 H$_2$\001
     424 1 0 50 -1 0 9 0.0000 2 165 465 1800 1425 T$_2$\001
     434 1 0 50 -1 0 9 0.0000 2 165 495 2250 2025 H$_3$\001
     444 1 0 50 -1 0 9 0.0000 2 165 465 2250 1425 T$_3$\001
  • doc/papers/llheap/figures/SharedHeaps.fig

    r7ca6bf1 r1dec8f3  
    10106 1500 1200 2100 1500
    11111 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350
    12 4 1 0 50 -1 0 10 0.0000 2 165 465 1800 1425 T$_2$\001
     124 1 0 50 -1 0 9 0.0000 2 165 465 1800 1425 T$_2$\001
    1313-6
    14146 1050 1200 1650 1500
    15151 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
    16 4 1 0 50 -1 0 10 0.0000 2 165 465 1350 1425 T$_1$\001
     164 1 0 50 -1 0 9 0.0000 2 165 465 1350 1425 T$_1$\001
    1717-6
    18186 1950 1200 2550 1500
    19191 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350
    20 4 1 0 50 -1 0 10 0.0000 2 165 465 2250 1425 T$_3$\001
     204 1 0 50 -1 0 9 0.0000 2 165 465 2250 1425 T$_3$\001
    2121-6
    22226 1275 1800 1875 2100
    23232 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    2424         1425 1800 1725 1800 1725 2100 1425 2100 1425 1800
    25 4 1 0 50 -1 0 10 0.0000 2 165 495 1575 2025 H$_1$\001
     254 1 0 50 -1 0 9 0.0000 2 165 495 1575 2025 H$_1$\001
    2626-6
    27276 1725 1800 2325 2100
    28282 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    2929         1875 1800 2175 1800 2175 2100 1875 2100 1875 1800
    30 4 1 0 50 -1 0 10 0.0000 2 165 495 2025 2025 H$_2$\001
     304 1 0 50 -1 0 9 0.0000 2 165 495 2025 2025 H$_2$\001
    3131-6
    32326 2475 1800 2775 2100
    33332 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    3434         2475 1800 2775 1800 2775 2100 2475 2100 2475 1800
    35 4 1 0 50 -1 0 10 0.0000 2 120 135 2625 2025 G\001
     354 1 0 50 -1 0 9 0.0000 2 120 135 2625 2025 G\001
    3636-6
    37372 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
     
    5555        1 1 1.00 45.00 90.00
    5656         2250 1500 2100 1800
    57 4 0 0 50 -1 0 10 0.0000 2 120 240 3075 2025 OS\001
    58 4 1 0 50 -1 0 10 0.0000 2 180 1260 2325 2025 $\\Leftrightarrow$\001
    59 4 1 0 50 -1 0 10 0.0000 2 180 1260 2925 2025 $\\Leftrightarrow$\001
     574 0 0 50 -1 0 9 0.0000 2 120 240 3075 2025 OS\001
     584 1 0 50 -1 0 9 0.0000 2 180 1260 2325 2025 $\\Leftrightarrow$\001
     594 1 0 50 -1 0 9 0.0000 2 180 1260 2925 2025 $\\Leftrightarrow$\001
  • doc/papers/llheap/figures/SingleHeap.fig

    r7ca6bf1 r1dec8f3  
    10106 1500 1200 2100 1500
    11111 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350
    12 4 1 0 50 -1 0 10 0.0000 2 165 465 1800 1425 T$_2$\001
     124 1 0 50 -1 0 9 0.0000 2 165 465 1800 1425 T$_2$\001
    1313-6
    14146 1050 1200 1650 1500
    15151 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
    16 4 1 0 50 -1 0 10 0.0000 2 165 465 1350 1425 T$_1$\001
     164 1 0 50 -1 0 9 0.0000 2 165 465 1350 1425 T$_1$\001
    1717-6
    18186 1950 1200 2550 1500
    19191 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350
    20 4 1 0 50 -1 0 10 0.0000 2 165 465 2250 1425 T$_3$\001
     204 1 0 50 -1 0 9 0.0000 2 165 465 2250 1425 T$_3$\001
    2121-6
    22222 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
     
    3434        1 1 1.00 45.00 90.00
    3535         1800 1500 1800 1800
    36 4 1 0 50 -1 0 10 0.0000 2 165 495 1800 2025 H$_1$\001
    37 4 1 0 50 -1 0 10 0.0000 2 180 1260 2100 2025 $\\Leftrightarrow$\001
    38 4 0 0 50 -1 0 10 0.0000 2 120 240 2250 2025 OS\001
     364 1 0 50 -1 0 9 0.0000 2 165 495 1800 2025 H$_1$\001
     374 1 0 50 -1 0 9 0.0000 2 180 1260 2100 2025 $\\Leftrightarrow$\001
     384 0 0 50 -1 0 9 0.0000 2 120 240 2250 2025 OS\001
  • doc/papers/llheap/figures/llheap.fig

    r7ca6bf1 r1dec8f3  
    88-2
    991200 2
    10 6 1275 1950 1725 2250
    11 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    12          1275 1950 1725 1950 1725 2250 1275 2250 1275 1950
    13 4 1 0 50 -1 0 10 0.0000 2 135 360 1500 2175 lock\001
    14 -6
    15 6 4125 4050 4275 4350
    16 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 4125 20 20 4200 4125 4220 4125
    17 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 4200 20 20 4200 4200 4220 4200
    18 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 4275 20 20 4200 4275 4220 4275
    19 -6
    20 6 5025 3825 5325 3975
    21 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5100 3900 20 20 5100 3900 5120 3900
    22 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5175 3900 20 20 5175 3900 5195 3900
    23 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5250 3900 20 20 5250 3900 5270 3900
    24 -6
    25 6 6150 2025 6450 2175
    26 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6225 2100 20 20 6225 2100 6245 2100
    27 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6300 2100 20 20 6300 2100 6320 2100
    28 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6375 2100 20 20 6375 2100 6395 2100
    29 -6
    30 6 3225 4650 3675 4950
    31 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    32          3225 4650 3675 4650 3675 4950 3225 4950 3225 4650
    33 4 1 0 50 -1 0 10 0.0000 2 135 360 3450 4875 lock\001
    34 -6
    35 6 3750 2325 3900 2700
    36 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    37         1 1 1.00 45.00 90.00
    38          3825 2325 3825 2550
    39 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    40          3750 2550 3900 2550 3900 2700 3750 2700 3750 2550
    41 -6
    42 6 6750 2025 7050 2175
    43 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6825 2100 20 20 6825 2100 6845 2100
    44 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6900 2100 20 20 6900 2100 6920 2100
    45 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6975 2100 20 20 6975 2100 6995 2100
    46 -6
    47 6 2550 3150 3450 4350
    48 6 2925 4050 3075 4350
    49 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3000 4125 20 20 3000 4125 3020 4125
    50 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3000 4200 20 20 3000 4200 3020 4200
    51 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3000 4275 20 20 3000 4275 3020 4275
    52 -6
    53 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    54          2550 3375 3450 3375 3450 3600 2550 3600 2550 3375
    55 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    56          2550 3750 3450 3750 3450 3975 2550 3975 2550 3750
    57 4 1 0 50 -1 0 10 0.0000 2 180 900 3000 3300 local pools\001
    58 -6
    59 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    60          2850 1800 2850 2400
    61 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    62          3000 1800 3000 2400
    63 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    64          3150 1800 3150 2400
    65 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    66          3300 1800 3300 2400
    67 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    68          3450 1800 3450 2400
    69 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    70          2550 1800 2550 2400
    71 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    72          2400 1950 3600 1950
    73 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    74          2700 1800 2700 2400
    75 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    76          2400 2100 3600 2100
    77 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    78          2400 1800 3600 1800 3600 2400 2400 2400 2400 1800
    79 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    80          2400 2250 3600 2250
    81 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    82         1 1 1.00 45.00 90.00
    83          2475 2325 2475 2550
    84 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    85         1 1 1.00 45.00 90.00
    86          2475 2625 2475 2850
    87 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    88          2400 2850 2550 2850 2550 3000 2400 3000 2400 2850
    89 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    90          2400 2550 2550 2550 2550 2700 2400 2700 2400 2550
    91 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    92         1 1 1.00 45.00 90.00
    93          2925 2175 2925 2550
    94 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    95         1 1 1.00 45.00 90.00
    96          2925 2625 2925 2850
    97 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    98          2850 2850 3000 2850 3000 3000 2850 3000 2850 2850
    99 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    100          2850 2550 3000 2550 3000 2700 2850 2700 2850 2550
    101 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    102          3600 1650 3600 2550
    103 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    104         1 1 1.00 45.00 90.00
    105          3375 2325 3375 2550
    106 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    107          3225 2550 3525 2550 3525 2700 3225 2700 3225 2550
    108 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    109          4050 1800 4050 2400
    110 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    111          4200 1800 4200 2400
    112 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    113          4350 1800 4350 2400
    114 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    115          4500 1800 4500 2400
    116 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    117          4650 1800 4650 2400
    118 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    119          3750 1800 3750 2400
    120 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    121          3600 1950 4800 1950
    122 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    123          3900 1800 3900 2400
    124 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    125          3600 2100 4800 2100
    126 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    127          3600 1800 4800 1800 4800 2400 3600 2400 3600 1800
    128 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    129          3600 2250 4800 2250
    130 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    131         1 1 1.00 45.00 90.00
    132          4125 2175 4125 2550
    133 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    134          4050 2550 4200 2550 4200 2700 4050 2700 4050 2550
    135 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    136          4800 1650 4800 2550
    137 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    138          5400 1650 5400 2550
    139 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    140          6000 1650 6000 2550
    141 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    142          4800 1800 6600 1800 6600 2400 4800 2400 4800 1800
    143 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    144         1 1 1.00 45.00 90.00
    145          4575 2625 4575 2850
    146 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    147         1 1 1.00 45.00 90.00
    148          4575 2325 4575 2550
    149 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    150          4425 2550 4725 2550 4725 2700 4425 2700 4425 2550
    151 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    152          4425 2850 4725 2850 4725 3000 4425 3000 4425 2850
    153 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    154          3750 3375 4650 3375 4650 3600 3750 3600 3750 3375
    155 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    156          3750 3750 4650 3750 4650 3975 3750 3975 3750 3750
    157 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    158          3825 4650 5325 4650 5325 4950 3825 4950 3825 4650
     106 3000 3375 3150 3675
     111 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3075 3450 20 20 3075 3450 3095 3450
     121 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3075 3525 20 20 3075 3525 3095 3525
     131 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3075 3600 20 20 3075 3600 3095 3600
     14-6
     156 3675 1950 3900 2100
     161 3 0 1 0 0 50 -1 20 0.000 1 1.5708 3750 2025 20 20 3750 2025 3750 2005
     171 3 0 1 0 0 50 -1 20 0.000 1 1.5708 3825 2025 20 20 3825 2025 3825 2005
     181 3 0 1 0 0 50 -1 20 0.000 1 1.5708 3900 2025 20 20 3900 2025 3900 2005
     19-6
     206 5475 1950 5700 2100
     211 3 0 1 0 0 50 -1 20 0.000 1 1.5708 5550 2025 20 20 5550 2025 5550 2005
     221 3 0 1 0 0 50 -1 20 0.000 1 1.5708 5625 2025 20 20 5625 2025 5625 2005
     231 3 0 1 0 0 50 -1 20 0.000 1 1.5708 5700 2025 20 20 5700 2025 5700 2005
     24-6
     256 4800 3375 4950 3675
     261 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4875 3450 20 20 4875 3450 4895 3450
     271 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4875 3525 20 20 4875 3525 4895 3525
     281 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4875 3600 20 20 4875 3600 4895 3600
     29-6
     306 4200 3900 4500 4125
     314 1 0 50 -1 0 9 0.0000 2 105 210 4350 4075 HB\001
     32-6
     336 3600 3900 3900 4125
     344 1 0 50 -1 0 9 0.0000 2 105 210 3750 4075 HB\001
     35-6
     366 3300 3900 3600 4125
     374 1 0 50 -1 0 9 0.0000 2 105 210 3450 4075 HB\001
     38-6
     396 2850 3900 3150 4125
     404 1 0 50 -1 0 9 0.0000 2 105 210 3000 4075 HB\001
     41-6
     426 2400 3900 2700 4125
     434 1 0 50 -1 0 9 0.0000 2 105 210 2550 4075 HB\001
     44-6
     456 5775 1950 6000 2100
     461 3 0 1 0 0 50 -1 20 0.000 1 1.5708 5850 2025 20 20 5850 2025 5850 2005
     471 3 0 1 0 0 50 -1 20 0.000 1 1.5708 5925 2025 20 20 5925 2025 5925 2005
     481 3 0 1 0 0 50 -1 20 0.000 1 1.5708 6000 2025 20 20 6000 2025 6000 2005
     49-6
     506 1125 1275 2250 3750
     516 1200 3375 2250 3750
    159522 2 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
    160          1200 3900 1950 3900 1950 4425 1200 4425 1200 3900
     53         1200 3375 2250 3375 2250 3750 1200 3750 1200 3375
     544 1 0 50 -1 0 9 0.0000 2 135 675 1725 3525 fast lookup\001
     554 1 0 50 -1 0 9 0.0000 2 105 285 1725 3675 table\001
     56-6
     576 1200 2925 2250 3225
     582 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     59         1200 2925 2250 2925 2250 3225 1200 3225 1200 2925
     604 1 0 50 -1 0 9 0.0000 2 105 720 1725 3150 bucket sizes\001
     61-6
     622 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     63         1125 1500 2175 1500 2175 2775 1125 2775 1125 1500
     644 1 0 50 -1 0 9 0.0000 2 105 315 1650 1650 locks\001
     654 1 0 50 -1 0 9 0.0000 2 105 555 1650 1800 sbrk start\001
     664 1 0 50 -1 0 9 0.0000 2 135 900 1650 2700 free array space\001
     674 1 0 50 -1 0 9 0.0000 2 135 705 1650 1425 heap master\001
     684 1 0 50 -1 0 9 0.0000 2 105 690 1650 2250 sbrk extend\001
     694 1 0 50 -1 0 9 0.0000 2 135 765 1650 2400 free heap top\001
     704 1 0 50 -1 0 9 0.0000 2 135 855 1650 2550 last heap array\001
     714 1 0 50 -1 0 9 0.0000 2 135 900 1650 1950 sbrk remaining\001
     724 1 0 50 -1 0 9 0.0000 2 105 510 1650 2100 sbrk end\001
     73-6
     746 6825 3075 7575 3600
    161752 2 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
    162          1200 3000 1800 3000 1800 3525 1200 3525 1200 3000
    163 4 2 0 50 -1 0 10 0.0000 2 135 300 2325 1950 lock\001
    164 4 2 0 50 -1 0 10 0.0000 2 120 270 2325 2100 size\001
    165 4 2 0 50 -1 0 10 0.0000 2 120 270 2325 2400 free\001
    166 4 2 0 50 -1 0 10 0.0000 2 165 495 2325 2250 (away)\001
    167 4 1 0 50 -1 0 10 0.0000 2 180 1455 4575 4575 global pool (sbrk)\001
    168 4 1 0 50 -1 0 10 0.0000 2 180 900 4200 3300 local pools\001
    169 4 1 0 50 -1 0 10 0.0000 2 180 1695 4350 1425 global heaps (mmap)\001
    170 4 1 0 50 -1 0 10 0.0000 2 180 810 3000 1725 heap$_1$\001
    171 4 1 0 50 -1 0 10 0.0000 2 180 810 4200 1725 heap$_2$\001
    172 4 1 0 50 -1 0 10 0.0000 2 120 255 1500 3150 fast\001
    173 4 1 0 50 -1 0 10 0.0000 2 180 495 1500 3300 lookup\001
    174 4 1 0 50 -1 0 10 0.0000 2 135 330 1500 3450 table\001
    175 4 1 0 50 -1 0 10 0.0000 2 120 315 1575 4050 stats\001
    176 4 1 0 50 -1 0 10 0.0000 2 120 600 1575 4200 counters\001
    177 4 1 0 50 -1 0 10 0.0000 2 135 330 1575 4350 table\001
     76         6825 3075 7575 3075 7575 3600 6825 3600 6825 3075
     774 1 0 50 -1 0 9 0.0000 2 90 270 7200 3225 stats\001
     784 1 0 50 -1 0 9 0.0000 2 90 495 7200 3375 counters\001
     794 1 0 50 -1 0 9 0.0000 2 105 285 7200 3525 table\001
     80-6
     816 7950 2775 8100 3075
     821 3 0 1 0 0 50 -1 20 0.000 1 0.0000 8025 2850 20 20 8025 2850 8045 2850
     831 3 0 1 0 0 50 -1 20 0.000 1 0.0000 8025 2925 20 20 8025 2925 8045 2925
     841 3 0 1 0 0 50 -1 20 0.000 1 0.0000 8025 3000 20 20 8025 3000 8045 3000
     85-6
     866 7935 4005 8100 4035
     871 3 0 1 0 0 50 -1 20 0.000 1 1.5708 7950 4025 20 20 7950 4025 7950 4005
     881 3 0 1 0 0 50 -1 20 0.000 1 1.5708 8025 4025 20 20 8025 4025 8025 4005
     891 3 0 1 0 0 50 -1 20 0.000 1 1.5708 8100 4025 20 20 8100 4025 8100 4005
     90-6
     912 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     92         4275 1725 5475 1725 5475 2400 4275 2400 4275 1725
     932 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     94         2475 1725 3675 1725 3675 2400 2475 2400 2475 1725
     952 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     96         2625 2700 3525 2700 3525 2925 2625 2925 2625 2700
     972 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     98         4800 3900 4800 4125
     992 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     100         3300 3900 3300 4125
     1012 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     102         3900 3900 3900 4125
     1032 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     104         4425 2700 5325 2700 5325 2925 4425 2925 4425 2700
     1052 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     106         4425 3075 5325 3075 5325 3300 4425 3300 4425 3075
     1072 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     108         2625 3075 3975 3075 3975 3300 2625 3300 2625 3075
     1092 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 4
     110        1 1 1.00 45.00 90.00
     111         4500 2275 4350 2275 4350 3600 4500 3600
     1122 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 4
     113        1 1 1.00 45.00 90.00
     114         2700 2275 2550 2275 2550 3600 2700 3600
     1152 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     116         3600 3900 3600 4125
     1172 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     118         2700 3900 2700 4125
     1192 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
     120         2400 1500 3975 1500 3975 2475 2400 2475 2400 1500
     1212 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
     122         4200 1500 5775 1500 5775 2475 4200 2475 4200 1500
     1232 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
     124         2400 3900 6000 3900 6000 4125 2400 4125 2400 3900
     1252 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
     126         7275 3900 7875 3900 7875 4125 7275 4125 7275 3900
     1272 2 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
     128         6750 3900 7125 3900 7125 4125 6750 4125 6750 3900
     1292 1 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     130         6075 1350 6075 3675
     1312 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     132         7125 1725 7125 2025
     1332 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     134         7275 1725 7275 2025
     1352 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     136         7425 1725 7425 2025
     1372 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     138         7575 1725 7575 2025
     1392 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     140         7725 1725 7725 2025
     1412 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     142         6675 1725 7875 1725
     1432 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     144         6975 1725 6975 2025
     1452 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     146         6675 1875 7875 1875
     1472 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     148         6825 1725 6825 2025
     1492 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     150         6675 1725 7875 1725 7875 2325 6675 2325 6675 1725
     1512 2 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 5
     152         6675 2025 7875 2025 7875 2175 6675 2175 6675 2025
     1532 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     154         6825 2025 6825 2175
     1552 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     156         6975 2025 6975 2175
     1572 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     158         7125 2025 7125 2175
     1592 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     160         7275 2025 7275 2175
     1612 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     162         7425 2025 7425 2175
     1632 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     164         7575 2025 7575 2175
     1652 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     166         7725 2025 7725 2175
     1672 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     168         6825 2175 6825 2325
     1692 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     170         6975 2175 6975 2325
     1712 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     172         7125 2175 7125 2325
     1732 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     174         7275 2175 7275 2325
     1752 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     176         7425 2175 7425 2325
     1772 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     178         7575 2175 7575 2325
     1792 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
     180         7725 2175 7725 2325
     1812 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
     182        1 1 1.00 45.00 90.00
     183         6750 2250 6750 2475
     1842 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
     185        1 1 1.00 45.00 90.00
     186         7200 2250 7200 2475
     1872 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
     188        1 1 1.00 45.00 90.00
     189         7650 2250 7650 2475
     1902 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
     191        1 1 1.00 45.00 90.00
     192         6750 2550 6750 2775
     1932 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     194         6675 2475 6825 2475 6825 2625 6675 2625 6675 2475
     1952 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
     196        1 1 1.00 45.00 90.00
     197         7200 2550 7200 2775
     1982 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     199         7500 2475 7800 2475 7800 2625 7500 2625 7500 2475
     2002 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     201         7100 2475 7325 2475 7325 2625 7100 2625 7100 2475
     2022 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     203         6675 2775 6825 2775 6825 2925 6675 2925 6675 2775
     2042 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     205         7100 2775 7325 2775 7325 2925 7100 2925 7100 2775
     2062 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
     207        1 1 1.00 45.00 90.00
     208         7800 2100 8025 2100 8025 2475
     2092 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     210         7950 2475 8100 2475 8100 2625 7950 2625 7950 2475
     2112 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
     212        1 1 1.00 45.00 90.00
     213         8025 2550 8025 2775
     2144 1 0 50 -1 0 9 0.0000 2 135 795 4875 1650 heap$_{16}$\001
     2154 1 0 50 -1 2 9 0.0000 2 135 1035 4875 1875 per heap structs\001
     2164 1 0 50 -1 0 9 0.0000 2 135 990 3075 2175 buffer remaining\001
     2174 1 0 50 -1 0 9 0.0000 2 105 645 3075 2325 buffer start\001
     2184 1 0 50 -1 0 9 0.0000 2 135 825 3075 2025 next free heap\001
     2194 1 0 50 -1 2 9 0.0000 2 135 1035 3075 1875 per heap structs\001
     2204 1 0 50 -1 0 9 0.0000 2 135 570 3075 1650 heap$_0$\001
     2214 1 0 50 -1 0 9 0.0000 2 135 720 3075 2625 heap buffers\001
     2224 1 0 50 -1 0 9 0.0000 2 135 825 4875 2025 next free heap\001
     2234 1 0 50 -1 0 9 0.0000 2 135 900 3150 1425 heap array$_0$\001
     2244 1 0 50 -1 0 9 0.0000 2 135 900 4950 1425 heap array$_1$\001
     2254 2 0 50 -1 0 9 0.0000 2 105 255 2325 4050 sbrk\001
     2264 1 0 50 -1 0 9 0.0000 2 90 255 2400 4275 start\001
     2274 1 0 50 -1 0 9 0.0000 2 105 645 4875 2325 buffer start\001
     2284 1 0 50 -1 0 9 0.0000 2 135 720 4875 2625 heap buffers\001
     2294 1 0 50 -1 0 9 0.0000 2 135 990 4875 2175 buffer remaining\001
     2304 1 0 50 -1 0 9 0.0000 2 135 600 5400 4050 remaining\001
     2314 2 0 50 -1 0 9 0.0000 2 105 375 6675 4050 mmap\001
     2324 1 0 50 -1 2 9 0.0000 2 135 1245 7200 1425 per heap structures\001
     2334 2 0 50 -1 0 9 0.0000 2 105 225 6600 2025 size\001
     2344 2 0 50 -1 0 9 0.0000 2 135 270 6600 1875 heap\001
     2354 1 0 50 -1 0 9 0.0000 2 105 465 7275 1650 freelists\001
     2364 2 0 50 -1 0 9 0.0000 2 105 210 6600 2325 free\001
     2374 2 0 50 -1 0 9 0.0000 2 90 405 6600 2175 remote\001
     2384 1 0 50 -1 0 9 0.0000 2 105 210 6000 4275 end\001
  • doc/papers/llheap/local.bib

    r7ca6bf1 r1dec8f3  
    3535}
    3636
    37 @article{Chicken,
    38     keywords    = {Chicken},
    39     author      = {Doug Zongker},
    40     title       = {Chicken Chicken Chicken: Chicken Chicken},
    41     year        = 2006
    42 }
    43 
    4437@misc{TBB,
    4538    keywords    = {Intel, TBB},
    46     key = {TBB},
     39    key         = {TBB},
    4740    title       = {Thread Building Blocks},
    4841    howpublished= {Intel, \url{https://www.threadingbuildingblocks.org}},
     
    5043}
    5144
     45@misc{litemalloc,
     46    keywords    = {lock free},
     47    author      = {Ivan Tkatchev and Veniamin Gvozdikov},
     48    title       = {Lite Malloc},
     49    month       = jul,
     50    year        = 2018,
     51    howpublished= {\url{https://github.com/Begun/lockfree-malloc}},
     52}
     53
    5254@manual{www-cfa,
    53     key = {CFA},
     55    key         = {CFA},
    5456    keywords    = {Cforall},
    5557    author      = {C$\forall$},
     
    6567    year        = 2015,
    6668    note        = {\url{https://www.iso.org/standard/66343.html}},
    67 }
    68 
    69 @misc{BankTransfer,
    70     key = {Bank Transfer},
    71     keywords    = {Bank Transfer},
    72     title       = {Bank Account Transfer Problem},
    73     howpublished        = {Wiki Wiki Web, \url{http://wiki.c2.com/?BankAccountTransferProblem}},
    74     year        = 2010
    7569}
    7670
     
    164158@article{Berger00,
    165159    author      = {Emery D. Berger and Kathryn S. McKinley and Robert D. Blumofe and Paul R. Wilson},
    166     title       = {Hoard: A Scalable Memory Allocator for Multithreaded Applications},
    167     booktitle   = {International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX)},
    168     journal     = sigplan,
    169     volume      = 35,
    170     number      = 11,
     160    title       = {Hoard: a scalable memory allocator for multithreaded applications},
     161    publisher   = {Association for Computing Machinery},
     162    address     = {New York, NY, USA},
     163    volume      = 28,
     164    number      = 5,
     165    journal     = {SIGARCH Comput. Archit. News},
     166    year        = {2000},
    171167    month       = nov,
    172     year        = 2000,
    173168    pages       = {117-128},
    174     note        = {International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX)},
    175169}
    176170
     
    178172    author      = {Emery D. Berger and Benjamin G. Zorn and Kathryn S. McKinley},
    179173    title       = {Reconsidering Custom Memory Allocation},
    180     organization= {Proceedings of the 17th ACM SIGPLAN Conference on Object-Oriented Programming: Systems, Languages, and Applications (OOPSLA) 2002},
     174    organization= {Proc. of the 17th ACM SIGPLAN Conference on Object-Oriented Programming: Systems, Languages, and Applications (OOPSLA) 2002},
    181175    month       = nov,
    182176    year        = 2002,
     
    194188    pages       = {176-185},
    195189    year        = 1999,
    196     url         = {http://citeseer.ist.psu.edu/article/larson98memory.html}
     190    note        = {\url{http://citeseer.ist.psu.edu/article/larson98memory.html}},
    197191}
    198192
     
    204198    address     = {Chalmers University of Technology},
    205199    year        = 2004,
    206     url         = {http://citeseer.ist.psu.edu/gidenstam04allocating.html}
     200    note        = {\url{http://citeseer.ist.psu.edu/gidenstam04allocating.html}},
    207201}
    208202
     
    213207    year        = 2002,
    214208    month       = aug,
    215     url         = {http://citeseer.ist.psu.edu/article/berger02memory.html}
     209    note        = {\url{http://citeseer.ist.psu.edu/article/berger02memory.html}},
    216210}
    217211
     
    260254    month       = jul,
    261255    year        = 2001,
    262     url         = {http://www.ddj.com/mobile/184404685?pgno=1}
     256    note        = {\url{http://www.ddj.com/mobile/184404685?pgno=1}},
    263257}
    264258
     
    271265
    272266@misc{tcmalloc,
    273     author      = {Sanjay Ghemawat and Paul Menage},
    274     title       = {tcmalloc version 1.5},
    275     month       = jan,
    276     year        = 2010,
    277     howpublished= {\url{http://google-perftools.googlecode.com/files/google-perftools-1.5.tar.gz}},
     267    author      = {{multiple contributors}},
     268    title       = {TCMalloc : Thread-Caching Malloc},
     269    month       = dec,
     270    year        = 2024,
     271    howpublished= {\url{https://gperftools.github.io/gperftools/tcmalloc.html}},
    278272}
    279273
     
    282276    title       = {Scalable Locality-Conscious Multithreaded Memory Allocation},
    283277    organization= {International Symposium on Memory Management (ISSM'06)},
     278    year        = 2006,
    284279    month       = jun,
    285     year        = 2006,
    286     pages       = {84-94},
    287280    location    = {Ottawa, Ontario, Canada},
    288281    publisher   = {ACM},
    289282    address     = {New York, NY, USA},
     283    pages       = {84-94},
    290284}
    291285
     
    294288    title       = {Streamflow},
    295289    howpublished= {\url{http://people.cs.vt.edu/~scschnei/streamflow}},
     290}
     291
     292@misc{llheap,
     293    author      = {Peter A. Buhr and Mubeen Zulfiqar},
     294    title       = {llheap: low-latency memory allocator},
     295    year        = 2025,
     296    month       = jun,
     297    howpublished= {\url{https://github.com/cforall/llheap}},
    296298}
    297299
     
    303305    year        = 1994,
    304306    month       = nov,
    305     url         = {http://citeseer.ist.psu.edu/article/blumofe94scheduling.html}
     307    note        = {\url{http://citeseer.ist.psu.edu/article/blumofe94scheduling.html}},
    306308}
    307309
     
    322324    pages       = {177-186},
    323325    year        = 1993,
    324     url         = {http://citeseer.ist.psu.edu/grunwald93improving.html}
     326    note        = {\url{http://citeseer.ist.psu.edu/grunwald93improving.html}},
    325327}
    326328
     
    331333    address     = {Kinross Scotland, UK},
    332334    year        = 1995,
    333     url         = {http://citeseer.ist.psu.edu/wilson95dynamic.html}
     335    note        = {\url{http://citeseer.ist.psu.edu/wilson95dynamic.html}},
    334336}
    335337
     
    341343    isbn        = {1-58113-338-3},
    342344    pages       = {9-17},
    343     location    = {San Jose, California, United States},
     345    location    = {San Jose, CA, USA},
    344346    publisher   = {ACM Press},
    345347    address     = {New York, NY, USA}
     
    399401    author      = {Paul R. Wilson},
    400402    title       = {Locality of Reference, Patterns in Program Behavior, Memory Management, and Memory Hierarchies},
    401     url         = {http://citeseer.ist.psu.edu/337869.html}
     403    note        = {\url{http://citeseer.ist.psu.edu/337869.html}},
    402404}
    403405
     
    421423    isbn        = {0-89791-598-4},
    422424    pages       = {177-186},
    423     location    = {Albuquerque, New Mexico, United States},
     425    location    = {Albuquerque, New Mexico, USA},
    424426    publisher   = {ACM Press},
    425427    address     = {New York, NY, USA}
     
    432434    month       = feb,
    433435    year        = 2001,
    434     url         = {http://www.ddj.com/cpp/184403766}
     436    note        = {\url{http://www.ddj.com/cpp/184403766}},
    435437}
    436438
     
    460462    author      = {Xianglong Huang and Brian T Lewis and Kathryn S McKinley},
    461463    title       = {Dynamic Code Management: Improving Whole Program Code Locality in Managed Runtimes},
    462     organization= {VEE '06: Proceedings of the 2nd international conference on Virtual execution environments},
     464    organization= {VEE '06: Proc. of the 2nd International Conf. on Virtual Execution Environments},
    463465    year        = 2006,
    464     isbn        = {1-59593-332-6},
    465     pages       = {133-143},
    466466    location    = {Ottawa, Ontario, Canada},
    467467    publisher   = {ACM Press},
    468     address     = {New York, NY, USA}
    469  }
     468    address     = {New York, NY, USA},
     469    pages       = {133-143},
     470}
    470471
    471472@inproceedings{Herlihy03,
     
    475476    year        = 2003,
    476477    month       = may,
    477     url         = {http://www.cs.brown.edu/~mph/publications.html}
     478    note        = {\url{http://www.cs.brown.edu/~mph/publications.html}},
    478479}
    479480
     
    485486    address     = {130 Lytton Avenue, Palo Alto, CA 94301 and Campus Box 430, Boulder, CO 80309},
    486487    year        = 1993,
    487     url         = {http://citeseer.ist.psu.edu/detlefs93memory.html}
     488    note        = {\url{http://citeseer.ist.psu.edu/detlefs93memory.html}},
    488489}
    489490
     
    530531    address     = {Chalmers University of Technology},
    531532    year        = 2004,
    532     url         = {http://citeseer.ist.psu.edu/gidenstam04allocating.html}
     533    note        = {\url{http://citeseer.ist.psu.edu/gidenstam04allocating.html}},
    533534}
    534535
     
    539540    year        = 2002,
    540541    month       = aug,
    541     url         = {http://citeseer.ist.psu.edu/article/berger02memory.html}
     542    note        = {\url{http://citeseer.ist.psu.edu/article/berger02memory.html}},
    542543}
    543544
     
    558559@misc{tbbmalloc,
    559560    key         = {tbbmalloc},
    560     author      = {multiple contributors},
     561    author      = {{multiple contributors}},
    561562    title       = {Threading Building Blocks},
    562563    month       = mar,
     
    590591@misc{glibc,
    591592    key         = {glibc},
    592     author      = {multiple contributors},
     593    author      = {{multiple contributors}},
    593594    title       = {glibc version 2.31},
    594595    month       = feb,
     
    599600@misc{jemalloc,
    600601    key         = {jemalloc},
    601     author      = {multiple contributors},
     602    author      = {{multiple contributors}},
    602603    title       = {jemalloc version 5.2.1},
    603604    month       = apr,
    604605    year        = 2022,
    605     howpublished= {\url{https://github.com/jemalloc/jemalloc}{https://github.com/jemalloc/jemalloc}},
     606    howpublished= {\url{https://github.com/jemalloc/jemalloc}},
     607}
     608
     609@misc{Evans06,
     610    author      = {Jason Evans},
     611    title       = {A Scalable Concurrent \texttt{malloc(3)} Implementation for {FreeBSD}},
     612    month       = apr,
     613    year        = 2006,
     614    howpublished= {\url{https://papers.freebsd.org/2006/bsdcan/evans-jemalloc.files/evans-jemalloc-paper.pdf}},
    606615}
    607616
     
    631640    author      = {R. Blumofe and C. Leiserson},
    632641    title       = {Scheduling Multithreaded Computations by Work Stealing},
    633     booktitle   = {Proceedings of the 35th Annual Symposium on Foundations of Computer Science, Santa Fe, New Mexico.},
     642    organization= {Proceedings of the 35th Annual Symposium on Foundations of Computer Science, Santa Fe, New Mexico.},
    634643    pages       = {356-368},
    635644    year        = 1994,
    636645    month       = nov,
    637     url         = {http://citeseer.ist.psu.edu/article/blumofe94scheduling.html}
     646    note        = {\url{http://citeseer.ist.psu.edu/article/blumofe94scheduling.html}},
    638647}
    639648
     
    647656    issn        = {0164-1212},
    648657    pages       = {107-118},
    649     doi         = {http://dx.doi.org/10.1016/S0164-1212(00)00122-9},
    650658    publisher   = {Elsevier Science Inc.},
    651659    address     = {New York, NY, USA}
     
    655663    author      = {Paul R. Wilson},
    656664    title       = {Locality of Reference, Patterns in Program Behavior, Memory Management, and Memory Hierarchies},
    657     url         = {http://citeseer.ist.psu.edu/337869.html}
     665    note        = {\url{http://citeseer.ist.psu.edu/337869.html}},
    658666}
    659667
     
    661669    author      = {Dirk Grunwald and Benjamin Zorn and Robert Henderson},
    662670    title       = {Improving the Cache Locality of Memory Allocation},
    663     booktitle   = {PLDI '93: Proceedings of the ACM SIGPLAN 1993 conference on Programming language design and implementation},
     671    organization= {PLDI '93: Proceedings of the ACM SIGPLAN 1993 conference on Programming language design and implementation},
    664672    year        = 1993,
    665     isbn        = {0-89791-598-4},
    666673    pages       = {177-186},
    667     location    = {Albuquerque, New Mexico, United States},
    668     doi         = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/155090.155107},
     674    location    = {Albuquerque, New Mexico, USA},
    669675    publisher   = {ACM Press},
    670676    address     = {New York, NY, USA}
     677}
     678
     679@inproceedings{Bolosky93,
     680    author      = {William J. Bolosky and Michael L. Scott},
     681    title       = {False Sharing and its Effect on Shared Memory Performance},
     682    organization= {4th Symp. on Experiences with Distributed and Multiprocessor Systems (SEDMS)},
     683    year        = 1993,
     684    location    = {San Diego, CA, USA},
     685    publisher   = {USENIX Association},
     686    address     = {Berkeley, CA, USA},
     687    note        = {\url{https://www.cs.rochester.edu/u/scott/papers/1993\_SEDMS\_false\_sharing.pdf}},
    671688}
    672689
     
    677694    month       = feb,
    678695    year        = 2001,
    679     url         = {http://www.ddj.com/cpp/184403766}
     696    note        = {\url{http://www.ddj.com/cpp/184403766}},
     697}
     698
     699@misc{Desnoyers19,
     700    author      = {Mathieu Desnoyers},
     701    title       = {The 5-year journey to bring restartable sequences to Linux},
     702    month       = feb,
     703    year        = 2019,
     704    howpublished={\url{https://www.efficios.com/blog/2019/02/08/linux-restartable-sequences}},
    680705}
    681706
     
    698723    author      = {M. Herlihy and V. Luchangco and M. Moir},
    699724    title       = {Obstruction-free Synchronization: Double-ended Queues as an Example},
    700     booktitle   = {Proceedings of the 23rd IEEE International Conference on Distributed Computing Systems},
     725    organization= {Proceedings of the 23rd IEEE International Conference on Distributed Computing Systems},
    701726    year        = 2003,
    702727    month       = may,
    703     url         = {http://www.cs.brown.edu/~mph/publications.html}
    704 }
     728    note        = {\url{http://www.cs.brown.edu/~mph/publications.html}},
     729}
     730
     731@article{Fatourou12,
     732    keywords    = {synchronization techniques, hierarchical algorithms, concurrent data structures, combining, blocking algorithms},
     733    author      = {Panagiota Fatourou and Nikolaos D. Kallimanis},
     734    title       = {Revisiting the Combining Synchronization Technique},
     735    publisher   = {ACM},
     736    address     = {New York, NY, USA},
     737    volume      = 47,
     738    number      = 8,
     739    journal     = {SIGPLAN Not.},
     740    year        = 2012,
     741    month       = feb,
     742    pages       = {257-266},
     743}
     744
     745@manual{Go1.3,
     746    keywords    = {conservative garbage collection},
     747    title       = {Go 1.3 Release Notes},
     748    month       = jun,
     749    year        = 2014,
     750    note        = {\url{https://go.dev/doc/go1.3\#garbage_collector}},
     751}
     752
     753@misc{JavaScriptGC,
      754    keywords    = {precise garbage collection},
     755    author      = {Steve Fink},
     756    title       = {JavaScript: Clawing Our Way Back To Precision},
     757    howpublished= {\url{https://blog.mozilla.org/javascript/2013/07/18/clawing-our-way-back-to-precision/}},
     758    month       = jul,
     759    year        = 2013,
     760}
  • doc/proposals/modules-alvin/proposal.md

    r7ca6bf1 r1dec8f3  
    2626    Other kinds of symbols
    2727    Implementing module namespaces
    28 Porting existing C code
    29 Handling initialization order
     28    Implementing code analysis tools
     29    Porting existing C code
     30    Handling initialization order
    3031Why C++20 modules failed (and why we will succeed)
      32    What we do differently
     33    Notable differences between C++ and Cforall
    3134-->
    3235
     
     5356First, to turn C translation units into modules, our proposed module system should not require fundamental architectural changes to an existing C project. Crucially, there needs to be a way to represent forward declarations of other modules' contents.
    5457
    55 Second, most modern languages don't require an additioanl file just to link symbols between files (Object Oriented languages have interfaces, but they are not required). We would like developers to only need to declare a symbol once, and leave symbol discovery to the module system.
     58Second, most modern languages don't require an additional file just to link symbols between files (Object Oriented languages have interfaces, but they are not required). We would like developers to only need to declare a symbol once, and leave symbol discovery to the module system.
    5659
     5760Third, it should be clear which module's symbols are visible at a given time: a module's symbols should only be visible if said module allows them to be. Ideally, such symbols are only visible if said module exports them and the using module imports said module (we will find there are special cases where we need to leak some information).
     
     188191In addition to name disambiguation, some symbols need additional information in order to be usable by importers. For example, size/alignment information of types, function bodies for inline functions, and trait information for polymorphic functions. This information is obtained by resolving the symbols in any imported module, and so on, as necessary.
    189192
    190 This task is recursive, which raises the problem of circular imports: What if we recurse back to `data/graph/node` (or any module that creates a cycle)? Since we reason at the level of symbol definitions, as long as we are analyzing different symbols inside the circularly imported module, we don't actually have a cycle. This leaves us with handling the problem where we circle back to the same symbol. For size/alignment analysis, coming back to the same type means that said type contains itself, which for our purposes is not resolvable (emit error and stop). If an inline function calls other inline function that mutually recurses with itself, we produce a forward declaration of the inline function within the underlying C code (Cforall compiles down to C). For trait information, a trait works like a collection of conditions, which means it includes itself, which means we can safely ignore circular references (we may want to emit a warning though). Since we can handle all of our circular problems, our system is well-defined here.
      193This task is recursive, which raises the problem of circular imports: What if we recurse back to `data/graph/node` (or any module that creates a cycle)? Since we reason at the level of symbol definitions, as long as we are analyzing different symbols inside the circularly imported module, we don't actually have a cycle. This leaves us with handling the problem where we circle back to the same symbol. For size/alignment analysis, coming back to the same type means that the type contains itself, which for our purposes is not resolvable (emit an error and stop). If an inline function is mutually recursive with another inline function, we produce a forward declaration of the inline function within the underlying C code (Cforall compiles down to C). For trait information, a trait works like a collection of conditions; since a trait trivially includes itself, we can safely ignore circular references (though we may want to emit a warning). Since we can handle all of our circular cases, our system is well-defined here.
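
A minimal sketch of the forward-declaration strategy for mutually recursive inline functions, written as plain C; the function and module names are hypothetical, and the real compiler output would differ in detail:

```
#include <stdio.h>

// Hypothetical lowering: modules `a` and `b` each export an inline
// function that calls the other; the module system emits a forward
// declaration so the generated C compiles without a cycle.
static inline int b_is_odd( int n );            // forward declaration breaks the cycle
static inline int a_is_even( int n ) {
    return n == 0 ? 1 : b_is_odd( n - 1 );      // call into the other "module"
}
static inline int b_is_odd( int n ) {
    return n == 0 ? 0 : a_is_even( n - 1 );
}

int main( void ) {
    printf( "%d\n", a_is_even( 10 ) );          // prints 1
    return 0;
}
```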
    191194
    192195#### Resolving symbols
     
    450453More discussion on the details of how modules are implemented is found in section Implementing module namespaces.
    451454
    452 [[THE REST OF THIS DOCUMENT IS A WORK IN PROGRESS]]
    453 
    454455### Cyclic modules
     456
      457Acyclic vs cyclic in this case refers to whether a module needs to be fully compiled before its symbols can be used. In many languages, modules are acyclic (eg. Python, OCaml, Go). When all symbols within a module are fully defined before they are used by other modules, the module system becomes much simpler to implement and allows us to incorporate metaprogramming into our modules. In contrast, C allows declaring symbols and using them in a limited capacity before they are defined, and Rust compiles all modules within a crate, allowing modules to use each other's symbols without imposing ordering restrictions. Our proposed module system takes a slightly different approach than Rust by having the module system analyze other modules as necessary instead of defining crate boundaries. These are examples of cyclic modules, where the module system needs to do extra work in order to keep track of partial definitions.
     458
      459While acyclic modules can help organize a codebase and improve code readability, they enforce a certain code structure that may be incompatible with many common design patterns. For example, parsing an expression tends to require recursive functions and data structures, which would need to exist in a single acyclic module. However, there are many practical reasons (see Conway's law) why a codebase may take a different structure than it was initially designed for. Especially when building off of an existing language such as C, migration compatibility and incremental development are extremely important. As such, since C allows forward declarations, our module system allows cyclic dependencies.
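
A small sketch of such a cycle, using the proposed `module;`/`import` syntax; the file names, the `export` placement, and the exact import form are illustrative:

```
// src/expr.cfa - module `expr`
module;
import stmt;                                  // expr -> stmt -> expr: a cycle

export struct Expr { struct Stmt * body; };   // pointer member: resolvable

// src/stmt.cfa - module `stmt`
module;
import expr;

export struct Stmt { struct Expr * cond; };   // no by-value self-containment, so no error
```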
     460
     461In order to account for cyclic dependencies in the details of a codebase while also enforcing an acyclic high-level code structure, many languages offer two "kinds" of modules. For example, C++ has namespaces and C++20 modules, Rust has modules and crates, and Java has packages and Java 9 modules. An extension to our module system could be to generate static libraries, which would function as acyclic modules.
     462
      463*For languages that only have acyclic modules, the language's type system usually provides a way to break cycles. For example, Go has interfaces and OCaml has generic types. It may be possible to leverage Cforall's polymorphic functions and types in a similar manner. These techniques break the cycle by having caller and callee agree on some interface instead of the concrete types. However, this method requires adding an extra layer of indirection to link to the concrete implementation, which may not be desirable in a systems programming environment. An alternate approach to generic types uses something akin to C++ templates, though this technique does not work for "all polymorphic types that satisfy some trait" since we can only generate a finite amount of assembly code.*
     464
    455465### Other kinds of symbols
    456 [[union types]]
    457 The forall keyword is an addition to Cforall to support polymorphism, with polymorphic functions using dictionary passing and a single implementation. If a module exports a forall statement, the module owns the polymorphic function implementations, while the polymorphic function declarations are exported (if these were declared inline, the definition could be exported, similar to C++20 modules). Polymorphic types are instantiated from the caller's side, so their definitions are exported. This may present problems, but currently I am not familiar enough with Cforall to judge.
    458 
    459 An interesting case is to consider if Cforall could be updated to perform specialization (multiple implementations for a single function) in addition to the single implementation strategy. An example of this being done in a production language is with Rust's `impl` vs `dyn` traits. To support this, the module system would need to be updated, as we would want the multiple implementations to exist within the module that owns the forall statement.
     466
     467The formalism provided only uses a limited subset of C, which raises the question: how would this apply to other kinds of symbols?
     468
      469C has `union` types, where each of its "fields" is actually a different representation of the same piece of memory. Resolving `union` types follows the same technique as `struct` - resolve each of the field types, and a circular dependency is an error.
     470
     471C also allows defining types within the definitions of other types/variables (eg. `struct {int i;} x = {5};`). If we are to export such a statement, we would export all definitions together. This would be implemented by providing the full top-level declaration as-written to the compiler.
     472
     473Our module system would need to be changed to throw an error if we try to export a `static` function or variable. There could also be a number of features that are specific to each compiler that require special treatment by the module system (eg. `__attribute__`). This special treatment can have an arbitrary nature, making it challenging for the module system to be updated to handle them properly. If this becomes a large enough problem, a potential solution could be to use "hook functions" in order to make it easier to extend the module system as features get added.
     474
      475*C23 introduces `auto` as a type inference keyword (this is different from `auto` as a storage class specifier, which is redundant and rarely used in practice). Cforall has an advanced overloading system, and this feature can cause type inference to span an arbitrary number of statements (even potentially across functions with `auto foo() {...}`). Not only is this very challenging to implement efficiently, but it can also make code much harder to read if misused. As such, we currently do not support `auto` as a type inference keyword, though we monitor how it is used in practice in case we wish to support it in the future. I believe compile-time reflection can enable our module system to handle `auto` as a type inference keyword, though I am not certain about this.*
     476
      477*The forall keyword is an addition to Cforall to support polymorphism, with polymorphic functions using dictionary passing and a single implementation. Exporting polymorphic functions and types works in the same way as their regular counterparts from the perspective of module visibility. However, the compiler needs to be provided trait information in order to call polymorphic functions with dictionary passing, as well as the layout function in order to allocate space on the stack for polymorphic types. Such information is gathered using compile-time reflection to search for trait information.*
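
A sketch of what an exported polymorphic function might look like; the assertion follows Cforall's trait-style function-assertion syntax, but the combination with `export` is this proposal's assumption, not settled syntax:

```
// module `algo` (hypothetical)
module;

export forall( T | { int ?<?( T, T ); } )   // assertion: T supports <
T min2( T a, T b ) {
    return a < b ? a : b;   // importers supply the assertion's dictionary entry
}
```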
     478
     479*An interesting case is to consider how Cforall could be updated to perform specialization (multiple implementations for a single function) in addition to the single implementation strategy. Rust's `impl` and `dyn` traits provide a good reference for how both strategies could be implemented in the language. This raises the question: which modules should the multiple implementations belong in? Since we focus on the idea that modules should own their contents, we would want the multiple implementations to exist within the module that owns the forall statement. It would be a very interesting extension to Cforall, though a significant amount of research would need to be conducted in order to determine the feasibility of some of this.*
     480
    460481### Implementing module namespaces
     482
    461483A module is defined by having `module;` be the first statement in a source file (somewhat similar to C++20 modules). Internally, modules work like namespaces, implemented by prepending the module name in front of all declared symbols. There are multiple alternatives to determine the module name - we use option 2 for its brevity:
     484
    4624851. Have the user define the module names (eg. `module A;`). This is similar to how Java and C++ require specifying packages and namespaces, respectively. This gives the developer some flexibility on naming, as it is not tied to the file system. However, it raises some questions surrounding how module discovery works (if a module imports `A`, where is `A`?).
    463 2. Have the module names be defined from a "root directory" (eg. `module;` is module `A` because it is located at `src/A.cfa`, and `src/` is defined as the root directory). This creates import paths that look similar to include paths, allowing us to align more closely with existing C programmers. When searching for an appropriate module, a search is conducted first from the current directory, then we look for an appropriate library (similar to the include path in C). A downside is that this precludes adding nested modules (ie. module definitions within a module file), though nested modules are arguably not that important.
    464 
    465 Another design choice that was made was to have files with the same name as a folder exist outside their folder. For example, module `graph` exists at `src/graph.cfa`, while module `graph/node` exists at `src/graph/node.cfa`. The alternative is to have module `graph` at `src/graph/mod.cfa` - this may be more familiar to some developers, but this complicates module discovery (eg. if there exists a module at `src/graph.cfa` at the same time, which takes precedence? Does `graph` need to `import ../analysis` in order to import the module at `src/analysis`?). Taking insights from Rust's move from `mod.rs` to files with the same name as the folder, we opt to use the more straightforward strategy.
    466 
    467 This prepending of the module name in front of all symbols within a module can result in undesirable behaviour if we use `#include` within a module, as all of its contents will be prepended with the module name. To resolve this, we introduce extern blocks, which escape the module prefixing (eg. `extern { #include <stdio.h> }`, though with a newline after the `>`).
    468 
    469 This configuration allows for a special kind of optimization to be performed: since modules prepend their names to their symbols, every symobl can be disambiguated. This allows us to add functionality to perform a "unity build", where the entire codebase can be compiled within a single translation unit, allowing the compiler to inline functions as its discretion. This would allow us to balance a "development configuration" with the benefits of modularization, alongside a "release configuration" with maximum optimizations.
    470 ## Porting existing C code
    471 [[See C macros are not exportable]]
    472 [[See forward declarations are not necessary. An interesting idea is to allow naked `#include` within the module file, and try and handle it. It works up until type definitions... so can't work]]
    473 [[So have import statements, just like C++20 imports]]
    474 ## Handling initialization order
     4862. Have the module names be defined from a "root directory" (eg. `module;` is module `A` because it is located at `src/A.cfa`, and `src/` is defined as the root directory). This creates import paths that look similar to include paths, allowing us to align more closely with existing C programmers. When searching for an appropriate module, a search is conducted first from the current directory, then we look for an appropriate library (similar to the include path in C). A downside is that this precludes adding nested modules (ie. module definitions within a module file), though we argue that nested modules are not that important.
     487
      488Another design choice was to have files with the same name as a folder exist outside their folder. For example, module `graph` exists at `src/graph.cfa`, while module `graph/node` exists at `src/graph/node.cfa`. The alternative is to have module `graph` at `src/graph/mod.cfa` - this may be more familiar to some developers, but it complicates module discovery (eg. if there exists a module at `src/graph.cfa` at the same time, which takes precedence? Does `graph` use `import ../analysis;` or `import analysis;` in order to import the module at `src/analysis`?) and makes it less clear what module name to prepend to all declared symbols. Taking insights from Rust's move from `mod.rs` to files with the same name as the folder, we opt to use the more straightforward strategy.
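
A sketch of the resulting layout, assuming `src/` is the configured root directory (module and file names are illustrative):

```
src/                      // configured root directory
    graph.cfa             // module `graph` (file begins with `module;`)
    graph/
        node.cfa          // module `graph/node`
    analysis.cfa          // module `analysis`; imported elsewhere as `import analysis;`
```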
     489
      490Libraries also share the same symbol namespace as the modules within a codebase, which raises the question: how do we differentiate between `lib/analysis.cfa`, `lib2/analysis.cfa` and `src/analysis.cfa`? One solution is to prepend a library name to each module name when the library is generated (eg. symbol `func` in module `graph/node` in library `searcher` has full name `searcher$$graph$node$$func`). Another solution is to store within some central module configuration file a mapping between a library path and some unique library name to prefix symbols with. In fact, both strategies can be implemented: the library comes shipped with some name, which can be mapped to a different name in the central module configuration file. Imports could follow a syntax such as `import "lib:analysis";` to specify that we want the library instead of searching through the current directory.
     491
     492This prepending of the module name in front of all symbols within a module likely causes problems if we use `#include` within a module, as all of its contents will be prepended with the module name. It is challenging for our module system to disambiguate between a type definition from a header and a type definition from the module itself. One solution is to rely on annotations outputted by the preprocessor. Another solution is to introduce extern blocks, which escape the module prefixing (eg. `extern { #include <stdio.h> }`, though with the include statement on a separate line). Taking some inspiration from C++20 modules, we choose to incorporate the headers into the import statement (eg. `import stdio;`), essentially pretending that the header file is a module file. Since this is used to interface with existing C code, we don't currently consider prefixing a library name to avoid symbol clash. See Porting existing C code for details on how we accomplish this.
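
A sketch contrasting the chosen header-as-module import with the extern-block escape hatch described above; the module name and function are hypothetical, and the syntax is as proposed, not final:

```
// module `io` (src/io.cfa)
module;

// chosen approach: treat the C header as if it were a module interface
import stdio;

// alternative considered: escape module-name prefixing explicitly
extern {
    #include <unistd.h>
}

export void log_line( const char * msg ) {
    printf( "%s\n", msg );   // printf resolves to the unprefixed C symbol
}
```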
     493
      494This configuration allows for a special kind of optimization to be performed: since modules prepend their names to their symbols, every symbol can be disambiguated. This allows us to add functionality to perform a "unity build", where the entire codebase can be compiled within a single translation unit, allowing the compiler to inline functions at its discretion. This would allow us to balance a "development configuration" with the benefits of modularization, alongside a "release configuration" with maximum optimizations.
     495
     496### Implementing code analysis tools
     497
     498The explicit symbol visibility afforded by our module system over regular C (where forward declarations could refer to any piece of code in a codebase or library file) allows us to perform static code analysis with precision. In fact, we can reuse a significant portion of the module system's compile-time reflection mechanism to implement any static code analyzer.
     499
     500The functionality of the module system that could be reused by a static code analyzer is documented in the Formalism section. Namely, we can list the import and symbol lists of a single module file, analyze all symbols that are visible within a module, and figure out which symbols can match a given name. A code analysis tool could use this to help visualize how all symbols within a codebase are connected together. If we incorporate the overload resolution system and the runtime system into this, we can create a REPL for use in an advanced development environment.
     501
     502Another direction to take code analysis is in code refactoring. By incorporating a code editing tool into code analysis, we could provide functionality such as moving a symbol from one file to another while maintaining correctness. This would help guide a complex existing codebase into having a more acyclic high-level code structure, giving us a pathway from legacy C code to modern Cforall code.
     503
     504### Porting existing C code
     505
     506A major feature of Cforall is that it is an evolution of C, rather than a completely new language like Rust. This comes with numerous advantages: we are able to immediately tap into a large pool of existing code and programmer experience. This also comes with a number of restrictions: we are limited in how much we can add to the language without alienating C programmers, and we must focus on backwards compatibility (or requiring minimal migration changes).
     507
     508*It is also worth mentioning that Cforall compiles down to C, so there is an inherent extra cost to development that needs to be justified. In TypeScript, the argument is that a strong type system improves code readability enough to justify the extra compilation step. With our system, we argue that the explicit visibility control of our modules is worth the extra compilation step (even without the additional features of Cforall).*
     509
      510When incorporating existing C libraries into our system, we are given a header file and some compiled code. We could determine what macros are defined by having the preprocessor output all macros it defines (eg. GCC's `-dM -E` flags dump every macro definition) - these details could be placed in a separate header file. Then we could parse the resulting header file to determine what symbols are provided. This would provide us with the "module interface" for that header file.
     511
      512To avoid module name prepending issues with `#include` statements, we instead use `import` for these library headers while inside a module file (see Modules use `import` instead of `#include` section). To avoid changing anything with the compiled code's symbols, we avoid performing the module name + library name prepending that is described in Implementing module namespaces section. Note that this technique breaks the principle that modules should "own" their contents, because it is possible for an IO library to transitively depend on a string library (and therefore export it too). As such, we need to keep track of which header file we are grabbing symbols from so we can deduplicate type definitions, as well as keep track of forward type declarations to ensure they are defined at some point.
     513
      514One particularly difficult challenge is the problem of dealing with header files that define C macros. As described in C macros are not exportable section, we don't allow modules to export macros because it makes modules order-dependent. However, when dealing with library headers, we need to consider backwards compatibility. To support this, our module system could scan the module file a second time, after figuring out which library headers are imported, to determine whether any symbols would have been rewritten by one of the library headers' macros. If so, we raise a warning to tell the user to add a line such as `#include "stdio_macros.h"` (where `stdio_macros.h` is the name of the separate header file holding the C macros defined in `stdio`).
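
A sketch of the suggested fix, with a generated `stdio_macros.h` holding only the macros; the macro values shown are illustrative of a typical libc:

```
// stdio_macros.h (generated; only the macros from stdio.h)
#define EOF (-1)
#define BUFSIZ 8192

// module file after applying the warning's suggestion
module;
import stdio;                 // symbols arrive through the module system
#include "stdio_macros.h"     // macros must still come through the preprocessor

export int is_eof( int c ) {
    return c == EOF;          // EOF is a macro, hence the #include
}
```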
     515
      516How would one update a legacy C codebase to use modules? The strategy described above for library headers could be used to perform the first step, though we would like to eventually migrate to using modules instead of header files. If every .h file avoids using forward declarations to symbols that are not defined in the corresponding .c file, then the migration could be performed automatically. At the other extreme, if we pull out every symbol definition into its own module, we could replace any forward declarations with their corresponding imports. However, in order to execute a reasonable migration, we need to avoid changing files more than necessary. In our initial analysis, we can flag any forward declarations in the header file that aren't defined in the corresponding .c file. These could be replaced with an import of the correct module, which could use export tags to ensure we only receive what is necessary (see Generating multiple module interfaces). Afterwards, some of the export tags can be combined together to simplify the interface, perhaps with the assistance of a tool to ensure no symbol bindings got changed.
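
A before/after sketch of one such migration step; the file and symbol names are hypothetical, and the export-tag refinement is omitted:

```
// before: util.h forward-declares a symbol it does not own
int hash( const char * key );   // actually defined in hash.c, not util.c

// after: util.cfa becomes a module and imports what it needs
module;
import hash;                    // replaces the stray forward declaration
```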
     517
     518### Handling initialization order
     519
      520Some low-level programs run with no setup other than ensuring that functions and global variables are loaded into memory. However, many programs expect some initialization code to run before `main` to set up some systems. For example, we expect to be able to use `malloc` without first needing to call `initialize_glibc_library` (having to manage this for every program can be tedious and error-prone). Ideally, modules should own their own initialization code, with the module system handling ordering.
     521
     522*Technically this isn't a problem in C, since C only allows constant expressions in a global variable's initializer expressions. However, we would want our module system to support it for the reasons listed above. More importantly, initialization order is a problem for Cforall because it has constructors and allows calling functions within initializer expressions.*
     523
      524Unfortunately, the current compiler/linker architecture only offers limited control over the order in which initialization code executes. By default, initialization code in C++ object files runs in "link order" (the order in which files are passed to the linker). This can cause a global variable to be initialized using the value of another, before the other has been initialized (referred to as the "static initialization order fiasco" in C++).
     525
      526While some compilers allow finer control over initialization order on specific statements (eg. `__attribute__((constructor(101)))` in GCC), the language itself should offer a method to describe the initialization order relationships between symbols (or automatically determine the ordering).
     527
     528#### How other languages handle initialization order
     529
      530C++20 modules and Go packages/modules handle this problem using acyclic modules. In this architecture, a module must be fully defined (and therefore its symbols are fully defined) before other modules can use it, so ordering the initialization code to match module dependencies avoids inter-module initialization problems. Unfortunately, as described in the Cyclic modules section, we cannot enforce acyclicity on our modules if we wish to make it easy to migrate to using our module system.
     531
     532In Rust, global variables are either zero-initialized or initialized within contexts such as `lazy_static` (this uses synchronization primitives to ensure safety in concurrent contexts), and Rust's type system prevents many accidental uses of uninitialized values. However, some of the analysis that Rust performs essentially requires whole-program compilation, which is not compatible with C's separate compilation architecture. Besides, many C programmers do not wish to pay the cost of additional synchronization if it can be avoided.
     533
     534In C++, the introduction of `constexpr` functions meant that the initialization code could be run at compile time, so no initialization code would have to run before `main`. While this would be a great addition to Cforall, implementing this is outside the scope of this module proposal. Additionally, while this reduces the problem, it does not eliminate it - there may be code that we need to execute at runtime to perform program setup.
     535
     536#### What we want our module system to have
     537
      538Ideally, we'd like our module system to automatically determine initialization dependencies between symbols, similar to how it automatically resolves cyclic module dependencies by analyzing symbols individually. However, while type dependencies are relatively simple and well-defined, the ordering dependencies of execution can be pervasive and rely on compiler implementation quirks.
     539
     540For example, what if we call a function that uses a global variable inside it? What if there is a specific order in which modules should register themselves to a global array? What if function `foo` should only be called after `init` is called? Not only do some of these problems require whole-program analysis to resolve, sometimes the programmer fails to specify the constraints they really want! Evidently, we should add some constraints to our module system, but we must be careful - it can make the language confusingly verbose and accidentally limit potential optimizations.
     541
     542Another consideration is implementation difficulty - many potential solutions are rejected for being too challenging to implement. For example, we could feed some initialization restrictions to the linker to resolve, but making changes to the linker is significantly out-of-scope for this module proposal. We also prefer solutions that don't require as much interaction with the overload resolution mechanism, since that system is very complex. As such, we are limited to providing ordering at a module level, leaving detailed verification to a separate tool.
     543
     544#### Our proposal for handling initialization order
     545
     546To handle initialization order, we introduce a new concept: ordered imports. If we write a statement like `ordered import "graph/node";`, then all runtime initialization within the current module will run after module `graph/node`. `export ordered import` works as if the importing module had `ordered import` specified. If an ordered import cycle is detected, then we error out.
     547
     548Note that if every `import` statement is `ordered import`, then we get acyclic modules. In essence, this system is a relaxation of acyclic modules, trusting that the user has avoided any use-before-initialization problems instead of outright enforcing it (we leave the job of detailed checking to a separate static code analysis tool). This means that if we have:
     549
     550```
     551// module A
     552module;
     553ordered import B;
     554
     555// module B
     556module;
     557import C;
     558
     559// module C
     560module;
     561ordered import D;
     562
     563// module D
     564module;
     565```
     566
     567Then `D` is initialized before `C` and `B` is initialized before `A`, but `C` does not need to be initialized before `A`.
     568
      569Inspired by Go's `init` functions, our ordered imports allow us to introduce top-level `init { ... }` blocks, which can be used to run code before the `main` function runs. Like Go, these work like functions that are only called at initialization, and multiple `init` blocks within a module are executed in the order they appear in the source code. Unlike Go, our `init` blocks do not look like function definitions, which allows us to extend `init` blocks to multi-stage initialization in a future proposal (eg. `init(1)` blocks run after all `init(0)` blocks, and `init` is shorthand for `init(0)`). Our initializers run single-threaded, since we do not assume that a multithreading environment is set up at initialization time.
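
A sketch of the proposed `init` blocks in use; `config`, `default_log_level`, and `log_line` are hypothetical names:

```
// module `logging` (src/logging.cfa)
module;
ordered import config;        // config's initializers run before ours

export struct Logger { int level; };
export struct Logger logger;

init {                        // runs before main, after config's init blocks
    logger.level = default_log_level();   // assumed exported by `config`
}
init {                        // a second block runs after the first, in source order
    log_line( "logging ready" );
}
```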
     570
      571Internally, we use `__attribute__((constructor(N+M)))` to implement the initialization ordering (where `N` is a base value that can be configured, and `M` is the depth of the dependency chain). We may need to analyze more modules than usual in order to determine the depth of the dependency chain if the other modules have not been processed by the module system yet (afterwards, that information is cached). If we have ties, we still fall back on link order to get a deterministic ordering. We also do not handle initialization of static or dynamic libraries in this proposal, as building libraries is left to a future proposal.
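
Tying this to the A-D example above, a sketch of the generated C under the N+M scheme, assuming base N = 101 (the lowest GCC constructor priority not reserved for the implementation); smaller priorities run first:

```
/* depths: D = 0 (no ordered imports), C = 1 (ordered import D),
   B = 0 (only an unordered import), A = 1 (ordered import B) */
__attribute__((constructor(101))) static void init_D( void ) { /* D's init blocks */ }
__attribute__((constructor(102))) static void init_C( void ) { /* after D */ }
__attribute__((constructor(101))) static void init_B( void ) { /* tie with D: link order decides */ }
__attribute__((constructor(102))) static void init_A( void ) { /* after B */ }
```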
     572
    475573## Why C++20 modules failed (and why we will succeed)
    476 [[I will die on the hill of cyclic modules]]
     574
     575C++ serves as our main point of comparison, being a very popular language that extends C features. This raises the question: why use a different strategy than what is implemented in C++? While many features of C++ have been great additions to the language, C++20 modules stands out as the first major feature to be met with poor results. For example, it was the last major C++20 feature to be implemented in Clang and GCC (and is still not feature-complete as of 2025), lacks robust build support, and very few C++ projects have transitioned to using them. What went wrong? And how does our module system do things differently?
     576
      577While there are many reasons given as to why C++20 modules failed, often centering around missing features, we argue that the problem stems from poor incremental development support. Compiler implementations need to make pervasive changes in order to support a distinctly different compilation architecture, and then be carefully tested to ensure feature compatibility. Tooling such as build systems and intellisense also need to be updated to handle modules, which are supplied through the command line instead of being discovered through file paths. Libraries may need to be rewritten in order to adhere to the acyclic requirement of C++20 modules. All this means that users need to grapple with missing features, fragile implementations and a lack of widespread adoption to use as a reference. Simply put, using C++20 modules requires too many changes to be made to existing systems to see a major benefit, and it is still not ready for widespread use as of 2025.
     578
      579### What we do differently
     580
      581Instead of approaching modules from the perspective of the "ideal module", we approach modules from the perspective of "what minimal code changes are required to make C translation units modules?" Not only is this important for adoption, it also makes our features incremental and largely independent of each other, ensuring that many features can be moved to a later proposal if we lack sufficient time to implement them.
     582
     583A key difference between our modules and C++20 modules is that our modules allow cyclic dependencies. As stated in section Cyclic modules, many design patterns in C require being able to define recursive data structures and functions, which could not be defined across multiple acyclic modules. We relax this constraint while maintaining visibility control by treating each symbol definition as separate from each other. While this means our modules are not as self-contained as C++20 modules, they are much more widely applicable to existing design patterns. Note that even though we don't implement acyclic modules in this proposal, a later proposal can always build upon our foundation to provide them.
     584
      585In order to avoid requiring too many changes at once, we start by supporting a limited feature set in our modules, and build from there. This gives our system (and any tooling) specific feature sets to aim for, giving users a clearer understanding of what is supported and what is not. Starting from a simple setup also allows us to provide a formalism of how our module system would work, helping guide development of the Cforall compiler by providing a clear standard to adhere to.
     586
      587We also choose to make our modules be structured according to the file system instead of having modules choose their own names. This aligns much closer to `#include` following file paths and allows us to avoid having to pass files in via the command line. Taking this further, we avoid confusion by having a module with the same name as a folder exist as a sibling file of that folder instead of as a specially named file inside it (eg. `src/graph.cfa` instead of `src/graph/mod.cfa`, see Implementing module namespaces section).
     588
     589### Notable differences between C++ and Cforall
     590
     591For all of the problems with C++'s implementation of modules, it is worth noting that C++ follows a different paradigm than Cforall, which can make it lean towards a different style than our proposed module system.
     592
      593The C++ specification pressures features to be fully interoperable with each other, and suffers from having multiple competing designs for a given feature, such as modules. Our cyclic module implementation requires certain restrictions to be met, such as a context-free grammar for top-level declarations, which would likely be challenging to get approved by a large committee. Acyclic modules are much more of a self-contained feature, making unforeseen challenges less likely to cascade out of control. In this light, it makes sense that acyclic modules were chosen despite their drawbacks.
     594
      595It is also worth noting that our proposed module system requires leveraging compile-time reflection, a feature that some compiler developers may be hesitant to implement. C++ has a large existing codebase and userbase while Cforall is still in alpha development, making us more suited to adopting a new feature like this. In the Cforall project, the same people write the proposals and implement the compiler, making it easier to tweak our proposal if it turns out to be exceedingly hard to implement. Contrast this with C++, where the specification committee is distinct from the engineers who implement the compilers. All this makes it problematic for C++ to rely heavily on such an unproven technique even though Cforall can.
     596
      597There is also a difference in focus between C++ and Cforall: C++ focuses more on performance, while Cforall focuses more on development. While we are ok with compilation potentially degrading to whole-program analysis, on the premise that practical codebases are unlikely to be structured that way, this may not be acceptable in C++. While we consider the generation of multiple module interfaces for better expressivity, this aspect did not appear to be addressed in the original C++ modules proposal. As such, we believe we have the superior module system for development purposes.
  • doc/theses/mike_brooks_MMath/string.tex

    r7ca6bf1 r1dec8f3  
    2929@strncpy@                               & @replace@                             & @replace@             & @( )@, on LHS of @=@ \\
    3030@strstr@                                & @find@                                & @indexOf@             & @find@ \\
    31 @strcspn@                               & @find_first_of@               & @matches@             & @include@ \\
    32 @strspn@                                & @find_first_not_of@   & @matches@             & @exclude@ \\
     31@strcspn@                               & @find_first_of@               & @matches@             & @exclude@ \\
     32@strspn@                                & @find_first_not_of@   & @matches@             & @include@ \\
    3333N/A                                             & @c_str@, @data@               & N/A                   & @strcpy@, @strncpy@ \\
    3434\end{tabular}
     
    5757
    5858The \CFA string type is for manipulation of dynamically-sized character-strings versus C @char *@ type for manipulation of statically-sized null-terminated character-strings.
    59 Hence, the amount of storage for a \CFA string changes dynamically at runtime to fit the string size, whereas the amount of storage for a C string is fixed at compile time.
    60 As a result, a @string@ declaration does not specify a maximum length, where a C string must.
     59Therefore, the amount of storage for a \CFA string changes dynamically at runtime to fit the string size, whereas the amount of storage for a C string is fixed at compile time.
     60As a result, a @string@ declaration does not specify a maximum length, where a C string array does.
     61For \CFA, as a @string@ dynamically grows and shrinks in size, so does its underlying storage.
      62For C, a string may dynamically grow and shrink in size, but its underlying storage does not.
     6163The maximum storage for a \CFA @string@ value is @size_t@ characters, which is $2^{32}$ or $2^{64}$ on 32-bit or 64-bit architectures, respectively.
    6264A \CFA string manages its length separately from the string, so there is no null (@'\0'@) terminating value at the end of a string value.
     
    8688Hence, the basic types @char@, @char *@, @int@, @double@, @_Complex@, including any signness and size variations, implicitly convert to type @string@ (as in Java).
    8789\begin{cquote}
    88 \setlength{\tabcolsep}{15pt}
    8990\begin{tabular}{@{}l|ll|l@{}}
    9091\begin{cfa}
    91 string s;
     92string s = 5;
    9293s = 'x';
    9394s = "abc";
    94 s = cs;
    95 s = 45hh;
    96 s = 45h;
    97 \end{cfa}
    98 &
    99 \begin{cfa}
    100 
     95s = 42hh;               /* signed char */
     96s = 42h;                /* short int */
     97s = 0xff;
     98\end{cfa}
     99&
     100\begin{cfa}
     101"5"
    101102"x"
    102103"abc"
    103 "abc"
    104 "45"
    105 "45"
    106 \end{cfa}
    107 &
    108 \begin{cfa}
    109         s = (ssize_t)MIN;
    110         s = (size_t)MAX;
    111         s = 5.5;
    112         s = 5.5L;
    113         s = 5.5+3.4i;
    114         s = 5.5L+3.4Li;
     104"42"
     105"42"
     106"255"
     107\end{cfa}
     108&
     109\begin{cfa}
     110s = (ssize_t)MIN;
     111s = (size_t)MAX;
     112s = 5.5;
     113s = 5.5L;
     114s = 5.5+3.4i;
     115s = 5.5L+3.4Li;
    115116\end{cfa}
    116117&
     
    127128Conversions can be explicitly specified using a compound literal.
    128129\begin{cfa}
    129 s = (string){ "abc" };                          $\C{// converts char * to string}$
    130 s = (string){ 5 };                                      $\C{// converts int to string}$
    131 s = (string){ 5.5 };                            $\C{// converts double to string}$
    132 \end{cfa}
    133 
    134 Conversions from @string@ to @char *@ attempt to be safe:
    135 either by requiring the maximum length of the @char *@ storage (@strncpy@) or allocating the @char *@ storage for the string characters (ownership), meaning the programmer must free the storage.
    136 Note, a C string is always null terminated, implying a minimum size of 1 character.
    137 \begin{cquote}
    138 \setlength{\tabcolsep}{15pt}
    139 \begin{tabular}{@{}l|l@{}}
    140 \begin{cfa}
     130s = (string){ 5 };    s = (string){ "abc" };   s = (string){ 5.5 };
     131\end{cfa}
     132
     133Conversions from @string@ to @char *@ attempt to be safe.
     134The @strncpy@ conversion requires the maximum length for the pointer's target buffer.
     135The assignment operator and constructor both allocate the buffer and return its address, meaning the programmer must free it.
     136Note, a C string is always null terminated, implying storage is always necessary for the null.
     137\begin{cquote}
     138\begin{tabular}{@{}l|l@{}}
     139\begin{cfa}
     140string s = "abcde";
     141char cs[4];
    141142strncpy( cs, s, sizeof(cs) );
    142 char * cp = s;
     143char * cp = s;          // ownership
    143144delete( cp );
    144 cp = s + ' ' + s;
     145cp = s + ' ' + s;       // ownership
    145146delete( cp );
    146147\end{cfa}
    147148&
    148149\begin{cfa}
     150
     151
    149152"abc\0", in place
    150153"abcde\0", malloc
    151 ownership
     154
    152155"abcde abcde\0", malloc
    153 ownership
     156
    154157\end{cfa}
    155158\end{tabular}
     
    162165For compatibility, @strlen@ also works with \CFA strings.
    163166\begin{cquote}
    164 \setlength{\tabcolsep}{15pt}
    165167\begin{tabular}{@{}l|l@{}}
    166168\begin{cfa}
     
    187189\subsection{Comparison Operators}
    188190
    189 The binary relational, @<@, @<=@, @>@, @>=@, and equality, @==@, @!=@, operators compare \CFA string values using lexicographical ordering, where longer strings are greater than shorter strings.
      191The binary relational, @<@, @<=@, @>@, @>=@, and equality, @==@, @!=@, operators compare \CFA strings using lexicographical ordering, where a string is greater than any of its proper prefixes.
    190192In C, these operators compare the C string pointer not its value, which does not match programmer expectation.
    191193C strings use function @strcmp@ to lexicographically compare the string value.
     
    196198
    197199The binary operators @+@ and @+=@ concatenate C @char@, @char *@ and \CFA strings, creating the sum of the characters.
    198 \par\noindent
     200\begin{cquote}
    199201\begin{tabular}{@{}l|l@{\hspace{15pt}}l|l@{\hspace{15pt}}l|l@{}}
    200202\begin{cfa}
     
    246248\end{cfa}
    247249\end{tabular}
    248 \par\noindent
     250\end{cquote}
    249251However, including @<string.hfa>@ can result in ambiguous uses of the overloaded @+@ operator.\footnote{Combining multiple packages in any programming language can result in name clashes or ambiguities.}
    250 While subtracting characters or pointers has a low-level use-case
    251 \begin{cfa}
    252 ch - '0'    $\C[2in]{// find character offset}$
    253 cs - cs2;  $\C{// find pointer offset}\CRT$
     252For example, subtracting characters or pointers has valid use-cases:
     253\begin{cfa}
     254ch - '0'        $\C[2in]{// find character offset}$
     255cs - cs2;       $\C{// find pointer offset}\CRT$
    254256\end{cfa}
    255257addition is less obvious
    256258\begin{cfa}
    257 ch + 'b'    $\C[2in]{// add character values}$
    258 cs + 'a';  $\C{// move pointer cs['a']}\CRT$
     259ch + 'b'        $\C[2in]{// add character values}$
     260cs + 'a';       $\C{// move pointer cs['a']}\CRT$
    259261\end{cfa}
    260262There are legitimate use cases for arithmetic with @signed@/@unsigned@ characters (bytes), and these types are treated differently from @char@ in \CC and \CFA.
     
    262264Similarly, it is impossible to restrict or remove addition on type @char *@ because (unfortunately) it is subscripting: @cs + 'a'@ implies @cs['a']@ or @'a'[cs]@.
    263265
    264 The prior \CFA concatenation examples show complex mixed-mode interactions among @char@, @char *@, and @string@ (variables are the same as constants) work correctly.
      266The prior \CFA concatenation examples show that complex mixed-mode interactions among @char@, @char *@, and @string@ constants work correctly (variables behave the same).
    265267The reason is that the \CFA type-system handles this kind of overloading well using the left-hand assignment-type and complex conversion costs.
    266268Hence, the type system correctly handles all uses of addition (explicit or implicit) for @char *@.
     
    270272Only @char@ addition can result in ambiguities, and only when there is no left-hand information.
    271273\begin{cfa}
    272 ch = ch + 'b'; $\C[2in]{// LHS disambiguate, add character values}$
    273 s = 'a' + 'b'; $\C{// LHS disambiguate, concatenate characters}$
     274ch = ch + 'b';          $\C[2in]{// LHS disambiguate, add character values}$
     275s = 'a' + 'b';          $\C{// LHS disambiguate, concatenate characters}$
    274276printf( "%c\n", @'a' + 'b'@ ); $\C{// no LHS information, ambiguous}$
    275277printf( "%c\n", @(return char)@('a' + 'b') ); $\C{// disambiguate with ascription cast}\CRT$
     
    277279The ascription cast, @(return T)@, disambiguates by stating a (LHS) type to use during expression resolution (not a conversion).
    278280Fortunately, character addition without LHS information is rare in C/\CFA programs, so repurposing the operator @+@ for @string@ types is not a problem.
    279 Note, other programming languages that repurpose @+@ for concatenation, could have similar ambiguity issues.
     281Note, other programming languages that repurpose @+@ for concatenation can have similar ambiguity issues.
    280282
    281283Interestingly, \CC cannot support this generality because it does not use the left-hand side of assignment in expression resolution.
     
    297299If $N = 0$, a zero-length string, @""@, is returned.
    298300\begin{cquote}
    299 \setlength{\tabcolsep}{15pt}
    300301\begin{tabular}{@{}l|l@{}}
    301302\begin{cfa}
     
    303304s = 'x' * 3;
    304305s = "abc" * 3;
    305 s = (name + ' ') * 3;
    306 \end{cfa}
    307 &
    308 \begin{cfa}
    309 "
     306s = ("MIKE" + ' ') * 3;
     307\end{cfa}
     308&
     309\begin{cfa}
     310""
    310311"xxx"
    311312"abcabcabc"
     
    315316\end{cquote}
    316317Like concatenation, there is a potential ambiguity with multiplication of characters;
    317 multiplication for pointers does not exist in C.
    318 \begin{cfa}
    319 ch = ch * 3; $\C[2in]{// LHS disambiguate, multiply character values}$
    320 s = 'a' * 3; $\C{// LHS disambiguate, concatenate characters}$
     318multiplication of pointers does not exist in C.
     319\begin{cfa}
     320ch = ch * 3;            $\C[2in]{// LHS disambiguate, multiply character values}$
     321s = 'a' * 3;            $\C{// LHS disambiguate, concatenate characters}$
    321322printf( "%c\n", @'a' * 3@ ); $\C{// no LHS information, ambiguous}$
    322323printf( "%c\n", @(return char)@('a' * 3) ); $\C{// disambiguate with ascription cast}\CRT$
     
    326327
    327328\subsection{Substring}
    328 The substring operation returns a subset of a string starting at a position in the string and traversing a length or matching a pattern string.
     329
     330The substring operation returns a subset of a string starting at a position in the string and traversing a length, or matching a pattern string.
    329331\begin{cquote}
    330332\setlength{\tabcolsep}{10pt}
    331333\begin{tabular}{@{}l|ll|l@{}}
    332 \multicolumn{2}{c}{\textbf{length}} & \multicolumn{2}{c}{\textbf{pattern}} \\
    333 \begin{cfa}
    334 s = name( 2, 2 );
    335 s = name( 3, -2 );
    336 s = name( 2, 8 );
    337 s = name( 0, -1 );
    338 s = name( -1, -1 );
     334\multicolumn{2}{@{}c}{\textbf{length}} & \multicolumn{2}{c@{}}{\textbf{pattern}} \\
     335\multicolumn{4}{@{}l}{\lstinline{string name = "PETER"}} \\
     336\begin{cfa}
     337s = name( 0, 4 );
     338s = name( 1, 4 );
     339s = name( 2, 4 );
     340s = name( 4, -2 );
     341s = name( 8, 2 );
     342s = name( 0, -2 );
     343s = name( -1, -2 );
    339344s = name( -3 );
    340345\end{cfa}
    341346&
    342347\begin{cfa}
    343 "KE"
    344 "IK"
    345 "KE", clip length to 2
    346 "", beyond string clip to null
    347 "K"
    348 "IKE", to end of string
    349 \end{cfa}
    350 &
    351 \begin{cfa}
    352 s = name( "IK" );
     348"PETE"
     349"ETER"
     350"TER"   // clip length to 3
     351"ER"
     352""                 // beyond string to right, clip to null
     353""                 // beyond string to left, clip to null
     354"ER"
     355"TER"   // to end of string
     356\end{cfa}
     357&
     358\begin{cfa}
     359s = name( "ET" );
    353360s = name( "WW" );
    354361
     
    356363
    357364
    358 \end{cfa}
    359 &
    360 \begin{cfa}
    361 "IK"
    362 ""
    363 
    364 
    365 
    366 
    367 \end{cfa}
    368 \end{tabular}
    369 \end{cquote}
    370 A negative starting position is a specification from the right end of the string.
     365
     366
     367\end{cfa}
     368&
     369\begin{cfa}
     370"ET"
     371""  // does not occur
     372
     373
     374
     375
     376
     377
     378\end{cfa}
     379\end{tabular}
     380\end{cquote}
     381For the length form, a negative starting position is a specification from the right end of the string.
    371382A negative length means that characters are selected in the opposite (right to left) direction from the starting position.
    372383If the substring request extends beyond the beginning or end of the string, it is clipped (shortened) to the bounds of the string.
    373384If the substring request is completely outside of the original string, a null string is returned.
    374 The pattern-form either returns the pattern string is the pattern matches or a null string if the pattern does not match.
     385The pattern-form returns the pattern string if the pattern matches, or a null string if it does not.
    375386The usefulness of this mechanism is discussed next.
    376387
     
    379390Hence, the left string may decrease, stay the same, or increase in length.
    380391\begin{cquote}
    381 \setlength{\tabcolsep}{15pt}
    382392\begin{tabular}{@{}l|l@{}}
    383393\begin{cfa}[escapechar={}]
     
    398408\end{tabular}
    399409\end{cquote}
    400 Now pattern matching is useful on the left-hand side of assignment.
    401 \begin{cquote}
    402 \setlength{\tabcolsep}{15pt}
     410Now substring pattern matching is useful on the left-hand side of assignment.
     411\begin{cquote}
    403412\begin{tabular}{@{}l|l@{}}
    404413\begin{cfa}[escapechar={}]
     
    415424Generalizing the pattern to a regular expression is a possible extension.
    416425
    417 The replace operation extensions substring to substitute all occurrences.
    418 \begin{cquote}
    419 \setlength{\tabcolsep}{15pt}
     426The replace operation extends substring to substitute all occurrences.
     427\begin{cquote}
    420428\begin{tabular}{@{}l|l@{}}
    421429\begin{cfa}
     
    437445\subsection{Searching}
    438446
    439 The find operation returns the position of the first occurrence of a key in a string.
     447The @find@ operation returns the position of the first occurrence of a key in a string.
    440448If the key does not appear in the string, the length of the string is returned.
    441449\begin{cquote}
    442 \setlength{\tabcolsep}{15pt}
    443450\begin{tabular}{@{}l|l@{}}
    444451\begin{cfa}
     
    458465A character-class operation indicates if a string is composed completely of a particular class of characters, \eg, alphabetic, numeric, vowels, \etc.
    459466\begin{cquote}
    460 \setlength{\tabcolsep}{15pt}
    461467\begin{tabular}{@{}l|l@{}}
    462468\begin{cfa}
     
    478484Function @exclude@ is the reverse of @include@, checking if all characters in the string are excluded from the class (compliance).
    479485\begin{cquote}
    480 \setlength{\tabcolsep}{15pt}
    481486\begin{tabular}{@{}l|l@{}}
    482487\begin{cfa}
     
    493498Both forms can return the longest substring of compliant characters.
    494499\begin{cquote}
    495 \setlength{\tabcolsep}{15pt}
    496500\begin{tabular}{@{}l|l@{}}
    497501\begin{cfa}
     
    511515\end{cquote}
    512516
    513 There are also versions of @include@ and @exclude@, returning a position or string, taking a validation function, like one of the C character-class routines.\footnote{It is part of the hereditary of C that these function take and return an \lstinline{int} rather than a \lstinline{bool}, which affects the function type.}
    514 \begin{cquote}
    515 \setlength{\tabcolsep}{15pt}
     517There are also versions of @include@ and @exclude@, returning a position or string, that take a validation function, like one of the C character-class functions.\footnote{It is part of the heritage of C that these functions take and return an \lstinline{int} rather than a \lstinline{bool}, which affects the function type.}
     518\begin{cquote}
    516519\begin{tabular}{@{}l|l@{}}
    517520\begin{cfa}
     
    533536The translate operation returns a string with each character transformed by one of the C character transformation functions.
    534537\begin{cquote}
    535 \setlength{\tabcolsep}{15pt}
    536538\begin{tabular}{@{}l|l@{}}
    537539\begin{cfa}
     
    580582\begin{figure}
    581583\begin{cquote}
    582 \setlength{\tabcolsep}{15pt}
    583584\begin{tabular}{@{}l|l@{}}
    584585\multicolumn{1}{c}{\textbf{\CC}} & \multicolumn{1}{c}{\textbf{\CFA}} \\
     
    626627Hence, it is possible to convert a block of C string operations to \CFA strings just by changing the type @char *@ to @string@.
    627628\begin{cquote}
    628 \setlength{\tabcolsep}{15pt}
    629629\begin{tabular}{@{}ll@{}}
    630630\begin{cfa}
     
    659659The \CC manipulators are @setw@ and its associated width controls @left@, @right@, and @setfill@.
    660660\begin{cquote}
    661 \setlength{\tabcolsep}{15pt}
    662661\begin{tabular}{@{}l|l@{}}
    663662\begin{c++}
     
    677676The \CFA manipulators are @bin@, @oct@, @hex@, and @wd@ with its associated width control @left@.
    678677\begin{cquote}
    679 \setlength{\tabcolsep}{15pt}
    680678\begin{tabular}{@{}l|l@{}}
    681679\begin{cfa}
     
    706704Reading into a @char@ is safe as the size is 1; @char *@ is unsafe without using @setw@ to constrain the length (which includes @'\0'@); @string@ is safe as it grows dynamically as characters are read.
    707705\begin{cquote}
    708 \setlength{\tabcolsep}{15pt}
    709706\begin{tabular}{@{}l|l@{}}
    710707\begin{c++}
     
    771768\CC modifies the mutable receiver object, replacing by position (zero origin) and length.
    772769\begin{cquote}
    773 \setlength{\tabcolsep}{15pt}
    774770\begin{tabular}{@{}l|l@{}}
    775771\begin{c++}
     
    787783\label{p:JavaReplace}
    788784\begin{cquote}
    789 \setlength{\tabcolsep}{15pt}
    790785\begin{tabular}{@{}l|l@{}}
    791786\begin{java}
     
    802797Java also provides a mutable @StringBuffer@, replacing by position (zero origin) and length.
    803798\begin{cquote}
    804 \setlength{\tabcolsep}{15pt}
    805799\begin{tabular}{@{}l|l@{}}
    806800\begin{java}
     
    12651259The common \CC lowering~\cite[Sec. 3.1.2.3]{cxx:raii-abi} proceeds differently than the present \CFA lowering.
    12661260\begin{cquote}
    1267 \setlength{\tabcolsep}{15pt}
    12681261\begin{tabular}{@{}l|l@{}}
    12691262\begin{cfa}
     
    13661359Of the capabilities listed in \VRef[Figure]{f:StrApiCompare}, only the following three cases need revisions.
    13671360\begin{cquote}
    1368 \setlength{\tabcolsep}{15pt}
    13691361\begin{tabular}{ll}
    13701362HL & LL \\
  • doc/theses/mike_brooks_MMath/uw-ethesis.tex

    r7ca6bf1 r1dec8f3  
    158158\setcounter{secnumdepth}{4}     % number subsubsection
    159159\setcounter{tocdepth}{4} % subsubsection in TOC
     160\setlength{\tabcolsep}{15pt}
    160161
    161162%\usepackage[automake,toc,abbreviations]{glossaries-extra} % Exception to the rule of hyperref being the last add-on package
  • doc/uC++toCFA/uC++toCFA.tex

    r7ca6bf1 r1dec8f3  
    1111%% Created On       : Wed Apr  6 14:53:29 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Sat Mar 15 13:38:53 2025
    14 %% Update Count     : 6302
     13%% Last Modified On : Mon Sep  8 18:10:30 2025
     14%% Update Count     : 6534
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
    17 % requires tex packages: texlive-base texlive-latex-base tex-common texlive-humanities texlive-latex-extra texlive-fonts-recommended
    1817
    1918\documentclass[11pt]{article}
     
    8382\setlength{\topmargin}{-0.45in}                                                 % move running title into header
    8483\setlength{\headsep}{0.25in}
     84\setlength{\tabcolsep}{15pt}
    8585
    8686%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
     
    134134
    135135\maketitle
    136 \vspace*{-0.55in}
     136\vspace*{-0.65in}
    137137
    138138\section{Introduction}
    139139
    140 \CFA is NOT an object-oriented programming-language.
    141 \CFA uses parametric polymorphism and allows overloading of variables and routines:
    142 \begin{cfa}
    143 int i;  char i;  double i;      $\C[2in]{// overload name i}$
    144 int i();  double i();  char i();
    145 i += 1;                                         $\C{// int i}$
    146 i += 1.0;                                       $\C{// double i}$
    147 i += 'a';                                       $\C{// char i}$
    148 int j = i();                            $\C{// int i()}$
    149 double j = i();                         $\C{// double i();}$
    150 char j = i();                           $\C{// char i()}\CRT$
    151 \end{cfa}
    152 \CFA has rebindable references.
     140\CFA is an extension of the C programming language with a trait-style type-system rather than templates and objects as in \CC.
     141\CFA allows overloading of variables and routines using the left-hand assignment type to precisely select among overloaded names.
     142\begin{cfa}
     143int x;  char x;  double x;    // overload name x
     144int x();  double x();  char x();
     145\end{cfa}
     146\vspace*{-8pt}
     147\begin{cquote}
     148\begin{tabular}{@{}l@{\hspace{1in}}|l@{}}
     149\begin{cfa}
     150x += 42;        $\C[1in]{// int x}$
     151x += 42.2;      $\C{// double x}$
     152x += 'a';       $\C{// char x}\CRT$
     153\end{cfa}
     154&
     155\begin{cfa}
     156int j = x();    $\C[1in]{// int x()}$
     157double j = x(); $\C{// double x();}$
     158char j = x();   $\C{// char x()}\CRT$
     159\end{cfa}
     160\end{tabular}
     161\end{cquote}
     162\CFA generalizes reference types, allowing multiple and rebindable references (like pointers).
    153163\begin{cquote}
    154164\begin{tabular}{@{}l|l@{}}
     
    165175&
    166176\begin{cfa}
    167 r2i = 3; $\C[1.0in]{// change x}$
     177r2i = 3;         $\C[0.875in]{// change x}$
    168178&r2i = &r1y; $\C{// change p2i / r2i}$
    169 r2i = 3; $\C{// change y}$
     179r2i = 3;        $\C{// change y}$
    170180&r1x = &r1y; $\C{// change p1x / r1x}$
    171 r2i = 4; $\C{// change y}$
     181r2i = 4;        $\C{// change y}$
    172182&r1x = @0p@; $\C{// reset}\CRT$
    173183\end{cfa}
     
    179189int & @const@ & @const@ crcr = cr; // generalize
    180190\end{cfa}
     191
     192
     193\section{Control Flow}
     194
     195The @choose@ statement provides an implicit @break@ after each @case@ clause for safety.
     196It is possible to @break default@ in a @case@ clause to transfer to common code in the @default@ clause, as sketched after the following example.
     197\begin{cquote}
     198\begin{tabular}{@{}l|l@{}}
     199\begin{uC++}
     200switch ( i ) {
     201  case 1: ... @break@; // explicit break
     202  case 2: ... @break@; // explicit break
     203  default: ... ;
     204}
     205\end{uC++}
     206&
     207\begin{cfa}
     208choose ( i ) {
     209  case 1: ... ; // implicit break
     210  case 2: ... ; // implicit break
     211  default: ... ;
     212}
     213\end{cfa}
     214\end{tabular}
     215\end{cquote}
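For example, a sketch of @break default@ (assuming the semantics described above):
\begin{cfa}
choose ( i ) {
  case 1: ... @break default@; // transfer to common code
  case 2: ... ; // implicit break
  default: ... ; // common code
}
\end{cfa}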
     216To simplify creating an infinite loop, the loop condition is optional.
     217\begin{cquote}
     218\begin{tabular}{@{}l|l@{}}
     219\begin{uC++}
     220while ( true ) ...
     221for ( ;; ) ...
     222do ... while ( true )
     223\end{uC++}
     224&
     225\begin{uC++}
     226while ($\,$) ...
     227for ($\,$) ...
     228do ... while ($\,$)
     229\end{uC++}
     230\end{tabular}
     231\end{cquote}
     232To simplify loop iteration, a range is provided, from low to high, with a traversal direction, ascending (@+@) or descending (@-@).
     233The following is the syntax for the loop range, where @[@\,@]@ means optional.
     234\begin{cfa}[deletekeywords=default]
     235[ @index ;@ ] [ [ @min@ (default 0) ] [ direction @+@/@-@ (default +) ] @~@ [ @=@ (include endpoint) ] ] @max@ [ @~ increment@ ]
     236\end{cfa}
     237For @=@, the range includes the endpoint (@max@/@min@) depending on the direction (@+@/@-@).
     238\begin{cquote}
     239\begin{tabular}{@{}l|l@{}}
     240\begin{uC++}
     241for ( int i = 0; i < @10@; i += 1 ) { ... }
     242for ( int i = @5@; i < @15@; i += @2@ ) { ... }
     243for ( int i = -2; i <@=@ 10; i += 3 ) { ... }
     244for ( int i = 10; i > -3; i @-@= 1 ) { ... }
     245for ( int i = 10; i >@=@ 0; i @-@= 1 ) { ... }
     246\end{uC++}
     247&
     248\begin{cfa}
     249for ( @10@ ) { ... }  /  for ( i; @10@ ) { ... }  // 0 to 9 by 1
     250for ( i; @5@ ~ @15@ ~ @2@ ) { ... } // 5 to 14 by 2
     251for ( i; -2 ~@=@ 10 ~ 3 ) { ... } // -2 to 10 by 3
     252for ( i; -3 @-@~ 10 ) { ... } // not 10 -~= -3, 10 to -2 by -1
     253for ( i; 0 @-@~@=@ 10 ) { ... } // not 10 -~= 0, 10 to 0 by -1
     254\end{cfa}
     255\end{tabular}
     256\end{cquote}
     257A terminating loop @else@ (like Python) is executed if the loop terminates normally, \ie the loop conditional becomes false, which is safer than retesting after the loop.
     258The loop index is available in the @else@ clause.
     259\begin{cquote}
     260\begin{tabular}{@{}l|l@{}}
     261\begin{uC++}
     262int i = 0
     263for ( i = 0; i < 10; i += 1 ) { ... }
     264@if ( i == 10 )@ { ... }
     265\end{uC++}
     266&
     267\begin{cfa}
     268
     269for ( i; 10 ) { ... }
     270@else@ { ... } // i == 10 because of post increment
     271\end{cfa}
     272\end{tabular}
     273\end{cquote}
     274Single/multiple-level loop exit/continue is provided by the labelled @break@/@continue@. (First example is \CC.)
     275\begin{cquote}
     276\begin{tabular}{@{}l|l@{}}
     277\begin{C++}
     278@L1:@ for ( ;; ) {
     279        for ( ;; ) {
     280                ... if ( ... ) @goto L1@; ...
     281                ... if ( ... ) @goto L2@; ...
     282        } @L2: ;@
     283}
     284\end{C++}
     285&
     286\begin{cfa}
     287@L1:@ for () {
     288        @L2:@ for () {
     289                ... if ( ... ) @continue L1@; ...
     290                ... if ( ... ) @break L2@; ...
     291        }
     292}
     293\end{cfa}
     303\end{tabular}
     304\end{cquote}
     305
     306
     307\section{Exception}
     308
     309Currently, \CFA uses macros @ExceptionDecl@ and @ExceptionInst@ to declare and instantiate an exception.
     310\begin{cquote}
     311\setlength{\tabcolsep}{5pt}
     312\begin{tabular}{@{}l|ll@{}}
     313\begin{uC++}
     314
     315@_Exception@ E {        // local or global scope
     316        ... // exception fields
     317};
     318try {
     319        ... if ( ... ) @_Resume@ E( /* initialization */ ); ...
     320        ... if ( ... ) @_Throw@ E( /* initialization */ ); ...
     321} @_CatchResume@( E & /* reference */ ) { ... }
     322  catch( E & ) { ... }
     323  catch( ... /* catch any */ ) { ... }
     324  _Finally { ... }
     325\end{uC++}
     326&
     327\begin{cfa}
     328#include <Exception.hfa>
     329@ExceptionDecl@( E,             // must be global scope
     330        ... // exception fields
     331);
     332try {
     333        ... if ( ... ) @throwResume@ @ExceptionInst@( E, /* initialization */ ); ...
     334        ... if ( ... ) @throw@ @ExceptionInst@( E, /* initialization */ ); ...
     335} @catchResume@( E @*@ /* pointer */ ) { ... }
     336  catch( E * ) { ... }
     337  catch( exception_t @*@ /* catch any */ ) { ... }
     338  finally { ... }
     339\end{cfa}
     340\end{tabular}
     341\end{cquote}
     342
     343
     344\section{Non-local Exception}
     345
     346\begin{cquote}
     347\begin{tabular}{@{}l|ll@{}}
     348\begin{uC++}
     349
     350
     351void main() {
     352        try {
     353                _Enable {
     354                        ... suspend(); ...
     355                }
     356        } @_CatchResume@( E & /* reference */ ) { ... }
     357          catch( E & ) { ... }
     358}
     359\end{uC++}
     360&
     361\begin{cfa}
     362#define resumePoll( coroutine ) resume( coroutine ); poll()
     363#define suspendPoll suspend; poll()
     364void main() {
     365        try {
     366                enable_ehm();
     367                ... suspendPoll ...
     368                disable_ehm();
     369        } @catchResume@( E * ) { ... }
     370          catch( E * ) { ... }
     371}
     372\end{cfa}
     373\end{tabular}
     374\end{cquote}
     375
     376
     377\section{Stream I/O}
     378
     379\CFA output streams automatically separate values and insert a newline at the end of the print.
     380\begin{cquote}
     381\begin{tabular}{@{}l|l@{}}
     382\begin{uC++}
     383#include <@iostream@>
     384using namespace std;
     385int i;   double d;   char c;
     386cin >> i >> d >> c;
     387cout << i << ' ' << d << ' ' << c << endl;
     388\end{uC++}
     389&
     390\begin{cfa}
     391#include <@fstream.hfa@>
     392
     393int i;   double d;   char c;
     394sin | i | d | c;
     395sout | i | d | c;
     396\end{cfa}
     397\end{tabular}
     398\end{cquote}
     399To disable/enable automatic newline at the end of printing, use @nlOff@/@nlOn@ and @nl@.
     400\begin{cquote}
     401\begin{tabular}{@{}l|l@{}}
     402\begin{uC++}
     403
     404for ( int i = 0; i < 5; i += 1 ) cout << i << ' ';
     405cout << @endl@;
     4060 1 2 3 4
     407\end{uC++}
     408&
     409\begin{cfa}
     410sout | @nlOff@; // disable auto nl
     411for ( i; 5 ) sout | i;
     412sout | @nl@; sout | @nlOn@;  // enable auto nl
     4130 1 2 3 4
     414\end{cfa}
     415\end{tabular}
     416\end{cquote}
     417Floating-point numbers without a fraction print with a decimal point, which can be disabled with @nodp@.
     418\begin{cquote}
     419\begin{tabular}{@{}l|l@{}}
     420\begin{uC++}
     421cout << 3.0 << ' ' << showpoint << setprecision(0) << 3.0 << endl;
     4223 3.
     423\end{uC++}
     424&
     425\begin{cfa}
     426sout | @nodp( 3.0 )@ | 3.0;
     4273 3.
     428\end{cfa}
     429\end{tabular}
     430\end{cquote}
     431
     432
     433\section{String}
     434
     435The @string@ type in \CFA is very similar to that in \CC.
     436\begin{cquote}
     437\begin{tabular}{@{}l|l@{}}
     438\multicolumn{2}{@{}l@{}}{\lstinline{string s1, s2;}} \\
     439\begin{uC++}
     440s1 = "abcdefg";
     441s2 = s1;
     442s1 += s2;
     443s1 == s2; s1 != s2;
     444s1 < s2;  s1 <= s2;  s1 > s2;  s1 >= s2;
     445s1.length();
     446s1[3];
     447s1.substr( 2 );  s1.substr( 2, 3 );
     448s1.replace( 2, 5, s2 );
     449s1.find( s2 );
     450s1.find_first_of( "cd" );
     451s1.find_first_not_of( "cd" );
     452getline( cin, s1 );
     453cout << s1 << endl;
     454\end{uC++}
     455&
     456\begin{cfa}
     457s1 = "abcdefg";
     458s2 = s1;
     459s1 += s2;
     460s1 == s2; s1 != s2;
     461s1 < s2;  s1 <= s2;  s1 > s2;  s1 >= s2;
     462len( s1 ); // like C strlen( s1 )
     463s1[3];
     464s1( 2 );  s1( 2, 3 );
     465s1( 2, 5 ) = s2;
     466find( s1, s2 );
     467exclude( s1, "cd" );  // longest sequence excluding "c" and "d"
     468include( s1, "cd" );  // longest sequence including "c" and "d"
     469sin | getline( s1 );
     470sout | s1;
     471\end{cfa}
     472\end{tabular}
     473\end{cquote}
     474
     475
     476\section{\texorpdfstring{\lstinline{uArray}}{uArray}}
     477
     478\begin{cquote}
     479\setlength{\tabcolsep}{5pt}
     480\begin{tabular}{@{}l|l@{}}
     481\begin{uC++}
     482#include <iostream>
     483using namespace std;
     484
     485struct S {
     486        int i;
     487        S( int i ) { S::i = i; }
     488};
     489void f( @uArrayRef( S, parm )@ );
     490int main() {
     491        enum { N = 5 };
     492        @uArray( S, s, N );@   // stack, no ctor calls
     493        for ( int i = 0; i < N; i += 1 ) @s[i]( i )@; // ctor calls
     494        for ( int i = 0; i < N; i += 1 ) cout << s[i]@->@i << endl;
     495        f( s );
     496        @uArrayPtr( S, sp, N );@   // heap, no ctor calls
     497        for ( int i = 0; i < N; i += 1 ) @sp[i]( i )@; // ctor calls
     498        for ( int i = 0; i < N; i += 1 ) cout << sp[i]@->@i << endl;
     499        f( sp );
     500} // delete s, sp
     501\end{uC++}
     502&
     503\begin{cfa}
     504#include <fstream.hfa>
     505#include <array.hfa>
     506#include <memory.hfa>
     507struct S {
     508        int i;
     509};
     510void ?{}( S & s, int i ) { s.i = i; }
     511@forall( [N] )@ void f( @array( S, N ) & parm@ ) {}
     512int main() {
     513        enum { N = 5 };
     514        @array( S, N ) s = { delay_init };@ // no ctor calls
     515        for ( i; N ) @s[i]{ i }@; // ctor calls
     516        for ( i; N ) sout | s[i]@.@i;
     517        f( s );
     518    @unique_ptr( array( S, N ) )@ sp = { delay_init }; // heap
     519    for ( int i = 0; i < N; i += 1 ) @(*sp)@[i]{ i }; // ctor calls
     520    for ( int i = 0; i < N; i += 1 ) sout | @(*sp)@[i].i;
     521        f( @*sp@ );
     522} // delete s, sp
     523\end{cfa}
     524\end{tabular}
     525\end{cquote}
     526
     527
     528\section{\texorpdfstring{Structures (object-oriented \protect\vs routine style)}{Structures (object-oriented vs. routine style)}}
     529
     530\CFA is NOT an object-oriented programming-language, so there is no receiver (\lstinline[language=c++]{this}) or nested structure routines.
     531The equivalent of a \emph{member} routine has an explicit structure parameter in any parameter position (often the first).
     532\begin{cquote}
     533\begin{tabular}{@{}l|l@{}}
     534\begin{uC++}
     535struct S {
     536        int i = 0;  // cheat, implicit default constructor
     537        int setter( int j ) { int t = i; i = j; return t; }
     538        int getter() { return i; }
     539};
     540S s;
     541@s.@setter( 3 );  // object calls
     542int k = @s.@getter();
     543\end{uC++}
     544&
     545\begin{cfa}
     546struct S {  int i;  };
     547void ?{}( S & s ) { s.i = 0; } // explicit default constructor
     548int setter( @S & s,@ int j ) @with( s )@ { int t = i; i = j; return t; }
     549int getter( @S & s@ ) @with( s )@ { return i; }
     550
     551S s;
     552setter( @s,@ 3 );  // normal calls
     553int k = getter( @s@ );
     554\end{cfa}
     555\end{tabular}
     556\end{cquote}
    181557Aggregate qualification is reduced or eliminated by opening scopes using the @with@ clause.
    182558\begin{cfa}
     
    194570\end{cfa}
    195571\noindent
    196 In subsequent code examples, the left example is \uC and the right example is \CFA.
    197 
    198 
    199 \section{Looping}
    200 
    201 \begin{cquote}
    202 \begin{tabular}{@{}l|l@{}}
    203 \begin{uC++}
    204 for ( @;;@ ) { ... }  /  while ( @true@ ) { ... }
    205 for ( int i = 0; i < @10@; i += 1 ) { ... }
    206 for ( int i = @5@; i < @15@; i += @2@ ) { ... }
    207 for ( int i = -1; i <@=@ 10; i += 3 ) { ... }
    208 for ( int i = 10; i > 0; i @-@= 1 ) { ... }
    209 \end{uC++}
    210 &
    211 \begin{cfa}
    212 for () { ... }  /  while () { ... }
    213 for ( @10@ ) { ... }  /  for ( i; @10@ ) { ... }
    214 for ( i; @5@ ~ @15@ ~ @2@ ) { ... }
    215 for ( i; -1 ~@=@ 10 ~ 3 ) { ... }
    216 for ( i; 0 @-@~ 10 ) { ... }
    217 \end{cfa}
    218 \end{tabular}
    219 \end{cquote}
    220 
    221 \begin{cquote}
    222 \begin{tabular}{@{}l|l@{}}
    223 \begin{uC++}
    224 int i = 0
    225 for ( i = 0; i < 10; i += 1 ) { ... }
    226 @if ( i == 10 )@ { ... }
    227 \end{uC++}
    228 &
    229 \begin{cfa}
    230 
    231 for ( i; 10 ) { ... }
    232 @else@ { ... } // i == 10
    233 \end{cfa}
    234 \end{tabular}
    235 \end{cquote}
    236 
    237 \begin{cquote}
    238 \begin{tabular}{@{}l|l@{}}
    239 \begin{uC++}
    240 @L1:@ for ( ;; ) {
    241         @L2:@ for ( ;; ) {
    242                 ... if ( ... ) @break L1@; ...
    243                 ... if ( ... ) @break L2@; ...
    244         }
    245 }
    246 \end{uC++}
    247 &
    248 \begin{cfa}
    249 @L1:@ for () {
    250         @L2:@ for () {
    251                 ... if ( ... ) @break L1@; ...
    252                 ... if ( ... ) @break L2@; ...
    253         }
    254 }
    255 \end{cfa}
    256 \end{tabular}
    257 \end{cquote}
    258 
    259 
    260 \section{Stream I/O}
    261 
    262 \CFA output streams automatically separate values and insert a newline at the end of the print.
    263 \begin{cquote}
    264 \begin{tabular}{@{}l|l@{}}
    265 \begin{uC++}
    266 #include <@iostream@>
    267 using namespace std;
    268 int i;   double d;   char c;
    269 cin >> i >> d >> c;
    270 cout << i << ' ' << d << ' ' << c << endl;
    271 \end{uC++}
    272 &
    273 \begin{cfa}
    274 #include <@fstream.hfa@>
    275 
    276 int i;   double d;   char c;
    277 sin | i | d | c;
    278 sout | i | d | c
    279 \end{cfa}
    280 \end{tabular}
    281 \end{cquote}
    282 To disable/enable automatic newline at the end of printing, use @nlOff@/@nlOn@ and @nl@.
    283 \begin{cquote}
    284 \begin{tabular}{@{}l|l@{}}
    285 \begin{uC++}
    286 
    287 for ( int i = 0; i < 5; i += 1 ) cout << i << ' ';
    288 cout << @endl@;
    289 
    290 0 1 2 3 4
    291 \end{uC++}
    292 &
    293 \begin{cfa}
    294 sout | @nlOff@; // disable auto nl
    295 for ( i; 5 ) sout | i;
    296 sout | @nl@;
    297 sout | @nlOn@;  // reenable auto nl
    298 0 1 2 3 4
    299 \end{cfa}
    300 \end{tabular}
    301 \end{cquote}
    302 Floating-point numbers without a fraction print with a decimal point, which can be disabled with @nodp@.
    303 \begin{cquote}
    304 \begin{tabular}{@{}l|l@{}}
    305 \begin{uC++}
    306 cout << 3.0 << ' ' << showpoint << setprecision(0) << 3.0 << endl;
    307 3 3.
    308 \end{uC++}
    309 &
    310 \begin{cfa}
    311 sout | @nodp( 3.0 )@ | 3.0;
    312 3 3.
    313 \end{cfa}
    314 \end{tabular}
    315 \end{cquote}
    316 
    317 
    318 \section{Exception}
    319 
    320 Currently, \CFA uses macros @ExceptionDecl@ and @ExceptionInst@ to declare and instantiate an exception.
    321 \begin{cquote}
    322 \begin{tabular}{@{}l|ll@{}}
    323 \begin{uC++}
    324 
    325 @_Exception@ E {        // local or global scope
    326         ... // exception fields
    327 };
    328 try {
    329         ...
    330         if ( ... ) @_Resume@ E( /* initialization */ );
    331         if ( ... ) @_Throw@ E( /* initialization */ );
    332                 ...
    333 } @_CatchResume@( E & /* reference */ ) { ... }
    334   catch( E & ) { ... }
    335   catch( ... /* catch any */ ) { ... }
    336   _Finally { ... }
    337 \end{uC++}
    338 &
    339 \begin{cfa}
    340 #include <Exception.hfa>
    341 @ExceptionDecl@( E,             // must be global scope
    342         ... // exception fields
    343 );
    344 try {
    345         ...
    346         if ( ... ) @throwResume@ @ExceptionInst@( E, /* intialization */ );
    347         if ( ... ) @throw@ @ExceptionInst@( E, /* intialization */ );
    348         ...
    349 } @catchResume@( E @*@ /* pointer */ ) { ... }
    350   catch( E * ) { ... }
    351   catch( exception_t @*@ /* catch any */ ) { ... }
    352   finally { ... }
    353 \end{cfa}
    354 \end{tabular}
    355 \end{cquote}
    356 
    357 
    358 \section{Non-local Exception}
    359 
    360 \begin{cquote}
    361 \begin{tabular}{@{}l|ll@{}}
    362 \begin{uC++}
    363 
    364 
    365 void main() {
    366         try {
    367                 _Enable {
    368                         ... suspend(); ...
    369                 }
    370         } @_CatchResume@( E & /* reference */ ) { ... }
    371           catch( E & ) { ... }
    372 }
    373 \end{uC++}
    374 &
    375 \begin{cfa}
    376 #define resumePoll( coroutine ) resume( coroutine ); poll()
    377 #define suspendPoll suspend; poll()
    378 void main() {
    379         try {
    380                 enable_ehm();
    381                 ... suspendPoll ...
    382                 disable_ehm();
    383         } @catchResume@( E * ) { ... }
    384           catch( E & ) { ... }
    385 }
    386 \end{cfa}
    387 \end{tabular}
    388 \end{cquote}
     572In subsequent code examples, the left example is \CC/\uC and the right example is \CFA.
    389573
    390574
    391575\section{Constructor / Destructor}
    392576
     577A constructor/destructor must have a reference to its structure type as the first parameter.
    393578\begin{cquote}
    394579\begin{tabular}{@{}l|l@{}}
     
    419604struct S { int i, j; };
    420605
    421 void @?{}@( S & s ) { s.i = s.j = 3; } $\C[3in]{// default}$
    422 void @?{}@( S & s, int i, int j ) { s.i = i; s.j = j; } $\C{// initializer}$
    423 void @?{}@( S & s, const S rhs ) { s.[i,j] = rhs.[i,j]; } $\C{// copy}$
    424 void @^?{}@( S & s ) { s.i = 0; s.j = 0; } $\C{// destructor}\CRT$
     606void @?{}@( @S & s@ ) { s.i = s.j = 3; } $\C[3in]{// default}$
     607void @?{}@( @S & s@, int i, int j ) { s.i = i; s.j = j; } $\C{// initializer}$
     608void @?{}@( @S & s@, const S rhs ) { ?{}( s, rhs.i, rhs.j ); } $\C{// copy}$
     609void @^?{}@( @S & s@ ) { s.i = 0; s.j = 0; } $\C{// destructor}\CRT$
    425610
    426611S s0;
    427612S s1 = { 1, 2 };
    428 // cannot use 0/1 (zero_t/one_t) with "new"
    429 S * s2 = new( 1@n@, 2 ); // n => (int)
     613// bug, cannot use 0/1 (zero_t/one_t) with "new"
     614S * s2 = new( 0@n@, 2 ); // suffix n => (natural int)
    430615delete( s2 );
    431 s2 = new( 1n, 2 );
     616s2 = new( 1@n@, 2 );
    432617delete( s2 );
    433 S & s3 = *new( 1n, 2 );
     618S & s3 = *new( 2, 2 );
    434619delete( &s3 );
    435 &s3 = &*new( 1n, 2 );
     620&s3 = &*new( 3, 2 );
    436621delete( &s3 );
    437622\end{cfa}
     
    440625
    441626
    442 \section{\texorpdfstring{Structures (object-oriented \protect\vs routine style)}{Structures (object-oriented vs. routine style)}}
    443 
    444 \begin{cquote}
    445 \begin{tabular}{@{}l|l@{}}
    446 \begin{uC++}
    447 struct S {
    448         int i = 0;  // cheat, implicit default constructor
    449         int setter( int j ) { int t = i; i = j; return t; }
    450         int getter() { return i; }
    451 };
    452 S s;
    453 @s.@setter( 3 );  // object calls
    454 int k = @s.@getter();
    455 \end{uC++}
    456 &
    457 \begin{cfa}
    458 struct S {  int i;  };
    459 void ?{}( S & s ) { s.i = 0; } // explicit default constructor
    460 int setter( @S & s,@ int j ) @with( s )@ { int t = i; i = j; return t; }
    461 int getter( @S & s@ ) @with( s )@ { return i; }
    462 
    463 S s;
    464 setter( @s,@ 3 );  // normal calls
    465 int k = getter( @s@ );
    466 \end{cfa}
    467 \end{tabular}
    468 \end{cquote}
    469 
    470 
    471 \section{String}
    472 
    473 \begin{cquote}
    474 \begin{tabular}{@{}l|l@{}}
    475 \multicolumn{2}{@{}l@{}}{\lstinline{string s1, s2;}} \\
    476 \begin{uC++}
    477 s1 = "abcdefg";
    478 s2 = s1;
    479 s1 += s2;
    480 s1 == s2; s1 != s2;
    481 s1 < s2; s1 <= s2; s1 > s2; s1 >= s2;
    482 s1.length();
    483 s1[3];
    484 s1.substr( 2 ); s1.substr( 2, 3 );
    485 s1.replace( 2, 5, s2 );
    486 s1.find( s2 ); s1.rfind( s2 );
    487 s1.find_first_of( s2 ); s1.find_last_of( s2 );
    488 s1.find_first_not_of( s2 ); s1.find_last_not_of( s2 );
    489 getline( cin, s1 );
    490 cout << s1 << endl;
    491 \end{uC++}
    492 &
    493 \begin{cfa}
    494 s1 = "abcdefg";
    495 s2 = s1;
    496 s1 += s2;
    497 s1 == s2; s1 != s2;
    498 s1 < s2; s1 <= s2; s1 > s2; s1 >= s2;
    499 size( s1 );
    500 s1[3];
    501 s1( 2 ); s1( 2, 3 );
    502 // replace( s1, 2, 5, s2 );
    503 // find( s1, s2 ), rfind( s1, s2 );
    504 // find_first_of( s2 ); find_last_of( s2 );
    505 // find_first_not_of( s1, s2 ); find_last_not_of( s1, s2 );
    506 sin | getline( s1 );
    507 sout | s1;
    508 \end{cfa}
    509 \end{tabular}
    510 \end{cquote}
    511 
    512 
    513 \section{\texorpdfstring{\lstinline{uArray}}{uArray}}
    514 
    515 \begin{cquote}
    516 \begin{tabular}{@{}l|l@{}}
    517 \begin{uC++}
    518 #include <iostream>
    519 using namespace std;
    520 struct S {
    521         int i;
    522         S( int i ) { S::i = i; cout << "ctor " << S::i << endl; }
    523         ~S() { S::i = i; cout << "dtor " << S::i << endl; }
    524 };
    525 int main() {
    526         enum { N = 5 };
    527         @uArray( S, s, N );@   // no constructor calls
    528         for ( int i = 0; i < N; i += 1 ) @s[i]( i )@; // constructor calls
    529         for ( int i = 0; i < N; i += 1 ) cout << s[i]@->@i << endl;
    530 }
    531 \end{uC++}
    532 &
    533 \begin{cfa}
    534 #include <fstream.hfa>
    535 #include <array.hfa>
    536 struct S {
    537         int i;
    538 };
    539 void ?{}( S & s, int i ) { s.i = i; sout | "ctor" | s.i; }
    540 void ^?{}( S & s ) { sout | "dtor" | s.i; }
    541 int main() {
    542         enum { N = 5 };
    543         @array( S, N ) s = { delay_init };@ // no constructor calls
    544         for ( i; N ) @s[i]{ i }@; // constructor calls
    545         for ( i; N ) sout | s[i]@.@i;
    546 }
    547 \end{cfa}
    548 \end{tabular}
    549 \end{cquote}
    550 
    551 
    552627\section{Coroutine}
    553628
     
    555630\begin{tabular}{@{}l|ll@{}}
    556631\begin{uC++}
    557 
    558632@_Coroutine@ C {
    559633        // private coroutine fields
     
    648722                val( val ) {}
    649723};
     724\end{uC++}
     725&
     726\begin{cfa}
     727#include <fstream.hfa>
     728#include <mutex_stmt.hfa>
     729#include <actor.hfa>
     730
     731struct StrMsg {
     732        @inline message;@ // derived message
     733        const char * val; // string message
     734};
     735void ?{}( StrMsg & msg, const char * str ) {
     736        @set_allocation( msg, Delete );@ // delete after use
     737        msg.val = str;
     738}
     739\end{cfa}
     740\end{tabular}
     741\begin{tabular}{@{}l|ll@{}}
     742\begin{uC++}
    650743_Actor Hello { ${\color{red}\LstCommentStyle{// : public uActor}}$
    651744        Allocation receive( Message & msg ) {
     
    671764&
    672765\begin{cfa}
    673 #include <fstream.hfa>
    674 #include <mutex_stmt.hfa>
    675 #include <actor.hfa>
    676 
    677 struct StrMsg {
    678         @inline message;@ // derived message
    679         const char * val; // string message
    680 };
    681 void ?{}( StrMsg & msg, const char * str ) {
    682         @set_allocation( msg, Delete );@ // delete after use
    683         msg.val = str;
    684 }
    685766struct Hello { @inline actor;@ }; // derived actor
    686767allocation receive( Hello & receiver, @start_msg_t@ & ) {
     
    760841#include <locks.hfa>
    761842owner_lock m;
    762 condition_variable( owner_lock ) s;  // generic type on mutex lock
     843cond_lock( owner_lock ) s;  // generic type on mutex lock
    763844lock( m );
    764845if ( ! empty( s ) ) wait( s, m );
     
    799880enum { N = 3 };
    800881Barrier b{ N };
    801 
    802 _Task T {
    803         void main() {
    804                 for ( int i = 0; i < 10; i += 1 ) {
    805                         b.block( 1 );
    806                 }
    807         }
    808 };
    809 int main() {
    810         uProcessor p[N - 1];
    811         T t[N];
    812 }
    813882\end{uC++}
    814883&
     
    836905enum { N = 3 };
    837906Barrier b{ N };
    838 
    839 thread T {};
    840 void main( T & ) {
    841         for ( 10 ) {
    842                 block( b, 1 );
    843         }
    844 }
    845 
    846 int main() {
    847         processor p[N - 1];
    848         T t[N];
    849 }
    850 \end{cfa}
    851 \end{tabular}
    852 \end{cquote}
    853 
    854 \newpage
     907\end{cfa}
     908\end{tabular}
     909\end{cquote}
     910
     911
     912\enlargethispage{1000pt}
    855913
    856914\section{Monitor}
     
    921979\end{cquote}
    922980
    923 \enlargethispage{1000pt}
    924 
     981\newpage
    925982\noindent
    926983External Scheduling
  • doc/user/user.tex

    r7ca6bf1 r1dec8f3  
    1111%% Created On       : Wed Apr  6 14:53:29 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Mon Apr 14 20:53:55 2025
    14 %% Update Count     : 7065
     13%% Last Modified On : Wed Sep 17 09:15:48 2025
     14%% Update Count     : 7251
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
     
    6262\setlength{\topmargin}{-0.45in}                                                 % move running title into header
    6363\setlength{\headsep}{0.25in}
     64\setlength{\tabcolsep}{15pt}
    6465
    6566%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
     
    606607
    607608C, \CC, Java and other programming languages have no exponentiation operator\index{exponentiation!operator}\index{operator!exponentiation}, using a routine like \Indexc{pow( x, y )} instead.
    608 Ada, Haskell, Python and other programming languages often use operators ©^© or ©**© for exponentiation.
     609Ada, Haskell, Python and other programming languages have an exponentiation operator, often ©^© or ©**©.
    609610However, neither of these operators works in C as ©^© means exclusive-or and ©**© means double dereference.
    610611Furthermore, using a routine for exponentiation does not match mathematical expectation, \ie ©-x**-y© becomes ©pow( -x, -y )©.
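For example, an illustrative fragment of the C behaviour (a sketch):
\begin{cfa}
int x = 2, y = 3;
x ^ y;			§\C[2.25in]{// exclusive-or giving 1, not exponentiation giving 8}§
pow( x, y );	§\C{// routine call, not an operator}\CRT§
\end{cfa}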
     
    703704In addition, inclusive ranges are allowed using symbol ©~© to specify a contiguous set of case values, both positive and negative.
    704705\begin{cquote}
    705 \setlength{\tabcolsep}{15pt}
    706706\begin{tabular}{@{}llll@{}}
    707707\multicolumn{1}{c}{\textbf{C}}  & \multicolumn{1}{c}{\textbf{\CFA}}     & \multicolumn{1}{c}{\textbf{©gcc©}}    \\
     
    882882\end{enumerate}
    883883
    884 Before discussing potential language changes to deal with these problems, it is worth observing that in a typical C program:
     884Before discussing language changes to deal with these problems, it is worth observing that in a typical C program:
    885885\begin{itemize}
    886886\item
     
    902902\end{cfa}
    903903still works.
    904 Nevertheless, reversing the default action would have a non-trivial effect on case actions that compound, such as the above example of processing shell arguments.
    905 Therefore, to preserve backwards compatibility, it is necessary to introduce a new kind of ©switch© statement, called \Indexc{choose}, with no implicit fall-through semantics and an explicit fall-through if the last statement of a case-clause ends with the new keyword \Indexc{fallthrough}, \eg:
     904Nevertheless, reversing the default action would have a non-trivial effect on case actions that compound, such as the above example of processing command-line arguments.
     905To preserve backwards compatibility, a new kind of ©switch© statement, called \Indexc{choose}, is introduced, with no implicit fall-through semantics and an explicit fall-through if the last statement of a case-clause ends with the new keyword \Indexc{fallthrough}, \eg:
    906906\begin{cfa}
    907907®choose® ( i ) {
     
    931931\item
    932932Dealing with unreachable code in a ©switch©/©choose© body is solved by restricting declarations and initialization to the start of the statement body, which is executed \emph{before} the transfer to the appropriate ©case© clause\footnote{
    933 Essentially, these declarations are hoisted before the ©switch©/©choose© statement and both declarations and statement are surrounded by a compound statement.} and precluding statements before the first ©case© clause.
     933These declarations are hoisted before the ©switch©/©choose© statement and both declarations and statement are surrounded by a compound statement.} and precluding statements before the first ©case© clause.
    934934Further declarations at the same nesting level as the statement body are disallowed to ensure every transfer into the body is sound.
    935935\begin{cfa}
     
    10051005\end{tabular}
    10061006\end{cquote}
    1007 The target label must be below the \Indexc{fallthrough} and may not be nested in a control structure, and
    1008 the target label must be at the same or higher level as the containing \Indexc{case} clause and located at
    1009 the same level as a ©case© clause; the target label may be case \Indexc{default}, but only associated
    1010 with the current \Indexc{switch}/\Indexc{choose} statement.
     1007The target label must be below the \Indexc{fallthrough} and may not be nested in a control structure; it must be at the same or higher level as the containing \Indexc{case} clause and located at the same level as a ©case© clause;
     1008the target label may be case \Indexc{default}, but only associated with the current \Indexc{switch}/\Indexc{choose} statement.
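For example, a sketch of falling through to the ©default© clause (assuming ©fallthrough default© names the target):
\begin{cfa}
®choose® ( i ) {
	case 1: ... ®fallthrough default®;
	case 2: ... ;
	default: ... ; // target of the fallthrough
}
\end{cfa}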
    10111009
    10121010
    10131011\subsection{Loop Control}
    10141012
     1013\CFA condenses loop constructs to facilitate coding speed and safety.
     1014
     1015To simplify creating an infinite loop, the \Indexc{for}, \Indexc{while}, and \Indexc{do} loop-predicate\index{loop predicate} is extended with an empty conditional, meaning a comparison value of ©1© (true).
     1016\begin{cfa}
     1017while ( )                               §\C{// while ( true )}§
     1018for ( )                                 §\C{// for ( ; true; )}§
     1019do ... while ( )                §\C{// do ... while ( true )}§
     1020\end{cfa}
     1021
    10151022Looping a predefined number of times, possibly with a loop index, occurs frequently.
    1016 \CFA condenses writing loops to facilitate coding speed and safety.
    1017 
    1018 \Indexc{for}, \Indexc{while}, and \Indexc{do} loop-control\index{loop control} are extended with an empty conditional, meaning a comparison value of ©1© (true).
    1019 \begin{cfa}
    1020 while ( ®/* empty */®  )                                §\C{// while ( true )}§
    1021 for ( ®/* empty */®  )                                  §\C{// for ( ; true; )}§
    1022 do ... while ( ®/* empty */®  )                 §\C{// do ... while ( true )}§
    1023 \end{cfa}
    1024 
    10251023The ©for© control\index{for control}, \ie ©for ( /* control */ )©, is extended with a range and step.
    10261024A range is a set of values defined by an optional low value (default to 0), tilde, and high value, ©L ~ H©, with an optional step ©~ S© (default to 1), which means an ascending set of values from ©L© to ©H© in positive steps of ©S©.
     
    10311029\end{cfa}
    10321030\R{Warning}: A range in descending order, \eg ©5 ~ -3©, is the null (empty) set, \ie no values in the set.
    1033 \R{Warning}: A ©0© or negative step is undefined.
    1034 Note, the order of values in a set may not be the order the values are presented during looping.
     1031As well, a ©0© or negative step is undefined.
    10351032
    10361033The range character, ©'~'©, is decorated on the left and right to control how the set values are presented in the loop body.
    10371034The range character can be prefixed with ©'+'© or ©'-'© indicating the \emph{direction} the range is scanned, \ie from left to right (ascending) or right to left (descending).
    1038 Ascending stepping uses operator \Indexc{+=};
    1039 descending stepping uses operator \Indexc{-=}.
     1035Ascending uses operator \Indexc{+=};
     1036descending uses operator \Indexc{-=}.
    10401037If there is no prefix character, it defaults to ©'+'©.
    10411038\begin{cfa}
    10421039-8 ®§\Sp§®~ -2                                                  §\C{// ascending, no prefix}§
    104310400 ®+®~ 5                                                                §\C{// ascending, prefix}§
    1044 -3 ®-®~ 3                                                               §\C{// descending
     1041-3 ®-®~ 3                                                               §\C{// descending, prefix}§
    10451042\end{cfa}
    10461043For descending iteration, the ©L© and ©H© values are \emph{implicitly} switched, and the increment/decrement for ©S© is toggled.
    1047 When changing the iteration direction, this form is faster and safer, \ie the direction prefix can be added/removed without changing existing (correct) program text.
     1044Hence, the order of values in a set may not be the order the values are presented during looping.
     1045Changing the iteration direction is faster and safer because the direction prefix can be added/removed without changing existing (correct) range information.
    10481046\R{Warning}: reversing the range endpoints for descending order results in an empty set.
    10491047\begin{cfa}
     
    10581056\index{-\~}\index{descending exclusive range}
    10591057\index{-\~=}\index{descending inclusive range}
     1058
     1059\begin{comment}
     1060To simplify loop iteration a range is provided, from low to high, and a traversal direction, ascending (©+©) or descending (©-©).
     1061The following is the syntax for the loop range, where ©[©\,©]© means optional.
     1062\begin{cfa}[deletekeywords=default]
     1063[ ®index ;® ] [ [ ®min® (default 0) ] [ direction ®+®/®-® (default +) ] ®~® [ ®=® (include endpoint) ] ] ®max® [ ®~ increment® ]
     1064\end{cfa}
     1065For ©=©, the range includes the endpoint (©max©/©min©) depending on the direction (©+©/©-©).
     1066\end{comment}
    10601067
    10611068©for© control is formalized by the following regular expression:
     
    12791286
    12801287
     1288
    12811289\end{cfa}
    12821290&
    12831291\begin{cfa}
    12841292int main() {
     1293        sout | nlOff;
    12851294        for ( S i = 0; i < (S){10,10}; i += 1 ) { sout | i; } sout | "A" | nl; // C
    12861295        for ( S i; 0 ~ (S){10,10} ) { sout | i; } sout | "B" | nl; // CFA
     
    14171426The following example is a linear search for the key 3 in an array, where finding the key is handled with a ©break© and not finding with the ©else© clause on the loop construct.
    14181427\begin{cquote}
    1419 \begin{cfa}
    1420 int a[10];
    1421 \end{cfa}
    1422 \begin{tabular}{@{}l@{\hspace{3em}}l@{\hspace{3em}}l@{}}
     1428\begin{tabular}{@{}lll@{}}
     1429\multicolumn{2}{@{}l@{}}{\lstinline{int a[10]}} \\
    14231430\begin{cfa}
    14241431
     
    24222429\label{s:stringType}
    24232430
    2424 The \CFA \Indexc{string} type is for manipulation of dynamically-size character-strings versus C \Indexc{char *} type for manipulation of statically-size null-terminated character-strings.
    2425 That is, the amount of storage for a \CFA string changes dynamically at runtime to fit the string size, whereas the amount of storage for a C string is fixed at compile time.
    2426 Hence, a ©string© declaration does not specify a maximum length;
    2427 as a string dynamically grows and shrinks in size, so does its underlying storage.
    2428 In contrast, a C string also dynamically grows and shrinks is size, but its underlying storage is fixed.
     2431A string is a sequence of symbols, where the form of a symbol can vary significantly: regular 7/8-bit ASCII/Latin-1, wide 2/4/8-byte UNICODE, or variable-length UTF-8/16/32.
     2432A C character string is zero or more regular, wide, or escape characters enclosed in double-quotes ©"xyz\n"©.
     2433Currently, \CFA strings only support regular characters.
     2434
     2435A string type is designed to operate on groups of characters for assigning, copying, scanning, and updating, rather than working with individual characters.
     2436The \CFA \Indexc{string} type is for manipulation of dynamically-sized strings versus C \Indexc{char *} type for manipulation of statically-sized null-terminated strings.
     2437Therefore, the amount of storage for a \CFA string changes dynamically at runtime to fit the string size, whereas the amount of storage for a C string is fixed at compile time.
     2438As a result, a ©string© declaration does not specify a maximum length, whereas a C string array does.
     2439For \CFA, as a ©string© dynamically grows and shrinks in size, so does its underlying storage.
     2440For C, a string may dynamically grow and shrink in size, but its underlying storage does not.
    24292441The maximum storage for a \CFA ©string© value is ©size_t© characters, which is $2^{32}$ or $2^{64}$ on 32/64-bit systems, respectively.
    24302442A \CFA string manages its length separately from the string, so there is no null (©'\0'©) terminating value at the end of a string value.
    2431 Hence, a \CFA string cannot be passed to a C string manipulation routine, such as ©strcat©.
    2432 Like C strings, the characters in a ©string© are numbered starting from 0.
    2433 
    2434 The following operations have been defined to manipulate an instance of type ©string©.
    2435 The discussion assumes the following declarations and assignment statements are executed.
    2436 \begin{cfa}
    2437 #include ®<string.hfa>®
    2438 ®string® s, peter, digit, alpha, punctuation, ifstmt;
    2439 int i;
    2440 peter  = "PETER";
    2441 digit  = "0123456789";
    2442 punctuation = "().,";
    2443 ifstmt = "IF (A > B) {";
    2444 \end{cfa}
    2445 Note, the include file \Indexc{string.hfa} to access type ©string©.
    2446 
    2447 
    2448 \subsection{Implicit String Conversions}
    2449 
    2450 The types ©char©, ©char *©, ©int©, ©double©, ©_Complex©, including different signness and sizes, implicitly convert to type ©string©.
    2451 \VRef[Figure]{f:ImplicitConversionsString} shows examples of implicit conversions between C strings, integral, floating-point and complex types to ©string©.
    2452 A conversions can be explicitly specified:
    2453 \begin{cfa}
    2454 s = string( "abc" );                            §\C{// converts char * to string}§
    2455 s = string( 5 );                                        §\C{// converts int to string}§
    2456 s = string( 5.5 );                                      §\C{// converts double to string}§
    2457 \end{cfa}
    2458 All conversions from ©string© to ©char *©, attempt to be safe:
    2459 either by requiring the maximum length of the ©char *© storage (©strncpy©) or allocating the ©char *© storage for the string characters (ownership), meaning the programmer must free the storage.
    2460 As well, a string is always null terminates, implying a minimum size of 1 character.
     2443Hence, a \CFA string cannot be passed to a C string manipulation function, such as ©strcat©.
     2444Like C strings, characters in a ©string© are numbered from the left starting at 0 (because subscripting is zero-origin), and in \CFA numbered from the right starting at -1.
    24612445\begin{cquote}
    2462 \begin{tabular}{@{}l@{\hspace{1.75in}}|@{\hspace{15pt}}l@{}}
    2463 \begin{cfa}
    2464 string s = "abcde";
    2465 char cs[3];
    2466 strncpy( cs, s, sizeof(cs) );           §\C{sout | cs;}§
    2467 char * cp = s;                                          §\C{sout | cp;}§
    2468 delete( cp );
    2469 cp = s + ' ' + s;                                       §\C{sout | cp;}§
    2470 delete( cp );
    2471 \end{cfa}
    2472 &
    2473 \begin{cfa}
    2474 
    2475 
    2476 ab
    2477 abcde
    2478 
    2479 abcde abcde
    2480 
    2481 \end{cfa}
     2446\rm
     2447\begin{tabular}{@{}rrrrll@{}}
     2448\small\tt "a & \small\tt b & \small\tt c & \small\tt d & \small\tt e" \\
     24490 & 1 & 2 & 3 & 4 & left to right index \\
     2450-5 & -4 & -3 & -2 & -1 & right to left index
    24822451\end{tabular}
    24832452\end{cquote}
    2484 
    2485 \begin{figure}
    2486 \begin{tabular}{@{}l@{\hspace{15pt}}|@{\hspace{15pt}}l@{}}
    2487 \begin{cfa}
    2488 //      string s = 5;                                   sout | s;
    2489         string s;
    2490         // conversion of char and char * to string
    2491         s = 'x';                                                §\C{sout | s;}§
    2492         s = "abc";                                              §\C{sout | s;}§
    2493         char cs[5] = "abc";
    2494         s = cs;                                                 §\C{sout | s;}§
    2495         // conversion of integral, floating-point, and complex to string
    2496         s = 45hh;                                               §\C{sout | s;}§
    2497         s = 45h;                                                §\C{sout | s;}§
    2498         s = -(ssize_t)MAX - 1;                  §\C{sout | s;}§
    2499         s = (size_t)MAX;                                §\C{sout | s;}§
    2500         s = 5.5;                                                §\C{sout | s;}§
    2501         s = 5.5L;                                               §\C{sout | s;}§
    2502         s = 5.5+3.4i;                                   §\C{sout | s;}§
    2503         s = 5.5L+3.4Li;                                 §\C{sout | s;}§
    2504 \end{cfa}
    2505 &
    2506 \begin{cfa}
    2507 
    2508 
    2509 
    2510 x
    2511 abc
    2512 
    2513 abc
    2514 
    2515 45
    2516 45
    2517 -9223372036854775808
    2518 18446744073709551615
    2519 5.5
    2520 5.5
    2521 5.5+3.4i
    2522 5.5+3.4i
     2453The include file \Indexc{string.hfa} is necessary to access type ©string©.
     2454
     2455
     2456\subsection{Implicit String Conversions}
     2457
     2458The ability to convert from internal (machine) to external (human) format is useful in situations other than I/O.
      2459Hence, the basic types ©char©, ©char *©, ©int©, ©double©, ©_Complex©, including any signedness and size variations, implicitly convert to type ©string© (as in Java).
     2460\begin{cquote}
     2461\begin{tabular}{@{}l|ll|l@{}}
     2462\begin{cfa}
     2463string s = 5;
     2464s = 'x';
     2465s = "abc";
     2466s = 42hh;               /* signed char */
     2467s = 42h;                /* short int */
     2468s = 0xff;
     2469\end{cfa}
     2470&
     2471\begin{cfa}
     2472"5"
     2473"x"
     2474"abc"
     2475"42"
     2476"42"
     2477"255"
     2478\end{cfa}
     2479&
     2480\begin{cfa}
     2481s = (ssize_t)MIN;
     2482s = (size_t)MAX;
     2483s = 5.5;
     2484s = 5.5L;
     2485s = 5.5+3.4i;
     2486s = 5.5L+3.4Li;
     2487\end{cfa}
     2488&
     2489\begin{cfa}
     2490"-9223372036854775808"
     2491"18446744073709551615"
     2492"5.5"
     2493"5.5"
     2494"5.5+3.4i"
     2495"5.5+3.4i"
    25232496\end{cfa}
    25242497\end{tabular}
    2525 \caption{Implicit Conversions to String}
    2526 \label{f:ImplicitConversionsString}
    2527 \end{figure}
    2528 
    2529 
    2530 \subsection{Size (length)}
    2531 
    2532 The ©size© operation returns the length of a string.
    2533 \begin{cfa}
    2534 i = size( "" );                                         §\C{// i is assigned 0}§
    2535 i = size( "abc" );                                      §\C{// i is assigned 3}§
    2536 i = size( peter );                                      §\C{// i is assigned 5}§
    2537 \end{cfa}
     2498\end{cquote}
     2499Conversions can be explicitly specified using a compound literal.
     2500\begin{cfa}
     2501s = (string){ 5 };    s = (string){ "abc" };   s = (string){ 5.5 };
     2502\end{cfa}
     2503
     2504Conversions from ©string© to ©char *© attempt to be safe.
     2505The ©strncpy© conversion requires the maximum length for the pointer's target buffer.
     2506The assignment operator and constructor both allocate the buffer and return its address, meaning the programmer must free it.
     2507Note, a C string is always null terminated, implying storage is always necessary for the null.
     2508\begin{cquote}
     2509\begin{tabular}{@{}l|l@{}}
     2510\begin{cfa}
     2511string s = "abcde";
     2512char cs[4];
     2513strncpy( cs, s, sizeof(cs) );
     2514char * cp = s;          // ownership
     2515delete( cp );
     2516cp = s + ' ' + s;       // ownership
     2517delete( cp );
     2518\end{cfa}
     2519&
     2520\begin{cfa}
     2521
     2522
     2523"abc\0", in place
     2524"abcde\0", malloc
     2525
     2526"abcde abcde\0", malloc
     2527
     2528\end{cfa}
     2529\end{tabular}
     2530\end{cquote}
     2531
     2532
     2533\subsection{Length}
     2534
     2535The ©len© operation (short for ©strlen©) returns the length of a C or \CFA string.
     2536For compatibility, ©strlen© works with \CFA strings.
     2537\begin{cquote}
     2538\begin{tabular}{@{}l|l@{}}
     2539\begin{cfa}
     2540i = len( "" );
     2541i = len( "abc" );
     2542i = len( cs );
     2543i = strlen( cs );
     2544i = len( name );
     2545i = strlen( name );
     2546\end{cfa}
     2547&
     2548\begin{cfa}
     25490
     25503
     25513
     25523
     25534
     25544
     2555\end{cfa}
     2556\end{tabular}
     2557\end{cquote}
    25382558
    25392559
    25402560\subsection{Comparison Operators}
    25412561
    2542 The binary \Index{relational operator}s, ©<©, ©<=©, ©>©, ©>=©, and \Index{equality operator}s, ©==©, ©!=©, compare strings using lexicographical ordering, where longer strings are greater than shorter strings.
      2562The binary relational\index{string!relational operators}, \Indexc{<}, \Indexc{<=}, \Indexc{>}, \Indexc{>=}, and equality\index{string!equality operators}, \Indexc{==}, \Indexc{!=}, operators compare \CFA strings using lexicographical ordering, where a string is greater than any of its proper prefixes.
      2563In C, these operators compare the C string pointer, not its value, which does not match programmer expectation.
     2564C strings use function ©strcmp© to lexicographically compare the string value.
     2565Java has the same issue with ©==© and ©.equals©.
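As a brief sketch of value comparison (variables are illustrative):
\begin{cfa}
string s1 = "abc", s2 = "abd", s3 = "ab";
if ( s1 < s2 ) ...                      §\C{// true, 'c' < 'd'}§
if ( s3 < s1 ) ...                      §\C{// true, prefix "ab" is less than "abc"}§
if ( s1 == s2 ) ...                     §\C{// false, values differ}§
\end{cfa}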
    25432566
    25442567
    25452568\subsection{Concatenation}
    25462569
    2547 The binary operators \Indexc{+} and \Indexc{+=} concatenate two strings, creating the sum of the strings.
    2548 \begin{cfa}
    2549 s = peter + ' ' + digit;                        §\C{// s is assigned "PETER 0123456789"}§
    2550 s += peter;                                                     §\C{// s is assigned "PETER 0123456789PETER"}§
    2551 \end{cfa}
     2570The binary operators \Indexc{+} and \Indexc{+=} concatenate C ©char©, ©char *© and \CFA strings, creating the sum of the characters.
     2571\begin{cquote}
     2572\begin{tabular}{@{}l|l@{\hspace{15pt}}l|l@{\hspace{15pt}}l|l@{}}
     2573\begin{cfa}
     2574s = "";
     2575s = 'a' + 'b';
     2576s = 'a' + "b";
     2577s = "a" + 'b';
     2578s = "a" + "b";
     2579\end{cfa}
     2580&
     2581\begin{cfa}
     2582
     2583"ab"
     2584"ab"
     2585"ab"
     2586"ab"
     2587\end{cfa}
     2588&
     2589\begin{cfa}
     2590s = "";
     2591s = 'a' + 'b' + s;
     2592s = 'a' + 'b' + s;
     2593s = 'a' + "b" + s;
     2594s = "a" + 'b' + s;
     2595\end{cfa}
     2596&
     2597\begin{cfa}
     2598
     2599"ab"
     2600"abab"
     2601"ababab"
     2602"abababab"
     2603\end{cfa}
     2604&
     2605\begin{cfa}
     2606s = "";
     2607s = s + 'a' + 'b';
     2608s = s + 'a' + "b";
     2609s = s + "a" + 'b';
     2610s = s + "a" + "b";
     2611\end{cfa}
     2612&
     2613\begin{cfa}
     2614
     2615"ab"
     2616"abab"
     2617"ababab"
     2618"abababab"
     2619\end{cfa}
     2620\end{tabular}
     2621\end{cquote}
     2622However, including ©<string.hfa>© can result in ambiguous uses of the overloaded ©+© operator.\footnote{Combining multiple packages in any programming language can result in name clashes or ambiguities.}
     2623For example, subtracting characters or pointers has valid use-cases:
     2624\begin{cfa}
     2625ch - '0'        §\C[2in]{// find character offset}§
     2626cs - cs2;       §\C{// find pointer offset}\CRT§
     2627\end{cfa}
      2628whereas addition is less obvious:
     2629\begin{cfa}
     2630ch + 'b'        §\C[2in]{// add character values}§
     2631cs + 'a';       §\C{// move pointer cs['a']}\CRT§
     2632\end{cfa}
     2633There are legitimate use cases for arithmetic with ©signed©/©unsigned© characters (bytes), and these types are treated differently from ©char© in \CC and \CFA.
     2634However, backwards compatibility makes it impossible to restrict or remove addition on type ©char©.
     2635Similarly, it is impossible to restrict or remove addition on type ©char *© because (unfortunately) it is subscripting: ©cs + 'a'© implies ©cs['a']© or ©'a'[cs]©.
     2636
      2637The prior \CFA concatenation examples show that complex mixed-mode interactions among ©char©, ©char *©, and ©string© constants work correctly (variables behave the same).
     2638The reason is that the \CFA type-system handles this kind of overloading well using the left-hand assignment-type and complex conversion costs.
     2639Hence, the type system correctly handles all uses of addition (explicit or implicit) for ©char *©.
     2640\begin{cfa}
     2641printf( "%s %s %s %c %c\n", "abc", cs, cs + 3, cs['a'], 'a'[cs] );
     2642\end{cfa}
     2643Only ©char© addition can result in ambiguities, and only when there is no left-hand information.
     2644\begin{cfa}
     2645ch = ch + 'b';          §\C[2in]{// LHS disambiguate, add character values}§
     2646s = 'a' + 'b';          §\C{// LHS disambiguate, concatenate characters}§
     2647printf( "%c\n", ®'a' + 'b'® ); §\C{// no LHS information, ambiguous}§
     2648printf( "%c\n", ®(return char)®('a' + 'b') ); §\C{// disambiguate with ascription cast}\CRT§
     2649\end{cfa}
     2650The ascription cast, ©(return T)©, disambiguates by stating a (LHS) type to use during expression resolution (not a conversion).
     2651Fortunately, character addition without LHS information is rare in C/\CFA programs, so repurposing the operator ©+© for ©string© types is not a problem.
      2652Note, other programming languages that repurpose ©+© for concatenation can have similar ambiguity issues.
     2653
     2654Interestingly, \CC cannot support this generality because it does not use the left-hand side of assignment in expression resolution.
     2655While it can special case some combinations:
     2656\begin{C++}
     2657s = 'a' + s; §\C[2in]{// compiles in C++}§
     2658s = "a" + s;
     2659\end{C++}
     2660it cannot generalize to any number of steps:
     2661\begin{C++}
     2662s = 'a' + 'b' + s; §\C{// does not compile in C++}\CRT§
     2663s = "a" + "b" + s;
     2664\end{C++}
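In contrast, the equivalent \CFA chains resolve using the LHS type, as shown in the concatenation table above:
\begin{cfa}
s = 'a' + 'b' + s;                      §\C[2in]{// compiles in \CFA}§
s = "a" + "b" + s;                      §\C{// compiles in \CFA}\CRT§
\end{cfa}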
    25522665
    25532666
     25542667\subsection{Repetition}
    25552668
    25562669The binary operators \Indexc{*} and \Indexc{*=} repeat a string $N$ times.
    2557 If $N = 0$, a zero length string, ©""© is returned.
    2558 \begin{cfa}
    2559 s = 'x' * 3;                            §\C{// s is assigned "PETER PETER PETER "}§
    2560 s = (peter + ' ') * 3;                          §\C{// s is assigned "PETER PETER PETER "}§
    2561 \end{cfa}
     2670If $N = 0$, a zero length string, ©""©, is returned.
     2671\begin{cquote}
     2672\begin{tabular}{@{}l|l@{}}
     2673\begin{cfa}
     2674s = 'x' * 0;
     2675s = 'x' * 3;
     2676s = "abc" * 3;
     2677s = ("PETER" + ' ') * 3;
     2678\end{cfa}
     2679&
     2680\begin{cfa}
     2681""
     2682"xxx"
     2683"abcabcabc"
     2684"PETER PETER PETER "
     2685\end{cfa}
     2686\end{tabular}
     2687\end{cquote}
     2688Like concatenation, there is a potential ambiguity with multiplication of characters;
     2689multiplication of pointers does not exist in C.
     2690\begin{cfa}
     2691ch = ch * 3;            §\C[2in]{// LHS disambiguate, multiply character values}§
     2692s = 'a' * 3;            §\C{// LHS disambiguate, concatenate characters}§
     2693printf( "%c\n", ®'a' * 3® ); §\C{// no LHS information, ambiguous}§
     2694printf( "%c\n", ®(return char)®('a' * 3) ); §\C{// disambiguate with ascription cast}\CRT§
     2695\end{cfa}
     2696Fortunately, character multiplication without LHS information is even rarer than addition, so repurposing the operator ©*© for ©string© types is not a problem.
    25622697
    25632698
    25642699\subsection{Substring}
    2565 The substring operation returns a subset of the string starting at a position in the string and traversing a length.
    2566 \begin{cfa}
    2567 s = peter( 2, 3 );                                      §\C{// s is assigned "ETE"}§
    2568 s = peter( 4, -3 );                                     §\C{// s is assigned "ETE", length is opposite direction}§
    2569 s = peter( 2, 8 );                                      §\C{// s is assigned "ETER", length is clipped to 4}§
    2570 s = peter( 0, -1 );                                     §\C{// s is assigned "", beyond string so clipped to null}§
    2571 s = peter(-1, -1 );                                     §\C{// s is assigned "R", start and length are negative}§
    2572 \end{cfa}
    2573 A negative starting position is a specification from the right end of the string.
     2700
     2701The substring operation returns a subset of a string starting at a position in the string and traversing a length, or matching a pattern string.
     2702\begin{cquote}
     2703\setlength{\tabcolsep}{10pt}
     2704\begin{tabular}{@{}l|ll|l@{}}
     2705\multicolumn{2}{@{}c}{\textbf{length}} & \multicolumn{2}{c@{}}{\textbf{pattern}} \\
     2706\multicolumn{4}{@{}l}{\lstinline{string name = "PETER"};} \\
     2707\begin{cfa}
     2708s = name( 0, 4 );
     2709s = name( 1, 4 );
     2710s = name( 2, 4 );
     2711s = name( 4, -2 );
     2712s = name( 8, 2 );
     2713s = name( 0, -2 );
     2714s = name( -1, -2 );
     2715s = name( -3 );
     2716\end{cfa}
     2717&
     2718\begin{cfa}
     2719"PETE"
     2720"ETER"
     2721"TER"   // clip length to 3
     2722"ER"
     2723""                 // beyond string to right, clip to null
     2724""                 // beyond string to left, clip to null
     2725"ER"
     2726"TER"   // to end of string
     2727\end{cfa}
     2728&
     2729\begin{cfa}
     2730s = name( "ET" );
     2731s = name( "WW" );
     2732
     2733
     2734
     2735
     2736
     2737
     2738\end{cfa}
     2739&
     2740\begin{cfa}
     2741"ET"
     2742""  // does not occur
     2743
     2744
     2745
     2746
     2747
     2748
     2749\end{cfa}
     2750\end{tabular}
     2751\end{cquote}
     2752For the length form, a negative starting position is a specification from the right end of the string.
    25742753A negative length means that characters are selected in the opposite (right to left) direction from the starting position.
    25752754If the substring request extends beyond the beginning or end of the string, it is clipped (shortened) to the bounds of the string.
    2576 If the substring request is completely outside of the original string, a null string located at the end of the original string is returned.
    2577 The substring operation can also appear on the left hand side of the assignment operator.
    2578 The substring is replaced by the value on the right hand side of the assignment.
    2579 The length of the right-hand-side value may be shorter, the same length, or longer than the length of the substring that is selected on the left hand side of the assignment.
    2580 \begin{cfa}
    2581 digit( 3, 3 ) = "";                             §\C{// digit is assigned "0156789"}§
    2582 digit( 4, 3 ) = "xyz";                          §\C{// digit is assigned "015xyz9"}§
    2583 digit( 7, 0 ) = "***";                          §\C{// digit is assigned "015xyz***9"}§
    2584 digit(-4, 3 ) = "$$$";                          §\C{// digit is assigned "015xyz\$\$\$9"}§
    2585 \end{cfa}
     2755If the substring request is completely outside of the original string, a null string is returned.
      2756For the pattern form, it returns the pattern string if the pattern matches, or a null string if the pattern does not match.
     2757The usefulness of this mechanism is discussed next.
     2758
     2759The substring operation can appear on the left side of assignment, where it defines a replacement substring.
      2760The length of the right string may be shorter, the same, or longer than the length of the left string.
     2761Hence, the left string may decrease, stay the same, or increase in length.
     2762\begin{cquote}
     2763\begin{tabular}{@{}l|l@{}}
     2764\multicolumn{2}{@{}l}{\lstinline{string digit = "0123456789"};} \\
     2765\begin{cfa}[escapechar={}]
     2766digit( 3, 3 ) = "";
     2767digit( 4, 3 ) = "xyz";
     2768digit( 7, 0 ) = "***";
     2769digit(-4, 3 ) = "$$$";
     2770digit( 5 ) = "LLL";
     2771\end{cfa}
     2772&
     2773\begin{cfa}[escapechar={}]
     2774"0126789"
     2775"0126xyz"
     2776"0126xyz"
     2777"012$$$z"
     2778"012$$LLL"
     2779\end{cfa}
     2780\end{tabular}
     2781\end{cquote}
     2782Now substring pattern matching is useful on the left-hand side of assignment.
     2783\begin{cquote}
     2784\begin{tabular}{@{}l|l@{}}
     2785\begin{cfa}[escapechar={}]
     2786digit( "$$" ) = "345";
     2787digit( "LLL") = "6789";
     2788\end{cfa}
     2789&
     2790\begin{cfa}
     2791"012345LLL"
     2792"0123456789"
     2793\end{cfa}
     2794\end{tabular}
     2795\end{cquote}
     2796The ©replace© operation extends substring to substitute all occurrences.
     2797\begin{cquote}
     2798\begin{tabular}{@{}l|l@{}}
     2799\begin{cfa}
     2800s = replace( "PETER", "E", "XX" );
     2801s = replace( "PETER", "ET", "XX" );
     2802s = replace( "PETER", "W", "XX" );
     2803\end{cfa}
     2804&
     2805\begin{cfa}
     2806"PXXTXXR"
     2807"PXXER"
     2808"PETER"
     2809\end{cfa}
     2810\end{tabular}
     2811\end{cquote}
      2812The replacement is done left-to-right, and substituted text is not examined for further replacement.
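For example, a minimal sketch of the no-re-examination rule:
\begin{cfa}
s = replace( "aaa", "aa", "a" );        §\C{// "aa", the substituted "a" is not rescanned against the trailing "a"}§
\end{cfa}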
     2813
     2814
     2815\subsection{Searching}
     2816
     2817The ©find© operation returns the position of the first occurrence of a key in a string.
     2818If the key does not appear in the string, the length of the string is returned.
     2819\begin{cquote}
     2820\begin{tabular}{@{}l|l@{}}
     2821\multicolumn{2}{@{}l}{\lstinline{string digit = "0123456789";}} \\
     2822\begin{cfa}
     2823i = find( digit, '3' );
     2824i = find( digit, "45" );
     2825i = find( digit, "abc" );
     2826\end{cfa}
     2827&
     2828\begin{cfa}
     28293
     28304
     283110
     2832\end{cfa}
     2833\end{tabular}
     2834\end{cquote}
     2835
     2836A character-class operation indicates if a string is composed completely of a particular class of characters, \eg, alphabetic, numeric, vowels, \etc.
     2837\begin{cquote}
     2838\begin{tabular}{@{}l|l@{}}
     2839\multicolumn{2}{@{}l}{\lstinline{charclass vowels\{ "aeiouy" \};}} \\
     2840\begin{cfa}
     2841i = include( "aaeiuyoo", vowels );
     2842i = include( "aabiuyoo", vowels );
     2843\end{cfa}
     2844&
     2845\begin{cfa}
     28468  // compliant
     28472  // b non-compliant
     2848\end{cfa}
     2849\end{tabular}
     2850\end{cquote}
     2851©vowels© defines a character class and function ©include© checks if all characters in the string appear in the class (compliance).
      2852The length of the string is returned if the string is compliant; otherwise, the position of the first non-compliant character is returned.
     2853There is no relationship between the order of characters in the two strings.
     2854Function ©exclude© is the reverse of ©include©, checking if all characters in the string are excluded from the class (compliance).
     2855\begin{cquote}
     2856\begin{tabular}{@{}l|l@{}}
     2857\begin{cfa}
     2858i = exclude( "cdbfghmk", vowels );
     2859i = exclude( "cdyfghmk", vowels );
     2860\end{cfa}
     2861&
     2862\begin{cfa}
     28638  // compliant
     28642  // y non-compliant
     2865\end{cfa}
     2866\end{tabular}
     2867\end{cquote}
     2868Both forms can return the longest substring of compliant characters.
     2869\begin{cquote}
     2870\begin{tabular}{@{}l|l@{}}
     2871\begin{cfa}
     2872s = include( "aaeiuyoo", vowels );
     2873s = include( "aabiuyoo", vowels );
     2874s = exclude( "cdbfghmk", vowels );
     2875s = exclude( "cdyfghmk", vowels );
     2876\end{cfa}
     2877&
     2878\begin{cfa}
     2879"aaeiuyoo"
     2880"aa"
     2881"cdbfghmk"
     2882"cd"
     2883\end{cfa}
     2884\end{tabular}
     2885\end{cquote}
     2886
      2887There are also versions of ©include© and ©exclude©, returning a position or string, taking a validation function, like one of the C character-class functions.\footnote{It is part of the heritage of C that these functions take and return an \lstinline{int} rather than a \lstinline{bool}, which affects the function type.}
     2888\begin{cquote}
     2889\begin{tabular}{@{}l|l@{}}
     2890\begin{cfa}
     2891i = include( "1FeC34aB", ®isxdigit® );
     2892i = include( ".,;'!\"", ®ispunct® );
     2893i = include( "XXXx", ®isupper® );
     2894\end{cfa}
     2895&
     2896\begin{cfa}
     28978   // compliant
     28986   // compliant
     28993   // non-compliant
     2900\end{cfa}
     2901\end{tabular}
     2902\end{cquote}
     2903These operations perform an \emph{apply} of the validation function to each character, where the function returns a boolean indicating a stopping condition for the search.
      2904The length of the string is returned if the string is compliant; otherwise, the position of the first non-compliant character is returned.
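A user-defined validation function follows the same C-style signature; function ©isvowel© below is illustrative, not part of the library:
\begin{cfa}
int isvowel( int c ) { return c == 'a' || c == 'e' || c == 'i' || c == 'o' || c == 'u' || c == 'y'; }
i = include( "aeiou", isvowel );        §\C{// 5, compliant}§
i = include( "aebcd", isvowel );        §\C{// 2, 'b' non-compliant}§
\end{cfa}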
     2905
      2906The ©translate© operation returns a string with each character transformed by one of the C character transformation functions.
     2907\begin{cquote}
     2908\begin{tabular}{@{}l|l@{}}
     2909\begin{cfa}
     2910s = translate( "abc", ®toupper® );
     2911s = translate( "ABC", ®tolower® );
     2912int tospace( int c ) { return isspace( c ) ? ' ' : c; }
     2913s = translate( "X X\tX\nX", ®tospace® );
     2914\end{cfa}
     2915&
     2916\begin{cfa}
     2917"ABC"
     2918"abc"
     2919
     2920"X X X X"
     2921\end{cfa}
     2922\end{tabular}
     2923\end{cquote}
     2924
     2925
     2926\subsection{Returning N on Search Failure}
     2927
      2928Some of the prior string operations are composite, \eg string operations returning the longest substring of compliant characters (©include©) are built from a search followed by a substring of the appropriate text.
     2929However, string search can fail, which is reported as an alternate search outcome, possibly an exception.
     2930Many string libraries use a return code to indicate search failure, with a failure value of ©0© or ©-1© (PL/I~\cite{PLI} returns ©0©).
      2931This semantics leads to an awkward pattern, which can appear many times in a string library or user code.
     2932\begin{cfa}
     2933i = exclude( s, alpha );
     2934if ( i != -1 ) return s( 0, i );
     2935else return "";
     2936\end{cfa}
     2937
      2938\CFA adopts a return code, but the failure value is taken from the index-of function in APL~\cite{apl}, which returns the length of the target string $N$ (or $N+1$ for 1 origin).
     2939This semantics allows many search and substring functions to be written without conditions, \eg:
     2940\begin{cfa}
     2941string include( const string & s, int (*f)( int ) ) { return ®s( 0, include( s, f ) )®; }
     2942string exclude( const string & s, int (*f)( int ) ) { return ®s( 0, exclude( s, f ) )®; }
     2943\end{cfa}
     2944In string systems with an $O(1)$ length operator, checking for failure is low cost.
     2945\begin{cfa}
     2946if ( include( line, alpha ) == len( line ) ) ... // not found, 0 origin
     2947\end{cfa}
     2948\VRef[Figure]{f:ExtractingWordsText} compares \CC and \CFA string code for extracting words from a line of text, repeatedly removing non-word text and then a word until the line is empty.
     2949The \CFA code is simpler solely because of the choice for indicating search failure.
     2950(A simplification of the \CC version is to concatenate a sentinel character at the end of the line so the call to ©find_first_not_of© does not fail.)
     2951
     2952\begin{figure}
     2953\begin{cquote}
     2954\begin{tabular}{@{}l|l@{}}
     2955\multicolumn{1}{c}{\textbf{\CC}} & \multicolumn{1}{c}{\textbf{\CFA}} \\
     2956\begin{cfa}
     2957for ( ;; ) {
     2958        string::size_type posn = line.find_first_of( alpha );
     2959  if ( posn == string::npos ) break;
     2960        line = line.substr( posn );
     2961        posn = line.find_first_not_of( alpha );
     2962        if ( posn != string::npos ) {
     2963                cout << line.substr( 0, posn ) << endl;
     2964                line = line.substr( posn );
     2965        } else {
     2966                cout << line << endl;
     2967                line = "";
     2968        }
     2969}
     2970\end{cfa}
     2971&
     2972\begin{cfa}
     2973for () {
     2974        size_t posn = exclude( line, alpha );
     2975  if ( posn == len( line ) ) break;
     2976        line = line( posn );
     2977        posn = include( line, alpha );
     2978
     2979        sout | line( 0, posn );
     2980        line = line( posn );
     2981
     2982
     2983
     2984
     2985}
     2986\end{cfa}
     2987\end{tabular}
     2988\end{cquote}
     2989\caption{Extracting Words from Line of Text}
     2990\label{f:ExtractingWordsText}
     2991\end{figure}
     2992
     2993
     2994\subsection{C Compatibility}
     2995
     2996To ease conversion from C to \CFA, \CFA provides companion C ©string© functions.
     2997Hence, it is possible to convert a block of C string operations to \CFA strings just by changing the type ©char *© to ©string©.
     2998\begin{cquote}
     2999\begin{tabular}{@{}ll@{}}
     3000\begin{cfa}
     3001char s[32];   // string s;
     3002strlen( s );
     3003strnlen( s, 3 );
     3004strcmp( s, "abc" );
     3005strncmp( s, "abc", 3 );
     3006\end{cfa}
     3007&
     3008\begin{cfa}
     3009
     3010strcpy( s, "abc" );
     3011strncpy( s, "abcdef", 3 );
     3012strcat( s, "xyz" );
     3013strncat( s, "uvwxyz", 3 );
     3014\end{cfa}
     3015\end{tabular}
     3016\end{cquote}
     3017However, the conversion fails with I/O because ©printf© cannot print a ©string© using format code ©%s© as \CFA strings are not null terminated.
     3018Nevertheless, this capability does provide a useful starting point for conversion to safer \CFA strings.
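For example, the remaining step is to switch the I/O; a small sketch using the \CFA stream I/O shown throughout:
\begin{cfa}
string s = "abc";
// printf( "%s\n", s );                 §\C{// fails, string is not null terminated}§
sout | s;                               §\C{// prints abc}§
\end{cfa}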
     3019
     3020
     3021\subsection{I/O Operators}
     3022
     3023The ability to input and output strings is as essential as for any other type.
      3024The goal for character I/O is to also work with groups of characters rather than individual characters.
     3025A comparison with \CC string I/O is presented as a counterpoint to \CFA string I/O.
     3026
     3027The \CC output ©<<© and input ©>>© operators are defined on type ©string©.
      3028\CC output for ©char©, ©char *©, and ©string© is similar.
      3029The \CC manipulators are ©setw© and its associated width controls ©left©, ©right©, and ©setfill©.
     3030\begin{cquote}
     3031\begin{tabular}{@{}l|l@{}}
     3032\multicolumn{2}{@{}l}{\lstinline{string s = "abc";}} \\
     3033\begin{C++}
     3034cout << setw(10) << left << setfill( 'x' ) << s << endl;
     3035\end{C++}
     3036&
     3037\begin{C++}
     3038"abcxxxxxxx"
     3039\end{C++}
     3040\end{tabular}
     3041\end{cquote}
     3042
     3043The \CFA input/output operator ©|© is defined on type ©string©.
      3044\CFA output for ©char©, ©char *©, and ©string© is similar.
      3045The \CFA manipulators are ©bin©, ©oct©, ©hex©, and ©wd©, with its associated width controls and ©left©.
     3046\begin{cquote}
     3047\begin{tabular}{@{}l|l@{}}
     3048\multicolumn{2}{@{}l}{\lstinline{string s = "abc";}} \\
     3049\begin{cfa}
     3050sout | bin( s ) | nl
     3051           | oct( s ) | nl
     3052           | hex( s ) | nl
     3053           | wd( 10, s ) | nl
     3054           | wd( 10, 2, s ) | nl
     3055           | left( wd( 10, s ) );
     3056\end{cfa}
     3057&
     3058\begin{cfa}
     3059"0b1100001 0b1100010 0b1100011"
     3060"0141 0142 0143"
     3061"0x61 0x62 0x63"
     3062"       abc"
     3063"        ab"
     3064"abc       "
     3065\end{cfa}
     3066\end{tabular}
     3067\end{cquote}
     3068\CC ©setfill© is not considered an important string manipulator.
     3069
      3070\CC input matching for ©char©, ©char *©, and ©string© is similar, where \emph{all} input characters are read from the current point in the input stream to the end of the type size, format width, whitespace, end of line (©'\n'©), or end of file.
      3071The \CC manipulator is ©setw© to restrict the size.
      3072Reading into a ©char© is safe as the size is 1, ©char *© is unsafe without using ©setw© to constrain the length (which includes ©'\0'©), and ©string© is safe as it grows dynamically as characters are read.
     3073\begin{cquote}
     3074\begin{tabular}{@{}l|l@{}}
     3075\multicolumn{2}{@{}l}{\lstinline{char ch, c[10];}} \\
     3076\multicolumn{2}{@{}l}{\lstinline{string s;}} \\
     3077\begin{C++}
     3078cin >> ch >> setw( 5 ) >> c  >> s;
     3079®abcde   fg®
     3080\end{C++}
     3081&
     3082\begin{C++}
     3083'a' "bcde" "fg"
     3084
     3085\end{C++}
     3086\end{tabular}
     3087\end{cquote}
     3088Input text can be \emph{gulped}, including whitespace, from the current point to an arbitrary delimiter character using ©getline©.
     3089
      3090The \CFA philosophy for input is that any constant form valid in C should also be readable as input.
     3091For example, the complex constant ©3.5+4.1i© can appear as input to a complex variable.
      3092\CFA input matching for ©char©, ©char *©, and ©string© is similar.
      3093C-strings may only be read with a width field, which must not exceed the array size.
      3094Certain input manipulators support a scanset, which is a simple regular expression from ©scanf©.
     3095The \CFA manipulators for these types are ©wdi©,\footnote{Due to an overloading issue in the type-resolver, the input width name must be temporarily different from the output, \lstinline{wdi} versus \lstinline{wd}.} and its associated width control and ©left©, ©quote©, ©incl©, ©excl©, and ©getline©.
     3096\begin{cquote}
     3097\setlength{\tabcolsep}{10pt}
     3098\begin{tabular}{@{}l|l@{}}
     3099\multicolumn{2}{@{}l}{\lstinline{char ch, c[10];}} \\
     3100\multicolumn{2}{@{}l}{\lstinline{string s;}} \\
     3101\begin{C++}
     3102sin | ch | wdi( 5, c ) | s;
     3103®abcde fg®
     3104sin | quote( ch ) | quote( wdi( sizeof(c), c ) ) | quote( s, '[', ']' ) | nl;
     3105®'a' "bcde" [fg]®
     3106sin | incl( "a-zA-Z0-9 ?!&\n", s ) | nl;
     3107®x?&000xyz TOM !.®
     3108sin | excl( "a-zA-Z0-9 ?!&\n", s );
     3109®<>{}{}STOP®
     3110\end{C++}
     3111&
     3112\begin{C++}
     3113
     3114
     3115'a' "bcde" "fg"
     3116
     3117'a' "bcde" "fg"
     3118
     3119"x?&000xyz TOM !"
     3120
     3121"<>{}{}"
     3122
     3123\end{C++}
     3124\end{tabular}
     3125\end{cquote}
      3126Note, the ability to read quoted strings containing whitespace allows matching program string constants.
      3127The ©nl© at the end of an input expression ignores the rest of the input line.
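Like \CC ©getline©, whole-line input gulps text including whitespace; a sketch assuming the \CFA ©getline© manipulator listed above takes the target string (its exact signature is an assumption):
\begin{cfa}
string line;
sin | getline( line );                  §\C{// read rest of line, including whitespace}§
\end{cfa}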
     3128
     3129
     3130\begin{comment}
    25863131A substring is treated as a pointer into the base (substringed) string rather than creating a copy of the subtext.
    25873132As with all pointers, if the item they are pointing at is changed, then the pointer is referring to the changed item.
     
    26113156}
    26123157\end{cfa}
    2613 
    2614 There is an assignment form of substring in which only the starting position is specified and the length is assumed to be the remainder of the string.
    2615 \begin{cfa}
    2616 string operator () (int start);
    2617 \end{cfa}
    2618 For example:
    2619 \begin{cfa}
    2620 s = peter( 2 );                                         §\C{// s is assigned "ETER"}§
    2621 peter( 2 ) = "IPER";                            §\C{// peter is assigned "PIPER"}§
    2622 \end{cfa}
    2623 It is also possible to substring using a string as the index for selecting the substring portion of the string.
    2624 \begin{cfa}
    2625 string operator () (const string &index);
    2626 \end{cfa}
    2627 For example:
    2628 \begin{cfa}[mathescape=false]
    2629 digit( "xyz$$$" ) = "678";                      §\C{// digit is assigned "0156789"}§
    2630 digit( "234") = "***";                          §\C{// digit is assigned "0156789***"}§
    2631 \end{cfa}
    2632 
    2633 
    2634 \subsection{Searching}
    2635 
    2636 The ©index© operation
    2637 \begin{cfa}
    2638 int index( const string &key, int start = 1, occurrence occ = first );
    2639 \end{cfa}
    2640 returns the position of the first or last occurrence of the ©key© (depending on the occurrence indicator ©occ© that is either ©first© or ©last©) in the current string starting the search at position ©start©.
    2641 If the ©key© does not appear in the current string, the length of the current string plus one is returned.
    2642 %If the ©key© has zero length, the value 1 is returned regardless of what the current string contains.
    2643 A negative starting position is a specification from the right end of the string.
    2644 \begin{cfa}
    2645 i = digit.index( "567" );                       §\C{// i is assigned 3}§
    2646 i = digit.index( "567", 7 );            §\C{// i is assigned 11}§
    2647 i = digit.index( "567", -1, last );     §\C{// i is assigned 3}§
    2648 i = peter.index( "E", 5, last );        §\C{// i is assigned 4}§
    2649 \end{cfa}
    2650 
    2651 The next two string operations test a string to see if it is or is not composed completely of a particular class of characters.
    2652 For example, are the characters of a string all alphabetic or all numeric?
    2653 Use of these operations involves a two step operation.
    2654 First, it is necessary to create an instance of type ©strmask© and initialize it to a string containing the characters of the particular character class, as in:
    2655 \begin{cfa}
    2656 strmask digitmask = digit;
    2657 strmask alphamask = string( "abcdefghijklmnopqrstuvwxyz" );
    2658 \end{cfa}
    2659 Second, the character mask is used in the functions ©include© and ©exclude© to check a string for compliance of its characters with the characters indicated by the mask.
    2660 
    2661 The ©include© operation
    2662 \begin{cfa}
    2663 int include( const strmask &, int = 1, occurrence occ = first );
    2664 \end{cfa}
    2665 returns the position of the first or last character (depending on the occurrence indicator, which is either ©first© or ©last©) in the current string that does not appear in the ©mask© starting the search at position ©start©;
    2666 hence it skips over characters in the current string that are included (in) the ©mask©.
    2667 The characters in the current string do not have to be in the same order as the ©mask©.
    2668 If all the characters in the current string appear in the ©mask©, the length of the current string plus one is returned, regardless of which occurrence is being searched for.
    2669 A negative starting position is a specification from the right end of the string.
    2670 \begin{cfa}
    2671 i = peter.include( digitmask );         §\C{// i is assigned 1}§
    2672 i = peter.include( alphamask );         §\C{// i is assigned 6}§
    2673 \end{cfa}
    2674 
    2675 The ©exclude© operation
    2676 \begin{cfa}
    2677 int exclude( string &mask, int start = 1, occurrence occ = first )
    2678 \end{cfa}
    2679 returns the position of the first or last character (depending on the occurrence indicator, which is either ©first© or ©last©) in the current string that does appear in the ©mask© string starting the search at position ©start©;
    2680 hence it skips over characters in the current string that are excluded from (not in) in the ©mask© string.
    2681 The characters in the current string do not have to be in the same order as the ©mask© string.
    2682 If all the characters in the current string do NOT appear in the ©mask© string, the length of the current string plus one is returned, regardless of which occurrence is being searched for.
    2683 A negative starting position is a specification from the right end of the string.
    2684 \begin{cfa}
    2685 i = peter.exclude( digitmask );         §\C{// i is assigned 6}§
    2686 i = ifstmt.exclude( strmask( punctuation ) ); §\C{// i is assigned 4}§
    2687 \end{cfa}
    2688 
    2689 The ©includeStr© operation:
    2690 \begin{cfa}
    2691 string includeStr( strmask &mask, int start = 1, occurrence occ = first )
    2692 \end{cfa}
    2693 returns the longest substring of leading or trailing characters (depending on the occurrence indicator, which is either ©first© or ©last©) of the current string that ARE included in the ©mask© string starting the search at position ©start©.
    2694 A negative starting position is a specification from the right end of the string.
    2695 \begin{cfa}
    2696 s = peter.includeStr( alphamask );      §\C{// s is assigned "PETER"}§
    2697 s = ifstmt.includeStr( alphamask );     §\C{// s is assigned "IF"}§
    2698 s = peter.includeStr( digitmask );      §\C{// s is assigned ""}§
    2699 \end{cfa}
    2700 
    2701 The ©excludeStr© operation:
    2702 \begin{cfa}
    2703 string excludeStr( strmask &mask, int start = 1, occurrence = first )
    2704 \end{cfa}
    2705 returns the longest substring of leading or trailing characters (depending on the occurrence indicator, which is either ©first© or ©last©) of the current string that are excluded (NOT) in the ©mask© string starting the search at position ©start©.
    2706 A negative starting position is a specification from the right end of the string.
    2707 \begin{cfa}
    2708 s = peter.excludeStr( digitmask);       §\C{// s is assigned "PETER"}§
    2709 s = ifstmt.excludeStr( strmask( punctuation ) ); §\C{// s is assigned "IF "}§
    2710 s = peter.excludeStr( alphamask);       §\C{// s is assigned ""}§
    2711 \end{cfa}
    2712 
    2713 
    2714 \subsection{Miscellaneous}
    2715 
    2716 The ©trim© operation
    2717 \begin{cfa}
    2718 string trim( string &mask, occurrence occ = first )
    2719 \end{cfa}
    2720 returns a string in that is the longest substring of leading or trailing characters (depending on the occurrence indicator, which is either ©first© or ©last©) which ARE included in the ©mask© are removed.
    2721 \begin{cfa}
    2722 // remove leading blanks
    2723 s = string( "   ABC" ).trim( " " );     §\C{// s is assigned "ABC",}§
    2724 // remove trailing blanks
    2725 s = string( "ABC   " ).trim( " ", last ); §\C{// s is assigned "ABC",}§
    2726 \end{cfa}
    2727 
    2728 The ©translate© operation
    2729 \begin{cfa}
    2730 string translate( string &from, string &to )
    2731 \end{cfa}
    2732 returns a string that is the same length as the original string in which all occurrences of the characters that appear in the ©from© string have been translated into their corresponding character in the ©to© string.
    2733 Translation is done on a character by character basis between the ©from© and ©to© strings; hence these two strings must be the same length.
    2734 If a character in the original string does not appear in the ©from© string, then it simply appears as is in the resulting string.
    2735 \begin{cfa}
    2736 // upper to lower case
    2737 peter = peter.translate( "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz" );
    2738                         // peter is assigned "peter"
    2739 s = ifstmt.translate( "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz" );
    2740                         // ifstmt is assigned "if (a > b) {"
    2741 // lower to upper case
    2742 peter = peter.translate( "abcdefghijklmnopqrstuvwxyz", "ABCDEFGHIJKLMNOPQRSTUVWXYZ" );
    2743                         // peter is assigned "PETER"
    2744 \end{cfa}
    2745 
    2746 The ©replace© operation
    2747 \begin{cfa}
    2748 string replace( string &from, string &to )
    2749 \end{cfa}
    2750 returns a string in which all occurrences of the ©from© string in the current string have been replaced by the ©to© string.
    2751 \begin{cfa}
    2752 s = peter.replace( "E", "XX" );         §\C{// s is assigned "PXXTXXR"}§
    2753 \end{cfa}
    2754 The replacement is done left-to-right.
    2755 When an instance of the ©from© string is found and changed to the ©to© string, it is NOT examined again for further replacement.
    2756 
    2757 \subsection{Returning N+1 on Failure}
    2758 
    2759 Any of the string search routines can fail at some point during the search.
    2760 When this happens it is necessary to return indicating the failure.
    2761 Many string types in other languages use some special value to indicate the failure.
    2762 This value is often 0 or -1 (PL/I returns 0).
    2763 This section argues that a value of N+1, where N is the length of the base string in the search, is a more useful value to return.
    2764 The index-of function in APL returns N+1.
    2765 These are the boundary situations and are often overlooked when designing a string type.
    2766 
    2767 The situation that can be optimized by returning N+1 is when a search is performed to find the starting location for a substring operation.
    2768 For example, in a program that is extracting words from a text file, it is necessary to scan from left to right over whitespace until the first alphabetic character is found.
    2769 \begin{cfa}
    2770 line = line( line.exclude( alpha ) );
    2771 \end{cfa}
    2772 If a text line contains all whitespaces, the exclude operation fails to find an alphabetic character.
    2773 If ©exclude© returns 0 or -1, the result of the substring operation is unclear.
    2774 Most string types generate an error, or clip the starting value to 1, resulting in the entire whitespace string being selected.
    2775 If ©exclude© returns N+1, the starting position for the substring operation is beyond the end of the string leaving a null string.
    2776 
    2777 The same situation occurs when scanning off a word.
    2778 \begin{cfa}
    2779 start = line.include(alpha);
    2780 word = line(1, start - 1);
    2781 \end{cfa}
    2782 If the entire line is composed of a word, the include operation will  fail to find a non-alphabetic character.
    2783 In general, returning 0 or -1 is not an appropriate starting position for the substring, which must substring off the word leaving a null string.
    2784 However, returning N+1 will substring off the word leaving a null string.
    2785 
    2786 
    2787 \subsection{C Compatibility}
    2788 
    2789 To ease conversion from C to \CFA, there are companion ©string© routines for C strings.
    2790 \VRef[Table]{t:CompanionStringRoutines} shows the C routines on the left that also work with ©string© and the rough equivalent ©string© opeation of the right.
    2791 Hence, it is possible to directly convert a block of C string operations into @string@ just by changing the
    2792 
    2793 \begin{table}
    2794 \begin{cquote}
    2795 \begin{tabular}{@{}l|l@{}}
    2796 \multicolumn{1}{c|}{©char []©}  & \multicolumn{1}{c}{©string©}  \\
    2797 \hline
    2798 ©strcpy©, ©strncpy©             & ©=©                                                                   \\
    2799 ©strcat©, ©strncat©             & ©+©                                                                   \\
    2800 ©strcmp©, ©strncmp©             & ©==©, ©!=©, ©<©, ©<=©, ©>©, ©>=©              \\
    2801 ©strlen©                                & ©size©                                                                \\
    2802 ©[]©                                    & ©[]©                                                                  \\
    2803 ©strstr©                                & ©find©                                                                \\
    2804 ©strcspn©                               & ©find_first_of©, ©find_last_of©               \\
    2805 ©strspc©                                & ©find_fist_not_of©, ©find_last_not_of©
    2806 \end{tabular}
    2807 \end{cquote}
    2808 \caption{Companion Routines for \CFA \lstinline{string} to C Strings}
    2809 \label{t:CompanionStringRoutines}
    2810 \end{table}
    2811 
    2812 For example, this block of C code can be converted to \CFA by simply changing the type of variable ©s© from ©char []© to ©string©.
    2813 \begin{cfa}
    2814         char s[32];
    2815         //string s;
    2816         strcpy( s, "abc" );                             PRINT( %s, s );
    2817         strncpy( s, "abcdef", 3 );              PRINT( %s, s );
    2818         strcat( s, "xyz" );                             PRINT( %s, s );
    2819         strncat( s, "uvwxyz", 3 );              PRINT( %s, s );
    2820         PRINT( %zd, strlen( s ) );
    2821         PRINT( %c, s[3] );
    2822         PRINT( %s, strstr( s, "yzu" ) ) ;
    2823         PRINT( %s, strstr( s, 'y' ) ) ;
    2824 \end{cfa}
    2825 However, the conversion fails with I/O because ©printf© cannot print a ©string© using format code ©%s© because \CFA strings are not null terminated.
    2826 
    2827 
    2828 \subsection{Input/Output Operators}
    2829 
    2830 Both the \CC operators ©<<© and ©>>© are defined on type ©string©.
    2831 However, input of a string value is different from input of a ©char *© value.
    2832 When a string value is read, \emph{all} input characters from the current point in the input stream to either the end of line (©'\n'©) or the end of file are read.
     3158\end{comment}
    28333159
    28343160
     
    33403666allowable calls are:
    33413667\begin{cquote}
    3342 \setlength{\tabcolsep}{0.75in}
    33433668\begin{tabular}{@{}ll@{}}
    33443669\textbf{positional arguments} & \textbf{empty arguments} \\
  • libcfa/src/collections/string.cfa

    r7ca6bf1 r1dec8f3  
    1010// Created On       : Fri Sep 03 11:00:00 2021
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sun Apr 13 07:58:55 2025
    13 // Update Count     : 390
     12// Last Modified On : Mon Sep 15 10:26:35 2025
     13// Update Count     : 394
    1414//
    1515
     
    9797}
    9898
    99 string str( ssize_t rhs ) {
    100         string s = rhs;
    101         return s;
    102 }
    103 
    104 string str( size_t rhs ) {
    105         string s = rhs;
    106         return s;
    107 }
    108 
    109 string str( double rhs ) {
    110         string s = rhs;
    111         return s;
    112 }
    113 
    114 string str( long double rhs ) {
    115         string s = rhs;
    116         return s;
    117 }
    118 
    119 string str( double _Complex rhs ) {
    120         string s = rhs;
    121         return s;
    122 }
    123 
    124 string str( long double _Complex rhs ) {
    125         string s = rhs;
    126         return s;
    127 }
    128 
    12999void ^?{}( string & s ) {
    130100        ^(*s.inner){};
     
    204174
    205175////////////////////////////////////////////////////////
    206 // Getter
     176// C-style
     177
     178// safe conversion from string to char *
     179char * strncpy( char * dst, string & src, size_t n ) {
     180        size_t l = min( n - 1, len( src ) );                            // ensure null terminated
     181        for ( i; l ) dst[i] = src[i];
     182        dst[l] = '\0';
     183        return dst;
     184}
     185char * ?=?( char *& dst, string & src ) {
     186        dst = aalloc( len( src ) + 1 );                                 // ensure null terminated
     187        for ( i; len( src ) ) dst[i] = src[i];
     188        dst[len(src)] = '\0';
     189        return dst;
     190}
     191void ?{}( char *& dst, string & src ) {
     192        dst = aalloc( len( src ) + 1 );                                 // ensure null terminated
     193        for ( i; len( src ) ) dst[i] = src[i];
     194        dst[len(src)] = '\0';
     195}
    207196
    208197size_t strnlen( const string & s, size_t maxlen ) { return min( len( s ), maxlen ); }
     
    255244
    256245string ?()( string & s, ssize_t start, ssize_t len ) {
    257         if ( start < 0 ) { start += len( s ); }
    258         if ( len < 0 ) { len = -len; start -= len; }
    259         if ( start >= len( s ) ) return (string){ "" };
     246        if ( start < 0 ) start += len( s );
     247        if ( len < 0 ) { len = -len; start -= len - 1; }
     248        if ( start < 0 || start >= len( s ) ) return (string){ "" };
    260249        if ( start + len > len( s ) ) len = len( s ) - start;
    261250        string ret = { *s.inner, start, len };
  • libcfa/src/collections/string.hfa

    r7ca6bf1 r1dec8f3  
    1010// Created On       : Fri Sep 03 11:00:00 2021
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sun Apr 13 21:03:35 2025
    13 // Update Count     : 284
     12// Last Modified On : Sun Sep 14 10:58:28 2025
     13// Update Count     : 311
    1414//
    1515
     
    4343void ?{}( string & s, long double _Complex rhs );
    4444static inline void ?{}( string & s, int rhs ) { (s){(signed long int) rhs}; }
    45 
    46 // string str( ssize_t rhs );
    47 // string str( size_t rhs );
    48 // string str( double rhs );
    49 // string str( long double rhs );
    50 // string str( double _Complex rhs );
    51 // string str( long double _Complex rhs );
    5245
    5346PBOOST string & ?=?( string & s, string c );
     
    6861static inline string & strcpy( string & s, const string & c ) { s = c; return s; }
    6962static inline string & strncpy( string & s, const string & c, size_t n ) { assign( s, c, n ); return s; }
     63char * strncpy( char * dst, string & src, size_t n );
     64char * ?=?( char *& dst, string & src );
     65void ?{}( char *& dst, string & src );
    7066
    7167// Alternate construction: request shared edits
     
    187183PBOOST string ?*?( string s, strmul_factor_t factor );
    188184string ?*?( const char * s, strmul_factor_t factor );
    189 static inline string ?*?( strmul_factor_t factor, char s ) { return s * factor; }
     185static inline string ?*?( strmul_factor_t factor, char c ) { return c * factor; }
    190186PBOOST static inline string ?*?( strmul_factor_t factor, string s ) { return s * factor; }
    191187static inline string ?*?( strmul_factor_t factor, const char * s ) { return s * factor; }
     
    278274
    279275size_t include( const string & s, const charclass & mask );
    280 static inline size_t include( const char * cs, const charclass & mask ) { const string s = cs; return include( s, mask ); }
     276static inline size_t include( const string & s, const char * mask ) { return include( s, (charclass){ mask } ); }
     277static inline size_t include( const string & s, const string & mask ) { return include( s, (charclass){ mask } ); }
     278static inline size_t include( const char * cs, const charclass & mask ) { return include( (string){ cs }, mask ); }
     279static inline size_t include( const char * cs, const char * mask ) { return include( (string){ cs }, (charclass){ mask } ); }
     280static inline size_t include( const char * cs, const string & mask ) { return include( (string){ cs }, (charclass){ mask } ); }
     281
    281282static inline string include( const string & s, const charclass & mask ) { return s( 0, include( s, mask ) ); }
     283static inline string include( const string & s, const char * mask ) { return s( 0, include( s, (charclass){ mask } ) ); }
     284static inline string include( const string & s, const string & mask ) { return s( 0, include( s, (charclass){ mask } ) ); }
    282285static inline string include( const char * cs, const charclass & mask ) { const string s = cs; return s( 0, include( s, mask ) ); }
     286static inline string include( const char * cs, const char * mask ) { const string s = cs; return s( 0, include( s, (charclass){ mask } ) ); }
     287static inline string include( const char * cs, const string & mask ) { const string s = cs; return s( 0, include( s, (charclass){ mask } ) ); }
    283288
    284289size_t exclude( const string & s, const charclass & mask );
    285 static inline size_t exclude( const char * cs, const charclass & mask ) { const string s = cs; return exclude( s, mask ); }
     290static inline size_t exclude( const string & s, const char * mask ) { return exclude( s, (charclass){ mask } ); }
     291static inline size_t exclude( const string & s, const string & mask ) { return exclude( s, (charclass){ mask } ); }
     292static inline size_t exclude( const char * cs, const charclass & mask ) { return exclude( (string){ cs }, mask ); }
     293static inline size_t exclude( const char * cs, const string & mask ) { return exclude( (string){ cs }, (charclass){ mask } ); }
     294static inline size_t exclude( const char * cs, const char * mask ) { return exclude( (string){ cs }, (charclass){ mask } ); }
     295
    286296static inline string exclude( const string & s, const charclass & mask ) { return s( 0, exclude( s, mask ) ); }
     297static inline string exclude( const string & s, const char * mask ) { return s( 0, exclude( s, (charclass){ mask } ) ); }
     298static inline string exclude( const string & s, const string & mask ) { return s( 0, exclude( s, (charclass){ mask } ) ); }
    287299static inline string exclude( const char * cs, const charclass & mask ) { const string s = cs; return s( 0, exclude( s, mask ) ); }
    288 
    289 size_t include( const string & s, int (*f)( int ) );
    290 static inline size_t include( const char * cs, int (*f)( int ) ) { const string S = cs; return include( S, f ); }
    291 static inline string include( const string & s, int (*f)( int ) ) { return s( 0, include( s, f ) ); }
    292 static inline string include( const char * cs, int (*f)( int ) ) { const string s = cs; return s( 0, include( s, f ) ); }
    293 
    294 size_t exclude( const string & s, int (*f)( int ) );
    295 static inline size_t exclude( const char * cs, int (*f)( int ) ) { const string s = cs; return exclude( s, f ); }
    296 static inline string exclude( const string & s, int (*f)( int ) ) { return s( 0, exclude( s, f ) ); }
    297 static inline string exclude( const char * cs, int (*f)( int ) ) { const string s = cs; return s( 0, exclude( s, f ) ); }
     300static inline string exclude( const char * cs, const string & mask ) { const string s = cs; return s( 0, exclude( s, (charclass){ mask } ) ); }
     301static inline string exclude( const char * cs, const char * mask ) { const string s = cs; return s( 0, exclude( s, (charclass){ mask } ) ); }
     302
     303size_t include( const string & s, int (* f)( int ) );   // for C character-class functions, e.g., isdigit
     304static inline size_t include( const char * cs, int (* f)( int ) ) { return include( (string){ cs }, f ); }
     305static inline string include( const string & s, int (* f)( int ) ) { return s( 0, include( s, f ) ); }
     306static inline string include( const char * cs, int (* f)( int ) ) { const string s = cs; return s( 0, include( s, f ) ); }
     307
     308static inline size_t include( const string & s, bool (* f)( char ) ) { return include( s, (int (*)( int ))f ); }
     309static inline size_t include( const char * cs, bool (* f)( char ) ) { return include( (string){ cs }, f ); }
     310static inline string include( const string & s, bool (* f)( char ) ) { return s( 0, include( s, f ) ); }
     311static inline string include( const char * cs, bool (* f)( char ) ) { const string s = cs; return s( 0, include( s, f ) ); }
     312
     313size_t exclude( const string & s, int (* f)( int ) );   // for C character-class functions, e.g., isdigit
     314static inline size_t exclude( const char * cs, int (* f)( int ) ) { return exclude( (string){ cs }, f ); }
     315static inline string exclude( const string & s, int (* f)( int ) ) { return s( 0, exclude( s, f ) ); }
     316static inline string exclude( const char * cs, int (* f)( int ) ) { const string s = cs; return s( 0, exclude( s, f ) ); }
     317
     318static inline size_t exclude( const string & s, bool (* f)( char ) ) { return exclude( s, (int (*)( int ))f ); }
     319static inline size_t exclude( const char * cs, bool (* f)( char ) ) { return exclude( (string){ cs }, f ); }
     320static inline string exclude( const string & s, bool (* f)( char ) ) { return s( 0, exclude( s, f ) ); }
     321static inline string exclude( const char * cs, bool (* f)( char ) ) { const string s = cs; return s( 0, exclude( s, f ) ); }
    298322
    299323string replace( const string & s, const string & from, const string & to );
    300 static inline string replace( const char * s, const char * from, const char * to ) { string S = s, From = from, To = to; return replace( S, From, To ); }
    301 static inline string replace( const string & s, const char * from, const char * to ) { const string From = from, To = to; return replace( s, From, To ); }
    302 static inline string replace( const string & s, const char * from, const string & to ) { const string From = from; return replace( s, From, to ); }
    303 static inline string replace( const string & s, string & from, const char * to ) { const string To = to; return replace( s, from, To ); }
    304 
    305 string translate( const string & s, int (*f)( int ) );
    306 static inline string translate( const char * c, int (*f)( int ) ) { const string S = c; return translate( S, f ); }
     324static inline string replace( const char * s, const char * from, const char * to ) { return replace( (string){ s }, (string){ from }, (string){ to } ); }
     325static inline string replace( const string & s, const char * from, const char * to ) { return replace( s, (string){ from }, (string){ to } ); }
     326static inline string replace( const string & s, const char * from, const string & to ) { return replace( s, (string){ from }, to ); }
     327static inline string replace( const string & s, string & from, const char * to ) { return replace( s, from, (string){ to } ); }
     328
     329string translate( const string & s, int (* f)( int ) ); // for C character-class functions, e.g., isdigit
     330static inline string translate( const char * cs, int (* f)( int ) ) { return translate( (string){ cs }, f ); }
     331
     332static inline string translate( const string & s, bool (* f)( char ) ) { return translate( s, (int (*)( int ))f ); }
     333static inline string translate( const char * cs, bool (* f)( char ) ) { return translate( (string){ cs }, f ); }
    307334
    308335#ifndef _COMPILING_STRING_CFA_
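
    The predicate overloads added here parallel the existing mask overloads: include scans the longest prefix of characters accepted by the predicate, exclude the longest prefix rejected by it, and each comes in a size_t (prefix length) and a string (prefix copy) flavour selected by return type. A minimal sketch of the intended usage, mirroring the new coverage-test cases further down (the (return T) casts pick an overload by return type; the <string.hfa> header name is assumed, the results are read off the updated .expect file, and translate is assumed to map each character through f):

	#include <fstream.hfa>                          // sout
	#include <string.hfa>                           // include, exclude, translate (header name assumed)
	#include <ctype.h>                              // isxdigit, isupper, isdigit, islower, toupper

	int main() {
		// include: longest prefix of characters *inside* the class
		sout | (return size_t)include( "1FeC34aB", isxdigit ); // 8: every character is a hex digit
		sout | (return string)include( "XXXx", isupper );      // "XXX"
		// exclude: longest prefix of characters *outside* the class
		sout | (return size_t)exclude( "1FeC34aB", isdigit );  // 0: '1' is a digit
		sout | (return string)exclude( "XXXx", islower );      // "XXX"
		// translate: apply a C classification/mapping function to each character (assumption)
		sout | translate( "abc", toupper );                    // "ABC"
	}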
  • libcfa/src/concurrency/clib/cfathread.cfa

    r7ca6bf1 r1dec8f3  
    450450        // Condition
    451451        struct cfathread_condition {
    452                 condition_variable(exp_backoff_then_block_lock) impl;
     452                cond_lock(exp_backoff_then_block_lock) impl;
    453453        };
    454454        int cfathread_cond_init(cfathread_cond_t *restrict cond, const cfathread_condattr_t *restrict) __attribute__((nonnull (1))) { *cond = new(); return 0; }
  • libcfa/src/concurrency/locks.cfa

    r7ca6bf1 r1dec8f3  
    246246        struct alarm_node_wrap {
    247247                alarm_node_t alarm_node;
    248                 condition_variable(L) * cond;
     248                cond_lock(L) * cond;
    249249                info_thread(L) * info_thd;
    250250        };
    251251
    252         void ?{}( alarm_node_wrap(L) & this, Duration alarm, Duration period, Alarm_Callback callback, condition_variable(L) * c, info_thread(L) * i ) {
     252        void ?{}( alarm_node_wrap(L) & this, Duration alarm, Duration period, Alarm_Callback callback, cond_lock(L) * c, info_thread(L) * i ) {
    253253                this.alarm_node{ callback, alarm, period };
    254254                this.cond = c;
     
    259259
    260260        static void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
    261                 // This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin.
     261                // This cond_lock member is called from the kernel, and therefore, cannot block, but it can spin.
    262262                lock( cond->lock __cfaabi_dbg_ctx2 );
    263263
     
    323323        //-----------------------------------------------------------------------------
    324324        // condition variable
    325         void ?{}( condition_variable(L) & this ){
     325        void ?{}( cond_lock(L) & this ){
    326326                this.lock{};
    327327                this.blocked_threads{};
     
    329329        }
    330330
    331         void ^?{}( condition_variable(L) & this ){ }
    332 
    333         static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
     331        void ^?{}( cond_lock(L) & this ){ }
     332
     333        static void process_popped( cond_lock(L) & this, info_thread(L) & popped ) with( this ) {
    334334                if (&popped != 0p) {
    335335                        popped.signalled = true;
     
    345345        }
    346346
    347         bool notify_one( condition_variable(L) & this ) with( this ) {
     347        bool notify_one( cond_lock(L) & this ) with( this ) {
    348348                lock( lock __cfaabi_dbg_ctx2 );
    349349                bool ret = ! isEmpty( blocked_threads );
     
    353353        }
    354354
    355         bool notify_all( condition_variable(L) & this ) with(this) {
     355        bool notify_all( cond_lock(L) & this ) with(this) {
    356356                lock( lock __cfaabi_dbg_ctx2 );
    357357                bool ret = ! isEmpty( blocked_threads );
     
    363363        }
    364364
    365         uintptr_t front( condition_variable(L) & this ) with(this) {
     365        uintptr_t front( cond_lock(L) & this ) with(this) {
    366366                return isEmpty( blocked_threads ) ? NULL : first( blocked_threads ).info;
    367367        }
    368368
    369         bool empty( condition_variable(L) & this ) with(this) {
     369        bool empty( cond_lock(L) & this ) with(this) {
    370370                lock( lock __cfaabi_dbg_ctx2 );
    371371                bool ret = isEmpty( blocked_threads );
     
    374374        }
    375375
    376         int counter( condition_variable(L) & this ) with(this) { return count; }
    377 
    378         static void enqueue_thread( condition_variable(L) & this, info_thread(L) * i ) with(this) {
     376        int counter( cond_lock(L) & this ) with(this) { return count; }
     377
     378        static void enqueue_thread( cond_lock(L) & this, info_thread(L) * i ) with(this) {
    379379                // add info_thread to waiting queue
    380380                insert_last( blocked_threads, *i );
     
    393393
     394394        // helper for wait()s with no timeout
    395         static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
     395        static void queue_info_thread( cond_lock(L) & this, info_thread(L) & i ) with(this) {
    396396                lock( lock __cfaabi_dbg_ctx2 );
    397397                enqueue_thread( this, &i );
     
    412412
     413413        // helper for wait()s with a timeout
    414         static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
     414        static void queue_info_thread_timeout( cond_lock(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
    415415                lock( lock __cfaabi_dbg_ctx2 );
    416416                enqueue_thread( this, &info );
     
    434434                return i.signalled;
    435435
    436         void wait( condition_variable(L) & this ) with(this) { WAIT( 0, 0p ) }
    437         void wait( condition_variable(L) & this, uintptr_t info ) with(this) { WAIT( info, 0p ) }
    438         void wait( condition_variable(L) & this, L & l  ) with(this) { WAIT( 0, &l ) }
    439         void wait( condition_variable(L) & this, L & l, uintptr_t info ) with(this) { WAIT( info, &l ) }
    440 
    441         bool wait( condition_variable(L) & this, Duration duration ) with(this) { WAIT_TIME( 0 , 0p , duration ) }
    442         bool wait( condition_variable(L) & this, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, 0p , duration ) }
    443         bool wait( condition_variable(L) & this, L & l, Duration duration  ) with(this) { WAIT_TIME( 0 , &l , duration ) }
    444         bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, &l , duration ) }
     436        void wait( cond_lock(L) & this ) with(this) { WAIT( 0, 0p ) }
     437        void wait( cond_lock(L) & this, uintptr_t info ) with(this) { WAIT( info, 0p ) }
     438        void wait( cond_lock(L) & this, L & l  ) with(this) { WAIT( 0, &l ) }
     439        void wait( cond_lock(L) & this, L & l, uintptr_t info ) with(this) { WAIT( info, &l ) }
     440
     441        bool wait( cond_lock(L) & this, Duration duration ) with(this) { WAIT_TIME( 0 , 0p , duration ) }
     442        bool wait( cond_lock(L) & this, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, 0p , duration ) }
     443        bool wait( cond_lock(L) & this, L & l, Duration duration  ) with(this) { WAIT_TIME( 0 , &l , duration ) }
     444        bool wait( cond_lock(L) & this, L & l, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, &l , duration ) }
    445445
    446446        //-----------------------------------------------------------------------------
  • libcfa/src/concurrency/locks.hfa

    r7ca6bf1 r1dec8f3  
    1111// Created On       : Thu Jan 21 19:46:50 2021
    1212// Last Modified By : Peter A. Buhr
    13 // Last Modified On : Fri Apr 25 07:14:16 2025
    14 // Update Count     : 22
     13// Last Modified On : Thu Aug 21 22:36:44 2025
     14// Update Count     : 23
    1515//
    1616
     
    797797
    798798        //-----------------------------------------------------------------------------
    799         // condition_variable
     799        // cond_lock
    800800
    801801        // The multi-tool condition variable
     
    805805        // - has shadow queue
    806806        // - can be signalled outside of critical sections with no locks held
    807         struct condition_variable {
     807        struct cond_lock {
    808808                // Spin lock used for mutual exclusion
    809809                __spinlock_t lock;
     
    816816        };
    817817
    818         void ?{}( condition_variable( L ) & this );
    819         void ^?{}( condition_variable( L ) & this );
    820 
    821         bool notify_one( condition_variable( L ) & this );
    822         bool notify_all( condition_variable( L ) & this );
    823 
    824         uintptr_t front( condition_variable( L ) & this );
    825 
    826         bool empty  ( condition_variable( L ) & this );
    827         int  counter( condition_variable( L ) & this );
    828 
    829         void wait( condition_variable( L ) & this );
    830         void wait( condition_variable( L ) & this, uintptr_t info );
    831         bool wait( condition_variable( L ) & this, Duration duration );
    832         bool wait( condition_variable( L ) & this, uintptr_t info, Duration duration );
    833 
    834         void wait( condition_variable( L ) & this, L & l );
    835         void wait( condition_variable( L ) & this, L & l, uintptr_t info );
    836         bool wait( condition_variable( L ) & this, L & l, Duration duration );
    837         bool wait( condition_variable( L ) & this, L & l, uintptr_t info, Duration duration );
     818        void ?{}( cond_lock( L ) & this );
     819        void ^?{}( cond_lock( L ) & this );
     820
     821        bool notify_one( cond_lock( L ) & this );
     822        bool notify_all( cond_lock( L ) & this );
     823
     824        uintptr_t front( cond_lock( L ) & this );
     825
     826        bool empty  ( cond_lock( L ) & this );
     827        int  counter( cond_lock( L ) & this );
     828
     829        void wait( cond_lock( L ) & this );
     830        void wait( cond_lock( L ) & this, uintptr_t info );
     831        bool wait( cond_lock( L ) & this, Duration duration );
     832        bool wait( cond_lock( L ) & this, uintptr_t info, Duration duration );
     833
     834        void wait( cond_lock( L ) & this, L & l );
     835        void wait( cond_lock( L ) & this, L & l, uintptr_t info );
     836        bool wait( cond_lock( L ) & this, L & l, Duration duration );
     837        bool wait( cond_lock( L ) & this, L & l, uintptr_t info, Duration duration );
    838838
    839839        //-----------------------------------------------------------------------------
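
    The rename is purely mechanical: every condition_variable(L) becomes cond_lock(L), and the wait/notify interface is otherwise unchanged. A minimal sketch of the renamed API, adapted from the timeout_lock test further down (include paths follow the deprecation message in mutex.hfa and are assumed; the lock-taking wait overloads are assumed to release m while blocked and reacquire it before returning):

	#include <concurrency/locks.hfa>                // cond_lock, multiple_acquisition_lock (path per mutex.hfa's deprecation note)
	#include <thread.hfa>                           // processor (header name assumed)

	multiple_acquisition_lock m;
	cond_lock( multiple_acquisition_lock ) c_m;     // formerly condition_variable( multiple_acquisition_lock )

	int main() {
		processor p[2];                         // extra processors so the timeout alarm can fire, as in the test
		wait( c_m, 1`ns );                      // bool wait( cond_lock(L) &, Duration ): times out, returns false
		lock( m ); wait( c_m, m, 1`ns ); unlock( m );  // timed wait that releases/reacquires m
		notify_one( c_m );                      // wake at most one waiter; returns false here, the queue is empty
	}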
  • libcfa/src/concurrency/mutex.cfa

    r7ca6bf1 r1dec8f3  
    1212// Created On       : Fri May 25 01:37:11 2018
    1313// Last Modified By : Peter A. Buhr
    14 // Last Modified On : Sun Feb 19 17:01:36 2023
    15 // Update Count     : 3
     14// Last Modified On : Thu Aug 21 22:35:44 2025
     15// Update Count     : 4
    1616//
    1717
     
    131131//-----------------------------------------------------------------------------
    132132// Conditions
    133 void ?{}(condition_variable & this) {
     133void ?{}(cond_lock & this) {
    134134        this.blocked_threads{};
    135135}
    136136
    137 void ^?{}(condition_variable & this) {
     137void ^?{}(cond_lock & this) {
    138138        // default
    139139}
    140140
    141 void notify_one(condition_variable & this) with(this) {
     141void notify_one(cond_lock & this) with(this) {
    142142        lock( lock __cfaabi_dbg_ctx2 );
    143143        unpark(
     
    147147}
    148148
    149 void notify_all(condition_variable & this) with(this) {
     149void notify_all(cond_lock & this) with(this) {
    150150        lock( lock __cfaabi_dbg_ctx2 );
    151151        while(this.blocked_threads) {
     
    157157}
    158158
    159 void wait(condition_variable & this) {
     159void wait(cond_lock & this) {
    160160        lock( this.lock __cfaabi_dbg_ctx2 );
    161161        append( this.blocked_threads, active_thread() );
     
    165165
    166166forall(L & | is_lock(L))
    167 void wait(condition_variable & this, L & l) {
     167void wait(cond_lock & this, L & l) {
    168168        lock( this.lock __cfaabi_dbg_ctx2 );
    169169        append( this.blocked_threads, active_thread() );
  • libcfa/src/concurrency/mutex.hfa

    r7ca6bf1 r1dec8f3  
    1212// Created On       : Fri May 25 01:24:09 2018
    1313// Last Modified By : Peter A. Buhr
    14 // Last Modified On : Thu Feb  2 11:46:08 2023
    15 // Update Count     : 2
     14// Last Modified On : Thu Aug 21 22:35:23 2025
     15// Update Count     : 3
    1616//
    1717
     
    7979// Condition variables
    8080
    81 struct condition_variable {
     81struct cond_lock {
    8282        // Spin lock used for mutual exclusion
    8383        __spinlock_t lock;
     
    8787};
    8888
    89 void ?{}(condition_variable & this) __attribute__((deprecated("use concurrency/locks.hfa instead")));
    90 void ^?{}(condition_variable & this) __attribute__((deprecated("use concurrency/locks.hfa instead")));
     89void ?{}(cond_lock & this) __attribute__((deprecated("use concurrency/locks.hfa instead")));
     90void ^?{}(cond_lock & this) __attribute__((deprecated("use concurrency/locks.hfa instead")));
    9191
    92 void notify_one(condition_variable & this) __attribute__((deprecated("use concurrency/locks.hfa instead")));
    93 void notify_all(condition_variable & this) __attribute__((deprecated("use concurrency/locks.hfa instead")));
     92void notify_one(cond_lock & this) __attribute__((deprecated("use concurrency/locks.hfa instead")));
     93void notify_all(cond_lock & this) __attribute__((deprecated("use concurrency/locks.hfa instead")));
    9494
    95 void wait(condition_variable & this) __attribute__((deprecated("use concurrency/locks.hfa instead")));
     95void wait(cond_lock & this) __attribute__((deprecated("use concurrency/locks.hfa instead")));
    9696
    9797forall(L & | is_lock(L))
    98 void wait(condition_variable & this, L & l) __attribute__((deprecated("use concurrency/locks.hfa instead")));
     98void wait(cond_lock & this, L & l) __attribute__((deprecated("use concurrency/locks.hfa instead")));
    9999
    100100//-----------------------------------------------------------------------------
  • libcfa/src/iostream.hfa

    r7ca6bf1 r1dec8f3  
    1010// Created On       : Wed May 27 17:56:53 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon May 12 17:29:29 2025
    13 // Update Count     : 769
     12// Last Modified On : Sat Sep 13 16:10:27 2025
     13// Update Count     : 771
    1414//
    1515
  • longrun_tests/block.cfa

    r7ca6bf1 r1dec8f3  
    1 ../tests/concurrent/signal/block.cfa
     1../tests/concurrency/signal/block.cfa
  • longrun_tests/coroutine.cfa

    r7ca6bf1 r1dec8f3  
    1 ../tests/concurrent/coroutineYield.cfa
     1../tests/concurrency/coroutineYield.cfa
  • longrun_tests/disjoint.cfa

    r7ca6bf1 r1dec8f3  
    1 ../tests/concurrent/signal/disjoint.cfa
     1../tests/concurrency/signal/disjoint.cfa
  • longrun_tests/locks.cfa

    r7ca6bf1 r1dec8f3  
    77
    88multiple_acquisition_lock m;
    9 condition_variable( multiple_acquisition_lock ) c_m;
     9cond_lock( multiple_acquisition_lock ) c_m;
    1010
    1111single_acquisition_lock s;
    12 condition_variable( single_acquisition_lock ) c_s;
     12cond_lock( single_acquisition_lock ) c_s;
    1313
    1414owner_lock o;
    15 condition_variable( owner_lock ) c_o;
     15cond_lock( owner_lock ) c_o;
    1616
    1717thread T_C_M_WS1 {};
  • longrun_tests/preempt.cfa

    r7ca6bf1 r1dec8f3  
    1 ../tests/concurrent/preempt.cfa
     1../tests/concurrency/preempt.cfa
  • longrun_tests/wait.cfa

    r7ca6bf1 r1dec8f3  
    1 ../tests/concurrent/signal/wait.cfa
     1../tests/concurrency/signal/wait.cfa
  • src/Parser/StatementNode.cpp

    r7ca6bf1 r1dec8f3  
    1111// Created On       : Sat May 16 14:59:41 2015
    1212// Last Modified By : Peter A. Buhr
    13 // Last Modified On : Thu Feb  6 11:38:39 2025
    14 // Update Count     : 434
     13// Last Modified On : Sat Apr 19 13:01:31 2025
     14// Update Count     : 436
    1515//
    1616
     
    119119} // build_expr
    120120
    121 static ast::Expr * build_if_control( CondCtrl * ctrl,
    122                 std::vector<ast::ptr<ast::Stmt>> & inits ) {
     121static ast::Expr * build_if_control( CondCtrl * ctrl, std::vector<ast::ptr<ast::Stmt>> & inits ) {
    123122        assert( inits.empty() );
    124123        if ( nullptr != ctrl->init ) {
     
    149148        ast::Stmt const * astelse = buildMoveOptional( else_ );
    150149
    151         return new ast::IfStmt( location, astcond, astthen, astelse,
    152                 std::move( astinit )
    153         );
     150        return new ast::IfStmt( location, astcond, astthen, astelse, std::move( astinit ) );
    154151} // build_if
    155152
     
    193190        ast::Expr * astcond = build_if_control( ctrl, astinit ); // ctrl deleted, cond/init set
    194191
    195         return new ast::WhileDoStmt( location,
    196                 astcond,
    197                 buildMoveSingle( stmt ),
    198                 buildMoveOptional( else_ ),
    199                 std::move( astinit ),
    200                 ast::While
    201         );
     192        return new ast::WhileDoStmt( location, astcond, buildMoveSingle( stmt ), buildMoveOptional( else_ ),
     193                                                                 std::move( astinit ), ast::While );
    202194} // build_while
    203195
  • tests/collections/.expect/string-api-coverage.txt

    r7ca6bf1 r1dec8f3  
    86863 0 0 11 26 0
    8787abc   abcdefghijk abcdefghijklmnopqrstuvwxyz
     888 3 3
     891FeC34aB .,; XXX
     900 0 3
     911  XXX
  • tests/collections/string-api-coverage.cfa

    r7ca6bf1 r1dec8f3  
    22#include <string_sharectx.hfa>
    33#include <fstream.hfa>
    4 
     4#include <ctype.h>                                                                              // isxdigit, ispunct, isupper
    55
    66// Purpose: call each function in string.hfa, top to bottom
     
    407407        | (return string)include( alphabet, cc_alphabet )  // "abcdefghijklmnopqrstuvwxyz"
    408408        | (return string)exclude( alphabet, cc_alphabet ); // ""
     409
     410        sout
     411                | (return size_t)include( "1FeC34aB", isxdigit )
     412                | (return size_t)include( ".,;’!\"", ispunct )
     413                | (return size_t)include( "XXXx", isupper );
     414
     415        sout
     416                | (return string)include( "1FeC34aB", isxdigit )
     417                | (return string)include( ".,;’!\"", ispunct )
     418                | (return string)include( "XXXx", isupper );
     419
     420        sout
     421                | (return size_t)exclude( "1FeC34aB", isdigit )
     422                | (return size_t)exclude( ".,;’!\"", ispunct )
     423                | (return size_t)exclude( "XXXx", islower );
     424
     425        sout
     426                | (return string)exclude( "1FeC34aB", isalpha )
     427                | (return string)exclude( ".,;’!\"", ispunct )
     428                | (return string)exclude( "XXXx", islower );
    409429}
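
    Cross-referencing the updated .expect hunk above, the four new sout statements map line-for-line onto expected lines 88-91 (a reading of the expected output, not an independent verification):

	8 3 3              // prefix lengths: all of "1FeC34aB" is hex, ".,;" before the non-ASCII quote is punct, "XXX" is upper
	1FeC34aB .,; XXX   // the same three prefixes, returned as strings
	0 0 3              // excluded-prefix lengths: '1' is a digit and '.' is punct, so the first two stop immediately
	1  XXX             // excluded prefixes as strings; the middle field is the empty string, hence the doubled space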
  • tests/concurrency/unified_locking/locks.cfa

    r7ca6bf1 r1dec8f3  
    77
    88multiple_acquisition_lock m;
    9 condition_variable( multiple_acquisition_lock ) c_m;
     9cond_lock( multiple_acquisition_lock ) c_m;
    1010
    1111single_acquisition_lock s;
    12 condition_variable( single_acquisition_lock ) c_s;
     12cond_lock( single_acquisition_lock ) c_s;
    1313
    1414owner_lock o;
    15 condition_variable( owner_lock ) c_o;
     15cond_lock( owner_lock ) c_o;
    1616
    1717exp_backoff_then_block_lock l;
    18 condition_variable( exp_backoff_then_block_lock ) c_l;
     18cond_lock( exp_backoff_then_block_lock ) c_l;
    1919
    2020fast_block_lock f;
  • tests/concurrency/unified_locking/pthread_locks.cfa

    r7ca6bf1 r1dec8f3  
    1212
    1313owner_lock l2;
    14 condition_variable( owner_lock ) c2;
     14cond_lock( owner_lock ) c2;
    1515
    1616volatile int counter = 0;
  • tests/concurrency/unified_locking/timeout_lock.cfa

    r7ca6bf1 r1dec8f3  
    77
    88multiple_acquisition_lock m, n;
    9 condition_variable( multiple_acquisition_lock ) c_m, c_n;
     9cond_lock( multiple_acquisition_lock ) c_m, c_n;
    1010
    1111const unsigned int NoOfTimes = 20;
     
    7373        processor p[2];
    7474        printf("Start Test 1: surface testing condition variable timeout routines\n");
    75         wait( c_m, 1`ns );                                                                                                              // bool wait( condition_variable(L) & this, Duration duration );
    76         wait( c_m, 10, 1`ns );                                                                                                  // bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );
    77         lock(m); wait( c_m, m, 1`ns ); unlock(m);                                                               // bool wait( condition_variable(L) & this, L & l, Duration duration );
    78         lock(m); wait( c_m, m, 10, 1`ns ); unlock(m);                                                   // bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
     75        wait( c_m, 1`ns );                                                                                                              // bool wait( cond_lock(L) & this, Duration duration );
     76        wait( c_m, 10, 1`ns );                                                                                                  // bool wait( cond_lock(L) & this, uintptr_t info, Duration duration );
     77        lock(m); wait( c_m, m, 1`ns ); unlock(m);                                                               // bool wait( cond_lock(L) & this, L & l, Duration duration );
     78        lock(m); wait( c_m, m, 10, 1`ns ); unlock(m);                                                   // bool wait( cond_lock(L) & this, L & l, uintptr_t info, Duration duration );
    7979        printf("Done Test 1\n");
    8080