Changeset d672350


Timestamp:
Mar 21, 2022, 1:44:06 PM (4 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum
Children:
a76202d
Parents:
ef3c383 (diff), dbe2533 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Files:
88 added
2 deleted
88 edited
4 moved

  • Jenkinsfile

    ref3c383 rd672350  
    108108
    109109                        // Configure libcfa
    110                         sh 'make -j 8 --no-print-directory configure-libcfa'
     110                        sh 'make -j $(nproc) --no-print-directory configure-libcfa'
    111111                }
    112112        }
     
    116116                dir (BuildDir) {
    117117                        // Build driver
    118                         sh 'make -j 8 --no-print-directory -C driver'
     118                        sh 'make -j $(nproc) --no-print-directory -C driver'
    119119
    120120                        // Build translator
    121                         sh 'make -j 8 --no-print-directory -C src'
     121                        sh 'make -j $(nproc) --no-print-directory -C src'
    122122                }
    123123        }
     
    126126                // Build outside of the src tree to ease cleaning
    127127                dir (BuildDir) {
    128                         sh "make -j 8 --no-print-directory -C libcfa/${Settings.Architecture.name}-debug"
     128                        sh "make -j $(nproc) --no-print-directory -C libcfa/${Settings.Architecture.name}-debug"
    129129                }
    130130        }
     
    133133                // Build outside of the src tree to ease cleaning
    134134                dir (BuildDir) {
    135                         sh "make -j 8 --no-print-directory -C libcfa/${Settings.Architecture.name}-nodebug"
     135                        sh "make -j $(nproc) --no-print-directory -C libcfa/${Settings.Architecture.name}-nodebug"
    136136                }
    137137        }
     
    140140                // Build outside of the src tree to ease cleaning
    141141                dir (BuildDir) {
    142                         sh "make -j 8 --no-print-directory install"
     142                        sh "make -j $(nproc) --no-print-directory install"
    143143                }
    144144        }
     
    161161                Tools.BuildStage('Test: full', Settings.RunAllTests) {
    162162                        dir (BuildDir) {
    163                                         jopt = ""
     163                                        jopt = "-j $(nproc)"
    164164                                        if( Settings.Architecture.node == 'x86' ) {
    165165                                                jopt = "-j2"
  • benchmark/io/http/protocol.cfa

    ref3c383 rd672350  
    173173}
    174174
    175 static void zero_sqe(struct io_uring_sqe * sqe) {
    176         sqe->flags = 0;
    177         sqe->ioprio = 0;
    178         sqe->fd = 0;
    179         sqe->off = 0;
    180         sqe->addr = 0;
    181         sqe->len = 0;
    182         sqe->fsync_flags = 0;
    183         sqe->__pad2[0] = 0;
    184         sqe->__pad2[1] = 0;
    185         sqe->__pad2[2] = 0;
    186         sqe->fd = 0;
    187         sqe->off = 0;
    188         sqe->addr = 0;
    189         sqe->len = 0;
    190 }
    191 
    192175enum FSM_STATE {
    193176        Initial,
  • doc/theses/mubeen_zulfiqar_MMath/Makefile

    ref3c383 rd672350  
    1 DOC = uw-ethesis.pdf
    2 BASE = ${DOC:%.pdf=%} # remove suffix
    31# directory for latex clutter files
    4 BUILD = build
    5 TEXSRC = $(wildcard *.tex)
    6 FIGSRC = $(wildcard *.fig)
    7 BIBSRC = $(wildcard *.bib)
    8 TEXLIB = .:../../LaTeXmacros:${BUILD}: # common latex macros
    9 BIBLIB = .:../../bibliography # common citation repository
     2Build = build
     3Figures = figures
     4Pictures = pictures
     5TeXSRC = ${wildcard *.tex}
     6FigSRC = ${notdir ${wildcard ${Figures}/*.fig}}
     7PicSRC = ${notdir ${wildcard ${Pictures}/*.fig}}
     8BIBSRC = ${wildcard *.bib}
     9TeXLIB = .:../../LaTeXmacros:${Build}: # common latex macros
     10BibLIB = .:../../bibliography # common citation repository
    1011
    1112MAKEFLAGS = --no-print-directory # --silent
    12 VPATH = ${BUILD}
     13VPATH = ${Build} ${Figures} ${Pictures} # extra search path for file names used in document
    1314
    1415### Special Rules:
     
    1819
    1920### Commands:
    20 LATEX = TEXINPUTS=${TEXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${BUILD}
    21 BIBTEX = BIBINPUTS=${BIBLIB} bibtex
    22 #GLOSSARY = INDEXSTYLE=${BUILD} makeglossaries-lite
     21
     22LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
     23BibTeX = BIBINPUTS=${BibLIB} bibtex
     24#Glossary = INDEXSTYLE=${Build} makeglossaries-lite
    2325
    2426### Rules and Recipes:
    2527
     28DOC = uw-ethesis.pdf
     29BASE = ${DOC:%.pdf=%} # remove suffix
     30
    2631all: ${DOC}
    2732
    28 ${BUILD}/%.dvi: ${TEXSRC} ${FIGSRC:%.fig=%.tex} ${BIBSRC} Makefile | ${BUILD}
    29         ${LATEX} ${BASE}
    30         ${BIBTEX} ${BUILD}/${BASE}
    31         ${LATEX} ${BASE}
    32 #       ${GLOSSARY} ${BUILD}/${BASE}
    33 #       ${LATEX} ${BASE}
     33clean:
     34        @rm -frv ${DOC} ${Build}
    3435
    35 ${BUILD}:
     36# File Dependencies #
     37
     38${Build}/%.dvi : ${TeXSRC} ${FigSRC:%.fig=%.tex} ${PicSRC:%.fig=%.pstex} ${BIBSRC} Makefile | ${Build}
     39        ${LaTeX} ${BASE}
     40        ${BibTeX} ${Build}/${BASE}
     41        ${LaTeX} ${BASE}
     42        # if needed, run latex again to get citations
     43        if fgrep -s "LaTeX Warning: Citation" ${basename $@}.log ; then ${LaTeX} ${BASE} ; fi
     44#       ${Glossary} ${Build}/${BASE}
     45#       ${LaTeX} ${BASE}
     46
     47${Build}:
    3648        mkdir $@
    3749
    38 %.pdf : ${BUILD}/%.ps | ${BUILD}
     50%.pdf : ${Build}/%.ps | ${Build}
    3951        ps2pdf $<
    4052
    41 %.ps : %.dvi | ${BUILD}
     53%.ps : %.dvi | ${Build}
    4254        dvips $< -o $@
    4355
    44 %.tex : %.fig | ${BUILD}
    45         fig2dev -L eepic $< > ${BUILD}/$@
     56%.tex : %.fig | ${Build}
     57        fig2dev -L eepic $< > ${Build}/$@
    4658
    47 %.ps : %.fig | ${BUILD}
    48         fig2dev -L ps $< > ${BUILD}/$@
     59%.ps : %.fig | ${Build}
     60        fig2dev -L ps $< > ${Build}/$@
    4961
    50 %.pstex : %.fig | ${BUILD}
    51         fig2dev -L pstex $< > ${BUILD}/$@
    52         fig2dev -L pstex_t -p ${BUILD}/$@ $< > ${BUILD}/$@_t
    53 
    54 clean:
    55         @rm -frv ${DOC} ${BUILD} *.fig.bak
     62%.pstex : %.fig | ${Build}
     63        fig2dev -L pstex $< > ${Build}/$@
     64        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
  • doc/theses/mubeen_zulfiqar_MMath/allocator.tex

    ref3c383 rd672350  
    11\chapter{Allocator}
    22
    3 \noindent
    4 ====================
    5 
    6 Writing Points:
    7 \begin{itemize}
    8 \item
    9 Objective of uHeapLmmm.
    10 \item
    11 Design philosophy.
    12 \item
    13 Background and previous design of uHeapLmmm.
    14 \item
    15 Distributed design of uHeapLmmm.
    16 
    17 ----- SHOULD WE GIVE IMPLEMENTATION DETAILS HERE? -----
    18 
    19 \PAB{Maybe. There might be an Implementation chapter.}
    20 \item
    21 figure.
    22 \item
    23 Advantages of distributed design.
    24 \end{itemize}
    25 
    26 The new features added to uHeapLmmm (incl. @malloc\_size@ routine)
    27 \CFA alloc interface with examples.
    28 
    29 \begin{itemize}
    30 \item
    31 Why did we need it?
    32 \item
    33 The added benefits.
    34 \end{itemize}
    35 
    36 
    37 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    38 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    39 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% uHeapLmmm Design
    40 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    41 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    42 
    43 \section{Objective of uHeapLmmm}
    44 UHeapLmmm is a lightweight memory allocator. The objective behind uHeapLmmm is to design a minimal concurrent memory allocator that has new features and also fulfills GNU C Library requirements (FIX ME: cite requirements).
    45 
    46 \subsection{Design philosophy}
    47 The objective of uHeapLmmm's new design was to fulfill following requirements:
    48 \begin{itemize}
    49 \item It should be concurrent to be used in multi-threaded programs.
     3\section{uHeap}
     4uHeap is a lightweight memory allocator. The objective behind uHeap is to design a minimal concurrent memory allocator that has new features and also fulfills GNU C Library requirements (FIX ME: cite requirements).
     5
      6The objective of uHeap's new design was to fulfill the following requirements:
     7\begin{itemize}
     8\item It should be concurrent and thread-safe for multi-threaded programs.
    509\item It should avoid global locks, on resources shared across all threads, as much as possible.
     5110\item Its performance (FIX ME: cite performance benchmarks) should be comparable to the commonly used allocators (FIX ME: cite common allocators).
     
    5514%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    5615
    57 \section{Background and previous design of uHeapLmmm}
    58 uHeapLmmm was originally designed by X in X (FIX ME: add original author after confirming with Peter).
    59 (FIX ME: make and add figure of previous design with description)
    60 
    61 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    62 
    63 \section{Distributed design of uHeapLmmm}
    64 uHeapLmmm's design was reviewed and changed to fulfill new requirements (FIX ME: cite allocator philosophy). For this purpose, following two designs of uHeapLmm were proposed:
    65 
    66 \paragraph{Design 1: Decentralized}
     16\section{Design choices for uHeap}
      17uHeap's design was reviewed and changed to fulfill new requirements (FIX ME: cite allocator philosophy). For this purpose, the following designs of uHeap were proposed:
     18
     19\paragraph{Design 1: Centralized}
     20One heap, but lower bucket sizes are N-shared across KTs.
     21This design leverages the fact that 95\% of allocation requests are less than 512 bytes and there are only 3--5 different request sizes.
      22When KTs $\le$ N, the important bucket sizes are uncontended.
      23When KTs $>$ N, the free buckets are contended.
     24Therefore, threads are only contending for a small number of buckets, which are distributed among them to reduce contention.
     25\begin{cquote}
     26\centering
     27\input{AllocDS2}
     28\end{cquote}
     29Problems: need to know when a kernel thread (KT) is created and destroyed to know when to assign a shared bucket-number.
      30When no thread is assigned a bucket number, its free storage is unavailable. All KTs will contend for one lock on sbrk for their initial allocations (before the free-lists get populated).
     31
     32\paragraph{Design 2: Decentralized N Heaps}
    6733Fixed number of heaps: shard the heap into N heaps each with a bump-area allocated from the @sbrk@ area.
    6834Kernel threads (KT) are assigned to the N heaps.
     
    7743Problems: need to know when a KT is created and destroyed to know when to assign/un-assign a heap to the KT.
    7844
    79 \paragraph{Design 2: Centralized}
    80 One heap, but lower bucket sizes are N-shared across KTs.
    81 This design leverages the fact that 95\% of allocation requests are less than 512 bytes and there are only 3--5 different request sizes.
    82 When KTs $\le$ N, the important bucket sizes are uncontented.
    83 When KTs $>$ N, the free buckets are contented.
    84 Therefore, threads are only contending for a small number of buckets, which are distributed among them to reduce contention.
    85 \begin{cquote}
     45\paragraph{Design 3: Decentralized Per-thread Heaps}
      46Design 3 is similar to Design 2 but instead of having an M:N model, it uses a 1:1 model. So, instead of having N heaps and sharing them among M KTs, Design 3 has one heap for each KT.
     47Dynamic number of heaps: create a thread-local heap for each kernel thread (KT) with a bump-area allocated from the @sbrk@ area.
      48Each KT will have its own exclusive thread-local heap. The heap will be uncontended between KTs regardless of how many KTs have been created.
     49Operations on @sbrk@ area will still be protected by locks.
     50%\begin{cquote}
     51%\centering
     52%\input{AllocDS3} FIXME add figs
     53%\end{cquote}
      54Problems: We cannot destroy the heap when a KT exits because our dynamic objects have ownership and are returned to the heap that created them when the program frees them. All dynamic objects point back to their owner heap. If a thread A creates an object O, passes it to another thread B, and then exits, O should still return to A's heap when B frees it; therefore A's heap must be preserved for the lifetime of the program, as there might be objects in use by other threads that were allocated by A. Also, we need to know when a KT is created and destroyed to know when to create/destroy a heap for the KT.
     55
     56\paragraph{Design 4: Decentralized Per-CPU Heaps}
     57Design 4 is similar to Design 3 but instead of having a heap for each thread, it creates a heap for each CPU.
     58Fixed number of heaps for a machine: create a heap for each CPU with a bump-area allocated from the @sbrk@ area.
      59Each CPU will have its own CPU-local heap. When the program does a dynamic memory operation, it is handled by the heap of the CPU on which the thread is currently running.
      60Each CPU will have its own exclusive heap. Just like Design 3 (FIXME cite), the heap will be uncontended between KTs regardless of how many KTs have been created.
     61Operations on @sbrk@ area will still be protected by locks.
      62To deal with preemption during a dynamic memory operation, librseq (FIXME cite) will be used to make sure that the whole dynamic memory operation completes on one CPU. librseq's restartable sequences make it possible to re-run a critical section and undo the current writes if a preemption happened during the critical section's execution.
     63%\begin{cquote}
     64%\centering
     65%\input{AllocDS4} FIXME add figs
     66%\end{cquote}
     67
      68Problems: This approach was slower than the per-thread model. Also, librseq does not provide restartable sequences that detect preemptions in a user-level threading system, which is important to us as CFA (FIXME cite) has its own threading system that we want to support.
     69
     70Out of the four designs, Design 3 was chosen because of the following reasons.
     71\begin{itemize}
     72\item
      73Decentralized designs are better in general than a centralized design because their concurrency is better across all bucket sizes: Design 1 shards only a few buckets of selected sizes, while the other designs shard all the buckets. Decentralized designs shard the whole heap, which contains all the buckets, in addition to sharding the sbrk area. So Design 1 was eliminated.
     74\item
      75Design 2 was eliminated because it has a possibility of contention in case KT > N, while Designs 3 and 4 have no contention in any scenario.
     76\item
      77Design 4 was eliminated because it was slower than Design 3 and it provided no way to achieve user-threading safety using librseq. We had to use CFA interruption handling to achieve user-threading safety, which has some cost. Design 4 was already slower than Design 3; adding the cost of interruption handling on top of that would have made it even slower.
     78\end{itemize}
     79
     80
     81\subsection{Advantages of distributed design}
     82
      83The distributed design of uHeap is concurrent, so it works in multi-threaded applications.
     84
     85Some key benefits of the distributed design of uHeap are as follows:
     86
     87\begin{itemize}
     88\item
     89The bump allocation is concurrent as memory taken from sbrk is sharded across all heaps as bump allocation reserve. The call to sbrk will be protected using locks but bump allocation (on memory taken from sbrk) will not be contended once the sbrk call has returned.
     90\item
     91Low or almost no contention on heap resources.
     92\item
     93It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty.
     94\item
      95The distributed design avoids unnecessary locks on resources shared across all KTs.
     96\end{itemize}
     97
     98%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
     99
     100\section{uHeap Structure}
     101
      102As described in (FIXME cite 2.4), uHeap uses the following features of multi-threaded memory allocators.
     103\begin{itemize}
     104\item
      105uHeap has multiple heaps without a global heap and uses a 1:1 model. (FIXME cite 2.5 1:1 model)
     106\item
     107uHeap uses object ownership. (FIXME cite 2.5.2)
     108\item
      109uHeap does not use object containers (FIXME cite 2.6) or any coalescing technique. Instead, each dynamic object allocated by uHeap has a header that contains bookkeeping information.
     110\item
      111Each thread-local heap in uHeap has its own allocation buffer that is taken from the system using the sbrk() call. (FIXME cite 2.7)
     112\item
      113Unless a heap is freeing an object that is owned by another thread's heap or the heap is using the sbrk() system call, uHeap is mostly lock-free, which eliminates most of the contention on shared resources. (FIXME cite 2.8)
     114\end{itemize}
     115
      116As uHeap uses a heap-per-thread model to reduce contention on heap resources, we manage a list of heaps (heap-list) that can be used by threads. The list is empty at the start of the program. When a kernel thread (KT) is created, we check whether the heap-list is empty. If not, a heap is removed from the heap-list and given to the new KT to use exclusively. If it is empty, a new heap object is created in dynamic memory and given to the new KT to use exclusively. When a KT exits, its heap is not destroyed; instead, it is put on the heap-list, ready to be reused by new KTs.
     117
      118This reduces the memory footprint, as the objects on the free-lists of a KT that has exited can be reused by a new KT. Also, we preserve all the heaps that were created during the lifetime of the program until the end of the program. uHeap uses object ownership, where an object is freed to the free-buckets of the heap that allocated it. Even after a KT A has exited, its heap has to be preserved, as there might be objects in use by other threads that were initially allocated by A and then passed to other threads.
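
The following is a minimal sketch of the heap-list reuse policy described above; all names (heap_list, acquire_heap, release_heap, new_heap) are hypothetical and only illustrate the behaviour, not the actual uHeap code.
\begin{lstlisting}
// Hypothetical sketch of heap reuse across kernel threads (KTs).
#include <pthread.h>
#include <stdlib.h>

struct heap { struct heap * next; /* free-buckets, bump area, ... */ };

static struct heap * heap_list = NULL;                 // heaps of exited KTs, ready for reuse
static pthread_mutex_t heap_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct heap * new_heap( void ) { return calloc( 1, sizeof(struct heap) ); }

static struct heap * acquire_heap( void ) {            // called when a KT is created
	pthread_mutex_lock( &heap_list_lock );
	struct heap * h = heap_list;
	if ( h != NULL ) heap_list = h->next;              // reuse a heap from an exited KT
	pthread_mutex_unlock( &heap_list_lock );
	if ( h == NULL ) h = new_heap();                   // heap-list empty: create a new heap
	return h;
}

static void release_heap( struct heap * h ) {          // called when a KT exits; heap is preserved
	pthread_mutex_lock( &heap_list_lock );
	h->next = heap_list;  heap_list = h;
	pthread_mutex_unlock( &heap_list_lock );
}
\end{lstlisting}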
     119
     120\begin{figure}
    86121\centering
    87 \input{AllocDS2}
    88 \end{cquote}
    89 Problems: need to know when a kernel thread (KT) is created and destroyed to know when to assign a shared bucket-number.
    90 When no thread is assigned a bucket number, its free storage is unavailable. All KTs will be contended for one lock on sbrk for their initial allocations (before free-lists gets populated).
    91 
    92 Out of the two designs, Design 1 was chosen because it's concurrency is better across all bucket-sizes as design-2 shards a few buckets of selected sizes while design-1 shards all the buckets. Design-2 shards the whole heap which has all the buckets with the addition of sharding sbrk area.
    93 
    94 \subsection{Advantages of distributed design}
    95 The distributed design of uHeapLmmm is concurrent to work in multi-threaded applications.
    96 
    97 Some key benefits of the distributed design of uHeapLmmm are as follows:
    98 
    99 \begin{itemize}
    100 \item
    101 The bump allocation is concurrent as memory taken from sbrk is sharded across all heaps as bump allocation reserve. The lock on bump allocation (on memory taken from sbrk) will only be contended if KTs > N. The contention on sbrk area is less likely as it will only happen in the case if heaps assigned to two KTs get short of bump allocation reserve simultanously.
    102 \item
    103 N heaps are created at the start of the program and destroyed at the end of program. When a KT is created, we only assign it to one of the heaps. When a KT is destroyed, we only dissociate it from the assigned heap but we do not destroy that heap. That heap will go back to our pool-of-heaps, ready to be used by some new KT. And if that heap was shared among multiple KTs (like the case of KTs > N) then, on deletion of one KT, that heap will be still in-use of the other KTs. This will prevent creation and deletion of heaps during run-time as heaps are re-usable which helps in keeping low-memory footprint.
    104 \item
    105 It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty.
    106 \item
    107 Distributed design avoids unnecassry locks on resources shared across all KTs.
    108 \end{itemize}
    109 
    110 FIX ME: Cite performance comparison of the two heap designs if required
     122\includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps}
     123\caption{HeapStructure}
     124\label{fig:heapStructureFig}
     125\end{figure}
     126
      127Each heap uses segregated free-buckets that hold free objects of a specific size. Each free-bucket has the following two lists (see the sketch after the list):
     128\begin{itemize}
     129\item
      130The free list is used when a thread frees an object that is owned by its own heap; the free list does not use any locks/atomic operations as it is only used by the owner KT.
     131\item
      132The away list is used when a thread A frees an object that is owned by another KT B's heap. The object should be returned to the owner heap (B's heap), so A places the object on B's away list. The away list is lock protected as it is shared by all other threads.
     133\end{itemize}
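
A minimal sketch of a per-size free-bucket with the two lists described above; the field names are hypothetical, not the actual uHeap data structure.
\begin{lstlisting}
// Hypothetical sketch of a free-bucket: one lock-free owner list, one locked away list.
#include <pthread.h>
#include <stddef.h>

struct free_object { struct free_object * next; };

struct free_bucket {
	size_t object_size;                  // fixed object size served by this bucket
	struct free_object * free_list;      // used only by the owner KT: no locks or atomics
	struct free_object * away_list;      // objects freed by other KTs: lock protected
	pthread_mutex_t away_lock;           // protects away_list
};
\end{lstlisting}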
     134
      135When a dynamic object of size S is requested, the thread-local heap checks whether S is greater than or equal to the mmap threshold. Any request larger than the mmap threshold is fulfilled by allocating an mmap area of that size; such requests are not allocated in the sbrk area. The value of this threshold can be changed using the mallopt routine, but the new value should not be larger than our biggest free-bucket size.
     136
     137Algorithm~\ref{alg:heapObjectAlloc} briefly shows how an allocation request is fulfilled.
     138
     139\begin{algorithm}
     140\caption{Dynamic object allocation of size S}\label{alg:heapObjectAlloc}
     141\begin{algorithmic}[1]
     142\State $\textit{O} \gets \text{NULL}$
     143\If {$S < \textit{mmap-threshhold}$}
     144        \State $\textit{B} \gets (\text{smallest free-bucket} \geq S)$
     145        \If {$\textit{B's free-list is empty}$}
     146                \If {$\textit{B's away-list is empty}$}
     147                        \If {$\textit{heap's allocation buffer} < S$}
     148                                \State $\text{get allocation buffer using system call sbrk()}$
     149                        \EndIf
     150                        \State $\textit{O} \gets \text{bump allocate an object of size S from allocation buffer}$
     151                \Else
     152                        \State $\textit{merge B's away-list into free-list}$
     153                        \State $\textit{O} \gets \text{pop an object from B's free-list}$
     154                \EndIf
     155        \Else
     156                \State $\textit{O} \gets \text{pop an object from B's free-list}$
     157        \EndIf
     158        \State $\textit{O's owner} \gets \text{B}$
     159\Else
     160        \State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$
     161\EndIf
     162\State $\Return \textit{ O}$
     163\end{algorithmic}
     164\end{algorithm}
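
For readers who prefer code, the following is a direct transliteration of Algorithm~\ref{alg:heapObjectAlloc} into C-style pseudocode; every helper routine named here is hypothetical and stands in for the corresponding step of the algorithm.
\begin{lstlisting}
// Sketch of Algorithm 1 (allocation of size S); helper names are illustrative only.
void * alloc_object( size_t S ) {
	void * O = NULL;
	if ( S < mmap_threshold ) {
		struct free_bucket * B = smallest_bucket_at_least( S );
		if ( B->free_list == NULL ) {
			if ( B->away_list == NULL ) {
				if ( heap_buffer_remaining() < S )
					refill_buffer_with_sbrk();              // locked sbrk call
				O = bump_allocate( S );                     // uncontended bump allocation
			} else {
				merge_away_list_into_free_list( B );        // drain the lock-protected away list
				O = pop_free_list( B );
			}
		} else {
			O = pop_free_list( B );                         // fast path: owner-only free list
		}
		set_owner( O, B );
	} else {
		O = mmap_allocate( S );                             // large requests bypass the sbrk area
	}
	return O;
}
\end{lstlisting}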
     165
    111166
    112167%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    113168
    114169\section{Added Features and Methods}
    115 To improve the UHeapLmmm allocator (FIX ME: cite uHeapLmmm) interface and make it more user friendly, we added a few more routines to the C allocator. Also, we built a CFA (FIX ME: cite cforall) interface on top of C interface to increase the usability of the allocator.
     170To improve the uHeap allocator (FIX ME: cite uHeap) interface and make it more user friendly, we added a few more routines to the C allocator. Also, we built a \CFA (FIX ME: cite cforall) interface on top of C interface to increase the usability of the allocator.
    116171
    117172\subsection{C Interface}
    118173We added a few more features and routines to the allocator's C interface that can make the allocator more usable to programmers. These features give the programmer more control over dynamic memory allocation.
    119174
    120 \subsubsection void * aalloc( size\_t dim, size\_t elemSize )
    121 aalloc is an extension of malloc. It allows programmer to allocate a dynamic array of objects without calculating the total size of array explicitly. The only alternate of this routine in the other allocators is calloc but calloc also fills the dynamic memory with 0 which makes it slower for a programmer who only wants to dynamically allocate an array of objects without filling it with 0.
    122 \paragraph{Usage}
    123 aalloc takes two parameters.
    124 
    125 \begin{itemize}
    126 \item
    127 dim: number of objects in the array
    128 \item
    129 elemSize: size of the object in the array.
    130 \end{itemize}
    131 It returns address of dynamic object allocatoed on heap that can contain dim number of objects of the size elemSize. On failure, it returns NULL pointer.
    132 
    133 \subsubsection void * resize( void * oaddr, size\_t size )
    134 resize is an extension of relloc. It allows programmer to reuse a cuurently allocated dynamic object with a new size requirement. Its alternate in the other allocators is realloc but relloc also copy the data in old object to the new object which makes it slower for the programmer who only wants to reuse an old dynamic object for a new size requirement but does not want to preserve the data in the old object to the new object.
    135 \paragraph{Usage}
    136 resize takes two parameters.
    137 
    138 \begin{itemize}
    139 \item
    140 oaddr: the address of the old object that needs to be resized.
    141 \item
    142 size: the new size requirement of the to which the old object needs to be resized.
    143 \end{itemize}
    144 It returns an object that is of the size given but it does not preserve the data in the old object. On failure, it returns NULL pointer.
    145 
    146 \subsubsection void * resize( void * oaddr, size\_t nalign, size\_t size )
    147 This resize is an extension of the above resize (FIX ME: cite above resize). In addition to resizing the size of of an old object, it can also realign the old object to a new alignment requirement.
     175\subsection{Out of Memory}
     176
     177Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory;
     178hence the need to return an alternate value for a zero-sized allocation.
     179The alternative is to abort a program when out of memory.
     180In theory, notifying the programmer allows recovery;
      181in practice, it is almost impossible to recover gracefully when out of memory, so the cheaper approach of returning @nullptr@ for a zero-sized allocation is chosen.
     182
     183
     184\subsection{\lstinline{void * aalloc( size_t dim, size_t elemSize )}}
      185@aalloc@ is an extension of malloc. It allows the programmer to allocate a dynamic array of objects without calculating the total size of the array explicitly. The only alternative to this routine in other allocators is calloc, but calloc also fills the dynamic memory with 0, which makes it slower for a programmer who only wants to dynamically allocate an array of objects without filling it with 0.
     186\paragraph{Usage}
     187@aalloc@ takes two parameters.
     188
     189\begin{itemize}
     190\item
     191@dim@: number of objects in the array
     192\item
     193@elemSize@: size of the object in the array.
     194\end{itemize}
      195It returns the address of a dynamic object allocated on the heap that can contain dim objects of size elemSize. On failure, it returns a @NULL@ pointer.
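
For illustration, a minimal usage sketch of @aalloc@ (prototype taken from the heading above):
\begin{lstlisting}
// Sketch only: allocate an array of 100 ints without zero filling.
#include <stdlib.h>
void * aalloc( size_t dim, size_t elemSize );

void example( void ) {
	int * a = aalloc( 100, sizeof(int) );   // like calloc( 100, sizeof(int) ), but no zero fill
	if ( a == NULL ) return;                // allocation failure
	free( a );
}
\end{lstlisting}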
     196
     197\subsection{\lstinline{void * resize( void * oaddr, size_t size )}}
      198@resize@ is an extension of realloc. It allows the programmer to reuse a currently allocated dynamic object with a new size requirement. Its alternative in other allocators is @realloc@, but @realloc@ also copies the data from the old object to the new object, which makes it slower for the programmer who only wants to reuse an old dynamic object for a new size requirement but does not want to preserve the data.
     199\paragraph{Usage}
     200@resize@ takes two parameters.
     201
     202\begin{itemize}
     203\item
     204@oaddr@: the address of the old object that needs to be resized.
     205\item
      206@size@: the new size to which the old object needs to be resized.
     207\end{itemize}
     208It returns an object that is of the size given but it does not preserve the data in the old object. On failure, it returns a @NULL@ pointer.
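
A short usage sketch of @resize@ (prototype from the heading above); note the old contents are not preserved:
\begin{lstlisting}
// Sketch only: reuse an allocation with a new size, without copying the old data.
#include <stdlib.h>
void * resize( void * oaddr, size_t size );

void example( void ) {
	char * buf = malloc( 64 );
	buf = resize( buf, 4096 );   // unlike realloc, the old contents need not be copied
	free( buf );
}
\end{lstlisting}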
     209
     210\subsection{\lstinline{void * resize( void * oaddr, size_t nalign, size_t size )}}
      210This @resize@ is an extension of the above @resize@ (FIX ME: cite above resize). In addition to resizing an old object, it can also realign the old object to a new alignment requirement.
    148212\paragraph{Usage}
    149213This resize takes three parameters. It takes an additional parameter of nalign as compared to the above resize (FIX ME: cite above resize).
     
    151215\begin{itemize}
    152216\item
    153 oaddr: the address of the old object that needs to be resized.
    154 \item
    155 nalign: the new alignment to which the old object needs to be realigned.
    156 \item
    157 size: the new size requirement of the to which the old object needs to be resized.
    158 \end{itemize}
    159 It returns an object with the size and alignment given in the parameters. On failure, it returns a NULL pointer.
    160 
    161 \subsubsection void * amemalign( size\_t alignment, size\_t dim, size\_t elemSize )
     217@oaddr@: the address of the old object that needs to be resized.
     218\item
     219@nalign@: the new alignment to which the old object needs to be realigned.
     220\item
      221@size@: the new size to which the old object needs to be resized.
     222\end{itemize}
     223It returns an object with the size and alignment given in the parameters. On failure, it returns a @NULL@ pointer.
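
A usage sketch of the aligned @resize@ variant (prototype from the heading above):
\begin{lstlisting}
// Sketch only: resize and realign an existing allocation in one call.
#include <stdlib.h>
void * resize( void * oaddr, size_t nalign, size_t size );

void example( void ) {
	void * p = malloc( 100 );
	p = resize( p, 64, 256 );   // new size 256 bytes, now 64-byte aligned; data not preserved
	free( p );
}
\end{lstlisting}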
     224
     225\subsection{\lstinline{void * amemalign( size_t alignment, size_t dim, size_t elemSize )}}
    162226amemalign is a hybrid of memalign and aalloc. It allows programmer to allocate an aligned dynamic array of objects without calculating the total size of the array explicitly. It frees the programmer from calculating the total size of the array.
    163227\paragraph{Usage}
     
    166230\begin{itemize}
    167231\item
    168 alignment: the alignment to which the dynamic array needs to be aligned.
    169 \item
    170 dim: number of objects in the array
    171 \item
    172 elemSize: size of the object in the array.
    173 \end{itemize}
    174 It returns a dynamic array of objects that has the capacity to contain dim number of objects of the size of elemSize. The returned dynamic array is aligned to the given alignment. On failure, it returns NULL pointer.
    175 
    176 \subsubsection void * cmemalign( size\_t alignment, size\_t dim, size\_t elemSize )
     232@alignment@: the alignment to which the dynamic array needs to be aligned.
     233\item
     234@dim@: number of objects in the array
     235\item
     236@elemSize@: size of the object in the array.
     237\end{itemize}
     238It returns a dynamic array of objects that has the capacity to contain dim number of objects of the size of elemSize. The returned dynamic array is aligned to the given alignment. On failure, it returns a @NULL@ pointer.
     239
     240\subsection{\lstinline{void * cmemalign( size_t alignment, size_t dim, size_t elemSize )}}
    177241cmemalign is a hybrid of amemalign and calloc. It allows programmer to allocate an aligned dynamic array of objects that is 0 filled. The current way to do this in other allocators is to allocate an aligned object with memalign and then fill it with 0 explicitly. This routine provides both features of aligning and 0 filling, implicitly.
    178242\paragraph{Usage}
     
    181245\begin{itemize}
    182246\item
    183 alignment: the alignment to which the dynamic array needs to be aligned.
    184 \item
    185 dim: number of objects in the array
    186 \item
    187 elemSize: size of the object in the array.
    188 \end{itemize}
    189 It returns a dynamic array of objects that has the capacity to contain dim number of objects of the size of elemSize. The returned dynamic array is aligned to the given alignment and is 0 filled. On failure, it returns NULL pointer.
    190 
    191 \subsubsection size\_t malloc\_alignment( void * addr )
    192 malloc\_alignment returns the alignment of a currently allocated dynamic object. It allows the programmer in memory management and personal bookkeeping. It helps the programmer in verofying the alignment of a dynamic object especially in a scenerio similar to prudcer-consumer where a producer allocates a dynamic object and the consumer needs to assure that the dynamic object was allocated with the required alignment.
    193 \paragraph{Usage}
    194 malloc\_alignment takes one parameters.
    195 
    196 \begin{itemize}
    197 \item
    198 addr: the address of the currently allocated dynamic object.
    199 \end{itemize}
    200 malloc\_alignment returns the alignment of the given dynamic object. On failure, it return the value of default alignment of the uHeapLmmm allocator.
    201 
    202 \subsubsection bool malloc\_zero\_fill( void * addr )
    203 malloc\_zero\_fill returns whether a currently allocated dynamic object was initially zero filled at the time of allocation. It allows the programmer in memory management and personal bookkeeping. It helps the programmer in verifying the zero filled property of a dynamic object especially in a scenerio similar to prudcer-consumer where a producer allocates a dynamic object and the consumer needs to assure that the dynamic object was zero filled at the time of allocation.
    204 \paragraph{Usage}
    205 malloc\_zero\_fill takes one parameters.
    206 
    207 \begin{itemize}
    208 \item
    209 addr: the address of the currently allocated dynamic object.
    210 \end{itemize}
    211 malloc\_zero\_fill returns true if the dynamic object was initially zero filled and return false otherwise. On failure, it returns false.
    212 
    213 \subsubsection size\_t malloc\_size( void * addr )
    214 malloc\_size returns the allocation size of a currently allocated dynamic object. It allows the programmer in memory management and personal bookkeeping. It helps the programmer in verofying the alignment of a dynamic object especially in a scenerio similar to prudcer-consumer where a producer allocates a dynamic object and the consumer needs to assure that the dynamic object was allocated with the required size. Its current alternate in the other allocators is malloc\_usable\_size. But, malloc\_size is different from malloc\_usable\_size as malloc\_usabe\_size returns the total data capacity of dynamic object including the extra space at the end of the dynamic object. On the other hand, malloc\_size returns the size that was given to the allocator at the allocation of the dynamic object. This size is updated when an object is realloced, resized, or passed through a similar allocator routine.
    215 \paragraph{Usage}
    216 malloc\_size takes one parameters.
    217 
    218 \begin{itemize}
    219 \item
    220 addr: the address of the currently allocated dynamic object.
    221 \end{itemize}
    222 malloc\_size returns the allocation size of the given dynamic object. On failure, it return zero.
    223 
    224 \subsubsection void * realloc( void * oaddr, size\_t nalign, size\_t size )
    225 This realloc is an extension of the default realloc (FIX ME: cite default realloc). In addition to reallocating an old object and preserving the data in old object, it can also realign the old object to a new alignment requirement.
    226 \paragraph{Usage}
    227 This realloc takes three parameters. It takes an additional parameter of nalign as compared to the default realloc.
    228 
    229 \begin{itemize}
    230 \item
    231 oaddr: the address of the old object that needs to be reallocated.
    232 \item
    233 nalign: the new alignment to which the old object needs to be realigned.
    234 \item
    235 size: the new size requirement of the to which the old object needs to be resized.
    236 \end{itemize}
    237 It returns an object with the size and alignment given in the parameters that preserves the data in the old object. On failure, it returns a NULL pointer.
    238 
    239 \subsection{CFA Malloc Interface}
    240 We added some routines to the malloc interface of CFA. These routines can only be used in CFA and not in our standalone uHeapLmmm allocator as these routines use some features that are only provided by CFA and not by C. It makes the allocator even more usable to the programmers.
    241 CFA provides the liberty to know the returned type of a call to the allocator. So, mainly in these added routines, we removed the object size parameter from the routine as allocator can calculate the size of the object from the returned type.
    242 
    243 \subsubsection T * malloc( void )
     247@alignment@: the alignment to which the dynamic array needs to be aligned.
     248\item
     249@dim@: number of objects in the array
     250\item
     251@elemSize@: size of the object in the array.
     252\end{itemize}
     253It returns a dynamic array of objects that has the capacity to contain dim number of objects of the size of elemSize. The returned dynamic array is aligned to the given alignment and is 0 filled. On failure, it returns a @NULL@ pointer.
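
A usage sketch of the two aligned array-allocation routines above (prototypes from the headings):
\begin{lstlisting}
// Sketch only: aligned dynamic arrays, with and without zero filling.
#include <stdlib.h>
void * amemalign( size_t alignment, size_t dim, size_t elemSize );
void * cmemalign( size_t alignment, size_t dim, size_t elemSize );

void example( void ) {
	double * a = amemalign( 64, 1000, sizeof(double) );   // 64-byte aligned, uninitialized
	double * b = cmemalign( 64, 1000, sizeof(double) );   // 64-byte aligned, zero-filled
	free( a );  free( b );
}
\end{lstlisting}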
     254
     255\subsection{\lstinline{size_t malloc_alignment( void * addr )}}
      256@malloc_alignment@ returns the alignment of a currently allocated dynamic object. It aids the programmer in memory management and personal bookkeeping. It helps the programmer verify the alignment of a dynamic object, especially in a scenario similar to producer-consumer, where a producer allocates a dynamic object and the consumer needs to ensure that the dynamic object was allocated with the required alignment.
     257\paragraph{Usage}
      258@malloc_alignment@ takes one parameter.
     259
     260\begin{itemize}
     261\item
     262@addr@: the address of the currently allocated dynamic object.
     263\end{itemize}
      264@malloc_alignment@ returns the alignment of the given dynamic object. On failure, it returns the default alignment of the uHeap allocator.
     265
     266\subsection{\lstinline{bool malloc_zero_fill( void * addr )}}
      267@malloc_zero_fill@ returns whether a currently allocated dynamic object was initially zero filled at the time of allocation. It aids the programmer in memory management and personal bookkeeping. It helps the programmer verify the zero-filled property of a dynamic object, especially in a scenario similar to producer-consumer, where a producer allocates a dynamic object and the consumer needs to ensure that the dynamic object was zero filled at the time of allocation.
     268\paragraph{Usage}
      269@malloc_zero_fill@ takes one parameter.
     270
     271\begin{itemize}
     272\item
     273@addr@: the address of the currently allocated dynamic object.
     274\end{itemize}
      275@malloc_zero_fill@ returns true if the dynamic object was initially zero filled and false otherwise. On failure, it returns false.
     276
     277\subsection{\lstinline{size_t malloc_size( void * addr )}}
      278@malloc_size@ returns the allocation size of a currently allocated dynamic object. It aids the programmer in memory management and personal bookkeeping. It helps the programmer verify the size of a dynamic object, especially in a scenario similar to producer-consumer, where a producer allocates a dynamic object and the consumer needs to ensure that the dynamic object was allocated with the required size. Its current alternative in other allocators is @malloc_usable_size@. But @malloc_size@ is different from @malloc_usable_size@, as @malloc_usable_size@ returns the total data capacity of the dynamic object, including the extra space at the end of the dynamic object. On the other hand, @malloc_size@ returns the size that was given to the allocator at the allocation of the dynamic object. This size is updated when an object is realloced, resized, or passed through a similar allocator routine.
     279\paragraph{Usage}
      280@malloc_size@ takes one parameter.
     281
     282\begin{itemize}
     283\item
     284@addr@: the address of the currently allocated dynamic object.
     285\end{itemize}
      286@malloc_size@ returns the allocation size of the given dynamic object. On failure, it returns zero.
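
A short sketch that exercises the three bookkeeping queries above (prototypes repeated from the headings; the expected values assume the @cmemalign@ call shown):
\begin{lstlisting}
// Sketch only: query the bookkeeping information uHeap keeps for an allocation.
#include <stdbool.h>
#include <stdlib.h>
void * cmemalign( size_t alignment, size_t dim, size_t elemSize );
size_t malloc_alignment( void * addr );
bool   malloc_zero_fill( void * addr );
size_t malloc_size( void * addr );

void example( void ) {
	void * p = cmemalign( 64, 10, sizeof(int) );   // aligned and zero-filled
	size_t align = malloc_alignment( p );          // expected: 64
	bool   zeroed = malloc_zero_fill( p );         // expected: true
	size_t size = malloc_size( p );                // requested size, not usable capacity
	(void)align;  (void)zeroed;  (void)size;
	free( p );
}
\end{lstlisting}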
     287
     288\subsection{\lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}}
      289This @realloc@ is an extension of the default @realloc@ (FIX ME: cite default @realloc@). In addition to reallocating an old object and preserving the data in the old object, it can also realign the old object to a new alignment requirement.
     290\paragraph{Usage}
     291This @realloc@ takes three parameters. It takes an additional parameter of nalign as compared to the default @realloc@.
     292
     293\begin{itemize}
     294\item
     295@oaddr@: the address of the old object that needs to be reallocated.
     296\item
     297@nalign@: the new alignment to which the old object needs to be realigned.
     298\item
      299@size@: the new size to which the old object needs to be resized.
     300\end{itemize}
     301It returns an object with the size and alignment given in the parameters that preserves the data in the old object. On failure, it returns a @NULL@ pointer.
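
A usage sketch of the aligned @realloc@ extension; because the three-parameter @realloc@ overloads the standard one, the sketch assumes \CFA-style overloading as provided by the allocator's interface:
\begin{lstlisting}
// Sketch only: grow an allocation, preserve its contents, and realign it in one call.
// The three-parameter realloc is the allocator extension described in the heading above.
#include <string.h>
#include <stdlib.h>

void example( void ) {
	char * s = malloc( 16 );
	strcpy( s, "hello" );
	s = realloc( s, 64, 128 );   // contents ("hello") preserved, new size 128, 64-byte aligned
	free( s );
}
\end{lstlisting}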
     302
     303\subsection{\CFA Malloc Interface}
      304We added some routines to the malloc interface of \CFA. These routines can only be used in \CFA and not in our standalone uHeap allocator, as they use features that are only provided by \CFA and not by C. This makes the allocator even more usable to programmers.
      305\CFA lets the allocator know the return type of a call to the allocator. So, in these added routines, we mainly removed the object-size parameter, as the allocator can calculate the size of the object from the return type.
     306
     307\subsection{\lstinline{T * malloc( void )}}
     244308This malloc is a simplified polymorphic form of the default malloc (FIX ME: cite malloc). It does not take any parameters, unlike the default malloc, which takes one parameter.
    245309\paragraph{Usage}
    246310This malloc takes no parameters.
    247 It returns a dynamic object of the size of type T. On failure, it return NULL pointer.
    248 
    249 \subsubsection T * aalloc( size\_t dim )
     311It returns a dynamic object of the size of type @T@. On failure, it returns a @NULL@ pointer.
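
A minimal \CFA sketch of the polymorphic @malloc@; the allocation size is inferred from the return context:
\begin{lstlisting}
// Sketch only (CFA): no size parameter; the object size comes from the return type T.
void example( void ) {
	int * ip = malloc();       // allocates sizeof(int) bytes
	double * dp = malloc();    // allocates sizeof(double) bytes
	free( ip );  free( dp );
}
\end{lstlisting}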
     312
     313\subsection{\lstinline{T * aalloc( size_t dim )}}
    250314This aalloc is a simplified polymorphic form of above aalloc (FIX ME: cite aalloc). It takes one parameter as compared to the above aalloc that takes two parameters.
    251315\paragraph{Usage}
     
    254318\begin{itemize}
    255319\item
    256 dim: required number of objects in the array.
    257 \end{itemize}
    258 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type T. On failure, it return NULL pointer.
    259 
    260 \subsubsection T * calloc( size\_t dim )
     320@dim@: required number of objects in the array.
     321\end{itemize}
     322It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer.
     323
     324\subsection{\lstinline{T * calloc( size_t dim )}}
     261325This calloc is a simplified polymorphic form of the default calloc (FIX ME: cite calloc). It takes one parameter as compared to the default calloc that takes two parameters.
    262326\paragraph{Usage}
     
    265329\begin{itemize}
    266330\item
    267 dim: required number of objects in the array.
    268 \end{itemize}
    269 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type T. On failure, it return NULL pointer.
    270 
    271 \subsubsection T * resize( T * ptr, size\_t size )
    272 This resize is a simplified polymorphic form of above resize (FIX ME: cite resize with alignment). It takes two parameters as compared to the above resize that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation as CFA provides gives allocator the liberty to get the alignment of the returned type.
     331@dim@: required number of objects in the array.
     332\end{itemize}
     333It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer.
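
A \CFA sketch of the polymorphic array allocators above; only the dimension is passed, and the element size comes from the return type:
\begin{lstlisting}
// Sketch only (CFA): dim is the single parameter for both routines.
void example( void ) {
	int * a = aalloc( 100 );   // 100 ints, uninitialized
	int * c = calloc( 100 );   // 100 ints, zero-filled
	free( a );  free( c );
}
\end{lstlisting}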
     334
     335\subsection{\lstinline{T * resize( T * ptr, size_t size )}}
      336This resize is a simplified polymorphic form of the above resize (FIX ME: cite resize with alignment). It takes two parameters as compared to the above resize that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation, as \CFA gives the allocator the alignment of the returned type.
    273337\paragraph{Usage}
    274338This resize takes two parameters.
     
    276340\begin{itemize}
    277341\item
    278 ptr: address of the old object.
    279 \item
    280 size: the required size of the new object.
    281 \end{itemize}
    282 It returns a dynamic object of the size given in paramters. The returned object is aligned to the alignemtn of type T. On failure, it return NULL pointer.
    283 
    284 \subsubsection T * realloc( T * ptr, size\_t size )
    285 This realloc is a simplified polymorphic form of defualt realloc (FIX ME: cite realloc with align). It takes two parameters as compared to the above realloc that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation as CFA provides gives allocator the liberty to get the alignment of the returned type.
    286 \paragraph{Usage}
    287 This realloc takes two parameters.
    288 
    289 \begin{itemize}
    290 \item
    291 ptr: address of the old object.
    292 \item
    293 size: the required size of the new object.
    294 \end{itemize}
    295 It returns a dynamic object of the size given in paramters that preserves the data in the given object. The returned object is aligned to the alignemtn of type T. On failure, it return NULL pointer.
    296 
    297 \subsubsection T * memalign( size\_t align )
     342@ptr@: address of the old object.
     343\item
     344@size@: the required size of the new object.
     345\end{itemize}
      346It returns a dynamic object of the size given in the parameters. The returned object is aligned to the alignment of type @T@. On failure, it returns a @NULL@ pointer.
     347
     348\subsection{\lstinline{T * realloc( T * ptr, size_t size )}}
      349This @realloc@ is a simplified polymorphic form of the default @realloc@ (FIX ME: cite @realloc@ with align). It takes two parameters as compared to the above @realloc@ that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation, as \CFA gives the allocator the alignment of the returned type.
     350\paragraph{Usage}
     351This @realloc@ takes two parameters.
     352
     353\begin{itemize}
     354\item
     355@ptr@: address of the old object.
     356\item
     357@size@: the required size of the new object.
     358\end{itemize}
      359It returns a dynamic object of the size given in the parameters that preserves the data in the given object. The returned object is aligned to the alignment of type @T@. On failure, it returns a @NULL@ pointer.
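
A \CFA sketch of the polymorphic @resize@ and @realloc@; the alignment of @T@ is obtained automatically, and the size parameter is assumed here to be in bytes:
\begin{lstlisting}
// Sketch only (CFA): only the new size is given; alignment comes from the pointed-to type.
void example( void ) {
	int * p = aalloc( 10 );
	p = resize( p, 100 * sizeof(int) );    // contents not preserved
	p = realloc( p, 200 * sizeof(int) );   // contents preserved
	free( p );
}
\end{lstlisting}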
     360
     361\subsection{\lstinline{T * memalign( size_t align )}}
     298362This memalign is a simplified polymorphic form of the default memalign (FIX ME: cite memalign). It takes one parameter as compared to the default memalign that takes two parameters.
    299363\paragraph{Usage}
     
    302366\begin{itemize}
    303367\item
    304 align: the required alignment of the dynamic object.
    305 \end{itemize}
    306 It returns a dynamic object of the size of type T that is aligned to given parameter align. On failure, it return NULL pointer.
    307 
    308 \subsubsection T * amemalign( size\_t align, size\_t dim )
     368@align@: the required alignment of the dynamic object.
     369\end{itemize}
      370It returns a dynamic object of the size of type @T@ that is aligned to the given parameter align. On failure, it returns a @NULL@ pointer.
     371
     372\subsection{\lstinline{T * amemalign( size_t align, size_t dim )}}
     309373This amemalign is a simplified polymorphic form of the above amemalign (FIX ME: cite amemalign). It takes two parameters as compared to the above amemalign that takes three parameters.
    310374\paragraph{Usage}
     
    313377\begin{itemize}
    314378\item
    315 align: required alignment of the dynamic array.
    316 \item
    317 dim: required number of objects in the array.
    318 \end{itemize}
    319 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type T. The returned object is aligned to the given parameter align. On failure, it return NULL pointer.
    320 
    321 \subsubsection T * cmemalign( size\_t align, size\_t dim  )
     379@align@: required alignment of the dynamic array.
     380\item
     381@dim@: required number of objects in the array.
     382\end{itemize}
     383It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. The returned object is aligned to the given parameter align. On failure, it returns a @NULL@ pointer.
     384
     385\subsection{\lstinline{T * cmemalign( size_t align, size_t dim  )}}
     322386This cmemalign is a simplified polymorphic form of the above cmemalign (FIX ME: cite cmemalign). It takes two parameters as compared to the above cmemalign that takes three parameters.
    323387\paragraph{Usage}
     
    326390\begin{itemize}
    327391\item
    328 align: required alignment of the dynamic array.
    329 \item
    330 dim: required number of objects in the array.
    331 \end{itemize}
    332 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type T. The returned object is aligned to the given parameter align and is zero filled. On failure, it return NULL pointer.
    333 
    334 \subsubsection T * aligned\_alloc( size\_t align )
    335 This aligned\_alloc is a simplified polymorphic form of defualt aligned\_alloc (FIX ME: cite aligned\_alloc). It takes one parameter as compared to the default aligned\_alloc that takes two parameters.
    336 \paragraph{Usage}
    337 This aligned\_alloc takes one parameter.
    338 
    339 \begin{itemize}
    340 \item
    341 align: required alignment of the dynamic object.
    342 \end{itemize}
    343 It returns a dynamic object of the size of type T that is aligned to the given parameter. On failure, it return NULL pointer.
    344 
    345 \subsubsection int posix\_memalign( T ** ptr, size\_t align )
    346 This posix\_memalign is a simplified polymorphic form of defualt posix\_memalign (FIX ME: cite posix\_memalign). It takes two parameters as compared to the default posix\_memalign that takes three parameters.
    347 \paragraph{Usage}
    348 This posix\_memalign takes two parameter.
    349 
    350 \begin{itemize}
    351 \item
    352 ptr: variable address to store the address of the allocated object.
    353 \item
    354 align: required alignment of the dynamic object.
    355 \end{itemize}
    356 
    357 It stores address of the dynamic object of the size of type T in given parameter ptr. This object is aligned to the given parameter. On failure, it return NULL pointer.
    358 
    359 \subsubsection T * valloc( void )
    360 This valloc is a simplified polymorphic form of defualt valloc (FIX ME: cite valloc). It takes no parameters as compared to the default valloc that takes one parameter.
    361 \paragraph{Usage}
    362 valloc takes no parameters.
    363 It returns a dynamic object of the size of type T that is aligned to the page size. On failure, it return NULL pointer.
    364 
    365 \subsubsection T * pvalloc( void )
    366 This pcvalloc is a simplified polymorphic form of defualt pcvalloc (FIX ME: cite pcvalloc). It takes no parameters as compared to the default pcvalloc that takes one parameter.
    367 \paragraph{Usage}
    368 pvalloc takes no parameters.
    369 It returns a dynamic object of the size that is calcutaed by rouding the size of type T. The returned object is also aligned to the page size. On failure, it return NULL pointer.
    370 
    371 \subsection Alloc Interface
    372 In addition to improve allocator interface both for CFA and our standalone allocator uHeapLmmm in C. We also added a new alloc interface in CFA that increases usability of dynamic memory allocation.
     392@align@: required alignment of the dynamic array.
     393\item
     394@dim@: required number of objects in the array.
     395\end{itemize}
     396It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. The returned object is aligned to the given parameter align and is zero filled. On failure, it returns a @NULL@ pointer.
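
A \CFA sketch of the polymorphic aligned allocators above; the element size again comes from the return type:
\begin{lstlisting}
// Sketch only (CFA): pass alignment (and dim); the element size is inferred from T.
void example( void ) {
	double * d = memalign( 64 );         // one double, 64-byte aligned
	double * a = amemalign( 64, 100 );   // 100 doubles, 64-byte aligned
	double * c = cmemalign( 64, 100 );   // 100 doubles, 64-byte aligned, zero-filled
	free( d );  free( a );  free( c );
}
\end{lstlisting}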
     397
     398\subsection{\lstinline{T * aligned_alloc( size_t align )}}
      399This @aligned_alloc@ is a simplified polymorphic form of the default @aligned_alloc@ (FIX ME: cite @aligned_alloc@). It takes one parameter as compared to the default @aligned_alloc@ that takes two parameters.
     400\paragraph{Usage}
     401This @aligned_alloc@ takes one parameter.
     402
     403\begin{itemize}
     404\item
     405@align@: required alignment of the dynamic object.
     406\end{itemize}
     407It returns a dynamic object of the size of type @T@ that is aligned to the given parameter. On failure, it returns a @NULL@ pointer.
     408
     409\subsection{\lstinline{int posix_memalign( T ** ptr, size_t align )}}
      410This @posix_memalign@ is a simplified polymorphic form of the default @posix_memalign@ (FIX ME: cite @posix_memalign@). It takes two parameters as compared to the default @posix_memalign@ that takes three parameters.
     411\paragraph{Usage}
      412This @posix_memalign@ takes two parameters.
     413
     414\begin{itemize}
     415\item
     416@ptr@: variable address to store the address of the allocated object.
     417\item
     418@align@: required alignment of the dynamic object.
     419\end{itemize}
     420
     421It stores the address of the dynamic object of the size of type @T@ in the given parameter @ptr@. This object is aligned to the given parameter. On failure, it returns a non-zero error code.
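A short usage sketch (illustrative only; the error-code check assumes the same return convention as the C @posix_memalign@):
\begin{lstlisting}
double * p;
int rc = posix_memalign( &p, 32 );      // element size inferred from the type of p
if ( rc != 0 ) { /* allocation failed */ }
free( p );
\end{lstlisting}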
     422
     423\subsection{\lstinline{T * valloc( void )}}
     424This @valloc@ is a simplified polymorphic form of the default @valloc@ (FIX ME: cite @valloc@). It takes no parameters as compared to the default @valloc@ that takes one parameter.
     425\paragraph{Usage}
     426@valloc@ takes no parameters.
     427It returns a dynamic object of the size of type @T@ that is aligned to the page size. On failure, it returns a @NULL@ pointer.
     428
     429\subsection{\lstinline{T * pvalloc( void )}}
     430\paragraph{Usage}
     431@pvalloc@ takes no parameters.
     432It returns a dynamic object whose size is calculated by rounding up the size of type @T@ to a multiple of the page size. The returned object is also aligned to the page size. On failure, it returns a @NULL@ pointer.
     433
     434\subsection{Alloc Interface}
     435In addition to improving the allocator interface for both \CFA and our standalone allocator uHeap in C, we also added a new alloc interface in \CFA that increases the usability of dynamic memory allocation.
    373436This interface helps programmers in three major ways.
    374437
     
    379442Parameter Positions: the alloc interface frees programmers from remembering parameter positions in calls to routines.
    380443\item
    381 Object Size: the alloc interface does not require the programmer to specify the object size, as CFA allows the allocator to determine the object size from the return type of the alloc call.
    382 \end{itemize}
    383 
    384 The alloc interface uses polymorphism, backtick routines (FIX ME: cite backtick) and ttype parameters of CFA (FIX ME: cite ttype) to provide a very simple dynamic memory allocation interface to programmers. The new interface has just one routine named alloc that can be used to perform a wide range of dynamic allocations. The parameters use backtick functions to provide a feature similar to named parameters for our alloc interface, so that programmers do not have to remember parameter positions in an alloc call, except for the position of the dimension (dim) parameter.
    385 
    386 \subsubsection{Routine: T * alloc( ... )}
    387 A call to alloc without any parameters returns one dynamically allocated object of the size of type T.
     444Object Size: the alloc interface does not require the programmer to specify the object size, as \CFA allows the allocator to determine the object size from the return type of the alloc call.
     445\end{itemize}
     446
     447The alloc interface uses polymorphism, backtick routines (FIX ME: cite backtick) and ttype parameters of \CFA (FIX ME: cite ttype) to provide a very simple dynamic memory allocation interface to programmers. The new interface has just one routine named alloc that can be used to perform a wide range of dynamic allocations. The parameters use backtick functions to provide a feature similar to named parameters for our alloc interface, so that programmers do not have to remember parameter positions in an alloc call, except for the position of the dimension (dim) parameter.
     448
     449\subsection{Routine: \lstinline{T * alloc( ... )}}
     450A call to alloc without any parameters returns one dynamically allocated object of the size of type @T@.
    388451Only the dimension (dim) parameter for array allocation has a fixed position in the alloc routine. If the programmer wants to allocate an array of objects, the required number of members in the array has to be given as the first parameter to the alloc routine.
    389 The alloc routine accepts six kinds of arguments. Using different combinations of these parameters, different kinds of allocations can be performed. Any combination of parameters can be used together except `realloc and `resize, which should not be used simultaneously in one call to the routine, as that creates ambiguity about whether to reallocate or resize a currently allocated dynamic object. If both `resize and `realloc are used in a call to alloc, the latter one takes effect or unexpected results might be produced.
     452The alloc routine accepts six kinds of arguments. Using different combinations of these parameters, different kinds of allocations can be performed. Any combination of parameters can be used together except @`realloc@ and @`resize@, which should not be used simultaneously in one call to the routine, as that creates ambiguity about whether to reallocate or resize a currently allocated dynamic object. If both @`resize@ and @`realloc@ are used in a call to alloc, the latter one takes effect or unexpected results might be produced.
    390453
    391454\paragraph{Dim}
    392 This is the only parameter in the alloc routine that has a fixed-position and it is also the only parameter that does not use a backtick function. It has to be passed at the first position to alloc call in-case of an array allocation of objects of type T.
    393 It represents the required number of members in the array allocation as in CFA's aalloc (FIX ME: cite aalloc).
    394 This parameter should be of type size\_t.
    395 
    396 Example: int a = alloc( 5 )
     455This is the only parameter in the alloc routine that has a fixed position, and it is also the only parameter that does not use a backtick function. It has to be passed at the first position in the alloc call in the case of an array allocation of objects of type @T@.
     456It represents the required number of members in the array allocation as in \CFA's aalloc (FIX ME: cite aalloc).
     457This parameter should be of type @size_t@.
     458
     459Example: @int * a = alloc( 5 )@
    397460This call will return a dynamic array of five integers.
    398461
    399462\paragraph{Align}
    400 This parameter is position-free and uses a backtick routine align (`align). The parameter passed with `align should be of type size\_t. If the alignment parameter is not a power of two or is less than the default alignment of the allocator (that can be found out using routine libAlign in CFA) then the passed alignment parameter will be rejected and the default alignment will be used.
    401 
    402 Example: int b = alloc( 5 , 64`align )
     463This parameter is position-free and uses a backtick routine align (@`align@). The parameter passed with @`align@ should be of type @size_t@. If the alignment parameter is not a power of two or is less than the default alignment of the allocator (which can be found using the routine libAlign in \CFA), then the passed alignment parameter will be rejected and the default alignment will be used.
     464
     465Example: @int * b = alloc( 5 , 64`align )@
    403466This call will return a dynamic array of five integers. It will align the allocated object to 64.
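The acceptance rule for the alignment argument can be sketched as follows (illustrative only; @libAlign@ is the \CFA routine mentioned above, the helper name is hypothetical):
\begin{lstlisting}
static inline size_t checked_align( size_t align ) {
    bool pow2 = align != 0 && ( align & ( align - 1 ) ) == 0;   // power of two?
    if ( ! pow2 || align < libAlign() ) return libAlign();      // reject: fall back to default
    return align;                                               // accept requested alignment
}
\end{lstlisting}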
    404467
    405468\paragraph{Fill}
    406 This parameter is position-free and uses a backtick routine fill (`fill). In case of realloc, only the extra space after copying the data in the old object will be filled with given parameter.
     469This parameter is position-free and uses a backtick routine fill (@`fill@). In case of @realloc@, only the extra space after copying the data in the old object will be filled with the given parameter.
    407470Three types of parameters can be passed using `fill.
    408471
    409472\begin{itemize}
    410473\item
    411 char: A char can be passed with `fill to fill the whole dynamic allocation with the given char recursively till the end of required allocation.
    412 \item
    413 Object of returned type: An object of type of returned type can be passed with `fill to fill the whole dynamic allocation with the given object recursively till the end of required allocation.
    414 \item
    415 Dynamic object of the returned type: A dynamic object of the returned type can be passed with `fill to fill the dynamic allocation with the given dynamic object. In this case, the allocated memory is not filled repeatedly to the end of the allocation. The filling happens until the end of the object passed to `fill or the end of the requested allocation is reached.
    416 \end{itemize}
    417 
    418 Example: int b = alloc( 5 , 'a'`fill )
     474@char@: A char can be passed with @`fill@ to fill the whole dynamic allocation with the given char, repeated until the end of the requested allocation.
     475\item
     476Object of the returned type: An object of the returned type can be passed with @`fill@ to fill the whole dynamic allocation with the given object, repeated until the end of the requested allocation.
     477\item
     478Dynamic object of the returned type: A dynamic object of the returned type can be passed with @`fill@ to fill the dynamic allocation with the given dynamic object. In this case, the allocated memory is not filled repeatedly to the end of the allocation. The filling happens until the end of the object passed to @`fill@ or the end of the requested allocation is reached.
     479\end{itemize}
     480
     481Example: @int * b = alloc( 5 , 'a'`fill )@
    419482This call will return a dynamic array of five integers. It will fill the allocated object with the character 'a', repeated until the end of the requested allocation size.
    420483
    421 Example: int b = alloc( 5 , 4`fill )
     484Example: @int * b = alloc( 5 , 4`fill )@
    422485This call will return a dynamic array of five integers. It will fill the allocated object with the integer 4, repeated until the end of the requested allocation size.
    423486
    424 Example: int b = alloc( 5 , a`fill ) where a is a pointer of int type
     487Example: @int * b = alloc( 5 , a`fill )@ where @a@ is an @int@ pointer
    425488This call will return a dynamic array of five integers. It will copy the data in a to the returned object non-recursively until the end of a or the newly allocated object is reached.
    426489
    427490\paragraph{Resize}
    428 This parameter is position-free and uses a backtick routine resize (`resize). It represents the old dynamic object (oaddr) that the programmer wants to
     491This parameter is position-free and uses a backtick routine resize (@`resize@). It represents the old dynamic object (oaddr) that the programmer wants to
    429492\begin{itemize}
    430493\item
     
    435498fill with something.
    436499\end{itemize}
    437 The data in old dynamic object will not be preserved in the new object. The type of object passed to `resize and the returned type of alloc call can be different.
    438 
    439 Example: int b = alloc( 5 , a`resize )
     500The data in the old dynamic object will not be preserved in the new object. The type of object passed to @`resize@ and the returned type of the alloc call can be different.
     501
     502Example: @int * b = alloc( 5 , a`resize )@
    440503This call will resize object a to a dynamic array that can contain 5 integers.
    441504
    442 Example: int b = alloc( 5 , a`resize , 32`align )
     505Example: @int * b = alloc( 5 , a`resize , 32`align )@
    443506This call will resize object a to a dynamic array that can contain 5 integers. The returned object will also be aligned to 32.
    444507
    445 Example: int b = alloc( 5 , a`resize , 32`align , 2`fill)
     508Example: @int * b = alloc( 5 , a`resize , 32`align , 2`fill )@
    446509This call will resize object a to a dynamic array that can contain 5 integers. The returned object will also be aligned to 32 and will be filled with 2.
    447510
    448511\paragraph{Realloc}
    449 This parameter is position-free and uses a backtick routine realloc (`realloc). It represents the old dynamic object (oaddr) that the programmer wants to
     512This parameter is position-free and uses a backtick routine @realloc@ (@`realloc@). It represents the old dynamic object (oaddr) that the programmer wants to
    450513\begin{itemize}
    451514\item
     
    456519fill with something.
    457520\end{itemize}
    458 The data in old dynamic object will be preserved in the new object. The type of object passed to `realloc and the returned type of alloc call cannot be different.
    459 
    460 Example: int b = alloc( 5 , a`realloc )
     521The data in the old dynamic object will be preserved in the new object. The type of object passed to @`realloc@ and the returned type of the alloc call cannot be different.
     522
     523Example: @int * b = alloc( 5 , a`realloc )@
    461524This call will realloc object a to a dynamic array that can contain 5 integers.
    462525
    463 Example: int b = alloc( 5 , a`realloc , 32`align )
     526Example: @int * b = alloc( 5 , a`realloc , 32`align )@
    464527This call will realloc object a to a dynamic array that can contain 5 integers. The returned object will also be aligned to 32.
    465528
    466 Example: int b = alloc( 5 , a`realloc , 32`align , 2`fill)
     529Example: @int * b = alloc( 5 , a`realloc , 32`align , 2`fill )@
    467530This call will realloc object a to a dynamic array that can contain 5 integers. The returned object will also be aligned to 32. The extra space after copying the data of a to the returned object will be filled with 2.
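Putting the pieces together, the following sketch (variable names and values are illustrative only) shows how the position-free backtick arguments combine in single calls:
\begin{lstlisting}
int * a = alloc();                              // one int
int * b = alloc( 5 );                           // array of 5 ints
int * c = alloc( 5 , 64`align );                // 5 ints, 64-byte aligned
int * d = alloc( 5 , '\0'`fill );               // 5 ints, zero filled
int * e = alloc( 10 , d`realloc , 4`fill );     // grow d to 10 ints, fill the extra space with 4
free( a ); free( b ); free( c ); free( e );     // d was consumed by the `realloc
\end{lstlisting}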
  • doc/theses/mubeen_zulfiqar_MMath/background.tex

    ref3c383 rd672350  
    1 \chapter{Background}
    2 
    3 \noindent
     1\begin{comment}
    42====================
    5 
    63Writing Points:
    74\begin{itemize}
     
    1916Features and limitations.
    2017\end{itemize}
    21 
    22 \noindent
    23 ====================
    24 
    25 \section{Background}
    26 
    27 % FIXME: cite wasik
    28 \cite{wasik.thesis}
    29 
    30 \subsection{Memory Allocation}
    31 With dynamic allocation being an important feature of C, there are many standalone memory allocators that have been designed for different purposes. For this thesis, we chose 7 of the most popular and widely used memory allocators.
    32 
    33 \paragraph{dlmalloc}
    34 dlmalloc (FIX ME: cite allocator) is a thread-safe allocator that is single threaded and single heap. dlmalloc maintains free-lists of different sizes to store freed dynamic memory. (FIX ME: cite wasik)
    35 
    36 \paragraph{hoard}
     37 Hoard (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses a heap-layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap. (FIX ME: cite wasik)
    38 
    39 \paragraph{jemalloc}
     40 jemalloc (FIX ME: cite allocator) is a thread-safe allocator that uses multiple arenas. Each thread is assigned an arena. Each arena has chunks that contain contiguous memory regions of the same size. An arena has multiple chunks that contain regions of multiple sizes.
    41 
    42 \paragraph{ptmalloc}
    43 ptmalloc (FIX ME: cite allocator) is a modification of dlmalloc. It is a thread-safe multi-threaded memory allocator that uses multiple heaps. ptmalloc heap has similar design to dlmalloc's heap.
    44 
    45 \paragraph{rpmalloc}
     46 rpmalloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses per-thread heaps. Each heap has multiple size-classes and each size-class contains memory regions of the relevant size.
    47 
    48 \paragraph{tbb malloc}
    49 tbb malloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses private heap for each thread. Each private-heap has multiple bins of different sizes. Each bin contains free regions of the same size.
    50 
    51 \paragraph{tc malloc}
    52 tcmalloc (FIX ME: cite allocator) is a thread-safe allocator. It uses per-thread cache to store free objects that prevents contention on shared resources in multi-threaded application. A central free-list is used to refill per-thread cache when it gets empty.
    53 
    54 \subsection{Benchmarks}
     55 There are multiple benchmarks that are built individually and evaluate different aspects of a memory allocator. But there is no standard set of benchmarks that can be used to evaluate multiple aspects of memory allocators.
    56 
    57 \paragraph{threadtest}
    58 (FIX ME: cite benchmark and hoard) Each thread repeatedly allocates and then deallocates 100,000 objects. Runtime of the benchmark evaluates its efficiency.
    59 
    60 \paragraph{shbench}
    61 (FIX ME: cite benchmark and hoard) Each thread allocates and randomly frees a number of random-sized objects. It is a stress test that also uses runtime to determine efficiency of the allocator.
    62 
    63 \paragraph{larson}
     64 (FIX ME: cite benchmark and hoard) Larson simulates a server environment. Multiple threads are created where each thread allocates and frees a number of objects within a size range. Some objects are passed from threads to the child threads to free. It calculates memory operations per second as an indicator of a memory allocator's performance.
     18\end{comment}
     19
     20\chapter[Background]{Background\footnote{Part of this chapter draws from similar background work in~\cite{wasik.thesis} with many updates.}}
     21
     22
     23A program dynamically allocates and deallocates the storage for a variable, referred to as an \newterm{object}, through calls such as @malloc@ and @free@ in C, and @new@ and @delete@ in \CC.
     24Space for each allocated object comes from the dynamic-allocation zone.
     25A \newterm{memory allocator} contains a complex data-structure and code that manages the layout of objects in the dynamic-allocation zone.
     26The management goals are to make allocation/deallocation operations as fast as possible while densely packing objects to make efficient use of memory.
     27Objects in C/\CC cannot be moved to aid the packing process, only adjacent free storage can be \newterm{coalesced} into larger free areas.
     28The allocator grows or shrinks the dynamic-allocation zone to obtain storage for objects and reduce memory usage via operating-system calls, such as @mmap@ or @sbrk@ in UNIX.
     29
     30
     31\section{Allocator Components}
     32\label{s:AllocatorComponents}
     33
     34\VRef[Figure]{f:AllocatorComponents} shows the two important data components for a memory allocator, management and storage, collectively called the \newterm{heap}.
     35The \newterm{management data} is a data structure located at a known memory address and contains all information necessary to manage the storage data.
     36The management data starts with fixed-sized information in the static-data memory that flows into the dynamic-allocation memory.
     37The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}.
     38Allocated objects (white) are variable sized, and allocated and maintained by the program;
     39\ie only the program knows the location of allocated storage, not the memory allocator.
     40\begin{figure}[h]
     41\centering
     42\input{AllocatorComponents}
     43\caption{Allocator Components (Heap)}
     44\label{f:AllocatorComponents}
     45\end{figure}
     46Freed objects (light grey) are memory deallocated by the program, which are linked into one or more lists facilitating easy location for new allocations.
     47Often the free list is chained internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks.
     48Reserved memory (dark grey) is one or more blocks of memory obtained from the operating system but not yet allocated to the program;
     49if there are multiple reserved blocks, they are also chained together, usually internally.
     50
     51Allocated and freed objects typically have additional management data embedded within them.
     52\VRef[Figure]{f:AllocatedObject} shows an allocated object with a header, trailer, and alignment padding and spacing around the object.
     53The header contains information about the object, \eg size, type, etc.
     54The trailer may be used to simplify an allocation implementation, \eg coalescing, and/or for security purposes to mark the end of an object.
     55An object may be preceded by padding to ensure proper alignment.
     56Some algorithms quantize allocation requests into distinct sizes resulting in additional spacing after objects less than the quantized value.
     57When padding and spacing are necessary, neither can be used to satisfy a future allocation request while the current allocation exists.
     58A free object also contains management data, \eg size, chaining, etc.
     59The amount of management data for a free node defines the minimum allocation size, \eg if 16 bytes are needed for a free-list node, any allocation request less than 16 bytes must be rounded up, otherwise the free list cannot use internal chaining.
     60The information in an allocated or freed object is overwritten when it transitions from allocated to freed and vice-versa by new management information and possibly data.
     61
     62\begin{figure}
     63\centering
     64\input{AllocatedObject}
     65\caption{Allocated Object}
     66\label{f:AllocatedObject}
     67\end{figure}
     68
     69
     70\section{Single-Threaded Memory-Allocator}
     71\label{s:SingleThreadedMemoryAllocator}
     72
     73A single-threaded memory-allocator does not run any threads itself, but is used by a single-threaded program.
     74Because the memory allocator is only executed by a single thread, concurrency issues do not exist.
     75The primary issues in designing a single-threaded memory-allocator are fragmentation and locality.
     76
     77
     78\subsection{Fragmentation}
     79\label{s:Fragmentation}
     80
     81Fragmentation is memory requested from the operating system but not used by the program;
     82hence, allocated objects are not fragmentation.
      83\VRef[Figure]{f:InternalExternalFragmentation} shows fragmentation is divided into two forms: internal and external.
     84
     85\begin{figure}
     86\centering
     87\input{IntExtFragmentation}
     88\caption{Internal and External Fragmentation}
     89\label{f:InternalExternalFragmentation}
     90\end{figure}
     91
     92\newterm{Internal fragmentation} is memory space that is allocated to the program, but is not intended to be accessed by the program, such as headers, trailers, padding, and spacing around an allocated object.
     93This memory is typically used by the allocator for management purposes or required by the architecture for correctness, \eg alignment.
     94Internal fragmentation is problematic when management space is a significant proportion of an allocated object.
     95For example, if internal fragmentation is as large as the object being managed, then the memory usage for that object is doubled.
     96An allocator should strive to keep internal management information to a minimum.
     97
     98\newterm{External fragmentation} is all memory space reserved from the operating system but not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes freed objects, all external management data, and reserved memory.
     99This memory is problematic in two ways: heap blowup and highly fragmented memory.
     100\newterm{Heap blowup} occurs when memory freed by the program is not reused for future allocations leading to potentially unbounded external fragmentation growth~\cite{Berger00}.
     101Heap blowup can occur due to allocator policies that are too restrictive in reusing freed memory and/or no coalescing of free storage.
     102Memory can become \newterm{highly fragmented} after multiple allocations and deallocations of objects.
     103\VRef[Figure]{f:MemoryFragmentation} shows an example of how a small block of memory fragments as objects are allocated and deallocated over time.
     104Blocks of free memory become smaller and non-contiguous making them less useful in serving allocation requests.
     105Memory is highly fragmented when the sizes of most free blocks are unusable.
     106For example, \VRef[Figure]{f:Contiguous} and \VRef[Figure]{f:HighlyFragmented} have the same quantity of external fragmentation, but \VRef[Figure]{f:HighlyFragmented} is highly fragmented.
     107If there is a request to allocate a large object, \VRef[Figure]{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while \VRef[Figure]{f:HighlyFragmented} likely has to request more memory from the operating system.
     108
     109\begin{figure}
     110\centering
     111\input{MemoryFragmentation}
     112\caption{Memory Fragmentation}
     113\label{f:MemoryFragmentation}
     114\vspace{10pt}
     115\subfigure[Contiguous]{
     116        \input{ContigFragmentation}
     117        \label{f:Contiguous}
     118} % subfigure
     119        \subfigure[Highly Fragmented]{
     120        \input{NonContigFragmentation}
     121\label{f:HighlyFragmented}
     122} % subfigure
     123\caption{Fragmentation Quality}
     124\label{f:FragmentationQuality}
     125\end{figure}
     126
     127For a single-threaded memory allocator, three basic approaches for controlling fragmentation have been identified~\cite{Johnstone99}.
     128The first approach is a \newterm{sequential-fit algorithm} with one list of free objects that is searched for a block large enough to fit a requested object size.
     129Different search policies determine the free object selected, \eg the first free object large enough or closest to the requested size.
     130Any storage larger than the request can become spacing after the object or be split into a smaller free object.
     131The cost of the search depends on the shape and quality of the free list, \eg a linear versus a binary-tree free-list, a sorted versus unsorted free-list.
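A minimal first-fit sketch over a singly linked free list illustrates where the search cost comes from (an illustrative C-style structure, not any particular allocator's layout):
\begin{lstlisting}
#include <stddef.h>
typedef struct Free { size_t size; struct Free * next; } Free;

// first fit: return the first free block large enough, unlinking it from the list
static Free * first_fit( Free ** head, size_t request ) {
    for ( Free ** cur = head; *cur != NULL; cur = &(*cur)->next ) {
        if ( (*cur)->size >= request ) {
            Free * block = *cur;
            *cur = block->next;     // unlink; any excess could be split into a smaller free block
            return block;
        }
    }
    return NULL;                    // no fit: obtain more reserved memory
}
\end{lstlisting}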
     132
     133The second approach is a \newterm{segregated} or \newterm{binning algorithm} with a set of lists for different sized freed objects.
     134When an object is allocated, the requested size is rounded up to the nearest bin-size, possibly with spacing after the object.
     135A binning algorithm is fast at finding free memory of the appropriate size and allocating it, since the first free object on the free list is used.
     136The fewer bin-sizes, the fewer lists need to be searched and maintained;
     137however, the bin sizes are less likely to closely fit the requested object size, leading to more internal fragmentation.
     138The more bin-sizes, the longer the search and the less likely free objects are to be reused, leading to more external fragmentation and potentially heap blowup.
     139A variation of the binning algorithm allows objects to be allocated to the requested size, but when an object is freed, it is placed on the free list of the next smallest or equal bin-size.
     140For example, with bin sizes of 8 and 16 bytes, a request for 12 bytes allocates only 12 bytes, but when the object is freed, it is placed on the 8-byte bin-list.
     141For subsequent requests, the bin free-lists contain objects of different sizes, ranging from one bin-size to the next (8-16 in this example), and a sequential-fit algorithm may be used to find an object large enough for the requested size on the associated bin list.
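For example, with power-of-two bins the rounding step is only a few instructions (a sketch; real allocators typically use a non-uniform table of bin sizes):
\begin{lstlisting}
#include <stddef.h>
// round a request up to the nearest power-of-two bin, with a minimum bin of 16 bytes
static size_t bin_size( size_t request ) {
    size_t size = 16;
    while ( size < request ) size <<= 1;
    return size;
}
// bin_size( 12 ) == 16, bin_size( 100 ) == 128
\end{lstlisting}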
     142
     143The third approach is \newterm{splitting} and \newterm{coalescing algorithms}.
     144When an object is allocated, if there are no free objects of the requested size, a larger free object may be split into two smaller objects to satisfy the allocation request without obtaining more memory from the operating system.
     145For example, in the buddy system, a block of free memory is split into two equal chunks, one of those chunks is again split into two equal chunks, and so on until a block just large enough to fit the requested object is created.
     146When an object is deallocated it is coalesced with the objects immediately before and after it in memory, if they are free, turning them into one larger object.
     147Coalescing can be done eagerly at each deallocation or lazily when an allocation cannot be fulfilled.
     148In all cases, coalescing increases allocation latency, hence some allocations can cause unbounded delays during coalescing.
     149While coalescing does not reduce external fragmentation, the coalesced blocks improve fragmentation quality so future allocations are less likely to cause heap blowup.
     150Splitting and coalescing can be used with other algorithms to avoid highly fragmented memory.
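The buddy-system split described above amounts to repeated halving until the block just fits the request (a sketch; @add_to_free_list@ is a hypothetical helper):
\begin{lstlisting}
#include <stddef.h>
void add_to_free_list( size_t size, void * block );     // hypothetical helper

// split a free block of size 'size' (a power of two) until it just fits 'request';
// each split returns the unused half to the free list for that size
static size_t buddy_split( size_t size, size_t request, void * block ) {
    while ( size / 2 >= request ) {
        size /= 2;
        add_to_free_list( size, (char *)block + size );
    }
    return size;                    // size of the block handed to the program
}
\end{lstlisting}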
     151
     152
     153\subsection{Locality}
     154\label{s:Locality}
     155
     156The principle of locality recognizes that programs tend to reference a small set of data, called a working set, for a certain period of time, where a working set is composed of temporal and spatial accesses~\cite{Denning05}.
     157Temporal clustering implies a group of objects are accessed repeatedly within a short time period, while spatial clustering implies a group of objects physically close together (nearby addresses) are accessed repeatedly within a short time period.
      158Temporal locality commonly occurs during an iterative computation with a fixed set of disjoint variables, while spatial locality commonly occurs when traversing an array.
     159
     160Hardware takes advantage of temporal and spatial locality through multiple levels of caching (\ie memory hierarchy).
     161When an object is accessed, the memory physically located around the object is also cached with the expectation that the current and nearby objects will be referenced within a short period of time.
     162For example, entire cache lines are transferred between memory and cache and entire virtual-memory pages are transferred between disk and memory.
     163A program exhibiting good locality has better performance due to fewer cache misses and page faults\footnote{With the advent of large RAM memory, paging is becoming less of an issue in modern programming.}.
     164
     165Temporal locality is largely controlled by how a program accesses its variables~\cite{Feng05}.
     166Nevertheless, a memory allocator can have some indirect influence on temporal locality and largely dictates spatial locality.
     167For temporal locality, an allocator can return storage for new allocations that was just freed as these memory locations are still \emph{warm} in the memory hierarchy.
     168For spatial locality, an allocator can place objects used together close together in memory, so the working set of the program fits into the fewest possible cache lines and pages.
     169However, usage patterns are different for every program as is the underlying hardware memory architecture;
     170hence, no general-purpose memory-allocator can provide ideal locality for every program on every computer.
     171
     172There are a number of ways a memory allocator can degrade locality by increasing the working set.
     173For example, a memory allocator may access multiple free objects before finding one to satisfy an allocation request (\eg sequential-fit algorithm).
     174If there are a (large) number of objects accessed in very different areas of memory, the allocator may perturb the program's memory hierarchy causing multiple cache or page misses~\cite{Grunwald93}.
     175Another way locality can be degraded is by spatially separating related data.
     176For example, in a binning allocator, objects of different sizes are allocated from different bins that may be located in different pages of memory.
     177
     178
     179\section{Multi-Threaded Memory-Allocator}
     180\label{s:MultiThreadedMemoryAllocator}
     181
     182A multi-threaded memory-allocator does not run any threads itself, but is used by a multi-threaded program.
     183In addition to single-threaded design issues of locality and fragmentation, a multi-threaded allocator may be simultaneously accessed by multiple threads, and hence, must deal with concurrency issues such as mutual exclusion, false sharing, and additional forms of heap blowup.
     184
     185
     186\subsection{Mutual Exclusion}
     187\label{s:MutualExclusion}
     188
     189\newterm{Mutual exclusion} provides sequential access to the shared management data of the heap.
     190There are two performance issues for mutual exclusion.
     191First is the overhead necessary to perform (at least) a hardware atomic operation every time a shared resource is accessed.
     192Second is when multiple threads contend for a shared resource simultaneously, and hence, some threads must wait until the resource is released.
     193Contention can be reduced in a number of ways:
     194using multiple fine-grained locks versus a single lock, spreading the contention across a number of locks;
     195using trylock and generating new storage if the lock is busy, yielding a classic space versus time tradeoff;
     196using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Oyama99}.
     197However, all of these approaches have degenerate cases where contention occurs.
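For instance, the trylock approach trades space for time roughly as follows (a sketch using Pthreads locks; the @Heap@ type and the fallback helper are hypothetical):
\begin{lstlisting}
#include <pthread.h>
typedef struct Heap { pthread_mutex_t lock; /* free lists, reserved memory ... */ } Heap;
Heap * find_or_create_unlocked_heap( void );    // hypothetical helper

// return a heap with its lock held: try the preferred heap first, otherwise fall back
Heap * acquire_heap( Heap * preferred ) {
    if ( pthread_mutex_trylock( &preferred->lock ) == 0 ) return preferred;
    Heap * other = find_or_create_unlocked_heap();
    pthread_mutex_lock( &other->lock );
    return other;
}
\end{lstlisting}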
     198
     199
     200\subsection{False Sharing}
     201\label{s:FalseSharing}
     202
     203False sharing is a dynamic phenomenon leading to cache thrashing.
     204When two or more threads on separate CPUs simultaneously change different objects sharing a cache line, the change invalidates the other thread's associated cache, even though these threads may be uninterested in the other modified object.
     205False sharing can occur in three different ways: program induced, allocator-induced active, and allocator-induced passive;
     206a memory allocator can only affect the latter two.
     207
     208\paragraph{\newterm{Program-induced false-sharing}} occurs when one thread passes an object sharing a cache line to another thread, and both threads modify the respective objects.
     209\VRef[Figure]{f:ProgramInducedFalseSharing} shows when Task$_1$ passes Object$_2$ to Task$_2$, a false-sharing situation forms when Task$_1$ modifies Object$_1$ and Task$_2$ modifies Object$_2$.
     210Changes to Object$_1$ invalidate CPU$_2$'s cache line, and changes to Object$_2$ invalidate CPU$_1$'s cache line.
     211
     212\begin{figure}
     213\centering
     214\subfigure[Program-Induced False-Sharing]{
     215        \input{ProgramFalseSharing}
     216        \label{f:ProgramInducedFalseSharing}
     217} \\
     218\vspace{5pt}
     219\subfigure[Allocator-Induced Active False-Sharing]{
     220        \input{AllocInducedActiveFalseSharing}
     221        \label{f:AllocatorInducedActiveFalseSharing}
     222} \\
     223\vspace{5pt}
     224\subfigure[Allocator-Induced Passive False-Sharing]{
     225        \input{AllocInducedPassiveFalseSharing}
     226        \label{f:AllocatorInducedPassiveFalseSharing}
     227} % subfigure
     228\caption{False Sharing}
     229\label{f:FalseSharing}
     230\end{figure}
     231
     232\paragraph{\newterm{Allocator-induced active false-sharing}} occurs when objects are allocated within the same cache line but to different threads.
     233For example, in \VRef[Figure]{f:AllocatorInducedActiveFalseSharing}, each task allocates an object and loads a cache-line of memory into its associated cache.
     234Again, changes to Object$_1$ invalidate CPU$_2$'s cache line, and changes to Object$_2$ invalidate CPU$_1$'s cache line.
     235
     236\paragraph{\newterm{Allocator-induced passive false-sharing}} is another form of allocator-induced false-sharing caused by program-induced false-sharing.
     237When an object in a program-induced false-sharing situation is deallocated, a future allocation of that object may cause passive false-sharing.
     238For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, Task$_1$ passes Object$_2$ to Task$_2$, and Task$_2$ subsequently deallocates Object$_2$.
     239Allocator-induced passive false-sharing occurs when Object$_2$ is reallocated to Task$_2$ while Task$_1$ is still using Object$_1$.
     240
     241
     242\subsection{Heap Blowup}
     243\label{s:HeapBlowup}
     244
     245In a multi-threaded program, heap blowup can occur when memory freed by one thread is inaccessible to other threads due to the allocation strategy.
     246Specific examples are presented in later sections.
     247
     248
     249\section{Multi-Threaded Memory-Allocator Features}
     250\label{s:MultiThreadedMemoryAllocatorFeatures}
     251
     252The following features are used in the construction of multi-threaded memory-allocators:
     253\begin{list}{\arabic{enumi}.}{\usecounter{enumi}\topsep=0.5ex\parsep=0pt\itemsep=0pt}
     254\item multiple heaps
     255\begin{list}{\alph{enumii})}{\usecounter{enumii}\topsep=0.5ex\parsep=0pt\itemsep=0pt}
     256\item with or without a global heap
     257\item with or without ownership
     258\end{list}
     259\item object containers
     260\begin{list}{\alph{enumii})}{\usecounter{enumii}\topsep=0.5ex\parsep=0pt\itemsep=0pt}
     261\item with or without ownership
     262\item fixed or variable sized
     263\item global or local free-lists
     264\end{list}
     265\item hybrid private/public heap
     266\item allocation buffer
     267\item lock-free operations
     268\end{list}
     269The first feature, multiple heaps, pertains to different kinds of heaps.
     270The second feature, object containers, pertains to the organization of objects within the storage area.
     271The remaining features apply to different parts of the allocator design or implementation.
     272
     273
     274\section{Multiple Heaps}
     275\label{s:MultipleHeaps}
     276
     277A single-threaded allocator has at most one thread and heap, while a multi-threaded allocator has potentially multiple threads and heaps.
     278The multiple threads cause complexity, and multiple heaps are a mechanism for dealing with the complexity.
     279The spectrum ranges from multiple threads using a single heap, denoted as T:1 (see \VRef[Figure]{f:SingleHeap}), to multiple threads sharing multiple heaps, denoted as T:H (see \VRef[Figure]{f:SharedHeaps}), to one thread per heap, denoted as 1:1 (see \VRef[Figure]{f:PerThreadHeap}), which is almost back to a single-threaded allocator.
     280
     281
     282\paragraph{T:1 model} where all threads allocate and deallocate objects from one heap.
     283Memory is obtained from the freed objects, or reserved memory in the heap, or from the operating system (OS);
     284the heap may also return freed memory to the operating system.
     285The arrows indicate the direction memory conceptually moves for each kind of operation: allocation moves memory along the path from the heap/operating-system to the user application, while deallocation moves memory along the path from the application back to the heap/operating-system.
     286To safely handle concurrency, a single heap uses locking to provide mutual exclusion.
     287Whether using a single lock for all heap operations or fine-grained locking for different operations, a single heap may be a significant source of contention for programs with a large amount of memory allocation.
     288
     289\begin{figure}
     290\centering
     291\subfigure[T:1]{
     292%       \input{SingleHeap.pstex_t}
     293        \input{SingleHeap}
     294        \label{f:SingleHeap}
     295} % subfigure
     296\vrule
     297\subfigure[T:H]{
     298%       \input{MultipleHeaps.pstex_t}
     299        \input{SharedHeaps}
     300        \label{f:SharedHeaps}
     301} % subfigure
     302\vrule
     303\subfigure[1:1]{
     304%       \input{MultipleHeapsGlobal.pstex_t}
     305        \input{PerThreadHeap}
     306        \label{f:PerThreadHeap}
     307} % subfigure
     308\caption{Multiple Heaps, Thread:Heap Relationship}
     309\end{figure}
     310
     311
     312\paragraph{T:H model} where each thread allocates storage from several heaps depending on certain criteria, with the goal of reducing contention by spreading allocations/deallocations across the heaps.
     313The decision on when to create a new heap and which heap a thread allocates from depends on the allocator design.
     314The performance goal is to reduce the ratio of heaps to threads.
     315In general, locking is required, since more than one thread may concurrently access a heap during its lifetime, but contention is reduced because fewer threads access a specific heap.
     316
     317For example, multiple heaps are managed in a pool, starting with a single or a fixed number of heaps that increase\-/decrease depending on contention\-/space issues.
     318At creation, a thread is associated with a heap from the pool.
     319When the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.
     320If an unlocked heap is found, the thread changes its association and uses that heap.
     321If all heaps are locked, the thread may create a new heap, use it, and then place the new heap into the pool;
     322or the thread can block waiting for a heap to become available.
     323While the heap-pool approach often minimizes the number of extant heaps, the worse case can result in more heaps than threads;
     324\eg if the number of threads is large at startup with many allocations creating a large number of heaps and then the number of threads reduces.
     325
     326Threads using multiple heaps need to determine the specific heap to access for an allocation/deallocation, \ie association of thread to heap.
     327A number of techniques are used to establish this association.
     328The simplest approach is for each thread to have a pointer to its associated heap (or to administrative information that points to the heap), and this pointer changes if the association changes.
     329For threading systems with thread-local storage, the heap pointer is created using this mechanism;
     330otherwise, the heap routines must simulate thread-local storage using approaches like hashing the thread's stack-pointer or thread-id to find its associated heap.
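With compiler-supported thread-local storage, the association is a single pointer per kernel thread (a sketch; the pool-assignment helper is hypothetical and the hashing fallback is omitted):
\begin{lstlisting}
typedef struct Heap Heap;
Heap * assign_heap_from_pool( void );   // hypothetical helper

static __thread Heap * myHeap;          // one association per kernel thread

static inline Heap * current_heap( void ) {
    if ( myHeap == NULL ) myHeap = assign_heap_from_pool();
    return myHeap;
}
\end{lstlisting}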
     331
     332The storage management for multiple heaps is more complex than for a single heap (see \VRef[Figure]{f:AllocatorComponents}).
     333\VRef[Figure]{f:MultipleHeapStorage} illustrates the general storage layout for multiple heaps.
     334Allocated and free objects are labelled by the thread or heap they are associated with.
     335(Links between free objects are removed for simplicity.)
     336The management information in the static zone must be able to locate all heaps in the dynamic zone.
     337The management information for the heaps must reside in the dynamic-allocation zone if there are a variable number.
      338Each heap in the dynamic zone is composed of a list of free objects and a pointer to its reserved memory.
     339An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory.
     340Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur.
     341Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area.
     342
     343\begin{figure}
     344\centering
     345\input{MultipleHeapsStorage}
     346\caption{Multiple-Heap Storage}
     347\label{f:MultipleHeapStorage}
     348\end{figure}
     349
     350Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup.
     351The external fragmentation experienced by a program with a single heap is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory.
     352Additionally, objects freed by one heap cannot be reused by other threads, except indirectly by returning free memory to the operating system, which can be expensive.
     353(Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.)
     354In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused.
     355
     356Adding a \newterm{global heap} (G) attempts to reduce the cost of obtaining/returning memory among heaps (sharing) by buffering storage within the application address-space.
     357Now, each heap obtains and returns storage to/from the global heap rather than the operating system.
     358Storage is obtained from the global heap only when a heap allocation cannot be fulfilled, and returned to the global heap when a heap's free memory exceeds some threshold.
     359Similarly, the global heap buffers this memory, obtaining and returning storage to/from the operating system as necessary.
     360The global heap does not have its own thread and makes no internal allocation requests;
     361instead, it uses the application thread, which called one of the multiple heaps and then the global heap, to perform operations.
     362Hence, the worst-case cost of a memory operation includes all these steps.
     363With respect to heap blowup, the global heap provides an indirect mechanism to move free memory among heaps, which usually has a much lower cost than interacting with the operating system to achieve the same goal and is independent of the mechanism used by the operating system to present dynamic memory to an address space.
     364
     365However, since any thread may indirectly perform a memory operation on the global heap, it is a shared resource that requires locking.
     366A single lock can be used to protect the global heap or fine-grained locking can be used to reduce contention.
     367In general, the cost is minimal since the majority of memory operations are completed without the use of the global heap.
     368
     369
      370\paragraph{1:1 model (thread heaps)} where each thread has its own heap, which eliminates most contention and locking because threads seldom access another thread's heap (see ownership in \VRef{s:Ownership}).
     371An additional benefit of thread heaps is improved locality due to better memory layout.
      372As each thread only allocates from its heap, all objects for a thread are consolidated in the storage area for that heap, better utilizing each CPU's cache and accessing fewer pages.
     373In contrast, the T:H model spreads each thread's objects over a larger area in different heaps.
     374Thread heaps can also eliminate allocator-induced active false-sharing, if memory is acquired so it does not overlap at crucial boundaries with memory for another thread's heap.
     375For example, assume page boundaries coincide with cache line boundaries, then if a thread heap always acquires pages of memory, no two threads share a page or cache line unless pointers are passed among them.
     376Hence, allocator-induced active false-sharing in \VRef[Figure]{f:AllocatorInducedActiveFalseSharing} cannot occur because the memory for thread heaps never overlaps.
     377
     378When a thread terminates, there are two options for handling its heap.
     379First is to free all objects in the heap to the global heap and destroy the thread heap.
     380Second is to place the thread heap on a list of available heaps and reuse it for a new thread in the future.
     381Destroying the thread heap immediately may reduce external fragmentation sooner, since all free objects are freed to the global heap and may be reused by other threads.
     382Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap.
     383
     384
     385\subsection{User-Level Threading}
     386
     387It is possible to use any of the heap models with user-level (M:N) threading.
     388However, an important goal of user-level threading is for fast operations (creation/termination/context-switching) by not interacting with the operating system, which allows the ability to create large numbers of high-performance interacting threads ($>$ 10,000).
     389It is difficult to retain this goal, if the user-threading model is directly involved with the heap model.
      390\VRef[Figure]{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model is provided by the language runtime.
     391Hence, a user thread allocates/deallocates from/to the heap of the kernel thread on which it is currently executing.
     392
     393\begin{figure}
     394\centering
     395\input{UserKernelHeaps}
     396\caption{User-Level Kernel Heaps}
     397\label{f:UserLevelKernelHeaps}
     398\end{figure}
     399
     400Adopting this model results in a subtle problem with shared heaps.
     401With kernel threading, an operation that is started by a kernel thread is always completed by that thread.
     402For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap even if preempted.
     403Any correctness locking associated with the shared heap is preserved across preemption.
     404
     405However, this correctness property is not preserved for user-level threading.
     406A user thread can start an allocation/deallocation on one kernel thread, be preempted (time slice), and continue running on a different kernel thread to complete the operation~\cite{Dice02}.
     407When the user thread continues on the new kernel thread, it may have pointers into the previous kernel-thread's heap and hold locks associated with it.
     408To get the same kernel-thread safety, time slicing must be disabled/\-enabled around these operations, so the user thread cannot jump to another kernel thread.
     409However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is rare (10--100 milliseconds).
     410Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically.
     411Occasionally ignoring a preemption should be benign.
     412
     413
     414\begin{figure}
     415\centering
     416\subfigure[Ownership]{
     417        \input{MultipleHeapsOwnership}
     418} % subfigure
     419\hspace{0.25in}
     420\subfigure[No Ownership]{
     421        \input{MultipleHeapsNoOwnership}
     422} % subfigure
     423\caption{Heap Ownership}
     424\label{f:HeapsOwnership}
     425\end{figure}
     426
     427
     428\subsection{Ownership}
     429\label{s:Ownership}
     430
     431\newterm{Ownership} defines which heap an object is returned-to on deallocation.
     432If a thread returns an object to the heap it was originally allocated from, the heap has ownership of its objects.
     433Alternatively, a thread can return an object to the heap it is currently allocating from, which can be any heap accessible during a thread's lifetime.
     434\VRef[Figure]{f:HeapsOwnership} shows an example of multiple heaps (minus the global heap) with and without ownership.
     435Again, the arrows indicate the direction memory conceptually moves for each kind of operation.
     436For the 1:1 thread:heap relationship, a thread only allocates from its own heap, and without ownership, a thread only frees objects to its own heap, which means the heap is private to its owner thread and does not require any locking, called a \newterm{private heap}.
     437For the T:1/T:H models with or without ownership or the 1:1 model with ownership, a thread may free objects to different heaps, which makes each heap publicly accessible to all threads, called a \newterm{public heap}.
     438
     439\VRef[Figure]{f:MultipleHeapStorageOwnership} shows the effect of ownership on storage layout.
      440(For simplicity assume the heaps all use the same size of reserved storage.)
     441In contrast to \VRef[Figure]{f:MultipleHeapStorage}, each reserved area used by a heap only contains free storage for that particular heap because threads must return free objects back to the owner heap.
     442Again, because multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
     443The exception is for the 1:1 model if reserved memory does not overlap a cache-line because all allocated storage within a used area is associated with a single thread.
     444In this case, there is no allocator-induced active false-sharing (see \VRef[Figure]{f:AllocatorInducedActiveFalseSharing}) because two adjacent allocated objects used by different threads cannot share a cache-line.
      445As well, there is no allocator-induced passive false-sharing (see \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}) because free objects are returned to the owner heap, so two adjacent allocated objects used by different threads cannot occur.
     446% Passive false-sharing may still occur, if delayed ownership is used (see below).
     447
     448\begin{figure}
     449\centering
     450\input{MultipleHeapsOwnershipStorage.pstex_t}
     451\caption{Multiple-Heap Storage with Ownership}
     452\label{f:MultipleHeapStorageOwnership}
     453\end{figure}
     454
     455The main advantage of ownership is preventing heap blowup by returning storage for reuse by the owner heap.
     456Ownership prevents the classical problem where one thread performs allocations from one heap, passes the object to another thread, and the receiving thread deallocates the object to another heap, hence draining the initial heap of storage.
     457As well, allocator-induced passive false-sharing is eliminated because returning an object to its owner heap means it can never be allocated to another thread.
     458For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, the deallocation by Task$_2$ returns Object$_2$ back to Task$_1$'s heap;
     459hence a subsequent allocation by Task$_2$ cannot return this storage.
      460The disadvantage of ownership is that deallocation may be to another task's heap, so heaps are no longer private and require locks to provide safe concurrent access.
     461
     462Object ownership can be immediate or delayed, meaning free objects may be batched on a separate free list either by the returning or receiving thread.
     463While the returning thread can batch objects, batching across multiple heaps is complex and there is no obvious time when to push back to the owner heap.
     464It is better for returning threads to immediately return to the receiving thread's batch list as the receiving thread has better knowledge when to incorporate the batch list into its free pool.
     465Batching leverages the fact that most allocation patterns use the contention-free fast-path so locking on the batch list is rare for both the returning and receiving threads.
     466
      467It is possible for heaps to steal objects rather than return them, reallocating these objects when storage runs out on a heap.
     468However, stealing can result in passive false-sharing.
     469For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, Object$_2$ may be deallocated to Task$_2$'s heap initially.
     470If Task$_2$ reallocates Object$_2$ before it is returned to its owner heap, then passive false-sharing may occur.
     471
     472
     473\section{Object Containers}
     474\label{s:ObjectContainers}
     475
     476Bracketing every allocation with headers/trailers can result in significant internal fragmentation, as shown in \VRef[Figure]{f:ObjectHeaders}.
      477This is especially true if the headers contain redundant management information, \eg the object size may be the same for many objects because programs only allocate a small set of object sizes.
     478As well, it can result in poor cache usage, since only a portion of the cache line is holding useful information from the program's perspective.
     479Spatial locality can also be negatively affected leading to poor cache locality~\cite{Feng05}:
     480while the header and object are together in memory, they are generally not accessed together;
     481\eg the object is accessed by the program when it is allocated, while the header is accessed by the allocator when the object is free.
     482
     483\begin{figure}
     484\centering
     485\subfigure[Object Headers]{
     486        \input{ObjectHeaders}
     487        \label{f:ObjectHeaders}
     488} % subfigure
     489\subfigure[Object Container]{
     490        \input{Container}
     491        \label{f:ObjectContainer}
     492} % subfigure
     493\caption{Header Placement}
     494\label{f:HeaderPlacement}
     495\end{figure}
     496
     497An alternative approach factors common header/trailer information to a separate location in memory and organizes associated free storage into blocks called \newterm{object containers} (\newterm{superblocks} in~\cite{Berger00}), as in \VRef[Figure]{f:ObjectContainer}.
     498The header for the container holds information necessary for all objects in the container;
     499a trailer may also be used at the end of the container.
     500Similar to the approach described for thread heaps in \VRef{s:MultipleHeaps}, if container boundaries do not overlap with memory of another container at crucial boundaries and all objects in a container are allocated to the same thread, allocator-induced active false-sharing is avoided.
     501
     502The difficulty with object containers lies in finding the object header/trailer given only the object address, since that is normally the only information passed to the deallocation operation.
     503One way to do this is to start containers on aligned addresses in memory, then truncate the lower bits of the object address to obtain the header address (or round up and subtract the trailer size to obtain the trailer address).
     504For example, if an object at address 0xFC28\,EF08 is freed and containers are aligned on 64\,KB (0x0001\,0000) addresses, then the container header is at 0xFC28\,0000.
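The following is a minimal sketch of this calculation in C, assuming a hypothetical @Container@ header type and 64\,KB container alignment:
\begin{lstlisting}
#include <stdint.h>

#define CONTAINER_ALIGN (64 * 1024)             // container alignment, a power of 2 (64 KB)
typedef struct Container Container;             // hypothetical container-header type

static inline Container * containerOf( void * obj ) {
        // truncate the low-order bits of the object address to reach the container header
        return (Container *)((uintptr_t)obj & ~(uintptr_t)(CONTAINER_ALIGN - 1));
}
// e.g., containerOf( (void *)0xFC28EF08 ) yields (Container *)0xFC280000
\end{lstlisting}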
     505
     506Normally, a container has homogeneous objects of fixed size, with fixed information in the header that applies to all container objects (\eg object size and ownership).
     507This approach greatly reduces internal fragmentation since far fewer headers are required, and potentially increases spatial locality as a cache line or page holds more objects since the objects are closer together due to the lack of headers.
     508However, although similar objects are close spatially within the same container, different sized objects are further apart in separate containers.
     509Depending on the program, this may or may not improve locality.
     510If the program uses several objects from a small number of containers in its working set, then locality is improved since fewer cache lines and pages are required.
     511If the program uses many containers, there is poor locality, as both caching and paging increase.
     512Another drawback is that external fragmentation may be increased since containers reserve space for objects that may never be allocated by the program, \ie there are often multiple containers for each size only partially full.
     513However, external fragmentation can be reduced by using small containers.
     514
      515Containers with heterogeneous objects imply different headers describing them, which complicates the problem of locating a specific header solely by an address.
      516A further complication is that the number of objects, and therefore headers, in a single container is unpredictable.
      517A couple of solutions can be used to implement containers with heterogeneous objects.
     518One solution allocates headers at one end of the container, while allocating objects from the other end of the container;
     519when the headers meet the objects, the container is full.
     520Freed objects cannot be split or coalesced since this causes the number of headers to change.
     521The difficulty in this strategy remains in finding the header for a specific object;
     522in general, a search is necessary to find the object's header among the container headers.
     523A second solution combines the use of container headers and individual object headers.
     524Each object header stores the object's heterogeneous information, such as its size, while the container header stores the homogeneous information, such as the owner when using ownership.
     525This approach allows containers to hold different types of objects, but does not completely separate headers from objects.
     526The benefit of the container in this case is to reduce some redundant information that is factored into the container header.
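As a rough illustration of this combined approach, the following sketch (with hypothetical names, not drawn from any particular allocator) keeps the per-object size next to each object, while the shared ownership information lives in the container header:
\begin{lstlisting}
#include <stddef.h>

typedef struct ObjectHeader {                   // heterogeneous, per-object information
        size_t size;                            // this object's size
} ObjectHeader;

typedef struct ContainerHeader {                // homogeneous, per-container information
        struct Heap * owner;                    // owner heap, when ownership is used
        struct ContainerHeader * next;          // links, counters, etc.
} ContainerHeader;

static inline ObjectHeader * objectHeader( void * obj ) {
        return (ObjectHeader *)obj - 1;         // per-object header immediately precedes the object
}
\end{lstlisting}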
     527
     528In summary, object containers trade off internal fragmentation for external fragmentation by isolating common administration information to remove/reduce internal fragmentation, but at the cost of external fragmentation as some portion of a container may not be used and this portion is unusable for other kinds of allocations.
     529A consequence of this tradeoff is its effect on spatial locality, which can produce positive or negative results depending on program access-patterns.
     530
     531
     532\subsection{Container Ownership}
     533\label{s:ContainerOwnership}
     534
     535Without ownership, objects in a container are deallocated to the heap currently associated with the thread that frees the object.
     536Thus, different objects in a container may be on different heap free-lists (see \VRef[Figure]{f:ContainerNoOwnershipFreelist}).
     537With ownership, all objects in a container belong to the same heap (see \VRef[Figure]{f:ContainerOwnershipFreelist}), so ownership of an object is determined by the container owner.
     538If multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
     539Only with the 1:1 model and ownership is active and passive false-sharing avoided (see \VRef{s:Ownership}).
     540Passive false-sharing may still occur, if delayed ownership is used.
     541
     542\begin{figure}
     543\centering
     544\subfigure[No Ownership]{
     545        \input{ContainerNoOwnershipFreelist}
     546        \label{f:ContainerNoOwnershipFreelist}
     547} % subfigure
     548\vrule
     549\subfigure[Ownership]{
     550        \input{ContainerOwnershipFreelist}
     551        \label{f:ContainerOwnershipFreelist}
     552} % subfigure
     553\caption{Free-list Structure with Container Ownership}
     554\end{figure}
     555
     556A fragmented heap has multiple containers that may be partially or completely free.
     557A completely free container can become reserved storage and be reset to allocate objects of a new size.
     558When a heap reaches a threshold of free objects, it moves some free storage to the global heap for reuse to prevent heap blowup.
     559Without ownership, when a heap frees objects to the global heap, individual objects must be passed, and placed on the global-heap's free-list.
      560Containers cannot be freed to the global heap unless they are completely free, because a partially full container still holds objects in use and its free objects may be spread across the free-lists of multiple heaps.
     561
      562With ownership, when a container changes ownership, the ownership of all objects within it changes as well.
     563Moving a container involves moving all objects on the heap's free-list in that container to the new owner.
     564This approach can reduce contention for the global heap, since each request for objects from the global heap returns a container rather than individual objects.
     565
     566Additional restrictions may be applied to the movement of containers to prevent active false-sharing.
     567For example, in \VRef[Figure]{f:ContainerFalseSharing1}, a container being used by Task$_1$ changes ownership, through the global heap.
     568In \VRef[Figure]{f:ContainerFalseSharing2}, when Task$_2$ allocates an object from the newly acquired container it is actively false-sharing even though no objects are passed among threads.
     569Note, once the object is freed by Task$_1$, no more false sharing can occur until the container changes ownership again.
     570To prevent this form of false sharing, container movement may be restricted to when all objects in the container are free.
     571One implementation approach that increases the freedom to return a free container to the operating system involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area.
     572
     573\begin{figure}
     574\centering
     575\subfigure[]{
     576        \input{ContainerFalseSharing1}
     577        \label{f:ContainerFalseSharing1}
     578} % subfigure
     579\subfigure[]{
     580        \input{ContainerFalseSharing2}
     581        \label{f:ContainerFalseSharing2}
     582} % subfigure
     583\caption{Active False-Sharing using Containers}
     584\label{f:ActiveFalseSharingContainers}
     585\end{figure}
     586
     587Using containers with ownership increases external fragmentation since a new container for a requested object size must be allocated separately for each thread requesting it.
     588In \VRef[Figure]{f:ExternalFragmentationContainerOwnership}, using object ownership allocates 80\% more space than without ownership.
     589
     590\begin{figure}
     591\centering
     592\subfigure[No Ownership]{
     593        \input{ContainerNoOwnership}
     594} % subfigure
     595\\
     596\subfigure[Ownership]{
     597        \input{ContainerOwnership}
     598} % subfigure
     599\caption{External Fragmentation with Container Ownership}
     600\label{f:ExternalFragmentationContainerOwnership}
     601\end{figure}
     602
     603
     604\subsection{Container Size}
     605\label{s:ContainerSize}
     606
     607One way to control the external fragmentation caused by allocating a large container for a small number of requested objects is to vary the size of the container.
     608As described earlier, container boundaries need to be aligned on addresses that are a power of two to allow easy location of the header (by truncating lower bits).
     609Aligning containers in this manner also determines the size of the container.
     610However, the size of the container has different implications for the allocator.
     611
     612The larger the container, the fewer containers are needed, and hence, the fewer headers need to be maintained in memory, improving both internal fragmentation and potentially performance.
     613However, with more objects in a container, there may be more objects that are unallocated, increasing external fragmentation.
      614With smaller containers, not only are there more containers, but a second problem arises where objects can be larger than the container.
     615In general, large objects, \eg greater than 64\,KB, are allocated directly from the operating system and are returned immediately to the operating system to reduce long-term external fragmentation.
     616If the container size is small, \eg 1\,KB, then a 1.5\,KB object is treated as a large object, which is likely to be inappropriate.
     617Ideally, it is best to use smaller containers for smaller objects, and larger containers for medium objects, which leads to the issue of locating the container header.
     618
     619In order to find the container header when using different sized containers, a super container is used (see~\VRef[Figure]{f:SuperContainers}).
     620The super container spans several containers, contains a header with information for finding each container header, and starts on an aligned address.
     621Super-container headers are found using the same method used to find container headers by dropping the lower bits of an object address.
     622The containers within a super container may be different sizes or all the same size.
     623If the containers in the super container are different sizes, then the super-container header must be searched to determine the specific container for an object given its address.
      624If all containers in the super container are the same size, \eg 16\,KB, then a specific container header can be found by a simple calculation.
     625The free space at the end of a super container is used to allocate new containers.
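The following sketch illustrates this lookup, assuming hypothetical header types, a 16\,MB super-container alignment, and fixed 16\,KB containers placed after the super-container header:
\begin{lstlisting}
#include <stddef.h>
#include <stdint.h>

#define SUPER_ALIGN     (16u * 1024 * 1024)     // super-container alignment (power of 2)
#define CONTAINER_SIZE  (16u * 1024)            // fixed container size inside the super container

typedef struct SuperContainer SuperContainer;   // hypothetical header types
typedef struct Container Container;

static inline SuperContainer * superOf( void * obj ) {
        return (SuperContainer *)((uintptr_t)obj & ~(uintptr_t)(SUPER_ALIGN - 1));
}
static inline Container * containerOf( void * obj, size_t superHeaderSize ) {
        uintptr_t first = (uintptr_t)superOf( obj ) + superHeaderSize; // containers follow the super header
        uintptr_t index = ((uintptr_t)obj - first) / CONTAINER_SIZE;   // container holding obj
        return (Container *)(first + index * CONTAINER_SIZE);
}
\end{lstlisting}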
     626
     627\begin{figure}
     628\centering
     629\input{SuperContainers}
     630% \includegraphics{diagrams/supercontainer.eps}
     631\caption{Super Containers}
     632\label{f:SuperContainers}
     633\end{figure}
     634
     635Minimal internal and external fragmentation is achieved by having as few containers as possible, each being as full as possible.
     636It is also possible to achieve additional benefit by using larger containers for popular small sizes, as it reduces the number of containers with associated headers.
     637However, this approach assumes it is possible for an allocator to determine in advance which sizes are popular.
     638Keeping statistics on requested sizes allows the allocator to make a dynamic decision about which sizes are popular.
     639For example, after receiving a number of allocation requests for a particular size, that size is considered a popular request size and larger containers are allocated for that size.
     640If the decision is incorrect, larger containers than necessary are allocated that remain mostly unused.
     641A programmer may be able to inform the allocator about popular object sizes, using a mechanism like @mallopt@, in order to select an appropriate container size for each object size.
     642
     643
     644\subsection{Container Free-Lists}
     645\label{s:containersfreelists}
     646
     647The container header allows an alternate approach for managing the heap's free-list.
     648Rather than maintain a global free-list throughout the heap (see~\VRef[Figure]{f:GlobalFreeListAmongContainers}), the containers are linked through their headers and only the local free objects within a container are linked together (see~\VRef[Figure]{f:LocalFreeListWithinContainers}).
     649Note, maintaining free lists within a container assumes all free objects in the container are associated with the same heap;
     650thus, this approach only applies to containers with ownership.
     651
     652This alternate free-list approach can greatly reduce the complexity of moving all freed objects belonging to a container to another heap.
     653To move a container using a global free-list, as in \VRef[Figure]{f:GlobalFreeListAmongContainers}, the free list is first searched to find all objects within the container.
     654Each object is then removed from the free list and linked together to form a local free-list for the move to the new heap.
     655With local free-lists in containers, as in \VRef[Figure]{f:LocalFreeListWithinContainers}, the container is simply removed from one heap's free list and placed on the new heap's free list.
     656Thus, when using local free-lists, the operation of moving containers is reduced from $O(N)$ to $O(1)$.
     657The cost is adding information to a header, which increases the header size, and therefore internal fragmentation.
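A minimal sketch of this organization, with illustrative field names only, is:
\begin{lstlisting}
#include <stddef.h>

typedef struct FreeObject {
        struct FreeObject * next;               // link through free objects inside one container
} FreeObject;

typedef struct Container {
        struct Container * next;                // link through containers owned by a heap
        FreeObject * freeList;                  // local free-list of objects within this container
        size_t objectSize;                      // homogeneous object size for this container
        unsigned freeCount;                     // number of free objects in this container
} Container;

typedef struct Heap {
        Container * containers;                 // containers owned by this heap
} Heap;
// moving a container between heaps is O(1): unlink it from one heap's container list and
// link it onto another's; the container's local free-list travels with the container header.
\end{lstlisting}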
     658
     659\begin{figure}
     660\centering
     661\subfigure[Global Free-List Among Containers]{
     662        \input{FreeListAmongContainers}
     663        \label{f:GlobalFreeListAmongContainers}
     664} % subfigure
     665\hspace{0.25in}
     666\subfigure[Local Free-List Within Containers]{
     667        \input{FreeListWithinContainers}
     668        \label{f:LocalFreeListWithinContainers}
     669} % subfigure
     670\caption{Container Free-List Structure}
     671\label{f:ContainerFreeListStructure}
     672\end{figure}
     673
     674When all objects in the container are the same size, a single free-list is sufficient.
      675However, when objects in the container are of different sizes, the header needs a free list for each size class when using a binning allocation algorithm, which can significantly increase the container-header size.
     676The alternative is to use a different allocation algorithm with a single free-list, such as a sequential-fit allocation-algorithm.
     677
     678
     679\subsection{Hybrid Private/Public Heap}
     680\label{s:HybridPrivatePublicHeap}
     681
     682Section~\Vref{s:Ownership} discusses advantages and disadvantages of public heaps (T:H model and with ownership) and private heaps (thread heaps with ownership).
     683For thread heaps with ownership, it is possible to combine these approaches into a hybrid approach with both private and public heaps (see~\VRef[Figure]{f:HybridPrivatePublicHeap}).
     684The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup.
     685In the hybrid approach, a task first allocates from its private heap and second from its public heap if no free memory exists in the private heap.
      686Similarly, a task first deallocates an object to its private heap, and second to the public heap.
     687Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps.
      688Note, deallocation from the private to the public heap (dashed line) is unlikely because there are no obvious advantages unless the public heap provides the only interface to the global heap.
      689Finally, when a task frees an object it does not own, the object is either freed immediately to its owner's public heap or put in the freeing task's private heap for delayed ownership, which allows the freeing task to temporarily reuse the object before returning it to its owner, or to batch objects destined for an owner heap into a single return.
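The allocation/deallocation ordering just described can be summarized by the following sketch; the heap types and helper routines are hypothetical placeholders, not the interface of any specific allocator:
\begin{lstlisting}
#include <stddef.h>

typedef struct Heap Heap;                       // hypothetical private/public heap types
extern Heap * myPrivateHeap, * myPublicHeap, * globalHeap;

// hypothetical helpers: allocation routines return NULL when a heap has no suitable storage
extern void * private_alloc( Heap *, size_t ); extern void private_free( Heap *, void * );
extern void * public_alloc( Heap *, size_t );  extern void public_free( Heap *, void * );
extern void * global_alloc( Heap *, size_t );
extern Heap * owner_private_heap( void * );    // from the object/container header
extern Heap * owner_public_heap( void * );

void * hybrid_malloc( size_t size ) {
        void * obj = private_alloc( myPrivateHeap, size );              // 1: contention-free
        if ( obj == NULL ) obj = public_alloc( myPublicHeap, size );    // 2: locked public heap
        if ( obj == NULL ) obj = global_alloc( globalHeap, size );      // 3: global heap / OS
        return obj;
}
void hybrid_free( void * obj ) {
        if ( owner_private_heap( obj ) == myPrivateHeap )
                private_free( myPrivateHeap, obj );                      // thread-local, no lock
        else    // owner's public heap, or batch locally for delayed ownership
                public_free( owner_public_heap( obj ), obj );
}
\end{lstlisting}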
     690
     691\begin{figure}
     692\centering
     693\input{PrivatePublicHeaps.pstex_t}
     694\caption{Hybrid Private/Public Heap for Per-thread Heaps}
     695\label{f:HybridPrivatePublicHeap}
     696% \vspace{10pt}
     697% \input{RemoteFreeList.pstex_t}
     698% \caption{Remote Free-List}
     699% \label{f:RemoteFreeList}
     700\end{figure}
     701
     702As mentioned, an implementation may have only one heap deal with the global heap, so the other heap can be simplified.
     703For example, if only the private heap interacts with the global heap, the public heap can be reduced to a lock-protected free-list of objects deallocated by other threads due to ownership, called a \newterm{remote free-list}.
     704To avoid heap blowup, the private heap allocates from the remote free-list when it reaches some threshold or it has no free storage.
      705Since the remote free-list is occasionally cleared during an allocation, this clearing adds to the cost of that allocation.
     706Clearing the remote free-list is $O(1)$ if the list can simply be added to the end of the private-heap's free-list, or $O(N)$ if some action must be performed for each freed object.
     707
     708If only the public heap interacts with other threads and the global heap, the private heap can handle thread-local allocations and deallocations without locking.
     709In this scenario, the private heap must deallocate storage after reaching a certain threshold to the public heap (and then eventually to the global heap from the public heap) or heap blowup can occur.
     710If the public heap does the major management, the private heap can be simplified to provide high-performance thread-local allocations and deallocations.
     711
     712The main disadvantage of each thread having both a private and public heap is the complexity of managing two heaps and their interactions in an allocator.
     713Interestingly, heap implementations often focus on either a private or public heap, giving the impression a single versus a hybrid approach is being used.
      714In many cases, the hybrid approach is actually being used, but the simpler heap is just folded into the complex heap, even though the operations logically belong in separate heaps.
     715For example, a remote free-list is actually a simple public-heap, but may be implemented as an integral component of the complex private-heap in an allocator, masking the presence of a hybrid approach.
     716
     717
     718\section{Allocation Buffer}
     719\label{s:AllocationBuffer}
     720
     721An allocation buffer is reserved memory (see~\VRef{s:AllocatorComponents}) not yet allocated to the program, and is used for allocating objects when the free list is empty.
     722That is, rather than requesting new storage for a single object, an entire buffer is requested from which multiple objects are allocated later.
      723Both thread heaps and the global heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or operating system, respectively.
     724The allocation buffer reduces contention and the number of global/operating-system calls.
     725For coalescing, a buffer is split into smaller objects by allocations, and recomposed into larger buffer areas during deallocations.
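A minimal bump-pointer sketch of allocating from such a buffer, ignoring alignment and buffer refill, is:
\begin{lstlisting}
#include <stddef.h>

typedef struct AllocBuffer {
        char * next;                            // next free byte in the buffer
        char * end;                             // one past the last byte of the buffer
} AllocBuffer;

static void * buffer_alloc( AllocBuffer * buf, size_t size ) {
        if ( (size_t)(buf->end - buf->next) < size ) return NULL;       // exhausted: caller gets a new buffer
        void * obj = buf->next;
        buf->next += size;                      // bump the pointer past the new object
        return obj;
}
\end{lstlisting}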
     726
     727Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts.
     728Furthermore, to prevent heap blowup, objects should be reused before allocating a new allocation buffer.
     729Thus, allocation buffers are often allocated more frequently at program/thread start, and then their use often diminishes.
     730
     731Using an allocation buffer with a thread heap avoids active false-sharing, since all objects in the allocation buffer are allocated to the same thread.
     732For example, if all objects sharing a cache line come from the same allocation buffer, then these objects are allocated to the same thread, avoiding active false-sharing.
     733Active false-sharing may still occur if objects are freed to the global heap and reused by another heap.
     734
     735Allocation buffers may increase external fragmentation, since some memory in the allocation buffer may never be allocated.
     736A smaller allocation buffer reduces the amount of external fragmentation, but increases the number of calls to the global heap or operating system.
     737The allocation buffer also slightly increases internal fragmentation, since a pointer is necessary to locate the next free object in the buffer.
     738
      739The unused part of a container, neither allocated nor freed, is an allocation buffer.
     740For example, when a container is created, rather than placing all objects within the container on the free list, the objects form an allocation buffer and are allocated from the buffer as allocation requests are made.
     741This lazy method of constructing objects is beneficial in terms of paging and caching.
     742For example, although an entire container, possibly spanning several pages, is allocated from the operating system, only a small part of the container is used in the working set of the allocator, reducing the number of pages and cache lines that are brought into higher levels of cache.
     743
     744
     745\section{Lock-Free Operations}
     746\label{s:LockFreeOperations}
     747
      748A lock-free algorithm guarantees safe concurrent-access to a data structure, so that at least one thread can always make progress in the system, but an individual task has no bound on its execution, and hence, may starve~\cite[pp.~745--746]{Herlihy93}.
     749% A wait-free algorithm puts a finite bound on the number of steps any thread takes to complete an operation, so an individual task cannot starve
     750Lock-free operations can be used in an allocator to reduce or eliminate the use of locks.
     751Locks are a problem for high contention or if the thread holding the lock is preempted and other threads attempt to use that lock.
      752With respect to the heap, these situations are unlikely unless all threads make extremely high use of dynamic-memory allocation, which can be an indication of poor design.
     753Nevertheless, lock-free algorithms can reduce the number of context switches, since a thread does not yield/block while waiting for a lock;
     754on the other hand, a thread may busy-wait for an unbounded period.
     755Finally, lock-free implementations have greater complexity and hardware dependency.
     756Lock-free algorithms can be applied most easily to simple free-lists, \eg remote free-list, to allow lock-free insertion and removal from the head of a stack.
      757Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is correspondingly harder.
     758Michael~\cite{Michael04} and Gidenstam \etal \cite{Gidenstam05} have created lock-free variations of the Hoard allocator.
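For illustration, a lock-free push onto the head of such a remote free-list can be sketched with C11 atomics; the owner empties the list with a single atomic exchange, which sidesteps the ABA problem of popping individual nodes:
\begin{lstlisting}
#include <stdatomic.h>
#include <stddef.h>

typedef struct FreeNode { struct FreeNode * next; } FreeNode;
typedef struct RemoteFreeList { _Atomic(FreeNode *) head; } RemoteFreeList;

// non-owner thread returns an object to the owner heap's remote free-list
static void remote_push( RemoteFreeList * list, FreeNode * node ) {
        FreeNode * old = atomic_load_explicit( &list->head, memory_order_relaxed );
        do {
                node->next = old;               // link the new node in front of the current head
        } while ( ! atomic_compare_exchange_weak_explicit( &list->head, &old, node,
                        memory_order_release, memory_order_relaxed ) );
}
// owner heap takes the whole batch at once
static FreeNode * remote_take_all( RemoteFreeList * list ) {
        return atomic_exchange_explicit( &list->head, NULL, memory_order_acquire );
}
\end{lstlisting}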
  • doc/theses/mubeen_zulfiqar_MMath/benchmarks.tex

    ref3c383 rd672350  
    4141%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    4242
     43
     44\section{Benchmarks}
      45There are multiple benchmarks that are built individually and evaluate different aspects of a memory allocator. However, there is no standard set of benchmarks that can be used to evaluate multiple aspects of memory allocators.
     46
     47\paragraph{threadtest}
     48(FIX ME: cite benchmark and hoard) Each thread repeatedly allocates and then deallocates 100,000 objects. Runtime of the benchmark evaluates its efficiency.
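A sketch of the per-thread work loop is shown below; the number of rounds and the object size are illustrative assumptions, and only the 100,000-object batch comes from the benchmark description.
\begin{lstlisting}
#include <stdlib.h>

enum { NOBJECTS = 100000 };

// per-thread work loop: repeatedly allocate, then free, a batch of objects
void * threadtest_worker( void * arg ) {
        (void)arg;
        void ** objects = malloc( NOBJECTS * sizeof( void * ) );
        for ( int round = 0; round < 100; round += 1 ) {                 // round count is illustrative
                for ( int i = 0; i < NOBJECTS; i += 1 ) objects[i] = malloc( 8 ); // object size is illustrative
                for ( int i = 0; i < NOBJECTS; i += 1 ) free( objects[i] );
        }
        free( objects );
        return NULL;
}
\end{lstlisting}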
     49
     50\paragraph{shbench}
      51(FIX ME: cite benchmark and hoard) Each thread allocates and randomly frees a number of random-sized objects. It is a stress test that also uses runtime to determine the efficiency of the allocator.
     52
     53\paragraph{larson}
      54(FIX ME: cite benchmark and hoard) Larson simulates a server environment. Multiple threads are created, where each thread allocates and frees a number of objects within a size range. Some objects are passed from threads to child threads to free. It calculates memory operations per second as an indicator of the memory allocator's performance.
     55
     56
    4357\section{Performance Matrices of Memory Allocators}
    4458
  • doc/theses/mubeen_zulfiqar_MMath/intro.tex

    ref3c383 rd672350  
    11\chapter{Introduction}
    22
     3% Shared-memory multi-processor computers are ubiquitous and important for improving application performance.
     4% However, writing programs that take advantage of multiple processors is not an easy task~\cite{Alexandrescu01b}, \eg shared resources can become a bottleneck when increasing (scaling) threads.
     5% One crucial shared resource is program memory, since it is used by all threads in a shared-memory concurrent-program~\cite{Berger00}.
     6% Therefore, providing high-performance, scalable memory-management is important for virtually all shared-memory multi-threaded programs.
     7
     8\vspace*{-23pt}
     9Memory management takes a sequence of program generated allocation/deallocation requests and attempts to satisfy them within a fixed-sized block of memory while minimizing the total amount of memory used.
     10A general-purpose dynamic-allocation algorithm cannot anticipate future allocation requests so its output is rarely optimal.
     11However, memory allocators do take advantage of regularities in allocation patterns for typical programs to produce excellent results, both in time and space (similar to LRU paging).
     12In general, allocators use a number of similar techniques, each optimizing specific allocation patterns.
     13Nevertheless, memory allocators are a series of compromises, occasionally with some static or dynamic tuning parameters to optimize specific program-request patterns.
     14
     15
     16\section{Memory Structure}
     17\label{s:MemoryStructure}
     18
     19\VRef[Figure]{f:ProgramAddressSpace} shows the typical layout of a program's address space divided into the following zones (right to left): static code/data, dynamic allocation, dynamic code/data, and stack, with free memory surrounding the dynamic code/data~\cite{memlayout}.
     20Static code and data are placed into memory at load time from the executable and are fixed-sized at runtime.
     21Dynamic-allocation memory starts empty and grows/shrinks as the program dynamically creates/deletes variables with independent lifetime.
     22The programming-language's runtime manages this area, where management complexity is a function of the mechanism for deleting variables.
     23Dynamic code/data memory is managed by the dynamic loader for libraries loaded at runtime, which is complex especially in a multi-threaded program~\cite{Huang06}.
     24However, changes to the dynamic code/data space are typically infrequent, many occurring at program startup, and are largely outside of a program's control.
     25Stack memory is managed by the program call-mechanism using a simple LIFO technique, which works well for sequential programs.
     26For multi-threaded programs (and coroutines), a new stack is created for each thread;
     27these thread stacks are commonly created in dynamic-allocation memory.
     28This thesis focuses on management of the dynamic-allocation memory.
     29
     30\begin{figure}
     31\centering
     32\input{AddressSpace}
     33\vspace{-5pt}
     34\caption{Program Address Space Divided into Zones}
     35\label{f:ProgramAddressSpace}
     36\end{figure}
     37
     38
     39\section{Dynamic Memory-Management}
     40\label{s:DynamicMemoryManagement}
     41
     42Modern programming languages manage dynamic-allocation memory in different ways.
     43Some languages, such as Lisp~\cite{CommonLisp}, Java~\cite{Java}, Haskell~\cite{Haskell}, Go~\cite{Go}, provide explicit allocation but \emph{implicit} deallocation of data through garbage collection~\cite{Wilson92}.
     44In general, garbage collection supports memory compaction, where dynamic (live) data is moved during runtime to better utilize space.
     45However, moving data requires finding pointers to it and updating them to reflect new data locations.
     46Programming languages such as C~\cite{C}, \CC~\cite{C++}, and Rust~\cite{Rust} provide the programmer with explicit allocation \emph{and} deallocation of data.
     47These languages cannot find and subsequently move live data because pointers can be created to any storage zone, including internal components of allocated objects, and may contain temporary invalid values generated by pointer arithmetic.
     48Attempts have been made to perform quasi garbage collection in C/\CC~\cite{Boehm88}, but it is a compromise.
     49This thesis only examines dynamic memory-management with \emph{explicit} deallocation.
      50While garbage collection and compaction are not part of this work, many of the results are applicable to the allocation phase in any memory-management approach.
     51
     52Most programs use a general-purpose allocator, often the one provided implicitly by the programming-language's runtime.
      53When this allocator proves inadequate, programmers often write specialized allocators for specific needs.
     54C and \CC allow easy replacement of the default memory allocator with an alternative specialized or general-purpose memory-allocator.
     55(Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.)
     56However, high-performance memory-allocators for kernel and user multi-threaded programs are still being designed and improved.
     57For this reason, several alternative general-purpose allocators have been written for C/\CC with the goal of scaling in a multi-threaded program~\cite{Berger00,mtmalloc,streamflow,tcmalloc}.
     58This thesis examines the design of high-performance allocators for use by kernel and user multi-threaded applications written in C/\CC.
     59
     60
     61\section{Contributions}
     62\label{s:Contributions}
     63
     64This work provides the following contributions in the area of concurrent dynamic allocation:
     65\begin{enumerate}[leftmargin=*]
     66\item
      67Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
     68
     69\item
     70Adopt returning of @nullptr@ for a zero-sized allocation, rather than an actual memory address, both of which can be passed to @free@.
     71
     72\item
      73Extend the standard C heap functionality by preserving with each allocation its original request size versus the amount allocated, whether an allocation is zero filled, and the allocation alignment.
     74
     75\item
     76Use the zero fill and alignment as \emph{sticky} properties for @realloc@, to realign existing storage, or preserve existing zero-fill and alignment when storage is copied.
     77Without this extension, it is unsafe to @realloc@ storage initially allocated with zero-fill/alignment as these properties are not preserved when copying.
     78This silent generation of a problem is unintuitive to programmers and difficult to locate because it is transient.
     79
     80\item
      81Provide additional heap operations to complete programmer expectation with respect to accessing different allocation properties (an illustrative usage sketch follows this list).
     82\begin{itemize}
     83\item
     84@resize( oaddr, size )@ re-purpose an old allocation for a new type \emph{without} preserving fill or alignment.
     85\item
     86@resize( oaddr, alignment, size )@ re-purpose an old allocation with new alignment but \emph{without} preserving fill.
     87\item
     88@realloc( oaddr, alignment, size )@ same as previous @realloc@ but adding or changing alignment.
     89\item
     90@aalloc( dim, elemSize )@ same as @calloc@ except memory is \emph{not} zero filled.
     91\item
     92@amemalign( alignment, dim, elemSize )@ same as @aalloc@ with memory alignment.
     93\item
     94@cmemalign( alignment, dim, elemSize )@ same as @calloc@ with memory alignment.
     95\end{itemize}
     96
     97\item
     98Provide additional heap wrapper functions in \CFA to provide a complete orthogonal set of allocation operations and properties.
     99
     100\item
     101Provide additional query operations to access information about an allocation:
     102\begin{itemize}
     103\item
     104@malloc_alignment( addr )@ returns the alignment of the allocation pointed-to by @addr@.
     105If the allocation is not aligned or @addr@ is the @nulladdr@, the minimal alignment is returned.
     106\item
     107@malloc_zero_fill( addr )@ returns a boolean result indicating if the memory pointed-to by @addr@ is allocated with zero fill, e.g., by @calloc@/@cmemalign@.
     108\item
     109@malloc_size( addr )@ returns the size of the memory allocation pointed-to by @addr@.
     110\item
     111@malloc_usable_size( addr )@ returns the usable size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.
     112\end{itemize}
     113
     114\item
     115Provide mostly contention-free allocation and free operations via a heap-per-kernel-thread implementation.
     116
     117\item
     118Provide complete, fast, and contention-free allocation statistics to help understand program behaviour:
     119\begin{itemize}
     120\item
     121@malloc_stats()@ print memory-allocation statistics on the file-descriptor set by @malloc_stats_fd@.
     122\item
     123@malloc_info( options, stream )@ print memory-allocation statistics as an XML string on the specified file-descriptor set by @malloc_stats_fd@.
     124\item
     125@malloc_stats_fd( fd )@ set file-descriptor number for printing memory-allocation statistics (default @STDERR_FILENO@).
     126This file descriptor is used implicitly by @malloc_stats@ and @malloc_info@.
     127\end{itemize}
     128
     129\item
      130Provide extensive runtime checks to validate allocation operations and identify the amount of unfreed storage at program termination.
     131
     132\item
     133Build 4 different versions of the allocator:
     134\begin{itemize}
     135\item
     136static or dynamic linking
     137\item
     138statistic/debugging (testing) or no statistic/debugging (performance)
     139\end{itemize}
     140A program may link to any of these 4 versions of the allocator often without recompilation.
     141(It is possible to separate statistics and debugging, giving 8 different versions.)
     142
     143\item
     144A micro-benchmark test-suite for comparing allocators rather than relying on a suite of arbitrary programs.
      145These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs.
     146\end{enumerate}
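The following fragment is an illustrative usage sketch of a few of these operations; the return types shown are assumptions, as only the parameter lists are specified above.
\begin{lstlisting}
// illustrative fragment only: return types and declarations are assumptions
int * p = aalloc( 10, sizeof( int ) );          // like calloc, but not zero filled
p = resize( p, 64, 20 * sizeof( int ) );        // re-purpose with 64-byte alignment, fill not preserved
size_t usable = malloc_usable_size( p );        // >= malloc_size( p )
free( p );
\end{lstlisting}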
     147
     148\begin{comment}
    3149\noindent
    4150====================
     
    26172
    27173\section{Introduction}
    28 Dynamic memory allocation and management is one of the core features of C. It gives programmer the freedom to allocate, free, use, and manage dynamic memory himself. The programmer is not given the complete control of the dynamic memory management instead an interface of memory allocator is given to the progrmmer that can be used to allocate/free dynamic memory for the application's use.
    29 
    30 Memory allocator is a layer between thr programmer and the system. Allocator gets dynamic memory from the system in heap/mmap area of application storage and manages it for programmer's use.
    31 
    32 GNU C Library (FIX ME: cite this) provides an interchangeable memory allocator that can be replaced with a custom memory allocator that supports required features and fulfills application's custom needs. It also allows others to innovate in memory allocation and design their own memory allocator. GNU C Library has set guidelines that should be followed when designing a standalone memory allocator. GNU C Library requires new memory allocators to have atlease following set of functions in their allocator's interface:
      174Dynamic memory allocation and management is one of the core features of C. It gives the programmer the freedom to allocate, free, use, and manage dynamic memory. The programmer is not given complete control of dynamic memory management; instead, an interface to a memory allocator is given to the programmer, which can be used to allocate/free dynamic memory for the application's use.
     175
      176A memory allocator is a layer between the programmer and the system. The allocator gets dynamic memory from the system in the heap/mmap area of application storage and manages it for the programmer's use.
     177
      178GNU C Library (FIX ME: cite this) provides an interchangeable memory allocator that can be replaced with a custom memory allocator that supports required features and fulfills an application's custom needs. It also allows others to innovate in memory allocation and design their own memory allocators. GNU C Library has set guidelines that should be followed when designing a stand-alone memory allocator. GNU C Library requires new memory allocators to have at least the following set of functions in their allocator's interface:
    33179
    34180\begin{itemize}
     
    43189\end{itemize}
    44190
    45 In addition to the above functions, GNU C Library also provides some more functions to increase the usability of the dynamic memory allocator. Most standalone allocators also provide all or some of the above additional functions.
     191In addition to the above functions, GNU C Library also provides some more functions to increase the usability of the dynamic memory allocator. Most stand-alone allocators also provide all or some of the above additional functions.
    46192
    47193\begin{itemize}
     
    60206\end{itemize}
    61207
    62 With the rise of concurrent applications, memory allocators should be able to fulfill dynamic memory requests from multiple threads in parallel without causing contention on shared resources. There needs to be a set of a standard benchmarks that can be used to evaluate an allocator's performance in different scenerios.
     208With the rise of concurrent applications, memory allocators should be able to fulfill dynamic memory requests from multiple threads in parallel without causing contention on shared resources. There needs to be a set of a standard benchmarks that can be used to evaluate an allocator's performance in different scenarios.
    63209
    64210\section{Research Objectives}
     
    69215Design a lightweight concurrent memory allocator with added features and usability that are currently not present in the other memory allocators.
    70216\item
    71 Design a suite of benchmarks to evalute multiple aspects of a memory allocator.
     217Design a suite of benchmarks to evaluate multiple aspects of a memory allocator.
    72218\end{itemize}
    73219
    74220\section{An outline of the thesis}
    75221LAST FIX ME: add outline at the end
     222\end{comment}
  • doc/theses/mubeen_zulfiqar_MMath/performance.tex

    ref3c383 rd672350  
    1818\noindent
    1919====================
     20
     21\section{Machine Specification}
     22
     23The performance experiments were run on three different multicore systems to determine if there is consistency across platforms:
     24\begin{itemize}
     25\item
     26AMD EPYC 7662, 64-core socket $\times$ 2, 2.0 GHz
     27\item
     28Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz
     29\item
     30Intel Xeon Gold 5220R, 48-core socket $\times$ 2, 2.20GHz
     31\end{itemize}
     32
     33
     34\section{Existing Memory Allocators}
     35With dynamic allocation being an important feature of C, there are many stand-alone memory allocators that have been designed for different purposes. For this thesis, we chose 7 of the most popular and widely used memory allocators.
     36
     37\paragraph{dlmalloc}
      38dlmalloc (FIX ME: cite allocator) is a thread-safe allocator that uses a single heap shared by all threads. dlmalloc maintains free-lists of different sizes to store freed dynamic memory. (FIX ME: cite wasik)
     39
     40\paragraph{hoard}
      41Hoard (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and built using a heap-layer framework. It has per-thread heaps with thread-local free-lists, and a global shared heap. (FIX ME: cite wasik)
     42
     43\paragraph{jemalloc}
      44jemalloc (FIX ME: cite allocator) is a thread-safe allocator that uses multiple arenas. Each thread is assigned an arena. Each arena has chunks that contain contiguous memory regions of the same size. An arena has multiple chunks that contain regions of multiple sizes.
     45
     46\paragraph{ptmalloc}
      47ptmalloc (FIX ME: cite allocator) is a modification of dlmalloc. It is a thread-safe multi-threaded memory allocator that uses multiple heaps. ptmalloc's heaps have a similar design to dlmalloc's heap.
     48
     49\paragraph{rpmalloc}
      50rpmalloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses per-thread heaps. Each heap has multiple size-classes, and each size-class contains memory regions of the relevant size.
     51
     52\paragraph{tbb malloc}
      53tbb malloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses a private heap for each thread. Each private heap has multiple bins of different sizes, and each bin contains free regions of the same size.
     54
      55\paragraph{tcmalloc}
      56tcmalloc (FIX ME: cite allocator) is a thread-safe allocator. It uses a per-thread cache to store free objects, which prevents contention on shared resources in multi-threaded applications. A central free-list is used to refill the per-thread cache when it becomes empty.
     57
    2058
    2159\section{Memory Allocators}
  • doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.bib

    ref3c383 rd672350  
    3434    year          = "2008"
    3535}
     36
     37@article{Sleator85,
     38    author      = {Sleator, Daniel Dominic and Tarjan, Robert Endre},
     39    title       = {Self-Adjusting Binary Search Trees},
     40    journal     = jacm,
     41    volume      = 32,
     42    number      = 3,
     43    year        = 1985,
     44    issn        = {0004-5411},
     45    pages       = {652-686},
     46    doi         = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/3828.3835},
     47    address     = {New York, NY, USA},
     48}
     49
     50@article{Berger00,
     51    author      = {Emery D. Berger and Kathryn S. McKinley and Robert D. Blumofe and Paul R. Wilson},
     52    title       = {Hoard: A Scalable Memory Allocator for Multithreaded Applications},
     53    booktitle   = {International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX)},
     54    journal     = sigplan,
     55    volume      = 35,
     56    number      = 11,
     57    month       = nov,
     58    year        = 2000,
     59    pages       = {117-128},
     60    note        = {International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX)},
     61}
     62
     63@inproceedings{berger02reconsidering,
     64    author      = {Emery D. Berger and Benjamin G. Zorn and Kathryn S. McKinley},
     65    title       = {Reconsidering Custom Memory Allocation},
     66    booktitle   = {Proceedings of the 17th ACM SIGPLAN Conference on Object-Oriented Programming: Systems, Languages, and Applications (OOPSLA) 2002},
     67    month       = nov,
     68    year        = 2002,
     69    location    = {Seattle, Washington, USA},
     70    publisher   = {ACM},
     71    address     = {New York, NY, USA},
     72}
     73
     74@article{larson99memory,
     75    author      = {Per-{\AA}ke Larson and Murali Krishnan},
     76    title       = {Memory Allocation for Long-Running Server Applications},
     77    journal     = sigplan,
     78    volume      = 34,
     79    number      = 3,
     80    pages       = {176-185},
     81    year        = 1999,
     82    url         = {http://citeseer.ist.psu.edu/article/larson98memory.html}
     83}
     84
     85@techreport{gidpt04,
     86    author      = {Anders Gidenstam and Marina Papatriantafilou and Philippas Tsigas},
     87    title       = {Allocating Memory in a Lock-Free Manner},
     88    number      = {2004-04},
     89    institution = {Computing Science},
     90    address     = {Chalmers University of Technology},
     91    year        = 2004,
     92    url         = {http://citeseer.ist.psu.edu/gidenstam04allocating.html}
     93}
     94
     95@phdthesis{berger02thesis,
     96    author      = {Emery Berger},
     97    title       = {Memory Management for High-Performance Applications},
     98    school      = {The University of Texas at Austin},
     99    year        = 2002,
     100    month       = aug,
     101    url         = {http://citeseer.ist.psu.edu/article/berger02memory.html}
     102}
     103
     104@misc{sgimisc,
     105    author      = {SGI},
     106    title       = {The Standard Template Library for {C++}},
     107    note        = {\textsf{www.sgi.com/\-tech/\-stl/\-Allocators.html}},
     108}
     109
     110@misc{dlmalloc,
     111    author      = {Doug Lea},
     112    title       = {dlmalloc version 2.8.4},
     113    month       = may,
     114    year        = 2009,
     115    note        = {\textsf{ftp://g.oswego.edu/\-pub/\-misc/\-malloc.c}},
     116}
     117
     118@misc{ptmalloc2,
     119    author      = {Wolfram Gloger},
     120    title       = {ptmalloc version 2},
     121    month       = jun,
     122    year        = 2006,
     123    note        = {\textsf{http://www.malloc.de/\-malloc/\-ptmalloc2-current.tar.gz}},
     124}
     125
     126@misc{nedmalloc,
     127    author      = {Niall Douglas},
     128    title       = {nedmalloc version 1.06 Beta},
     129    month       = jan,
     130    year        = 2010,
     131    note        = {\textsf{http://\-prdownloads.\-sourceforge.\-net/\-nedmalloc/\-nedmalloc\_v1.06beta1\_svn1151.zip}},
     132}
     133
     134@misc{hoard,
     135    author      = {Emery D. Berger},
     136    title       = {hoard version 3.8},
     137    month       = nov,
     138    year        = 2009,
     139    note        = {\textsf{http://www.cs.umass.edu/\-$\sim$emery/\-hoard/\-hoard-3.8/\-source/hoard-38.tar.gz}},
     140}
     141
     142@comment{mtmalloc,
     143    author      = {Greg Nakhimovsky},
     144    title       = {Improving Scalability of Multithreaded Dynamic Memory Allocation},
     145    journal     = {Dr. Dobb's},
     146    month       = jul,
     147    year        = 2001,
     148    url         = {http://www.ddj.com/mobile/184404685?pgno=1}
     149}
     150
     151@misc{mtmalloc,
     152    key         = {mtmalloc},
     153    title       = {mtmalloc.c},
     154    year        = 2009,
     155    note        = {\textsf{http://src.opensolaris.org/\-source/\-xref/\-onnv/\-onnv-gate/\-usr/\-src/\-lib/\-libmtmalloc/\-common/\-mtmalloc.c}},
     156}
     157
     158@misc{tcmalloc,
     159    author      = {Sanjay Ghemawat and Paul Menage},
     160    title       = {tcmalloc version 1.5},
     161    month       = jan,
     162    year        = 2010,
     163    note        = {\textsf{http://google-perftools.\-googlecode.\-com/\-files/\-google-perftools-1.5.tar.gz}},
     164}
     165
     166@inproceedings{streamflow,
     167    author      = {Scott Schneider and Christos D. Antonopoulos and Dimitrios S. Nikolopoulos},
     168    title       = {Scalable Locality-Conscious Multithreaded Memory Allocation},
     169    booktitle   = {International Symposium on Memory Management (ISSM'06)},
     170    month       = jun,
     171    year        = 2006,
     172    pages       = {84-94},
     173    location    = {Ottawa, Ontario, Canada},
     174    publisher   = {ACM},
     175    address     = {New York, NY, USA},
     176}
     177
     178@misc{streamflowweb,
     179    author      = {Scott Schneider and Christos Antonopoulos and Dimitrios Nikolopoulos},
     180    title       = {Streamflow},
     181    note        = {\textsf{http://people.cs.vt.edu/\-\char`\~scschnei/\-streamflow}},
     182}
     183
     184@inproceedings{Blumofe94,
     185    author      = {R. Blumofe and C. Leiserson},
     186    title       = {Scheduling Multithreaded Computations by Work Stealing},
     187    booktitle   = {Proceedings of the 35th Annual Symposium on Foundations of Computer Science, Santa Fe, New Mexico.},
     188    pages       = {356-368},
     189    year        = 1994,
     190    month       = nov,
     191    url         = {http://citeseer.ist.psu.edu/article/blumofe94scheduling.html}
     192}
     193
     194@article{Johnstone99,
     195    author      = {Mark S. Johnstone and Paul R. Wilson},
     196    title       = {The Memory Fragmentation Problem: Solved?},
     197    journal     = sigplan,
     198    volume      = 34,
     199    number      = 3,
     200    pages       = {26-36},
     201    year        = 1999,
     202}
     203
     204@inproceedings{Grunwald93,
     205    author      = {Dirk Grunwald and Benjamin G. Zorn and Robert Henderson},
     206    title       = {Improving the Cache Locality of Memory Allocation},
     207    booktitle   = {{SIGPLAN} Conference on Programming Language Design and Implementation},
     208    pages       = {177-186},
     209    year        = 1993,
     210    url         = {http://citeseer.ist.psu.edu/grunwald93improving.html}
     211}
     212
     213@inproceedings{Wilson95,
     214    author      = {Wilson, Paul R. and Johnstone, Mark S. and Neely, Michael and Boles, David},
     215    title       = {Dynamic Storage Allocation: A Survey and Critical Review},
     216    booktitle   = {Proc. Int. Workshop on Memory Management},
     217    address     = {Kinross Scotland, UK},
     218    year        = 1995,
     219    url         = {http://citeseer.ist.psu.edu/wilson95dynamic.html}
     220}
     221
     222@inproceedings{Siebert00,
     223    author      = {Fridtjof Siebert},
     224    title       = {Eliminating External Fragmentation in a Non-moving Garbage Collector for Java},
     225    booktitle   = {CASES '00: Proceedings of the 2000 international conference on Compilers, architecture, and synthesis for embedded systems},
     226    year        = 2000,
     227    isbn        = {1-58113-338-3},
     228    pages       = {9-17},
     229    location    = {San Jose, California, United States},
     230    doi         = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/354880.354883},
     231    publisher   = {ACM Press},
     232    address     = {New York, NY, USA}
     233}
     234
     235@inproceedings{Lim98,
     236   author       = {Tian F. Lim and Przemyslaw Pardyak and Brian N. Bershad},
     237   title        = {A Memory-Efficient Real-Time Non-copying Garbage Collector},
     238   booktitle    = {ISMM '98: Proceedings of the 1st international symposium on Memory management},
     239   year         = 1998,
     240   isbn         = {1-58113-114-3},
     241   pages        = {118-129},
     242   location     = {Vancouver, British Columbia, Canada},
     243   doi          = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/286860.286873},
     244   publisher    = {ACM Press},
     245   address      = {New York, NY, USA}
     246}
     247
     248@article{Chang01,
     249    author      = {J. Morris Chang and Woo Hyong Lee and Witawas Srisa-an},
     250    title       = {A Study of the Allocation Behavior of {C++} Programs},
     251    journal     = {J. Syst. Softw.},
     252    volume      = 57,
     253    number      = 2,
     254    year        = 2001,
     255    issn        = {0164-1212},
     256    pages       = {107-118},
     257    doi         = {http://dx.doi.org/10.1016/S0164-1212(00)00122-9},
     258    publisher   = {Elsevier Science Inc.},
     259    address     = {New York, NY, USA}
     260}
     261
     262@article{Herlihy93,
     263    author      = {Maurice Herlihy},
     264    title       = {A Methodology for Implementing Highly Concurrent Data Objects},
     265    journal     = toplas,
     266    volume      = 15,
     267    number      = 5,
     268    year        = 1993,
     269    issn        = {0164-0925},
     270    pages       = {745-770},
     271    doi         = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/161468.161469},
     272    publisher   = {ACM Press},
     273    address     = {New York, NY, USA}
     274}
     275
     276@article{Denning05,
     277    author      = {Peter J. Denning},
     278    title       = {The Locality Principle},
     279    journal     = cacm,
     280    volume      = 48,
     281    number      = 7,
     282    year        = 2005,
     283    issn        = {0001-0782},
     284    pages       = {19-24},
     285    doi         = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/1070838.1070856},
     286    publisher   = {ACM Press},
     287    address     = {New York, NY, USA}
     288}
     289
     290@misc{wilson-locality,
     291    author      = {Paul R. Wilson},
     292    title       = {Locality of Reference, Patterns in Program Behavior, Memory Management, and Memory Hierarchies},
     293    url         = {http://citeseer.ist.psu.edu/337869.html}
     294}
     295
     296@inproceedings{Feng05,
     297    author      = {Yi Feng and Emery D. Berger},
     298    title       = {A Locality-Improving Dynamic Memory Allocator},
     299    booktitle   = {Proceedings of the 2005 Workshop on Memory System Performance},
     300    location    = {Chicago, Illinois},
     301    publisher   = {ACM},
     302    address     = {New York, NY, USA},
     303    month       = jun,
     304    year        = 2005,
     305    pages       = {68-77},
     306}
     307
     308@inproceedings{grunwald-locality,
     309    author      = {Dirk Grunwald and Benjamin Zorn and Robert Henderson},
     310    title       = {Improving the Cache Locality of Memory Allocation},
     311    booktitle   = {PLDI '93: Proceedings of the ACM SIGPLAN 1993 conference on Programming language design and implementation},
     312    year        = 1993,
     313    isbn        = {0-89791-598-4},
     314    pages       = {177-186},
     315    location    = {Albuquerque, New Mexico, United States},
     316    doi         = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/155090.155107},
     317    publisher   = {ACM Press},
     318    address     = {New York, NY, USA}
     319}
     320
     321@article{Alexandrescu01b,
     322    author      = {Andrei Alexandrescu},
     323    title       = {{volatile} -- Multithreaded Programmer's Best Friend},
     324    journal     = {Dr. Dobb's},
     325    month       = feb,
     326    year        = 2001,
     327    url         = {http://www.ddj.com/cpp/184403766}
     328}
     329
     330@article{Attardi03,
     331    author      = {Joseph Attardi and Neelakanth Nadgir},
     332    title       = {A Comparison of Memory Allocators in Multiprocessors},
     333    journal     = {Sun Developer Network},
     334    month       = jun,
     335    year        = 2003,
     336    note        = {\textsf{http://developers.sun.com/\-solaris/\-articles/\-multiproc/\-multiproc.html}},
     337}
     338
     339@unpublished{memlayout,
     340    author      = {Peter Jay Salzman},
     341    title       = {Memory Layout and the Stack},
     342    journal     = {Using GNU's GDB Debugger},
     343    note        = {\textsf{http://dirac.org/\-linux/\-gdb/\-02a-Memory\_Layout\_And\_The\_Stack.php}},
     344}
     345
     346@unpublished{Ferguson07,
     347    author      = {Justin N. Ferguson},
     348    title       = {Understanding the Heap by Breaking It},
     349    note        = {\textsf{https://www.blackhat.com/\-presentations/\-bh-usa-07/Ferguson/\-Whitepaper/\-bh-usa-07-ferguson-WP.pdf}},
     350}
     351
     352@inproceedings{Huang06,
     353    author      = {Xianglong Huang and Brian T Lewis and Kathryn S McKinley},
     354    title       = {Dynamic Code Management: Improving Whole Program Code Locality in Managed Runtimes},
     355    booktitle   = {VEE '06: Proceedings of the 2nd international conference on Virtual execution environments},
     356    year        = 2006,
     357    isbn        = {1-59593-332-6},
     358    pages       = {133-143},
     359    location    = {Ottawa, Ontario, Canada},
     360    doi         = {http://doi.acm.org/10.1145/1134760.1134779},
     361    publisher   = {ACM Press},
     362    address     = {New York, NY, USA}
     363 }
     364
     365@inproceedings{Herlihy03,
     366    author      = {M. Herlihy and V. Luchangco and M. Moir},
     367    title       = {Obstruction-free Synchronization: Double-ended Queues as an Example},
     368    booktitle   = {Proceedings of the 23rd IEEE International Conference on Distributed Computing Systems},
     369    year        = 2003,
     370    month       = may,
     371    url         = {http://www.cs.brown.edu/~mph/publications.html}
     372}
     373
     374@techreport{Detlefs93,
     375    author      = {David L. Detlefs and Al Dosser and Benjamin Zorn},
     376    title       = {Memory Allocation Costs in Large {C} and {C++} Programs},
     377    number      = {CU-CS-665-93},
     378    institution = {University of Colorado},
     379    address     = {130 Lytton Avenue, Palo Alto, CA 94301 and Campus Box 430, Boulder, CO 80309},
     380    year        = 1993,
     381    url         = {http://citeseer.ist.psu.edu/detlefs93memory.html}
     382}
     383
     384@inproceedings{Oyama99,
     385    author      = {Y. Oyama and K. Taura and A. Yonezawa},
     386    title       = {Executing Parallel Programs With Synchronization Bottlenecks Efficiently},
     387    booktitle   = {Proceedings of International Workshop on Parallel and Distributed Computing for Symbolic and Irregular Applications (PDSIA '99)},
     388    year        = {1999},
     389    pages       = {182--204},
     390    publisher   = {World Scientific},
     391    address     = {Sendai, Japan},
     392}
     393
     394@inproceedings{Dice02,
     395    author      = {Dave Dice and Alex Garthwaite},
     396    title       = {Mostly Lock-Free Malloc},
     397    booktitle   = {Proceedings of the 3rd international symposium on Memory management (ISMM'02)},
     398    month       = jun,
     399    year        = 2002,
     400    pages       = {163-174},
     401    location    = {Berlin, Germany},
     402    publisher   = {ACM},
     403    address     = {New York, NY, USA},
     404}
  • doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex

    ref3c383 rd672350  
    8585\usepackage{comment} % Removes large sections of the document.
    8686\usepackage{tabularx}
     87\usepackage{subfigure}
     88
     89\usepackage{algorithm}
     90\usepackage{algpseudocode}
    8791
    8892% Hyperlinks make it very easy to navigate an electronic document.
     
    168172%\usepackageinput{common}
    169173\CFAStyle                                               % CFA code-style for all languages
    170 \lstset{basicstyle=\linespread{0.9}\tt}                 % CFA typewriter font
     174\lstset{basicstyle=\linespread{0.9}\sf}                 % CFA typewriter font
     175\newcommand{\uC}{$\mu$\CC}
    171176\newcommand{\PAB}[1]{{\color{red}PAB: #1}}
    172177
     
    224229\addcontentsline{toc}{chapter}{\textbf{References}}
    225230
    226 \bibliography{uw-ethesis,pl}
     231\bibliography{pl,uw-ethesis}
    227232% Tip: You can create multiple .bib files to organize your references.
    228233% Just list them all in the \bibliography command, separated by commas (no spaces).
  • doc/theses/thierry_delisle_PhD/thesis/text/existing.tex

    ref3c383 rd672350  
    11\chapter{Previous Work}\label{existing}
    2 Scheduling is a topic with a very long history, predating its use in computer science. As such, early work in computed science was inspired from other fields and focused principally on solving scheduling upfront rather that as the system is running.
      2Scheduling is the process of assigning resources to incoming requests.
      3A very common form of this is assigning available workers to work-requests.
      4The need for scheduling is very common in Computer Science, \eg Operating Systems and Hypervisors schedule available CPUs, NICs schedule available bandwidth, but it is also common in other fields.
      5For example, on an assembly line, parts needing assembly are assigned to line workers.
      6
      7In all these cases, the choice of a scheduling algorithm generally depends first and foremost on how much information is available to the scheduler.
      8Workloads that are well-known, consistent, and homogeneous can benefit from a scheduler that is optimized to use this information, while ill-defined, inconsistent, heterogeneous workloads require general algorithms.
      9A secondary aspect is how much information can be gathered by the scheduler versus how much must be given as part of the input.
      10There is therefore a spectrum of scheduling algorithms, going from static schedulers that are well informed from the start, to schedulers that gather most of the information they need, to schedulers that can only rely on very limited information.
      11Note that this description includes both information about each request, \eg time to complete or resources needed, and information about the relationships between requests, \eg whether or not some request must be completed before another request starts.
      12
      13Scheduling physical resources, for example on assembly lines, is generally amenable to very well-informed scheduling, since information can be gathered much faster than the physical resources can be assigned and workloads are likely to stay stable for long periods of time.
      14When a faster pace is needed and changes are much more frequent, gathering information on workloads, up-front or live, becomes much more limiting and more general schedulers are needed.
    315
    416\section{Naming Convention}
     
    618
    719\section{Static Scheduling}
    8 Static schedulers require that programmers explicitly and exhaustively specify dependencies among tasks in order to schedule them. The scheduler then processes this input ahead of time and producess a \newterm{schedule} to which the system can later adhere. An example application for these schedulers
    9 
      20Static schedulers require that tasks have their dependencies and costs explicitly and exhaustively specified prior to scheduling.
      21The scheduler then processes this input ahead of time and produces a \newterm{schedule} to which the system can later adhere.
      22This approach is generally popular in real-time systems, since the need for strong guarantees justifies the cost of supplying this information.
     1023In general, static schedulers are less relevant to this project since they require input from the programmers that \CFA does not have as part of its concurrency semantics.
    11 \todo{Rate-monotonic scheduling}
      24Specifying this information explicitly can place a significant burden on programmers and reduces flexibility; for this reason, the \CFA scheduler does not require this information.
    1225
    1326
    1427\section{Dynamic Scheduling}
    15 It may be difficult to fulfill the requirements of static scheduler if dependencies are be conditionnal. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of halting or suspending a task with unfulfilled dependencies and adding one or more new task(s) to the system. The new task(s) have the responsability of adding the dependent task back in the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only tasks we no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}.
      28It may be difficult to fulfill the requirements of a static scheduler if dependencies are conditional. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of halting or suspending a task with unfulfilled dependencies and adding one or more new task(s) to the system. The new task(s) have the responsibility of adding the dependent task back into the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only tasks with no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}.
    1629
    1730\subsection{Explicitly Informed Dynamic Schedulers}
     
    2942\subsubsection{Feedback Scheduling}
     3043As mentioned, schedulers may also gather information about each task to direct their decisions. This design effectively moves the scheduler to some extent into the realm of \newterm{Control Theory}\cite{wiki:controltheory}. This gathering does not generally involve programmers and as such does not increase programmer burden the same way explicitly provided information may. However, some feedback schedulers do offer programmers the option to provide additional information on certain tasks, in order to direct scheduling decisions. The important distinction is whether or not the scheduler can function without this additional information.
    31 
    32 Feedback scheduler
    3344
    3445
  • doc/theses/thierry_delisle_PhD/thesis/text/io.tex

    ref3c383 rd672350  
    11\chapter{User Level \io}
    22As mentioned in Section~\ref{prev:io}, User-Level \io requires multiplexing the \io operations of many \glspl{thrd} onto fewer \glspl{proc} using asynchronous \io operations.
    3 Different operating systems offer various forms of asynchronous operations and as mentioned in Chapter~\ref{intro}, this work is exclusively focused on the Linux operating-system.
     3Different operating systems offer various forms of asynchronous operations and, as mentioned in Chapter~\ref{intro}, this work is exclusively focused on the Linux operating-system.
    44
    55\section{Kernel Interface}
     
     178178Since completions are sent to the instance where requests were submitted, all instances with pending operations must be polled continuously
    179179\footnote{As will be described in Chapter~\ref{practice}, this does not translate into constant cpu usage.}.
     180Note that once an operation completes, there is nothing that ties it to the @io_uring@ instance that handled it.
      181There is nothing preventing a new operation with, for example, the same file descriptors from being submitted to a different @io_uring@ instance.
    180182
    181183A complicating aspect of submission is @io_uring@'s support for chains of operations, where the completion of an operation triggers the submission of the next operation on the link.
     
    240242To remove this requirement, a \gls{thrd} would need the ability to ``yield to a specific \gls{proc}'', \ie, park with the promise that it will be run next on a specific \gls{proc}, the \gls{proc} attached to the correct ring.}
    241243, greatly simplifying both allocation and submission.
    242 In this design, allocation and submission form a ring partitionned ring buffer as shown in Figure~\ref{fig:pring}.
      244In this design, allocation and submission form a partitioned ring buffer as shown in Figure~\ref{fig:pring}.
    243245Once added to the ring buffer, the attached \gls{proc} has a significant amount of flexibility with regards to when to do the system call.
    244 Possible options are: when the \gls{proc} runs out of \glspl{thrd} to run, after running a given number of threads \glspl{thrd}, etc.
     246Possible options are: when the \gls{proc} runs out of \glspl{thrd} to run, after running a given number of \glspl{thrd}, etc.
    245247
    246248\begin{figure}
  • doc/user/user.tex

    ref3c383 rd672350  
    1111%% Created On       : Wed Apr  6 14:53:29 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Sat Feb 12 17:04:03 2022
    14 %% Update Count     : 5376
     13%% Last Modified On : Mon Feb 14 17:20:39 2022
     14%% Update Count     : 5382
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
     
    82238223Random numbers are values generated independently, i.e., new values do not depend on previous values (independent trials), \eg lottery numbers, shuffled cards, dice roll, coin flip.
    82248224While a primary goal of programming is computing values that are \emph{not} random, random values are useful in simulation, cryptography, games, etc.
    8225 A random-number generator is an algorithm computing independent values.
    8226 If the algorithm uses deterministic computation (predictable sequence of values), it generates \emph{pseudo} random numbers versus \emph{true} random numbers.
     8225A random-number generator is an algorithm that computes independent values.
     8226If the algorithm uses deterministic computation (a predictable sequence of values), it generates \emph{pseudo} random numbers versus \emph{true} random numbers.
    82278227
    82288228All \newterm{pseudo random-number generators} (\newterm{PRNG}) involve some technique to scramble bits of a value, \eg multiplicative recurrence:
     
    82498249Finally, a PRNG usually generates a range of large values, \eg ©[0, UINT_MAX]©, which are scaled using the modulus operator, \eg ©prng() % 5© produces random values in the range 0--4.
    82508250
    8251 \CFA provides a sequential and concurrent PRNGs.
     8251\CFA provides a sequential PRNG type only accessible by a single thread (not thread-safe) and a set of global and companion thread PRNG functions accessible by multiple threads without contention.
    82528252\begin{itemize}
    82538253\item
    8254 For sequential programs, like coroutining, the PRNG is used to randomize behaviour or values during execution, \eg in games, a character makes a random move or an object takes on a random value.
     8254The ©PRNG© type is for sequential programs, like coroutining:
    82558255\begin{cfa}
    82568256struct PRNG { ... }; $\C[3.75in]{// opaque type}$
     
    82648264uint32_t calls( PRNG & prng ); $\C{// number of calls}\CRT$
    82658265\end{cfa}
    8266 Sequential execution is repeatable given the same starting seeds for all ©PRNG©s.
    8267 In this scenario, it is useful to have multiple ©PRNG©, \eg one per player or object so a type is provided to generate multiple instances.
     8266A ©PRNG© object is used to randomize behaviour or values during execution, \eg in games, a character makes a random move or an object takes on a random value.
     8267In this scenario, it is useful to have multiple ©PRNG© objects, \eg one per player or object.
     8268However, sequential execution is still repeatable given the same starting seeds for all ©PRNG©s.
    82688269\VRef[Figure]{f:SequentialPRNG} shows an example that creates two sequential ©PRNG©s, sets both to the same seed (1009), and illustrates the three forms for generating random values, where both ©PRNG©s generate the same sequence of values.
    82698270
     
    83078308\end{tabular}
    83088309\end{cquote}
    8309 \vspace{-10pt}
    83108310\caption{Sequential PRNG}
    83118311\label{f:SequentialPRNG}
     
    83138313
    83148314\item
    8315 For concurrent programs, it is important the PRNG is thread-safe and not a point of contention.
    8316 A PRNG in concurrent programs is often used to randomize execution in short-running programs, \eg ©yield( prng() % 5 )©.
    8317 
    8318 Because concurrent execution is non-deterministic, seeding the concurrent PRNG is less important, as repeatable execution is impossible.
    8319 Hence, there is one system-wide PRNG (global seed) but each \CFA thread has its own non-contended PRNG state.
    8320 If the global seed is set, threads start with this seed, until it is reset and than threads start with the reset seed.
    8321 Hence, these threads generate the same sequence of random numbers from their specific starting seed.
    8322 If the global seed is \emph{not} set, threads start with a random seed, until the global seed is set.
    8323 Hence, these threads generate different sequences of random numbers.
    8324 If each thread needs its own seed, use a sequential ©PRNG© in each thread.
    8325 
    8326 There are two versions of the PRNG functions to manipulate the thread-local PRNG-state, which are differentiated by performance.
     8315The PRNG global and companion thread functions are for concurrent programming, such as randomizing execution in short-running programs, \eg ©yield( prng() % 5 )©.
    83278316\begin{cfa}
    83288317void set_seed( uint32_t seed ); $\C[3.75in]{// set global seed}$
     
    83378326uint32_t prng( $thread\LstStringStyle{\textdollar}$ & th, uint32_t l, uint32_t u );     $\C{// [l,u]}\CRT$
    83388327\end{cfa}
    8339 The slower ©prng© functions call ©active_thread© internally to access the thread-local PRNG-state, while the faster ©prng© functions are passed a pointer to the active thread.
    8340 If the thread pointer is known, \eg in a thread ©main©, eliminating the call to ©active_thread© significantly reduces the cost for accessing the thread's PRNG state.
     8328The only difference between the two sets of ©prng© routines is performance.
     8329
     8330Because concurrent execution is non-deterministic, seeding the concurrent PRNG is less important, as repeatable execution is impossible.
     8331Hence, there is one system-wide PRNG (global seed) but each \CFA thread has its own non-contended PRNG state.
     8332If the global seed is set, threads start with this seed, until it is reset and then threads start with the reset seed.
     8333Hence, these threads generate the same sequence of random numbers from their specific starting seed.
     8334If the global seed is \emph{not} set, threads start with a random seed, until the global seed is set.
     8335Hence, these threads generate different sequences of random numbers.
     8336If each thread needs its own seed, use a sequential ©PRNG© in each thread.
     8337The slower ©prng© functions \emph{without} a thread argument call ©active_thread© internally to indirectly access the current thread's PRNG state, while the faster ©prng© functions \emph{with} a thread argument directly access the thread through the thread parameter.
     8338If a thread pointer is available, \eg in thread main, eliminating the call to ©active_thread© significantly reduces the cost of accessing the thread's PRNG state.
    83418339\VRef[Figure]{f:ConcurrentPRNG} shows an example using the slower/faster concurrent PRNG in the program main and a thread.
    83428340
  • driver/cc1.cc

    ref3c383 rd672350  
    1010// Created On       : Fri Aug 26 14:23:51 2005
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed Jul 21 09:46:24 2021
    13 // Update Count     : 419
     12// Last Modified On : Thu Feb 17 18:04:23 2022
     13// Update Count     : 422
    1414//
    1515
     
    6161static string __CFA_FLAGPREFIX__( "__CFA_FLAG" );               // "__CFA_FLAG__=" suffix
    6262
    63 static void checkEnv1( const char * args[], int & nargs ) { // stage 1
     63static void checkEnv1() {                                                               // stage 1
    6464        extern char ** environ;
    6565
     
    155155        cerr << "Stage1" << endl;
    156156        #endif // __DEBUG_H__
    157         checkEnv1( args, nargs );                                                       // arguments passed via environment variables
     157        checkEnv1();                                                                            // arguments passed via environment variables
    158158        #ifdef __DEBUG_H__
    159159        for ( int i = 1; i < argc; i += 1 ) {
  • libcfa/src/Makefile.am

    ref3c383 rd672350  
    6363        containers/queueLockFree.hfa \
    6464        containers/stackLockFree.hfa \
     65        containers/string_sharectx.hfa \
    6566        containers/vector2.hfa \
    6667        vec/vec.hfa \
     
    118119        concurrency/exception.hfa \
    119120        concurrency/kernel.hfa \
     121        concurrency/kernel/cluster.hfa \
    120122        concurrency/locks.hfa \
    121123        concurrency/monitor.hfa \
     
    133135        concurrency/io/call.cfa \
    134136        concurrency/iofwd.hfa \
    135         concurrency/kernel_private.hfa \
     137        concurrency/kernel/private.hfa \
    136138        concurrency/kernel/startup.cfa \
    137139        concurrency/preemption.cfa \
  • libcfa/src/concurrency/coroutine.cfa

    ref3c383 rd672350  
    2727#include <unwind.h>
    2828
    29 #include "kernel_private.hfa"
     29#include "kernel/private.hfa"
    3030#include "exception.hfa"
    3131#include "math.hfa"
  • libcfa/src/concurrency/io.cfa

    ref3c383 rd672350  
    4141        #include "kernel.hfa"
    4242        #include "kernel/fwd.hfa"
    43         #include "kernel_private.hfa"
     43        #include "kernel/private.hfa"
    4444        #include "io/types.hfa"
    4545
     
    9393        extern void __kernel_unpark( thread$ * thrd, unpark_hint );
    9494
    95         bool __cfa_io_drain( processor * proc ) {
     95        bool __cfa_io_drain( $io_context * ctx ) {
    9696                /* paranoid */ verify( ! __preemption_enabled() );
    9797                /* paranoid */ verify( ready_schedule_islocked() );
    98                 /* paranoid */ verify( proc );
    99                 /* paranoid */ verify( proc->io.ctx );
     98                /* paranoid */ verify( ctx );
    10099
    101100                // Drain the queue
    102                 $io_context * ctx = proc->io.ctx;
    103101                unsigned head = *ctx->cq.head;
    104102                unsigned tail = *ctx->cq.tail;
     
    110108                if(count == 0) return false;
    111109
     110                if(!__atomic_try_acquire(&ctx->cq.lock)) {
     111                        return false;
     112                }
     113
    112114                for(i; count) {
    113115                        unsigned idx = (head + i) & mask;
     
    130132                /* paranoid */ verify( ready_schedule_islocked() );
    131133                /* paranoid */ verify( ! __preemption_enabled() );
     134
     135                __atomic_unlock(&ctx->cq.lock);
    132136
    133137                return true;
     
    175179                        /* paranoid */ verify( ! __preemption_enabled() );
    176180
    177                         ctx.proc->io.pending = false;
     181                        __atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED);
    178182                }
    179183
    180184                ready_schedule_lock();
    181                 bool ret = __cfa_io_drain( proc );
     185                bool ret = __cfa_io_drain( &ctx );
    182186                ready_schedule_unlock();
    183187                return ret;
     
    287291        //=============================================================================================
    288292        // submission
    289         static inline void __submit( struct $io_context * ctx, __u32 idxs[], __u32 have, bool lazy) {
     293        static inline void __submit_only( struct $io_context * ctx, __u32 idxs[], __u32 have) {
    290294                // We can proceed to the fast path
    291295                // Get the right objects
     
    304308                sq.to_submit += have;
    305309
    306                 ctx->proc->io.pending = true;
    307                 ctx->proc->io.dirty   = true;
     310                __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED);
     311                __atomic_store_n(&ctx->proc->io.dirty  , true, __ATOMIC_RELAXED);
     312        }
     313
     314        static inline void __submit( struct $io_context * ctx, __u32 idxs[], __u32 have, bool lazy) {
     315                __sub_ring_t & sq = ctx->sq;
     316                __submit_only(ctx, idxs, have);
     317
    308318                if(sq.to_submit > 30) {
    309319                        __tls_stats()->io.flush.full++;
     
    402412// I/O Arbiter
    403413//=============================================================================================
    404         static inline void block(__outstanding_io_queue & queue, __outstanding_io & item) {
     414        static inline bool enqueue(__outstanding_io_queue & queue, __outstanding_io & item) {
     415                bool was_empty;
     416
    405417                // Lock the list, it's not thread safe
    406418                lock( queue.lock __cfaabi_dbg_ctx2 );
    407419                {
     420                        was_empty = empty(queue.queue);
     421
    408422                        // Add our request to the list
    409423                        add( queue.queue, item );
     
    414428                unlock( queue.lock );
    415429
    416                 wait( item.sem );
     430                return was_empty;
    417431        }
    418432
     
    432446                pa.want = want;
    433447
    434                 block(this.pending, (__outstanding_io&)pa);
     448                enqueue(this.pending, (__outstanding_io&)pa);
     449
     450                wait( pa.sem );
    435451
    436452                return pa.ctx;
     
    485501                ei.lazy = lazy;
    486502
    487                 block(ctx->ext_sq, (__outstanding_io&)ei);
     503                bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei);
     504
     505                __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST);
     506
     507                if( we ) {
     508                        sigval_t value = { PREEMPT_IO };
     509                        pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
     510                }
     511
     512                wait( ei.sem );
    488513
    489514                __cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have);
     
    501526                                        __external_io & ei = (__external_io&)drop( ctx.ext_sq.queue );
    502527
    503                                         __submit(&ctx, ei.idxs, ei.have, ei.lazy);
     528                                        __submit_only(&ctx, ei.idxs, ei.have);
    504529
    505530                                        post( ei.sem );
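
    The drain path above now gives up immediately when another processor already holds the completion-queue lock (the new __atomic_try_acquire call). A minimal sketch of that try-acquire/release idea over a plain byte flag, in illustrative C rather than the libcfa implementation, is:

        #include <stdbool.h>

        /* Illustrative try-lock over a byte flag: return false instead of waiting
           when the flag is already held, so the caller can simply skip the drain. */
        static bool try_acquire( volatile bool * lock ) {
                return !__atomic_test_and_set( lock, __ATOMIC_ACQUIRE );
        }

        static void release( volatile bool * lock ) {
                __atomic_clear( lock, __ATOMIC_RELEASE );
        }

    Bailing out instead of spinning keeps two processors from contending over the same completion queue; the caller simply reports that nothing was drained this time.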
  • libcfa/src/concurrency/io/setup.cfa

    ref3c383 rd672350  
    3939
    4040#else
     41#pragma GCC diagnostic push
     42#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
    4143        #include <errno.h>
    4244        #include <stdint.h>
     
    5658
    5759        #include "bitmanip.hfa"
    58         #include "kernel_private.hfa"
     60        #include "fstream.hfa"
     61        #include "kernel/private.hfa"
    5962        #include "thread.hfa"
     63#pragma GCC diagnostic pop
    6064
    6165        void ?{}(io_context_params & this) {
     
    111115                this.ext_sq.empty = true;
    112116                (this.ext_sq.queue){};
    113                 __io_uring_setup( this, cl.io.params, proc->idle_fd );
     117                __io_uring_setup( this, cl.io.params, proc->idle_wctx.evfd );
    114118                __cfadbg_print_safe(io_core, "Kernel I/O : Created ring for io_context %u (%p)\n", this.fd, &this);
    115119        }
     
    121125                __cfadbg_print_safe(io_core, "Kernel I/O : Destroyed ring for io_context %u\n", this.fd);
    122126        }
    123 
    124         extern void __disable_interrupts_hard();
    125         extern void __enable_interrupts_hard();
    126127
    127128        static void __io_uring_setup( $io_context & this, const io_context_params & params_in, int procfd ) {
     
    213214
    214215                // completion queue
     216                cq.lock      = 0;
    215217                cq.head      = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
    216218                cq.tail      = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
     
    226228                        __cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd);
    227229
    228                         __disable_interrupts_hard();
    229 
    230230                        int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1);
    231231                        if (ret < 0) {
    232232                                abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno));
    233233                        }
    234 
    235                         __enable_interrupts_hard();
    236234
    237235                        __cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd);
     
    258256                struct __sub_ring_t & sq = this.sq;
    259257                struct __cmp_ring_t & cq = this.cq;
     258                {
     259                        __u32 fhead = sq.free_ring.head;
     260                        __u32 ftail = sq.free_ring.tail;
     261
     262                        __u32 total = *sq.num;
     263                        __u32 avail = ftail - fhead;
     264
     265                        if(avail != total) abort | "Processor (" | (void*)this.proc | ") tearing down ring with" | (total - avail) | "entries allocated but not submitted, out of" | total;
     266                }
    260267
    261268                // unmap the submit queue entries

  • libcfa/src/concurrency/io/types.hfa

    ref3c383 rd672350  
    2323#include "bits/locks.hfa"
    2424#include "bits/queue.hfa"
     25#include "iofwd.hfa"
    2526#include "kernel/fwd.hfa"
    2627
     
    7778
    7879        struct __cmp_ring_t {
     80                volatile bool lock;
     81
    7982                // Head and tail of the ring
    8083                volatile __u32 * head;
     
    170173        // void __ioctx_prepare_block($io_context & ctx);
    171174#endif
    172 
    173 //-----------------------------------------------------------------------
    174 // IO user data
    175 struct io_future_t {
    176         future_t self;
    177         __s32 result;
    178 };
    179 
    180 static inline {
    181         thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {
    182                 this.result = result;
    183                 return fulfil(this.self, do_unpark);
    184         }
    185 
    186         // Wait for the future to be fulfilled
    187         bool wait     ( io_future_t & this ) { return wait     (this.self); }
    188         void reset    ( io_future_t & this ) { return reset    (this.self); }
    189         bool available( io_future_t & this ) { return available(this.self); }
    190 }
  • libcfa/src/concurrency/iofwd.hfa

    ref3c383 rd672350  
    1919extern "C" {
    2020        #include <asm/types.h>
     21        #include <sys/stat.h> // needed for mode_t
    2122        #if CFA_HAVE_LINUX_IO_URING_H
    2223                #include <linux/io_uring.h>
     
    2425}
    2526#include "bits/defs.hfa"
     27#include "kernel/fwd.hfa"
    2628#include "time.hfa"
    2729
     
    4749
    4850struct cluster;
    49 struct io_future_t;
    5051struct $io_context;
    5152
     
    5758
    5859struct io_uring_sqe;
     60
     61//-----------------------------------------------------------------------
     62// IO user data
     63struct io_future_t {
     64        future_t self;
     65        __s32 result;
     66};
     67
     68static inline {
     69        thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {
     70                this.result = result;
     71                return fulfil(this.self, do_unpark);
     72        }
     73
     74        // Wait for the future to be fulfilled
     75        bool wait     ( io_future_t & this ) { return wait     (this.self); }
     76        void reset    ( io_future_t & this ) { return reset    (this.self); }
     77        bool available( io_future_t & this ) { return available(this.self); }
     78}
    5979
    6080//----------
     
     133153// Check if a function blocks only the user thread
    134154bool has_user_level_blocking( fptr_t func );
     155
     156#if CFA_HAVE_LINUX_IO_URING_H
     157        static inline void zero_sqe(struct io_uring_sqe * sqe) {
     158                sqe->flags = 0;
     159                sqe->ioprio = 0;
     160                sqe->fd = 0;
     161                sqe->off = 0;
     162                sqe->addr = 0;
     163                sqe->len = 0;
     164                sqe->fsync_flags = 0;
     165                sqe->__pad2[0] = 0;
     166                sqe->__pad2[1] = 0;
     167                sqe->__pad2[2] = 0;
     168                sqe->fd = 0;
     169                sqe->off = 0;
     170                sqe->addr = 0;
     171                sqe->len = 0;
     172        }
     173#endif
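
    The zero_sqe helper above clears a submission-queue entry field by field before it is reused. As a hedged sketch of how a cleared SQE is then filled for a simple request, in plain C against the kernel's io_uring ABI (assuming a kernel that supports IORING_OP_READ; this is not the CFA wrapper code):

        #include <string.h>
        #include <linux/io_uring.h>

        /* Illustrative only: fill a cleared SQE for a plain read.  Field names follow
           the io_uring ABI; the surrounding submission plumbing is omitted. */
        static void prep_read( struct io_uring_sqe * sqe, int fd, void * buf,
                               unsigned len, __u64 off, __u64 user_data ) {
                memset( sqe, 0, sizeof(*sqe) );             /* same effect as zeroing field by field */
                sqe->opcode    = IORING_OP_READ;
                sqe->fd        = fd;
                sqe->addr      = (unsigned long)buf;
                sqe->len       = len;
                sqe->off       = off;
                sqe->user_data = user_data;
        }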
  • libcfa/src/concurrency/kernel.cfa

    ref3c383 rd672350  
    1919// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__
    2020
     21#pragma GCC diagnostic push
     22#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
     23
    2124//C Includes
    2225#include <errno.h>
     
    2528#include <signal.h>
    2629#include <unistd.h>
     30
    2731extern "C" {
    2832        #include <sys/eventfd.h>
     
    3135
    3236//CFA Includes
    33 #include "kernel_private.hfa"
     37#include "kernel/private.hfa"
    3438#include "preemption.hfa"
    3539#include "strstream.hfa"
     
    4044#define __CFA_INVOKE_PRIVATE__
    4145#include "invoke.h"
     46#pragma GCC diagnostic pop
    4247
    4348#if !defined(__CFA_NO_STATISTICS__)
     
    131136static void mark_awake(__cluster_proc_list & idles, processor & proc);
    132137
    133 extern void __cfa_io_start( processor * );
    134 extern bool __cfa_io_drain( processor * );
     138extern bool __cfa_io_drain( $io_context * );
    135139extern bool __cfa_io_flush( processor *, int min_comp );
    136 extern void __cfa_io_stop ( processor * );
    137140static inline bool __maybe_io_drain( processor * );
    138141
     
    159162        verify(this);
    160163
    161         io_future_t future; // used for idle sleep when io_uring is present
    162         future.self.ptr = 1p;  // mark it as already fulfilled so we know if there is a pending request or not
    163         eventfd_t idle_val;
    164         iovec idle_iovec = { &idle_val, sizeof(idle_val) };
    165 
    166         __cfa_io_start( this );
     164        /* paranoid */ verify( this->idle_wctx.ftr   != 0p );
     165        /* paranoid */ verify( this->idle_wctx.rdbuf != 0p );
     166
     167        // used for idle sleep when io_uring is present
     168        // mark it as already fulfilled so we know if there is a pending request or not
     169        this->idle_wctx.ftr->self.ptr = 1p;
     170        iovec idle_iovec = { this->idle_wctx.rdbuf, sizeof(eventfd_t) };
    167171
    168172        __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
     
    231235                                }
    232236
    233                                 idle_sleep( this, future, idle_iovec );
     237                                idle_sleep( this, *this->idle_wctx.ftr, idle_iovec );
    234238
    235239                                // We were woken up, remove self from idle
     
    251255                        if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
    252256
    253                         if(this->io.pending && !this->io.dirty) {
     257                        if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
    254258                                __IO_STATS__(true, io.flush.dirty++; )
    255259                                __cfa_io_flush( this, 0 );
     
    259263                __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
    260264        }
    261 
    262         for(int i = 0; !available(future); i++) {
    263                 if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has bin spinning on a flush after exit loop.\n", 60);
    264                 __cfa_io_flush( this, 1 );
    265         }
    266 
    267         __cfa_io_stop( this );
    268265
    269266        post( this->terminated );
     
    634631
    635632        int fd = 1;
    636         if( __atomic_load_n(&fdp->fd, __ATOMIC_SEQ_CST) != 1 ) {
    637                 fd = __atomic_exchange_n(&fdp->fd, 1, __ATOMIC_RELAXED);
     633        if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
     634                fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
    638635        }
    639636
     
    677674        __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
    678675
    679         this->idle_wctx.fd = 1;
     676        this->idle_wctx.sem = 1;
    680677
    681678        eventfd_t val;
    682679        val = 1;
    683         eventfd_write( this->idle_fd, val );
     680        eventfd_write( this->idle_wctx.evfd, val );
    684681
    685682        /* paranoid */ verify( ! __preemption_enabled() );
     
    689686        // Tell everyone we are ready to go do sleep
    690687        for() {
    691                 int expected = this->idle_wctx.fd;
     688                int expected = this->idle_wctx.sem;
    692689
    693690                // Someone already told us to wake-up! No time for a nap.
     
    695692
    696693                // Try to mark that we are going to sleep
    697                 if(__atomic_compare_exchange_n(&this->idle_wctx.fd, &expected, this->idle_fd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
     694                if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
    698695                        // Every one agreed, taking a nap
    699696                        break;
     
    713710                {
    714711                        eventfd_t val;
    715                         ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
     712                        ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
    716713                        if(ret < 0) {
    717714                                switch((int)errno) {
     
    740737                        reset(future);
    741738
    742                         __kernel_read(this, future, iov, this->idle_fd );
     739                        __kernel_read(this, future, iov, this->idle_wctx.evfd );
    743740                }
    744741
     
    750747        __STATS__(true, ready.sleep.halts++; )
    751748
    752         proc.idle_wctx.fd = 0;
     749        proc.idle_wctx.sem = 0;
    753750
    754751        /* paranoid */ verify( ! __preemption_enabled() );
     
    842839                if(head == tail) return false;
    843840                ready_schedule_lock();
    844                 ret = __cfa_io_drain( proc );
     841                ret = __cfa_io_drain( ctx );
    845842                ready_schedule_unlock();
    846843        #endif
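
    Idle sleep above is built on an eventfd: the sleeping processor blocks reading it (directly or through io_uring) and a waker writes to it. A minimal sketch of that park/wake pair in plain C (illustrative only; the real code additionally negotiates through idle_wctx.sem before committing to sleep) is:

        #include <unistd.h>
        #include <sys/eventfd.h>

        /* Illustrative eventfd park/wake: the sleeper blocks in read(2) until some
           waker writes a non-zero count. */
        static int make_parker( void ) {
                return eventfd( 0, 0 );
        }

        static void park( int evfd ) {
                eventfd_t val;
                if ( read( evfd, &val, sizeof(val) ) < 0 ) {
                        /* interrupted; real code retries on EINTR */
                }
        }

        static void wake( int evfd ) {
                eventfd_write( evfd, 1 );                   /* bumps the counter, unblocking read */
        }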
  • libcfa/src/concurrency/kernel.hfa

    ref3c383 rd672350  
    4848extern struct cluster * mainCluster;
    4949
    50 // Processor id, required for scheduling threads
    51 
    52 
      50// Coroutine used by processors for the 2-step context switch
    5351coroutine processorCtx_t {
    5452        struct processor * proc;
    5553};
    5654
    57 
     55struct io_future_t;
     56
     57// Information needed for idle sleep
    5858struct __fd_waitctx {
    59         volatile int fd;
     59        // semaphore/future like object
     60        // values can be 0, 1 or some file descriptor.
     61        // 0 - is the default state
     62        // 1 - means the proc should wake-up immediately
      63        // FD - means the proc is going to sleep and should be woken by writing to the FD.
     64        volatile int sem;
     65
     66        // The event FD that corresponds to this processor
     67        int evfd;
     68
     69        // buffer into which the proc will read from evfd
     70        // unused if not using io_uring for idle sleep
     71        void * rdbuf;
     72
     73        // future use to track the read of the eventfd
     74        // unused if not using io_uring for idle sleep
     75        io_future_t * ftr;
    6076};
    6177
     
    92108        struct {
    93109                $io_context * ctx;
    94                 bool pending;
    95                 bool dirty;
     110                unsigned id;
     111                unsigned target;
     112                volatile bool pending;
     113                volatile bool dirty;
    96114        } io;
    97115
     
    103121        bool pending_preemption;
    104122
    105         // Idle lock (kernel semaphore)
    106         int idle_fd;
    107 
    108         // Idle waitctx
     123        // context for idle sleep
    109124        struct __fd_waitctx idle_wctx;
    110125
     
    155170void ^?{}(__intrusive_lane_t & this);
    156171
    157 // Aligned timestamps which are used by the relaxed ready queue
     172// Aligned timestamps which are used by the ready queue and io subsystem
    158173struct __attribute__((aligned(128))) __timestamp_t {
    159174        volatile unsigned long long tv;
     
    161176};
    162177
     178static inline void  ?{}(__timestamp_t & this) { this.tv = 0; this.ma = 0; }
     179static inline void ^?{}(__timestamp_t &) {}
     180
     181
    163182struct __attribute__((aligned(16))) __cache_id_t {
    164183        volatile unsigned id;
    165184};
    166 
    167 // Aligned timestamps which are used by the relaxed ready queue
    168 struct __attribute__((aligned(128))) __help_cnts_t {
    169         volatile unsigned long long src;
    170         volatile unsigned long long dst;
    171         volatile unsigned long long tri;
    172 };
    173 
    174 static inline void  ?{}(__timestamp_t & this) { this.tv = 0; this.ma = 0; }
    175 static inline void ^?{}(__timestamp_t &) {}
    176 
    177 struct __attribute__((aligned(128))) __ready_queue_caches_t;
    178 void  ?{}(__ready_queue_caches_t & this);
    179 void ^?{}(__ready_queue_caches_t & this);
    180 
    181 //TODO adjust cache size to ARCHITECTURE
    182 // Structure holding the ready queue
    183 struct __ready_queue_t {
    184         // Data tracking the actual lanes
    185         // On a seperate cacheline from the used struct since
    186         // used can change on each push/pop but this data
    187         // only changes on shrink/grow
    188         struct {
    189                 // Arary of lanes
    190                 __intrusive_lane_t * volatile data;
    191 
    192                 // Array of times
    193                 __timestamp_t * volatile tscs;
    194 
    195                 __cache_id_t * volatile caches;
    196 
    197                 // Array of stats
    198                 __help_cnts_t * volatile help;
    199 
    200                 // Number of lanes (empty or not)
    201                 volatile size_t count;
    202         } lanes;
    203 };
    204 
    205 void  ?{}(__ready_queue_t & this);
    206 void ^?{}(__ready_queue_t & this);
    207 #if !defined(__CFA_NO_STATISTICS__)
    208         unsigned cnt(const __ready_queue_t & this, unsigned idx);
    209 #endif
    210185
    211186// Idle Sleep
     
    233208// Cluster
    234209struct __attribute__((aligned(128))) cluster {
    235         // Ready queue for threads
    236         __ready_queue_t ready_queue;
     210        struct {
     211                struct {
      212                        // Array of subqueues
     213                        __intrusive_lane_t * data;
     214
     215                        // Time since subqueues were processed
     216                        __timestamp_t * tscs;
     217
     218                        // Number of subqueue / timestamps
     219                        size_t count;
     220                } readyQ;
     221
     222                struct {
      223                        // Array of $io_context
     224                        $io_context ** data;
     225
     226                        // Time since subqueues were processed
     227                        __timestamp_t * tscs;
     228
     229                        // Number of I/O subqueues
     230                        size_t count;
     231                } io;
     232
     233                // Cache each kernel thread belongs to
     234                __cache_id_t * caches;
     235        } sched;
     236
     237        // // Ready queue for threads
     238        // __ready_queue_t ready_queue;
    237239
    238240        // Name of the cluster
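
    The sem field documented above acts as a tiny state machine: 0 while the processor is running, 1 once it has been told to wake, or the event FD while it is committing to sleep. A hedged sketch of that handshake in illustrative C (not the libcfa code) is:

        #include <stdbool.h>

        /* Illustrative sketch of the sem handshake described above:
           0 = running, 1 = told to wake, evfd (> 1) = committing to sleep on that eventfd. */
        static bool try_to_sleep( volatile int * sem, int evfd ) {
                int expected = *sem;
                if ( expected == 1 ) return false;          /* someone already told us to wake up */
                return __atomic_compare_exchange_n( sem, &expected, evfd, false,
                                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST );
        }

        static int notify( volatile int * sem ) {
                return __atomic_exchange_n( sem, 1, __ATOMIC_SEQ_CST );  /* old value > 1 is the sleeper's eventfd */
        }

    If notify returns a value greater than 1, the processor was already asleep and the waker must also write to that eventfd, as in the kernel.cfa hunk above.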
  • libcfa/src/concurrency/kernel/fwd.hfa

    ref3c383 rd672350  
    347347                                        struct oneshot * want = expected == 0p ? 1p : 2p;
    348348                                        if(__atomic_compare_exchange_n(&this.ptr, &expected, want, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
    349                                                 if( expected == 0p ) { /* paranoid */ verify( this.ptr == 1p); return 0p; }
     349                                                if( expected == 0p ) { return 0p; }
    350350                                                thread$ * ret = post( *expected, do_unpark );
    351351                                                __atomic_store_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
  • libcfa/src/concurrency/kernel/private.hfa

    ref3c383 rd672350  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // kernel_private.hfa --
     7// kernel/private.hfa --
    88//
    99// Author           : Thierry Delisle
     
    1717
    1818#if !defined(__cforall_thread__)
    19         #error kernel_private.hfa should only be included in libcfathread source
     19        #error kernel/private.hfa should only be included in libcfathread source
    2020#endif
    2121
     
    3333#else
    3434        #ifndef _GNU_SOURCE
    35         #error kernel_private requires gnu_source
     35        #error kernel/private requires gnu_source
    3636        #endif
    3737        #include <sched.h>
     
    5959
    6060extern bool __preemption_enabled();
     61
     62enum {
     63        PREEMPT_NORMAL    = 0,
     64        PREEMPT_TERMINATE = 1,
     65        PREEMPT_IO = 2,
     66};
    6167
    6268static inline void __disable_interrupts_checked() {
     
    359365void ready_queue_shrink(struct cluster * cltr);
    360366
     367//-----------------------------------------------------------------------
      368// Deconstruct the ready queue of a cluster
     369void ready_queue_close(struct cluster * cltr);
    361370
    362371// Local Variables: //
  • libcfa/src/concurrency/kernel/startup.cfa

    ref3c383 rd672350  
    1818
    1919// C Includes
    20 #include <errno.h>                                                                              // errno
     20#include <errno.h>                                      // errno
    2121#include <signal.h>
    22 #include <string.h>                                                                             // strerror
    23 #include <unistd.h>                                                                             // sysconf
     22#include <string.h>                                     // strerror
     23#include <unistd.h>                                     // sysconf
    2424
    2525extern "C" {
    26         #include <limits.h>                                                                     // PTHREAD_STACK_MIN
    27         #include <unistd.h>                                                                     // syscall
    28         #include <sys/eventfd.h>                                                        // eventfd
    29         #include <sys/mman.h>                                                           // mprotect
    30         #include <sys/resource.h>                                                       // getrlimit
     26        #include <limits.h>                             // PTHREAD_STACK_MIN
     27        #include <unistd.h>                             // syscall
     28        #include <sys/eventfd.h>                        // eventfd
     29        #include <sys/mman.h>                           // mprotect
     30        #include <sys/resource.h>                       // getrlimit
    3131}
    3232
    3333// CFA Includes
    34 #include "kernel_private.hfa"
    35 #include "startup.hfa"                                                                  // STARTUP_PRIORITY_XXX
     34#include "kernel/private.hfa"
     35#include "iofwd.hfa"
     36#include "startup.hfa"                                  // STARTUP_PRIORITY_XXX
    3637#include "limits.hfa"
    3738#include "math.hfa"
     
    9798extern void __kernel_alarm_startup(void);
    9899extern void __kernel_alarm_shutdown(void);
     100extern void __cfa_io_start( processor * );
     101extern void __cfa_io_stop ( processor * );
    99102
    100103//-----------------------------------------------------------------------------
     
    111114KERNEL_STORAGE(__stack_t,            mainThreadCtx);
    112115KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
     116KERNEL_STORAGE(eventfd_t,            mainIdleEventFd);
     117KERNEL_STORAGE(io_future_t,          mainIdleFuture);
    113118#if !defined(__CFA_NO_STATISTICS__)
    114119KERNEL_STORAGE(__stats_t, mainProcStats);
     
    224229        (*mainProcessor){};
    225230
     231        mainProcessor->idle_wctx.rdbuf = &storage_mainIdleEventFd;
     232        mainProcessor->idle_wctx.ftr   = (io_future_t*)&storage_mainIdleFuture;
     233        /* paranoid */ verify( sizeof(storage_mainIdleEventFd) == sizeof(eventfd_t) );
     234
    226235        register_tls( mainProcessor );
     236        __cfa_io_start( mainProcessor );
    227237
    228238        // Start by initializing the main thread
     
    304314        mainProcessor->local_data = 0p;
    305315
     316        __cfa_io_stop( mainProcessor );
    306317        unregister_tls( mainProcessor );
    307318
     
    355366        register_tls( proc );
    356367
     368        __cfa_io_start( proc );
     369
     370        // used for idle sleep when io_uring is present
     371        io_future_t future;
     372        eventfd_t idle_buf;
     373        proc->idle_wctx.ftr = &future;
     374        proc->idle_wctx.rdbuf = &idle_buf;
     375
     376
    357377        // SKULLDUGGERY: We want to create a context for the processor coroutine
    358378        // which is needed for the 2-step context switch. However, there is no reason
     
    381401        // Main routine of the core returned, the core is now fully terminated
    382402        __cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);
     403
     404        __cfa_io_stop( proc );
    383405
    384406        #if !defined(__CFA_NO_STATISTICS__)
     
    515537        this.rdq.its = 0;
    516538        this.rdq.itr = 0;
    517         this.rdq.id  = MAX;
     539        this.rdq.id  = 0;
    518540        this.rdq.target = MAX;
    519541        this.rdq.last = MAX;
     
    532554        this.local_data = 0p;
    533555
    534         this.idle_fd = eventfd(0, 0);
    535         if (idle_fd < 0) {
     556        idle_wctx.evfd = eventfd(0, 0);
     557        if (idle_wctx.evfd < 0) {
    536558                abort("KERNEL ERROR: PROCESSOR EVENTFD - %s\n", strerror(errno));
    537559        }
    538560
    539         this.idle_wctx.fd = 0;
     561        idle_wctx.sem = 0;
    540562
    541563        // I'm assuming these two are reserved for standard input and output
    542564        // so I'm using them as sentinels with idle_wctx.
    543         /* paranoid */ verify( this.idle_fd != 0 );
    544         /* paranoid */ verify( this.idle_fd != 1 );
     565        /* paranoid */ verify( idle_wctx.evfd != 0 );
     566        /* paranoid */ verify( idle_wctx.evfd != 1 );
    545567
    546568        #if !defined(__CFA_NO_STATISTICS__)
     
    554576// Not a ctor, it just preps the destruction but should not destroy members
    555577static void deinit(processor & this) {
    556         close(this.idle_fd);
     578        close(this.idle_wctx.evfd);
    557579}
    558580
     
    605627        this.name = name;
    606628        this.preemption_rate = preemption_rate;
    607         ready_queue{};
     629        this.sched.readyQ.data = 0p;
     630        this.sched.readyQ.tscs = 0p;
     631        this.sched.readyQ.count = 0;
     632        this.sched.io.tscs = 0p;
     633        this.sched.caches = 0p;
    608634
    609635        #if !defined(__CFA_NO_STATISTICS__)
     
    644670        // Unlock the RWlock
    645671        ready_mutate_unlock( last_size );
     672
     673        ready_queue_close( &this );
     674        /* paranoid */ verify( this.sched.readyQ.data == 0p );
     675        /* paranoid */ verify( this.sched.readyQ.tscs == 0p );
     676        /* paranoid */ verify( this.sched.readyQ.count == 0 );
     677        /* paranoid */ verify( this.sched.io.tscs == 0p );
     678        /* paranoid */ verify( this.sched.caches == 0p );
     679
    646680        enable_interrupts( false ); // Don't poll, could be in main cluster
     681
    647682
    648683        #if !defined(__CFA_NO_STATISTICS__)
     
    736771        check( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
    737772
    738         size_t stacksize = DEFAULT_STACK_SIZE;
     773        size_t stacksize = max( PTHREAD_STACK_MIN, DEFAULT_STACK_SIZE );
    739774
    740775        void * stack;
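
    One small change above clamps the kernel-thread stack size to PTHREAD_STACK_MIN. A hedged sketch of that clamping applied to a pthread attribute, in illustrative C (the real startup code also allocates and installs the stack itself), is:

        #include <stddef.h>
        #include <limits.h>                                 /* PTHREAD_STACK_MIN */
        #include <pthread.h>

        /* Illustrative only: never ask for less than the pthread minimum stack size. */
        static int set_stack_size( pthread_attr_t * attr, size_t requested ) {
                size_t stacksize = requested < PTHREAD_STACK_MIN ? PTHREAD_STACK_MIN : requested;
                return pthread_attr_setstacksize( attr, stacksize );
        }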
  • libcfa/src/concurrency/locks.cfa

    ref3c383 rd672350  
    1919
    2020#include "locks.hfa"
    21 #include "kernel_private.hfa"
     21#include "kernel/private.hfa"
    2222
    2323#include <kernel.hfa>
  • libcfa/src/concurrency/locks.hfa

    ref3c383 rd672350  
    164164}
    165165
    166 static inline bool lock(linear_backoff_then_block_lock & this) with(this) {
     166static inline void lock(linear_backoff_then_block_lock & this) with(this) {
    167167        // if owner just return
    168         if (active_thread() == owner) return true;
     168        if (active_thread() == owner) return;
    169169        size_t compare_val = 0;
    170170        int spin = spin_start;
     
    172172        for( ;; ) {
    173173                compare_val = 0;
    174                 if (internal_try_lock(this, compare_val)) return true;
     174                if (internal_try_lock(this, compare_val)) return;
    175175                if (2 == compare_val) break;
    176176                for (int i = 0; i < spin; i++) Pause();
     
    179179        }
    180180
    181         if(2 != compare_val && try_lock_contention(this)) return true;
     181        if(2 != compare_val && try_lock_contention(this)) return;
    182182        // block until signalled
    183         while (block(this)) if(try_lock_contention(this)) return true;
    184 
    185         // this should never be reached as block(this) always returns true
    186         return false;
     183        while (block(this)) if(try_lock_contention(this)) return;
    187184}
    188185
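
Note: lock() on linear_backoff_then_block_lock now returns void; block() always succeeds, so the old 'return false' tail was unreachable and the bool result carried no information. A rough standalone C11 sketch of the same spin-then-block shape (toy_lock, the spin constants, and sched_yield() standing in for parking the thread are all assumptions for illustration):

    #include <stdatomic.h>
    #include <sched.h>

    typedef struct { atomic_flag held; } toy_lock;     // initialise with { ATOMIC_FLAG_INIT }

    static void toy_lock_acquire(toy_lock * l) {
        int spin = 4;                                   // assumed initial spin count
        const int spin_limit = 1024;
        for (;;) {
            for (int i = 0; i < spin; i++)
                if (!atomic_flag_test_and_set_explicit(&l->held, memory_order_acquire))
                    return;                             // acquired while spinning
            if (spin < spin_limit) spin += spin;        // grow the backoff window
            sched_yield();                              // stand-in for "block until signalled"
        }
    }

    static void toy_lock_release(toy_lock * l) {
        atomic_flag_clear_explicit(&l->held, memory_order_release);
    }
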
  • libcfa/src/concurrency/monitor.cfa

    ref3c383 rd672350  
    2222#include <inttypes.h>
    2323
    24 #include "kernel_private.hfa"
     24#include "kernel/private.hfa"
    2525
    2626#include "bits/algorithm.hfa"
  • libcfa/src/concurrency/mutex.cfa

    ref3c383 rd672350  
    2121#include "mutex.hfa"
    2222
    23 #include "kernel_private.hfa"
     23#include "kernel/private.hfa"
    2424
    2525//-----------------------------------------------------------------------------
  • libcfa/src/concurrency/mutex_stmt.hfa

    ref3c383 rd672350  
    1212};
    1313
     14
     15struct __mutex_stmt_lock_guard {
     16    void ** lockarr;
     17    __lock_size_t count;
     18};
     19
     20static inline void ?{}( __mutex_stmt_lock_guard & this, void * lockarr [], __lock_size_t count  ) {
     21    this.lockarr = lockarr;
     22    this.count = count;
     23
     24    // Sort locks based on address
     25    __libcfa_small_sort(this.lockarr, count);
     26
     27    // acquire locks in order
     28    // for ( size_t i = 0; i < count; i++ ) {
     29    //     lock(*this.lockarr[i]);
     30    // }
     31}
     32
     33static inline void ^?{}( __mutex_stmt_lock_guard & this ) with(this) {
     34    // for ( size_t i = count; i > 0; i-- ) {
     35    //     unlock(*lockarr[i - 1]);
     36    // }
     37}
     38
    1439forall(L & | is_lock(L)) {
    15 
    16     struct __mutex_stmt_lock_guard {
    17         L ** lockarr;
    18         __lock_size_t count;
    19     };
    20    
    21     static inline void ?{}( __mutex_stmt_lock_guard(L) & this, L * lockarr [], __lock_size_t count  ) {
    22         this.lockarr = lockarr;
    23         this.count = count;
    24 
    25         // Sort locks based on address
    26         __libcfa_small_sort(this.lockarr, count);
    27 
    28         // acquire locks in order
    29         for ( size_t i = 0; i < count; i++ ) {
    30             lock(*this.lockarr[i]);
    31         }
    32     }
    33    
    34     static inline void ^?{}( __mutex_stmt_lock_guard(L) & this ) with(this) {
    35         for ( size_t i = count; i > 0; i-- ) {
    36             unlock(*lockarr[i - 1]);
    37         }
    38     }
    3940
    4041    struct scoped_lock {
     
    5152    }
    5253
    53     static inline L * __get_ptr( L & this ) {
     54    static inline void * __get_mutexstmt_lock_ptr( L & this ) {
    5455        return &this;
    5556    }
    5657
    57     static inline L __get_type( L & this );
     58    static inline L __get_mutexstmt_lock_type( L & this );
    5859
    59     static inline L __get_type( L * this );
     60    static inline L __get_mutexstmt_lock_type( L * this );
    6061}
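
Note: the mutex-statement guard above now keeps its locks as an untyped void * array and sorts them by address with __libcfa_small_sort; taking multiple locks in one global order is the standard way to avoid deadlock between overlapping mutex statements (the acquire/release loops themselves are commented out in this revision). An illustrative C sketch of the address-ordering idea, using pthread mutexes rather than the CFA lock trait:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    static int ptr_cmp(const void * a, const void * b) {
        uintptr_t pa = (uintptr_t)*(void * const *)a;
        uintptr_t pb = (uintptr_t)*(void * const *)b;
        return (pa > pb) - (pa < pb);
    }

    static void lock_all(pthread_mutex_t * locks[], size_t count) {
        qsort(locks, count, sizeof locks[0], ptr_cmp);   // sort by address first
        for (size_t i = 0; i < count; i++)               // then acquire in that order
            pthread_mutex_lock(locks[i]);
    }

    static void unlock_all(pthread_mutex_t * locks[], size_t count) {
        for (size_t i = count; i > 0; i--)               // release in reverse order
            pthread_mutex_unlock(locks[i - 1]);
    }
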
  • libcfa/src/concurrency/preemption.cfa

    ref3c383 rd672350  
    1010// Created On       : Mon Jun 5 14:20:42 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Nov  6 07:42:13 2020
    13 // Update Count     : 54
     12// Last Modified On : Thu Feb 17 11:18:57 2022
     13// Update Count     : 59
    1414//
    1515
     
    3131#include "bits/debug.hfa"
    3232#include "bits/signal.hfa"
    33 #include "kernel_private.hfa"
     33#include "kernel/private.hfa"
    3434
    3535
     
    9797}
    9898
    99 enum {
    100         PREEMPT_NORMAL    = 0,
    101         PREEMPT_TERMINATE = 1,
    102 };
    103 
    10499//=============================================================================================
    105100// Kernel Preemption logic
     
    243238//----------
    244239// special case for preemption since used often
    245 bool __preemption_enabled() {
     240__attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() {
     246241        // create an assembler label before
    247242        // marked as clobber all to avoid movement
     
    664659        choose(sfp->si_value.sival_int) {
    665660                case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
     661                case PREEMPT_IO       : ;// I/O asked to stop spinning, nothing to do here
    666662                case PREEMPT_TERMINATE: verify( __atomic_load_n( &__cfaabi_tls.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
    667663                default:
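
Note: the preemption handler above gains a PREEMPT_IO case next to PREEMPT_NORMAL and PREEMPT_TERMINATE, all dispatched on sfp->si_value.sival_int. A minimal C sketch of carrying such an integer payload with a queued signal (SIGUSR1 and the enum values here are assumptions for the demo, not the values libcfa uses):

    #define _POSIX_C_SOURCE 199309L
    #include <signal.h>
    #include <string.h>
    #include <unistd.h>

    enum { DEMO_NORMAL = 0, DEMO_TERMINATE = 1, DEMO_IO = 2 };   // values assumed

    static void handler(int sig, siginfo_t * info, void * ctx) {
        (void)sig; (void)ctx;
        char c = '0' + (char)info->si_value.sival_int;   // the payload the sender attached
        write(STDOUT_FILENO, &c, 1);                     // async-signal-safe output
    }

    int main(void) {
        struct sigaction sa;
        memset(&sa, 0, sizeof sa);
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, 0);

        union sigval val = { .sival_int = DEMO_IO };
        sigqueue(getpid(), SIGUSR1, val);                // deliver the signal with its payload
        return 0;
    }
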
  • libcfa/src/concurrency/ready_queue.cfa

    ref3c383 rd672350  
    2020
    2121
    22 // #define USE_RELAXED_FIFO
    23 // #define USE_WORK_STEALING
    24 // #define USE_CPU_WORK_STEALING
    2522#define USE_AWARE_STEALING
    2623
    2724#include "bits/defs.hfa"
    2825#include "device/cpu.hfa"
    29 #include "kernel_private.hfa"
    30 
    31 #include "stdlib.hfa"
     26#include "kernel/cluster.hfa"
     27#include "kernel/private.hfa"
     28
    3229#include "limits.hfa"
    33 #include "math.hfa"
    34 
    35 #include <errno.h>
    36 #include <unistd.h>
    37 
    38 extern "C" {
    39         #include <sys/syscall.h>  // __NR_xxx
    40 }
     30
     31// #include <errno.h>
     32// #include <unistd.h>
    4133
    4234#include "ready_subqueue.hfa"
     
    5042#endif
    5143
    52 // No overriden function, no environment variable, no define
    53 // fall back to a magic number
    54 #ifndef __CFA_MAX_PROCESSORS__
    55         #define __CFA_MAX_PROCESSORS__ 1024
    56 #endif
    57 
    58 #if   defined(USE_AWARE_STEALING)
    59         #define READYQ_SHARD_FACTOR 2
    60         #define SEQUENTIAL_SHARD 2
    61 #elif defined(USE_CPU_WORK_STEALING)
    62         #define READYQ_SHARD_FACTOR 2
    63 #elif defined(USE_RELAXED_FIFO)
    64         #define BIAS 4
    65         #define READYQ_SHARD_FACTOR 4
    66         #define SEQUENTIAL_SHARD 1
    67 #elif defined(USE_WORK_STEALING)
    68         #define READYQ_SHARD_FACTOR 2
    69         #define SEQUENTIAL_SHARD 2
    70 #else
    71         #error no scheduling strategy selected
    72 #endif
    73 
    7444static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
    7545static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
    7646static inline struct thread$ * search(struct cluster * cltr);
    77 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);
    78 
    79 
    80 // returns the maximum number of processors the RWLock support
    81 __attribute__((weak)) unsigned __max_processors() {
    82         const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
    83         if(!max_cores_s) {
    84                 __cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
    85                 return __CFA_MAX_PROCESSORS__;
    86         }
    87 
    88         char * endptr = 0p;
    89         long int max_cores_l = strtol(max_cores_s, &endptr, 10);
    90         if(max_cores_l < 1 || max_cores_l > 65535) {
    91                 __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
    92                 return __CFA_MAX_PROCESSORS__;
    93         }
    94         if('\0' != *endptr) {
    95                 __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
    96                 return __CFA_MAX_PROCESSORS__;
    97         }
    98 
    99         return max_cores_l;
    100 }
    101 
    102 #if   defined(CFA_HAVE_LINUX_LIBRSEQ)
    103         // No forward declaration needed
    104         #define __kernel_rseq_register rseq_register_current_thread
    105         #define __kernel_rseq_unregister rseq_unregister_current_thread
    106 #elif defined(CFA_HAVE_LINUX_RSEQ_H)
    107         static void __kernel_raw_rseq_register  (void);
    108         static void __kernel_raw_rseq_unregister(void);
    109 
    110         #define __kernel_rseq_register __kernel_raw_rseq_register
    111         #define __kernel_rseq_unregister __kernel_raw_rseq_unregister
    112 #else
    113         // No forward declaration needed
    114         // No initialization needed
    115         static inline void noop(void) {}
    116 
    117         #define __kernel_rseq_register noop
    118         #define __kernel_rseq_unregister noop
    119 #endif
    120 
    121 //=======================================================================
    122 // Cluster wide reader-writer lock
    123 //=======================================================================
    124 void  ?{}(__scheduler_RWLock_t & this) {
    125         this.max   = __max_processors();
    126         this.alloc = 0;
    127         this.ready = 0;
    128         this.data  = alloc(this.max);
    129         this.write_lock  = false;
    130 
    131         /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
    132         /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
    133 
    134 }
    135 void ^?{}(__scheduler_RWLock_t & this) {
    136         free(this.data);
    137 }
    138 
    139 
    140 //=======================================================================
    141 // Lock-Free registering/unregistering of threads
    142 unsigned register_proc_id( void ) with(*__scheduler_lock) {
    143         __kernel_rseq_register();
    144 
    145         bool * handle = (bool *)&kernelTLS().sched_lock;
    146 
    147         // Step - 1 : check if there is already space in the data
    148         uint_fast32_t s = ready;
    149 
    150         // Check among all the ready
    151         for(uint_fast32_t i = 0; i < s; i++) {
    152                 bool * volatile * cell = (bool * volatile *)&data[i]; // Cforall is bugged and the double volatiles causes problems
    153                 /* paranoid */ verify( handle != *cell );
    154 
    155                 bool * null = 0p; // Re-write every loop since compare thrashes it
    156                 if( __atomic_load_n(cell, (int)__ATOMIC_RELAXED) == null
    157                         && __atomic_compare_exchange_n( cell, &null, handle, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
    158                         /* paranoid */ verify(i < ready);
    159                         /* paranoid */ verify( (kernelTLS().sched_id = i, true) );
    160                         return i;
    161                 }
    162         }
    163 
    164         if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->max);
    165 
    166         // Step - 2 : F&A to get a new spot in the array.
    167         uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
    168         if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->max);
    169 
    170         // Step - 3 : Mark space as used and then publish it.
    171         data[n] = handle;
    172         while() {
    173                 unsigned copy = n;
    174                 if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
    175                         && __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    176                         break;
    177                 Pause();
    178         }
    179 
    180         // Return new spot.
    181         /* paranoid */ verify(n < ready);
    182         /* paranoid */ verify( (kernelTLS().sched_id = n, true) );
    183         return n;
    184 }
    185 
    186 void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {
    187         /* paranoid */ verify(id < ready);
    188         /* paranoid */ verify(id == kernelTLS().sched_id);
    189         /* paranoid */ verify(data[id] == &kernelTLS().sched_lock);
    190 
    191         bool * volatile * cell = (bool * volatile *)&data[id]; // Cforall is bugged and the double volatiles causes problems
    192 
    193         __atomic_store_n(cell, 0p, __ATOMIC_RELEASE);
    194 
    195         __kernel_rseq_unregister();
    196 }
    197 
    198 //-----------------------------------------------------------------------
    199 // Writer side : acquire when changing the ready queue, e.g. adding more
    200 //  queues or removing them.
    201 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
    202         /* paranoid */ verify( ! __preemption_enabled() );
    203 
    204         // Step 1 : lock global lock
    205         // It is needed to avoid processors that register mid Critical-Section
    206         //   to simply lock their own lock and enter.
    207         __atomic_acquire( &write_lock );
    208 
    209         // Make sure we won't deadlock ourself
    210         // Checking before acquiring the writer lock isn't safe
    211         // because someone else could have locked us.
    212         /* paranoid */ verify( ! kernelTLS().sched_lock );
    213 
    214         // Step 2 : lock per-proc lock
    215         // Processors that are currently being registered aren't counted
    216         //   but can't be in read_lock or in the critical section.
    217         // All other processors are counted
    218         uint_fast32_t s = ready;
    219         for(uint_fast32_t i = 0; i < s; i++) {
    220                 volatile bool * llock = data[i];
    221                 if(llock) __atomic_acquire( llock );
    222         }
    223 
    224         /* paranoid */ verify( ! __preemption_enabled() );
    225         return s;
    226 }
    227 
    228 void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
    229         /* paranoid */ verify( ! __preemption_enabled() );
    230 
    231         // Step 1 : release local locks
    232         // This must be done while the global lock is held to avoid
     233                 //   threads that were created mid critical section
     234                 //   to race to lock their local locks and have the writer
     235                 //   immediately unlock them
    236         // Alternative solution : return s in write_lock and pass it to write_unlock
    237         for(uint_fast32_t i = 0; i < last_s; i++) {
    238                 volatile bool * llock = data[i];
    239                 if(llock) __atomic_store_n(llock, (bool)false, __ATOMIC_RELEASE);
    240         }
    241 
    242         // Step 2 : release global lock
    243         /*paranoid*/ assert(true == write_lock);
    244         __atomic_store_n(&write_lock, (bool)false, __ATOMIC_RELEASE);
    245 
    246         /* paranoid */ verify( ! __preemption_enabled() );
    247 }
    248 
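
Note: register_proc_id and ready_mutate_lock/unlock above form a distributed reader-writer lock: every processor owns one slot in the data array, a reader only touches its own flag, and a writer takes the global write_lock and then every per-processor flag in turn. A condensed C11 sketch of that shape (the names, the fixed-size array, and sched_yield() in the spin loops are invented for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <sched.h>

    #define MAX_PROCS 64

    static atomic_bool write_flag;                 // writer-side global flag
    static atomic_bool proc_flag[MAX_PROCS];       // one flag per registered processor

    static void spin_acquire(atomic_bool * f) {
        bool expected = false;
        while (!atomic_compare_exchange_weak(f, &expected, true)) {
            expected = false;
            sched_yield();
        }
    }

    static void reader_enter(unsigned id) { spin_acquire(&proc_flag[id]); }      // cheap path
    static void reader_exit (unsigned id) { atomic_store(&proc_flag[id], false); }

    static void writer_enter(unsigned nprocs) {    // global flag, then every reader flag
        spin_acquire(&write_flag);
        for (unsigned i = 0; i < nprocs; i++) spin_acquire(&proc_flag[i]);
    }

    static void writer_exit(unsigned nprocs) {
        for (unsigned i = 0; i < nprocs; i++) atomic_store(&proc_flag[i], false);
        atomic_store(&write_flag, false);
    }
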
    249 //=======================================================================
    250 // caches handling
    251 
    252 struct __attribute__((aligned(128))) __ready_queue_caches_t {
    253         // Count States:
    254         // - 0  : No one is looking after this cache
    255         // - 1  : No one is looking after this cache, BUT it's not empty
    256         // - 2+ : At least one processor is looking after this cache
    257         volatile unsigned count;
    258 };
    259 
    260 void  ?{}(__ready_queue_caches_t & this) { this.count = 0; }
    261 void ^?{}(__ready_queue_caches_t & this) {}
    262 
    263 static inline void depart(__ready_queue_caches_t & cache) {
    264         /* paranoid */ verify( cache.count > 1);
    265         __atomic_fetch_add(&cache.count, -1, __ATOMIC_SEQ_CST);
    266         /* paranoid */ verify( cache.count != 0);
    267         /* paranoid */ verify( cache.count < 65536 ); // This verify assumes no cluster will have more than 65000 kernel threads mapped to a single cache, which could be correct but is super weird.
    268 }
    269 
    270 static inline void arrive(__ready_queue_caches_t & cache) {
    271         // for() {
    272         //      unsigned expected = cache.count;
    273         //      unsigned desired  = 0 == expected ? 2 : expected + 1;
    274         // }
    275 }
    27647
    27748//=======================================================================
    27849// Cforall Ready Queue used for scheduling
    27950//=======================================================================
    280 unsigned long long moving_average(unsigned long long currtsc, unsigned long long instsc, unsigned long long old_avg) {
    281         /* paranoid */ verifyf( currtsc < 45000000000000000, "Suspiciously large current time: %'llu (%llx)\n", currtsc, currtsc );
    282         /* paranoid */ verifyf( instsc  < 45000000000000000, "Suspiciously large insert time: %'llu (%llx)\n", instsc, instsc );
    283         /* paranoid */ verifyf( old_avg < 15000000000000, "Suspiciously large previous average: %'llu (%llx)\n", old_avg, old_avg );
    284 
    285         const unsigned long long new_val = currtsc > instsc ? currtsc - instsc : 0;
    286         const unsigned long long total_weight = 16;
    287         const unsigned long long new_weight   = 4;
    288         const unsigned long long old_weight = total_weight - new_weight;
    289         const unsigned long long ret = ((new_weight * new_val) + (old_weight * old_avg)) / total_weight;
    290         return ret;
    291 }
    292 
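
Note: moving_average() above is an exponentially weighted average with fixed weights, 4/16 for the new sample and 12/16 for the previous average, i.e. avg' = (4*new + 12*old) / 16. Worked example with assumed values: if currtsc - instsc = 1600 cycles and old_avg = 800, then avg' = (4*1600 + 12*800) / 16 = 16000 / 16 = 1000, so a single sample moves the estimate a quarter of the way toward the new value.
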
    293 void ?{}(__ready_queue_t & this) with (this) {
    294         #if defined(USE_CPU_WORK_STEALING)
    295                 lanes.count = cpu_info.hthrd_count * READYQ_SHARD_FACTOR;
    296                 lanes.data = alloc( lanes.count );
    297                 lanes.tscs = alloc( lanes.count );
    298                 lanes.help = alloc( cpu_info.hthrd_count );
    299 
    300                 for( idx; (size_t)lanes.count ) {
    301                         (lanes.data[idx]){};
    302                         lanes.tscs[idx].tv = rdtscl();
    303                         lanes.tscs[idx].ma = rdtscl();
    304                 }
    305                 for( idx; (size_t)cpu_info.hthrd_count ) {
    306                         lanes.help[idx].src = 0;
    307                         lanes.help[idx].dst = 0;
    308                         lanes.help[idx].tri = 0;
    309                 }
    310         #else
    311                 lanes.data   = 0p;
    312                 lanes.tscs   = 0p;
    313                 lanes.caches = 0p;
    314                 lanes.help   = 0p;
    315                 lanes.count  = 0;
    316         #endif
    317 }
    318 
    319 void ^?{}(__ready_queue_t & this) with (this) {
    320         #if !defined(USE_CPU_WORK_STEALING)
    321                 verify( SEQUENTIAL_SHARD == lanes.count );
    322         #endif
    323 
    324         free(lanes.data);
    325         free(lanes.tscs);
    326         free(lanes.caches);
    327         free(lanes.help);
    328 }
    329 
    330 //-----------------------------------------------------------------------
    331 #if defined(USE_AWARE_STEALING)
    332         __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
    333                 processor * const proc = kernelTLS().this_processor;
    334                 const bool external = (!proc) || (cltr != proc->cltr);
    335                 const bool remote   = hint == UNPARK_REMOTE;
    336 
    337                 unsigned i;
    338                 if( external || remote ) {
    339                         // Figure out where thread was last time and make sure it's valid
    340                         /* paranoid */ verify(thrd->preferred >= 0);
    341                         if(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count) {
    342                                 /* paranoid */ verify(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count);
    343                                 unsigned start = thrd->preferred * READYQ_SHARD_FACTOR;
    344                                 do {
    345                                         unsigned r = __tls_rand();
    346                                         i = start + (r % READYQ_SHARD_FACTOR);
    347                                         /* paranoid */ verify( i < lanes.count );
    348                                         // If we can't lock it retry
    349                                 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    350                         } else {
    351                                 do {
    352                                         i = __tls_rand() % lanes.count;
    353                                 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    354                         }
     51// void ?{}(__ready_queue_t & this) with (this) {
     52//      lanes.data   = 0p;
     53//      lanes.tscs   = 0p;
     54//      lanes.caches = 0p;
     55//      lanes.count  = 0;
     56// }
     57
     58// void ^?{}(__ready_queue_t & this) with (this) {
     59//      free(lanes.data);
     60//      free(lanes.tscs);
     61//      free(lanes.caches);
     62// }
     63
     64//-----------------------------------------------------------------------
     65__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->sched) {
     66        processor * const proc = kernelTLS().this_processor;
     67        const bool external = (!proc) || (cltr != proc->cltr);
     68        const bool remote   = hint == UNPARK_REMOTE;
     69        const size_t lanes_count = readyQ.count;
     70
     71        /* paranoid */ verify( __shard_factor.readyq > 0 );
     72        /* paranoid */ verify( lanes_count > 0 );
     73
     74        unsigned i;
     75        if( external || remote ) {
     76                // Figure out where thread was last time and make sure it's valid
     77                /* paranoid */ verify(thrd->preferred >= 0);
     78                unsigned start = thrd->preferred * __shard_factor.readyq;
     79                if(start < lanes_count) {
     80                        do {
     81                                unsigned r = __tls_rand();
     82                                i = start + (r % __shard_factor.readyq);
     83                                /* paranoid */ verify( i < lanes_count );
     84                                // If we can't lock it retry
     85                        } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
    35586                } else {
    35687                        do {
    357                                 unsigned r = proc->rdq.its++;
    358                                 i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
    359                                 /* paranoid */ verify( i < lanes.count );
    360                                 // If we can't lock it retry
    361                         } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    362                 }
    363 
    364                 // Actually push it
    365                 push(lanes.data[i], thrd);
    366 
    367                 // Unlock and return
    368                 __atomic_unlock( &lanes.data[i].lock );
    369 
    370                 #if !defined(__CFA_NO_STATISTICS__)
    371                         if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
    372                         else __tls_stats()->ready.push.local.success++;
    373                 #endif
    374         }
    375 
    376         static inline unsigned long long calc_cutoff(const unsigned long long ctsc, const processor * proc, __ready_queue_t & rdq) {
    377                 unsigned start = proc->rdq.id;
    378                 unsigned long long max = 0;
    379                 for(i; READYQ_SHARD_FACTOR) {
    380                         unsigned long long ptsc = ts(rdq.lanes.data[start + i]);
    381                         if(ptsc != -1ull) {
    382                                 /* paranoid */ verify( start + i < rdq.lanes.count );
    383                                 unsigned long long tsc = moving_average(ctsc, ptsc, rdq.lanes.tscs[start + i].ma);
    384                                 if(tsc > max) max = tsc;
    385                         }
    386                 }
    387                 return (max + 2 * max) / 2;
    388         }
    389 
    390         __attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
    391                 /* paranoid */ verify( lanes.count > 0 );
    392                 /* paranoid */ verify( kernelTLS().this_processor );
    393                 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
    394 
    395                 processor * const proc = kernelTLS().this_processor;
    396                 unsigned this = proc->rdq.id;
    397                 /* paranoid */ verify( this < lanes.count );
    398                 __cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);
    399 
    400                 // Figure out the current cpu and make sure it is valid
    401                 const int cpu = __kernel_getcpu();
    402                 /* paranoid */ verify(cpu >= 0);
    403                 /* paranoid */ verify(cpu < cpu_info.hthrd_count);
    404                 unsigned this_cache = cpu_info.llc_map[cpu].cache;
    405 
    406                 // Super important: don't write the same value over and over again
     407                 // We want to maximise our chances that this particular value stays in cache
    408                 if(lanes.caches[this / READYQ_SHARD_FACTOR].id != this_cache)
    409                         __atomic_store_n(&lanes.caches[this / READYQ_SHARD_FACTOR].id, this_cache, __ATOMIC_RELAXED);
    410 
    411                 const unsigned long long ctsc = rdtscl();
    412 
    413                 if(proc->rdq.target == MAX) {
    414                         uint64_t chaos = __tls_rand();
    415                         unsigned ext = chaos & 0xff;
    416                         unsigned other  = (chaos >> 8) % (lanes.count);
    417 
    418                         if(ext < 3 || __atomic_load_n(&lanes.caches[other / READYQ_SHARD_FACTOR].id, __ATOMIC_RELAXED) == this_cache) {
    419                                 proc->rdq.target = other;
    420                         }
    421                 }
    422                 else {
    423                         const unsigned target = proc->rdq.target;
    424                         __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, lanes.tscs[target].tv);
    425                         /* paranoid */ verify( lanes.tscs[target].tv != MAX );
    426                         if(target < lanes.count) {
    427                                 const unsigned long long cutoff = calc_cutoff(ctsc, proc, cltr->ready_queue);
    428                                 const unsigned long long age = moving_average(ctsc, lanes.tscs[target].tv, lanes.tscs[target].ma);
    429                                 __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
    430                                 if(age > cutoff) {
    431                                         thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
    432                                         if(t) return t;
    433                                 }
    434                         }
    435                         proc->rdq.target = MAX;
    436                 }
    437 
    438                 for(READYQ_SHARD_FACTOR) {
    439                         unsigned i = this + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
    440                         if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
    441                 }
    442 
     443                 // All lanes were empty, return 0p
    444                 return 0p;
    445 
    446         }
    447         __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
    448                 unsigned i = __tls_rand() % lanes.count;
    449                 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
    450         }
    451         __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
    452                 return search(cltr);
    453         }
    454 #endif
    455 #if defined(USE_CPU_WORK_STEALING)
    456         __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
    457                 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
    458 
    459                 processor * const proc = kernelTLS().this_processor;
    460                 const bool external = (!proc) || (cltr != proc->cltr);
    461 
    462                 // Figure out the current cpu and make sure it is valid
    463                 const int cpu = __kernel_getcpu();
    464                 /* paranoid */ verify(cpu >= 0);
    465                 /* paranoid */ verify(cpu < cpu_info.hthrd_count);
    466                 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);
    467 
    468                 // Figure out where thread was last time and make sure it's
    469                 /* paranoid */ verify(thrd->preferred >= 0);
    470                 /* paranoid */ verify(thrd->preferred < cpu_info.hthrd_count);
    471                 /* paranoid */ verify(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count);
    472                 const int prf = thrd->preferred * READYQ_SHARD_FACTOR;
    473 
    474                 const cpu_map_entry_t & map;
    475                 choose(hint) {
    476                         case UNPARK_LOCAL : &map = &cpu_info.llc_map[cpu];
    477                         case UNPARK_REMOTE: &map = &cpu_info.llc_map[prf];
    478                 }
    479                 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
    480                 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
    481                 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);
    482 
    483                 const int start = map.self * READYQ_SHARD_FACTOR;
    484                 unsigned i;
     88                                i = __tls_rand() % lanes_count;
     89                        } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
     90                }
     91        } else {
    48592                do {
    486                         unsigned r;
    487                         if(unlikely(external)) { r = __tls_rand(); }
    488                         else { r = proc->rdq.its++; }
    489                         choose(hint) {
    490                                 case UNPARK_LOCAL : i = start + (r % READYQ_SHARD_FACTOR);
    491                                 case UNPARK_REMOTE: i = prf   + (r % READYQ_SHARD_FACTOR);
    492                         }
     93                        unsigned r = proc->rdq.its++;
     94                        i = proc->rdq.id + (r % __shard_factor.readyq);
     95                        /* paranoid */ verify( i < lanes_count );
    49396                        // If we can't lock it retry
    494                 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    495 
    496                 // Actually push it
    497                 push(lanes.data[i], thrd);
    498 
    499                 // Unlock and return
    500                 __atomic_unlock( &lanes.data[i].lock );
    501 
    502                 #if !defined(__CFA_NO_STATISTICS__)
    503                         if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
    504                         else __tls_stats()->ready.push.local.success++;
    505                 #endif
    506 
    507                 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
    508 
    509         }
    510 
    511         // Pop from the ready queue from a given cluster
    512         __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
    513                 /* paranoid */ verify( lanes.count > 0 );
    514                 /* paranoid */ verify( kernelTLS().this_processor );
    515 
    516                 processor * const proc = kernelTLS().this_processor;
    517                 const int cpu = __kernel_getcpu();
    518                 /* paranoid */ verify(cpu >= 0);
    519                 /* paranoid */ verify(cpu < cpu_info.hthrd_count);
    520                 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);
    521 
    522                 const cpu_map_entry_t & map = cpu_info.llc_map[cpu];
    523                 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
    524                 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
    525                 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);
    526 
    527                 const int start = map.self * READYQ_SHARD_FACTOR;
    528                 const unsigned long long ctsc = rdtscl();
    529 
    530                 // Did we already have a help target
    531                 if(proc->rdq.target == MAX) {
    532                         unsigned long long max = 0;
    533                         for(i; READYQ_SHARD_FACTOR) {
    534                                 unsigned long long tsc = moving_average(ctsc, ts(lanes.data[start + i]), lanes.tscs[start + i].ma);
    535                                 if(tsc > max) max = tsc;
    536                         }
    537                         //  proc->rdq.cutoff = (max + 2 * max) / 2;
    538                         /* paranoid */ verify(lanes.count < 65536); // The following code assumes max 65536 cores.
    539                         /* paranoid */ verify(map.count < 65536); // The following code assumes max 65536 cores.
    540 
    541                         if(0 == (__tls_rand() % 100)) {
    542                                 proc->rdq.target = __tls_rand() % lanes.count;
    543                         } else {
    544                                 unsigned cpu_chaos = map.start + (__tls_rand() % map.count);
    545                                 proc->rdq.target = (cpu_chaos * READYQ_SHARD_FACTOR) + (__tls_rand() % READYQ_SHARD_FACTOR);
    546                                 /* paranoid */ verify(proc->rdq.target >= (map.start * READYQ_SHARD_FACTOR));
    547                                 /* paranoid */ verify(proc->rdq.target <  ((map.start + map.count) * READYQ_SHARD_FACTOR));
    548                         }
    549 
    550                         /* paranoid */ verify(proc->rdq.target != MAX);
    551                 }
    552                 else {
    553                         unsigned long long max = 0;
    554                         for(i; READYQ_SHARD_FACTOR) {
    555                                 unsigned long long tsc = moving_average(ctsc, ts(lanes.data[start + i]), lanes.tscs[start + i].ma);
    556                                 if(tsc > max) max = tsc;
    557                         }
    558                         const unsigned long long cutoff = (max + 2 * max) / 2;
    559                         {
    560                                 unsigned target = proc->rdq.target;
    561                                 proc->rdq.target = MAX;
    562                                 lanes.help[target / READYQ_SHARD_FACTOR].tri++;
    563                                 if(moving_average(ctsc, lanes.tscs[target].tv, lanes.tscs[target].ma) > cutoff) {
    564                                         thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
    565                                         proc->rdq.last = target;
    566                                         if(t) return t;
    567                                 }
    568                                 proc->rdq.target = MAX;
    569                         }
    570 
    571                         unsigned last = proc->rdq.last;
    572                         if(last != MAX && moving_average(ctsc, lanes.tscs[last].tv, lanes.tscs[last].ma) > cutoff) {
    573                                 thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));
    574                                 if(t) return t;
    575                         }
    576                         else {
    577                                 proc->rdq.last = MAX;
    578                         }
    579                 }
    580 
    581                 for(READYQ_SHARD_FACTOR) {
    582                         unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
    583                         if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
    584                 }
    585 
     586                 // All lanes were empty, return 0p
    587                 return 0p;
    588         }
    589 
    590         __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
    591                 processor * const proc = kernelTLS().this_processor;
    592                 unsigned last = proc->rdq.last;
    593                 if(last != MAX) {
    594                         struct thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal));
    595                         if(t) return t;
    596                         proc->rdq.last = MAX;
    597                 }
    598 
    599                 unsigned i = __tls_rand() % lanes.count;
    600                 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
    601         }
    602         __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
    603                 return search(cltr);
    604         }
    605 #endif
    606 #if defined(USE_RELAXED_FIFO)
    607         //-----------------------------------------------------------------------
    608         // get index from random number with or without bias towards queues
    609         static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
    610                 unsigned i;
    611                 bool local;
    612                 unsigned rlow  = r % BIAS;
    613                 unsigned rhigh = r / BIAS;
    614                 if((0 != rlow) && preferred >= 0) {
    615                         // (BIAS - 1) out of BIAS chances
     616                         // Use preferred queues
    617                         i = preferred + (rhigh % READYQ_SHARD_FACTOR);
    618                         local = true;
    619                 }
    620                 else {
    621                         // 1 out of BIAS chances
    622                         // Use all queues
    623                         i = rhigh;
    624                         local = false;
    625                 }
    626                 return [i, local];
    627         }
    628 
    629         __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
    630                 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
    631 
    632                 const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
    633                 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
    634 
    635                 bool local;
    636                 int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;
    637 
    638                 // Try to pick a lane and lock it
    639                 unsigned i;
    640                 do {
    641                         // Pick the index of a lane
    642                         unsigned r = __tls_rand_fwd();
    643                         [i, local] = idx_from_r(r, preferred);
    644 
    645                         i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
    646 
    647                         #if !defined(__CFA_NO_STATISTICS__)
    648                                 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
    649                                 else if(local) __tls_stats()->ready.push.local.attempt++;
    650                                 else __tls_stats()->ready.push.share.attempt++;
    651                         #endif
    652 
    653                         // If we can't lock it retry
    654                 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    655 
    656                 // Actually push it
    657                 push(lanes.data[i], thrd);
    658 
    659                 // Unlock and return
    660                 __atomic_unlock( &lanes.data[i].lock );
    661 
    662                 // Mark the current index in the tls rng instance as having an item
    663                 __tls_rand_advance_bck();
    664 
    665                 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
    666 
    667                 // Update statistics
    668                 #if !defined(__CFA_NO_STATISTICS__)
    669                         if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
    670                         else if(local) __tls_stats()->ready.push.local.success++;
    671                         else __tls_stats()->ready.push.share.success++;
    672                 #endif
    673         }
    674 
    675         // Pop from the ready queue from a given cluster
    676         __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
    677                 /* paranoid */ verify( lanes.count > 0 );
    678                 /* paranoid */ verify( kernelTLS().this_processor );
    679                 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
    680 
    681                 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
    682                 int preferred = kernelTLS().this_processor->rdq.id;
    683 
    684 
    685                 // As long as the list is not empty, try finding a lane that isn't empty and pop from it
    686                 for(25) {
    687                         // Pick two lists at random
    688                         unsigned ri = __tls_rand_bck();
    689                         unsigned rj = __tls_rand_bck();
    690 
    691                         unsigned i, j;
    692                         __attribute__((unused)) bool locali, localj;
    693                         [i, locali] = idx_from_r(ri, preferred);
    694                         [j, localj] = idx_from_r(rj, preferred);
    695 
    696                         i %= count;
    697                         j %= count;
    698 
    699                         // try popping from the 2 picked lists
    700                         struct thread$ * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
    701                         if(thrd) {
    702                                 return thrd;
    703                         }
    704                 }
    705 
     706                 // All lanes were empty, return 0p
    707                 return 0p;
    708         }
    709 
    710         __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
    711         __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
    712                 return search(cltr);
    713         }
    714 #endif
    715 #if defined(USE_WORK_STEALING)
    716         __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
    717                 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
    718 
    719                 // #define USE_PREFERRED
    720                 #if !defined(USE_PREFERRED)
    721                 const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
    722                 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
    723                 #else
    724                         unsigned preferred = thrd->preferred;
    725                         const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || preferred == MAX || thrd->curr_cluster != cltr;
    726                         /* paranoid */ verifyf(external || preferred < lanes.count, "Invalid preferred queue %u for %u lanes", preferred, lanes.count );
    727 
    728                         unsigned r = preferred % READYQ_SHARD_FACTOR;
    729                         const unsigned start = preferred - r;
    730                 #endif
    731 
    732                 // Try to pick a lane and lock it
    733                 unsigned i;
    734                 do {
    735                         #if !defined(__CFA_NO_STATISTICS__)
    736                                 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
    737                                 else __tls_stats()->ready.push.local.attempt++;
    738                         #endif
    739 
    740                         if(unlikely(external)) {
    741                                 i = __tls_rand() % lanes.count;
    742                         }
    743                         else {
    744                                 #if !defined(USE_PREFERRED)
    745                                         processor * proc = kernelTLS().this_processor;
    746                                         unsigned r = proc->rdq.its++;
    747                                         i =  proc->rdq.id + (r % READYQ_SHARD_FACTOR);
    748                                 #else
    749                                         i = start + (r++ % READYQ_SHARD_FACTOR);
    750                                 #endif
    751                         }
    752                         // If we can't lock it retry
    753                 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    754 
    755                 // Actually push it
    756                 push(lanes.data[i], thrd);
    757 
    758                 // Unlock and return
    759                 __atomic_unlock( &lanes.data[i].lock );
    760 
    761                 #if !defined(__CFA_NO_STATISTICS__)
    762                         if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
    763                         else __tls_stats()->ready.push.local.success++;
    764                 #endif
    765 
    766                 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
    767         }
    768 
    769         // Pop from the ready queue from a given cluster
    770         __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
    771                 /* paranoid */ verify( lanes.count > 0 );
    772                 /* paranoid */ verify( kernelTLS().this_processor );
    773                 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
    774 
    775                 processor * proc = kernelTLS().this_processor;
    776 
    777                 if(proc->rdq.target == MAX) {
    778                         unsigned long long min = ts(lanes.data[proc->rdq.id]);
    779                         for(int i = 0; i < READYQ_SHARD_FACTOR; i++) {
    780                                 unsigned long long tsc = ts(lanes.data[proc->rdq.id + i]);
    781                                 if(tsc < min) min = tsc;
    782                         }
    783                         proc->rdq.cutoff = min;
    784                         proc->rdq.target = __tls_rand() % lanes.count;
    785                 }
    786                 else {
    787                         unsigned target = proc->rdq.target;
    788                         proc->rdq.target = MAX;
    789                         const unsigned long long bias = 0; //2_500_000_000;
    790                         const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff;
    791                         if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
     97                } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
     98        }
     99
     100        // Actually push it
     101        push(readyQ.data[i], thrd);
     102
     103        // Unlock and return
     104        __atomic_unlock( &readyQ.data[i].lock );
     105
     106        #if !defined(__CFA_NO_STATISTICS__)
     107                if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
     108                else __tls_stats()->ready.push.local.success++;
     109        #endif
     110}
     111
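
Note: in the consolidated push above, a local push cycles through the processor's own block of __shard_factor.readyq lanes (proc->rdq.id plus a rotating offset), while an external or remote push targets the thread's preferred block when it is still valid and otherwise a random lane. A tiny C sketch of just the lane-index arithmetic (the names and the shard-factor value of 2 are assumptions):

    #include <stdio.h>

    #define SHARD_FACTOR 2                                      /* assumed value */

    static unsigned local_lane(unsigned proc_base, unsigned ticket) {
        return proc_base + (ticket % SHARD_FACTOR);             /* stay inside own block */
    }

    static unsigned remote_lane(unsigned preferred, unsigned rnd, unsigned lanes_count) {
        unsigned start = preferred * SHARD_FACTOR;
        if (start < lanes_count)                                /* preferred block still valid */
            return start + (rnd % SHARD_FACTOR);
        return rnd % lanes_count;                               /* otherwise any lane */
    }

    int main(void) {
        printf("local  lane: %u\n", local_lane(4, 7));          /* 4 + 7 % 2 = 5 */
        printf("remote lane: %u\n", remote_lane(3, 12345, 16)); /* 6 + 1 = 7 */
        return 0;
    }
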
     112__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->sched) {
     113        const size_t lanes_count = readyQ.count;
     114
     115        /* paranoid */ verify( __shard_factor.readyq > 0 );
     116        /* paranoid */ verify( lanes_count > 0 );
     117        /* paranoid */ verify( kernelTLS().this_processor );
     118        /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes_count );
     119
     120        processor * const proc = kernelTLS().this_processor;
     121        unsigned this = proc->rdq.id;
     122        /* paranoid */ verify( this < lanes_count );
     123        __cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);
     124
      125        // Figure out what the current cache is
     126        const unsigned this_cache = cache_id(cltr, this / __shard_factor.readyq);
     127        const unsigned long long ctsc = rdtscl();
     128
     129        if(proc->rdq.target == MAX) {
     130                uint64_t chaos = __tls_rand();
     131                unsigned ext = chaos & 0xff;
     132                unsigned other  = (chaos >> 8) % (lanes_count);
     133
     134                if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.readyq].id, __ATOMIC_RELAXED) == this_cache) {
     135                        proc->rdq.target = other;
     136                }
     137        }
     138        else {
     139                const unsigned target = proc->rdq.target;
     140                __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, readyQ.tscs[target].tv);
     141                /* paranoid */ verify( readyQ.tscs[target].tv != MAX );
     142                if(target < lanes_count) {
     143                        const unsigned long long cutoff = calc_cutoff(ctsc, proc, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq);
     144                        const unsigned long long age = moving_average(ctsc, readyQ.tscs[target].tv, readyQ.tscs[target].ma);
     145                        __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
     146                        if(age > cutoff) {
    792147                                thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
    793148                                if(t) return t;
    794149                        }
    795150                }
    796 
    797                 for(READYQ_SHARD_FACTOR) {
    798                         unsigned i = proc->rdq.id + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
    799                         if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
    800                 }
    801                 return 0p;
    802         }
    803 
    804         __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
    805                 unsigned i = __tls_rand() % lanes.count;
    806                 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
    807         }
    808 
    809         __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
    810                 return search(cltr);
    811         }
    812 #endif
     151                proc->rdq.target = MAX;
     152        }
     153
     154        for(__shard_factor.readyq) {
     155                unsigned i = this + (proc->rdq.itr++ % __shard_factor.readyq);
     156                if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
     157        }
     158
      159        // All lanes were empty, return 0p
     160        return 0p;
     161
     162}
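
Note: the help heuristic in pop_fast keeps one candidate lane in proc->rdq.target and only steals from it when its age (the moving average of the time since its oldest push) exceeds the cutoff from calc_cutoff, which is (max + 2 * max) / 2, i.e. 1.5 times the largest moving average over the processor's own lanes; about 3 times in 256 the candidate is chosen completely at random, otherwise it must share the processor's last-level cache. Worked example with assumed values: if the processor's own lanes have moving averages of 400 and 1000 cycles, the cutoff is 1.5 * 1000 = 1500, so a candidate lane is only helped once its age estimate exceeds 1500 cycles.
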
     163__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) {
     164        unsigned i = __tls_rand() % (cltr->sched.readyQ.count);
     165        return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
     166}
     167__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
     168        return search(cltr);
     169}
    813170
    814171//=======================================================================
     
    820177//-----------------------------------------------------------------------
    821178// try to pop from a lane given by index w
    822 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
    823         /* paranoid */ verify( w < lanes.count );
     179static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
     180        /* paranoid */ verify( w < readyQ.count );
    824181        __STATS( stats.attempt++; )
    825182
    826183        // Get relevant elements locally
    827         __intrusive_lane_t & lane = lanes.data[w];
     184        __intrusive_lane_t & lane = readyQ.data[w];
    828185
    829186        // If list looks empty retry
     
    845202        // Actually pop the list
    846203        struct thread$ * thrd;
    847         #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING)
    848                 unsigned long long tsc_before = ts(lane);
    849         #endif
     204        unsigned long long tsc_before = ts(lane);
    850205        unsigned long long tsv;
    851206        [thrd, tsv] = pop(lane);
     
    861216        __STATS( stats.success++; )
    862217
    863         #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING)
    864                 if (tsv != MAX) {
    865                         unsigned long long now = rdtscl();
    866                         unsigned long long pma = __atomic_load_n(&lanes.tscs[w].ma, __ATOMIC_RELAXED);
    867                         __atomic_store_n(&lanes.tscs[w].tv, tsv, __ATOMIC_RELAXED);
    868                         __atomic_store_n(&lanes.tscs[w].ma, moving_average(now, tsc_before, pma), __ATOMIC_RELAXED);
    869                 }
    870         #endif
    871 
    872         #if defined(USE_AWARE_STEALING) || defined(USE_CPU_WORK_STEALING)
    873                 thrd->preferred = w / READYQ_SHARD_FACTOR;
    874         #else
    875                 thrd->preferred = w;
    876         #endif
     218        if (tsv != MAX) {
     219                unsigned long long now = rdtscl();
     220                unsigned long long pma = __atomic_load_n(&readyQ.tscs[w].ma, __ATOMIC_RELAXED);
     221                __atomic_store_n(&readyQ.tscs[w].tv, tsv, __ATOMIC_RELAXED);
     222                __atomic_store_n(&readyQ.tscs[w].ma, moving_average(now, tsc_before, pma), __ATOMIC_RELAXED);
     223        }
     224
     225        thrd->preferred = w / __shard_factor.readyq;
    877226
    878227        // return the popped thread
     
    883232// try to pop from any lane, making sure you don't miss any thread pushed
    884233// before the start of the function
    885 static inline struct thread$ * search(struct cluster * cltr) with (cltr->ready_queue) {
    886         /* paranoid */ verify( lanes.count > 0 );
    887         unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
     234static inline struct thread$ * search(struct cluster * cltr) {
     235        const size_t lanes_count = cltr->sched.readyQ.count;
     236        /* paranoid */ verify( lanes_count > 0 );
     237        unsigned count = __atomic_load_n( &lanes_count, __ATOMIC_RELAXED );
    888238        unsigned offset = __tls_rand();
    889239        for(i; count) {
     
    902252// get preferred ready-queue index for a new thread
    903253unsigned ready_queue_new_preferred() {
    904         unsigned pref = 0;
     254        unsigned pref = MAX;
    905255        if(struct thread$ * thrd = publicTLS_get( this_thread )) {
    906256                pref = thrd->preferred;
    907257        }
    908         else {
    909                 #if defined(USE_CPU_WORK_STEALING)
    910                         pref = __kernel_getcpu();
    911                 #endif
    912         }
    913 
    914         #if defined(USE_CPU_WORK_STEALING)
    915                 /* paranoid */ verify(pref >= 0);
    916                 /* paranoid */ verify(pref < cpu_info.hthrd_count);
    917         #endif
    918258
    919259        return pref;
     
    921261
    922262//-----------------------------------------------------------------------
    923 // Check that all the intrusive queues in the data structure are still consistent
    924 static void check( __ready_queue_t & q ) with (q) {
    925         #if defined(__CFA_WITH_VERIFY__)
    926                 {
    927                         for( idx ; lanes.count ) {
    928                                 __intrusive_lane_t & sl = lanes.data[idx];
    929                                 assert(!lanes.data[idx].lock);
    930 
    931                                         if(is_empty(sl)) {
    932                                                 assert( sl.anchor.next == 0p );
    933                                                 assert( sl.anchor.ts   == -1llu );
    934                                                 assert( mock_head(sl)  == sl.prev );
    935                                         } else {
    936                                                 assert( sl.anchor.next != 0p );
    937                                                 assert( sl.anchor.ts   != -1llu );
    938                                                 assert( mock_head(sl)  != sl.prev );
    939                                         }
    940                         }
    941                 }
    942         #endif
    943 }
    944 
    945 //-----------------------------------------------------------------------
    946263// Given 2 indexes, pick the list with the oldest push and try to pop from it
    947 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
     264static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
    948265        // Pick the best list
    949266        int w = i;
    950         if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
    951                 w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
     267        if( __builtin_expect(!is_empty(readyQ.data[j]), true) ) {
     268                w = (ts(readyQ.data[i]) < ts(readyQ.data[j])) ? i : j;
    952269        }
    953270
    954271        return try_pop(cltr, w __STATS(, stats));
    955272}
    956 
    957 // Call this function if the intrusive list was moved using memcpy;
    958 // it fixes the list so that the pointers back to anchors aren't left dangling
    959 static inline void fix(__intrusive_lane_t & ll) {
    960                         if(is_empty(ll)) {
    961                                 verify(ll.anchor.next == 0p);
    962                                 ll.prev = mock_head(ll);
    963                         }
    964 }
    965 
    966 static void assign_list(unsigned & value, dlist(processor) & list, unsigned count) {
    967         processor * it = &list`first;
    968         for(unsigned i = 0; i < count; i++) {
    969                 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
    970                 it->rdq.id = value;
    971                 it->rdq.target = MAX;
    972                 value += READYQ_SHARD_FACTOR;
    973                 it = &(*it)`next;
    974         }
    975 }
    976 
    977 static void reassign_cltr_id(struct cluster * cltr) {
    978         unsigned preferred = 0;
    979         assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
    980         assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
    981 }
    982 
    983 static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
    984         #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING)
    985                 lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
    986                 for(i; lanes.count) {
    987                         lanes.tscs[i].tv = rdtscl();
    988                         lanes.tscs[i].ma = 0;
    989                 }
    990         #endif
    991 }
    992 
    993 #if defined(USE_CPU_WORK_STEALING)
    994         // ready_queue size is fixed in this case
    995         void ready_queue_grow(struct cluster * cltr) {}
    996         void ready_queue_shrink(struct cluster * cltr) {}
    997 #else
    998         // Grow the ready queue
    999         void ready_queue_grow(struct cluster * cltr) {
    1000                 size_t ncount;
    1001                 int target = cltr->procs.total;
    1002 
    1003                 /* paranoid */ verify( ready_mutate_islocked() );
    1004                 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");
    1005 
    1006                 // Make sure that everything is consistent
    1007                 /* paranoid */ check( cltr->ready_queue );
    1008 
    1009                 // grow the ready queue
    1010                 with( cltr->ready_queue ) {
    1011                         // Find new count
    1012                                 // Make sure we always have at least 1 list
    1013                         if(target >= 2) {
    1014                                 ncount = target * READYQ_SHARD_FACTOR;
    1015                         } else {
    1016                                 ncount = SEQUENTIAL_SHARD;
    1017                         }
    1018 
    1019                         // Allocate new array (uses realloc and memcpies the data)
    1020                         lanes.data = alloc( ncount, lanes.data`realloc );
    1021 
    1022                         // Fix the moved data
    1023                         for( idx; (size_t)lanes.count ) {
    1024                                 fix(lanes.data[idx]);
    1025                         }
    1026 
    1027                         // Construct new data
    1028                         for( idx; (size_t)lanes.count ~ ncount) {
    1029                                 (lanes.data[idx]){};
    1030                         }
    1031 
    1032                         // Update original
    1033                         lanes.count = ncount;
    1034 
    1035                         lanes.caches = alloc( target, lanes.caches`realloc );
    1036                 }
    1037 
    1038                 fix_times(cltr);
    1039 
    1040                 reassign_cltr_id(cltr);
    1041 
    1042                 // Make sure that everything is consistent
    1043                 /* paranoid */ check( cltr->ready_queue );
    1044 
    1045                 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");
    1046 
    1047                 /* paranoid */ verify( ready_mutate_islocked() );
    1048         }
    1049 
    1050         // Shrink the ready queue
    1051         void ready_queue_shrink(struct cluster * cltr) {
    1052                 /* paranoid */ verify( ready_mutate_islocked() );
    1053                 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
    1054 
    1055                 // Make sure that everything is consistent
    1056                 /* paranoid */ check( cltr->ready_queue );
    1057 
    1058                 int target = cltr->procs.total;
    1059 
    1060                 with( cltr->ready_queue ) {
    1061                         // Remember old count
    1062                         size_t ocount = lanes.count;
    1063 
    1064                         // Find new count
    1065                                 // Make sure we always have at least 1 list
    1066                         lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD;
    1067                         /* paranoid */ verify( ocount >= lanes.count );
    1068                         /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );
    1069 
    1070                         // for printing count the number of displaced threads
    1071                         #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
    1072                                 __attribute__((unused)) size_t displaced = 0;
    1073                         #endif
    1074 
    1075                         // redistribute old data
    1076                         for( idx; (size_t)lanes.count ~ ocount) {
    1077                                 // Lock is not strictly needed but makes checking invariants much easier
    1078                                 __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
    1079                                 verify(locked);
    1080 
    1081                                 // As long as we can pop from this lane to push the threads somewhere else in the queue
    1082                                 while(!is_empty(lanes.data[idx])) {
    1083                                         struct thread$ * thrd;
    1084                                         unsigned long long _;
    1085                                         [thrd, _] = pop(lanes.data[idx]);
    1086 
    1087                                         push(cltr, thrd, true);
    1088 
    1089                                         // for printing count the number of displaced threads
    1090                                         #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
    1091                                                 displaced++;
    1092                                         #endif
    1093                                 }
    1094 
    1095                                 // Unlock the lane
    1096                                 __atomic_unlock(&lanes.data[idx].lock);
    1097 
    1098                                 // TODO print the queue statistics here
    1099 
    1100                                 ^(lanes.data[idx]){};
    1101                         }
    1102 
    1103                         __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);
    1104 
    1105                         // Allocate new array (uses realloc and memcpies the data)
    1106                         lanes.data = alloc( lanes.count, lanes.data`realloc );
    1107 
    1108                         // Fix the moved data
    1109                         for( idx; (size_t)lanes.count ) {
    1110                                 fix(lanes.data[idx]);
    1111                         }
    1112 
    1113                         lanes.caches = alloc( target, lanes.caches`realloc );
    1114                 }
    1115 
    1116                 fix_times(cltr);
    1117 
    1118 
    1119                 reassign_cltr_id(cltr);
    1120 
    1121                 // Make sure that everything is consistent
    1122                 /* paranoid */ check( cltr->ready_queue );
    1123 
    1124                 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
    1125                 /* paranoid */ verify( ready_mutate_islocked() );
    1126         }
    1127 #endif
    1128 
    1129 #if !defined(__CFA_NO_STATISTICS__)
    1130         unsigned cnt(const __ready_queue_t & this, unsigned idx) {
    1131                 /* paranoid */ verify(this.lanes.count > idx);
    1132                 return this.lanes.data[idx].cnt;
    1133         }
    1134 #endif
    1135 
    1136 
    1137 #if   defined(CFA_HAVE_LINUX_LIBRSEQ)
    1138         // No definition needed
    1139 #elif defined(CFA_HAVE_LINUX_RSEQ_H)
    1140 
    1141         #if defined( __x86_64 ) || defined( __i386 )
    1142                 #define RSEQ_SIG        0x53053053
    1143         #elif defined( __ARM_ARCH )
    1144                 #ifdef __ARMEB__
    1145                 #define RSEQ_SIG    0xf3def5e7      /* udf    #24035    ; 0x5de3 (ARMv6+) */
    1146                 #else
    1147                 #define RSEQ_SIG    0xe7f5def3      /* udf    #24035    ; 0x5de3 */
    1148                 #endif
    1149         #endif
    1150 
    1151         extern void __disable_interrupts_hard();
    1152         extern void __enable_interrupts_hard();
    1153 
    1154         static void __kernel_raw_rseq_register  (void) {
    1155                 /* paranoid */ verify( __cfaabi_rseq.cpu_id == RSEQ_CPU_ID_UNINITIALIZED );
    1156 
    1157                 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, (sigset_t *)0p, _NSIG / 8);
    1158                 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, RSEQ_SIG);
    1159                 if(ret != 0) {
    1160                         int e = errno;
    1161                         switch(e) {
    1162                         case EINVAL: abort("KERNEL ERROR: rseq register invalid argument");
    1163                         case ENOSYS: abort("KERNEL ERROR: rseq register not supported");
    1164                         case EFAULT: abort("KERNEL ERROR: rseq register with invalid argument");
    1165                         case EBUSY : abort("KERNEL ERROR: rseq register already registered");
    1166                         case EPERM : abort("KERNEL ERROR: rseq register sig  argument  on unregistration does not match the signature received on registration");
    1167                         default: abort("KERNEL ERROR: rseq register unexpected return %d", e);
    1168                         }
    1169                 }
    1170         }
    1171 
    1172         static void __kernel_raw_rseq_unregister(void) {
    1173                 /* paranoid */ verify( __cfaabi_rseq.cpu_id >= 0 );
    1174 
    1175                 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, (sigset_t *)0p, _NSIG / 8);
    1176                 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, RSEQ_SIG);
    1177                 if(ret != 0) {
    1178                         int e = errno;
    1179                         switch(e) {
    1180                         case EINVAL: abort("KERNEL ERROR: rseq unregister invalid argument");
    1181                         case ENOSYS: abort("KERNEL ERROR: rseq unregister not supported");
    1182                         case EFAULT: abort("KERNEL ERROR: rseq unregister with invalid argument");
    1183                         case EBUSY : abort("KERNEL ERROR: rseq unregister already registered");
    1184                         case EPERM : abort("KERNEL ERROR: rseq unregister sig  argument  on unregistration does not match the signature received on registration");
    1185                         default: abort("KERNEL ERROR: rseq unregister unexpected return %d", e);
    1186                         }
    1187                 }
    1188         }
    1189 #else
    1190         // No definition needed
    1191 #endif
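
Note on the reworked try_pop above (new lines 204-223): the per-lane timestamp bookkeeping that was previously guarded by USE_AWARE_STEALING / USE_WORK_STEALING is now unconditional, and every successful pop refreshes a per-lane moving average of how long the popped element waited. The body of moving_average() is not part of this hunk, so the sketch below only illustrates the kind of blend being performed; the 7/8 weighting and the helper name are assumptions.

    #include <stdint.h>

    /* Hypothetical stand-in for moving_average(); the real definition lives elsewhere in libcfa. */
    static inline uint64_t sketch_moving_average( uint64_t now, uint64_t tsc_before, uint64_t prev_ma ) {
        uint64_t sample = now - tsc_before;     /* how long the popped element sat in the lane */
        return (7 * prev_ma + sample) / 8;      /* keep 7/8 of the history, blend in the new sample */
    }
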
  • libcfa/src/concurrency/ready_subqueue.hfa

    ref3c383 rd672350  
    2525        );
    2626        return rhead;
    27 }
    28 
    29 // Ctor
    30 void ?{}( __intrusive_lane_t & this ) {
    31         this.lock = false;
    32         this.prev = mock_head(this);
    33         this.anchor.next = 0p;
    34         this.anchor.ts   = -1llu;
    35         #if !defined(__CFA_NO_STATISTICS__)
    36                 this.cnt  = 0;
    37         #endif
    38 
    39         // We add a boat-load of assertions here because the anchor code is very fragile
    40         /* paranoid */ _Static_assert( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
    41         /* paranoid */ verify( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
    42         /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.anchor) );
    43         /* paranoid */ verify( &mock_head(this)->link.next == &this.anchor.next );
    44         /* paranoid */ verify( &mock_head(this)->link.ts   == &this.anchor.ts   );
    45         /* paranoid */ verify( mock_head(this)->link.next == 0p );
    46         /* paranoid */ verify( mock_head(this)->link.ts   == -1llu  );
    47         /* paranoid */ verify( mock_head(this) == this.prev );
    48         /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 );
    49         /* paranoid */ verify( __alignof__(this) == 128 );
    50         /* paranoid */ verifyf( ((intptr_t)(&this) % 128) == 0, "Expected address to be aligned %p %% 128 == %zd", &this, ((intptr_t)(&this) % 128) );
    51 }
    52 
    53 // Dtor is trivial
    54 void ^?{}( __intrusive_lane_t & this ) {
    55         // Make sure the list is empty
    56         /* paranoid */ verify( this.anchor.next == 0p );
    57         /* paranoid */ verify( this.anchor.ts   == -1llu );
    58         /* paranoid */ verify( mock_head(this)  == this.prev );
    5927}
    6028
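
Aside on the removed __intrusive_lane_t constructor above: its assertions document the "mock head" layout trick, where the lane's anchor field is placed so that a fabricated element pointer overlays it, letting an empty lane be linked and unlinked like any other node. A minimal C sketch of that layout idea follows; the type and function names are illustrative, not the libcfa definitions.

    #include <stddef.h>

    struct node_t;
    struct link_t { struct node_t * next; unsigned long long ts; };
    struct node_t { /* payload ... */ struct link_t link; };
    struct lane_t { struct node_t * prev; struct link_t anchor; };

    /* Fabricate a node pointer whose link field aliases the lane's anchor,
       mirroring the offsetof assertions in the removed constructor. */
    static inline struct node_t * mock_head( struct lane_t * l ) {
        return (struct node_t *)((char *)&l->anchor - offsetof(struct node_t, link));
    }
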
  • libcfa/src/concurrency/thread.cfa

    ref3c383 rd672350  
    1919#include "thread.hfa"
    2020
    21 #include "kernel_private.hfa"
     21#include "kernel/private.hfa"
    2222#include "exception.hfa"
    2323
  • libcfa/src/containers/string.cfa

    ref3c383 rd672350  
    9292}
    9393
    94 string ?=?(string & this, string other) {
     94string & ?=?(string & this, string & other) { //// <---- straw man change
    9595    (*this.inner) = (*other.inner);
    9696    return this;
     
    235235int find(const string &s, const char* search, size_t searchsize) {
    236236    return find( *s.inner, search, searchsize);
     237}
     238
     239int findFrom(const string &s, size_t fromPos, char search) {
     240    return findFrom( *s.inner, fromPos, search );
     241}
     242
     243int findFrom(const string &s, size_t fromPos, const string &search) {
     244    return findFrom( *s.inner, fromPos, *search.inner );
     245}
     246
     247int findFrom(const string &s, size_t fromPos, const char* search) {
     248    return findFrom( *s.inner, fromPos, search );
     249}
     250
     251int findFrom(const string &s, size_t fromPos, const char* search, size_t searchsize) {
     252    return findFrom( *s.inner, fromPos, search, searchsize );
    237253}
    238254
  • libcfa/src/containers/string.hfa

    ref3c383 rd672350  
    4141void ?=?(string &s, const string &other);
    4242void ?=?(string &s, char other);
    43 string ?=?(string &s, string other);  // string tolerates memcpys; still saw calls to autogen
    44 
     43string & ?=?(string &s, string &other);  // surprising ret seems to help avoid calls to autogen
     44//string ?=?( string &, string ) = void;
    4545void ^?{}(string &s);
    4646
     
    9393int find(const string &s, const char* search, size_t searchsize);
    9494
     95int findFrom(const string &s, size_t fromPos, char search);
     96int findFrom(const string &s, size_t fromPos, const string &search);
     97int findFrom(const string &s, size_t fromPos, const char* search);
     98int findFrom(const string &s, size_t fromPos, const char* search, size_t searchsize);
     99
    95100bool includes(const string &s, const string &search);
    96101bool includes(const string &s, const char* search);
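
A small usage sketch for the findFrom overloads declared above. The string layer forwards to string_res (see string.cfa earlier in this changeset); the sketch uses the string_res (buffer, length) constructor visible later in this changeset, and both the include path and the not-found convention of returning size(s) are assumptions drawn from that implementation.

    #include <containers/string_res.hfa>   // include path assumed

    int demo() {
        string_res s = { "abcabc", 6 };
        int first  = find( s, 'b' );                 // expect 1
        int second = findFrom( s, first + 1, 'b' );  // expect 4
        return second == (int)size( s );             // true only if no later 'b' exists
    }
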
  • libcfa/src/containers/string_res.cfa

    ref3c383 rd672350  
    1515
    1616#include "string_res.hfa"
    17 #include <stdlib.hfa>  // e.g. malloc
    18 #include <string.h>    // e.g. strlen
     17#include "string_sharectx.hfa"
     18#include "stdlib.hfa"
     19
     20// Workaround for observed performance penalty from calling CFA's alloc.
     21// Workaround is:  EndVbyte = TEMP_ALLOC(char, CurrSize)
     22// Should be:      EndVbyte = alloc(CurrSize)
     23#define TEMP_ALLOC(T, n) (( T* ) malloc( n * sizeof( T ) ))
     24
     25#include <assert.h>
    1926
    2027//######################### VbyteHeap "header" #########################
    21 
    22 
    23 
    24 
    25 
    26 
    27 
    28 
    29 // DON'T COMMIT:
    30 // #define VbyteDebug
    31 
    32 
    33 
    34 
    3528
    3629#ifdef VbyteDebug
     
    5447
    5548   
    56 static inline void compaction( VbyteHeap & );                           // compaction of the byte area
    57 static inline void garbage( VbyteHeap & );                              // garbage collect the byte area
    58 static inline void extend( VbyteHeap &, int );                  // extend the size of the byte area
    59 static inline void reduce( VbyteHeap &, int );                  // reduce the size of the byte area
    60 
    61 static inline void ?{}( VbyteHeap &, int = 1000 );
    62 static inline void ^?{}( VbyteHeap & );
    63 static inline void ByteCopy( VbyteHeap &, char *, int, int, char *, int, int ); // copy a block of bytes from one location in the heap to another
    64 static inline int ByteCmp( VbyteHeap &, char *, int, int, char *, int, int );   // compare 2 blocks of bytes
    65 static inline char *VbyteAlloc( VbyteHeap &, int );                     // allocate a block bytes in the heap
    66 
    67 
    68 static inline void AddThisAfter( HandleNode &, HandleNode & );
    69 static inline void DeleteNode( HandleNode & );
    70 static inline void MoveThisAfter( HandleNode &, const HandleNode & );           // move current handle after parameter handle
     49static void compaction( VbyteHeap & );                          // compaction of the byte area
     50static void garbage( VbyteHeap &, int );                                // garbage collect the byte area
     51static void extend( VbyteHeap &, int );                 // extend the size of the byte area
     52static void reduce( VbyteHeap &, int );                 // reduce the size of the byte area
     53
     54static void ?{}( VbyteHeap &, size_t = 1000 );
     55static void ^?{}( VbyteHeap & );
     56
     57static int ByteCmp( char *, int, int, char *, int, int );       // compare 2 blocks of bytes
     58static char *VbyteAlloc( VbyteHeap &, int );                    // allocate a block bytes in the heap
     59static char *VbyteTryAdjustLast( VbyteHeap &, int );
     60
     61static void AddThisAfter( HandleNode &, HandleNode & );
     62static void DeleteNode( HandleNode & );
     63static void MoveThisAfter( HandleNode &, const HandleNode & );          // move current handle after parameter handle
    7164
    7265
    7467// Allocate the storage for the variable-sized area and initialize the heap variables.
    7467
    75 static inline void ?{}( VbyteHeap & this, int Size ) with(this) {
     68static void ?{}( VbyteHeap & this, size_t Size ) with(this) {
    7669#ifdef VbyteDebug
    7770    serr | "enter:VbyteHeap::VbyteHeap, this:" | &this | " Size:" | Size;
     
    7972    NoOfCompactions = NoOfExtensions = NoOfReductions = 0;
    8073    InitSize = CurrSize = Size;
    81     StartVbyte = EndVbyte = alloc(CurrSize);
     74    StartVbyte = EndVbyte = TEMP_ALLOC(char, CurrSize);
    8275    ExtVbyte = (void *)( StartVbyte + CurrSize );
    8376    Header.flink = Header.blink = &Header;
     77    Header.ulink = & this;
    8478#ifdef VbyteDebug
    8579    HeaderPtr = &Header;
     
    9185// Release the dynamically allocated storage for the byte area.
    9286
    93 static inline void ^?{}( VbyteHeap & this ) with(this) {
     87static void ^?{}( VbyteHeap & this ) with(this) {
    9488    free( StartVbyte );
    9589} // ~VbyteHeap
     
    10296// creator.
    10397
    104 void ?{}( HandleNode & this ) with(this) {
     98static void ?{}( HandleNode & this ) with(this) {
    10599#ifdef VbyteDebug
    106100    serr | "enter:HandleNode::HandleNode, this:" | &this;
     
    117111// collection.
    118112
    119 void ?{}( HandleNode & this, VbyteHeap & vh ) with(this) {
     113static void ?{}( HandleNode & this, VbyteHeap & vh ) with(this) {
    120114#ifdef VbyteDebug
    121115    serr | "enter:HandleNode::HandleNode, this:" | &this;
     
    123117    s = 0;
    124118    lnth = 0;
     119    ulink = &vh;
    125120    AddThisAfter( this, *vh.Header.blink );
    126121#ifdef VbyteDebug
     
    133128// is the responsibility of the creator to destroy it.
    134129
    135 void ^?{}( HandleNode & this ) with(this) {
     130static void ^?{}( HandleNode & this ) with(this) {
    136131#ifdef VbyteDebug
    137132    serr | "enter:HandleNode::~HandleNode, this:" | & this;
     
    149144} // ~HandleNode
    150145
     146
     147//######################### String Sharing Context #########################
     148
     149static string_sharectx * ambient_string_sharectx;               // fickle top of stack
     150static string_sharectx default_string_sharectx = {NEW_SHARING}; // stable bottom of stack
     151
     152void ?{}( string_sharectx & this, StringSharectx_Mode mode ) with( this ) {
     153    (older){ ambient_string_sharectx };
     154    if ( mode == NEW_SHARING ) {
     155        (activeHeap){ new( (size_t) 1000 ) };
     156    } else {
     157        verify( mode == NO_SHARING );
     158        (activeHeap){ 0p };
     159    }
     160    ambient_string_sharectx = & this;
     161}
     162
     163void ^?{}( string_sharectx & this ) with( this ) {
     164    if ( activeHeap ) delete( activeHeap );
     165
     166    // unlink this from older-list starting from ambient_string_sharectx
     167    // usually, this==ambient_string_sharectx and the loop runs zero times
     168    string_sharectx *& c = ambient_string_sharectx;
     169    while ( c != &this ) &c = &c->older;              // find this
     170    c = this.older;                                   // unlink
     171}
     172
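
A hedged usage sketch of the sharing-context stack managed by the constructor and destructor above: constructing a string_sharectx pushes it as the ambient context for strings created afterwards, and its destructor pops it. The include path is an assumption.

    #include <containers/string_res.hfa>   // include path assumed

    void demo() {
        string_sharectx no_share = { NO_SHARING };  // strings created below get private heaps
        string_res s = { "hello", 5 };              // allocated in its own VbyteHeap
    }   // ^?{}(no_share) runs here and restores the previous ambient context
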
    151173//######################### String Resource #########################
    152174
    153175
    154 VbyteHeap HeapArea;
    155 
    156 VbyteHeap * DEBUG_string_heap = & HeapArea;
     176VbyteHeap * DEBUG_string_heap() {
     177    assert( ambient_string_sharectx->activeHeap && "No sharing context is active" );
     178    return ambient_string_sharectx->activeHeap;
     179}
    157180
    158181size_t DEBUG_string_bytes_avail_until_gc( VbyteHeap * heap ) {
     
    160183}
    161184
     185size_t DEBUG_string_bytes_in_heap( VbyteHeap * heap ) {
     186    return heap->CurrSize;
     187}
     188
    162189const char * DEBUG_string_heap_start( VbyteHeap * heap ) {
    163190    return heap->StartVbyte;
    164191}
    165 
    166192
    167193// Returns the size of the string in bytes
     
    187213    // Store auto-newline state so it can be restored
    188214    bool anl = getANL$(out);
    189     nlOff(out);
    190     for (size_t i = 0; i < s.Handle.lnth; i++) {
    191         // Need to re-apply on the last output operator, for whole-statement version
    192         if (anl && i == s.Handle.lnth-1) nlOn(out);
    193         out | s[i];
    194     }
    195     return out;
     215    if( s.Handle.lnth == 0 ) {
     216        sout | "";
     217    } else {
     218        nlOff(out);
     219        for (size_t i = 0; i < s.Handle.lnth; i++) {
     220            // Need to re-apply on the last output operator, for whole-statement version
     221            if (anl && i == s.Handle.lnth-1) nlOn(out);
     222            out | s[i];
     223        }
     224    }
    196225}
    197226
    198227// Empty constructor
    199228void ?{}(string_res &s) with(s) {
    200     (Handle){ HeapArea };
     229    if( ambient_string_sharectx->activeHeap ) {
     230        (Handle){ * ambient_string_sharectx->activeHeap };
     231        (shareEditSet_owns_ulink){ false };
     232        verify( Handle.s == 0p && Handle.lnth == 0 );
     233    } else {
     234        (Handle){ * new( (size_t) 10 ) };  // TODO: can I lazily avoid allocating for empty string
     235        (shareEditSet_owns_ulink){ true };
     236        Handle.s = Handle.ulink->StartVbyte;
     237        verify( Handle.lnth == 0 );
     238    }
    201239    s.shareEditSet_prev = &s;
    202240    s.shareEditSet_next = &s;
    203241}
    204242
     243static void eagerCopyCtorHelper(string_res &s, const char* rhs, size_t rhslnth) with(s) {
     244    if( ambient_string_sharectx->activeHeap ) {
     245        (Handle){ * ambient_string_sharectx->activeHeap };
     246        (shareEditSet_owns_ulink){ false };
     247    } else {
     248        (Handle){ * new( rhslnth ) };
     249        (shareEditSet_owns_ulink){ true };
     250    }
     251    Handle.s = VbyteAlloc(*Handle.ulink, rhslnth);
     252    Handle.lnth = rhslnth;
     253    memmove( Handle.s, rhs, rhslnth );
     254    s.shareEditSet_prev = &s;
     255    s.shareEditSet_next = &s;
     256}
     257
    205258// Constructor from a raw buffer and size
    206259void ?{}(string_res &s, const char* rhs, size_t rhslnth) with(s) {
    207     (Handle){ HeapArea };
    208     Handle.s = VbyteAlloc(HeapArea, rhslnth);
     260    eagerCopyCtorHelper(s, rhs, rhslnth);
     261}
     262
     263// private ctor (not in header): use specified heap (ignore ambient) and copy chars in
     264void ?{}( string_res &s, VbyteHeap & heap, const char* rhs, size_t rhslnth ) with(s) {
     265    (Handle){ heap };
     266    Handle.s = VbyteAlloc(*Handle.ulink, rhslnth);
    209267    Handle.lnth = rhslnth;
    210     for ( int i = 0; i < rhslnth; i += 1 ) {            // copy characters
    211         Handle.s[i] = rhs[i];
    212     } // for
     268    (s.shareEditSet_owns_ulink){ false };
     269    memmove( Handle.s, rhs, rhslnth );
    213270    s.shareEditSet_prev = &s;
    214271    s.shareEditSet_next = &s;
    215272}
    216273
    217 // String literal constructor
    218 void ?{}(string_res &s, const char* rhs) {
    219     (s){ rhs, strlen(rhs) };
    220 }
    221 
    222274// General copy constructor
    223275void ?{}(string_res &s, const string_res & s2, StrResInitMode mode, size_t start, size_t end ) {
    224276
    225     (s.Handle){ HeapArea };
    226     s.Handle.s = s2.Handle.s + start;
    227     s.Handle.lnth = end - start;
    228     MoveThisAfter(s.Handle, s2.Handle );                        // insert this handle after rhs handle
    229     // ^ bug?  skip others at early point in string
    230    
    231     if (mode == COPY_VALUE) {
    232         // make s alone in its shareEditSet
    233         s.shareEditSet_prev = &s;
    234         s.shareEditSet_next = &s;
     277    verify( start <= end && end <= s2.Handle.lnth );
     278
     279    if (s2.Handle.ulink != ambient_string_sharectx->activeHeap && mode == COPY_VALUE) {
     280        // crossing heaps (including private): copy eagerly
     281        eagerCopyCtorHelper(s, s2.Handle.s + start, end - start);
     282        verify(s.shareEditSet_prev == &s);
     283        verify(s.shareEditSet_next == &s);
    235284    } else {
    236         assert( mode == SHARE_EDITS );
    237 
    238         // s2 is logically const but not implementation const
    239         string_res & s2mod = (string_res &) s2;
    240 
    241         // insert s after s2 on shareEditSet
    242         s.shareEditSet_next = s2mod.shareEditSet_next;
    243         s.shareEditSet_prev = &s2mod;
    244         s.shareEditSet_next->shareEditSet_prev = &s;
    245         s.shareEditSet_prev->shareEditSet_next = &s;
    246     }
    247 }
    248 
    249 void assign(string_res &this, const char* buffer, size_t bsize) {
    250 
    251     // traverse the incumbent share-edit set (SES) to recover the range of a base string to which `this` belongs
    252     string_res * shareEditSetStartPeer = & this;
    253     string_res * shareEditSetEndPeer = & this;
    254     for (string_res * editPeer = this.shareEditSet_next; editPeer != &this; editPeer = editPeer->shareEditSet_next) {
    255         if ( editPeer->Handle.s < shareEditSetStartPeer->Handle.s ) {
    256             shareEditSetStartPeer = editPeer;
     285        (s.Handle){};
     286        s.Handle.s = s2.Handle.s + start;
     287        s.Handle.lnth = end - start;
     288        s.Handle.ulink = s2.Handle.ulink;
     289
     290        AddThisAfter(s.Handle, s2.Handle );                     // insert this handle after rhs handle
     291        // ^ bug?  skip others at early point in string
     292
     293        if (mode == COPY_VALUE) {
     294            verify(s2.Handle.ulink == ambient_string_sharectx->activeHeap);
     295            // requested logical copy in same heap: defer copy until write
     296
     297            (s.shareEditSet_owns_ulink){ false };
     298
     299            // make s alone in its shareEditSet
     300            s.shareEditSet_prev = &s;
     301            s.shareEditSet_next = &s;
     302        } else {
     303            verify( mode == SHARE_EDITS );
     304            // sharing edits with source forces same heap as source (ignore context)
     305
     306            (s.shareEditSet_owns_ulink){ s2.shareEditSet_owns_ulink };
     307
     308            // s2 is logically const but not implementation const
     309            string_res & s2mod = (string_res &) s2;
     310
     311            // insert s after s2 on shareEditSet
     312            s.shareEditSet_next = s2mod.shareEditSet_next;
     313            s.shareEditSet_prev = &s2mod;
     314            s.shareEditSet_next->shareEditSet_prev = &s;
     315            s.shareEditSet_prev->shareEditSet_next = &s;
    257316        }
    258         if ( shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth < editPeer->Handle.s + editPeer->Handle.lnth) {
    259             shareEditSetEndPeer = editPeer;
    260         }
    261     }
    262 
    263     // full string is from start of shareEditSetStartPeer thru end of shareEditSetEndPeer
    264     // `this` occurs in the middle of it, to be replaced
    265     // build up the new text in `pasting`
    266 
    267     string_res pasting = {
    268         shareEditSetStartPeer->Handle.s,                   // start of SES
    269         this.Handle.s - shareEditSetStartPeer->Handle.s }; // length of SES, before this
    270     append( pasting,
    271         buffer,                                            // start of replacement for this
    272         bsize );                                           // length of replacement for this
    273     append( pasting,
    274         this.Handle.s + this.Handle.lnth,                  // start of SES after this
    275         shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth -
    276         (this.Handle.s + this.Handle.lnth) );              // length of SES, after this
    277 
    278     // The above string building can trigger compaction.
    279     // The reference points (that are arguments of the string building) may move during that building.
    280     // From this point on, they are stable.
    281     // So now, capture their values for use in the overlap cases, below.
    282     // Do not factor these definitions with the arguments used above.
     317    }
     318}
     319
     320static void assignEditSet(string_res & this, string_res * shareEditSetStartPeer, string_res * shareEditSetEndPeer,
     321    char * resultSesStart,
     322    size_t resultSesLnth,
     323    HandleNode * resultPadPosition, size_t bsize ) {
    283324
    284325    char * beforeBegin = shareEditSetStartPeer->Handle.s;
     
    290331    size_t oldLnth = this.Handle.lnth;
    291332
    292     this.Handle.s = pasting.Handle.s + beforeLen;
     333    this.Handle.s = resultSesStart + beforeLen;
    293334    this.Handle.lnth = bsize;
    294     MoveThisAfter( this.Handle, pasting.Handle );
     335    if (resultPadPosition)
     336        MoveThisAfter( this.Handle, *resultPadPosition );
    295337
    296338    // adjust all substring string and handle locations, and check if any substring strings are outside the new base string
    297     char *limit = pasting.Handle.s + pasting.Handle.lnth;
     339    char *limit = resultSesStart + resultSesLnth;
    298340    for (string_res * p = this.shareEditSet_next; p != &this; p = p->shareEditSet_next) {
    299         assert (p->Handle.s >= beforeBegin);
     341        verify (p->Handle.s >= beforeBegin);
    300342        if ( p->Handle.s >= afterBegin ) {
    301             assert ( p->Handle.s <= afterBegin + afterLen );
    302             assert ( p->Handle.s + p->Handle.lnth <= afterBegin + afterLen );
     343            verify ( p->Handle.s <= afterBegin + afterLen );
     344            verify ( p->Handle.s + p->Handle.lnth <= afterBegin + afterLen );
    303345            // p starts after the edit
    304346            // take start and end as end-anchored
     
    318360            } else {
    319361                // p ends after the edit
    320                 assert ( p->Handle.s + p->Handle.lnth <= afterBegin + afterLen );
     362                verify ( p->Handle.s + p->Handle.lnth <= afterBegin + afterLen );
    321363                // take end as end-anchored
    322364                // stretch-shrink p according to the edit
     
    326368            // take start as start-anchored
    327369            size_t startOffsetFromStart = p->Handle.s - beforeBegin;
    328             p->Handle.s = pasting.Handle.s + startOffsetFromStart;
     370            p->Handle.s = resultSesStart + startOffsetFromStart;
    329371        } else {
    330             assert ( p->Handle.s < afterBegin );
     372            verify ( p->Handle.s < afterBegin );
    331373            // p starts during the edit
    332             assert( p->Handle.s + p->Handle.lnth >= beforeBegin + beforeLen );
     374            verify( p->Handle.s + p->Handle.lnth >= beforeBegin + beforeLen );
    333375            if ( p->Handle.s + p->Handle.lnth < afterBegin ) {
    334376                // p ends during the edit; p does not include the last character replaced
     
    344386            }
    345387        }
    346         MoveThisAfter( p->Handle, pasting.Handle );     // move substring handle to maintain sorted order by string position
    347     }
    348 }
    349 
    350 void ?=?(string_res &s, const char* other) {
    351     assign(s, other, strlen(other));
    352 }
    353 
    354 void ?=?(string_res &s, char other) {
    355     assign(s, &other, 1);
     388        if (resultPadPosition)
     389            MoveThisAfter( p->Handle, *resultPadPosition );     // move substring handle to maintain sorted order by string position
     390    }
     391}
     392
     393static string_res & assign_(string_res &this, const char* buffer, size_t bsize, const string_res & valSrc) {
     394
     395    // traverse the incumbent share-edit set (SES) to recover the range of a base string to which `this` belongs
     396    string_res * shareEditSetStartPeer = & this;
     397    string_res * shareEditSetEndPeer = & this;
     398    for (string_res * editPeer = this.shareEditSet_next; editPeer != &this; editPeer = editPeer->shareEditSet_next) {
     399        if ( editPeer->Handle.s < shareEditSetStartPeer->Handle.s ) {
     400            shareEditSetStartPeer = editPeer;
     401        }
     402        if ( shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth < editPeer->Handle.s + editPeer->Handle.lnth) {
     403            shareEditSetEndPeer = editPeer;
     404        }
     405    }
     406
     407    verify( shareEditSetEndPeer->Handle.s >= shareEditSetStartPeer->Handle.s );
     408    size_t origEditSetLength = shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth - shareEditSetStartPeer->Handle.s;
     409    verify( origEditSetLength >= this.Handle.lnth );
     410
     411    if ( this.shareEditSet_owns_ulink ) {                 // assigning to private context
     412        // ok to overwrite old value within LHS
     413        char * prefixStartOrig = shareEditSetStartPeer->Handle.s;
     414        int prefixLen = this.Handle.s - prefixStartOrig;
     415        char * suffixStartOrig = this.Handle.s + this.Handle.lnth;
     416        int suffixLen = shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth - suffixStartOrig;
     417
     418        int delta = bsize - this.Handle.lnth;
     419        if ( char * oldBytes = VbyteTryAdjustLast( *this.Handle.ulink, delta ) ) {
     420            // growing: copy from old to new
     421            char * dest = VbyteAlloc( *this.Handle.ulink, origEditSetLength + delta );
     422            char *destCursor = dest;  memcpy(destCursor, prefixStartOrig, prefixLen);
     423            destCursor += prefixLen;  memcpy(destCursor, buffer         , bsize    );
     424            destCursor += bsize;      memcpy(destCursor, suffixStartOrig, suffixLen);
     425            assignEditSet(this, shareEditSetStartPeer, shareEditSetEndPeer,
     426                dest,
     427                origEditSetLength + delta,
     428                0p, bsize);
     429            free( oldBytes );
     430        } else {
      431            // room is already allocated in-place: bubble suffix and overwrite middle
     432            memmove( suffixStartOrig + delta, suffixStartOrig, suffixLen );
     433            memcpy( this.Handle.s, buffer, bsize );
     434
     435            assignEditSet(this, shareEditSetStartPeer, shareEditSetEndPeer,
     436                shareEditSetStartPeer->Handle.s,
     437                origEditSetLength + delta,
     438                0p, bsize);
     439        }
     440
     441    } else if (                                           // assigning to shared context
     442        this.Handle.lnth == origEditSetLength &&          // overwriting entire run of SES
     443        & valSrc &&                                       // sourcing from a managed string
     444        valSrc.Handle.ulink == this.Handle.ulink  ) {     // sourcing from same heap
     445
     446        // SES's result will only use characters from the source string => reuse source
     447        assignEditSet(this, shareEditSetStartPeer, shareEditSetEndPeer,
     448            valSrc.Handle.s,
     449            valSrc.Handle.lnth,
     450            &((string_res&)valSrc).Handle, bsize);
     451       
     452    } else {
     453        // overwriting a proper substring of some string: mash characters from old and new together (copy on write)
     454        // OR we are importing characters: need to copy eagerly (can't refer to source)
     455
     456        // full string is from start of shareEditSetStartPeer thru end of shareEditSetEndPeer
     457        // `this` occurs in the middle of it, to be replaced
     458        // build up the new text in `pasting`
     459
     460        string_res pasting = {
     461            * this.Handle.ulink,                               // maintain same heap, regardless of context
     462            shareEditSetStartPeer->Handle.s,                   // start of SES
     463            this.Handle.s - shareEditSetStartPeer->Handle.s }; // length of SES, before this
     464        append( pasting,
     465            buffer,                                            // start of replacement for this
     466            bsize );                                           // length of replacement for this
     467        append( pasting,
     468            this.Handle.s + this.Handle.lnth,                  // start of SES after this
     469            shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth -
     470            (this.Handle.s + this.Handle.lnth) );              // length of SES, after this
     471
     472        // The above string building can trigger compaction.
     473        // The reference points (that are arguments of the string building) may move during that building.
     474        // From this point on, they are stable.
     475
     476        assignEditSet(this, shareEditSetStartPeer, shareEditSetEndPeer,
     477            pasting.Handle.s,
     478            pasting.Handle.lnth,
     479            &pasting.Handle, bsize);
     480    }
     481
     482    return this;
     483}
     484
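
A usage-level sketch of the share-edit behaviour that assign_ above implements: editing a SHARE_EDITS substring rewrites that range of its base string. The constructor and assign signatures are taken from this changeset; the concrete before/after text is illustrative only.

    void demo() {
        string_res base = { "hello world", 11 };
        string_res word = { base, SHARE_EDITS, 0, 5 };  // aliases the "hello" range of base
        assign( word, "howdy", 5 );                     // base now reads "howdy world"
    }
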
     485string_res & assign(string_res &this, const char* buffer, size_t bsize) {
     486    return assign_(this, buffer, bsize, *0p);
     487}
     488
     489string_res & ?=?(string_res &s, char other) {
     490    return assign(s, &other, 1);
    356491}
    357492
    358493// Copy assignment operator
    359 void ?=?(string_res & this, const string_res & rhs) with( this ) {
    360     assign(this, rhs.Handle.s, rhs.Handle.lnth);
    361 }
    362 
    363 void ?=?(string_res & this, string_res & rhs) with( this ) {
     494string_res & ?=?(string_res & this, const string_res & rhs) with( this ) {
     495    return assign_(this, rhs.Handle.s, rhs.Handle.lnth, rhs);
     496}
     497
     498string_res & ?=?(string_res & this, string_res & rhs) with( this ) {
    364499    const string_res & rhs2 = rhs;
    365     this = rhs2;
     500    return this = rhs2;
    366501}
    367502
     
    374509    s.shareEditSet_prev->shareEditSet_next = s.shareEditSet_next;
    375510    s.shareEditSet_next->shareEditSet_prev = s.shareEditSet_prev;
    376     s.shareEditSet_next = &s;
    377     s.shareEditSet_prev = &s;
     511    // s.shareEditSet_next = &s;
     512    // s.shareEditSet_prev = &s;
     513
     514    if (shareEditSet_owns_ulink && s.shareEditSet_next == &s) { // last one out
     515        delete( s.Handle.ulink );
     516    }
    378517}
    379518
     
    387526}
    388527
     528void assignAt(const string_res &s, size_t index, char val) {
     529    string_res editZone = { s, SHARE_EDITS, index, index+1 };
     530    assign(editZone, &val, 1);
     531}
     532
    389533
    390534///////////////////////////////////////////////////////////////////
     
    392536
    393537void append(string_res &str1, const char * buffer, size_t bsize) {
    394     size_t clnth = size(str1) + bsize;
    395     if ( str1.Handle.s + size(str1) == buffer ) { // already juxtapose ?
     538    size_t clnth = str1.Handle.lnth + bsize;
     539    if ( str1.Handle.s + str1.Handle.lnth == buffer ) { // already juxtapose ?
    396540        // no-op
    397541    } else {                                            // must copy some text
    398         if ( str1.Handle.s + size(str1) == VbyteAlloc(HeapArea, 0) ) { // str1 at end of string area ?
    399             VbyteAlloc(HeapArea, bsize); // create room for 2nd part at the end of string area
     542        if ( str1.Handle.s + str1.Handle.lnth == VbyteAlloc(*str1.Handle.ulink, 0) ) { // str1 at end of string area ?
     543            VbyteAlloc( *str1.Handle.ulink, bsize ); // create room for 2nd part at the end of string area
    400544        } else {                                        // copy the two parts
    401             char * str1oldBuf = str1.Handle.s;
    402             str1.Handle.s = VbyteAlloc( HeapArea, clnth );
    403             ByteCopy( HeapArea, str1.Handle.s, 0, str1.Handle.lnth, str1oldBuf, 0, str1.Handle.lnth);
     545            char * str1newBuf = VbyteAlloc( *str1.Handle.ulink, clnth );
      546            char * str1oldBuf = str1.Handle.s;  // must read after VbyteAlloc call in case it gc's
     547            str1.Handle.s = str1newBuf;
     548            memcpy( str1.Handle.s, str1oldBuf,  str1.Handle.lnth );
    404549        } // if
    405         ByteCopy( HeapArea, str1.Handle.s, str1.Handle.lnth, bsize, (char*)buffer, 0, (int)bsize);
    406         //       VbyteHeap & this, char *Dst, int DstStart, int DstLnth, char *Src, int SrcStart, int SrcLnth
     550        memcpy( str1.Handle.s + str1.Handle.lnth, buffer, bsize );
    407551    } // if
    408552    str1.Handle.lnth = clnth;
     
    417561}
    418562
    419 void ?+=?(string_res &s, const char* other) {
    420     append( s, other, strlen(other) );
    421 }
    422563
    423564
     
    429570
    430571bool ?==?(const string_res &s1, const string_res &s2) {
    431     return ByteCmp( HeapArea, s1.Handle.s, 0, s1.Handle.lnth, s2.Handle.s, 0, s2.Handle.lnth) == 0;
     572    return ByteCmp( s1.Handle.s, 0, s1.Handle.lnth, s2.Handle.s, 0, s2.Handle.lnth) == 0;
    432573}
    433574
     
    455596
    456597int find(const string_res &s, char search) {
    457     for (i; size(s)) {
    458         if (s[i] == search) return i;
    459     }
    460     return size(s);
    461 }
     598    return findFrom(s, 0, search);
     599}
     600
     601int findFrom(const string_res &s, size_t fromPos, char search) {
      602    // FIXME: This particular overload (find of single char) is optimized to use memchr.
     603    // The general overload (find of string, memchr applying to its first character) and `contains` should be adjusted to match.
     604    char * searchFrom = s.Handle.s + fromPos;
     605    size_t searchLnth = s.Handle.lnth - fromPos;
     606    int searchVal = search;
     607    char * foundAt = (char *) memchr(searchFrom, searchVal, searchLnth);
     608    if (foundAt == 0p) return s.Handle.lnth;
     609    else return foundAt - s.Handle.s;
     610}
     611
     612int find(const string_res &s, const string_res &search) {
     613    return findFrom(s, 0, search);
     614}
     615
     616int findFrom(const string_res &s, size_t fromPos, const string_res &search) {
     617    return findFrom(s, fromPos, search.Handle.s, search.Handle.lnth);
     618}
     619
     620int find(const string_res &s, const char* search) {
     621    return findFrom(s, 0, search);
     622}
     623int findFrom(const string_res &s, size_t fromPos, const char* search) {
     624    return findFrom(s, fromPos, search, strlen(search));
     625}
     626
     627int find(const string_res &s, const char* search, size_t searchsize) {
     628    return findFrom(s, 0, search, searchsize);
     629}
     630
     631int findFrom(const string_res &s, size_t fromPos, const char* search, size_t searchsize) {
    462632
    463633    /* Remaining implementations essentially ported from Sunjay's work */
    464634
    465 int find(const string_res &s, const string_res &search) {
    466     return find(s, search.Handle.s, search.Handle.lnth);
    467 }
    468 
    469 int find(const string_res &s, const char* search) {
    470     return find(s, search, strlen(search));
    471 }
    472 
    473 int find(const string_res &s, const char* search, size_t searchsize) {
     635
    474636    // FIXME: This is a naive algorithm. We probably want to switch to something
    475637    // like Boyer-Moore in the future.
     
    481643    }
    482644
    483     for (size_t i = 0; i < s.Handle.lnth; i++) {
     645    for (size_t i = fromPos; i < s.Handle.lnth; i++) {
    484646        size_t remaining = s.Handle.lnth - i;
    485647        // Never going to find the search string if the remaining string is
     
    596758// Add a new HandleNode node n after the current HandleNode node.
    597759
    598 static inline void AddThisAfter( HandleNode & this, HandleNode & n ) with(this) {
     760static void AddThisAfter( HandleNode & this, HandleNode & n ) with(this) {
    599761#ifdef VbyteDebug
    600762    serr | "enter:AddThisAfter, this:" | &this | " n:" | &n;
    601763#endif // VbyteDebug
     764    // Performance note: we are on the critical path here. MB has ensured that the verifies don't contribute to runtime (are compiled away, like they're supposed to be).
     765    verify( n.ulink != 0p );
     766    verify( this.ulink == n.ulink );
    602767    flink = n.flink;
    603768    blink = &n;
     
    624789// Delete the current HandleNode node.
    625790
    626 static inline void DeleteNode( HandleNode & this ) with(this) {
     791static void DeleteNode( HandleNode & this ) with(this) {
    627792#ifdef VbyteDebug
    628793    serr | "enter:DeleteNode, this:" | &this;
     
    638803
    639804// Allocates specified storage for a string from byte-string area. If not enough space remains to perform the
    640 // allocation, the garbage collection routine is called and a second attempt is made to allocate the space. If the
    641 // second attempt fails, a further attempt is made to create a new, larger byte-string area.
    642 
    643 static inline char * VbyteAlloc( VbyteHeap & this, int size ) with(this) {
     805// allocation, the garbage collection routine is called.
     806
     807static char * VbyteAlloc( VbyteHeap & this, int size ) with(this) {
    644808#ifdef VbyteDebug
    645809    serr | "enter:VbyteAlloc, size:" | size;
     
    650814    NoBytes = ( uintptr_t )EndVbyte + size;
    651815    if ( NoBytes > ( uintptr_t )ExtVbyte ) {            // enough room for new byte-string ?
    652                 garbage( this );                                        // fire up the garbage collector
    653                 NoBytes = ( uintptr_t )EndVbyte + size;         // try again
    654                 if ( NoBytes > ( uintptr_t )ExtVbyte ) {        // enough room for new byte-string ?
    655 assert( 0 && "need to implement actual growth" );
    656                         // extend( size );                              // extend the byte-string area
    657                 } // if
      816                garbage( this, size );                                  // fire up the garbage collector
     817                verify( (( uintptr_t )EndVbyte + size) <= ( uintptr_t )ExtVbyte  && "garbage run did not free up required space" );
    658818    } // if
    659819    r = EndVbyte;
     
    666826
    667827
     828// Adjusts the last allocation in this heap by delta bytes, or resets this heap to be able to offer
     829// new allocations of its original size + delta bytes. Positive delta means bigger;
     830// negative means smaller.  A null return indicates that the original heap location has room for
     831// the requested growth.  A non-null return indicates that copying to a new location is required
     832// but has not been done; the returned value is the old heap storage location; `this` heap is
      833// modified to reference the new location.  In the copy-required case, the caller should use
     834// VbyteAlloc to claim the new space, while doing optimal copying from old to new, then free old.
     835
     836static char * VbyteTryAdjustLast( VbyteHeap & this, int delta ) with(this) {
     837
     838    if ( ( uintptr_t )EndVbyte + delta <= ( uintptr_t )ExtVbyte ) {
     839        // room available
     840        EndVbyte += delta;
     841        return 0p;
     842    }
     843
     844    char *oldBytes = StartVbyte;
     845
     846    NoOfExtensions += 1;
     847    CurrSize *= 2;
     848    StartVbyte = EndVbyte = TEMP_ALLOC(char, CurrSize);
     849    ExtVbyte = StartVbyte + CurrSize;
     850
     851    return oldBytes;
     852}
     853
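To make the adjust-last contract above concrete, here is a minimal sketch (plain C/C++ style, not part of the change) of the calling pattern the comment describes. VbyteTryAdjustLast, VbyteAlloc, VbyteHeap, and the HandleNode fields s and lnth come from this file; the helper growLast and the delta parameter are assumed purely for illustration.

    #include <cstring>   // memcpy
    #include <cstdlib>   // free

    // Grow the most recent allocation (held by handle h) by delta bytes.
    static void growLast( VbyteHeap & heap, HandleNode & h, int delta ) {
        char * oldHeap = VbyteTryAdjustLast( heap, delta );
        if ( oldHeap != nullptr ) {                     // heap moved: copy is required
            char * newPlace = VbyteAlloc( heap, h.lnth + delta ); // claim space in the new area
            memcpy( newPlace, h.s, h.lnth );            // copy the live bytes from the old area
            h.s = newPlace;                             // repoint the handle at the new storage
            free( oldHeap );                            // release the old heap storage
        }
        h.lnth += delta;                                // record the larger length
    }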
     854
    668855// Move an existing HandleNode node h somewhere after the current HandleNode node so that it is in ascending order by
    669856// the address in the byte string area.
    670857
    671 static inline void MoveThisAfter( HandleNode & this, const HandleNode  & h ) with(this) {
     858static void MoveThisAfter( HandleNode & this, const HandleNode  & h ) with(this) {
    672859#ifdef VbyteDebug
    673860    serr | "enter:MoveThisAfter, this:" | & this | " h:" | & h;
    674861#endif // VbyteDebug
     862    verify( h.ulink != 0p );
     863    verify( this.ulink == h.ulink );
    675864    if ( s < h.s ) {                                    // check argument values
    676865                // serr | "VbyteSM: Error - Cannot move byte string starting at:" | s | " after byte string starting at:"
    677866                //      | ( h->s ) | " and keep handles in ascending order";
    678867                // exit(-1 );
    679                 assert( 0 && "VbyteSM: Error - Cannot move byte strings as requested and keep handles in ascending order");
     868                verify( 0 && "VbyteSM: Error - Cannot move byte strings as requested and keep handles in ascending order");
    680869    } // if
    681870
     
    709898//######################### VbyteHeap #########################
    710899
    711 // Move characters from one location in the byte-string area to another. The routine handles the following situations:
    712 //
    713 // if the |Src| > |Dst| => truncate
    714 // if the |Dst| > |Src| => pad Dst with blanks
    715 
    716 void ByteCopy( VbyteHeap & this, char *Dst, int DstStart, int DstLnth, char *Src, int SrcStart, int SrcLnth ) {
    717     for ( int i = 0; i < DstLnth; i += 1 ) {
    718       if ( i == SrcLnth ) {                             // |Dst| > |Src|
    719             for ( ; i < DstLnth; i += 1 ) {             // pad Dst with blanks
    720                 Dst[DstStart + i] = ' ';
    721             } // for
    722             break;
    723         } // exit
    724         Dst[DstStart + i] = Src[SrcStart + i];
    725     } // for
    726 } // ByteCopy
    727 
    728900// Compare two byte strings in the byte-string area. The routine returns the following values:
    729901//
     
    732904// -1 => Src1-byte-string < Src2-byte-string
    733905
    734 int ByteCmp( VbyteHeap & this, char *Src1, int Src1Start, int Src1Lnth, char *Src2, int Src2Start, int Src2Lnth )  with(this) {
     906int ByteCmp( char *Src1, int Src1Start, int Src1Lnth, char *Src2, int Src2Start, int Src2Lnth ) {
    735907#ifdef VbyteDebug
    736908    serr | "enter:ByteCmp, Src1Start:" | Src1Start | " Src1Lnth:" | Src1Lnth | " Src2Start:" | Src2Start | " Src2Lnth:" | Src2Lnth;
     
    789961    h = Header.flink;                                   // ignore header node
    790962    for (;;) {
    791                 ByteCopy( this, EndVbyte, 0, h->lnth, h->s, 0, h->lnth );
     963                memmove( EndVbyte, h->s, h->lnth );
    792964                obase = h->s;
    793965                h->s = EndVbyte;
     
    810982
    811983
     984static double heap_expansion_freespace_threshold = 0.1;  // default inherited from prior work: expand heap when less than 10% "free" (i.e. garbage)
     985                                                         // probably an unreasonable default, but need to assess early-round tests on changing it
     986
     987void TUNING_set_string_heap_liveness_threshold( double val ) {
     988    heap_expansion_freespace_threshold = 1.0 - val;
     989}
     990
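A brief, hedged illustration of the tuning hook above; the 0.75 is an arbitrary example value, not a recommendation made by this change.

    // Expand the string heap once live data exceeds 75% of its capacity,
    // i.e. once less than 25% of the area is reclaimable garbage.
    TUNING_set_string_heap_liveness_threshold( 0.75 );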
     991
     812992// Garbage determines the amount of free space left in the heap and then reduces, leaves it the same, or extends the size of
    813993// the heap.  The heap is then compacted in the existing heap or into the newly allocated heap.
    814994
    815 void garbage(VbyteHeap & this ) with(this) {
     995void garbage(VbyteHeap & this, int minreq ) with(this) {
    816996#ifdef VbyteDebug
    817997    serr | "enter:garbage";
     
    8371017    AmountFree = ( uintptr_t )ExtVbyte - ( uintptr_t )StartVbyte - AmountUsed;
    8381018   
    839     if ( AmountFree < ( int )( CurrSize * 0.1 )) {      // free space less than 10% ?
    840 
    841 assert( 0 && "need to implement actual growth" );
    842 //              extend( CurrSize );                             // extend the heap
     1019    if ( ( double ) AmountFree < ( CurrSize * heap_expansion_freespace_threshold ) || AmountFree < minreq ) {   // free space less than threshold or not enough to serve cur request
     1020
     1021                extend( this, max( CurrSize, minreq ) );                                // extend the heap
    8431022
    8441023                        //  Peter says, "This needs work before it should be used."
     
    8461025                        //              reduce(( AmountFree / CurrSize - 3 ) * CurrSize ); // reduce the memory
    8471026
    848     } // if
    849     compaction(this);                                   // compact the byte area, in the same or new heap area
     1027        // `extend` implies a `compaction` during the copy
     1028
     1029    } else {
     1030        compaction(this);                                       // in-place
     1031    }// if
    8501032#ifdef VbyteDebug
    8511033    {
     
    8671049#undef VbyteDebug
    8681050
    869 //WIP
    870 #if 0
    8711051
    8721052
     
    8741054// area is deleted.
    8751055
    876 void VbyteHeap::extend( int size ) {
     1056void extend( VbyteHeap & this, int size ) with (this) {
    8771057#ifdef VbyteDebug
    8781058    serr | "enter:extend, size:" | size;
     
    8841064   
    8851065    CurrSize += size > InitSize ? size : InitSize;      // minimum extension, initial size
    886     StartVbyte = EndVbyte = new char[CurrSize];
     1066    StartVbyte = EndVbyte = TEMP_ALLOC(char, CurrSize);
    8871067    ExtVbyte = (void *)( StartVbyte + CurrSize );
    888     compaction();                                       // copy from old heap to new & adjust pointers to new heap
    889     delete OldStartVbyte;                               // release old heap
     1068    compaction(this);                                   // copy from old heap to new & adjust pointers to new heap
     1069    free( OldStartVbyte );                              // release old heap
    8901070#ifdef VbyteDebug
    8911071    serr | "exit:extend, CurrSize:" | CurrSize;
     
    8931073} // extend
    8941074
     1075//WIP
     1076#if 0
    8951077
    8961078// Extend the size of the byte-string area by creating a new area and copying the old area into it. The old byte-string
  • libcfa/src/containers/string_res.hfa

    ref3c383 rd672350  
    1717
    1818#include <fstream.hfa>
     19#include <string.h>    // e.g. strlen
    1920
    2021   
     
    2728    HandleNode *flink;                                  // forward link
    2829    HandleNode *blink;                                  // backward link
     30    VbyteHeap *ulink;                   // upward link
    2931
    3032    char *s;                                            // pointer to byte string
     
    3234}; // HandleNode
    3335
    34 void ?{}( HandleNode & );                       // constructor for header node
    35 
    36 void ?{}( HandleNode &, VbyteHeap & );          // constructor for nodes in the handle list
    37 void ^?{}( HandleNode & );                      // destructor for handle nodes
    38 
    39 extern VbyteHeap * DEBUG_string_heap;
     36VbyteHeap * DEBUG_string_heap();
     37size_t DEBUG_string_bytes_in_heap( VbyteHeap * heap );
    4038size_t DEBUG_string_bytes_avail_until_gc( VbyteHeap * heap );
    4139const char * DEBUG_string_heap_start( VbyteHeap * heap );
    4240
     41void TUNING_set_string_heap_liveness_threshold( double val );
    4342
    4443//######################### String #########################
     
    4746struct string_res {
    4847    HandleNode Handle; // chars, start, end, global neighbours
     48    bool shareEditSet_owns_ulink;
    4949    string_res * shareEditSet_prev;
    5050    string_res * shareEditSet_next;
     
    7474// Constructors, Assignment Operators, Destructor
    7575void ?{}(string_res &s); // empty string
    76 void ?{}(string_res &s, const char* initial); // copy from string literal (NULL-terminated)
    7776void ?{}(string_res &s, const char* buffer, size_t bsize); // copy specific length from buffer
     77static inline void ?{}(string_res &s, const char* rhs) { // copy from string literal (NULL-terminated)
     78    (s){ rhs, strlen(rhs) };
     79}
    7880
    7981void ?{}(string_res &s, const string_res & s2) = void;
     
    8688}
    8789
    88 void assign(string_res &s, const char* buffer, size_t bsize); // copy specific length from buffer
    89 void ?=?(string_res &s, const char* other); // copy from string literal (NULL-terminated)
    90 void ?=?(string_res &s, const string_res &other);
    91 void ?=?(string_res &s, string_res &other);
    92 void ?=?(string_res &s, char other);
     90string_res & assign(string_res &s, const char* buffer, size_t bsize); // copy specific length from buffer
     91static inline string_res & ?=?(string_res &s, const char* other) {  // copy from string literal (NULL-terminated)
     92    return assign(s, other, strlen(other));
     93}
     94string_res & ?=?(string_res &s, const string_res &other);
     95string_res & ?=?(string_res &s, string_res &other);
     96string_res & ?=?(string_res &s, char other);
    9397
    9498void ^?{}(string_res &s);
     
    99103
    100104// Concatenation
     105void append(string_res &s, const char* buffer, size_t bsize);
    101106void ?+=?(string_res &s, char other); // append a character
    102107void ?+=?(string_res &s, const string_res &s2); // append-concatenate to first string
    103 void ?+=?(string_res &s, const char* other);
    104 void append(string_res &s, const char* buffer, size_t bsize);
     108static inline void ?+=?(string_res &s, const char* other) {
     109    append( s, other, strlen(other) );
     110}
    105111
    106112// Character access
     113void assignAt(const string_res &s, size_t index, char val);
    107114char ?[?](const string_res &s, size_t index); // Mike changed to ret by val from Sunjay's ref, to match Peter's
    108115//char codePointAt(const string_res &s, size_t index); // revisit under Unicode
     
    121128int find(const string_res &s, const char* search);
    122129int find(const string_res &s, const char* search, size_t searchsize);
     130
     131int findFrom(const string_res &s, size_t fromPos, char search);
     132int findFrom(const string_res &s, size_t fromPos, const string_res &search);
     133int findFrom(const string_res &s, size_t fromPos, const char* search);
     134int findFrom(const string_res &s, size_t fromPos, const char* search, size_t searchsize);
    123135
    124136bool includes(const string_res &s, const string_res &search);
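For orientation, a small sketch of the reshaped interface above using the plain named functions (string_res, assign, append, and findFrom are declared in this header; the variable names and literal lengths are illustrative). The CFA operator forms ?=? and ?+=? simply forward to assign and append, as the inline definitions above show.

    string_res s;                        // empty string
    assign( s, "hello", 5 );             // what  s = "hello"  expands to
    append( s, ", world", 7 );           // what  s += ", world"  expands to
    int at = findFrom( s, 1, "o" );      // search starting at offset 1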
  • libcfa/src/math.trait.hfa

    ref3c383 rd672350  
    1616#pragma once
    1717
    18 trait Not( T ) {
    19         void ?{}( T &, zero_t );
    20         int !?( T );
     18trait Not( U ) {
     19        void ?{}( U &, zero_t );
     20        int !?( U );
    2121}; // Not
    2222
     
    2626}; // Equality
    2727
    28 trait Relational( T | Equality( T ) ) {
    29         int ?<?( T, T );
    30         int ?<=?( T, T );
    31         int ?>?( T, T );
    32         int ?>=?( T, T );
     28trait Relational( U | Equality( U ) ) {
     29        int ?<?( U, U );
     30        int ?<=?( U, U );
     31        int ?>?( U, U );
     32        int ?>=?( U, U );
    3333}; // Relational
    3434
     
    3939}; // Signed
    4040
    41 trait Additive( T | Signed( T ) ) {
    42         T ?+?( T, T );
    43         T ?-?( T, T );
    44         T ?+=?( T &, T );
    45         T ?-=?( T &, T );
     41trait Additive( U | Signed( U ) ) {
     42        U ?+?( U, U );
     43        U ?-?( U, U );
     44        U ?+=?( U &, U );
     45        U ?-=?( U &, U );
    4646}; // Additive
    4747
     
    4949        void ?{}( T &, one_t );
    5050        // T ?++( T & );
    51         // T ++?( T &);
     51        // T ++?( T & );
    5252        // T ?--( T & );
    5353        // T --?( T & );
    5454}; // Incdec
    5555
    56 trait Multiplicative( T | Incdec( T ) ) {
    57         T ?*?( T, T );
    58         T ?/?( T, T );
    59         T ?%?( T, T );
    60         T ?/=?( T &, T );
     56trait Multiplicative( U | Incdec( U ) ) {
     57        U ?*?( U, U );
     58        U ?/?( U, U );
     59        U ?%?( U, U );
     60        U ?/=?( U &, U );
    6161}; // Multiplicative
    6262
  • src/AST/Convert.cpp

    ref3c383 rd672350  
    99// Author           : Thierry Delisle
    1010// Created On       : Thu May 09 15::37::05 2019
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed Feb  2 13:19:22 2022
    13 // Update Count     : 41
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Wed Mar 16 15:01:00 2022
     13// Update Count     : 42
    1414//
    1515
     
    4949//================================================================================================
    5050namespace ast {
    51 
    52 // This is to preserve the FindSpecialDecls hack. It does not (and perhaps should not)
    53 // allow us to use the same stratagy in the new ast.
    54 // xxx - since convert back pass works, this concern seems to be unnecessary.
    55 
    56 // these need to be accessed in new FixInit now
    57 ast::ptr<ast::Type> sizeType = nullptr;
    58 const ast::FunctionDecl * dereferenceOperator = nullptr;
    59 const ast::StructDecl   * dtorStruct = nullptr;
    60 const ast::FunctionDecl * dtorStructDestroy = nullptr;
     51// This is the shared local information used by ConverterNewToOld and
     52// ConverterOldToNew to update the global information in the two versions.
     53
     54static ast::ptr<ast::Type> sizeType = nullptr;
     55static const ast::FunctionDecl * dereferenceOperator = nullptr;
     56static const ast::StructDecl   * dtorStruct = nullptr;
     57static const ast::FunctionDecl * dtorStructDestroy = nullptr;
    6158
    6259}
  • src/AST/Decl.cpp

    ref3c383 rd672350  
    3939        if ( uniqueId ) return;  // ensure only set once
    4040        uniqueId = ++lastUniqueId;
    41         idMap[ uniqueId ] = this;
     41        // The extra readonly pointer is causing some reference counting issues.
     42        // idMap[ uniqueId ] = this;
    4243}
    4344
    4445readonly<Decl> Decl::fromId( UniqueId id ) {
     46        // Right now this map is always empty, so don't use it.
     47        assert( false );
    4548        IdMapType::const_iterator i = idMap.find( id );
    4649        if ( i != idMap.end() ) return i->second;
  • src/AST/Fwd.hpp

    ref3c383 rd672350  
    141141
    142142class TranslationUnit;
    143 // TODO: Get from the TranslationUnit:
    144 extern ptr<Type> sizeType;
    145 extern const FunctionDecl * dereferenceOperator;
    146 extern const StructDecl   * dtorStruct;
    147 extern const FunctionDecl * dtorStructDestroy;
     143class TranslationGlobal;
    148144
    149145}
  • src/AST/GenericSubstitution.cpp

    ref3c383 rd672350  
    4545                        visit_children = false;
    4646                        const AggregateDecl * aggr = ty->aggr();
    47                         sub = TypeSubstitution{ aggr->params.begin(), aggr->params.end(), ty->params.begin() };
     47                        sub = TypeSubstitution( aggr->params, ty->params );
    4848                }
    4949
  • src/AST/TranslationUnit.hpp

    ref3c383 rd672350  
    1010// Created On       : Tue Jun 11 15:30:00 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Tue Jun 11 15:42:00 2019
    13 // Update Count     : 0
     12// Last Modified On : Tue Mar 11 11:19:00 2022
     13// Update Count     : 1
    1414//
    1515
     
    2323namespace ast {
    2424
     25class TranslationGlobal {
     26public:
     27        std::map< UniqueId, Decl * > idMap;
     28
     29        ptr<Type> sizeType;
     30        const FunctionDecl * dereference;
     31        const StructDecl * dtorStruct;
     32        const FunctionDecl * dtorDestroy;
     33};
     34
    2535class TranslationUnit {
    2636public:
    2737        std::list< ptr< Decl > > decls;
    28 
    29         struct Global {
    30                 std::map< UniqueId, Decl * > idMap;
    31 
    32                 ptr<Type> sizeType;
    33                 const FunctionDecl * dereference;
    34                 const StructDecl * dtorStruct;
    35                 const FunctionDecl * dtorDestroy;
    36         } global;
     38        TranslationGlobal global;
    3739};
    3840
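A one-line sketch of the access pattern this refactoring enables (unit is an assumed TranslationUnit instance; global and its fields come from the class above), replacing the file-scope externs removed from Fwd.hpp:

    const ast::FunctionDecl * deref = unit.global.dereference;   // was the extern dereferenceOperator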
  • src/AST/TypeSubstitution.hpp

    ref3c383 rd672350  
    3737  public:
    3838        TypeSubstitution();
     39        template< typename FormalContainer, typename ActualContainer >
     40        TypeSubstitution( FormalContainer formals, ActualContainer actuals );
    3941        template< typename FormalIterator, typename ActualIterator >
    4042        TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin );
     
    7678        bool empty() const;
    7779
     80        template< typename FormalContainer, typename ActualContainer >
     81        void addAll( FormalContainer formals, ActualContainer actuals );
    7882        template< typename FormalIterator, typename ActualIterator >
    79         void add( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin );
     83        void addAll( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin );
    8084
    8185        /// create a new TypeSubstitution using bindings from env containing all of the type variables in expr
     
    112116};
    113117
     118template< typename FormalContainer, typename ActualContainer >
     119TypeSubstitution::TypeSubstitution( FormalContainer formals, ActualContainer actuals ) {
     120        assert( formals.size() == actuals.size() );
     121        addAll( formals.begin(), formals.end(), actuals.begin() );
     122}
     123
     124template< typename FormalIterator, typename ActualIterator >
     125TypeSubstitution::TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
     126        addAll( formalBegin, formalEnd, actualBegin );
     127}
     128
     129template< typename FormalContainer, typename ActualContainer >
     130void TypeSubstitution::addAll( FormalContainer formals, ActualContainer actuals ) {
     131        assert( formals.size() == actuals.size() );
     132        addAll( formals.begin(), formals.end(), actuals.begin() );
     133}
     134
    114135// this is the only place where type parameters outside a function formal may be substituted.
    115136template< typename FormalIterator, typename ActualIterator >
    116 void TypeSubstitution::add( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
     137void TypeSubstitution::addAll( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
    117138        // FormalIterator points to a TypeDecl
    118139        // ActualIterator points to a Type
     
    129150                        } // if
    130151                } else {
    131                        
     152                        // Is this an error?
    132153                } // if
    133154        } // for
    134155}
    135 
    136 
    137 
    138 template< typename FormalIterator, typename ActualIterator >
    139 TypeSubstitution::TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
    140         add( formalBegin, formalEnd, actualBegin );
    141 }
    142 
    143156
    144157} // namespace ast
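A short sketch of the new container-based interface above, mirroring the call already shown in GenericSubstitution.cpp; aggr and ty are assumed to be an aggregate declaration and a matching instance type, and moreFormals/moreActuals are hypothetical extra containers.

    ast::TypeSubstitution sub( aggr->params, ty->params );   // sizes are asserted equal
    sub.addAll( moreFormals, moreActuals );                   // hypothetical: append further bindings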
  • src/AST/module.mk

    ref3c383 rd672350  
    1616
    1717SRC_AST = \
    18         AST/AssertAcyclic.cpp \
    19         AST/AssertAcyclic.hpp \
    2018        AST/Attribute.cpp \
    2119        AST/Attribute.hpp \
     
    6462        AST/TypeSubstitution.cpp \
    6563        AST/TypeSubstitution.hpp \
     64        AST/Util.cpp \
     65        AST/Util.hpp \
    6666        AST/Visitor.hpp
    6767
  • src/Common/CodeLocationTools.cpp

    ref3c383 rd672350  
    99// Author           : Andrew Beach
    1010// Created On       : Fri Dec  4 15:42:00 2020
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue Feb  1 09:14:39 2022
    13 // Update Count     : 3
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Mon Mar 14 15:14:00 2022
     13// Update Count     : 4
    1414//
    1515
     
    239239};
    240240
     241class LocalFillCore : public ast::WithGuards {
     242        CodeLocation const * parent;
     243public:
     244        LocalFillCore( CodeLocation const & location ) : parent( &location ) {
     245                assert( location.isSet() );
     246        }
     247
     248        template<typename node_t>
     249        auto previsit( node_t const * node )
     250                        -> typename std::enable_if<has_code_location<node_t>::value, node_t const *>::type {
     251                if ( node->location.isSet() ) {
     252                        GuardValue( parent ) = &node->location;
     253                        return node;
     254                } else {
     255                        node_t * mut = ast::mutate( node );
     256                        mut->location = *parent;
     257                        return mut;
     258                }
     259        }
     260};
     261
    241262} // namespace
    242263
     
    278299        ast::Pass<FillCore>::run( unit );
    279300}
     301
     302ast::Node const * localFillCodeLocations(
     303                CodeLocation const & location , ast::Node const * node ) {
     304        ast::Pass<LocalFillCore> visitor( location );
     305        return node->accept( visitor );
     306}
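A small sketch of the intended use of the new helper (localFillCodeLocations and strict_dynamic_cast exist in this code base; origStmt and generatedStmt are assumed for illustration):

    // Give every unset location in a freshly generated subtree the location
    // of the construct it was generated from.
    const ast::Stmt * filled = strict_dynamic_cast<const ast::Stmt *>(
        localFillCodeLocations( origStmt->location, generatedStmt ) );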
  • src/Common/CodeLocationTools.hpp

    ref3c383 rd672350  
    1010// Created On       : Fri Dec  4 15:35:00 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Dec  9  9:53:00 2020
    13 // Update Count     : 1
     12// Last Modified On : Mon Mar 14 15:14:00 2022
     13// Update Count     : 2
    1414//
    1515
    1616#pragma once
    1717
     18struct CodeLocation;
    1819namespace ast {
     20        class Node;
    1921        class TranslationUnit;
    2022}
     
    2830// Assign a nearby code-location to any unset code locations in the forest.
    2931void forceFillCodeLocations( ast::TranslationUnit & unit );
     32
     33// Fill in code-locations with a parent code location,
     34// using the provided CodeLocation as the base.
     35ast::Node const *
     36        localFillCodeLocations( CodeLocation const &, ast::Node const * );
  • src/Common/Examine.cc

    ref3c383 rd672350  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // Examine.h --
     7// Examine.cc -- Helpers for examining AST code.
    88//
    99// Author           : Andrew Beach
    1010// Created On       : Wed Sept 2 14:02 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Sep  8 12:15 2020
    13 // Update Count     : 0
     12// Last Modified On : Fri Dec 10 10:27 2021
     13// Update Count     : 1
    1414//
    1515
    1616#include "Common/Examine.h"
    1717
     18#include "AST/Type.hpp"
    1819#include "CodeGen/OperatorTable.h"
     20#include "InitTweak/InitTweak.h"
    1921
    2022DeclarationWithType * isMainFor( FunctionDecl * func, AggregateDecl::Aggregate kind ) {
     
    3638
    3739namespace {
     40
     41// getTypeofThis but does some extra checks used in this module.
     42const ast::Type * getTypeofThisSolo( const ast::FunctionDecl * func ) {
     43        if ( 1 != func->params.size() ) {
     44                return nullptr;
     45        }
     46        auto ref = func->type->params.front().as<ast::ReferenceType>();
     47        return (ref) ? ref->base : nullptr;
     48}
     49
     50}
     51
     52const ast::DeclWithType * isMainFor(
     53                const ast::FunctionDecl * func, ast::AggregateDecl::Aggregate kind ) {
     54        if ( "main" != func->name ) return nullptr;
     55        if ( 1 != func->params.size() ) return nullptr;
     56
     57        auto param = func->params.front();
     58
     59        auto type = dynamic_cast<const ast::ReferenceType *>( param->get_type() );
     60        if ( !type ) return nullptr;
     61
     62        auto obj = type->base.as<ast::StructInstType>();
     63        if ( !obj ) return nullptr;
     64
     65        if ( kind != obj->base->kind ) return nullptr;
     66
     67        return param;
     68}
     69
     70namespace {
    3871        Type * getDestructorParam( FunctionDecl * func ) {
    3972                if ( !CodeGen::isDestructor( func->name ) ) return nullptr;
     
    4881                return nullptr;
    4982        }
     83
     84const ast::Type * getDestructorParam( const ast::FunctionDecl * func ) {
     85        if ( !CodeGen::isDestructor( func->name ) ) return nullptr;
     86        //return InitTweak::getParamThis( func )->type;
     87        return getTypeofThisSolo( func );
     88}
     89
    5090}
    5191
     
    5797        return false;
    5898}
     99
     100bool isDestructorFor(
     101                const ast::FunctionDecl * func, const ast::StructDecl * type_decl ) {
     102        if ( const ast::Type * type = getDestructorParam( func ) ) {
     103                auto stype = dynamic_cast<const ast::StructInstType *>( type );
     104                return stype && stype->base.get() == type_decl;
     105        }
     106        return false;
     107}
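For reference, a short sketch of how the new AST overloads above are meant to be called; this mirrors their use in Concurrency/KeywordsNew.cpp later in this changeset, with func and type_decl assumed to be in scope.

    if ( const ast::DeclWithType * param = isMainFor( func, ast::AggregateDecl::Thread ) ) {
        // func is a main routine whose first parameter refers to a thread type; param is that parameter.
    }
    if ( isDestructorFor( func, type_decl ) ) {
        // func is a destructor whose first parameter refers to type_decl.
    }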
  • src/Common/Examine.h

    ref3c383 rd672350  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // Examine.h --
     7// Examine.h -- Helpers for examining AST code.
    88//
    99// Author           : Andrew Beach
    1010// Created On       : Wed Sept 2 13:57 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Sep  8 12:08 2020
    13 // Update Count     : 0
     12// Last Modified On : Fri Dec 10 10:28 2021
     13// Update Count     : 1
    1414//
    1515
     16#include "AST/Decl.hpp"
    1617#include "SynTree/Declaration.h"
    1718
    1819/// Check if this is a main function for a type of an aggregate kind.
    1920DeclarationWithType * isMainFor( FunctionDecl * func, AggregateDecl::Aggregate kind );
     21const ast::DeclWithType * isMainFor(
     22        const ast::FunctionDecl * func, ast::AggregateDecl::Aggregate kind );
    2023// Returns a pointer to the parameter if true, nullptr otherwise.
    2124
    2225/// Check if this function is a destructor for the given structure.
    2326bool isDestructorFor( FunctionDecl * func, StructDecl * type_decl );
     27bool isDestructorFor(
     28        const ast::FunctionDecl * func, const ast::StructDecl * type );
  • src/Concurrency/Keywords.cc

    ref3c383 rd672350  
    422422                        ;
    423423                else if ( auto param = isMainFor( decl, cast_target ) ) {
    424                         // This should never trigger.
    425                         assert( vtable_decl );
     424                        if ( !vtable_decl ) {
     425                                SemanticError( decl, context_error );
     426                        }
    426427                        // Should be safe because of isMainFor.
    427428                        StructInstType * struct_type = static_cast<StructInstType *>(
     
    12031204                                        //new TypeofType( noQualifiers, args.front()->clone() )
    12041205                                        new TypeofType( noQualifiers, new UntypedExpr(
    1205                                                         new NameExpr( "__get_type" ),
     1206                                                        new NameExpr( "__get_mutexstmt_lock_type" ),
    12061207                                                        { args.front()->clone() }
    12071208                                                )
     
    12151216                                map_range < std::list<Initializer*> > ( args, [](Expression * var ){
    12161217                                        return new SingleInit( new UntypedExpr(
    1217                                                         new NameExpr( "__get_ptr" ),
     1218                                                        new NameExpr( "__get_mutexstmt_lock_ptr" ),
    12181219                                                        { var }
    12191220                                        ) );
     
    12261227                TypeExpr * lock_type_expr = new TypeExpr(
    12271228                        new TypeofType( noQualifiers, new UntypedExpr(
    1228                                 new NameExpr( "__get_type" ),
     1229                                new NameExpr( "__get_mutexstmt_lock_type" ),
    12291230                                { args.front()->clone() }
    12301231                                )
  • src/Concurrency/KeywordsNew.cpp

    ref3c383 rd672350  
    1010// Created On       : Tue Nov 16  9:53:00 2021
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Dec  1 11:24:00 2021
    13 // Update Count     : 1
     12// Last Modified On : Fri Mar 11 10:40:00 2022
     13// Update Count     : 2
    1414//
     15
     16#include <iostream>
    1517
    1618#include "Concurrency/Keywords.h"
     
    1820#include "AST/Copy.hpp"
    1921#include "AST/Decl.hpp"
     22#include "AST/Expr.hpp"
    2023#include "AST/Pass.hpp"
    2124#include "AST/Stmt.hpp"
     25#include "AST/DeclReplacer.hpp"
    2226#include "AST/TranslationUnit.hpp"
    2327#include "CodeGen/OperatorTable.h"
     28#include "Common/Examine.h"
    2429#include "Common/utility.h"
     30#include "Common/UniqueName.h"
     31#include "ControlStruct/LabelGeneratorNew.hpp"
    2532#include "InitTweak/InitTweak.h"
     33#include "Virtual/Tables.h"
    2634
    2735namespace Concurrency {
     
    2937namespace {
    3038
    31 inline static bool isThread( const ast::DeclWithType * decl ) {
     39// --------------------------------------------------------------------------
     40// Loose Helper Functions:
     41
     42/// Detect threads constructed with the keyword thread.
     43bool isThread( const ast::DeclWithType * decl ) {
    3244        auto baseType = decl->get_type()->stripDeclarator();
    3345        auto instType = dynamic_cast<const ast::StructInstType *>( baseType );
     
    3648}
    3749
     50/// Get the virtual type id if given a type name.
     51std::string typeIdType( std::string const & exception_name ) {
     52        return exception_name.empty() ? std::string()
     53                : Virtual::typeIdType( exception_name );
     54}
     55
     56/// Get the vtable type name if given a type name.
     57std::string vtableTypeName( std::string const & exception_name ) {
     58        return exception_name.empty() ? std::string()
     59                : Virtual::vtableTypeName( exception_name );
     60}
     61
     62static ast::Type * mutate_under_references( ast::ptr<ast::Type>& type ) {
     63        ast::Type * mutType = type.get_and_mutate();
     64        for ( ast::ReferenceType * mutRef
     65                ; (mutRef = dynamic_cast<ast::ReferenceType *>( mutType ))
     66                ; mutType = mutRef->base.get_and_mutate() );
     67        return mutType;
     68}
     69
     70// Adds the aggregate's generic parameters to the function, and adds uses of those
     71// parameters to the function's type and to its first "this" argument.
     72ast::FunctionDecl * fixupGenerics(
     73                const ast::FunctionDecl * func, const ast::StructDecl * decl ) {
     74        const CodeLocation & location = decl->location;
     75        // We have to update both the declaration
     76        auto mutFunc = ast::mutate( func );
     77        auto mutType = mutFunc->type.get_and_mutate();
     78
     79        if ( decl->params.empty() ) {
     80                return mutFunc;
     81        }
     82
     83        assert( 0 != mutFunc->params.size() );
     84        assert( 0 != mutType->params.size() );
     85
     86        // Add the "forall" clause information.
     87        for ( const ast::ptr<ast::TypeDecl> & typeParam : decl->params ) {
     88                auto typeDecl = ast::deepCopy( typeParam );
     89                mutFunc->type_params.push_back( typeDecl );
     90                mutType->forall.push_back(
     91                        new ast::TypeInstType( typeDecl->name, typeDecl ) );
     92                for ( auto & assertion : typeDecl->assertions ) {
     93                        mutFunc->assertions.push_back( assertion );
     94                        mutType->assertions.emplace_back(
     95                                new ast::VariableExpr( location, assertion ) );
     96                }
     97                typeDecl->assertions.clear();
     98        }
     99
     100        // Even chain_mutate is not powerful enough for this:
     101        ast::ptr<ast::Type>& paramType = strict_dynamic_cast<ast::ObjectDecl *>(
     102                mutFunc->params[0].get_and_mutate() )->type;
     103        auto paramTypeInst = strict_dynamic_cast<ast::StructInstType *>(
     104                mutate_under_references( paramType ) );
     105        auto typeParamInst = strict_dynamic_cast<ast::StructInstType *>(
     106                mutate_under_references( mutType->params[0] ) );
     107
     108        for ( const ast::ptr<ast::TypeDecl> & typeDecl : mutFunc->type_params ) {
     109                paramTypeInst->params.push_back(
     110                        new ast::TypeExpr( location,
     111                                new ast::TypeInstType( typeDecl->name, typeDecl ) ) );
     112                typeParamInst->params.push_back(
     113                        new ast::TypeExpr( location,
     114                                new ast::TypeInstType( typeDecl->name, typeDecl ) ) );
     115        }
     116
     117        return mutFunc;
     118}
     119
    38120// --------------------------------------------------------------------------
    39 struct MutexKeyword final {
     121struct ConcurrentSueKeyword : public ast::WithDeclsToAdd<> {
     122        ConcurrentSueKeyword(
     123                std::string&& type_name, std::string&& field_name,
     124                std::string&& getter_name, std::string&& context_error,
     125                std::string&& exception_name,
     126                bool needs_main, ast::AggregateDecl::Aggregate cast_target
     127        ) :
     128                type_name( type_name ), field_name( field_name ),
     129                getter_name( getter_name ), context_error( context_error ),
     130                exception_name( exception_name ),
     131                typeid_name( typeIdType( exception_name ) ),
     132                vtable_name( vtableTypeName( exception_name ) ),
     133                needs_main( needs_main ), cast_target( cast_target )
     134        {}
     135
     136        virtual ~ConcurrentSueKeyword() {}
     137
     138        const ast::Decl * postvisit( const ast::StructDecl * decl );
     139        const ast::DeclWithType * postvisit( const ast::FunctionDecl * decl );
     140        const ast::Expr * postvisit( const ast::KeywordCastExpr * expr );
     141
     142        struct StructAndField {
     143                const ast::StructDecl * decl;
     144                const ast::ObjectDecl * field;
     145        };
     146
     147        const ast::StructDecl * handleStruct( const ast::StructDecl * );
     148        void handleMain( const ast::FunctionDecl *, const ast::StructInstType * );
     149        void addTypeId( const ast::StructDecl * );
     150        void addVtableForward( const ast::StructDecl * );
     151        const ast::FunctionDecl * forwardDeclare( const ast::StructDecl * );
     152        StructAndField addField( const ast::StructDecl * );
     153        void addGetRoutines( const ast::ObjectDecl *, const ast::FunctionDecl * );
     154        void addLockUnlockRoutines( const ast::StructDecl * );
     155
     156private:
     157        const std::string type_name;
     158        const std::string field_name;
     159        const std::string getter_name;
     160        const std::string context_error;
     161        const std::string exception_name;
     162        const std::string typeid_name;
     163        const std::string vtable_name;
     164        const bool needs_main;
     165        const ast::AggregateDecl::Aggregate cast_target;
     166
     167        const ast::StructDecl   * type_decl = nullptr;
     168        const ast::FunctionDecl * dtor_decl = nullptr;
     169        const ast::StructDecl * except_decl = nullptr;
     170        const ast::StructDecl * typeid_decl = nullptr;
     171        const ast::StructDecl * vtable_decl = nullptr;
     172
     173};
     174
     175// Handles thread type declarations:
     176//
     177// thread MyThread {                         struct MyThread {
     178//  int data;                                  int data;
     179//  a_struct_t more_data;                      a_struct_t more_data;
     180//                                =>             thread$ __thrd_d;
     181// };                                        };
     182//                                           static inline thread$ * get_thread( MyThread * this ) { return &this->__thrd_d; }
     183//
     184struct ThreadKeyword final : public ConcurrentSueKeyword {
     185        ThreadKeyword() : ConcurrentSueKeyword(
     186                "thread$",
     187                "__thrd",
     188                "get_thread",
     189                "thread keyword requires threads to be in scope, add #include <thread.hfa>\n",
     190                "ThreadCancelled",
     191                true,
     192                ast::AggregateDecl::Thread )
     193        {}
     194
     195        virtual ~ThreadKeyword() {}
     196};
     197
     198// Handles coroutine type declarations:
     199//
     200// coroutine MyCoroutine {                   struct MyCoroutine {
     201//  int data;                                  int data;
     202//  a_struct_t more_data;                      a_struct_t more_data;
     203//                                =>             coroutine$ __cor_d;
     204// };                                        };
     205//                                           static inline coroutine$ * get_coroutine( MyCoroutine * this ) { return &this->__cor_d; }
     206//
     207struct CoroutineKeyword final : public ConcurrentSueKeyword {
     208        CoroutineKeyword() : ConcurrentSueKeyword(
     209                "coroutine$",
     210                "__cor",
     211                "get_coroutine",
     212                "coroutine keyword requires coroutines to be in scope, add #include <coroutine.hfa>\n",
     213                "CoroutineCancelled",
     214                true,
     215                ast::AggregateDecl::Coroutine )
     216        {}
     217
     218        virtual ~CoroutineKeyword() {}
     219};
     220
     221// Handles monitor type declarations:
     222//
     223// monitor MyMonitor {                       struct MyMonitor {
     224//  int data;                                  int data;
     225//  a_struct_t more_data;                      a_struct_t more_data;
     226//                                =>             monitor$ __mon_d;
     227// };                                        };
     228//                                           static inline monitor$ * get_monitor( MyMonitor * this ) {
     229//                                               return &this->__mon_d;
     230//                                           }
     231//                                           void lock(MyMonitor & this) {
     232//                                               lock(get_monitor(this));
     233//                                           }
     234//                                           void unlock(MyMonitor & this) {
     235//                                               unlock(get_monitor(this));
     236//                                           }
     237//
     238struct MonitorKeyword final : public ConcurrentSueKeyword {
     239        MonitorKeyword() : ConcurrentSueKeyword(
     240                "monitor$",
     241                "__mon",
     242                "get_monitor",
     243                "monitor keyword requires monitors to be in scope, add #include <monitor.hfa>\n",
     244                "",
     245                false,
     246                ast::AggregateDecl::Monitor )
     247        {}
     248
     249        virtual ~MonitorKeyword() {}
     250};
     251
     252// Handles generator type declarations:
     253//
     254// generator MyGenerator {                   struct MyGenerator {
     255//  int data;                                  int data;
     256//  a_struct_t more_data;                      a_struct_t more_data;
     257//                                =>             int __generator_state;
     258// };                                        };
     259//
     260struct GeneratorKeyword final : public ConcurrentSueKeyword {
     261        GeneratorKeyword() : ConcurrentSueKeyword(
     262                "generator$",
     263                "__generator_state",
     264                "get_generator",
     265                "Unable to find builtin type generator$\n",
     266                "",
     267                true,
     268                ast::AggregateDecl::Generator )
     269        {}
     270
     271        virtual ~GeneratorKeyword() {}
     272};
     273
     274const ast::Decl * ConcurrentSueKeyword::postvisit(
     275                const ast::StructDecl * decl ) {
     276        if ( !decl->body ) {
     277                return decl;
     278        } else if ( cast_target == decl->kind ) {
     279                return handleStruct( decl );
     280        } else if ( type_name == decl->name ) {
     281                assert( !type_decl );
     282                type_decl = decl;
     283        } else if ( exception_name == decl->name ) {
     284                assert( !except_decl );
     285                except_decl = decl;
     286        } else if ( typeid_name == decl->name ) {
     287                assert( !typeid_decl );
     288                typeid_decl = decl;
     289        } else if ( vtable_name == decl->name ) {
     290                assert( !vtable_decl );
     291                vtable_decl = decl;
     292        }
     293        return decl;
     294}
     295
     296// Try to get the full definition, but raise an error on conflicts.
     297const ast::FunctionDecl * getDefinition(
     298                const ast::FunctionDecl * old_decl,
     299                const ast::FunctionDecl * new_decl ) {
     300        if ( !new_decl->stmts ) {
     301                return old_decl;
     302        } else if ( !old_decl->stmts ) {
     303                return new_decl;
     304        } else {
     305                assert( !old_decl->stmts || !new_decl->stmts );
     306                return nullptr;
     307        }
     308}
     309
     310const ast::DeclWithType * ConcurrentSueKeyword::postvisit(
     311                const ast::FunctionDecl * decl ) {
     312        if ( type_decl && isDestructorFor( decl, type_decl ) ) {
     313                // Check for forward declarations, try to get the full definition.
     314                dtor_decl = (dtor_decl) ? getDefinition( dtor_decl, decl ) : decl;
     315        } else if ( !vtable_name.empty() && decl->has_body() ) {
     316                if (const ast::DeclWithType * param = isMainFor( decl, cast_target )) {
     317                        if ( !vtable_decl ) {
     318                                SemanticError( decl, context_error );
     319                        }
     320                        // Should be safe because of isMainFor.
     321                        const ast::StructInstType * struct_type =
     322                                static_cast<const ast::StructInstType *>(
     323                                        static_cast<const ast::ReferenceType *>(
     324                                                param->get_type() )->base.get() );
     325
     326                        handleMain( decl, struct_type );
     327                }
     328        }
     329        return decl;
     330}
     331
     332const ast::Expr * ConcurrentSueKeyword::postvisit(
     333                const ast::KeywordCastExpr * expr ) {
     334        if ( cast_target == expr->target ) {
     335                // Convert `(thread &)ex` to `(thread$ &)*get_thread(ex)`, etc.
     336                if ( !type_decl || !dtor_decl ) {
     337                        SemanticError( expr, context_error );
     338                }
     339                assert( nullptr == expr->result );
     340                auto cast = ast::mutate( expr );
     341                cast->result = new ast::ReferenceType( new ast::StructInstType( type_decl ) );
     342                cast->concrete_target.field  = field_name;
     343                cast->concrete_target.getter = getter_name;
     344                return cast;
     345        }
     346        return expr;
     347}
     348
     349const ast::StructDecl * ConcurrentSueKeyword::handleStruct(
     350                const ast::StructDecl * decl ) {
     351        assert( decl->body );
     352
     353        if ( !type_decl || !dtor_decl ) {
     354                SemanticError( decl, context_error );
     355        }
     356
     357        if ( !exception_name.empty() ) {
     358                if( !typeid_decl || !vtable_decl ) {
     359                        SemanticError( decl, context_error );
     360                }
     361                addTypeId( decl );
     362                addVtableForward( decl );
     363        }
     364
     365        const ast::FunctionDecl * func = forwardDeclare( decl );
     366        StructAndField addFieldRet = addField( decl );
     367        decl = addFieldRet.decl;
     368        const ast::ObjectDecl * field = addFieldRet.field;
     369
     370        addGetRoutines( field, func );
     371        // Add routines to monitors for use by mutex stmt.
     372        if ( ast::AggregateDecl::Monitor == cast_target ) {
     373                addLockUnlockRoutines( decl );
     374        }
     375
     376        return decl;
     377}
     378
     379void ConcurrentSueKeyword::handleMain(
     380                const ast::FunctionDecl * decl, const ast::StructInstType * type ) {
     381        assert( vtable_decl );
     382        assert( except_decl );
     383
     384        const CodeLocation & location = decl->location;
     385
     386        std::vector<ast::ptr<ast::Expr>> poly_args = {
     387                new ast::TypeExpr( location, type ),
     388        };
     389        ast::ObjectDecl * vtable_object = Virtual::makeVtableInstance(
     390                location,
     391                "_default_vtable_object_declaration",
     392                new ast::StructInstType( vtable_decl, copy( poly_args ) ),
     393                type,
     394                nullptr
     395        );
     396        declsToAddAfter.push_back( vtable_object );
     397        declsToAddAfter.push_back(
     398                new ast::ObjectDecl(
     399                        location,
     400                        Virtual::concurrentDefaultVTableName(),
     401                        new ast::ReferenceType( vtable_object->type, ast::CV::Const ),
     402                        new ast::SingleInit( location,
     403                                new ast::VariableExpr( location, vtable_object ) ),
     404                        ast::Storage::Classes(),
     405                        ast::Linkage::Cforall
     406                )
     407        );
     408        declsToAddAfter.push_back( Virtual::makeGetExceptionFunction(
     409                location,
     410                vtable_object,
     411                new ast::StructInstType( except_decl, copy( poly_args ) )
     412        ) );
     413}
     414
     415void ConcurrentSueKeyword::addTypeId( const ast::StructDecl * decl ) {
     416        assert( typeid_decl );
     417        const CodeLocation & location = decl->location;
     418
     419        ast::StructInstType * typeid_type =
     420                new ast::StructInstType( typeid_decl, ast::CV::Const );
     421        typeid_type->params.push_back(
     422                new ast::TypeExpr( location, new ast::StructInstType( decl ) ) );
     423        declsToAddBefore.push_back(
     424                Virtual::makeTypeIdInstance( location, typeid_type ) );
     425        // If the typeid_type is going to be kept, the other reference will have
     426        // been made by now, but we also get to avoid extra mutates.
     427        ast::ptr<ast::StructInstType> typeid_cleanup = typeid_type;
     428}
     429
     430void ConcurrentSueKeyword::addVtableForward( const ast::StructDecl * decl ) {
     431        assert( vtable_decl );
     432        const CodeLocation& location = decl->location;
     433
     434        std::vector<ast::ptr<ast::Expr>> poly_args = {
     435                new ast::TypeExpr( location, new ast::StructInstType( decl ) ),
     436        };
     437        declsToAddBefore.push_back( Virtual::makeGetExceptionForward(
     438                location,
     439                new ast::StructInstType( vtable_decl, copy( poly_args ) ),
     440                new ast::StructInstType( except_decl, copy( poly_args ) )
     441        ) );
     442        ast::ObjectDecl * vtable_object = Virtual::makeVtableForward(
     443                location,
     444                "_default_vtable_object_declaration",
     445                new ast::StructInstType( vtable_decl, std::move( poly_args ) )
     446        );
     447        declsToAddBefore.push_back( vtable_object );
     448        declsToAddBefore.push_back(
     449                new ast::ObjectDecl(
     450                        location,
     451                        Virtual::concurrentDefaultVTableName(),
     452                        new ast::ReferenceType( vtable_object->type, ast::CV::Const ),
     453                        nullptr,
     454                        ast::Storage::Extern,
     455                        ast::Linkage::Cforall
     456                )
     457        );
     458}
     459
     460const ast::FunctionDecl * ConcurrentSueKeyword::forwardDeclare(
     461                const ast::StructDecl * decl ) {
     462        const CodeLocation & location = decl->location;
     463
     464        ast::StructDecl * forward = ast::deepCopy( decl );
     465        {
     466                // If removing members makes ref-count go to zero, do not free.
     467                ast::ptr<ast::StructDecl> forward_ptr = forward;
     468                forward->body = false;
     469                forward->members.clear();
     470                forward_ptr.release();
     471        }
     472
     473        ast::ObjectDecl * this_decl = new ast::ObjectDecl(
     474                location,
     475                "this",
     476                new ast::ReferenceType( new ast::StructInstType( decl ) ),
     477                nullptr,
     478                ast::Storage::Classes(),
     479                ast::Linkage::Cforall
     480        );
     481
     482        ast::ObjectDecl * ret_decl = new ast::ObjectDecl(
     483                location,
     484                "ret",
     485                new ast::PointerType( new ast::StructInstType( type_decl ) ),
     486                nullptr,
     487                ast::Storage::Classes(),
     488                ast::Linkage::Cforall
     489        );
     490
     491        ast::FunctionDecl * get_decl = new ast::FunctionDecl(
     492                location,
     493                getter_name,
     494                {}, // forall
     495                { this_decl }, // params
     496                { ret_decl }, // returns
     497                nullptr, // stmts
     498                ast::Storage::Static,
     499                ast::Linkage::Cforall,
     500                { new ast::Attribute( "const" ) },
     501                ast::Function::Inline
     502        );
     503        get_decl = fixupGenerics( get_decl, decl );
     504
     505        ast::FunctionDecl * main_decl = nullptr;
     506        if ( needs_main ) {
     507                // `this_decl` is copied here because the original was used above.
     508                main_decl = new ast::FunctionDecl(
     509                        location,
     510                        "main",
     511                        {},
     512                        { ast::deepCopy( this_decl ) },
     513                        {},
     514                        nullptr,
     515                        ast::Storage::Classes(),
     516                        ast::Linkage::Cforall
     517                );
     518                main_decl = fixupGenerics( main_decl, decl );
     519        }
     520
     521        declsToAddBefore.push_back( forward );
     522        if ( needs_main ) declsToAddBefore.push_back( main_decl );
     523        declsToAddBefore.push_back( get_decl );
     524
     525        return get_decl;
     526}
     527
     528ConcurrentSueKeyword::StructAndField ConcurrentSueKeyword::addField(
     529                const ast::StructDecl * decl ) {
     530        const CodeLocation & location = decl->location;
     531
     532        ast::ObjectDecl * field = new ast::ObjectDecl(
     533                location,
     534                field_name,
     535                new ast::StructInstType( type_decl ),
     536                nullptr,
     537                ast::Storage::Classes(),
     538                ast::Linkage::Cforall
     539        );
     540
     541        auto mutDecl = ast::mutate( decl );
     542        mutDecl->members.push_back( field );
     543
     544        return {mutDecl, field};
     545}
     546
     547void ConcurrentSueKeyword::addGetRoutines(
     548                const ast::ObjectDecl * field, const ast::FunctionDecl * forward ) {
     549        // Say it is generated at the "same" places as the forward declaration.
     550        const CodeLocation & location = forward->location;
     551
     552        const ast::DeclWithType * param = forward->params.front();
     553        ast::Stmt * stmt = new ast::ReturnStmt( location,
     554                new ast::AddressExpr( location,
     555                        new ast::MemberExpr( location,
     556                                field,
     557                                new ast::CastExpr( location,
     558                                        new ast::VariableExpr( location, param ),
     559                                        ast::deepCopy( param->get_type()->stripReferences() ),
     560                                        ast::ExplicitCast
     561                                )
     562                        )
     563                )
     564        );
     565
     566        ast::FunctionDecl * decl = ast::deepCopy( forward );
     567        decl->stmts = new ast::CompoundStmt( location, { stmt } );
     568        declsToAddAfter.push_back( decl );
     569}
     570
     571void ConcurrentSueKeyword::addLockUnlockRoutines(
     572                const ast::StructDecl * decl ) {
     573        // This should only be used on monitors.
     574        assert( ast::AggregateDecl::Monitor == cast_target );
     575
     576        const CodeLocation & location = decl->location;
     577
     578        // The parameter for both routines.
     579        ast::ObjectDecl * this_decl = new ast::ObjectDecl(
     580                location,
     581                "this",
     582                new ast::ReferenceType( new ast::StructInstType( decl ) ),
     583                nullptr,
     584                ast::Storage::Classes(),
     585                ast::Linkage::Cforall
     586        );
     587
     588        ast::FunctionDecl * lock_decl = new ast::FunctionDecl(
     589                location,
     590                "lock",
     591                { /* forall */ },
     592                {
     593                        // Copy the declaration of this.
     594                        ast::deepCopy( this_decl ),
     595                },
     596                { /* returns */ },
     597                nullptr,
     598                ast::Storage::Static,
     599                ast::Linkage::Cforall,
     600                { /* attributes */ },
     601                ast::Function::Inline
     602        );
     603        lock_decl = fixupGenerics( lock_decl, decl );
     604
     605        lock_decl->stmts = new ast::CompoundStmt( location, {
     606                new ast::ExprStmt( location,
     607                        new ast::UntypedExpr( location,
     608                                new ast::NameExpr( location, "lock" ),
     609                                {
     610                                        new ast::UntypedExpr( location,
     611                                                new ast::NameExpr( location, "get_monitor" ),
     612                                                { new ast::VariableExpr( location,
     613                                                        InitTweak::getParamThis( lock_decl ) ) }
     614                                        )
     615                                }
     616                        )
     617                )
     618        } );
     619
     620        ast::FunctionDecl * unlock_decl = new ast::FunctionDecl(
     621                location,
     622                "unlock",
     623                { /* forall */ },
     624                {
     625                        // Last use, consume the declaration of this.
     626                        this_decl,
     627                },
     628                { /* returns */ },
     629                nullptr,
     630                ast::Storage::Static,
     631                ast::Linkage::Cforall,
     632                { /* attributes */ },
     633                ast::Function::Inline
     634        );
     635        unlock_decl = fixupGenerics( unlock_decl, decl );
     636
     637        unlock_decl->stmts = new ast::CompoundStmt( location, {
     638                new ast::ExprStmt( location,
     639                        new ast::UntypedExpr( location,
     640                                new ast::NameExpr( location, "unlock" ),
     641                                {
     642                                        new ast::UntypedExpr( location,
     643                                                new ast::NameExpr( location, "get_monitor" ),
     644                                                { new ast::VariableExpr( location,
     645                                                        InitTweak::getParamThis( unlock_decl ) ) }
     646                                        )
     647                                }
     648                        )
     649                )
     650        } );
     651
     652        declsToAddAfter.push_back( lock_decl );
     653        declsToAddAfter.push_back( unlock_decl );
     654}
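The two routines generated here are thin inline forwarders onto the underlying monitor. In rough C terms (Cforall reference parameters become plain pointers, and my_monitor, get_monitor, do_lock and do_unlock are stand-in names for the overloaded Cforall routines):

        struct my_monitor { int m; };

        static int * get_monitor( struct my_monitor * this_ ) { return &this_->m; }
        static void  do_lock  ( int * m ) { (void)m; /* acquire, elided */ }
        static void  do_unlock( int * m ) { (void)m; /* release, elided */ }

        /* the generated lock/unlock simply forward to the underlying monitor */
        static inline void lock_my_monitor  ( struct my_monitor * this_ ) { do_lock  ( get_monitor( this_ ) ); }
        static inline void unlock_my_monitor( struct my_monitor * this_ ) { do_unlock( get_monitor( this_ ) ); }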
     655
     656
     657// --------------------------------------------------------------------------
     658struct SuspendKeyword final :
     659                public ast::WithStmtsToAdd<>, public ast::WithGuards {
     660        SuspendKeyword() = default;
     661        virtual ~SuspendKeyword() = default;
     662
     663        void previsit( const ast::FunctionDecl * );
     664        const ast::DeclWithType * postvisit( const ast::FunctionDecl * );
     665        const ast::Stmt * postvisit( const ast::SuspendStmt * );
     666
     667private:
     668        bool is_real_suspend( const ast::FunctionDecl * );
     669
     670        const ast::Stmt * make_generator_suspend( const ast::SuspendStmt * );
     671        const ast::Stmt * make_coroutine_suspend( const ast::SuspendStmt * );
     672
     673        struct LabelPair {
     674                ast::Label obj;
     675                int idx;
     676        };
     677
     678        LabelPair make_label(const ast::Stmt * stmt ) {
     679                labels.push_back( ControlStruct::newLabel( "generator", stmt ) );
     680                return { labels.back(), int(labels.size()) };
     681        }
     682
     683        const ast::DeclWithType * in_generator = nullptr;
     684        const ast::FunctionDecl * decl_suspend = nullptr;
     685        std::vector<ast::Label> labels;
     686};
     687
     688void SuspendKeyword::previsit( const ast::FunctionDecl * decl ) {
     689        GuardValue( in_generator ); in_generator = nullptr;
     690
     691        // If it is the real suspend, grab it if we don't have one already.
     692        if ( is_real_suspend( decl ) ) {
     693                decl_suspend = decl_suspend ? decl_suspend : decl;
     694                return;
     695        }
     696
     697        // Otherwise check if this is a generator main and, if so, handle it.
     698        auto param = isMainFor( decl, ast::AggregateDecl::Generator );
     699        if ( !param ) return;
     700
     701        if ( 0 != decl->returns.size() ) {
     702                SemanticError( decl->location, "Generator main must return void" );
     703        }
     704
     705        in_generator = param;
     706        GuardValue( labels ); labels.clear();
     707}
     708
     709const ast::DeclWithType * SuspendKeyword::postvisit(
     710                const ast::FunctionDecl * decl ) {
     711        // Only modify a full definition of a generator with states.
     712        if ( !decl->stmts || !in_generator || labels.empty() ) return decl;
     713
     714        const CodeLocation & location = decl->location;
     715
     716        // Create a new function body:
     717        // static void * __generator_labels[] = {&&s0, &&s1, ...};
     718        // void * __generator_label = __generator_labels[GEN.__generator_state];
     719        // goto * __generator_label;
     720        // s0: ;
     721        // OLD_BODY
     722
     723        // This is the null statement inserted right before the body.
     724        ast::NullStmt * noop = new ast::NullStmt( location );
     725        noop->labels.push_back( ControlStruct::newLabel( "generator", noop ) );
     726        const ast::Label & first_label = noop->labels.back();
     727
     728        // Add each label to the init, starting with the first label.
     729        std::vector<ast::ptr<ast::Init>> inits = {
     730                new ast::SingleInit( location,
     731                        new ast::LabelAddressExpr( location, copy( first_label ) ) ) };
     732        // Then go through all the stored labels, and clear the store.
     733        for ( auto && label : labels ) {
     734                inits.push_back( new ast::SingleInit( label.location,
     735                        new ast::LabelAddressExpr( label.location, std::move( label )
     736                        ) ) );
     737        }
     738        labels.clear();
     739        // Then construct the initializer itself.
     740        auto init = new ast::ListInit( location, std::move( inits ) );
     741
     742        ast::ObjectDecl * generatorLabels = new ast::ObjectDecl(
     743                location,
     744                "__generator_labels",
     745                new ast::ArrayType(
     746                        new ast::PointerType( new ast::VoidType() ),
     747                        nullptr,
     748                        ast::FixedLen,
     749                        ast::DynamicDim
     750                ),
     751                init,
     752                ast::Storage::Classes(),
     753                ast::Linkage::AutoGen
     754        );
     755
     756        ast::ObjectDecl * generatorLabel = new ast::ObjectDecl(
     757                location,
     758                "__generator_label",
     759                new ast::PointerType( new ast::VoidType() ),
     760                new ast::SingleInit( location,
     761                        new ast::UntypedExpr( location,
     762                                new ast::NameExpr( location, "?[?]" ),
     763                                {
     764                                        // TODO: Could be a variable expr.
     765                                        new ast::NameExpr( location, "__generator_labels" ),
     766                                        new ast::UntypedMemberExpr( location,
     767                                                new ast::NameExpr( location, "__generator_state" ),
     768                                                new ast::VariableExpr( location, in_generator )
     769                                        )
     770                                }
     771                        )
     772                ),
     773                ast::Storage::Classes(),
     774                ast::Linkage::AutoGen
     775        );
     776
     777        ast::BranchStmt * theGoTo = new ast::BranchStmt(
     778                location, new ast::VariableExpr( location, generatorLabel )
     779        );
     780
     781        // The noop goes here in order.
     782
     783        ast::CompoundStmt * body = new ast::CompoundStmt( location, {
     784                { new ast::DeclStmt( location, generatorLabels ) },
     785                { new ast::DeclStmt( location, generatorLabel ) },
     786                { theGoTo },
     787                { noop },
     788                { decl->stmts },
     789        } );
     790
     791        auto mutDecl = ast::mutate( decl );
     792        mutDecl->stmts = body;
     793        return mutDecl;
     794}
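The comment block inside this postvisit describes the transformed generator main. In rough C terms the prologue relies on GCC's labels-as-values extension; gen_t, state and gen_main below are illustrative stand-ins for the generator type, __generator_state and the real main:

        struct gen_t { int state; };

        void gen_main( struct gen_t * g ) {
                static void * labels[] = { &&s0, &&s1 };   /* one entry per resume point     */
                void * label = labels[ g->state ];
                goto * label;                              /* resume where we last left off  */
          s0: ;                                            /* label placed before the body   */
                /* ... original generator body, each suspend expanded as sketched below ... */
          s1: ;
                (void)g;
        }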
     795
     796const ast::Stmt * SuspendKeyword::postvisit( const ast::SuspendStmt * stmt ) {
     797        switch ( stmt->type ) {
     798        case ast::SuspendStmt::None:
      799                // Use the context to determine the implicit target.
     800                if ( in_generator ) {
     801                        return make_generator_suspend( stmt );
     802                } else {
     803                        return make_coroutine_suspend( stmt );
     804                }
     805        case ast::SuspendStmt::Coroutine:
     806                return make_coroutine_suspend( stmt );
     807        case ast::SuspendStmt::Generator:
     808                // Generator suspends must be directly in a generator.
     809                if ( !in_generator ) SemanticError( stmt->location, "'suspend generator' must be used inside main of generator type." );
     810                return make_generator_suspend( stmt );
     811        }
     812        assert( false );
     813        return stmt;
     814}
     815
     816/// Check whether this is the real/official suspend declaration.
     817bool SuspendKeyword::is_real_suspend( const ast::FunctionDecl * decl ) {
     818        return ( !decl->linkage.is_mangled
     819                && 0 == decl->params.size()
     820                && 0 == decl->returns.size()
     821                && "__cfactx_suspend" == decl->name );
     822}
     823
     824const ast::Stmt * SuspendKeyword::make_generator_suspend(
     825                const ast::SuspendStmt * stmt ) {
     826        assert( in_generator );
     827        // Target code is:
     828        //   GEN.__generator_state = X;
     829        //   THEN
     830        //   return;
     831        //   __gen_X:;
     832
     833        const CodeLocation & location = stmt->location;
     834
     835        LabelPair label = make_label( stmt );
     836
     837        // This is the context saving statement.
     838        stmtsToAddBefore.push_back( new ast::ExprStmt( location,
     839                new ast::UntypedExpr( location,
     840                        new ast::NameExpr( location, "?=?" ),
     841                        {
     842                                new ast::UntypedMemberExpr( location,
     843                                        new ast::NameExpr( location, "__generator_state" ),
     844                                        new ast::VariableExpr( location, in_generator )
     845                                ),
     846                                ast::ConstantExpr::from_int( location, label.idx ),
     847                        }
     848                )
     849        ) );
     850
     851        // The THEN component is conditional (return is not).
     852        if ( stmt->then ) {
     853                stmtsToAddBefore.push_back( stmt->then.get() );
     854        }
     855        stmtsToAddBefore.push_back( new ast::ReturnStmt( location, nullptr ) );
     856
     857        // The null statement replaces the old suspend statement.
     858        return new ast::NullStmt( location, { label.obj } );
     859}
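In the same rough C terms, a single expanded suspend site looks as follows; gen_t and state again stand in for the generator type and __generator_state, and 1 is the index handed out by make_label for this site:

        struct gen_t { int state; };

        void suspend_site( struct gen_t * g ) {
                g->state = 1;   /* context save: remember which label to resume at */
                /* the suspend's optional 'then' statement is emitted here, if any */
                return;         /* hand control back to the caller                 */
          s1: ;                 /* null statement carrying the fresh resume label  */
        }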
     860
     861const ast::Stmt * SuspendKeyword::make_coroutine_suspend(
     862                const ast::SuspendStmt * stmt ) {
     863        // The only thing we need from the old statement is the location.
     864        const CodeLocation & location = stmt->location;
     865
     866        if ( !decl_suspend ) {
     867                SemanticError( location, "suspend keyword applied to coroutines requires coroutines to be in scope, add #include <coroutine.hfa>\n" );
     868        }
     869        if ( stmt->then ) {
     870                SemanticError( location, "Compound statement following coroutines is not implemented." );
     871        }
     872
     873        return new ast::ExprStmt( location,
     874                new ast::UntypedExpr( location,
     875                        ast::VariableExpr::functionPointer( location, decl_suspend ) )
     876        );
     877}
     878
     879// --------------------------------------------------------------------------
     880struct MutexKeyword final : public ast::WithDeclsToAdd<> {
    40881        const ast::FunctionDecl * postvisit( const ast::FunctionDecl * decl );
    41882        void postvisit( const ast::StructDecl * decl );
     
    50891        ast::CompoundStmt * addStatements( const ast::CompoundStmt * body, const std::vector<ast::ptr<ast::Expr>> & args );
    51892        ast::CompoundStmt * addThreadDtorStatements( const ast::FunctionDecl* func, const ast::CompoundStmt * body, const std::vector<const ast::DeclWithType *> & args );
    52 
     893        ast::ExprStmt * genVirtLockUnlockExpr( const std::string & fnName, ast::ptr<ast::Expr> expr, const CodeLocation & location, ast::Expr * param);
     894        ast::IfStmt * genTypeDiscrimLockUnlock( const std::string & fnName, const std::vector<ast::ptr<ast::Expr>> & args, const CodeLocation & location, ast::UntypedExpr * thisParam );
    53895private:
    54896        const ast::StructDecl * monitor_decl = nullptr;
     
    59901
    60902        static ast::ptr<ast::Type> generic_func;
     903
     904        UniqueName mutex_func_namer = UniqueName("__lock_unlock_curr");
    61905};
    62906
     
    1601004
    1611005const ast::Stmt * MutexKeyword::postvisit( const ast::MutexStmt * stmt ) {
     1006        if ( !lock_guard_decl ) {
     1007                SemanticError( stmt->location, "mutex stmt requires a header, add #include <mutex_stmt.hfa>\n" );
     1008        }
    1621009        ast::CompoundStmt * body =
    1631010                        new ast::CompoundStmt( stmt->location, { stmt->stmt } );
    164         addStatements( body, stmt->mutexObjs );
    165         return body;
     1011       
      1012        return addStatements( body, stmt->mutexObjs );
    1661013}
    1671014
     
    2511098                                {
    2521099                                        new ast::SingleInit( location,
    253                                                 new ast::AddressExpr(
     1100                                                new ast::AddressExpr( location,
    2541101                                                        new ast::VariableExpr( location, monitor ) ) ),
    2551102                                        new ast::SingleInit( location,
     
    3581205}
    3591206
      1207// generates a cast from the stored void pointer to the appropriate lock type and dereferences it before calling lock or unlock on it
      1208// used to undo the type erasure introduced by storing all the lock pointers as void pointers
     1209ast::ExprStmt * MutexKeyword::genVirtLockUnlockExpr( const std::string & fnName, ast::ptr<ast::Expr> expr, const CodeLocation & location, ast::Expr * param ) {
     1210        return new ast::ExprStmt( location,
     1211                new ast::UntypedExpr( location,
     1212                        new ast::NameExpr( location, fnName ), {
     1213                                ast::UntypedExpr::createDeref(
     1214                                        location,
     1215                                        new ast::CastExpr( location,
     1216                                                param,
     1217                                                new ast::PointerType( new ast::TypeofType( new ast::UntypedExpr(
     1218                                                        expr->location,
     1219                                                        new ast::NameExpr( expr->location, "__get_mutexstmt_lock_type" ),
     1220                                                        { expr }
     1221                                                ) ) ),
     1222                                                ast::GeneratedFlag::ExplicitCast
     1223                                        )
     1224                                )
     1225                        }
     1226                )
     1227        );
     1228}
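Seen from the generated code's side, the expression built here is roughly the following C (using GCC/C23 typeof; some_lock_t, my_lock and do_lock are stand-ins, and the by-value parameter only keeps the sketch valid C where the real call takes a Cforall reference):

        typedef struct { int held; } some_lock_t;

        static void do_lock( some_lock_t l ) { (void)l; }

        void example( void ) {
                some_lock_t my_lock = { 0 };
                void * erased = &my_lock;              /* type-erased storage in the void * array */
                do_lock( *(typeof(my_lock) *)erased ); /* cast back to the real type, then deref  */
        }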
     1229
     1230ast::IfStmt * MutexKeyword::genTypeDiscrimLockUnlock( const std::string & fnName, const std::vector<ast::ptr<ast::Expr>> & args, const CodeLocation & location, ast::UntypedExpr * thisParam ) {
     1231        ast::IfStmt * outerLockIf = nullptr;
     1232        ast::IfStmt * lastLockIf = nullptr;
     1233
      1234        // adds an if/else-if clause for each lock to recover its type from the void pointer, discriminating on the pointer's address
     1235        for ( long unsigned int i = 0; i < args.size(); i++ ) {
     1236               
     1237                ast::UntypedExpr * ifCond = new ast::UntypedExpr( location,
     1238                        new ast::NameExpr( location, "?==?" ), {
     1239                                ast::deepCopy( thisParam ),
     1240                                new ast::CastExpr( location, new ast::AddressExpr( location, args.at(i) ), new ast::PointerType( new ast::VoidType() ))
     1241                        }
     1242                );
     1243
     1244                ast::IfStmt * currLockIf = new ast::IfStmt(
     1245                        location,
     1246                        ifCond,
     1247                        genVirtLockUnlockExpr( fnName, args.at(i), location, ast::deepCopy( thisParam ) )
     1248                );
     1249               
     1250                if ( i == 0 ) {
     1251                        outerLockIf = currLockIf;
     1252                } else {
     1253                        // add ifstmt to else of previous stmt
     1254                        lastLockIf->else_ = currLockIf;
     1255                }
     1256
     1257                lastLockIf = currLockIf;
     1258        }
     1259        return outerLockIf;
     1260}
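The if/else-if chain built here compares the current void pointer against the address of each mutex argument to recover which lock it actually is. Roughly, for two arguments of different types (lock_int and lock_long are C stand-ins for the single overloaded Cforall lock routine reached through the typeof cast above):

        static void lock_int ( int  v ) { (void)v; }
        static void lock_long( long v ) { (void)v; }

        void pick_and_lock( void * curr, int * a, long * b ) {
                if ( curr == (void *)a ) {
                        lock_int( *(int *)curr );        /* clause generated for argument a */
                } else if ( curr == (void *)b ) {
                        lock_long( *(long *)curr );      /* clause generated for argument b */
                }
        }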
     1261
    3601262ast::CompoundStmt * MutexKeyword::addStatements(
    3611263                const ast::CompoundStmt * body,
    3621264                const std::vector<ast::ptr<ast::Expr>> & args ) {
    363         ast::CompoundStmt * mutBody = ast::mutate( body );
    3641265
    3651266        // Code is generated near the beginning of the compound statement.
    366         const CodeLocation & location = mutBody->location;
     1267        const CodeLocation & location = body->location;
     1268
      1269        // final body to return
     1270        ast::CompoundStmt * newBody = new ast::CompoundStmt( location );
     1271
     1272        // std::string lockFnName = mutex_func_namer.newName();
     1273        // std::string unlockFnName = mutex_func_namer.newName();
    3671274
    3681275        // Make pointer to the monitors.
     
    3721279                new ast::ArrayType(
    3731280                        new ast::PointerType(
    374                                 new ast::TypeofType(
    375                                         new ast::UntypedExpr(
    376                                                 location,
    377                                                 new ast::NameExpr( location, "__get_type" ),
    378                                                 { args.front() }
    379                                         )
    380                                 )
     1281                                new ast::VoidType()
    3811282                        ),
    3821283                        ast::ConstantExpr::from_ulong( location, args.size() ),
     
    3921293                                                new ast::UntypedExpr(
    3931294                                                        expr->location,
    394                                                         new ast::NameExpr( expr->location, "__get_ptr" ),
     1295                                                        new ast::NameExpr( expr->location, "__get_mutexstmt_lock_ptr" ),
    3951296                                                        { expr }
    3961297                                                )
     
    4051306        ast::StructInstType * lock_guard_struct =
    4061307                        new ast::StructInstType( lock_guard_decl );
    407         ast::TypeExpr * lock_type_expr = new ast::TypeExpr(
    408                 location,
    409                 new ast::TypeofType(
    410                         new ast::UntypedExpr(
    411                                 location,
    412                                 new ast::NameExpr( location, "__get_type" ),
    413                                 { args.front() }
    414                         )
    415                 )
    416         );
    417 
    418         lock_guard_struct->params.push_back( lock_type_expr );
    419 
    420         // In reverse order:
     1308
     1309        // use try stmts to lock and finally to unlock
     1310        ast::TryStmt * outerTry = nullptr;
     1311        ast::TryStmt * currentTry;
     1312        ast::CompoundStmt * lastBody = nullptr;
     1313
     1314        // adds a nested try stmt for each lock we are locking
     1315        for ( long unsigned int i = 0; i < args.size(); i++ ) {
     1316                ast::UntypedExpr * innerAccess = new ast::UntypedExpr(
     1317                        location,
     1318                        new ast::NameExpr( location,"?[?]" ), {
     1319                                new ast::NameExpr( location, "__monitors" ),
     1320                                ast::ConstantExpr::from_int( location, i )
     1321                        }
     1322                );
     1323
     1324                // make the try body
     1325                ast::CompoundStmt * currTryBody = new ast::CompoundStmt( location );
     1326                ast::IfStmt * lockCall = genTypeDiscrimLockUnlock( "lock", args, location, innerAccess );
     1327                currTryBody->push_back( lockCall );
     1328
     1329                // make the finally stmt
     1330                ast::CompoundStmt * currFinallyBody = new ast::CompoundStmt( location );
     1331                ast::IfStmt * unlockCall = genTypeDiscrimLockUnlock( "unlock", args, location, innerAccess );
     1332                currFinallyBody->push_back( unlockCall );
     1333
     1334                // construct the current try
     1335                currentTry = new ast::TryStmt(
     1336                        location,
     1337                        currTryBody,
     1338                        {},
     1339                        new ast::FinallyStmt( location, currFinallyBody )
     1340                );
     1341                if ( i == 0 ) outerTry = currentTry;
     1342                else {
     1343                        // pushback try into the body of the outer try
     1344                        lastBody->push_back( currentTry );
     1345                }
     1346                lastBody = currTryBody;
     1347        }
     1348
     1349        // push body into innermost try body
     1350        if ( lastBody != nullptr ) {
     1351                lastBody->push_back( body );
     1352                newBody->push_front( outerTry );
     1353        }       
     1354
    4211355        // monitor_guard_t __guard = { __monitors, # };
    422         mutBody->push_front(
     1356        newBody->push_front(
    4231357                new ast::DeclStmt(
    4241358                        location,
     
    4471381
    4481382        // monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
    449         mutBody->push_front( new ast::DeclStmt( location, monitors ) );
    450 
    451         return mutBody;
     1383        newBody->push_front( new ast::DeclStmt( location, monitors ) );
     1384
     1385        // // The parameter for both __lock_curr/__unlock_curr routines.
     1386        // ast::ObjectDecl * this_decl = new ast::ObjectDecl(
     1387        //      location,
     1388        //      "this",
     1389        //      new ast::PointerType( new ast::VoidType() ),
     1390        //      nullptr,
     1391        //      {},
     1392        //      ast::Linkage::Cforall
     1393        // );
     1394
     1395        // ast::FunctionDecl * lock_decl = new ast::FunctionDecl(
     1396        //      location,
     1397        //      lockFnName,
     1398        //      { /* forall */ },
     1399        //      {
     1400        //              // Copy the declaration of this.
     1401        //              this_decl,
     1402        //      },
     1403        //      { /* returns */ },
     1404        //      nullptr,
     1405        //      0,
     1406        //      ast::Linkage::Cforall,
     1407        //      { /* attributes */ },
     1408        //      ast::Function::Inline
     1409        // );
     1410
     1411        // ast::FunctionDecl * unlock_decl = new ast::FunctionDecl(
     1412        //      location,
     1413        //      unlockFnName,
     1414        //      { /* forall */ },
     1415        //      {
     1416        //              // Copy the declaration of this.
     1417        //              ast::deepCopy( this_decl ),
     1418        //      },
     1419        //      { /* returns */ },
     1420        //      nullptr,
     1421        //      0,
     1422        //      ast::Linkage::Cforall,
     1423        //      { /* attributes */ },
     1424        //      ast::Function::Inline
     1425        // );
     1426
     1427        // ast::IfStmt * outerLockIf = nullptr;
     1428        // ast::IfStmt * outerUnlockIf = nullptr;
     1429        // ast::IfStmt * lastLockIf = nullptr;
     1430        // ast::IfStmt * lastUnlockIf = nullptr;
     1431
     1432        // //adds an if/elif clause for each lock to assign type from void ptr based on ptr address
     1433        // for ( long unsigned int i = 0; i < args.size(); i++ ) {
     1434        //      ast::VariableExpr * thisParam = new ast::VariableExpr( location, InitTweak::getParamThis( lock_decl ) );
     1435        //      ast::UntypedExpr * ifCond = new ast::UntypedExpr( location,
     1436        //              new ast::NameExpr( location, "?==?" ), {
     1437        //                      thisParam,
     1438        //                      new ast::CastExpr( location, new ast::AddressExpr( location, args.at(i) ), new ast::PointerType( new ast::VoidType() ))
     1439        //              }
     1440        //      );
     1441
     1442        //      ast::IfStmt * currLockIf = new ast::IfStmt(
     1443        //              location,
     1444        //              ast::deepCopy( ifCond ),
     1445        //              genVirtLockUnlockExpr( "lock", args.at(i), location, ast::deepCopy( thisParam ) )
     1446        //      );
     1447
     1448        //      ast::IfStmt * currUnlockIf = new ast::IfStmt(
     1449        //              location,
     1450        //              ifCond,
     1451        //              genVirtLockUnlockExpr( "unlock", args.at(i), location, ast::deepCopy( thisParam ) )
     1452        //      );
     1453               
     1454        //      if ( i == 0 ) {
     1455        //              outerLockIf = currLockIf;
     1456        //              outerUnlockIf = currUnlockIf;
     1457        //      } else {
     1458        //              // add ifstmt to else of previous stmt
     1459        //              lastLockIf->else_ = currLockIf;
     1460        //              lastUnlockIf->else_ = currUnlockIf;
     1461        //      }
     1462
     1463        //      lastLockIf = currLockIf;
     1464        //      lastUnlockIf = currUnlockIf;
     1465        // }
     1466       
     1467        // // add pointer typing if/elifs to body of routines
     1468        // lock_decl->stmts = new ast::CompoundStmt( location, { outerLockIf } );
     1469        // unlock_decl->stmts = new ast::CompoundStmt( location, { outerUnlockIf } );
     1470
     1471        // // add routines to scope
     1472        // declsToAddBefore.push_back( lock_decl );
     1473        // declsToAddBefore.push_back( unlock_decl );
     1474
     1475        // newBody->push_front(new ast::DeclStmt( location, lock_decl ));
     1476        // newBody->push_front(new ast::DeclStmt( location, unlock_decl ));
     1477
     1478        return newBody;
    4521479}
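Overall the mutex statement expands into a void-pointer monitor array, a monitor guard, and one nested try/finally per lock, so the unlocks run in reverse order even on early exit. C has no try/finally; the sketch below reproduces only that lock-in-order/unlock-in-reverse nesting shape using nested scopes and GCC's cleanup attribute, and every name in it is a stand-in:

        #include <stdio.h>

        static void unlock_on_exit( int ** l ) { printf( "unlock %d\n", **l ); }

        int main( void ) {
                int a = 1, b = 2;
                {       /* outer "try": lock a, guarantee its unlock */
                        printf( "lock %d\n", a );
                        int * guard_a __attribute__(( cleanup( unlock_on_exit ) )) = &a;
                        (void)guard_a;
                        {       /* inner "try": lock b, guarantee its unlock */
                                printf( "lock %d\n", b );
                                int * guard_b __attribute__(( cleanup( unlock_on_exit ) )) = &b;
                                (void)guard_b;
                                /* original mutex-statement body runs here */
                        }       /* b unlocked first */
                }               /* then a unlocked  */
                return 0;
        }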
    4531480
     
    5641591
    5651592// --------------------------------------------------------------------------
     1593// Interface Functions:
    5661594
    5671595void implementKeywords( ast::TranslationUnit & translationUnit ) {
    568         (void)translationUnit;
    569         assertf(false, "Apply Keywords not implemented." );
     1596        ast::Pass<ThreadKeyword>::run( translationUnit );
     1597        ast::Pass<CoroutineKeyword>::run( translationUnit );
     1598        ast::Pass<MonitorKeyword>::run( translationUnit );
     1599        ast::Pass<GeneratorKeyword>::run( translationUnit );
     1600        ast::Pass<SuspendKeyword>::run( translationUnit );
    5701601}
    5711602
  • src/ControlStruct/ExceptTranslateNew.cpp

    ref3c383 rd672350  
    99// Author           : Andrew Beach
    1010// Created On       : Mon Nov  8 11:53:00 2021
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Jan 31 18:49:58 2022
    13 // Update Count     : 1
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Fri Mar 11 17:51:00 2022
     13// Update Count     : 2
    1414//
    1515
     
    2727
    2828        typedef std::list<ast::CatchStmt*> CatchList;
    29 
    30         void split( CatchList& allHandlers, CatchList& terHandlers,
    31                                 CatchList& resHandlers ) {
    32                 while ( !allHandlers.empty() ) {
    33                         ast::CatchStmt * stmt = allHandlers.front();
    34                         allHandlers.pop_front();
    35                         if (stmt->kind == ast::ExceptionKind::Terminate) {
    36                                 terHandlers.push_back(stmt);
    37                         } else {
    38                                 resHandlers.push_back(stmt);
    39                         }
    40                 }
    41         }
    4229
    4330        void appendDeclStmt( ast::CompoundStmt * block, ast::DeclWithType * item ) {
     
    171158        ast::Stmt * create_resume_rethrow( const ast::ThrowStmt * throwStmt );
    172159
    173         // Types used in translation, make sure to use clone.
     160        // Types used in translation, first group are internal.
     161        ast::ObjectDecl * make_index_object( CodeLocation const & ) const;
     162        ast::ObjectDecl * make_exception_object( CodeLocation const & ) const;
     163        ast::ObjectDecl * make_bool_object( CodeLocation const & ) const;
     164        ast::ObjectDecl * make_voidptr_object( CodeLocation const & ) const;
     165        ast::ObjectDecl * make_unused_index_object( CodeLocation const & ) const;
    174166        // void (*function)();
    175         ast::FunctionDecl * try_func_t;
     167        ast::FunctionDecl * make_try_function( CodeLocation const & ) const;
    176168        // void (*function)(int, exception);
    177         ast::FunctionDecl * catch_func_t;
     169        ast::FunctionDecl * make_catch_function( CodeLocation const & ) const;
    178170        // int (*function)(exception);
    179         ast::FunctionDecl * match_func_t;
     171        ast::FunctionDecl * make_match_function( CodeLocation const & ) const;
    180172        // bool (*function)(exception);
    181         ast::FunctionDecl * handle_func_t;
     173        ast::FunctionDecl * make_handle_function( CodeLocation const & ) const;
    182174        // void (*function)(__attribute__((unused)) void *);
    183         ast::FunctionDecl * finally_func_t;
    184 
    185         ast::StructInstType * create_except_type() {
    186                 assert( except_decl );
    187                 return new ast::StructInstType( except_decl );
    188         }
    189         void init_func_types();
     175        ast::FunctionDecl * make_finally_function( CodeLocation const & ) const;
    190176
    191177public:
     
    199185};
    200186
    201 void TryMutatorCore::init_func_types() {
     187ast::ObjectDecl * TryMutatorCore::make_index_object(
     188                CodeLocation const & location ) const {
     189        return new ast::ObjectDecl(
     190                location,
     191                "__handler_index",
     192                new ast::BasicType(ast::BasicType::SignedInt),
     193                nullptr, //init
     194                ast::Storage::Classes{},
     195                ast::Linkage::Cforall
     196                );
     197}
     198
     199ast::ObjectDecl * TryMutatorCore::make_exception_object(
     200                CodeLocation const & location ) const {
    202201        assert( except_decl );
    203 
    204         ast::ObjectDecl index_obj(
    205                 {},
    206                 "__handler_index",
    207                 new ast::BasicType(ast::BasicType::SignedInt)
    208                 );
    209         ast::ObjectDecl exception_obj(
    210                 {},
     202        return new ast::ObjectDecl(
     203                location,
    211204                "__exception_inst",
    212205                new ast::PointerType(
    213206                        new ast::StructInstType( except_decl )
    214207                        ),
    215                 NULL
    216                 );
    217         ast::ObjectDecl bool_obj(
    218                 {},
     208                nullptr, //init
     209                ast::Storage::Classes{},
     210                ast::Linkage::Cforall
     211                );
     212}
     213
     214ast::ObjectDecl * TryMutatorCore::make_bool_object(
     215                CodeLocation const & location ) const {
     216        return new ast::ObjectDecl(
     217                location,
    219218                "__ret_bool",
    220219                new ast::BasicType( ast::BasicType::Bool ),
     
    225224                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
    226225                );
    227         ast::ObjectDecl voidptr_obj(
    228                 {},
     226}
     227
     228ast::ObjectDecl * TryMutatorCore::make_voidptr_object(
     229                CodeLocation const & location ) const {
     230        return new ast::ObjectDecl(
     231                location,
    229232                "__hook",
    230233                new ast::PointerType(
     
    237240                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
    238241                );
    239 
    240         ast::ObjectDecl unused_index_obj(
    241                 {},
     242}
     243
     244ast::ObjectDecl * TryMutatorCore::make_unused_index_object(
     245                CodeLocation const & location ) const {
     246        return new ast::ObjectDecl(
     247                location,
    242248                "__handler_index",
    243249                new ast::BasicType(ast::BasicType::SignedInt),
     
    248254                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
    249255        );
    250         //unused_index_obj->attributes.push_back( new Attribute( "unused" ) );
    251 
    252         try_func_t = new ast::FunctionDecl(
    253                 {},
     256}
     257
     258ast::FunctionDecl * TryMutatorCore::make_try_function(
     259                CodeLocation const & location ) const {
     260        return new ast::FunctionDecl(
     261                location,
    254262                "try",
    255263                {}, //forall
     
    260268                ast::Linkage::Cforall
    261269        );
    262 
    263         catch_func_t = new ast::FunctionDecl(
    264                 {},
     270}
     271
     272ast::FunctionDecl * TryMutatorCore::make_catch_function(
     273                CodeLocation const & location ) const {
     274        return new ast::FunctionDecl(
     275                location,
    265276                "catch",
    266277                {}, //forall
    267                 {ast::deepCopy(&index_obj), ast::deepCopy(&exception_obj)},//param
     278                { make_index_object( location ), make_exception_object( location ) },
    268279                {}, //return void
    269280                nullptr,
     
    271282                ast::Linkage::Cforall
    272283        );
    273 
    274         match_func_t = new ast::FunctionDecl(
    275                 {},
     284}
     285
     286ast::FunctionDecl * TryMutatorCore::make_match_function(
     287                CodeLocation const & location ) const {
     288        return new ast::FunctionDecl(
     289                location,
    276290                "match",
    277291                {}, //forall
    278                 {ast::deepCopy(&exception_obj)},
    279                 {ast::deepCopy(&unused_index_obj)},
     292                { make_exception_object( location ) },
     293                { make_unused_index_object( location ) },
    280294                nullptr,
    281295                ast::Storage::Classes{},
    282296                ast::Linkage::Cforall
    283297        );
    284 
    285         handle_func_t = new ast::FunctionDecl(
    286                 {},
     298}
     299
     300ast::FunctionDecl * TryMutatorCore::make_handle_function(
     301                CodeLocation const & location ) const {
     302        return new ast::FunctionDecl(
     303                location,
    287304                "handle",
    288305                {}, //forall
    289                 {ast::deepCopy(&exception_obj)},
    290                 {ast::deepCopy(&bool_obj)},
     306                { make_exception_object( location ) },
     307                { make_bool_object( location ) },
    291308                nullptr,
    292309                ast::Storage::Classes{},
    293310                ast::Linkage::Cforall
    294311        );
    295 
    296         finally_func_t = new ast::FunctionDecl(
    297                 {},
     312}
     313
     314ast::FunctionDecl * TryMutatorCore::make_finally_function(
     315                CodeLocation const & location ) const {
     316        return new ast::FunctionDecl(
     317                location,
    298318                "finally",
    299319                {}, //forall
    300                 {ast::deepCopy(&voidptr_obj)},
     320                { make_voidptr_object( location ) },
    301321                {}, //return void
    302322                nullptr,
     
    304324                ast::Linkage::Cforall
    305325        );
    306 
    307         //catch_func_t.get_parameters().push_back( index_obj.clone() );
    308         //catch_func_t.get_parameters().push_back( exception_obj.clone() );
    309         //match_func_t.get_returnVals().push_back( unused_index_obj );
    310         //match_func_t.get_parameters().push_back( exception_obj.clone() );
    311         //handle_func_t.get_returnVals().push_back( bool_obj.clone() );
    312         //handle_func_t.get_parameters().push_back( exception_obj.clone() );
    313         //finally_func_t.get_parameters().push_back( voidptr_obj.clone() );
    314326}
    315327
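For reference, the five helpers these factories build have the C-level shapes sketched below. The real declarations are Cforall, use the exception type recorded in except_decl, and are literally named try, catch, match, handle and finally, so the names here are stand-ins only:

        typedef struct exception_t exception_t;   /* stand-in for the __cfaehm exception type */

        void  try_block( void );                                     /* wrapped try body         */
        void  catch_block( int handler_index, exception_t * e );     /* run the selected handler */
        int   match_block( exception_t * e );                        /* choose a handler index   */
        _Bool handle_block( exception_t * e );                       /* resumption handlers      */
        void  finally_block( __attribute__((unused)) void * hook );  /* wrapped finally body     */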
    316328// TryStmt Mutation Helpers
    317 
    318 /*
    319 ast::CompoundStmt * TryMutatorCore::take_try_block( ast::TryStmt *tryStmt ) {
    320         ast::CompoundStmt * block = tryStmt->body;
    321         tryStmt->body = nullptr;
    322         return block;
    323 }
    324 */
    325329
    326330ast::FunctionDecl * TryMutatorCore::create_try_wrapper(
    327331                const ast::CompoundStmt *body ) {
    328332
    329         ast::FunctionDecl * ret = ast::deepCopy(try_func_t);
     333        ast::FunctionDecl * ret = make_try_function( body->location );
    330334        ret->stmts = body;
    331335        return ret;
     
    339343        const CodeLocation loc = handlers.front()->location;
    340344
    341         ast::FunctionDecl * func_t = ast::deepCopy(catch_func_t);
     345        ast::FunctionDecl * func_t = make_catch_function( loc );
    342346        const ast::DeclWithType * index_obj = func_t->params.front();
    343347        const ast::DeclWithType * except_obj = func_t->params.back();
     
    386390                // handler->body = nullptr;
    387391
    388                 handler_wrappers.push_back( new ast::CaseStmt(loc, 
     392                handler_wrappers.push_back( new ast::CaseStmt(loc,
    389393                        ast::ConstantExpr::from_int(loc, index) ,
    390394                        { block, new ast::ReturnStmt( loc, nullptr ) }
     
    393397        // TODO: Some sort of meaningful error on default perhaps?
    394398
    395         /*
    396         std::list<Statement*> stmt_handlers;
    397         while ( !handler_wrappers.empty() ) {
    398                 stmt_handlers.push_back( handler_wrappers.front() );
    399                 handler_wrappers.pop_front();
    400         }
    401         */
    402 
    403         ast::SwitchStmt * handler_lookup = new ast::SwitchStmt(loc,
     399        ast::SwitchStmt * handler_lookup = new ast::SwitchStmt( loc,
    404400                new ast::VariableExpr( loc, index_obj ),
    405401                std::move(handler_wrappers)
    406402                );
    407         ast::CompoundStmt * body = new ast::CompoundStmt(loc,
    408                 {handler_lookup});
     403        ast::CompoundStmt * body = new ast::CompoundStmt( loc, {handler_lookup} );
    409404
    410405        func_t->stmts = body;
     
    433428
    434429        // Check for type match.
    435         ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc, 
     430        ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc,
    436431                new ast::VariableExpr(loc, except_obj ),
    437432                local_except->get_type()
     
    445440        }
    446441        // Construct the match condition.
    447         block->push_back( new ast::IfStmt(loc, 
     442        block->push_back( new ast::IfStmt(loc,
    448443                cond, modded_handler->body, nullptr ) );
    449444
    450         // xxx - how does this work in new ast
    451         //modded_handler->set_decl( nullptr );
    452         //modded_handler->set_cond( nullptr );
    453         //modded_handler->set_body( nullptr );
    454         //delete modded_handler;
    455445        return block;
    456446}
     
    467457        ast::CompoundStmt * body = new ast::CompoundStmt(loc);
    468458
    469         ast::FunctionDecl * func_t = ast::deepCopy(match_func_t);
     459        ast::FunctionDecl * func_t = make_match_function( loc );
    470460        const ast::DeclWithType * except_obj = func_t->params.back();
    471461
     
    490480        }
    491481
    492         body->push_back( new ast::ReturnStmt(loc, 
     482        body->push_back( new ast::ReturnStmt(loc,
    493483                ast::ConstantExpr::from_int( loc, 0 ) ));
    494484
     
    525515        ast::CompoundStmt * body = new ast::CompoundStmt(loc);
    526516
    527         ast::FunctionDecl * func_t = ast::deepCopy(handle_func_t);
     517        ast::FunctionDecl * func_t = make_handle_function( loc );
    528518        const ast::DeclWithType * except_obj = func_t->params.back();
    529519
     
    535525                ast::CompoundStmt * handling_code;
    536526                if (handler->body.as<ast::CompoundStmt>()) {
    537                         handling_code =
    538                         strict_dynamic_cast<ast::CompoundStmt*>( handler->body.get_and_mutate() );
     527                        handling_code = strict_dynamic_cast<ast::CompoundStmt*>(
     528                                handler->body.get_and_mutate() );
    539529                } else {
    540530                        handling_code = new ast::CompoundStmt(loc);
     
    600590        const ast::CompoundStmt * body = finally->body;
    601591
    602         ast::FunctionDecl * func_t = ast::deepCopy(finally_func_t);
     592        ast::FunctionDecl * func_t = make_finally_function( tryStmt->location );
    603593        func_t->stmts = body;
    604594
    605         // finally->set_block( nullptr );
    606         // delete finally;
    607595        tryStmt->finally = nullptr;
    608 
    609596
    610597        return func_t;
     
    617604
    618605        const CodeLocation loc = finally_wrapper->location;
    619         // Make Cleanup Attribute.
    620         /*
    621         std::list< ast::Attribute * > attributes;
    622         {
    623                 std::list<  > attr_params;
    624                 attr_params.push_back( nameOf( finally_wrapper ) );
    625                 attributes.push_back( new Attribute( "cleanup", attr_params ) );
    626         }
    627         */
    628 
    629606        return new ast::ObjectDecl(
    630607                loc,
     
    644621        // return false;
    645622        const CodeLocation loc = throwStmt->location;
    646         ast::Stmt * result = new ast::ReturnStmt(loc, 
     623        ast::Stmt * result = new ast::ReturnStmt(loc,
    647624                ast::ConstantExpr::from_bool( loc, false )
    648625                );
    649626        result->labels = throwStmt->labels;
    650         // delete throwStmt; done by postvisit
    651627        return result;
    652628}
     
    660636                assert( nullptr == except_decl );
    661637                except_decl = structDecl;
    662                 init_func_types();
    663638        } else if ( structDecl->name == "__cfaehm_try_resume_node" ) {
    664639                assert( nullptr == node_decl );
     
    706681                }
    707682        }
    708         // split( mutStmt->handlers,
    709         //              termination_handlers, resumption_handlers );
    710683
    711684        if ( resumption_handlers.size() ) {
  • src/InitTweak/FixGlobalInit.cc

    ref3c383 rd672350  
    113113                accept_all(translationUnit, fixer);
    114114
     115                // Say these magic declarations come at the end of the file.
     116                CodeLocation const & location = translationUnit.decls.back()->location;
     117
    115118                if ( !fixer.core.initStmts.empty() ) {
    116119                        std::vector<ast::ptr<ast::Expr>> ctorParams;
    117                         if (inLibrary) ctorParams.emplace_back(ast::ConstantExpr::from_int({}, 200));
    118                         auto initFunction = new ast::FunctionDecl({}, "__global_init__", {}, {}, {}, new ast::CompoundStmt({}, std::move(fixer.core.initStmts)),
    119                                 ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("constructor", std::move(ctorParams))});
     120                        if (inLibrary) ctorParams.emplace_back(ast::ConstantExpr::from_int(location, 200));
     121                        auto initFunction = new ast::FunctionDecl(location,
     122                                "__global_init__", {}, {}, {},
     123                                new ast::CompoundStmt(location, std::move(fixer.core.initStmts)),
     124                                ast::Storage::Static, ast::Linkage::C,
     125                                {new ast::Attribute("constructor", std::move(ctorParams))});
    120126
    121127                        translationUnit.decls.emplace_back( initFunction );
     
    124130                if ( !fixer.core.destroyStmts.empty() ) {
    125131                        std::vector<ast::ptr<ast::Expr>> dtorParams;
    126                         if (inLibrary) dtorParams.emplace_back(ast::ConstantExpr::from_int({}, 200));
    127                         auto destroyFunction = new ast::FunctionDecl({}, "__global_destroy__", {}, {}, {}, new ast::CompoundStmt({}, std::move(fixer.core.destroyStmts)),
    128                                 ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("destructor", std::move(dtorParams))});
     132                        if (inLibrary) dtorParams.emplace_back(ast::ConstantExpr::from_int(location, 200));
     133                        auto destroyFunction = new ast::FunctionDecl( location,
     134                                "__global_destroy__", {}, {}, {},
     135                                new ast::CompoundStmt(location, std::move(fixer.core.destroyStmts)),
     136                                ast::Storage::Static, ast::Linkage::C,
     137                                {new ast::Attribute("destructor", std::move(dtorParams))});
    129138
    130139                        translationUnit.decls.emplace_back(destroyFunction);
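At the C level the two emitted functions amount to the following (GCC constructor/destructor attributes; the priority argument of 200 is only added for the library build, and the bodies here are placeholders for the collected statements):

        static void __global_init__( void )    __attribute__(( constructor( 200 ) ));
        static void __global_destroy__( void ) __attribute__(( destructor( 200 ) ));

        static void __global_init__( void )    { /* collected global init statements    */ }
        static void __global_destroy__( void ) { /* collected global destroy statements */ }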
  • src/InitTweak/FixInitNew.cpp

    ref3c383 rd672350  
    1616#include "CodeGen/GenType.h"           // for genPrettyType
    1717#include "CodeGen/OperatorTable.h"
     18#include "Common/CodeLocationTools.hpp"
    1819#include "Common/PassVisitor.h"        // for PassVisitor, WithStmtsToAdd
    1920#include "Common/SemanticError.h"      // for SemanticError
     
    8586        /// generate/resolve copy construction expressions for each, and generate/resolve destructors for both
    8687        /// arguments and return value temporaries
    87         struct ResolveCopyCtors final : public ast::WithGuards, public ast::WithStmtsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithVisitorRef<ResolveCopyCtors> {
     88        struct ResolveCopyCtors final : public ast::WithGuards, public ast::WithStmtsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithVisitorRef<ResolveCopyCtors>, public ast::WithConstTranslationUnit {
    8889                const ast::Expr * postvisit( const ast::ImplicitCopyCtorExpr * impCpCtorExpr );
    8990                const ast::StmtExpr * previsit( const ast::StmtExpr * stmtExpr );
     
    189190        /// for any member that is missing a corresponding ctor/dtor call.
    190191        /// error if a member is used before constructed
    191         struct GenStructMemberCalls final : public ast::WithGuards, public ast::WithShortCircuiting, public ast::WithSymbolTable, public ast::WithVisitorRef<GenStructMemberCalls> {
     192        struct GenStructMemberCalls final : public ast::WithGuards, public ast::WithShortCircuiting, public ast::WithSymbolTable, public ast::WithVisitorRef<GenStructMemberCalls>, public ast::WithConstTranslationUnit {
    192193                void previsit( const ast::FunctionDecl * funcDecl );
    193194                const ast::DeclWithType * postvisit( const ast::FunctionDecl * funcDecl );
     
    214215
    215216        /// expands ConstructorExpr nodes into comma expressions, using a temporary for the first argument
    216         struct FixCtorExprs final : public ast::WithDeclsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting {
     217        struct FixCtorExprs final : public ast::WithDeclsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithConstTranslationUnit {
    217218                const ast::Expr * postvisit( const ast::ConstructorExpr * ctorExpr );
    218219        };
     
    509510                // (VariableExpr and already resolved expression)
    510511                CP_CTOR_PRINT( std::cerr << "ResolvingCtorDtor " << untyped << std::endl; )
    511                 ast::ptr<ast::Expr> resolved = ResolvExpr::findVoidExpression(untyped, symtab);
     512                ast::ptr<ast::Expr> resolved = ResolvExpr::findVoidExpression(untyped, { symtab, transUnit().global } );
    512513                assert( resolved );
    513514                if ( resolved->env ) {
     
    553554                ast::ptr<ast::Expr> guard = mutArg;
    554555
    555                 ast::ptr<ast::ObjectDecl> tmp = new ast::ObjectDecl({}, "__tmp", mutResult, nullptr );
     556                ast::ptr<ast::ObjectDecl> tmp = new ast::ObjectDecl(loc, "__tmp", mutResult, nullptr );
    556557
    557558                // create and resolve copy constructor
     
    587588
    588589        ast::Expr * ResolveCopyCtors::destructRet( const ast::ObjectDecl * ret, const ast::Expr * arg ) {
     590                auto global = transUnit().global;
    589591                // TODO: refactor code for generating cleanup attribute, since it's common and reused in ~3-4 places
    590592                // check for existing cleanup attribute before adding another(?)
    591593                // need to add __Destructor for _tmp_cp variables as well
    592594
    593                 assertf( ast::dtorStruct, "Destructor generation requires __Destructor definition." );
    594                 assertf( ast::dtorStruct->members.size() == 2, "__Destructor definition does not have expected fields." );
    595                 assertf( ast::dtorStructDestroy, "Destructor generation requires __destroy_Destructor." );
     595                assertf( global.dtorStruct, "Destructor generation requires __Destructor definition." );
     596                assertf( global.dtorStruct->members.size() == 2, "__Destructor definition does not have expected fields." );
     597                assertf( global.dtorDestroy, "Destructor generation requires __destroy_Destructor." );
    596598
    597599                const CodeLocation loc = ret->location;
     
    610612                auto dtorFunc = getDtorFunc( ret, new ast::ExprStmt(loc, dtor ), stmtsToAddBefore );
    611613
    612                 auto dtorStructType = new ast::StructInstType(ast::dtorStruct);
     614                auto dtorStructType = new ast::StructInstType( global.dtorStruct );
    613615
    614616                // what does this do???
     
    622624                static UniqueName namer( "_ret_dtor" );
    623625                auto retDtor = new ast::ObjectDecl(loc, namer.newName(), dtorStructType, new ast::ListInit(loc, { new ast::SingleInit(loc, ast::ConstantExpr::null(loc) ), new ast::SingleInit(loc, new ast::CastExpr( new ast::VariableExpr(loc, dtorFunc ), dtorType ) ) } ) );
    624                 retDtor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr(loc, ast::dtorStructDestroy ) } ) );
     626                retDtor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr(loc, global.dtorDestroy ) } ) );
    625627                stmtsToAddBefore.push_back( new ast::DeclStmt(loc, retDtor ) );
    626628
    627629                if ( arg ) {
    628                         auto member = new ast::MemberExpr(loc, ast::dtorStruct->members.front().strict_as<ast::DeclWithType>(), new ast::VariableExpr(loc, retDtor ) );
     630                        auto member = new ast::MemberExpr(loc, global.dtorStruct->members.front().strict_as<ast::DeclWithType>(), new ast::VariableExpr(loc, retDtor ) );
    629631                        auto object = new ast::CastExpr( new ast::AddressExpr( new ast::VariableExpr(loc, ret ) ), new ast::PointerType(new ast::VoidType() ) );
    630632                        ast::Expr * assign = createBitwiseAssignment( member, object );
     
    799801        // to prevent warnings ('_unq0' may be used uninitialized in this function),
    800802        // insert an appropriate zero initializer for UniqueExpr temporaries.
    801         ast::Init * makeInit( const ast::Type * t ) {
     803        ast::Init * makeInit( const ast::Type * t, CodeLocation const & loc ) {
    802804                if ( auto inst = dynamic_cast< const ast::StructInstType * >( t ) ) {
    803805                        // initializer for empty struct must be empty
    804                         if ( inst->base->members.empty() ) return new ast::ListInit({}, {});
     806                        if ( inst->base->members.empty() ) {
     807                                return new ast::ListInit( loc, {} );
     808                        }
    805809                } else if ( auto inst = dynamic_cast< const ast::UnionInstType * >( t ) ) {
    806810                        // initializer for empty union must be empty
    807                         if ( inst->base->members.empty() ) return new ast::ListInit({}, {});
    808                 }
    809 
    810                 return new ast::ListInit( {}, { new ast::SingleInit( {}, ast::ConstantExpr::from_int({}, 0) ) } );
     811                        if ( inst->base->members.empty() ) {
     812                                return new ast::ListInit( loc, {} );
     813                        }
     814                }
     815
     816                return new ast::ListInit( loc, {
     817                        new ast::SingleInit( loc, ast::ConstantExpr::from_int( loc, 0 ) )
     818                } );
    811819        }
    812820
     
    832840                        } else {
    833841                                // expr isn't a call expr, so create a new temporary variable to use to hold the value of the unique expression
    834                                 mutExpr->object = new ast::ObjectDecl( mutExpr->location, toString("_unq", mutExpr->id), mutExpr->result, makeInit( mutExpr->result ) );
     842                                mutExpr->object = new ast::ObjectDecl( mutExpr->location, toString("_unq", mutExpr->id), mutExpr->result, makeInit( mutExpr->result, mutExpr->location ) );
    835843                                mutExpr->var = new ast::VariableExpr( mutExpr->location, mutExpr->object );
    836844                        }
     
    11721180                        auto guard = makeFuncGuard( [this]() { symtab.enterScope(); }, [this]() { symtab.leaveScope(); } );
    11731181                        symtab.addFunction( function );
     1182                        auto global = transUnit().global;
    11741183
    11751184                        // need to iterate through members in reverse in order for
     
    12171226
    12181227                                                        static UniqueName memberDtorNamer = { "__memberDtor" };
    1219                                                         assertf( ast::dtorStruct, "builtin __Destructor not found." );
    1220                                                         assertf( ast::dtorStructDestroy, "builtin __destroy_Destructor not found." );
     1228                                                        assertf( global.dtorStruct, "builtin __Destructor not found." );
     1229                                                        assertf( global.dtorDestroy, "builtin __destroy_Destructor not found." );
    12211230
    12221231                                                        ast::Expr * thisExpr = new ast::CastExpr( new ast::AddressExpr( new ast::VariableExpr(loc, thisParam ) ), new ast::PointerType( new ast::VoidType(), ast::CV::Qualifiers() ) );
     
    12281237                                                        auto dtorType = new ast::PointerType( dtorFtype );
    12291238
    1230                                                         auto destructor = new ast::ObjectDecl(loc, memberDtorNamer.newName(), new ast::StructInstType( ast::dtorStruct ), new ast::ListInit(loc, { new ast::SingleInit(loc, thisExpr ), new ast::SingleInit(loc, new ast::CastExpr( dtorExpr, dtorType ) ) } ) );
    1231                                                         destructor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr({}, ast::dtorStructDestroy ) } ) );
     1239                                                        auto destructor = new ast::ObjectDecl(loc, memberDtorNamer.newName(), new ast::StructInstType( global.dtorStruct ), new ast::ListInit(loc, { new ast::SingleInit(loc, thisExpr ), new ast::SingleInit(loc, new ast::CastExpr( dtorExpr, dtorType ) ) } ) );
     1240                                                        destructor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr( loc, global.dtorDestroy ) } ) );
    12321241                                                        mutStmts->push_front( new ast::DeclStmt(loc, destructor ) );
    12331242                                                        mutStmts->kids.splice( mutStmts->kids.begin(), stmtsToAdd );
     
    13231332
    13241333        const ast::Expr * GenStructMemberCalls::postvisit( const ast::UntypedExpr * untypedExpr ) {
    1325                 // Expression * newExpr = untypedExpr;
    13261334                // xxx - functions returning ast::ptr seems wrong...
    1327                 auto res = ResolvExpr::findVoidExpression( untypedExpr, symtab );
    1328                 return res.release();
    1329                 // return newExpr;
     1335                auto res = ResolvExpr::findVoidExpression( untypedExpr, { symtab, transUnit().global } );
     1336                // Fix CodeLocation (at least until resolver is fixed).
     1337                auto fix = localFillCodeLocations( untypedExpr->location, res.release() );
     1338                return strict_dynamic_cast<const ast::Expr *>( fix );
    13301339        }
    13311340
     
    13611370
    13621371                // resolve assignment and dispose of new env
    1363                 auto resolved = ResolvExpr::findVoidExpression( assign, symtab );
     1372                auto resolved = ResolvExpr::findVoidExpression( assign, { symtab, transUnit().global } );
    13641373                auto mut = resolved.get_and_mutate();
    13651374                assertf(resolved.get() == mut, "newly resolved expression must be unique");
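
Note on the recurring edit above: the passes in this file now mix in ast::WithConstTranslationUnit and hand the resolver a ResolveContext built from the pass's symbol table plus the translation unit's globals (dtorStruct, dtorDestroy, sizeType, ...), instead of consulting process-wide variables such as ast::dtorStruct. A minimal sketch of the shape implied by the call sites; field names are inferred from usages like context.symtab and context.global.sizeType, and the authoritative definition lives in the ResolvExpr headers:

    // Sketch only -- inferred from the diff, not copied from a header.
    namespace ResolvExpr {
        struct ResolveContext {
            const ast::SymbolTable & symtab;        // local lookup scope of the running pass
            const ast::TranslationGlobal & global;  // per-unit globals: dtorStruct, dtorDestroy, sizeType, ...
        };
    }

    // Typical call from a pass mixing in WithSymbolTable and WithConstTranslationUnit:
    //     auto resolved = ResolvExpr::findVoidExpression( untyped, { symtab, transUnit().global } );
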
  • src/Parser/parser.yy

    ref3c383 rd672350  
    1010// Created On       : Sat Sep  1 20:22:55 2001
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Feb 11 14:26:15 2022
    13 // Update Count     : 5174
     12// Last Modified On : Mon Mar 14 16:35:29 2022
     13// Update Count     : 5276
    1414//
    1515
     
    610610        // | RESUME '(' comma_expression ')' compound_statement
    611611        //      { SemanticError( yylloc, "Resume expression is currently unimplemented." ); $$ = nullptr; }
     612        | IDENTIFIER IDENTIFIER                                                         // syntax error
     613                {
     614                        SemanticError( yylloc, ::toString( "Adjacent identifiers are not meaningful in an expression. "
     615                                                                                           "Possible problem is identifier \"", *$1.str,
     616                                                                                           "\" is a misspelled typename or an incorrectly specified type name, "
     617                                                                                           "e.g., missing generic parameter or missing struct/union/enum before typename." ) );
     618                        $$ = nullptr;
     619                }
     620        | IDENTIFIER direct_type                                                        // syntax error
     621                {
     622                        SemanticError( yylloc, ::toString( "Identifier \"", *$1.str, "\" cannot appear before a type. "
     623                                                                                           "Possible problem is misspelled storage or CV qualifier." ) );
     624                        $$ = nullptr;
     625                }
    612626        ;
    613627
     
    638652                        // Historic, transitional: Disallow commas in subscripts.
    639653                        // Switching to this behaviour may help check if a C compatibility case uses comma-exprs in subscripts.
    640                 // { SemanticError( yylloc, "New array subscript is currently unimplemented." ); $$ = nullptr; }
    641654                        // Current: Commas in subscripts make tuples.
    642655                { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, new ExpressionNode( build_tuple( (ExpressionNode *)($3->set_last( $5 ) ) )) ) ); }
     
    647660                // equivalent to the old x[i,j].
    648661                { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, $3 ) ); }
     662        | constant '[' assignment_expression ']'                        // 3[a], 'a'[a], 3.5[a]
     663                { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, $3 ) ); }
     664        | string_literal '[' assignment_expression ']'          // "abc"[3], 3["abc"]
     665                { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, new ExpressionNode( $1 ), $3 ) ); }
    649666        | postfix_expression '{' argument_expression_list_opt '}' // CFA, constructor call
    650667                {
     
    10521069        identifier_or_type_name ':' attribute_list_opt statement
    10531070                { $$ = $4->add_label( $1, $3 ); }
     1071        | identifier_or_type_name ':' attribute_list_opt error // syntax error
     1072                {
     1073                        SemanticError( yylloc, ::toString( "Label \"", *$1.str, "\" must be associated with a statement, "
     1074                                                                                           "where a declaration, case, or default is not a statement. "
     1075                                                                                           "Move the label or terminate with a semi-colon." ) );
     1076                        $$ = nullptr;
     1077                }
    10541078        ;
    10551079
     
    10861110        | statement_list_nodecl statement
    10871111                { assert( $1 ); $1->set_last( $2 ); $$ = $1; }
     1112        | statement_list_nodecl error                                           // syntax error
     1113                { SemanticError( yylloc, "Declarations only allowed at the start of the switch body, i.e., after the '{'." ); $$ = nullptr; }
    10881114        ;
    10891115
     
    10931119        | MUTEX '(' ')' comma_expression ';'
    10941120                { $$ = new StatementNode( build_mutex( nullptr, new StatementNode( build_expr( $4 ) ) ) ); }
    1095                 // { SemanticError( yylloc, "Mutex expression is currently unimplemented." ); $$ = nullptr; }
    10961121        ;
    10971122
     
    11131138                        $$ = $7 ? new StatementNode( build_compound( (StatementNode *)((new StatementNode( $7 ))->set_last( sw )) ) ) : sw;
    11141139                }
     1140        | SWITCH '(' comma_expression ')' '{' error '}'         // CFA, syntax error
     1141                { SemanticError( yylloc, "Only declarations can appear before the list of case clauses." ); $$ = nullptr; }
    11151142        | CHOOSE '(' comma_expression ')' case_clause           // CFA
    11161143                { $$ = new StatementNode( build_switch( false, $3, $5 ) ); }
     
    11201147                        $$ = $7 ? new StatementNode( build_compound( (StatementNode *)((new StatementNode( $7 ))->set_last( sw )) ) ) : sw;
    11211148                }
     1149        | CHOOSE '(' comma_expression ')' '{' error '}'         // CFA, syntax error
     1150                { SemanticError( yylloc, "Only declarations can appear before the list of case clauses." ); $$ = nullptr; }
    11221151        ;
    11231152
     
    11581187
    11591188case_label:                                                                                             // CFA
    1160         CASE case_value_list ':'                                        { $$ = $2; }
     1189        CASE error                                                                                      // syntax error
     1190                { SemanticError( yylloc, "Missing case list after case." ); $$ = nullptr; }
     1191        | CASE case_value_list ':'                                      { $$ = $2; }
     1192        | CASE case_value_list error                                            // syntax error
     1193                { SemanticError( yylloc, "Missing colon after case list." ); $$ = nullptr; }
    11611194        | DEFAULT ':'                                                           { $$ = new StatementNode( build_default() ); }
    11621195                // A semantic check is required to ensure only one default clause per switch/choose statement.
    1163         ;
    1164 
    1165 //label_list_opt:
    1166 //      // empty
    1167 //      | identifier_or_type_name ':'
    1168 //      | label_list_opt identifier_or_type_name ':'
    1169 //      ;
     1196        | DEFAULT error                                                                         //  syntax error
     1197                { SemanticError( yylloc, "Missing colon after default." ); $$ = nullptr; }
     1198        ;
    11701199
    11711200case_label_list:                                                                                // CFA
     
    14031432        | when_clause_opt ELSE statement
    14041433                { $$ = build_waitfor_timeout( nullptr, maybe_build_compound( $3 ), $1 ); }
    1405                 // "else" must be conditional after timeout or timeout is never triggered (i.e., it is meaningless)
    1406         | when_clause_opt timeout statement WOR ELSE statement
     1434        // "else" must be conditional after timeout or timeout is never triggered (i.e., it is meaningless)
     1435        | when_clause_opt timeout statement WOR ELSE statement // syntax error
    14071436                { SemanticError( yylloc, "else clause must be conditional after timeout or timeout never triggered." ); $$ = nullptr; }
    14081437        | when_clause_opt timeout statement WOR when_clause ELSE statement
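
The parser.yy changes fall into two groups: recovery productions that turn common mistakes into targeted SemanticError diagnostics, and new postfix rules that accept constants and string literals as subscript bases. An illustrative fragment (hypothetical, not taken from the changeset's tests) of the inputs the new rules are aimed at:

    /* Fragment only: standard C subscript forms now covered by dedicated grammar rules. */
    void subscript_examples( void ) {
        char c = "abc"[1];          /* string-literal subscript, now a dedicated rule    */
        int  i = 3["abc"];          /* the symmetric form noted in the grammar comment   */
        (void)c; (void)i;
    }
    /* Inputs that now get a targeted diagnostic instead of a bare parse error:
     *     Foo x;       -- 'Foo' is a misspelled/undeclared type name ("Adjacent identifiers ...")
     *     lbl: }       -- label not followed by a statement
     *     case 1       -- missing ':' after the case list
     *     default      -- missing ':' after default
     */
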
  • src/ResolvExpr/CandidateFinder.cpp

    ref3c383 rd672350  
    1010// Created On       : Wed Jun 5 14:30:00 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Tue Oct  1 14:55:00 2019
    13 // Update Count     : 2
     12// Last Modified On : Wed Mar 16 11:58:00 2022
     13// Update Count     : 3
    1414//
    1515
     
    595595        /// Actually visits expressions to find their candidate interpretations
    596596        class Finder final : public ast::WithShortCircuiting {
     597                const ResolveContext & context;
    597598                const ast::SymbolTable & symtab;
    598599        public:
     
    618619
    619620                Finder( CandidateFinder & f )
    620                 : symtab( f.localSyms ), selfFinder( f ), candidates( f.candidates ), tenv( f.env ),
    621                   targetType( f.targetType ) {}
     621                : context( f.context ), symtab( context.symtab ), selfFinder( f ),
     622                  candidates( f.candidates ), tenv( f.env ), targetType( f.targetType ) {}
    622623
    623624                void previsit( const ast::Node * ) { visit_children = false; }
     
    872873                        Tuples::handleTupleAssignment( selfFinder, untypedExpr, argCandidates );
    873874
    874                         CandidateFinder funcFinder{ symtab, tenv };
     875                        CandidateFinder funcFinder( context, tenv );
    875876                        if (auto nameExpr = untypedExpr->func.as<ast::NameExpr>()) {
    876877                                auto kind = ast::SymbolTable::getSpecialFunctionKind(nameExpr->name);
     
    918919                        // find function operators
    919920                        ast::ptr< ast::Expr > opExpr = new ast::NameExpr{ untypedExpr->location, "?()" };
    920                         CandidateFinder opFinder{ symtab, tenv };
     921                        CandidateFinder opFinder( context, tenv );
    921922                        // okay if there aren't any function operations
    922923                        opFinder.find( opExpr, ResolvMode::withoutFailFast() );
     
    10591060
    10601061                void postvisit( const ast::AddressExpr * addressExpr ) {
    1061                         CandidateFinder finder{ symtab, tenv };
     1062                        CandidateFinder finder( context, tenv );
    10621063                        finder.find( addressExpr->arg );
    10631064
     
    10791080                        ast::ptr< ast::Type > toType = castExpr->result;
    10801081                        assert( toType );
    1081                         toType = resolveTypeof( toType, symtab );
     1082                        toType = resolveTypeof( toType, context );
    10821083                        // toType = SymTab::validateType( castExpr->location, toType, symtab );
    10831084                        toType = adjustExprType( toType, tenv, symtab );
    10841085
    1085                         CandidateFinder finder{ symtab, tenv, toType };
     1086                        CandidateFinder finder( context, tenv, toType );
    10861087                        finder.find( castExpr->arg, ResolvMode::withAdjustment() );
    10871088
     
    11361137                void postvisit( const ast::VirtualCastExpr * castExpr ) {
    11371138                        assertf( castExpr->result, "Implicit virtual cast targets not yet supported." );
    1138                         CandidateFinder finder{ symtab, tenv };
     1139                        CandidateFinder finder( context, tenv );
    11391140                        // don't prune here, all alternatives guaranteed to have same type
    11401141                        finder.find( castExpr->arg, ResolvMode::withoutPrune() );
     
    11531154                        auto target = inst->base.get();
    11541155
    1155                         CandidateFinder finder{ symtab, tenv };
     1156                        CandidateFinder finder( context, tenv );
    11561157
    11571158                        auto pick_alternatives = [target, this](CandidateList & found, bool expect_ref) {
     
    12021203
    12031204                void postvisit( const ast::UntypedMemberExpr * memberExpr ) {
    1204                         CandidateFinder aggFinder{ symtab, tenv };
     1205                        CandidateFinder aggFinder( context, tenv );
    12051206                        aggFinder.find( memberExpr->aggregate, ResolvMode::withAdjustment() );
    12061207                        for ( CandidateRef & agg : aggFinder.candidates ) {
     
    12871288                                addCandidate(
    12881289                                        new ast::SizeofExpr{
    1289                                                 sizeofExpr->location, resolveTypeof( sizeofExpr->type, symtab ) },
     1290                                                sizeofExpr->location, resolveTypeof( sizeofExpr->type, context ) },
    12901291                                        tenv );
    12911292                        } else {
    12921293                                // find all candidates for the argument to sizeof
    1293                                 CandidateFinder finder{ symtab, tenv };
     1294                                CandidateFinder finder( context, tenv );
    12941295                                finder.find( sizeofExpr->expr );
    12951296                                // find the lowest-cost candidate, otherwise ambiguous
     
    13111312                                addCandidate(
    13121313                                        new ast::AlignofExpr{
    1313                                                 alignofExpr->location, resolveTypeof( alignofExpr->type, symtab ) },
     1314                                                alignofExpr->location, resolveTypeof( alignofExpr->type, context ) },
    13141315                                        tenv );
    13151316                        } else {
    13161317                                // find all candidates for the argument to alignof
    1317                                 CandidateFinder finder{ symtab, tenv };
     1318                                CandidateFinder finder( context, tenv );
    13181319                                finder.find( alignofExpr->expr );
    13191320                                // find the lowest-cost candidate, otherwise ambiguous
     
    13541355
    13551356                void postvisit( const ast::LogicalExpr * logicalExpr ) {
    1356                         CandidateFinder finder1{ symtab, tenv };
     1357                        CandidateFinder finder1( context, tenv );
    13571358                        finder1.find( logicalExpr->arg1, ResolvMode::withAdjustment() );
    13581359                        if ( finder1.candidates.empty() ) return;
    13591360
    1360                         CandidateFinder finder2{ symtab, tenv };
     1361                        CandidateFinder finder2( context, tenv );
    13611362                        finder2.find( logicalExpr->arg2, ResolvMode::withAdjustment() );
    13621363                        if ( finder2.candidates.empty() ) return;
     
    13841385                void postvisit( const ast::ConditionalExpr * conditionalExpr ) {
    13851386                        // candidates for condition
    1386                         CandidateFinder finder1{ symtab, tenv };
     1387                        CandidateFinder finder1( context, tenv );
    13871388                        finder1.find( conditionalExpr->arg1, ResolvMode::withAdjustment() );
    13881389                        if ( finder1.candidates.empty() ) return;
    13891390
    13901391                        // candidates for true result
    1391                         CandidateFinder finder2{ symtab, tenv };
     1392                        CandidateFinder finder2( context, tenv );
    13921393                        finder2.find( conditionalExpr->arg2, ResolvMode::withAdjustment() );
    13931394                        if ( finder2.candidates.empty() ) return;
    13941395
    13951396                        // candidates for false result
    1396                         CandidateFinder finder3{ symtab, tenv };
     1397                        CandidateFinder finder3( context, tenv );
    13971398                        finder3.find( conditionalExpr->arg3, ResolvMode::withAdjustment() );
    13981399                        if ( finder3.candidates.empty() ) return;
     
    14451446                void postvisit( const ast::CommaExpr * commaExpr ) {
    14461447                        ast::TypeEnvironment env{ tenv };
    1447                         ast::ptr< ast::Expr > arg1 = resolveInVoidContext( commaExpr->arg1, symtab, env );
    1448 
    1449                         CandidateFinder finder2{ symtab, env };
     1448                        ast::ptr< ast::Expr > arg1 = resolveInVoidContext( commaExpr->arg1, context, env );
     1449
     1450                        CandidateFinder finder2( context, env );
    14501451                        finder2.find( commaExpr->arg2, ResolvMode::withAdjustment() );
    14511452
     
    14601461
    14611462                void postvisit( const ast::ConstructorExpr * ctorExpr ) {
    1462                         CandidateFinder finder{ symtab, tenv };
     1463                        CandidateFinder finder( context, tenv );
    14631464                        finder.find( ctorExpr->callExpr, ResolvMode::withoutPrune() );
    14641465                        for ( CandidateRef & r : finder.candidates ) {
     
    14691470                void postvisit( const ast::RangeExpr * rangeExpr ) {
    14701471                        // resolve low and high, accept candidates where low and high types unify
    1471                         CandidateFinder finder1{ symtab, tenv };
     1472                        CandidateFinder finder1( context, tenv );
    14721473                        finder1.find( rangeExpr->low, ResolvMode::withAdjustment() );
    14731474                        if ( finder1.candidates.empty() ) return;
    14741475
    1475                         CandidateFinder finder2{ symtab, tenv };
     1476                        CandidateFinder finder2( context, tenv );
    14761477                        finder2.find( rangeExpr->high, ResolvMode::withAdjustment() );
    14771478                        if ( finder2.candidates.empty() ) return;
     
    15491550
    15501551                void postvisit( const ast::UniqueExpr * unqExpr ) {
    1551                         CandidateFinder finder{ symtab, tenv };
     1552                        CandidateFinder finder( context, tenv );
    15521553                        finder.find( unqExpr->expr, ResolvMode::withAdjustment() );
    15531554                        for ( CandidateRef & r : finder.candidates ) {
     
    15581559
    15591560                void postvisit( const ast::StmtExpr * stmtExpr ) {
    1560                         addCandidate( resolveStmtExpr( stmtExpr, symtab ), tenv );
     1561                        addCandidate( resolveStmtExpr( stmtExpr, context ), tenv );
    15611562                }
    15621563
     
    15701571                        for ( const ast::InitAlternative & initAlt : initExpr->initAlts ) {
    15711572                                // calculate target type
    1572                                 const ast::Type * toType = resolveTypeof( initAlt.type, symtab );
     1573                                const ast::Type * toType = resolveTypeof( initAlt.type, context );
    15731574                                // toType = SymTab::validateType( initExpr->location, toType, symtab );
    15741575                                toType = adjustExprType( toType, tenv, symtab );
     
    15761577                                // types are not bound to the initialization type, since return type variables are
    15771578                                // only open for the duration of resolving the UntypedExpr.
    1578                                 CandidateFinder finder{ symtab, tenv, toType };
     1579                                CandidateFinder finder( context, tenv, toType );
    15791580                                finder.find( initExpr->expr, ResolvMode::withAdjustment() );
    15801581                                for ( CandidateRef & cand : finder.candidates ) {
     
    16931694                }
    16941695                else {
    1695                         satisfyAssertions(candidate, localSyms, satisfied, errors);
     1696                        satisfyAssertions(candidate, context.symtab, satisfied, errors);
    16961697                        needRecomputeKey = true;
    16971698                }
     
    18551856                        r->expr = ast::mutate_field(
    18561857                                r->expr.get(), &ast::Expr::result,
    1857                                 adjustExprType( r->expr->result, r->env, localSyms ) );
     1858                                adjustExprType( r->expr->result, r->env, context.symtab ) );
    18581859                }
    18591860        }
     
    18731874
    18741875        for ( const auto & x : xs ) {
    1875                 out.emplace_back( localSyms, env );
     1876                out.emplace_back( context, env );
    18761877                out.back().find( x, ResolvMode::withAdjustment() );
    18771878
  • src/ResolvExpr/CandidateFinder.hpp

    ref3c383 rd672350  
    1010// Created On       : Wed Jun 5 14:30:00 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Tue Oct  1  9:51:00 2019
    13 // Update Count     : 2
     12// Last Modified On : Wed Mar 16 15:22:00 2022
     13// Update Count     : 3
    1414//
    1515
     
    2525namespace ResolvExpr {
    2626
     27struct ResolveContext;
     28
    2729/// Data to perform expression resolution
    2830struct CandidateFinder {
    2931        CandidateList candidates;          ///< List of candidate resolutions
    30         const ast::SymbolTable & localSyms;   ///< Symbol table to lookup candidates
     32        const ResolveContext & context;  ///< Information about where the candidates are being found.
    3133        const ast::TypeEnvironment & env;  ///< Substitutions performed in this resolution
    3234        ast::ptr< ast::Type > targetType;  ///< Target type for resolution
     
    3436
    3537        CandidateFinder(
    36                 const ast::SymbolTable & syms, const ast::TypeEnvironment & env,
     38                const ResolveContext & context, const ast::TypeEnvironment & env,
    3739                const ast::Type * tt = nullptr )
    38         : candidates(), localSyms( syms ), env( env ), targetType( tt ) {}
     40        : candidates(), context( context ), env( env ), targetType( tt ) {}
    3941
    4042        /// Fill candidates with feasible resolutions for `expr`
  • src/ResolvExpr/CandidatePrinter.cpp

    ref3c383 rd672350  
    1010// Created On       : Tue Nov  9  9:54:00 2021
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Tue Nov  9 15:47:00 2021
    13 // Update Count     : 0
     12// Last Modified On : Wed Mar 16 13:56:00 2022
     13// Update Count     : 1
    1414//
    1515
     
    2222#include "AST/TranslationUnit.hpp"
    2323#include "ResolvExpr/CandidateFinder.hpp"
     24#include "ResolvExpr/Resolver.h"
    2425
    2526#include <iostream>
     
    2930namespace {
    3031
    31 class CandidatePrintCore : public ast::WithSymbolTable {
     32class CandidatePrintCore : public ast::WithSymbolTable,
     33                public ast::WithConstTranslationUnit {
    3234        std::ostream & os;
    3335public:
     
    3638        void postvisit( const ast::ExprStmt * stmt ) {
    3739                ast::TypeEnvironment env;
    38                 CandidateFinder finder( symtab, env );
     40                CandidateFinder finder( { symtab, transUnit().global }, env );
    3941                finder.find( stmt->expr, ResolvMode::withAdjustment() );
    4042                int count = 1;
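
With WithConstTranslationUnit mixed in, a diagnostic pass can assemble the ResolveContext inline at each call site, as CandidatePrintCore now does. A condensed sketch of the post-change pattern (the class name here is made up; the real printer also formats and prints each candidate):

    // Sketch, assuming the repository's Pass machinery; only the construction pattern matters.
    class ExprCandidateDump : public ast::WithSymbolTable,
            public ast::WithConstTranslationUnit {
    public:
        void postvisit( const ast::ExprStmt * stmt ) {
            ast::TypeEnvironment env;
            CandidateFinder finder( { symtab, transUnit().global }, env );
            finder.find( stmt->expr, ResolvMode::withAdjustment() );
            // ... iterate finder.candidates and report them ...
        }
    };
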
  • src/ResolvExpr/ResolveTypeof.cc

    ref3c383 rd672350  
    99// Author           : Richard C. Bilson
    1010// Created On       : Sun May 17 12:12:20 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue May 19 16:49:04 2015
    13 // Update Count     : 3
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Wed Mar 16 16:09:00 2022
     13// Update Count     : 4
    1414//
    1515
     
    2222#include "AST/Node.hpp"
    2323#include "AST/Pass.hpp"
     24#include "AST/TranslationUnit.hpp"
    2425#include "AST/Type.hpp"
    2526#include "AST/TypeEnvironment.hpp"
     
    119120namespace {
    120121        struct ResolveTypeof_new : public ast::WithShortCircuiting {
    121                 const ast::SymbolTable & localSymtab;
    122 
    123                 ResolveTypeof_new( const ast::SymbolTable & syms ) : localSymtab( syms ) {}
     122                const ResolveContext & context;
     123
     124                ResolveTypeof_new( const ResolveContext & context ) :
     125                        context( context ) {}
    124126
    125127                void previsit( const ast::TypeofType * ) { visit_children = false; }
     
    137139                                ast::TypeEnvironment dummy;
    138140                                ast::ptr< ast::Expr > newExpr =
    139                                         resolveInVoidContext( typeofType->expr, localSymtab, dummy );
     141                                        resolveInVoidContext( typeofType->expr, context, dummy );
    140142                                assert( newExpr->result && ! newExpr->result->isVoid() );
    141143                                newType = newExpr->result;
     
    161163} // anonymous namespace
    162164
    163 const ast::Type * resolveTypeof( const ast::Type * type , const ast::SymbolTable & symtab ) {
    164         ast::Pass< ResolveTypeof_new > mutator{ symtab };
     165const ast::Type * resolveTypeof( const ast::Type * type , const ResolveContext & context ) {
     166        ast::Pass< ResolveTypeof_new > mutator( context );
    165167        return type->accept( mutator );
    166168}
     
    168170struct FixArrayDimension {
    169171        // should not require a mutable symbol table - prevent pass template instantiation
    170         const ast::SymbolTable & _symtab;
    171         FixArrayDimension(const ast::SymbolTable & symtab): _symtab(symtab) {}
     172        const ResolveContext & context;
     173        FixArrayDimension(const ResolveContext & context) : context( context ) {}
    172174
    173175        const ast::ArrayType * previsit (const ast::ArrayType * arrayType) {
    174176                if (!arrayType->dimension) return arrayType;
    175177                auto mutType = mutate(arrayType);
    176                 ast::ptr<ast::Type> sizetype = ast::sizeType ? ast::sizeType : new ast::BasicType(ast::BasicType::LongUnsignedInt);
    177                 mutType->dimension = findSingleExpression(arrayType->dimension, sizetype, _symtab);
     178                auto globalSizeType = context.global.sizeType;
     179                ast::ptr<ast::Type> sizetype = globalSizeType ? globalSizeType : new ast::BasicType(ast::BasicType::LongUnsignedInt);
     180                mutType->dimension = findSingleExpression(arrayType->dimension, sizetype, context );
    178181
    179182                if (InitTweak::isConstExpr(mutType->dimension)) {
     
    187190};
    188191
    189 const ast::Type * fixArrayType( const ast::Type * type, const ast::SymbolTable & symtab) {
    190         ast::Pass<FixArrayDimension> visitor {symtab};
     192const ast::Type * fixArrayType( const ast::Type * type, const ResolveContext & context ) {
     193        ast::Pass<FixArrayDimension> visitor(context);
    191194        return type->accept(visitor);
    192195}
    193196
    194 const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ast::SymbolTable & symtab ) {
    195         if (!decl->isTypeFixed) { 
     197const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ResolveContext & context ) {
     198        if (!decl->isTypeFixed) {
    196199                auto mutDecl = mutate(decl);
    197                 auto resolvedType = resolveTypeof(decl->type, symtab);
    198                 resolvedType = fixArrayType(resolvedType, symtab);
     200                auto resolvedType = resolveTypeof(decl->type, context);
     201                resolvedType = fixArrayType(resolvedType, context);
    199202                mutDecl->type = resolvedType;
    200203
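
fixObjectType and fixArrayType now look up the size type through the context rather than the old ast::sizeType global, keeping the unsigned-long fallback. A hedged sketch of how a declaration's type is fixed after this change (decl is an ast::ObjectDecl visited by a resolver pass; names are taken from the hunks above):

    // Sketch of the post-change call chain.
    if ( !decl->isTypeFixed ) {
        const ast::ObjectDecl * fixed = fixObjectType( decl, context );
        // fixObjectType resolves any typeof(...) via resolveTypeof( type, context ),
        // then fixes dimensions via fixArrayType( type, context ): each dimension is
        // resolved against context.global.sizeType, or unsigned long when that is unset.
        (void)fixed;
    }
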
  • src/ResolvExpr/ResolveTypeof.h

    ref3c383 rd672350  
    99// Author           : Richard C. Bilson
    1010// Created On       : Sun May 17 12:14:53 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Jul 22 09:38:35 2017
    13 // Update Count     : 3
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Wed Mar 16 11:33:00 2022
     13// Update Count     : 4
    1414//
    1515
     
    2222namespace ast {
    2323        class Type;
    24         class SymbolTable;
    2524        class ObjectDecl;
    2625}
    2726
    2827namespace ResolvExpr {
     28        struct ResolveContext;
     29
    2930        Type *resolveTypeof( Type*, const SymTab::Indexer &indexer );
    30         const ast::Type * resolveTypeof( const ast::Type *, const ast::SymbolTable & );
    31         const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ast::SymbolTable & symtab );
     31        const ast::Type * resolveTypeof( const ast::Type *, const ResolveContext & );
     32        const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ResolveContext & );
    3233} // namespace ResolvExpr
    3334
  • src/ResolvExpr/Resolver.cc

    ref3c383 rd672350  
    99// Author           : Aaron B. Moss
    1010// Created On       : Sun May 17 12:17:01 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue Feb  1 16:27:14 2022
    13 // Update Count     : 245
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Fri Mar 18 10:41:00 2022
     13// Update Count     : 247
    1414//
    1515
     
    997997                /// Calls the CandidateFinder and finds the single best candidate
    998998                CandidateRef findUnfinishedKindExpression(
    999                         const ast::Expr * untyped, const ast::SymbolTable & symtab, const std::string & kind,
     999                        const ast::Expr * untyped, const ResolveContext & context, const std::string & kind,
    10001000                        std::function<bool(const Candidate &)> pred = anyCandidate, ResolvMode mode = {}
    10011001                ) {
     
    10071007                        ++recursion_level;
    10081008                        ast::TypeEnvironment env;
    1009                         CandidateFinder finder{ symtab, env };
     1009                        CandidateFinder finder( context, env );
    10101010                        finder.find( untyped, recursion_level == 1 ? mode.atTopLevel() : mode );
    10111011                        --recursion_level;
     
    11291129
    11301130        ast::ptr< ast::Expr > resolveInVoidContext(
    1131                 const ast::Expr * expr, const ast::SymbolTable & symtab, ast::TypeEnvironment & env
     1131                const ast::Expr * expr, const ResolveContext & context,
     1132                ast::TypeEnvironment & env
    11321133        ) {
    11331134                assertf( expr, "expected a non-null expression" );
     
    11361137                ast::ptr< ast::CastExpr > untyped = new ast::CastExpr{ expr };
    11371138                CandidateRef choice = findUnfinishedKindExpression(
    1138                         untyped, symtab, "", anyCandidate, ResolvMode::withAdjustment() );
     1139                        untyped, context, "", anyCandidate, ResolvMode::withAdjustment() );
    11391140
    11401141                // a cast expression has either 0 or 1 interpretations (by language rules);
     
    11491150                /// context.
    11501151                ast::ptr< ast::Expr > findVoidExpression(
    1151                         const ast::Expr * untyped, const ast::SymbolTable & symtab
     1152                        const ast::Expr * untyped, const ResolveContext & context
    11521153                ) {
    11531154                        ast::TypeEnvironment env;
    1154                         ast::ptr< ast::Expr > newExpr = resolveInVoidContext( untyped, symtab, env );
     1155                        ast::ptr< ast::Expr > newExpr = resolveInVoidContext( untyped, context, env );
    11551156                        finishExpr( newExpr, env, untyped->env );
    11561157                        return newExpr;
     
    11631164                /// lowest cost, returning the resolved version
    11641165                ast::ptr< ast::Expr > findKindExpression(
    1165                         const ast::Expr * untyped, const ast::SymbolTable & symtab,
     1166                        const ast::Expr * untyped, const ResolveContext & context,
    11661167                        std::function<bool(const Candidate &)> pred = anyCandidate,
    11671168                        const std::string & kind = "", ResolvMode mode = {}
     
    11691170                        if ( ! untyped ) return {};
    11701171                        CandidateRef choice =
    1171                                 findUnfinishedKindExpression( untyped, symtab, kind, pred, mode );
     1172                                findUnfinishedKindExpression( untyped, context, kind, pred, mode );
    11721173                        ResolvExpr::finishExpr( choice->expr, choice->env, untyped->env );
    11731174                        return std::move( choice->expr );
     
    11761177                /// Resolve `untyped` to the single expression whose candidate is the best match
    11771178                ast::ptr< ast::Expr > findSingleExpression(
    1178                         const ast::Expr * untyped, const ast::SymbolTable & symtab
     1179                        const ast::Expr * untyped, const ResolveContext & context
    11791180                ) {
    11801181                        Stats::ResolveTime::start( untyped );
    1181                         auto res = findKindExpression( untyped, symtab );
     1182                        auto res = findKindExpression( untyped, context );
    11821183                        Stats::ResolveTime::stop();
    11831184                        return res;
     
    11861187
    11871188        ast::ptr< ast::Expr > findSingleExpression(
    1188                 const ast::Expr * untyped, const ast::Type * type, const ast::SymbolTable & symtab
     1189                const ast::Expr * untyped, const ast::Type * type,
     1190                const ResolveContext & context
    11891191        ) {
    11901192                assert( untyped && type );
    11911193                ast::ptr< ast::Expr > castExpr = new ast::CastExpr{ untyped, type };
    1192                 ast::ptr< ast::Expr > newExpr = findSingleExpression( castExpr, symtab );
    1193                 removeExtraneousCast( newExpr, symtab );
     1194                ast::ptr< ast::Expr > newExpr = findSingleExpression( castExpr, context );
     1195                removeExtraneousCast( newExpr, context.symtab );
    11941196                return newExpr;
    11951197        }
     
    12171219                /// Resolve `untyped` as an integral expression, returning the resolved version
    12181220                ast::ptr< ast::Expr > findIntegralExpression(
    1219                         const ast::Expr * untyped, const ast::SymbolTable & symtab
     1221                        const ast::Expr * untyped, const ResolveContext & context
    12201222                ) {
    1221                         return findKindExpression( untyped, symtab, hasIntegralType, "condition" );
     1223                        return findKindExpression( untyped, context, hasIntegralType, "condition" );
    12221224                }
    12231225
     
    12491251                // for work previously in GenInit
    12501252                static InitTweak::ManagedTypes_new managedTypes;
     1253                ResolveContext context;
    12511254
    12521255                bool inEnumDecl = false;
     
    12541257        public:
    12551258                static size_t traceId;
    1256                 Resolver_new() = default;
    1257                 Resolver_new( const ast::SymbolTable & syms ) { symtab = syms; }
     1259                Resolver_new( const ast::TranslationGlobal & global ) :
     1260                        context{ symtab, global } {}
     1261                Resolver_new( const ResolveContext & context ) :
     1262                        ast::WithSymbolTable{ context.symtab },
     1263                        context{ symtab, context.global } {}
    12581264
    12591265                const ast::FunctionDecl * previsit( const ast::FunctionDecl * );
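
Resolver_new now owns a ResolveContext and receives the translation unit's globals when the pass is launched. A rough wiring sketch, assuming only what the hunks above show (WithSymbolTable supplies the symtab member and, as a base class, is initialized before the context that aliases it):

    // Sketch of the constructor wiring; not the complete class.
    class ResolverSketch : public ast::WithSymbolTable /* , other mixins */ {
        ResolveContext context;
    public:
        ResolverSketch( const ast::TranslationGlobal & global )
            : context{ symtab, global } {}
    };
    // The entry point now forwards the globals when running the pass:
    //     ast::Pass< Resolver_new >::run( translationUnit, translationUnit.global );
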
     
    12721278                const ast::AsmStmt *         previsit( const ast::AsmStmt * );
    12731279                const ast::IfStmt *          previsit( const ast::IfStmt * );
    1274                 const ast::WhileDoStmt *       previsit( const ast::WhileDoStmt * );
     1280                const ast::WhileDoStmt *     previsit( const ast::WhileDoStmt * );
    12751281                const ast::ForStmt *         previsit( const ast::ForStmt * );
    12761282                const ast::SwitchStmt *      previsit( const ast::SwitchStmt * );
     
    12991305
    13001306        void resolve( ast::TranslationUnit& translationUnit ) {
    1301                 ast::Pass< Resolver_new >::run( translationUnit );
     1307                ast::Pass< Resolver_new >::run( translationUnit, translationUnit.global );
    13021308        }
    13031309
    13041310        ast::ptr< ast::Init > resolveCtorInit(
    1305                 const ast::ConstructorInit * ctorInit, const ast::SymbolTable & symtab
     1311                const ast::ConstructorInit * ctorInit, const ResolveContext & context
    13061312        ) {
    13071313                assert( ctorInit );
    1308                 ast::Pass< Resolver_new > resolver{ symtab };
     1314                ast::Pass< Resolver_new > resolver( context );
    13091315                return ctorInit->accept( resolver );
    13101316        }
    13111317
    13121318        const ast::Expr * resolveStmtExpr(
    1313                 const ast::StmtExpr * stmtExpr, const ast::SymbolTable & symtab
     1319                const ast::StmtExpr * stmtExpr, const ResolveContext & context
    13141320        ) {
    13151321                assert( stmtExpr );
    1316                 ast::Pass< Resolver_new > resolver{ symtab };
     1322                ast::Pass< Resolver_new > resolver( context );
    13171323                auto ret = mutate(stmtExpr->accept(resolver));
    13181324                strict_dynamic_cast< ast::StmtExpr * >( ret )->computeResult();
     
    13211327
    13221328        namespace {
    1323                 const ast::Attribute * handleAttribute(const CodeLocation & loc, const ast::Attribute * attr, const ast::SymbolTable & symtab) {
     1329                const ast::Attribute * handleAttribute(const CodeLocation & loc, const ast::Attribute * attr, const ResolveContext & context) {
    13241330                        std::string name = attr->normalizedName();
    13251331                        if (name == "constructor" || name == "destructor") {
    13261332                                if (attr->params.size() == 1) {
    13271333                                        auto arg = attr->params.front();
    1328                                         auto resolved = ResolvExpr::findSingleExpression( arg, new ast::BasicType( ast::BasicType::LongLongSignedInt ), symtab );
     1334                                        auto resolved = ResolvExpr::findSingleExpression( arg, new ast::BasicType( ast::BasicType::LongLongSignedInt ), context );
    13291335                                        auto result = eval(arg);
    13301336
     
    13691375
    13701376                        for (auto & attr: mutDecl->attributes) {
    1371                                 attr = handleAttribute(mutDecl->location, attr, symtab);
     1377                                attr = handleAttribute(mutDecl->location, attr, context );
    13721378                        }
    13731379
     
    13821388                        }
    13831389                        for (auto & asst : mutDecl->assertions) {
    1384                                 asst = fixObjectType(asst.strict_as<ast::ObjectDecl>(), symtab);
     1390                                asst = fixObjectType(asst.strict_as<ast::ObjectDecl>(), context);
    13851391                                symtab.addId(asst);
    13861392                                mutType->assertions.emplace_back(new ast::VariableExpr(functionDecl->location, asst));
     
    13941400
    13951401                        for (auto & param : mutDecl->params) {
    1396                                 param = fixObjectType(param.strict_as<ast::ObjectDecl>(), symtab);
     1402                                param = fixObjectType(param.strict_as<ast::ObjectDecl>(), context);
    13971403                                symtab.addId(param);
    13981404                                paramTypes.emplace_back(param->get_type());
    13991405                        }
    14001406                        for (auto & ret : mutDecl->returns) {
    1401                                 ret = fixObjectType(ret.strict_as<ast::ObjectDecl>(), symtab);
     1407                                ret = fixObjectType(ret.strict_as<ast::ObjectDecl>(), context);
    14021408                                returnTypes.emplace_back(ret->get_type());
    14031409                        }
     
    14701476                        // enumerator initializers should not use the enum type to initialize, since the
    14711477                        // enum type is still incomplete at this point. Use `int` instead.
    1472                         objectDecl = fixObjectType(objectDecl, symtab);
     1478                        objectDecl = fixObjectType(objectDecl, context);
    14731479                        currentObject = ast::CurrentObject{
    14741480                                objectDecl->location, new ast::BasicType{ ast::BasicType::SignedInt } };
     
    14761482                else {
    14771483                        if (!objectDecl->isTypeFixed) {
    1478                                 auto newDecl = fixObjectType(objectDecl, symtab);
     1484                                auto newDecl = fixObjectType(objectDecl, context);
    14791485                                auto mutDecl = mutate(newDecl);
    14801486
     
    15071513                        // nested type decls are hoisted already. no need to do anything
    15081514                        if (auto obj = member.as<ast::ObjectDecl>()) {
    1509                                 member = fixObjectType(obj, symtab);
     1515                                member = fixObjectType(obj, context);
    15101516                        }
    15111517                }
     
    15301536                return ast::mutate_field(
    15311537                        assertDecl, &ast::StaticAssertDecl::cond,
    1532                         findIntegralExpression( assertDecl->cond, symtab ) );
     1538                        findIntegralExpression( assertDecl->cond, context ) );
    15331539        }
    15341540
    15351541        template< typename PtrType >
    1536         const PtrType * handlePtrType( const PtrType * type, const ast::SymbolTable & symtab ) {
     1542        const PtrType * handlePtrType( const PtrType * type, const ResolveContext & context ) {
    15371543                if ( type->dimension ) {
    1538                         ast::ptr< ast::Type > sizeType = ast::sizeType;
     1544                        ast::ptr< ast::Type > sizeType = context.global.sizeType;
    15391545                        ast::mutate_field(
    15401546                                type, &PtrType::dimension,
    1541                                 findSingleExpression( type->dimension, sizeType, symtab ) );
     1547                                findSingleExpression( type->dimension, sizeType, context ) );
    15421548                }
    15431549                return type;
     
    15451551
    15461552        const ast::ArrayType * Resolver_new::previsit( const ast::ArrayType * at ) {
    1547                 return handlePtrType( at, symtab );
     1553                return handlePtrType( at, context );
    15481554        }
    15491555
    15501556        const ast::PointerType * Resolver_new::previsit( const ast::PointerType * pt ) {
    1551                 return handlePtrType( pt, symtab );
     1557                return handlePtrType( pt, context );
    15521558        }
    15531559
     
    15571563
    15581564                return ast::mutate_field(
    1559                         exprStmt, &ast::ExprStmt::expr, findVoidExpression( exprStmt->expr, symtab ) );
     1565                        exprStmt, &ast::ExprStmt::expr, findVoidExpression( exprStmt->expr, context ) );
    15601566        }
    15611567
     
    15641570
    15651571                asmExpr = ast::mutate_field(
    1566                         asmExpr, &ast::AsmExpr::operand, findVoidExpression( asmExpr->operand, symtab ) );
     1572                        asmExpr, &ast::AsmExpr::operand, findVoidExpression( asmExpr->operand, context ) );
    15671573
    15681574                return asmExpr;
     
    15781584        const ast::IfStmt * Resolver_new::previsit( const ast::IfStmt * ifStmt ) {
    15791585                return ast::mutate_field(
    1580                         ifStmt, &ast::IfStmt::cond, findIntegralExpression( ifStmt->cond, symtab ) );
     1586                        ifStmt, &ast::IfStmt::cond, findIntegralExpression( ifStmt->cond, context ) );
    15811587        }
    15821588
    15831589        const ast::WhileDoStmt * Resolver_new::previsit( const ast::WhileDoStmt * whileDoStmt ) {
    15841590                return ast::mutate_field(
    1585                         whileDoStmt, &ast::WhileDoStmt::cond, findIntegralExpression( whileDoStmt->cond, symtab ) );
     1591                        whileDoStmt, &ast::WhileDoStmt::cond, findIntegralExpression( whileDoStmt->cond, context ) );
    15861592        }
    15871593
     
    15891595                if ( forStmt->cond ) {
    15901596                        forStmt = ast::mutate_field(
    1591                                 forStmt, &ast::ForStmt::cond, findIntegralExpression( forStmt->cond, symtab ) );
     1597                                forStmt, &ast::ForStmt::cond, findIntegralExpression( forStmt->cond, context ) );
    15921598                }
    15931599
    15941600                if ( forStmt->inc ) {
    15951601                        forStmt = ast::mutate_field(
    1596                                 forStmt, &ast::ForStmt::inc, findVoidExpression( forStmt->inc, symtab ) );
     1602                                forStmt, &ast::ForStmt::inc, findVoidExpression( forStmt->inc, context ) );
    15971603                }
    15981604
     
    16041610                switchStmt = ast::mutate_field(
    16051611                        switchStmt, &ast::SwitchStmt::cond,
    1606                         findIntegralExpression( switchStmt->cond, symtab ) );
     1612                        findIntegralExpression( switchStmt->cond, context ) );
    16071613                currentObject = ast::CurrentObject{ switchStmt->location, switchStmt->cond->result };
    16081614                return switchStmt;
     
    16171623                        ast::ptr< ast::Expr > untyped =
    16181624                                new ast::CastExpr{ caseStmt->location, caseStmt->cond, initAlts.front().type };
    1619                         ast::ptr< ast::Expr > newExpr = findSingleExpression( untyped, symtab );
     1625                        ast::ptr< ast::Expr > newExpr = findSingleExpression( untyped, context );
    16201626
    16211627                        // case condition cannot have a cast in C, so it must be removed here, regardless of
     
    16381644                        branchStmt = ast::mutate_field(
    16391645                                branchStmt, &ast::BranchStmt::computedTarget,
    1640                                 findSingleExpression( branchStmt->computedTarget, target, symtab ) );
     1646                                findSingleExpression( branchStmt->computedTarget, target, context ) );
    16411647                }
    16421648                return branchStmt;
     
    16481654                        returnStmt = ast::mutate_field(
    16491655                                returnStmt, &ast::ReturnStmt::expr,
    1650                                 findSingleExpression( returnStmt->expr, functionReturn, symtab ) );
     1656                                findSingleExpression( returnStmt->expr, functionReturn, context ) );
    16511657                }
    16521658                return returnStmt;
     
    16631669                        throwStmt = ast::mutate_field(
    16641670                                throwStmt, &ast::ThrowStmt::expr,
    1665                                 findSingleExpression( throwStmt->expr, exceptType, symtab ) );
     1671                                findSingleExpression( throwStmt->expr, exceptType, context ) );
    16661672                }
    16671673                return throwStmt;
     
    17071713
    17081714                        ast::TypeEnvironment env;
    1709                         CandidateFinder funcFinder{ symtab, env };
     1715                        CandidateFinder funcFinder( context, env );
    17101716
    17111717                        // Find all candidates for a function in canonical form
     
    19211927                                );
    19221928
    1923                                 clause2.target.args.emplace_back( findSingleExpression( init, symtab ) );
     1929                                clause2.target.args.emplace_back( findSingleExpression( init, context ) );
    19241930                        }
    19251931
    19261932                        // Resolve the conditions as if it were an IfStmt, statements normally
    1927                         clause2.cond = findSingleExpression( clause.cond, symtab );
     1933                        clause2.cond = findSingleExpression( clause.cond, context );
    19281934                        clause2.stmt = clause.stmt->accept( *visitor );
    19291935
     
    19401946                        ast::ptr< ast::Type > target =
    19411947                                new ast::BasicType{ ast::BasicType::LongLongUnsignedInt };
    1942                         timeout2.time = findSingleExpression( stmt->timeout.time, target, symtab );
    1943                         timeout2.cond = findSingleExpression( stmt->timeout.cond, symtab );
     1948                        timeout2.time = findSingleExpression( stmt->timeout.time, target, context );
     1949                        timeout2.cond = findSingleExpression( stmt->timeout.cond, context );
    19441950                        timeout2.stmt = stmt->timeout.stmt->accept( *visitor );
    19451951
     
    19541960                        ast::WaitForStmt::OrElse orElse2;
    19551961
    1956                         orElse2.cond = findSingleExpression( stmt->orElse.cond, symtab );
     1962                        orElse2.cond = findSingleExpression( stmt->orElse.cond, context );
    19571963                        orElse2.stmt = stmt->orElse.stmt->accept( *visitor );
    19581964
     
    19751981                for (auto & expr : exprs) {
    19761982                        // only struct- and union-typed expressions are viable candidates
    1977                         expr = findKindExpression( expr, symtab, structOrUnion, "with expression" );
     1983                        expr = findKindExpression( expr, context, structOrUnion, "with expression" );
    19781984
    19791985                        // if with expression might be impure, create a temporary so that it is evaluated once
     
    20012007                ast::ptr< ast::Expr > untyped = new ast::UntypedInitExpr{
    20022008                        singleInit->location, singleInit->value, currentObject.getOptions() };
    2003                 ast::ptr<ast::Expr> newExpr = findSingleExpression( untyped, symtab );
     2009                ast::ptr<ast::Expr> newExpr = findSingleExpression( untyped, context );
    20042010                const ast::InitExpr * initExpr = newExpr.strict_as< ast::InitExpr >();
    20052011
  • src/ResolvExpr/Resolver.h

    ref3c383 rd672350  
    99// Author           : Richard C. Bilson
    1010// Created On       : Sun May 17 12:18:34 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Feb 18 20:40:38 2019
    13 // Update Count     : 4
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Wed Mar 16 11:32:00 2022
     13// Update Count     : 5
    1414//
    1515
     
    2323class Declaration;
    2424class Expression;
     25class DeletedExpr;
    2526class StmtExpr;
     27class Type;
    2628namespace SymTab {
    2729        class Indexer;
     
    3537        class StmtExpr;
    3638        class SymbolTable;
     39        class TranslationGlobal;
    3740        class TranslationUnit;
    3841        class Type;
     
    5558        void resolveWithExprs( std::list< Declaration * > & translationUnit );
    5659
     60        /// Helper Type: Passes around information between various sub-calls.
     61        struct ResolveContext {
     62                const ast::SymbolTable & symtab;
     63                const ast::TranslationGlobal & global;
     64        };
     65
    5766        /// Checks types and binds syntactic constructs to typed representations
    5867        void resolve( ast::TranslationUnit& translationUnit );
     
    6271        /// context.
    6372        ast::ptr< ast::Expr > resolveInVoidContext(
    64                 const ast::Expr * expr, const ast::SymbolTable & symtab, ast::TypeEnvironment & env );
     73                const ast::Expr * expr, const ResolveContext &, ast::TypeEnvironment & env );
    6574        /// Resolve `untyped` to the single expression whose candidate is the best match for the
    6675        /// given type.
    6776        ast::ptr< ast::Expr > findSingleExpression(
    68                 const ast::Expr * untyped, const ast::Type * type, const ast::SymbolTable & symtab );
     77                const ast::Expr * untyped, const ast::Type * type, const ResolveContext & );
    6978        ast::ptr< ast::Expr > findVoidExpression(
    70                 const ast::Expr * untyped, const ast::SymbolTable & symtab);
     79                const ast::Expr * untyped, const ResolveContext & );
    7180        /// Resolves a constructor init expression
    7281        ast::ptr< ast::Init > resolveCtorInit(
    73                 const ast::ConstructorInit * ctorInit, const ast::SymbolTable & symtab );
     82                const ast::ConstructorInit * ctorInit, const ResolveContext & context );
    7483        /// Resolves a statement expression
    7584        const ast::Expr * resolveStmtExpr(
    76                 const ast::StmtExpr * stmtExpr, const ast::SymbolTable & symtab );
     85                const ast::StmtExpr * stmtExpr, const ResolveContext & context );
    7786} // namespace ResolvExpr
    7887
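
    The symtab → context substitutions throughout Resolver.cfa above all route through the ResolveContext helper declared in src/ResolvExpr/Resolver.h. A minimal sketch of a call site under the new interface follows; the include paths and the surrounding function are illustrative assumptions, while the ResolveContext fields, the BasicType construction, and the findSingleExpression signature are taken directly from the changes above.

    #include "AST/Type.hpp"            // for ast::BasicType (other AST headers elided)
    #include "ResolvExpr/Resolver.h"   // assumed include path for the declarations above

    // Sketch: resolve one untyped expression against an expected type using the
    // new ResolveContext bundle instead of a bare SymbolTable.
    ast::ptr<ast::Expr> resolveAsTimeout(
            const ast::Expr * untyped,
            const ast::SymbolTable & symtab,
            const ast::TranslationGlobal & global ) {
        ResolvExpr::ResolveContext context{ symtab, global };
        // Same target-type construction as the WaitForStmt timeout handling above.
        ast::ptr<ast::Type> target = new ast::BasicType{ ast::BasicType::LongLongUnsignedInt };
        return ResolvExpr::findSingleExpression( untyped, target, context );
    }

    Carrying the TranslationGlobal alongside the symbol table is presumably what lets FindSpecialDeclsNew.cpp (below) stop republishing ast::dereferenceOperator and the dtor declarations as free-standing globals.
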
  • src/ResolvExpr/Unify.cc

    ref3c383 rd672350  
    943943                        // check that the other type is compatible and named the same
    944944                        auto otherInst = dynamic_cast< const XInstType * >( other );
    945                         this->result = otherInst && inst->name == otherInst->name;
     945                        if (otherInst && inst->name == otherInst->name) this->result = otherInst;
    946946                        return otherInst;
    947947                }
  • src/SymTab/Validate.cc

    ref3c383 rd672350  
    194194        };
    195195
     196        // These structs are the sub-sub-passes of ForallPointerDecay_old.
     197
     198        struct TraitExpander_old final {
     199                void previsit( FunctionType * );
     200                void previsit( StructDecl * );
     201                void previsit( UnionDecl * );
     202        };
     203
     204        struct AssertionFixer_old final {
     205                void previsit( FunctionType * );
     206                void previsit( StructDecl * );
     207                void previsit( UnionDecl * );
     208        };
     209
     210        struct CheckOperatorTypes_old final {
     211                void previsit( ObjectDecl * );
     212        };
     213
     214        struct FixUniqueIds_old final {
     215                void previsit( DeclarationWithType * );
     216        };
     217
    196218        struct ReturnChecker : public WithGuards {
    197219                /// Checks that return statements return nothing if their return type is void
     
    386408
    387409        void validate_D( std::list< Declaration * > & translationUnit ) {
    388                 PassVisitor<ForallPointerDecay_old> fpd;
    389410                {
    390411                        Stats::Heap::newPass("validate-D");
     
    394415                        });
    395416                        Stats::Time::TimeBlock("Forall Pointer Decay", [&]() {
    396                                 acceptAll( translationUnit, fpd ); // must happen before autogenerateRoutines, after Concurrency::applyKeywords because uniqueIds must be set on declaration before resolution
     417                                decayForallPointers( translationUnit ); // must happen before autogenerateRoutines, after Concurrency::applyKeywords because uniqueIds must be set on declaration before resolution
    397418                        });
    398419                        Stats::Time::TimeBlock("Hoist Control Declarations", [&]() {
     
    454475
    455476        void decayForallPointers( std::list< Declaration * > & translationUnit ) {
    456                 PassVisitor<ForallPointerDecay_old> fpd;
    457                 acceptAll( translationUnit, fpd );
     477                PassVisitor<TraitExpander_old> te;
     478                acceptAll( translationUnit, te );
     479                PassVisitor<AssertionFixer_old> af;
     480                acceptAll( translationUnit, af );
     481                PassVisitor<CheckOperatorTypes_old> cot;
     482                acceptAll( translationUnit, cot );
     483                PassVisitor<FixUniqueIds_old> fui;
     484                acceptAll( translationUnit, fui );
     485        }
     486
     487        void decayForallPointersA( std::list< Declaration * > & translationUnit ) {
     488                PassVisitor<TraitExpander_old> te;
     489                acceptAll( translationUnit, te );
     490        }
     491        void decayForallPointersB( std::list< Declaration * > & translationUnit ) {
     492                PassVisitor<AssertionFixer_old> af;
     493                acceptAll( translationUnit, af );
     494        }
     495        void decayForallPointersC( std::list< Declaration * > & translationUnit ) {
     496                PassVisitor<CheckOperatorTypes_old> cot;
     497                acceptAll( translationUnit, cot );
     498        }
     499        void decayForallPointersD( std::list< Declaration * > & translationUnit ) {
     500                PassVisitor<FixUniqueIds_old> fui;
     501                acceptAll( translationUnit, fui );
    458502        }
    459503
     
    470514                PassVisitor<EnumAndPointerDecay_old> epc;
    471515                PassVisitor<LinkReferenceToTypes_old> lrt( indexer );
    472                 PassVisitor<ForallPointerDecay_old> fpd;
     516                PassVisitor<TraitExpander_old> te;
     517                PassVisitor<AssertionFixer_old> af;
     518                PassVisitor<CheckOperatorTypes_old> cot;
     519                PassVisitor<FixUniqueIds_old> fui;
    473520                type->accept( epc );
    474521                type->accept( lrt );
    475                 type->accept( fpd );
     522                type->accept( te );
     523                type->accept( af );
     524                type->accept( cot );
     525                type->accept( fui );
    476526        }
    477527
     
    9721022        }
    9731023
     1024        /// Replace all traits in assertion lists with their assertions.
     1025        void expandTraits( std::list< TypeDecl * > & forall ) {
     1026                for ( TypeDecl * type : forall ) {
     1027                        std::list< DeclarationWithType * > asserts;
     1028                        asserts.splice( asserts.end(), type->assertions );
     1029                        // expand trait instances into their members
     1030                        for ( DeclarationWithType * assertion : asserts ) {
     1031                                if ( TraitInstType * traitInst = dynamic_cast< TraitInstType * >( assertion->get_type() ) ) {
     1032                                        // expand trait instance into all of its members
     1033                                        expandAssertions( traitInst, back_inserter( type->assertions ) );
     1034                                        delete traitInst;
     1035                                } else {
     1036                                        // pass other assertions through
     1037                                        type->assertions.push_back( assertion );
     1038                                } // if
     1039                        } // for
     1040                }
     1041        }
     1042
     1043        /// Fix each function in the assertion list and check for invalid void type.
     1044        void fixAssertions(
     1045                        std::list< TypeDecl * > & forall, BaseSyntaxNode * node ) {
     1046                for ( TypeDecl * type : forall ) {
     1047                        for ( DeclarationWithType *& assertion : type->assertions ) {
     1048                                bool isVoid = fixFunction( assertion );
     1049                                if ( isVoid ) {
     1050                                        SemanticError( node, "invalid type void in assertion of function " );
     1051                                } // if
     1052                        } // for
     1053                }
     1054        }
     1055
    9741056        void ForallPointerDecay_old::previsit( ObjectDecl * object ) {
    9751057                // ensure that operator names only apply to functions or function pointers
     
    9941076        void ForallPointerDecay_old::previsit( UnionDecl * aggrDecl ) {
    9951077                forallFixer( aggrDecl->parameters, aggrDecl );
     1078        }
     1079
     1080        void TraitExpander_old::previsit( FunctionType * ftype ) {
     1081                expandTraits( ftype->forall );
     1082        }
     1083
     1084        void TraitExpander_old::previsit( StructDecl * aggrDecl ) {
     1085                expandTraits( aggrDecl->parameters );
     1086        }
     1087
     1088        void TraitExpander_old::previsit( UnionDecl * aggrDecl ) {
     1089                expandTraits( aggrDecl->parameters );
     1090        }
     1091
     1092        void AssertionFixer_old::previsit( FunctionType * ftype ) {
     1093                fixAssertions( ftype->forall, ftype );
     1094        }
     1095
     1096        void AssertionFixer_old::previsit( StructDecl * aggrDecl ) {
     1097                fixAssertions( aggrDecl->parameters, aggrDecl );
     1098        }
     1099
     1100        void AssertionFixer_old::previsit( UnionDecl * aggrDecl ) {
     1101                fixAssertions( aggrDecl->parameters, aggrDecl );
     1102        }
     1103
     1104        void CheckOperatorTypes_old::previsit( ObjectDecl * object ) {
     1105                // ensure that operator names only apply to functions or function pointers
     1106                if ( CodeGen::isOperator( object->name ) && ! dynamic_cast< FunctionType * >( object->type->stripDeclarator() ) ) {
     1107                        SemanticError( object->location, toCString( "operator ", object->name.c_str(), " is not a function or function pointer." )  );
     1108                }
     1109        }
     1110
     1111        void FixUniqueIds_old::previsit( DeclarationWithType * decl ) {
     1112                decl->fixUniqueId();
    9961113        }
    9971114
  • src/SymTab/Validate.h

    ref3c383 rd672350  
    4343        void validate_F( std::list< Declaration * > &translationUnit );
    4444        void decayForallPointers( std::list< Declaration * > & translationUnit );
     45        void decayForallPointersA( std::list< Declaration * > & translationUnit );
     46        void decayForallPointersB( std::list< Declaration * > & translationUnit );
     47        void decayForallPointersC( std::list< Declaration * > & translationUnit );
     48        void decayForallPointersD( std::list< Declaration * > & translationUnit );
    4549
    4650        const ast::Type * validateType(
  • src/Tuples/TupleAssignment.cc

    ref3c383 rd672350  
    99// Author           : Rodolfo G. Esteves
    1010// Created On       : Mon May 18 07:44:20 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Dec 13 23:45:33 2019
    13 // Update Count     : 9
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Wed Mar 16 14:06:00 2022
     13// Update Count     : 10
    1414//
    1515
     
    465465                                        // resolve ctor/dtor for the new object
    466466                                        ast::ptr< ast::Init > ctorInit = ResolvExpr::resolveCtorInit(
    467                                                         InitTweak::genCtorInit( location, ret ), spotter.crntFinder.localSyms );
     467                                                        InitTweak::genCtorInit( location, ret ), spotter.crntFinder.context );
    468468                                        // remove environments from subexpressions of stmtExpr
    469469                                        ast::Pass< EnvRemover > rm{ env };
     
    560560                                        // resolve the cast expression so that rhsCand return type is bound by the cast
    561561                                        // type as needed, and transfer the resulting environment
    562                                         ResolvExpr::CandidateFinder finder{ spotter.crntFinder.localSyms, env };
     562                                        ResolvExpr::CandidateFinder finder( spotter.crntFinder.context, env );
    563563                                        finder.find( rhsCand->expr, ResolvExpr::ResolvMode::withAdjustment() );
    564564                                        assert( finder.candidates.size() == 1 );
     
    609609                                        // explode the LHS so that each field of a tuple-valued expr is assigned
    610610                                        ResolvExpr::CandidateList lhs;
    611                                         explode( *lhsCand, crntFinder.localSyms, back_inserter(lhs), true );
     611                                        explode( *lhsCand, crntFinder.context.symtab, back_inserter(lhs), true );
    612612                                        for ( ResolvExpr::CandidateRef & cand : lhs ) {
    613613                                                // each LHS value must be a reference - some come in with a cast, if not
     
    629629                                                        if ( isTuple( rhsCand->expr ) ) {
    630630                                                                // multiple assignment
    631                                                                 explode( *rhsCand, crntFinder.localSyms, back_inserter(rhs), true );
     631                                                                explode( *rhsCand, crntFinder.context.symtab, back_inserter(rhs), true );
    632632                                                                matcher.reset(
    633633                                                                        new MultipleAssignMatcher{ *this, expr->location, lhs, rhs } );
     
    648648                                                        // multiple assignment
    649649                                                        ResolvExpr::CandidateList rhs;
    650                                                         explode( rhsCand, crntFinder.localSyms, back_inserter(rhs), true );
     650                                                        explode( rhsCand, crntFinder.context.symtab, back_inserter(rhs), true );
    651651                                                        matcher.reset(
    652652                                                                new MultipleAssignMatcher{ *this, expr->location, lhs, rhs } );
     
    678678                                )
    679679
    680                                 ResolvExpr::CandidateFinder finder{ crntFinder.localSyms, matcher->env };
     680                                ResolvExpr::CandidateFinder finder( crntFinder.context, matcher->env );
    681681
    682682                                try {
  • src/Validate/FindSpecialDeclsNew.cpp

    ref3c383 rd672350  
    3030
    3131struct FindDeclsCore : public ast::WithShortCircuiting {
    32         ast::TranslationUnit::Global & global;
    33         FindDeclsCore( ast::TranslationUnit::Global & g ) : global( g ) {}
     32        ast::TranslationGlobal & global;
     33        FindDeclsCore( ast::TranslationGlobal & g ) : global( g ) {}
    3434
    3535        void previsit( const ast::Decl * decl );
     
    7474        ast::Pass<FindDeclsCore>::run( translationUnit, translationUnit.global );
    7575
    76         // TODO: When everything gets the globals from the translation unit,
    77         // remove these.
    78         ast::dereferenceOperator = translationUnit.global.dereference;
    79         ast::dtorStruct = translationUnit.global.dtorStruct;
    80         ast::dtorStructDestroy = translationUnit.global.dtorDestroy;
    81 
    8276        // TODO: conditionally generate 'fake' declarations for missing features,
    8377        // so that translation can proceed in the event that builtins, prelude,
  • src/Validate/module.mk

    ref3c383 rd672350  
    2020        Validate/CompoundLiteral.cpp \
    2121        Validate/CompoundLiteral.hpp \
     22        Validate/ForallPointerDecay.cpp \
     23        Validate/ForallPointerDecay.hpp \
    2224        Validate/HandleAttributes.cc \
    2325        Validate/HandleAttributes.h \
  • src/Virtual/Tables.cc

    ref3c383 rd672350  
    1010// Created On       : Mon Aug 31 11:11:00 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Apr 21 15:36:00 2021
    13 // Update Count     : 2
    14 //
    15 
     12// Last Modified On : Fri Mar 11 10:40:00 2022
     13// Update Count     : 3
     14//
     15
     16#include "AST/Attribute.hpp"
     17#include "AST/Copy.hpp"
     18#include "AST/Decl.hpp"
     19#include "AST/Expr.hpp"
     20#include "AST/Init.hpp"
     21#include "AST/Stmt.hpp"
     22#include "AST/Type.hpp"
    1623#include <SynTree/Attribute.h>
    1724#include <SynTree/Declaration.h>
     
    7784}
    7885
     86static ast::ObjectDecl * makeVtableDeclaration(
     87                CodeLocation const & location, std::string const & name,
     88                ast::StructInstType const * type, ast::Init const * init ) {
     89        ast::Storage::Classes storage;
     90        if ( nullptr == init ) {
     91                storage.is_extern = true;
     92        }
     93        return new ast::ObjectDecl(
     94                location,
     95                name,
     96                type,
     97                init,
     98                storage,
     99                ast::Linkage::Cforall
     100        );
     101}
     102
    79103ObjectDecl * makeVtableForward( std::string const & name, StructInstType * type ) {
    80104        assert( type );
    81105        return makeVtableDeclaration( name, type, nullptr );
     106}
     107
     108ast::ObjectDecl * makeVtableForward(
     109                CodeLocation const & location, std::string const & name,
     110                ast::StructInstType const * vtableType ) {
     111        assert( vtableType );
     112        return makeVtableDeclaration( location, name, vtableType, nullptr );
    82113}
    83114
     
    123154}
    124155
     156static std::vector<ast::ptr<ast::Init>> buildInits(
     157                CodeLocation const & location,
     158                //std::string const & name,
     159                ast::StructInstType const * vtableType,
     160                ast::Type const * objectType ) {
     161        ast::StructDecl const * vtableStruct = vtableType->base;
     162
     163        std::vector<ast::ptr<ast::Init>> inits;
     164        inits.reserve( vtableStruct->members.size() );
     165
     166        // This is designed to run before the resolver.
     167        for ( auto field : vtableStruct->members ) {
     168                if ( std::string( "parent" ) == field->name ) {
     169                        // This will not work with polymorphic state.
     170                        auto oField = field.strict_as<ast::ObjectDecl>();
     171                        auto fieldType = oField->type.strict_as<ast::PointerType>();
     172                        auto parentType = fieldType->base.strict_as<ast::StructInstType>();
     173                        std::string const & parentInstance = instanceName( parentType->name );
     174                        inits.push_back(
     175                                        new ast::SingleInit( location, new ast::AddressExpr( new ast::NameExpr( location, parentInstance ) ) ) );
     176                } else if ( std::string( "__cfavir_typeid" ) == field->name ) {
     177                        std::string const & baseType = baseTypeName( vtableType->name );
     178                        std::string const & typeId = typeIdName( baseType );
     179                        inits.push_back( new ast::SingleInit( location, new ast::AddressExpr( new ast::NameExpr( location, typeId ) ) ) );
     180                } else if ( std::string( "size" ) == field->name ) {
     181                        inits.push_back( new ast::SingleInit( location, new ast::SizeofExpr( location, objectType )
     182                        ) );
     183                } else if ( std::string( "align" ) == field->name ) {
     184                        inits.push_back( new ast::SingleInit( location,
     185                                new ast::AlignofExpr( location, objectType )
     186                        ) );
     187                } else {
     188                        inits.push_back( new ast::SingleInit( location,
     189                                new ast::NameExpr( location, field->name )
     190                        ) );
     191                }
     192                //ast::Expr * expr = buildInitExpr(...);
     193                //inits.push_back( new ast::SingleInit( location, expr ) )
     194        }
     195
     196        return inits;
     197}
     198
     199ast::ObjectDecl * makeVtableInstance(
     200                CodeLocation const & location,
     201                std::string const & name,
     202                ast::StructInstType const * vtableType,
     203                ast::Type const * objectType,
     204                ast::Init const * init ) {
     205        assert( vtableType );
     206        assert( objectType );
     207
     208        // Build the initialization.
     209        if ( nullptr == init ) {
     210                init = new ast::ListInit( location,
     211                        buildInits( location, vtableType, objectType ) );
     212
     213        // The provided init should initialize everything except the parent
     214        // pointer, the size-of and align-of fields. These should be inserted.
     215        } else {
     216                // Except this is not yet supported.
     217                assert(false);
     218        }
     219        return makeVtableDeclaration( location, name, vtableType, init );
     220}
     221
    125222namespace {
    126223        std::string const functionName = "get_exception_vtable";
     
    140237                new ReferenceType( noQualifiers, vtableType ),
    141238                nullptr,
    142         { new Attribute("unused") }
     239                { new Attribute("unused") }
    143240        ) );
    144241        type->parameters.push_back( new ObjectDecl(
     
    157254                type,
    158255                nullptr
     256        );
     257}
     258
     259ast::FunctionDecl * makeGetExceptionForward(
     260                CodeLocation const & location,
     261                ast::Type const * vtableType,
     262                ast::Type const * exceptType ) {
     263        assert( vtableType );
     264        assert( exceptType );
     265        return new ast::FunctionDecl(
     266                location,
     267                functionName,
     268                { /* forall */ },
     269                { new ast::ObjectDecl(
     270                        location,
     271                        "__unused",
     272                        new ast::PointerType( exceptType )
     273                ) },
     274                { new ast::ObjectDecl(
     275                        location,
     276                        "_retvalue",
     277                        new ast::ReferenceType( vtableType )
     278                ) },
     279                nullptr,
     280                ast::Storage::Classes(),
     281                ast::Linkage::Cforall,
     282                { new ast::Attribute( "unused" ) }
    159283        );
    160284}
     
    172296}
    173297
     298ast::FunctionDecl * makeGetExceptionFunction(
     299                CodeLocation const & location,
     300                ast::ObjectDecl const * vtableInstance, ast::Type const * exceptType ) {
     301        assert( vtableInstance );
     302        assert( exceptType );
     303        ast::FunctionDecl * func = makeGetExceptionForward(
     304                        location, ast::deepCopy( vtableInstance->type ), exceptType );
     305        func->stmts = new ast::CompoundStmt( location, {
     306                new ast::ReturnStmt( location, new ast::VariableExpr( location, vtableInstance ) )
     307        } );
     308        return func;
     309}
     310
    174311ObjectDecl * makeTypeIdInstance( StructInstType const * typeIdType ) {
    175312        assert( typeIdType );
     
    191328}
    192329
    193 }
     330ast::ObjectDecl * makeTypeIdInstance(
     331                CodeLocation const & location,
     332                ast::StructInstType const * typeIdType ) {
     333        assert( typeIdType );
     334        ast::StructInstType * type = ast::mutate( typeIdType );
     335        type->set_const( true );
     336        std::string const & typeid_name = typeIdTypeToInstance( typeIdType->name );
     337        return new ast::ObjectDecl(
     338                location,
     339                typeid_name,
     340                type,
     341                new ast::ListInit( location, {
     342                        new ast::SingleInit( location,
     343                                new ast::AddressExpr( location,
     344                                        new ast::NameExpr( location, "__cfatid_exception_t" ) ) )
     345                } ),
     346                ast::Storage::Classes(),
     347                ast::Linkage::Cforall,
     348                nullptr,
     349                { new ast::Attribute( "cfa_linkonce" ) }
     350        );
     351}
     352
     353}
  • src/Virtual/Tables.h

    ref3c383 rd672350  
    1010// Created On       : Mon Aug 31 11:07:00 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Apr 21 10:30:00 2021
    13 // Update Count     : 2
      12// Last Modified On : Wed Dec  8 16:58:00 2021
     13// Update Count     : 3
    1414//
    1515
    1616#include <list>  // for list
    1717
     18#include <string>
     19#include "AST/Fwd.hpp"
    1820class Declaration;
    1921class StructDecl;
     
    3537 * vtableType node is consumed.
    3638 */
     39ast::ObjectDecl * makeVtableForward(
     40        CodeLocation const & location, std::string const & name,
     41        ast::StructInstType const * vtableType );
    3742
    3843ObjectDecl * makeVtableInstance(
     
    4348 * vtableType and init (if provided) nodes are consumed.
    4449 */
     50ast::ObjectDecl * makeVtableInstance(
     51        CodeLocation const & location,
     52        std::string const & name,
     53        ast::StructInstType const * vtableType,
     54        ast::Type const * objectType,
     55        ast::Init const * init = nullptr );
    4556
    4657// Some special code for how exceptions interact with virtual tables.
     
    4960 * linking the vtableType to the exceptType. Both nodes are consumed.
    5061 */
     62ast::FunctionDecl * makeGetExceptionForward(
     63        CodeLocation const & location,
     64        ast::Type const * vtableType,
     65        ast::Type const * exceptType );
    5166
    5267FunctionDecl * makeGetExceptionFunction(
     
    5570 * exceptType node is consumed.
    5671 */
     72ast::FunctionDecl * makeGetExceptionFunction(
     73        CodeLocation const & location,
     74        ast::ObjectDecl const * vtableInstance, ast::Type const * exceptType );
    5775
    5876ObjectDecl * makeTypeIdInstance( StructInstType const * typeIdType );
     
    6078 * TODO: Should take the parent type. Currently locked to the exception_t.
    6179 */
     80ast::ObjectDecl * makeTypeIdInstance(
     81        const CodeLocation & location, ast::StructInstType const * typeIdType );
    6282
    6383}
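
    The new-AST overloads in src/Virtual/Tables.h take an explicit CodeLocation and, per the doc comments, consume their vtableType (and init) arguments. The sketch below pairs the forward declaration with the defining instance under that ownership convention; the Virtual namespace qualifier, the include paths, and the deepCopy of the shared type node are assumptions drawn from the surrounding code, not part of this changeset.

    #include <list>
    #include <string>
    #include "AST/Copy.hpp"          // ast::deepCopy, as used in Tables.cc above
    #include "AST/Decl.hpp"
    #include "Virtual/Tables.h"      // assumed include path

    // Sketch: emit both the extern forward declaration and the defining instance
    // for one vtable. The helpers take ownership of the type node they receive,
    // so the shared vtableType is deep-copied for the first call.
    void declareVtable(
            CodeLocation const & location,
            std::string const & name,
            ast::StructInstType const * vtableType,
            ast::Type const * objectType,
            std::list<ast::ptr<ast::Decl>> & out ) {
        out.push_back( Virtual::makeVtableForward(
            location, name, ast::deepCopy( vtableType ) ) );
        out.push_back( Virtual::makeVtableInstance(
            location, name, vtableType, objectType ) );   // init defaults to nullptr
    }
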
  • src/main.cc

    ref3c383 rd672350  
    1010// Created On       : Fri May 15 23:12:02 2015
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Jan 26 14:09:00 2022
    13 // Update Count     : 670
     12// Last Modified On : Fri Mar 11 10:39:00 2022
     13// Update Count     : 671
    1414//
    1515
     
    7676#include "Validate/Autogen.hpp"             // for autogenerateRoutines
    7777#include "Validate/FindSpecialDecls.h"      // for findGlobalDecls
     78#include "Validate/ForallPointerDecay.hpp"  // for decayForallPointers
    7879#include "Validate/CompoundLiteral.hpp"     // for handleCompoundLiterals
    7980#include "Validate/InitializerLength.hpp"   // for setLengthFromInitializer
     
    331332
    332333                if( useNewAST ) {
    333                         PASS( "Apply Concurrent Keywords", Concurrency::applyKeywords( translationUnit ) );
    334                         PASS( "Forall Pointer Decay", SymTab::decayForallPointers( translationUnit ) );
    335334                        CodeTools::fillLocations( translationUnit );
    336335
     
    342341
    343342                        forceFillCodeLocations( transUnit );
     343
     344                        PASS( "Implement Concurrent Keywords", Concurrency::implementKeywords( transUnit ) );
     345
     346                        // Must be after implement concurrent keywords; because uniqueIds
     347                        //   must be set on declaration before resolution.
     348                        // Must happen before autogen routines are added.
     349                        PASS( "Forall Pointer Decay", Validate::decayForallPointers( transUnit ) );
    344350
    345351                        // Must happen before autogen routines are added.
     
    487493                        PASS( "Translate Tries" , ControlStruct::translateTries( translationUnit ) );
    488494                }
    489 
    490                
    491495
    492496                PASS( "Gen Waitfor" , Concurrency::generateWaitFor( translationUnit ) );
  • tests/.expect/declarationSpecifier.arm64.txt

    ref3c383 rd672350  
    11321132char **_X13cfa_args_argvPPc_1;
    11331133char **_X13cfa_args_envpPPc_1;
    1134 signed int _X17cfa_main_returnedi_1 = ((signed int )0);
     1134__attribute__ ((weak)) extern signed int _X17cfa_main_returnedi_1;
    11351135signed int main(signed int _X4argci_1, char **_X4argvPPc_1, char **_X4envpPPc_1){
    11361136    __attribute__ ((unused)) signed int _X12_retval_maini_1;
     
    11491149    signed int _tmp_cp_ret6;
    11501150    signed int _X3reti_2 = (((void)(_tmp_cp_ret6=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret6);
    1151     {
    1152         ((void)(_X17cfa_main_returnedi_1=((signed int )1)));
     1151    if ( ((&_X17cfa_main_returnedi_1)!=((signed int *)0)) ) {
     1152        {
     1153            ((void)(_X17cfa_main_returnedi_1=((signed int )1)));
     1154        }
     1155
    11531156    }
    11541157
  • tests/.expect/gccExtensions.arm64.txt

    ref3c383 rd672350  
    324324char **_X13cfa_args_argvPPc_1;
    325325char **_X13cfa_args_envpPPc_1;
    326 signed int _X17cfa_main_returnedi_1 = ((signed int )0);
     326__attribute__ ((weak)) extern signed int _X17cfa_main_returnedi_1;
    327327signed int main(signed int _X4argci_1, char **_X4argvPPc_1, char **_X4envpPPc_1){
    328328    __attribute__ ((unused)) signed int _X12_retval_maini_1;
     
    341341    signed int _tmp_cp_ret6;
    342342    signed int _X3reti_2 = (((void)(_tmp_cp_ret6=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret6);
    343     {
    344         ((void)(_X17cfa_main_returnedi_1=((signed int )1)));
     343    if ( ((&_X17cfa_main_returnedi_1)!=((signed int *)0)) ) {
     344        {
     345            ((void)(_X17cfa_main_returnedi_1=((signed int )1)));
     346        }
     347
    345348    }
    346349
  • tests/.expect/random.arm64.txt

    ref3c383 rd672350  
    11õ
    22=
    3 V
     3K
    44-911259971
    556
    6 -4
     611
    771232105397
    880
    9 18
     911
    1010-914096085
    11111
    12 15
     1220
    13132077092859
    14141
    15 11
     1512
    16160.677254
    17170.678106775246139
  • tests/Makefile.am

    ref3c383 rd672350  
    6666PRETTY_PATH=mkdir -p $(dir $(abspath ${@})) && cd ${srcdir} &&
    6767
    68 .PHONY: list .validate
    69 .INTERMEDIATE: .validate .validate.cfa
     68.PHONY: list .validate .test_makeflags
     69.INTERMEDIATE: .validate .validate.cfa .test_makeflags
    7070EXTRA_PROGRAMS = avl_test linkonce .dummy_hack # build but do not install
    7171EXTRA_DIST = test.py \
     
    123123        @+${TEST_PY} --list ${concurrent}
    124124
     125.test_makeflags:
     126        @echo "${MAKEFLAGS}"
     127
    125128.validate: .validate.cfa
    126129        $(CFACOMPILE) .validate.cfa -fsyntax-only -Wall -Wextra -Werror
  • tests/collections/.expect/string-api-coverage.txt

    ref3c383 rd672350  
    11hello hello hello
     2
     3hello
    24true false
    35true false
  • tests/collections/.expect/string-gc.txt

    ref3c383 rd672350  
    3838x from 5 to 15
    3939y from 5 to 15
     40======================== fillNoCompact
     41about to expand, a = aaa
     42expanded, a = aaa
     43about to expand, a = aaa
     44expanded, a = aaa
     45about to expand, a = aaa
     46expanded, a = aaa
     47about to expand, a = aaa
     48expanded, a = aaa
     49about to expand, a = aaa
     50expanded, a = aaa
  • tests/collections/.expect/vector-err-pass-perm-it-byval.txt

    ref3c383 rd672350  
    1 error: Unique best alternative includes deleted identifier in Generated Cast of:
     1collections/vector-demo.cfa:95:1 error: Unique best alternative includes deleted identifier in Generated Cast of:
    22  Application of
    33    Deleted Expression
  • tests/collections/string-api-coverage.cfa

    ref3c383 rd672350  
    11#include <containers/string.hfa>
     2#include <string_sharectx.hfa>
    23
    34void assertWellFormedHandleList( int maxLen ) { // with(HeapArea)
     
    2526
    2627int main () {
     28
     29    #ifdef STRING_SHARING_OFF
     30    string_sharectx c = { NO_SHARING };
     31    #endif
     32
    2733    string s = "hello";
    2834    string s2 = "hello";
     
    3137
    3238    // IO operator, x2
    33     sout | s | s | s;
     39    sout | s | s | s;  // hello hello hello
     40
     41    // empty ctor then assign
     42    string sxx;
     43    sout | sxx;  // (blank line)
     44    sxx = s;
     45    sout | sxx;  // hello
    3446
    3547    // Comparisons
  • tests/collections/string-gc.cfa

    ref3c383 rd672350  
    22
    33size_t bytesRemaining() {
    4     return DEBUG_string_bytes_avail_until_gc( DEBUG_string_heap );
     4    return DEBUG_string_bytes_avail_until_gc( DEBUG_string_heap() );
    55}
    66
    77size_t heapOffsetStart( string_res & s ) {
    8     const char * startByte = DEBUG_string_heap_start( DEBUG_string_heap );
     8    const char * startByte = DEBUG_string_heap_start( DEBUG_string_heap() );
    99    assert( s.Handle.s >= startByte );
    1010    return s.Handle.s - startByte;
     
    120120}
    121121
     122void fillNoCompact() {
     123    // show that allocating in a heap filled with mostly live strings (no collectable garbage) causes heap growth
     124
     125    sout | "======================== fillNoCompact";
     126
     127    size_t lastTimeBytesAvail = bytesRemaining();
     128    assert( lastTimeBytesAvail >= 200 ); // starting this test with nontrivial room
     129
     130    // mostly fill the pad
     131    string_res a = "aaa";  // will have to be moved
     132    string_res z = "zzz";
     133    for (i; 5) {
     134        while ( bytesRemaining() > 10 ) {
     135            z += ".";
     136        }
     137        sout | "about to expand, a = " | a;
     138        while ( bytesRemaining() <= 10 ) {
     139            z += ".";
     140        }
     141        sout | "expanded, a = " | a;
     142
     143        // each growth gives more usable space than the last
     144        assert( bytesRemaining() > lastTimeBytesAvail );
     145        lastTimeBytesAvail = bytesRemaining();
     146    }
     147}
     148
    122149int main() {
    123150    basicFillCompact();
    124151    fillCompact_withSharedEdits();
     152    fillNoCompact();
    125153}
  • tests/collections/string-overwrite.cfa

    ref3c383 rd672350  
    11#include <containers/string.hfa>
     2#include <string_sharectx.hfa>
    23
    34/*
     
    1112WE = witness end
    1213
    13 The dest does:
     14The test does:
    1415  starts with the entire string being, initially, the alphabet; prints this entire alphabet
    1516  sets up modifier and witness as ranges within it, and prints a visualization of those ranges
     
    2425This API's convention has Start positions being inclusive and end positions being exclusive.
    2526
     27                                v Case number in output
    2628With 1 equivalence class:
    2729MS = ME = WS = WE               1
     
    118120    struct { int ms; int me; int ws; int we; char *replaceWith; char *label; } cases[] = {
    119121        { 12, 14, 10, 20, "xxxxx", "warmup" },
    120 //        { 12, 14, 12, 14, "xxxxx", ""       },  // the bug that got me into this test (should be a dup with case 6)
    121122        { 10, 10, 10, 10, "=====", "1"      },
    122123        { 10, 10, 10, 10, "=="   , ""       },
     
    223224        { 12, 14, 10, 16, "="    , ""       },
    224225        { 12, 14, 10, 16, ""     , ""       },
    225 /*
    226         { , , , , "=====", "NN"     },
    227         {  "=="   , ""       },
    228         {  "="    , ""       },
    229         {  ""     , ""       },
    230 */
    231226    };
    232227    for ( i; sizeof(cases)/sizeof(cases[0]) ) {
     
    238233
    239234
    240 // void f( string & s, string & toEdit ) {
    241 
    242 //     sout | s | "|" | toEdit | "|";
    243 
    244 //     s(14, 16) = "-";
    245 //     sout | s | "|" | toEdit | "|";
    246 // }
    247 
    248235int main() {
     236
     237    #ifdef STRING_SHARING_OFF
     238    string_sharectx c = { NO_SHARING };
     239    #endif
     240
     241
    249242    //          0         1         2
    250243    //          01234567890123456789012345
  • tests/concurrent/mutexstmt/.expect/locks.txt

    ref3c383 rd672350  
    33Start Test: multi lock deadlock/mutual exclusion
    44End Test: multi lock deadlock/mutual exclusion
    5 Start Test: single scoped lock mutual exclusion
    6 End Test: single scoped lock mutual exclusion
    7 Start Test: multi scoped lock deadlock/mutual exclusion
    8 End Test: multi scoped lock deadlock/mutual exclusion
     5Start Test: multi polymorphic lock deadlock/mutual exclusion
     6End Test: multi polymorphic lock deadlock/mutual exclusion
  • tests/concurrent/mutexstmt/locks.cfa

    ref3c383 rd672350  
    33
    44const unsigned int num_times = 10000;
     5
     6Duration default_preemption() { return 0; }
    57
    68single_acquisition_lock m1, m2, m3, m4, m5;
     
    2224}
    2325
     26void refTest( single_acquisition_lock & m ) {
     27        mutex ( m ) {
     28                assert(!insideFlag);
     29                insideFlag = true;
     30                assert(insideFlag);
     31                insideFlag = false;
     32        }
     33}
     34
    2435thread T_Multi {};
    2536
    2637void main( T_Multi & this ) {
    2738        for (unsigned int i = 0; i < num_times; i++) {
     39                refTest( m1 );
    2840                mutex ( m1 ) {
    2941                        assert(!insideFlag);
     
    5971}
    6072
    61 thread T_Mutex_Scoped {};
     73single_acquisition_lock l1;
     74linear_backoff_then_block_lock l2;
     75owner_lock l3;
    6276
    63 void main( T_Mutex_Scoped & this ) {
     77monitor monitor_t {};
     78
     79monitor_t l4;
     80
     81thread T_Multi_Poly {};
     82
     83void main( T_Multi_Poly & this ) {
    6484        for (unsigned int i = 0; i < num_times; i++) {
    65                 {
    66                         scoped_lock(single_acquisition_lock) s{m1};
    67                         count++;
    68                 }
    69                 {
    70                         scoped_lock(single_acquisition_lock) s{m1};
     85                refTest( l1 );
     86                mutex ( l1, l4 ) {
    7187                        assert(!insideFlag);
    7288                        insideFlag = true;
     
    7490                        insideFlag = false;
    7591                }
    76         }
    77 }
    78 
    79 thread T_Multi_Scoped {};
    80 
    81 void main( T_Multi_Scoped & this ) {
    82         for (unsigned int i = 0; i < num_times; i++) {
    83                 {
    84                         scoped_lock(single_acquisition_lock) s{m1};
     92                mutex ( l1, l2, l3 ) {
    8593                        assert(!insideFlag);
    8694                        insideFlag = true;
     
    8896                        insideFlag = false;
    8997                }
    90                 {
    91                         scoped_lock(single_acquisition_lock) s1{m1};
    92                         scoped_lock(single_acquisition_lock) s2{m2};
    93                         scoped_lock(single_acquisition_lock) s3{m3};
    94                         scoped_lock(single_acquisition_lock) s4{m4};
    95                         scoped_lock(single_acquisition_lock) s5{m5};
     98                mutex ( l3, l1, l4 ) {
    9699                        assert(!insideFlag);
    97100                        insideFlag = true;
     
    99102                        insideFlag = false;
    100103                }
    101                 {
    102                         scoped_lock(single_acquisition_lock) s1{m1};
    103                         scoped_lock(single_acquisition_lock) s3{m3};
    104                         assert(!insideFlag);
    105                         insideFlag = true;
    106                         assert(insideFlag);
    107                         insideFlag = false;
    108                 }
    109                 {
    110                         scoped_lock(single_acquisition_lock) s1{m1};
    111                         scoped_lock(single_acquisition_lock) s2{m2};
    112                         scoped_lock(single_acquisition_lock) s4{m4};
    113                         assert(!insideFlag);
    114                         insideFlag = true;
    115                         assert(insideFlag);
    116                         insideFlag = false;
    117                 }
    118                 {
    119                         scoped_lock(single_acquisition_lock) s1{m1};
    120                         scoped_lock(single_acquisition_lock) s3{m3};
    121                         scoped_lock(single_acquisition_lock) s4{m4};
    122                         scoped_lock(single_acquisition_lock) s5{m5};
     104                mutex ( l1, l2, l4 ) {
    123105                        assert(!insideFlag);
    124106                        insideFlag = true;
     
    131113int num_tasks = 10;
    132114int main() {
    133         processor p[10];
     115        processor p[num_tasks - 1];
    134116
    135117        printf("Start Test: single lock mutual exclusion\n");
    136118        {
    137                 T_Mutex t[10];
     119                T_Mutex t[num_tasks];
    138120        }
    139121        assert(count == num_tasks * num_times);
     
    141123        printf("Start Test: multi lock deadlock/mutual exclusion\n");
    142124        {
    143                 T_Multi t[10];
     125                T_Multi t[num_tasks];
    144126        }
    145127        printf("End Test: multi lock deadlock/mutual exclusion\n");
    146        
    147         count = 0;
    148         printf("Start Test: single scoped lock mutual exclusion\n");
     128        printf("Start Test: multi polymorphic lock deadlock/mutual exclusion\n");
    149129        {
    150                 T_Mutex_Scoped t[10];
     130                T_Multi_Poly t[num_tasks];
    151131        }
    152         assert(count == num_tasks * num_times);
    153         printf("End Test: single scoped lock mutual exclusion\n");
    154         printf("Start Test: multi scoped lock deadlock/mutual exclusion\n");
    155         {
    156                 T_Multi_Scoped t[10];
    157         }
    158         printf("End Test: multi scoped lock deadlock/mutual exclusion\n");     
     132        printf("End Test: multi polymorphic lock deadlock/mutual exclusion\n");
    159133}
  • tests/io/many_read.cfa

    ref3c383 rd672350  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // many_read.cfa -- Make sure that multiple concurrent reads to mess up.
     7// many_read.cfa -- Make sure that multiple concurrent reads don't mess up.
    88//
    99// Author           : Thierry Delisle
  • tests/meta/dumpable.cfa

    ref3c383 rd672350  
    7272        }
    7373
    74         if((buf.f_bsize * buf.f_bavail) < 536870912) {
    75                 serr | "Available diskspace is less than ~500Mb: " | (buf.f_bsize * buf.f_bavail);
     74        uint64_t avail = buf.f_bavail;
     75        avail *= buf.f_bsize;
     76        if(avail < 536870912_l64u) {
     77                serr | "Available diskspace is less than ~500Mb: " | avail;
    7678        }
    7779
  • tests/pybin/settings.py

    ref3c383 rd672350  
    155155        global generating
    156156        global make
     157        global make_jobfds
    157158        global output_width
    158159        global timeout
     
    168169        generating   = options.regenerate_expected
    169170        make         = ['make']
     171        make_jobfds  = []
    170172        output_width = 24
    171173        timeout      = Timeouts(options.timeout, options.global_timeout)
     
    177179                os.putenv('DISTCC_LOG', os.path.join(BUILDDIR, 'distcc_error.log'))
    178180
    179 def update_make_cmd(force, jobs):
     181def update_make_cmd(flags):
    180182        global make
    181 
    182         make = ['make'] if not force else ['make', "-j%i" % jobs]
     183        make = ['make', *flags]
     184
     185def update_make_fds(r, w):
     186        global make_jobfds
     187        make_jobfds = (r, w)
    183188
    184189def validate():
     
    187192        global distcc
    188193        distcc       = "DISTCC_CFA_PATH=~/.cfadistcc/%s/cfa" % tools.config_hash()
    189         errf = os.path.join(BUILDDIR, ".validate.err")
    190         make_ret, out = tools.make( ".validate", error_file = errf, output_file=subprocess.DEVNULL, error=subprocess.DEVNULL )
     194        make_ret, out, err = tools.make( ".validate", output_file=subprocess.PIPE, error=subprocess.PIPE )
    191195        if make_ret != 0:
    192                 with open (errf, "r") as myfile:
    193                         error=myfile.read()
    194196                print("ERROR: Invalid configuration %s:%s" % (arch.string, debug.string), file=sys.stderr)
    195                 print("       verify returned : \n%s" % error, file=sys.stderr)
    196                 tools.rm(errf)
     197                print("       verify returned : \n%s" % err, file=sys.stderr)
    197198                sys.exit(1)
    198 
    199         tools.rm(errf)
    200199
    201200def prep_output(tests):
  • tests/pybin/tools.py

    ref3c383 rd672350  
    2323
    2424# helper functions to run terminal commands
    25 def sh(*cmd, timeout = False, output_file = None, input_file = None, input_text = None, error = subprocess.STDOUT, ignore_dry_run = False):
     25def sh(*cmd, timeout = False, output_file = None, input_file = None, input_text = None, error = subprocess.STDOUT, ignore_dry_run = False, pass_fds = []):
    2626        try:
    2727                cmd = list(cmd)
     
    6565                                **({'input' : bytes(input_text, encoding='utf-8')} if input_text else {'stdin' : input_file}),
    6666                                stdout  = output_file,
    67                                 stderr  = error
     67                                stderr  = error,
     68                                pass_fds = pass_fds
    6869                        ) as proc:
    6970
    7071                                try:
    71                                         out, _ = proc.communicate(
     72                                        out, errout = proc.communicate(
    7273                                                timeout = settings.timeout.single if timeout else None
    7374                                        )
    7475
    75                                         return proc.returncode, out.decode("latin-1") if out else None
     76                                        return proc.returncode, out.decode("latin-1") if out else None, errout.decode("latin-1") if errout else None
    7677                                except subprocess.TimeoutExpired:
    7778                                        if settings.timeout2gdb:
    7879                                                print("Process {} timeout".format(proc.pid))
    7980                                                proc.communicate()
    80                                                 return 124, str(None)
     81                                                return 124, str(None), "Subprocess Timeout 2 gdb"
    8182                                        else:
    8283                                                proc.send_signal(signal.SIGABRT)
    8384                                                proc.communicate()
    84                                                 return 124, str(None)
     85                                                return 124, str(None), "Subprocess Timeout 2 gdb"
    8586
    8687        except Exception as ex:
     
    105106                return (False, "No file")
    106107
    107         code, out = sh("file", fname, output_file=subprocess.PIPE)
     108        code, out, err = sh("file", fname, output_file=subprocess.PIPE)
    108109        if code != 0:
    109                 return (False, "'file EXPECT' failed with code {}".format(code))
     110                return (False, "'file EXPECT' failed with code {} '{}'".format(code, err))
    110111
    111112        match = re.search(".*: (.*)", out)
     
    190191        ]
    191192        cmd = [s for s in cmd if s]
    192         return sh(*cmd, output_file=output_file, error=error)
     193        return sh(*cmd, output_file=output_file, error=error, pass_fds=settings.make_jobfds)
    193194
    194195def make_recon(target):
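
make() now hands settings.make_jobfds to sh() so a recursive make inherits the jobserver pipe advertised in MAKEFLAGS. The settings module is not part of this changeset; a plausible sketch of the two pieces it needs, matching the make_jobfds/update_make_fds names this changeset uses (implementation assumed, not shown in the diff):

    # hypothetical settings.py fragment: remember the jobserver pipe so that
    # later make() calls can keep it open in their children via pass_fds
    make_jobfds = []

    def update_make_fds(r, w):
        global make_jobfds
        make_jobfds = [r, w]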
     
    241242# move a file
    242243def mv(source, dest):
    243         ret, _ = sh("mv", source, dest)
     244        ret, _, _ = sh("mv", source, dest)
    244245        return ret
    245246
    246247# cat one file into the other
    247248def cat(source, dest):
    248         ret, _ = sh("cat", source, output_file=dest)
     249        ret, _, _ = sh("cat", source, output_file=dest)
    249250        return ret
    250251
     
    289290#               system
    290291################################################################################
     292def jobserver_version():
     293        make_ret, out, err = sh('make', '.test_makeflags', '-j2', output_file=subprocess.PIPE, error=subprocess.PIPE)
     294        if make_ret != 0:
     295                print("ERROR: cannot find Makefile jobserver version", file=sys.stderr)
     296                print("       test returned : {} '{}'".format(make_ret, err), file=sys.stderr)
     297                sys.exit(1)
     298
     299        re_jobs = re.search("--jobserver-(auth|fds)", out)
     300        if not re_jobs:
     301                print("ERROR: cannot find Makefile jobserver version", file=sys.stderr)
     302                print("       MAKEFLAGS are : '{}'".format(out), file=sys.stderr)
     303                sys.exit(1)
     304
     305        return "--jobserver-{}".format(re_jobs.group(1))
     306
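
GNU make 4.2 renamed the jobserver flag from --jobserver-fds to --jobserver-auth, so jobserver_version() probes which spelling the installed make emits by building a small helper target (.test_makeflags, assumed to simply echo $(MAKEFLAGS)) under -j2 and searching its output. The same probe can be run by hand from any process started by a parallel make:

    import os, re

    # under "make -jN" the parent advertises the jobserver in MAKEFLAGS,
    # e.g. "-j4 --jobserver-auth=3,4" (make >= 4.2) or "-j4 --jobserver-fds=3,4" (older)
    flags = os.environ.get("MAKEFLAGS", "")
    m = re.search("--jobserver-(auth|fds)", flags)
    print("jobserver flag:", m.group(0) if m else "none (not under make -j)")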
     307def prep_recursive_make(N):
     308        if N < 2:
     309                return []
     310
     311        # create the pipe
     312        (r, w) = os.pipe()
     313
      314        # fill it with N-1 tokens (why N-1 and not N, I don't know; it's in the manpage for make)
     315        os.write(w, b'+' * (N - 1));
     316
     317        # prep the flags for make
     318        make_flags = ["-j{}".format(N), "--jobserver-auth={},{}".format(r, w)]
     319
     320        # tell make about the pipes
     321        os.environ["MAKEFLAGS"] = os.environ["MFLAGS"] = " ".join(make_flags)
     322
      323        # make sure to pass the pipes on to our children
     324        settings.update_make_fds(r, w)
     325
     326        return make_flags
     327
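
prep_recursive_make() turns the test script into the top-level jobserver: it creates the token pipe, preloads it with N-1 tokens (the invoker implicitly owns the Nth slot), and advertises the pipe through MAKEFLAGS/MFLAGS. Cooperating children then follow the usual protocol: take a token before starting a job, return it when done. A minimal sketch of that client side, not part of the changeset:

    import os

    def run_with_token(rfd, wfd, job):
        """Acquire one jobserver token, run the job, then return the token."""
        token = os.read(rfd, 1)      # blocks until a slot is free
        try:
            job()
        finally:
            os.write(wfd, token)     # give the slot back to other workers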
     328def prep_unlimited_recursive_make():
     329        # prep the flags for make
     330        make_flags = ["-j"]
     331
     332        # tell make about the pipes
     333        os.environ["MAKEFLAGS"] = os.environ["MFLAGS"] = "-j"
     334
     335        return make_flags
     336
     337
     338def eval_hardware():
     339        # we can create as many things as we want
     340        # how much hardware do we have?
     341        if settings.distribute:
     342                # remote hardware is allowed
     343                # how much do we have?
     344                ret, jstr, _ = sh("distcc", "-j", output_file=subprocess.PIPE, ignore_dry_run=True)
     345                return int(jstr.strip()) if ret == 0 else multiprocessing.cpu_count()
     346        else:
     347                # remote isn't allowed, use local cpus
     348                return multiprocessing.cpu_count()
     349
    291350# count number of jobs to create
    292 def job_count( options, tests ):
     351def job_count( options ):
    293352        # check if the user already passed in a number of jobs for multi-threading
    294         if not options.jobs:
    295                 make_flags = os.environ.get('MAKEFLAGS')
    296                 force = bool(make_flags)
    297                 make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
    298                 if make_jobs_fds :
    299                         tokens = os.read(int(make_jobs_fds.group(2)), 1024)
    300                         options.jobs = len(tokens)
    301                         os.write(int(make_jobs_fds.group(3)), tokens)
    302                 else :
    303                         if settings.distribute:
    304                                 ret, jstr = sh("distcc", "-j", output_file=subprocess.PIPE, ignore_dry_run=True)
    305                                 if ret == 0:
    306                                         options.jobs = int(jstr.strip())
    307                                 else :
    308                                         options.jobs = multiprocessing.cpu_count()
    309                         else:
    310                                 options.jobs = multiprocessing.cpu_count()
     353        make_env = os.environ.get('MAKEFLAGS')
     354        make_flags = make_env.split() if make_env else None
     355        jobstr = jobserver_version()
     356
     357        if options.jobs and make_flags:
      358                print('WARNING: the -j option should not be specified when called from Make', file=sys.stderr)
     359
     360        # Top level make is calling the shots, just follow
     361        if make_flags:
     362                # do we have -j and --jobserver-...
     363                jobopt = None
     364                exists_fds = None
     365                for f in make_flags:
     366                        jobopt = f if f.startswith("-j") else jobopt
     367                        exists_fds = f if f.startswith(jobstr) else exists_fds
     368
     369                # do we have limited parallelism?
     370                if exists_fds :
     371                        try:
     372                                rfd, wfd = tuple(exists_fds.split('=')[1].split(','))
     373                        except:
      374                                print("ERROR: jobserver has unrecognizable format, was '{}'".format(exists_fds), file=sys.stderr)
     375                                sys.exit(1)
     376
      377                        # read the token pipe to count the number of available tokens, then restore the pipe
      378                        # this assumes the test suite script isn't invoked in parallel with something else
     379                        tokens = os.read(int(rfd), 65536)
     380                        os.write(int(wfd), tokens)
     381
      382                        # the number of tokens is off by one for an obscure but well-documented reason
     383                        # see man make for more details
     384                        options.jobs = len(tokens) + 1
     385
     386                # do we have unlimited parallelism?
     387                elif jobopt and jobopt != "-j1":
      388                        # check that this actually makes sense
     389                        if jobopt != "-j":
     390                                print("ERROR: -j option passed by make but no {}, was '{}'".format(jobstr, jobopt), file=sys.stderr)
     391                                sys.exit(1)
     392
     393                        options.jobs = eval_hardware()
     394                        flags = prep_unlimited_recursive_make()
     395
     396
     397                # then no parallelism
     398                else:
     399                        options.jobs = 1
     400
     401                # keep all flags make passed along, except the weird 'w' which is about subdirectories
     402                flags = [f for f in make_flags if f != 'w']
     403
     404        # Arguments are calling the shots, fake the top level make
     405        elif options.jobs :
     406
     407                # make sure we have a valid number of jobs that corresponds to user input
     408                if options.jobs < 0 :
     409                        print('ERROR: Invalid number of jobs', file=sys.stderr)
     410                        sys.exit(1)
     411
     412                flags = prep_recursive_make(options.jobs)
     413
     414        # Arguments are calling the shots, fake the top level make, but 0 is a special case
     415        elif options.jobs == 0:
     416                options.jobs = eval_hardware()
     417                flags = prep_unlimited_recursive_make()
     418
      419        # No one asked to run in parallel, so don't
    311420        else :
    312                 force = True
    313 
    314         # make sure we have a valid number of jobs that corresponds to user input
    315         if options.jobs <= 0 :
    316                 print('ERROR: Invalid number of jobs', file=sys.stderr)
    317                 sys.exit(1)
    318 
    319         return min( options.jobs, len(tests) ), force
     421                options.jobs = 1
     422                flags = []
     423
     424        # Make sure we call make as expected
     425        settings.update_make_cmd( flags )
     426
     427        # return the job count
     428        return options.jobs
    320429
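
The rewritten job_count() handles three situations: a parent make is in charge (count the tokens in its jobserver pipe and reuse its flags), the user passed -j (fake a top-level make with prep_recursive_make() or prep_unlimited_recursive_make()), or neither (run serially). The "+1" accounts for the token the parent never puts in the pipe, and the counting trick only works if nothing else is draining the pipe at that moment, as the comment above notes. A condensed sketch of just that counting step:

    import os

    def count_jobserver_jobs(rfd, wfd):
        # drain the tokens currently available, then put them straight back;
        # "make -jN" preloads N-1 tokens, so usable parallelism is len(tokens) + 1
        tokens = os.read(rfd, 65536)
        os.write(wfd, tokens)
        return len(tokens) + 1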
    321430# enable core dumps for all the test children
     
    334443        distcc_hash = os.path.join(settings.SRCDIR, '../tools/build/distcc_hash')
    335444        config = "%s-%s" % (settings.arch.target, settings.debug.path)
    336         _, out = sh(distcc_hash, config, output_file=subprocess.PIPE, ignore_dry_run=True)
     445        _, out, _ = sh(distcc_hash, config, output_file=subprocess.PIPE, ignore_dry_run=True)
    337446        return out.strip()
    338447
     
    374483
    375484        if not os.path.isfile(core):
    376                 return 1, "ERR No core dump (limit soft: {} hard: {})".format(*resource.getrlimit(resource.RLIMIT_CORE))
     485                return 1, "ERR No core dump, expected '{}' (limit soft: {} hard: {})".format(core, *resource.getrlimit(resource.RLIMIT_CORE))
    377486
    378487        try:
    379                 return sh('gdb', '-n', path, core, '-batch', '-x', cmd, output_file=subprocess.PIPE)
     488                ret, out, err = sh('gdb', '-n', path, core, '-batch', '-x', cmd, output_file=subprocess.PIPE)
     489                if ret == 0:
     490                        return 0, out
     491                else:
     492                        return 1, err
    380493        except:
    381494                return 1, "ERR Could not read core with gdb"
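
When a test crashes, the core-dump reader now keeps gdb's stdout (the output of the commands in the batch file) separate from its stderr (why gdb itself failed). The invocation "gdb -n EXE CORE -batch -x FILE" skips any .gdbinit, runs the commands in FILE against the core, and exits. A hedged sketch of the same call outside the test harness (the command-file contents here are illustrative):

    import subprocess, tempfile

    def backtrace(exe, core):
        with tempfile.NamedTemporaryFile("w", suffix=".gdb") as cmds:
            cmds.write("bt\ninfo registers\n")   # illustrative commands only
            cmds.flush()
            p = subprocess.run(["gdb", "-n", exe, core, "-batch", "-x", cmds.name],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            return (0, p.stdout.decode()) if p.returncode == 0 else (1, p.stderr.decode())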
  • tests/test.py

    ref3c383 rd672350  
    140140        parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tests, can be used with --all option', action='store_true')
    141141        parser.add_argument('--archive-errors', help='If called with a valid path, on test crashes the test script will copy the core dump and the executable to the specified path.', type=str, default='')
    142         parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
     142        parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously, 0 (default) for unlimited', nargs='?', const=0, type=int)
    143143        parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
    144144        parser.add_argument('--list-dist', help='List all tests for distribution', action='store_true')
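
The new -j/--jobs declaration uses argparse's nargs='?' with const=0: "-j 8" requests 8 jobs, a bare "-j" yields the const value 0 (later expanded by job_count() to all available hardware), and omitting the flag leaves the value None so the script stays serial unless a parent make says otherwise. A self-contained check of that behaviour:

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument('-j', '--jobs', nargs='?', const=0, type=int)
    assert p.parse_args([]).jobs is None        # flag absent: serial, or defer to make
    assert p.parse_args(['-j']).jobs == 0       # bare -j: "unlimited", sized later
    assert p.parse_args(['-j', '8']).jobs == 8  # explicit count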
     
    195195        # build, skipping to next test on error
    196196        with Timed() as comp_dur:
    197                 make_ret, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )
     197                make_ret, _, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )
    198198
    199199        # ----------
     
    208208                                if settings.dry_run or is_exe(exe_file):
    209209                                        # run test
    210                                         retcode, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)
     210                                        retcode, _, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)
    211211                                else :
    212212                                        # simply cat the result into the output
     
    226226                        else :
    227227                                # fetch return code and error from the diff command
    228                                 retcode, error = diff(cmp_file, out_file)
     228                                retcode, error, _ = diff(cmp_file, out_file)
    229229
    230230                else:
     
    366366                        print(os.path.relpath(t.expect(), settings.SRCDIR), end=' ')
    367367                        print(os.path.relpath(t.input() , settings.SRCDIR), end=' ')
    368                         code, out = make_recon(t.target())
     368                        code, out, err = make_recon(t.target())
    369369
    370370                        if code != 0:
    371                                 print('ERROR: recond failed for test {}'.format(t.target()), file=sys.stderr)
      371                                print('ERROR: recon failed for test {}: {} \'{}\''.format(t.target(), code, err), file=sys.stderr)
    372372                                sys.exit(1)
    373373
     
    417417                        if is_empty(t.expect()):
    418418                                print('WARNING: test "{}" has empty .expect file'.format(t.target()), file=sys.stderr)
     419
     420        options.jobs = job_count( options )
    419421
    420422        # for each build configurations, run the test
     
    430432                        local_tests = settings.ast.filter( tests )
    431433                        local_tests = settings.arch.filter( local_tests )
    432                         options.jobs, forceJobs = job_count( options, local_tests )
    433                         settings.update_make_cmd(forceJobs, options.jobs)
    434434
    435435                        # check the build configuration works
    436436                        settings.validate()
     437                        jobs = min(options.jobs, len(local_tests))
    437438
    438439                        # print configuration
     
    440441                                'Regenerating' if settings.generating else 'Running',
    441442                                len(local_tests),
    442                                 options.jobs,
     443                                jobs,
    443444                                settings.ast.string,
    444445                                settings.arch.string,
     
    450451
    451452                        # otherwise run all tests and make sure to return the correct error code
    452                         failed = run_tests(local_tests, options.jobs)
     453                        failed = run_tests(local_tests, jobs)
    453454                        if failed:
    454455                                if not settings.continue_: