Changeset d672350
- Timestamp: Mar 21, 2022, 1:44:06 PM (4 years ago)
- Branches: ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum
- Children: a76202d
- Parents: ef3c383 (diff), dbe2533 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 88 added, 2 deleted, 88 edited, 4 moved
Jenkinsfile
ef3c383 → d672350

@@ 108-112 @@
 
 		// Configure libcfa
-		sh 'make -j 8 --no-print-directory configure-libcfa'
+		sh 'make -j $(nproc) --no-print-directory configure-libcfa'
 	}
 }

@@ 116-123 @@
 	dir (BuildDir) {
 		// Build driver
-		sh 'make -j 8 --no-print-directory -C driver'
+		sh 'make -j $(nproc) --no-print-directory -C driver'
 
 		// Build translator
-		sh 'make -j 8 --no-print-directory -C src'
+		sh 'make -j $(nproc) --no-print-directory -C src'
 	}
 }

@@ 126-130 @@
 	// Build outside of the src tree to ease cleaning
 	dir (BuildDir) {
-		sh "make -j 8 --no-print-directory -C libcfa/${Settings.Architecture.name}-debug"
+		sh "make -j $(nproc) --no-print-directory -C libcfa/${Settings.Architecture.name}-debug"
 	}
 }

@@ 133-137 @@
 	// Build outside of the src tree to ease cleaning
 	dir (BuildDir) {
-		sh "make -j 8 --no-print-directory -C libcfa/${Settings.Architecture.name}-nodebug"
+		sh "make -j $(nproc) --no-print-directory -C libcfa/${Settings.Architecture.name}-nodebug"
 	}
 }

@@ 140-144 @@
 	// Build outside of the src tree to ease cleaning
 	dir (BuildDir) {
-		sh "make -j 8 --no-print-directory install"
+		sh "make -j $(nproc) --no-print-directory install"
 	}
 }

@@ 161-165 @@
 Tools.BuildStage('Test: full', Settings.RunAllTests) {
 	dir (BuildDir) {
-		jopt = " "
+		jopt = "-j $(nproc)"
 		if( Settings.Architecture.node == 'x86' ) {
 			jopt = "-j2"
benchmark/io/http/protocol.cfa
ef3c383 → d672350

@@ 173-193 @@
 }
 
-static void zero_sqe(struct io_uring_sqe * sqe) {
-	sqe->flags = 0;
-	sqe->ioprio = 0;
-	sqe->fd = 0;
-	sqe->off = 0;
-	sqe->addr = 0;
-	sqe->len = 0;
-	sqe->fsync_flags = 0;
-	sqe->__pad2[0] = 0;
-	sqe->__pad2[1] = 0;
-	sqe->__pad2[2] = 0;
-	sqe->fd = 0;
-	sqe->off = 0;
-	sqe->addr = 0;
-	sqe->len = 0;
-}
-
 enum FSM_STATE {
 	Initial,
doc/theses/mubeen_zulfiqar_MMath/Makefile
ef3c383 → d672350

@@ 1-15 @@
-DOC = uw-ethesis.pdf
-BASE = ${DOC:%.pdf=%}			# remove suffix
 # directory for latex clutter files
-BUILD = build
-TEXSRC = $(wildcard *.tex)
-FIGSRC = $(wildcard *.fig)
-BIBSRC = $(wildcard *.bib)
-TEXLIB = .:../../LaTeXmacros:${BUILD}:	# common latex macros
-BIBLIB = .:../../bibliography		# common citation repository
+Build = build
+Figures = figures
+Pictures = pictures
+TeXSRC = ${wildcard *.tex}
+FigSRC = ${notdir ${wildcard ${Figures}/*.fig}}
+PicSRC = ${notdir ${wildcard ${Pictures}/*.fig}}
+BIBSRC = ${wildcard *.bib}
+TeXLIB = .:../../LaTeXmacros:${Build}:	# common latex macros
+BibLIB = .:../../bibliography		# common citation repository
 
 MAKEFLAGS = --no-print-directory # --silent
-VPATH = ${BUILD}
+VPATH = ${Build} ${Figures} ${Pictures}	# extra search path for file names used in document
 
 ### Special Rules:

@@ 19-64 @@
 ### Commands:
-LATEX = TEXINPUTS=${TEXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${BUILD}
-BIBTEX = BIBINPUTS=${BIBLIB} bibtex
-#GLOSSARY = INDEXSTYLE=${BUILD} makeglossaries-lite
+
+LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
+BibTeX = BIBINPUTS=${BibLIB} bibtex
+#Glossary = INDEXSTYLE=${Build} makeglossaries-lite
 
 ### Rules and Recipes:
 
+DOC = uw-ethesis.pdf
+BASE = ${DOC:%.pdf=%}			# remove suffix
+
 all: ${DOC}
 
-${BUILD}/%.dvi: ${TEXSRC} ${FIGSRC:%.fig=%.tex} ${BIBSRC} Makefile | ${BUILD}
-	${LATEX} ${BASE}
-	${BIBTEX} ${BUILD}/${BASE}
-	${LATEX} ${BASE}
-#	${GLOSSARY} ${BUILD}/${BASE}
-#	${LATEX} ${BASE}
+clean:
+	@rm -frv ${DOC} ${Build}
 
-${BUILD}:
+# File Dependencies #
+
+${Build}/%.dvi : ${TeXSRC} ${FigSRC:%.fig=%.tex} ${PicSRC:%.fig=%.pstex} ${BIBSRC} Makefile | ${Build}
+	${LaTeX} ${BASE}
+	${BibTeX} ${Build}/${BASE}
+	${LaTeX} ${BASE}
+	# if nedded, run latex again to get citations
+	if fgrep -s "LaTeX Warning: Citation" ${basename $@}.log ; then ${LaTeX} ${BASE} ; fi
+#	${Glossary} ${Build}/${BASE}
+#	${LaTeX} ${BASE}
+
+${Build}:
 	mkdir $@
 
-%.pdf : ${BUILD}/%.ps | ${BUILD}
+%.pdf : ${Build}/%.ps | ${Build}
 	ps2pdf $<
 
-%.ps : %.dvi | ${BUILD}
+%.ps : %.dvi | ${Build}
 	dvips $< -o $@
 
-%.tex : %.fig | ${BUILD}
-	fig2dev -L eepic $< > ${BUILD}/$@
+%.tex : %.fig | ${Build}
+	fig2dev -L eepic $< > ${Build}/$@
 
-%.ps : %.fig | ${BUILD}
-	fig2dev -L ps $< > ${BUILD}/$@
+%.ps : %.fig | ${Build}
+	fig2dev -L ps $< > ${Build}/$@
 
-%.pstex : %.fig | ${BUILD}
-	fig2dev -L pstex $< > ${BUILD}/$@
-	fig2dev -L pstex_t -p ${BUILD}/$@ $< > ${BUILD}/$@_t
-
-clean:
-	@rm -frv ${DOC} ${BUILD} *.fig.bak
+%.pstex : %.fig | ${Build}
+	fig2dev -L pstex $< > ${Build}/$@
+	fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
doc/theses/mubeen_zulfiqar_MMath/allocator.tex
ef3c383 → d672350

The chapter was rewritten in this merge. Removed: the ``Writing Points'' planning outline, \section{Objective of uHeapLmmm}, \subsection{Design philosophy}, \section{Background and previous design of uHeapLmmm}, the old two-design \section{Distributed design of uHeapLmmm}, and the \subsubsection-style API headings. The revised chapter text follows.

\chapter{Allocator}

\section{uHeap}
uHeap is a lightweight memory allocator. The objective behind uHeap is to design a minimal concurrent memory allocator that has new features and also fulfills the GNU C Library requirements (FIX ME: cite requirements).

The objective of uHeap's new design was to fulfill the following requirements:
\begin{itemize}
\item It should be concurrent and thread-safe for multi-threaded programs.
\item It should avoid global locks, on resources shared across all threads, as much as possible.
\item Its performance (FIX ME: cite performance benchmarks) should be comparable to the commonly used allocators (FIX ME: cite common allocators).
… …
\end{itemize}

\section{Design choices for uHeap}
uHeap's design was reviewed and changed to fulfill the new requirements (FIX ME: cite allocator philosophy). For this purpose, the following designs of uHeap were proposed:

\paragraph{Design 1: Centralized}
One heap, but lower bucket sizes are N-shared across KTs.
This design leverages the fact that 95\% of allocation requests are less than 512 bytes and there are only 3--5 different request sizes.
When KTs $\le$ N, the important bucket sizes are uncontended;
when KTs $>$ N, the free buckets are contended.
Therefore, threads are only contending for a small number of buckets, which are distributed among them to reduce contention.
\begin{cquote}
\centering
\input{AllocDS2}
\end{cquote}
Problems: need to know when a kernel thread (KT) is created and destroyed to know when to assign a shared bucket-number.
When no thread is assigned a bucket number, its free storage is unavailable.
All KTs contend for one lock on @sbrk@ for their initial allocations (before the free lists get populated).

\paragraph{Design 2: Decentralized N Heaps}
Fixed number of heaps: shard the heap into N heaps, each with a bump-area allocated from the @sbrk@ area.
Kernel threads (KT) are assigned to the N heaps.
… …
Problems: need to know when a KT is created and destroyed to know when to assign/un-assign a heap to the KT.

\paragraph{Design 3: Decentralized Per-thread Heaps}
Design 3 is similar to Design 2, but instead of an M:N model it uses a 1:1 model; that is, instead of N heaps shared among M KTs, Design 3 has one heap for each KT.
Dynamic number of heaps: create a thread-local heap for each kernel thread (KT), with a bump-area allocated from the @sbrk@ area.
Each KT has its own exclusive thread-local heap, so the heap is uncontended among KTs regardless of how many KTs are created.
Operations on the @sbrk@ area are still protected by locks.
%\begin{cquote}
%\centering
%\input{AllocDS3} FIXME add figs
%\end{cquote}
Problems: the heap cannot be destroyed when a KT exits, because dynamic objects have ownership and are returned to the heap that created them when the program frees them; all dynamic objects point back to their owner heap.
If thread A creates an object O, passes it to another thread B, and A itself exits, then when B frees O, O must return to A's heap; hence A's heap must be preserved for the lifetime of the whole program, as there might be objects in use by other threads that were allocated by A.
Also, we need to know when a KT is created and destroyed to know when to create/destroy a heap for the KT.
\paragraph{Design 4: Decentralized Per-CPU Heaps}
Design 4 is similar to Design 3, but instead of a heap for each thread it creates a heap for each CPU.
Fixed number of heaps for a machine: create a heap for each CPU, with a bump-area allocated from the @sbrk@ area.
Each CPU has its own exclusive CPU-local heap; when the program performs a dynamic memory operation, it is serviced by the heap of the CPU on which the thread is currently running.
As in Design 3 (FIXME cite), the heap is uncontended among KTs regardless of how many KTs are created.
Operations on the @sbrk@ area are still protected by locks.
To deal with preemption during a dynamic memory operation, librseq (FIXME cite) is used to make sure the whole operation completes on one CPU; librseq's restartable sequences make it possible to re-run a critical section and undo its writes if a preemption happens during its execution.
%\begin{cquote}
%\centering
%\input{AllocDS4} FIXME add figs
%\end{cquote}
Problems: this approach was slower than the per-thread model. Also, librseq does not provide restartable sequences that detect preemption in a user-level threading system, which is important to us as \CFA (FIXME cite) has its own threading system that we want to support.

Out of the four designs, Design 3 was chosen for the following reasons:
\begin{itemize}
\item
Decentralized designs are better in general than the centralized design because their concurrency is better across all bucket sizes: Design 1 shards only a few buckets of selected sizes, while the other designs shard all the buckets, i.e., the whole heap, in addition to sharding the @sbrk@ area. So Design 1 was eliminated.
\item
Design 2 was eliminated because it has a possibility of contention when KTs $>$ N, while Designs 3 and 4 have no contention in any scenario.
\item
Design 4 was eliminated because it was slower than Design 3 and provided no way to achieve user-threading safety using librseq. We had to use \CFA interrupt handling to achieve user-threading safety, which has some cost; Design 4 was already slower than Design 3, and adding the cost of interrupt handling on top would have made it even slower.
\end{itemize}

\subsection{Advantages of distributed design}

The distributed design of uHeap is concurrent, to work in multi-threaded applications.
Some key benefits of the distributed design of uHeap are as follows:
\begin{itemize}
\item
Bump allocation is concurrent, as memory taken from @sbrk@ is sharded across all heaps as bump-allocation reserve. The call to @sbrk@ is protected by a lock, but bump allocation (on memory taken from @sbrk@) is not contended once the @sbrk@ call has returned.
\item
Low or almost no contention on heap resources.
\item
It is possible to use sharing and stealing techniques to share/find unused storage when a free list is unused or empty.
\item
The distributed design avoids unnecessary locks on resources shared across all KTs.
\end{itemize}

\section{uHeap Structure}

As described in (FIXME cite 2.4), uHeap uses the following features of multi-threaded memory allocators:
\begin{itemize}
\item
uHeap has multiple heaps without a global heap and uses a 1:1 model (FIXME cite 2.5 1:1 model).
\item
uHeap uses object ownership (FIXME cite 2.5.2).
\item
uHeap does not use object containers (FIXME cite 2.6) or any coalescing technique; instead, each dynamic object allocated by uHeap has a header that contains bookkeeping information.
\item
Each thread-local heap in uHeap has its own allocation buffer that is taken from the system using the @sbrk@ call (FIXME cite 2.7).
\item
Unless a heap is freeing an object that is owned by another thread's heap, or is calling @sbrk@, uHeap is mostly lock-free, which eliminates most of the contention on shared resources (FIXME cite 2.8).
\end{itemize}

As uHeap uses a heap-per-thread model to reduce contention on heap resources, we manage a list of heaps (heap-list) that can be used by threads. The list is empty at the start of the program. When a kernel thread (KT) is created, we check whether the heap-list is non-empty: if so, a heap is removed from the heap-list and given to the new KT to use exclusively; if not, a new heap object is created in dynamic memory and given to the new KT to use exclusively. When a KT exits, its heap is not destroyed; instead, it is put back on the heap-list, ready to be reused by new KTs.

This reduces the memory footprint, as the objects on the free lists of an exited KT can be reused by a new KT. Also, all heaps created during the lifetime of the program are preserved until the end of the program. uHeap uses object ownership, where an object is freed to the free buckets of the heap that allocated it; even after a KT A has exited, its heap has to be preserved, as there might be objects still in use by other threads that were initially allocated by A and then passed to those threads.
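The heap-list handoff described above can be illustrated with a small sketch. The code below is not taken from the uHeap sources; the names (@Heap@, @heap_list@, @heap_list_lock@, @acquire_heap@, @release_heap@, @new_heap@) are hypothetical stand-ins, and the sketch only shows the intended behaviour: a KT takes a heap from the global list if one is available, otherwise it creates a fresh one, and returns its heap to the list on exit instead of destroying it.
\begin{lstlisting}
// Hypothetical sketch of per-KT heap acquisition; not the actual uHeap code.
#include <pthread.h>
#include <stddef.h>

struct Heap { struct Heap * next; /* free buckets, bump-allocation reserve, ... */ };

static struct Heap * heap_list = NULL;        // heaps released by exited KTs
static pthread_mutex_t heap_list_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread struct Heap * my_heap = NULL; // 1:1 model: one heap per kernel thread

static struct Heap * new_heap( void );        // hypothetical: allocate and initialize a fresh heap

static struct Heap * acquire_heap( void ) {   // called when a KT is created
	pthread_mutex_lock( &heap_list_lock );
	struct Heap * h = heap_list;
	if ( h ) heap_list = h->next;             // reuse a heap from an exited KT
	pthread_mutex_unlock( &heap_list_lock );
	if ( ! h ) h = new_heap();                // otherwise create a fresh heap
	return h;
}

static void release_heap( struct Heap * h ) { // called when a KT exits; heap is preserved, not destroyed
	pthread_mutex_lock( &heap_list_lock );
	h->next = heap_list;
	heap_list = h;
	pthread_mutex_unlock( &heap_list_lock );
}
\end{lstlisting}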
\begin{figure}
\centering
\includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps}
\caption{Heap Structure}
\label{fig:heapStructureFig}
\end{figure}

Each heap uses segregated free buckets, each holding free objects of a specific size. Each free bucket contains the following two lists:
\begin{itemize}
\item
The free list is used when a thread frees an object that is owned by its own heap; the free list does not use any locks/atomic operations, as it is only used by the owner KT.
\item
The away list is used when a thread A frees an object that is owned by another KT B's heap. The object should be freed to the owner heap (B's heap), so A places the object on B's away list. The away list is lock-protected, as it is shared by all other threads.
\end{itemize}

When a dynamic object of size S is requested, the thread-local heap checks whether S is greater than or equal to the mmap threshold. Any request at or above the mmap threshold is fulfilled by allocating an mmap area of that size; such requests are not allocated in the sbrk area. The value of this threshold can be changed using the @mallopt@ routine, but the new value should not be larger than our biggest free-bucket size.

Algorithm~\ref{alg:heapObjectAlloc} briefly shows how an allocation request is fulfilled.

\begin{algorithm}
\caption{Dynamic object allocation of size S}\label{alg:heapObjectAlloc}
\begin{algorithmic}[1]
\State $\textit{O} \gets \text{NULL}$
\If {$S < \textit{mmap-threshold}$}
	\State $\textit{B} \gets (\text{smallest free-bucket} \geq S)$
	\If {$\textit{B's free-list is empty}$}
		\If {$\textit{B's away-list is empty}$}
			\If {$\textit{heap's allocation buffer} < S$}
				\State $\text{get allocation buffer using system call sbrk()}$
			\EndIf
			\State $\textit{O} \gets \text{bump allocate an object of size S from allocation buffer}$
		\Else
			\State $\textit{merge B's away-list into free-list}$
			\State $\textit{O} \gets \text{pop an object from B's free-list}$
		\EndIf
	\Else
		\State $\textit{O} \gets \text{pop an object from B's free-list}$
	\EndIf
	\State $\textit{O's owner} \gets \text{B}$
\Else
	\State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$
\EndIf
\State \Return $\textit{O}$
\end{algorithmic}
\end{algorithm}
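For readers who prefer code to pseudocode, the following C-style sketch mirrors Algorithm~\ref{alg:heapObjectAlloc}. It is illustrative only: the type and routine names (@Bucket@, @bucket_for@, @pop_free@, @merge_away_list@, @refill_from_sbrk@, @bump_alloc@, @set_owner@) and the constants are hypothetical stand-ins for the corresponding uHeap internals, which are only declared here, not defined.
\begin{lstlisting}
// Illustrative allocation fast path; names and constants are hypothetical, not the uHeap sources.
#include <stddef.h>
#include <sys/mman.h>

#define NBUCKETS 64                                  // number of size classes (illustrative)
static const size_t mmap_threshold = 512 * 1024;     // illustrative; adjustable via mallopt

struct Free { struct Free * next; };
struct Bucket { size_t size; struct Free * freeList; struct Free * awayList; /* + lock */ };
struct Heap { struct Bucket buckets[NBUCKETS]; char * reserve; size_t reserveSize; };

// Hypothetical helpers standing in for uHeap internals (declared only).
static struct Bucket * bucket_for( struct Heap * h, size_t S );   // smallest bucket >= S
static void merge_away_list( struct Bucket * B );                 // locked splice of away list
static void refill_from_sbrk( struct Heap * h, size_t size );     // locked sbrk call
static void * bump_alloc( struct Heap * h, size_t size );         // carve from reserve
static void set_owner( void * O, struct Bucket * B );             // record owner in header

static void * pop_free( struct Bucket * B ) {        // owner-only free list: no locks
	struct Free * o = B->freeList;
	if ( o ) B->freeList = o->next;
	return o;
}

void * heap_malloc( struct Heap * heap, size_t S ) {
	if ( S >= mmap_threshold )                       // large request: separate mmap area, not sbrk
		return mmap( NULL, S, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
	struct Bucket * B = bucket_for( heap, S );
	void * O = pop_free( B );
	if ( ! O ) {                                     // free list empty: try objects freed by other KTs
		merge_away_list( B );
		O = pop_free( B );
	}
	if ( ! O ) {                                     // both lists empty: bump allocate from reserve
		if ( heap->reserveSize < B->size ) refill_from_sbrk( heap, B->size );
		O = bump_alloc( heap, B->size );
	}
	set_owner( O, B );
	return O;
}
\end{lstlisting}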
\section{Added Features and Methods}
To improve the uHeap allocator (FIX ME: cite uHeap) interface and make it more user-friendly, we added a few more routines to the C allocator. We also built a \CFA (FIX ME: cite cforall) interface on top of the C interface to increase the usability of the allocator.

\subsection{C Interface}
We added a few more features and routines to the allocator's C interface that make the allocator more usable for programmers. These features give the programmer more control over dynamic memory allocation.

\subsection{Out of Memory}

Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory;
hence the need to return an alternate value for a zero-sized allocation.
The alternative is to abort the program when out of memory.
In theory, notifying the programmer allows recovery;
in practice, it is almost impossible to recover gracefully when out of memory, so the cheaper approach of returning @nullptr@ for a zero-sized allocation is chosen.

\subsection{\lstinline{void * aalloc( size_t dim, size_t elemSize )}}
@aalloc@ is an extension of @malloc@. It allows the programmer to allocate a dynamic array of objects without explicitly calculating the total size of the array. The only alternative to this routine in other allocators is @calloc@, but @calloc@ also fills the dynamic memory with 0, which makes it slower for a programmer who only wants to allocate an array of objects without zero-filling it.
\paragraph{Usage}
@aalloc@ takes two parameters.
\begin{itemize}
\item
@dim@: number of objects in the array.
\item
@elemSize@: size of each object in the array.
\end{itemize}
It returns the address of a dynamic object allocated on the heap that can contain @dim@ objects of size @elemSize@. On failure, it returns a @NULL@ pointer.
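A minimal usage sketch of @aalloc@: the array size is computed from the two arguments and, unlike @calloc@, the storage is left uninitialized.
\begin{lstlisting}
// 100 uninitialized ints, same capacity as malloc( 100 * sizeof(int) )
int * a = aalloc( 100, sizeof(int) );
if ( a == NULL ) { /* handle allocation failure */ }
a[0] = 42;
free( a );
\end{lstlisting}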
\subsection{\lstinline{void * resize( void * oaddr, size_t size )}}
@resize@ is an extension of @realloc@. It allows the programmer to reuse a currently allocated dynamic object with a new size requirement. Its alternative in other allocators is @realloc@, but @realloc@ also copies the data in the old object to the new object, which makes it slower for a programmer who only wants to reuse an old dynamic object for a new size requirement and does not need to preserve the old data.
\paragraph{Usage}
@resize@ takes two parameters.
\begin{itemize}
\item
@oaddr@: the address of the old object that needs to be resized.
\item
@size@: the new size to which the old object needs to be resized.
\end{itemize}
It returns an object of the given size, but it does not preserve the data in the old object. On failure, it returns a @NULL@ pointer.

\subsection{\lstinline{void * resize( void * oaddr, size_t nalign, size_t size )}}
This @resize@ is an extension of the @resize@ above (FIX ME: cite above resize). In addition to resizing an old object, it can also realign the old object to a new alignment requirement.
\paragraph{Usage}
This @resize@ takes three parameters; it takes an additional @nalign@ parameter compared to the @resize@ above (FIX ME: cite above resize).
\begin{itemize}
\item
@oaddr@: the address of the old object that needs to be resized.
\item
@nalign@: the new alignment to which the old object needs to be realigned.
\item
@size@: the new size to which the old object needs to be resized.
\end{itemize}
It returns an object with the given size and alignment. On failure, it returns a @NULL@ pointer.
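A short sketch of both @resize@ forms, reusing an existing buffer whose old contents are no longer needed; the buffer and the chosen sizes are arbitrary.
\begin{lstlisting}
char * buf = malloc( 64 );
// grow the buffer; the old contents are NOT preserved
buf = resize( buf, 256 );
// grow again and realign to a 64-byte boundary (e.g., a cache line)
buf = resize( buf, 64, 4096 );
free( buf );
\end{lstlisting}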
\subsection{\lstinline{void * amemalign( size_t alignment, size_t dim, size_t elemSize )}}
@amemalign@ is a hybrid of @memalign@ and @aalloc@. It allows the programmer to allocate an aligned dynamic array of objects without explicitly calculating the total size of the array; it frees the programmer from computing that total size.
\paragraph{Usage}
\begin{itemize}
\item
@alignment@: the alignment to which the dynamic array needs to be aligned.
\item
@dim@: number of objects in the array.
\item
@elemSize@: size of each object in the array.
\end{itemize}
It returns a dynamic array with the capacity to contain @dim@ objects of size @elemSize@, aligned to the given alignment. On failure, it returns a @NULL@ pointer.

\subsection{\lstinline{void * cmemalign( size_t alignment, size_t dim, size_t elemSize )}}
@cmemalign@ is a hybrid of @amemalign@ and @calloc@. It allows the programmer to allocate an aligned, zero-filled dynamic array of objects. The current way to do this with other allocators is to allocate an aligned object with @memalign@ and then fill it with 0 explicitly; this routine provides both alignment and zero-filling implicitly.
\paragraph{Usage}
\begin{itemize}
\item
@alignment@: the alignment to which the dynamic array needs to be aligned.
\item
@dim@: number of objects in the array.
\item
@elemSize@: size of each object in the array.
\end{itemize}
It returns a dynamic array with the capacity to contain @dim@ objects of size @elemSize@, aligned to the given alignment and zero-filled. On failure, it returns a @NULL@ pointer.
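A usage sketch contrasting the two routines; the element type is arbitrary.
\begin{lstlisting}
struct Item { int key; double value; };
// 32 items aligned to a 64-byte boundary, contents uninitialized
struct Item * v = amemalign( 64, 32, sizeof(struct Item) );
// 32 items aligned to a 64-byte boundary, zero-filled
struct Item * w = cmemalign( 64, 32, sizeof(struct Item) );
free( v );
free( w );
\end{lstlisting}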
\subsection{\lstinline{size_t malloc_alignment( void * addr )}}
@malloc_alignment@ returns the alignment of a currently allocated dynamic object. It helps the programmer with memory management and bookkeeping, for example verifying the alignment of a dynamic object in a producer-consumer scenario, where a producer allocates a dynamic object and the consumer needs to assure that it was allocated with the required alignment.
\paragraph{Usage}
@malloc_alignment@ takes one parameter.
\begin{itemize}
\item
@addr@: the address of the currently allocated dynamic object.
\end{itemize}
@malloc_alignment@ returns the alignment of the given dynamic object. On failure, it returns the value of the default alignment of the uHeap allocator.

\subsection{\lstinline{bool malloc_zero_fill( void * addr )}}
@malloc_zero_fill@ returns whether a currently allocated dynamic object was initially zero-filled at the time of allocation. It helps the programmer with memory management and bookkeeping, for example verifying the zero-filled property of a dynamic object in a producer-consumer scenario, where a producer allocates a dynamic object and the consumer needs to assure that it was zero-filled at the time of allocation.
\paragraph{Usage}
@malloc_zero_fill@ takes one parameter.
\begin{itemize}
\item
@addr@: the address of the currently allocated dynamic object.
\end{itemize}
@malloc_zero_fill@ returns true if the dynamic object was initially zero-filled and false otherwise. On failure, it returns false.

\subsection{\lstinline{size_t malloc_size( void * addr )}}
@malloc_size@ returns the allocation size of a currently allocated dynamic object. It helps the programmer with memory management and bookkeeping, for example verifying the size of a dynamic object in a producer-consumer scenario, where a producer allocates a dynamic object and the consumer needs to assure that it was allocated with the required size. Its current alternative in other allocators is @malloc_usable_size@, but @malloc_size@ differs from @malloc_usable_size@: @malloc_usable_size@ returns the total data capacity of the dynamic object, including any extra space at the end of the object, whereas @malloc_size@ returns the size that was given to the allocator when the object was allocated. This size is updated when an object is passed through @realloc@, @resize@, or a similar allocator routine.
\paragraph{Usage}
@malloc_size@ takes one parameter.
\begin{itemize}
\item
@addr@: the address of the currently allocated dynamic object.
\end{itemize}
@malloc_size@ returns the allocation size of the given dynamic object. On failure, it returns zero.

\subsection{\lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}}
This @realloc@ is an extension of the default @realloc@ (FIX ME: cite default @realloc@). In addition to reallocating an old object and preserving its data, it can also realign the old object to a new alignment requirement.
\paragraph{Usage}
This @realloc@ takes three parameters; it takes an additional @nalign@ parameter compared to the default @realloc@.
\begin{itemize}
\item
@oaddr@: the address of the old object that needs to be reallocated.
\item
@nalign@: the new alignment to which the old object needs to be realigned.
\item
@size@: the new size to which the old object needs to be resized.
\end{itemize}
It returns an object with the given size and alignment that preserves the data in the old object. On failure, it returns a @NULL@ pointer.
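The following sketch combines the aligning @realloc@ with the introspection routines above: the data is preserved across the reallocation, and the consumer side can verify the properties it relies on. The assertion macro is from @<assert.h>@; the sizes and alignment are arbitrary.
\begin{lstlisting}
#include <assert.h>

int * data = aalloc( 100, sizeof(int) );
data[0] = 7;
// grow to 400 ints and realign to 128 bytes, preserving existing contents
data = realloc( data, 128, 400 * sizeof(int) );
assert( data[0] == 7 );                               // data preserved
assert( malloc_alignment( data ) >= 128 );            // requested alignment
assert( malloc_size( data ) == 400 * sizeof(int) );   // requested size, not total capacity
free( data );
\end{lstlisting}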
\subsection{\CFA Malloc Interface}
We added some routines to the malloc interface of \CFA. These routines can only be used in \CFA, and not with our standalone uHeap allocator, as they use features that are provided by \CFA and not by C. They make the allocator even more usable for programmers.
\CFA gives the allocator the liberty to know the return type of a call to the allocator; so, in most of these added routines, we removed the object-size parameter, as the allocator can calculate the size of the object from the return type.

\subsection{\lstinline{T * malloc( void )}}
This @malloc@ is a simplified polymorphic form of the default @malloc@ (FIX ME: cite malloc). It takes no parameters, compared to the default @malloc@ that takes one parameter.
\paragraph{Usage}
This @malloc@ takes no parameters.
It returns a dynamic object of the size of type @T@. On failure, it returns a @NULL@ pointer.

\subsection{\lstinline{T * aalloc( size_t dim )}}
This @aalloc@ is a simplified polymorphic form of the @aalloc@ above (FIX ME: cite aalloc). It takes one parameter, compared to the @aalloc@ above that takes two parameters.
\paragraph{Usage}
\begin{itemize}
\item
@dim@: required number of objects in the array.
\end{itemize}
It returns a dynamic object with the capacity to contain @dim@ objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer.

\subsection{\lstinline{T * calloc( size_t dim )}}
This @calloc@ is a simplified polymorphic form of the default @calloc@ (FIX ME: cite calloc). It takes one parameter, compared to the default @calloc@ that takes two parameters.
\paragraph{Usage}
\begin{itemize}
\item
@dim@: required number of objects in the array.
\end{itemize}
It returns a dynamic object with the capacity to contain @dim@ objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer.
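A sketch of the typed \CFA routines: the element type is inferred from the left-hand side, so no size argument is passed. The header name @<stdlib.hfa>@ is an assumption here, used only to make the sketch self-contained.
\begin{lstlisting}
#include <stdlib.hfa>          // assumed home of the typed \CFA allocation routines

int * i = malloc();            // one int, sized from the return type
double * v = aalloc( 10 );     // 10 uninitialized doubles
double * z = calloc( 10 );     // 10 zero-filled doubles
free( i );
free( v );
free( z );
\end{lstlisting}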
\subsection{\lstinline{T * resize( T * ptr, size_t size )}}
This @resize@ is a simplified polymorphic form of the @resize@ above (FIX ME: cite resize with alignment). It takes two parameters, compared to the @resize@ above that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation, as \CFA gives the allocator the liberty to get the alignment from the return type.
\paragraph{Usage}
This @resize@ takes two parameters.
\begin{itemize}
\item
@ptr@: address of the old object.
\item
@size@: the required size of the new object.
\end{itemize}
It returns a dynamic object of the given size, aligned to the alignment of type @T@. On failure, it returns a @NULL@ pointer.

\subsection{\lstinline{T * realloc( T * ptr, size_t size )}}
This @realloc@ is a simplified polymorphic form of the @realloc@ above (FIX ME: cite @realloc@ with align). It takes two parameters, compared to the @realloc@ above that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation, as \CFA gives the allocator the liberty to get the alignment from the return type.
\paragraph{Usage}
This @realloc@ takes two parameters.
\begin{itemize}
\item
@ptr@: address of the old object.
\item
@size@: the required size of the new object.
\end{itemize}
It returns a dynamic object of the given size that preserves the data in the given object, aligned to the alignment of type @T@. On failure, it returns a @NULL@ pointer.

\subsection{\lstinline{T * memalign( size_t align )}}
This @memalign@ is a simplified polymorphic form of the default @memalign@ (FIX ME: cite memalign). It takes one parameter, compared to the default @memalign@ that takes two parameters.
\paragraph{Usage}
\begin{itemize}
\item
@align@: the required alignment of the dynamic object.
\end{itemize}
It returns a dynamic object of the size of type @T@, aligned to the given @align@ parameter. On failure, it returns a @NULL@ pointer.

\subsection{\lstinline{T * amemalign( size_t align, size_t dim )}}
This @amemalign@ is a simplified polymorphic form of the @amemalign@ above (FIX ME: cite amemalign). It takes two parameters, compared to the @amemalign@ above that takes three parameters.
\paragraph{Usage}
\begin{itemize}
\item
@align@: required alignment of the dynamic array.
\item
@dim@: required number of objects in the array.
\end{itemize}
It returns a dynamic object with the capacity to contain @dim@ objects, each of the size of type @T@, aligned to the given @align@ parameter. On failure, it returns a @NULL@ pointer.
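Another short sketch under the same typed interface: the alignment argument is explicit, while the element size again comes from the return type.
\begin{lstlisting}
struct Node { struct Node * next; int key; };
struct Node * n = memalign( 64 );            // one Node on a 64-byte boundary
struct Node * table = amemalign( 64, 16 );   // 16 Nodes on a 64-byte boundary
free( n );
free( table );
\end{lstlisting}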
\subsection{\lstinline{T * cmemalign( size_t align, size_t dim )}}
This @cmemalign@ is a simplified polymorphic form of the @cmemalign@ above (FIX ME: cite cmemalign). It takes two parameters, compared to the @cmemalign@ above that takes three parameters.
\paragraph{Usage}
\begin{itemize}
\item
@align@: required alignment of the dynamic array.
\item
@dim@: required number of objects in the array.
\end{itemize}
It returns a dynamic object with the capacity to contain @dim@ objects, each of the size of type @T@, aligned to the given @align@ parameter and zero-filled. On failure, it returns a @NULL@ pointer.
\subsection{\lstinline{T * aligned_alloc( size_t align )}}
This @aligned_alloc@ is a simplified polymorphic form of the default @aligned_alloc@ (FIX ME: cite @aligned_alloc@). It takes one parameter, compared to the default @aligned_alloc@ that takes two parameters.
\paragraph{Usage}
This @aligned_alloc@ takes one parameter.
\begin{itemize}
\item
@align@: required alignment of the dynamic object.
\end{itemize}
It returns a dynamic object of the size of type @T@, aligned to the given parameter. On failure, it returns a @NULL@ pointer.

\subsection{\lstinline{int posix_memalign( T ** ptr, size_t align )}}
This @posix_memalign@ is a simplified polymorphic form of the default @posix_memalign@ (FIX ME: cite @posix_memalign@). It takes two parameters, compared to the default @posix_memalign@ that takes three parameters.
\paragraph{Usage}
This @posix_memalign@ takes two parameters.
\begin{itemize}
\item
@ptr@: variable address in which to store the address of the allocated object.
\item
@align@: required alignment of the dynamic object.
\end{itemize}
It stores the address of a dynamic object of the size of type @T@ in the given @ptr@ parameter; this object is aligned to the given parameter. On failure, it returns a nonzero error code.

\subsection{\lstinline{T * valloc( void )}}
This @valloc@ is a simplified polymorphic form of the default @valloc@ (FIX ME: cite @valloc@). It takes no parameters, compared to the default @valloc@ that takes one parameter.
\paragraph{Usage}
@valloc@ takes no parameters.
It returns a dynamic object of the size of type @T@, aligned to the page size. On failure, it returns a @NULL@ pointer.

\subsection{\lstinline{T * pvalloc( void )}}
\paragraph{Usage}
@pvalloc@ takes no parameters.
It returns a dynamic object whose size is calculated by rounding up the size of type @T@; the returned object is also aligned to the page size. On failure, it returns a @NULL@ pointer.

\subsection{Alloc Interface}
In addition to improving the allocator interface both for \CFA and for our standalone uHeap allocator in C, we also added a new @alloc@ interface in \CFA that increases the usability of dynamic memory allocation.
This interface helps programmers in three major ways.
\begin{itemize}
… …
\item
Parameter Positions: the @alloc@ interface frees programmers from remembering parameter positions in calls to routines.
\item
Object Size: the @alloc@ interface does not require the programmer to mention the object size, as \CFA allows the allocator to determine the object size from the return type of the @alloc@ call.
\end{itemize}

The @alloc@ interface uses polymorphism, backtick routines (FIX ME: cite backtick), and the ttype parameters of \CFA (FIX ME: cite ttype) to provide a very simple dynamic memory allocation interface to programmers. The new interface has just one routine, named @alloc@, that can be used to perform a wide range of dynamic allocations.
The parameters use backtick functions to provide a named-parameter-like feature for our @alloc@ interface, so that programmers do not have to remember parameter positions in @alloc@ calls, except for the position of the dimension (@dim@) parameter.

\subsection{Routine: \lstinline{T * alloc( ... )}}
A call to @alloc@ without any parameters returns one dynamically allocated object of the size of type @T@.
Only the dimension (@dim@) parameter for array allocation has a fixed position in the @alloc@ routine: if the programmer wants to allocate an array of objects, the required number of members in the array has to be given as the first parameter to the @alloc@ call.
The @alloc@ routine accepts six kinds of arguments, and different combinations of the parameters perform different kinds of allocations. Any combination of parameters can be used together, except @`realloc@ and @`resize@, which should not be used simultaneously in one call, as this creates ambiguity about whether to reallocate or resize a currently allocated dynamic object. If both @`resize@ and @`realloc@ are used in a call to @alloc@, the latter takes effect, or unexpected results might be produced.

\paragraph{Dim}
This is the only parameter in the @alloc@ routine that has a fixed position, and it is also the only parameter that does not use a backtick function. It has to be passed in the first position of the @alloc@ call for an array allocation of objects of type @T@.
It represents the required number of members in the array allocation, as in \CFA's @aalloc@ (FIX ME: cite aalloc).
This parameter should be of type @size_t@.
458 459 Example: @int * a = alloc( 5 )@ 397 460 This call will return a dynamic array of five integers. 398 461 399 462 \paragraph{Align} 400 This parameter is position-free and uses a backtick routine align (`align). The parameter passed with `align should be of type size\_t. If the alignment parameter is not a power of two or is less than the default alignment of the allocator (which can be found using the routine libAlign in CFA) then the passed alignment parameter will be rejected and the default alignment will be used. 401 402 Example: int * b = alloc( 5 , 64`align ) 463 This parameter is position-free and uses a backtick routine align (@`align@). The parameter passed with @`align@ should be of type @size_t@. If the alignment parameter is not a power of two or is less than the default alignment of the allocator (which can be found using the routine libAlign in \CFA) then the passed alignment parameter will be rejected and the default alignment will be used. 464 465 Example: @int * b = alloc( 5 , 64`align )@ 403 466 This call will return a dynamic array of five integers. It will align the allocated object to 64 bytes. 404 467 405 468 \paragraph{Fill} 406 This parameter is position-free and uses a backtick routine fill (`fill). In case of realloc, only the extra space after copying the data in the old object will be filled with the given parameter. 469 This parameter is position-free and uses a backtick routine fill (@`fill@). In case of @realloc@, only the extra space after copying the data in the old object will be filled with the given parameter. 407 470 Three types of parameters can be passed using `fill. 408 471 409 472 \begin{itemize} 410 473 \item 411 char: A char can be passed with `fill to fill the whole dynamic allocation with the given char recursively till the end of the required allocation. 412 \item 413 Object of returned type: An object of the returned type can be passed with `fill to fill the whole dynamic allocation with the given object recursively till the end of the required allocation. 414 \item 415 Dynamic object of returned type: A dynamic object of the returned type can be passed with `fill to fill the dynamic allocation with the given dynamic object. In this case, the allocated memory is not filled recursively till the end of the allocation. The filling happens until the end of the object passed to `fill or the end of the requested allocation is reached. 416 \end{itemize} 417 418 Example: int * b = alloc( 5 , 'a'`fill ) 474 @char@: A char can be passed with @`fill@ to fill the whole dynamic allocation with the given char recursively till the end of the required allocation. 475 \item 476 Object of returned type: An object of the returned type can be passed with @`fill@ to fill the whole dynamic allocation with the given object recursively till the end of the required allocation. 477 \item 478 Dynamic object of returned type: A dynamic object of the returned type can be passed with @`fill@ to fill the dynamic allocation with the given dynamic object. In this case, the allocated memory is not filled recursively till the end of the allocation. The filling happens until the end of the object passed to @`fill@ or the end of the requested allocation is reached. 479 \end{itemize} 480 481 Example: @int * b = alloc( 5 , 'a'`fill )@ 419 482 This call will return a dynamic array of five integers. It will fill the allocated object with the character 'a' recursively till the end of the requested allocation size.
420 483 421 Example: @int * b = alloc( 5 , 4`fill )@ 422 485 This call will return a dynamic array of five integers. It will fill the allocated object with the integer 4 recursively till the end of the requested allocation size. 423 486 424 Example: @int * b = alloc( 5 , a`fill )@ where @a@ is a pointer of type int 425 488 This call will return a dynamic array of five integers. It will copy the data in @a@ to the returned object non-recursively until the end of @a@ or the end of the newly allocated object is reached. 426 489 427 490 \paragraph{Resize} 428 This parameter is position-free and uses a backtick routine resize (@`resize@). It represents the old dynamic object (oaddr) that the programmer wants to 429 492 \begin{itemize} 430 493 \item … … 435 498 fill with something. 436 499 \end{itemize} 437 The data in the old dynamic object will not be preserved in the new object. The type of the object passed to @`resize@ and the return type of the alloc call can be different. 438 439 Example: @int * b = alloc( 5 , a`resize )@ 440 503 This call will resize object a to a dynamic array that can contain 5 integers. 441 504 442 Example: @int * b = alloc( 5 , a`resize , 32`align )@ 443 506 This call will resize object a to a dynamic array that can contain 5 integers. The returned object will also be aligned to 32 bytes. 444 507 445 Example: @int * b = alloc( 5 , a`resize , 32`align , 2`fill )@ 446 509 This call will resize object a to a dynamic array that can contain 5 integers. The returned object will also be aligned to 32 bytes and will be filled with 2. 447 510 448 511 \paragraph{Realloc} 449 This parameter is position-free and uses a backtick routine @realloc@ (@`realloc@). It represents the old dynamic object (oaddr) that the programmer wants to 450 513 \begin{itemize} 451 514 \item … … 456 519 fill with something. 457 520 \end{itemize} 458 The data in the old dynamic object will be preserved in the new object. The type of the object passed to @`realloc@ and the return type of the alloc call cannot be different. 459 460 Example: @int * b = alloc( 5 , a`realloc )@ 461 524 This call will realloc object a to a dynamic array that can contain 5 integers. 462 525 463 Example: @int * b = alloc( 5 , a`realloc , 32`align )@ 464 527 This call will realloc object a to a dynamic array that can contain 5 integers. The returned object will also be aligned to 32 bytes. 465 528 466 Example: @int * b = alloc( 5 , a`realloc , 32`align , 2`fill )@ 467 530 This call will realloc object a to a dynamic array that can contain 5 integers.
The returned object will also be aligned to 32 bytes. The extra space after copying the data of a to the returned object will be filled with 2. -
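Taken together, the @`align@, @`fill@, @`resize@, and @`realloc@ arguments compose freely in a single alloc call. The following \CFA sketch simply collects the inline examples above into one fragment; the variable names are illustrative only, and the pointer declarations assume alloc returns @T *@ as in the routine signatures above.
\begin{lstlisting}
int * a = alloc();                                  // one dynamic int
int * b = alloc( 5 );                               // dynamic array of 5 ints
int * c = alloc( 5, 64`align );                     // 5 ints, aligned to 64 bytes
int * d = alloc( 5, 'a'`fill );                     // 5 ints, filled with character 'a'
int * e = alloc( 5, d`realloc, 32`align, 2`fill );  // reallocate d: data preserved, 32-byte aligned, extra space filled with 2
free( a );  free( b );  free( c );  free( e );      // d is consumed by `realloc and assumed invalid afterwards
\end{lstlisting}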
doc/theses/mubeen_zulfiqar_MMath/background.tex
ref3c383 rd672350 1 \chapter{Background} 2 3 \noindent 1 \begin{comment} 4 2 ==================== 5 6 3 Writing Points: 7 4 \begin{itemize} … … 19 16 Features and limitations. 20 17 \end{itemize} 21 22 \noindent 23 ==================== 24 25 \section{Background} 26 27 % FIXME: cite wasik 28 \cite{wasik.thesis} 29 30 \subsection{Memory Allocation} 31 With dynamic allocation being an important feature of C, there are many standalone memory allocators that have been designed for different purposes. For this thesis, we chose 7 of the most popular and widely used memory allocators. 32 33 \paragraph{dlmalloc} 34 dlmalloc (FIX ME: cite allocator) is a thread-safe allocator that is single threaded and single heap. dlmalloc maintains free-lists of different sizes to store freed dynamic memory. (FIX ME: cite wasik) 35 36 \paragraph{hoard} 37 Hoard (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and using a heap layer framework. It has per-thred heaps that have thread-local free-lists, and a gloabl shared heap. (FIX ME: cite wasik) 38 39 \paragraph{jemalloc} 40 jemalloc (FIX ME: cite allocator) is a thread-safe allocator that uses multiple arenas. Each thread is assigned an arena. Each arena has chunks that contain contagious memory regions of same size. An arena has multiple chunks that contain regions of multiple sizes. 41 42 \paragraph{ptmalloc} 43 ptmalloc (FIX ME: cite allocator) is a modification of dlmalloc. It is a thread-safe multi-threaded memory allocator that uses multiple heaps. ptmalloc heap has similar design to dlmalloc's heap. 44 45 \paragraph{rpmalloc} 46 rpmalloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses per-thread heap. Each heap has multiple size-classes and each size-calss contains memory regions of the relevant size. 47 48 \paragraph{tbb malloc} 49 tbb malloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses private heap for each thread. Each private-heap has multiple bins of different sizes. Each bin contains free regions of the same size. 50 51 \paragraph{tc malloc} 52 tcmalloc (FIX ME: cite allocator) is a thread-safe allocator. It uses per-thread cache to store free objects that prevents contention on shared resources in multi-threaded application. A central free-list is used to refill per-thread cache when it gets empty. 53 54 \subsection{Benchmarks} 55 There are multiple benchmarks that are built individually and evaluate different aspects of a memory allocator. But, there is not standard set of benchamrks that can be used to evaluate multiple aspects of memory allocators. 56 57 \paragraph{threadtest} 58 (FIX ME: cite benchmark and hoard) Each thread repeatedly allocates and then deallocates 100,000 objects. Runtime of the benchmark evaluates its efficiency. 59 60 \paragraph{shbench} 61 (FIX ME: cite benchmark and hoard) Each thread allocates and randomly frees a number of random-sized objects. It is a stress test that also uses runtime to determine efficiency of the allocator. 62 63 \paragraph{larson} 64 (FIX ME: cite benchmark and hoard) Larson simulates a server environment. Multiple threads are created where each thread allocator and free a number of objects within a size range. Some objects are passed from threads to the child threads to free. It caluculates memory operations per second as an indicator of memory allocator's performance. 
18 \end{comment} 19 20 \chapter[Background]{Background\footnote{Part of this chapter draws from similar background work in~\cite{wasik.thesis} with many updates.}} 21 22 23 A program dynamically allocates and deallocates the storage for a variable, referred to as an \newterm{object}, through calls such as @malloc@ and @free@ in C, and @new@ and @delete@ in \CC. 24 Space for each allocated object comes from the dynamic-allocation zone. 25 A \newterm{memory allocator} contains a complex data-structure and code that manages the layout of objects in the dynamic-allocation zone. 26 The management goals are to make allocation/deallocation operations as fast as possible while densely packing objects to make efficient use of memory. 27 Objects in C/\CC cannot be moved to aid the packing process, only adjacent free storage can be \newterm{coalesced} into larger free areas. 28 The allocator grows or shrinks the dynamic-allocation zone to obtain storage for objects and reduce memory usage via operating-system calls, such as @mmap@ or @sbrk@ in UNIX. 29 30 31 \section{Allocator Components} 32 \label{s:AllocatorComponents} 33 34 \VRef[Figure]{f:AllocatorComponents} shows the two important data components for a memory allocator, management and storage, collectively called the \newterm{heap}. 35 The \newterm{management data} is a data structure located at a known memory address and contains all information necessary to manage the storage data. 36 The management data starts with fixed-sized information in the static-data memory that flows into the dynamic-allocation memory. 37 The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}. 38 Allocated objects (white) are variable sized, and allocated and maintained by the program; 39 \ie only the program knows the location of allocated storage, not the memory allocator. 40 \begin{figure}[h] 41 \centering 42 \input{AllocatorComponents} 43 \caption{Allocator Components (Heap)} 44 \label{f:AllocatorComponents} 45 \end{figure} 46 Freed objects (light grey) are memory deallocated by the program, which are linked into one or more lists facilitating easy location for new allocations. 47 Often the free list is chained internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks. 48 Reserved memory (dark grey) is one or more blocks of memory obtained from the operating system but not yet allocated to the program; 49 if there are multiple reserved blocks, they are also chained together, usually internally. 50 51 Allocated and freed objects typically have additional management data embedded within them. 52 \VRef[Figure]{f:AllocatedObject} shows an allocated object with a header, trailer, and alignment padding and spacing around the object. 53 The header contains information about the object, \eg size, type, etc. 54 The trailer may be used to simplify an allocation implementation, \eg coalescing, and/or for security purposes to mark the end of an object. 55 An object may be preceded by padding to ensure proper alignment. 56 Some algorithms quantize allocation requests into distinct sizes resulting in additional spacing after objects less than the quantized value. 57 When padding and spacing are necessary, neither can be used to satisfy a future allocation request while the current allocation exists. 58 A free object also contains management data, \eg size, chaining, etc. 
59 The amount of management data for a free node defines the minimum allocation size, \eg if 16 bytes are needed for a free-list node, any allocation request less than 16 bytes must be rounded up, otherwise the free list cannot use internal chaining. 60 The information in an allocated or freed object is overwritten when it transitions from allocated to freed and vice-versa by new management information and possibly data. 61 62 \begin{figure} 63 \centering 64 \input{AllocatedObject} 65 \caption{Allocated Object} 66 \label{f:AllocatedObject} 67 \end{figure} 68 69 70 \section{Single-Threaded Memory-Allocator} 71 \label{s:SingleThreadedMemoryAllocator} 72 73 A single-threaded memory-allocator does not run any threads itself, but is used by a single-threaded program. 74 Because the memory allocator is only executed by a single thread, concurrency issues do not exist. 75 The primary issues in designing a single-threaded memory-allocator are fragmentation and locality. 76 77 78 \subsection{Fragmentation} 79 \label{s:Fragmentation} 80 81 Fragmentation is memory requested from the operating system but not used by the program; 82 hence, allocated objects are not fragmentation. 83 \VRef[Figure]{f:InternalExternalFragmentation}) shows fragmentation is divided into two forms: internal or external. 84 85 \begin{figure} 86 \centering 87 \input{IntExtFragmentation} 88 \caption{Internal and External Fragmentation} 89 \label{f:InternalExternalFragmentation} 90 \end{figure} 91 92 \newterm{Internal fragmentation} is memory space that is allocated to the program, but is not intended to be accessed by the program, such as headers, trailers, padding, and spacing around an allocated object. 93 This memory is typically used by the allocator for management purposes or required by the architecture for correctness, \eg alignment. 94 Internal fragmentation is problematic when management space is a significant proportion of an allocated object. 95 For example, if internal fragmentation is as large as the object being managed, then the memory usage for that object is doubled. 96 An allocator should strive to keep internal management information to a minimum. 97 98 \newterm{External fragmentation} is all memory space reserved from the operating system but not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes freed objects, all external management data, and reserved memory. 99 This memory is problematic in two ways: heap blowup and highly fragmented memory. 100 \newterm{Heap blowup} occurs when memory freed by the program is not reused for future allocations leading to potentially unbounded external fragmentation growth~\cite{Berger00}. 101 Heap blowup can occur due to allocator policies that are too restrictive in reusing freed memory and/or no coalescing of free storage. 102 Memory can become \newterm{highly fragmented} after multiple allocations and deallocations of objects. 103 \VRef[Figure]{f:MemoryFragmentation} shows an example of how a small block of memory fragments as objects are allocated and deallocated over time. 104 Blocks of free memory become smaller and non-contiguous making them less useful in serving allocation requests. 105 Memory is highly fragmented when the sizes of most free blocks are unusable. 106 For example, \VRef[Figure]{f:Contiguous} and \VRef[Figure]{f:HighlyFragmented} have the same quantity of external fragmentation, but \VRef[Figure]{f:HighlyFragmented} is highly fragmented. 
107 If there is a request to allocate a large object, \VRef[Figure]{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while \VRef[Figure]{f:HighlyFragmented} likely has to request more memory from the operating system. 108 109 \begin{figure} 110 \centering 111 \input{MemoryFragmentation} 112 \caption{Memory Fragmentation} 113 \label{f:MemoryFragmentation} 114 \vspace{10pt} 115 \subfigure[Contiguous]{ 116 \input{ContigFragmentation} 117 \label{f:Contiguous} 118 } % subfigure 119 \subfigure[Highly Fragmented]{ 120 \input{NonContigFragmentation} 121 \label{f:HighlyFragmented} 122 } % subfigure 123 \caption{Fragmentation Quality} 124 \label{f:FragmentationQuality} 125 \end{figure} 126 127 For a single-threaded memory allocator, three basic approaches for controlling fragmentation have been identified~\cite{Johnstone99}. 128 The first approach is a \newterm{sequential-fit algorithm} with one list of free objects that is searched for a block large enough to fit a requested object size. 129 Different search policies determine the free object selected, \eg the first free object large enough or closest to the requested size. 130 Any storage larger than the request can become spacing after the object or be split into a smaller free object. 131 The cost of the search depends on the shape and quality of the free list, \eg a linear versus a binary-tree free-list, a sorted versus unsorted free-list. 132 133 The second approach is a \newterm{segregated} or \newterm{binning algorithm} with a set of lists for different sized freed objects. 134 When an object is allocated, the requested size is rounded up to the nearest bin-size, possibly with spacing after the object. 135 A binning algorithm is fast at finding free memory of the appropriate size and allocating it, since the first free object on the free list is used. 136 The fewer bin-sizes, the fewer lists need to be searched and maintained; 137 however, the bin sizes are less likely to closely fit the requested object size, leading to more internal fragmentation. 138 The more bin-sizes, the longer the search and the less likely free objects are to be reused, leading to more external fragmentation and potentially heap blowup. 139 A variation of the binning algorithm allows objects to be allocated to the requested size, but when an object is freed, it is placed on the free list of the next smallest or equal bin-size. 140 For example, with bin sizes of 8 and 16 bytes, a request for 12 bytes allocates only 12 bytes, but when the object is freed, it is placed on the 8-byte bin-list. 141 For subsequent requests, the bin free-lists contain objects of different sizes, ranging from one bin-size to the next (8-16 in this example), and a sequential-fit algorithm may be used to find an object large enough for the requested size on the associated bin list. 142 143 The third approach is \newterm{splitting} and \newterm{coalescing algorithms}. 144 When an object is allocated, if there are no free objects of the requested size, a larger free object may be split into two smaller objects to satisfy the allocation request without obtaining more memory from the operating system. 145 For example, in the buddy system, a block of free memory is split into two equal chunks, one of those chunks is again split into two equal chunks, and so on until a block just large enough to fit the requested object is created. 
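As a concrete illustration of the buddy-system splitting just described, the following C-style sketch (not drawn from any particular allocator; @MIN_BLOCK@ is an assumed lower bound) computes the power-of-two block size that a request is rounded up to; a larger free block is halved repeatedly until a block of this size is produced.
\begin{lstlisting}
#include <stddef.h>

enum { MIN_BLOCK = 16 };                       // assumed smallest buddy block

// Round a request up to the smallest power-of-two block that fits it.
static size_t buddy_block_size( size_t size ) {
	size_t block = MIN_BLOCK;
	while ( block < size ) block <<= 1;        // double until the request fits
	return block;
}
// A free block of size 2*block is split into two buddies of size block,
// and splitting repeats until buddy_block_size( request ) is reached.
\end{lstlisting}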
146 When an object is deallocated it is coalesced with the objects immediately before and after it in memory, if they are free, turning them into one larger object. 147 Coalescing can be done eagerly at each deallocation or lazily when an allocation cannot be fulfilled. 148 In all cases, coalescing increases allocation latency, hence some allocations can cause unbounded delays during coalescing. 149 While coalescing does not reduce external fragmentation, the coalesced blocks improve fragmentation quality so future allocations are less likely to cause heap blowup. 150 Splitting and coalescing can be used with other algorithms to avoid highly fragmented memory. 151 152 153 \subsection{Locality} 154 \label{s:Locality} 155 156 The principle of locality recognizes that programs tend to reference a small set of data, called a working set, for a certain period of time, where a working set is composed of temporal and spatial accesses~\cite{Denning05}. 157 Temporal clustering implies a group of objects are accessed repeatedly within a short time period, while spatial clustering implies a group of objects physically close together (nearby addresses) are accessed repeatedly within a short time period. 158 Temporal locality commonly occurs during an iterative computation with a fix set of disjoint variables, while spatial locality commonly occurs when traversing an array. 159 160 Hardware takes advantage of temporal and spatial locality through multiple levels of caching (\ie memory hierarchy). 161 When an object is accessed, the memory physically located around the object is also cached with the expectation that the current and nearby objects will be referenced within a short period of time. 162 For example, entire cache lines are transferred between memory and cache and entire virtual-memory pages are transferred between disk and memory. 163 A program exhibiting good locality has better performance due to fewer cache misses and page faults\footnote{With the advent of large RAM memory, paging is becoming less of an issue in modern programming.}. 164 165 Temporal locality is largely controlled by how a program accesses its variables~\cite{Feng05}. 166 Nevertheless, a memory allocator can have some indirect influence on temporal locality and largely dictates spatial locality. 167 For temporal locality, an allocator can return storage for new allocations that was just freed as these memory locations are still \emph{warm} in the memory hierarchy. 168 For spatial locality, an allocator can place objects used together close together in memory, so the working set of the program fits into the fewest possible cache lines and pages. 169 However, usage patterns are different for every program as is the underlying hardware memory architecture; 170 hence, no general-purpose memory-allocator can provide ideal locality for every program on every computer. 171 172 There are a number of ways a memory allocator can degrade locality by increasing the working set. 173 For example, a memory allocator may access multiple free objects before finding one to satisfy an allocation request (\eg sequential-fit algorithm). 174 If there are a (large) number of objects accessed in very different areas of memory, the allocator may perturb the program's memory hierarchy causing multiple cache or page misses~\cite{Grunwald93}. 175 Another way locality can be degraded is by spatially separating related data. 
176 For example, in a binning allocator, objects of different sizes are allocated from different bins that may be located in different pages of memory. 177 178 179 \section{Multi-Threaded Memory-Allocator} 180 \label{s:MultiThreadedMemoryAllocator} 181 182 A multi-threaded memory-allocator does not run any threads itself, but is used by a multi-threaded program. 183 In addition to single-threaded design issues of locality and fragmentation, a multi-threaded allocator may be simultaneously accessed by multiple threads, and hence, must deal with concurrency issues such as mutual exclusion, false sharing, and additional forms of heap blowup. 184 185 186 \subsection{Mutual Exclusion} 187 \label{s:MutualExclusion} 188 189 \newterm{Mutual exclusion} provides sequential access to the shared management data of the heap. 190 There are two performance issues for mutual exclusion. 191 First is the overhead necessary to perform (at least) a hardware atomic operation every time a shared resource is accessed. 192 Second is when multiple threads contend for a shared resource simultaneously, and hence, some threads must wait until the resource is released. 193 Contention can be reduced in a number of ways: 194 using multiple fine-grained locks versus a single lock, spreading the contention across a number of locks; 195 using trylock and generating new storage if the lock is busy, yielding a classic space versus time tradeoff; 196 using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Oyama99}. 197 However, all of these approaches have degenerate cases where contention occurs. 198 199 200 \subsection{False Sharing} 201 \label{s:FalseSharing} 202 203 False sharing is a dynamic phenomenon leading to cache thrashing. 204 When two or more threads on separate CPUs simultaneously change different objects sharing a cache line, the change invalidates the other thread's associated cache, even though these threads may be uninterested in the other modified object. 205 False sharing can occur in three different ways: program induced, allocator-induced active, and allocator-induced passive; 206 a memory allocator can only affect the latter two. 207 208 \paragraph{\newterm{Program-induced false-sharing}} occurs when one thread passes an object sharing a cache line to another thread, and both threads modify the respective objects. 209 \VRef[Figure]{f:ProgramInducedFalseSharing} shows when Task$_1$ passes Object$_2$ to Task$_2$, a false-sharing situation forms when Task$_1$ modifies Object$_1$ and Task$_2$ modifies Object$_2$. 210 Changes to Object$_1$ invalidate CPU$_2$'s cache line, and changes to Object$_2$ invalidate CPU$_1$'s cache line. 211 212 \begin{figure} 213 \centering 214 \subfigure[Program-Induced False-Sharing]{ 215 \input{ProgramFalseSharing} 216 \label{f:ProgramInducedFalseSharing} 217 } \\ 218 \vspace{5pt} 219 \subfigure[Allocator-Induced Active False-Sharing]{ 220 \input{AllocInducedActiveFalseSharing} 221 \label{f:AllocatorInducedActiveFalseSharing} 222 } \\ 223 \vspace{5pt} 224 \subfigure[Allocator-Induced Passive False-Sharing]{ 225 \input{AllocInducedPassiveFalseSharing} 226 \label{f:AllocatorInducedPassiveFalseSharing} 227 } % subfigure 228 \caption{False Sharing} 229 \label{f:FalseSharing} 230 \end{figure} 231 232 \paragraph{\newterm{Allocator-induced active false-sharing}} occurs when objects are allocated within the same cache line but to different threads. 
233 For example, in \VRef[Figure]{f:AllocatorInducedActiveFalseSharing}, each task allocates an object and loads a cache-line of memory into its associated cache. 234 Again, changes to Object$_1$ invalidate CPU$_2$'s cache line, and changes to Object$_2$ invalidate CPU$_1$'s cache line. 235 236 \paragraph{\newterm{Allocator-induced passive false-sharing}} is another form of allocator-induced false-sharing caused by program-induced false-sharing. 237 When an object in a program-induced false-sharing situation is deallocated, a future allocation of that object may cause passive false-sharing. 238 For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, Task$_1$ passes Object$_2$ to Task$_2$, and Task$_2$ subsequently deallocates Object$_2$. 239 Allocator-induced passive false-sharing occurs when Object$_2$ is reallocated to Task$_2$ while Task$_1$ is still using Object$_1$. 240 241 242 \subsection{Heap Blowup} 243 \label{s:HeapBlowup} 244 245 In a multi-threaded program, heap blowup can occur when memory freed by one thread is inaccessible to other threads due to the allocation strategy. 246 Specific examples are presented in later sections. 247 248 249 \section{Multi-Threaded Memory-Allocator Features} 250 \label{s:MultiThreadedMemoryAllocatorFeatures} 251 252 The following features are used in the construction of multi-threaded memory-allocators: 253 \begin{list}{\arabic{enumi}.}{\usecounter{enumi}\topsep=0.5ex\parsep=0pt\itemsep=0pt} 254 \item multiple heaps 255 \begin{list}{\alph{enumii})}{\usecounter{enumii}\topsep=0.5ex\parsep=0pt\itemsep=0pt} 256 \item with or without a global heap 257 \item with or without ownership 258 \end{list} 259 \item object containers 260 \begin{list}{\alph{enumii})}{\usecounter{enumii}\topsep=0.5ex\parsep=0pt\itemsep=0pt} 261 \item with or without ownership 262 \item fixed or variable sized 263 \item global or local free-lists 264 \end{list} 265 \item hybrid private/public heap 266 \item allocation buffer 267 \item lock-free operations 268 \end{list} 269 The first feature, multiple heaps, pertains to different kinds of heaps. 270 The second feature, object containers, pertains to the organization of objects within the storage area. 271 The remaining features apply to different parts of the allocator design or implementation. 272 273 274 \section{Multiple Heaps} 275 \label{s:MultipleHeaps} 276 277 A single-threaded allocator has at most one thread and heap, while a multi-threaded allocator has potentially multiple threads and heaps. 278 The multiple threads cause complexity, and multiple heaps are a mechanism for dealing with the complexity. 279 The spectrum ranges from multiple threads using a single heap, denoted as T:1 (see \VRef[Figure]{f:SingleHeap}), to multiple threads sharing multiple heaps, denoted as T:H (see \VRef[Figure]{f:SharedHeaps}), to one thread per heap, denoted as 1:1 (see \VRef[Figure]{f:PerThreadHeap}), which is almost back to a single-threaded allocator. 280 281 282 \paragraph{T:1 model} where all threads allocate and deallocate objects from one heap. 283 Memory is obtained from the freed objects, or reserved memory in the heap, or from the operating system (OS); 284 the heap may also return freed memory to the operating system. 285 The arrows indicate the direction memory conceptually moves for each kind of operation: allocation moves memory along the path from the heap/operating-system to the user application, while deallocation moves memory along the path from the application back to the heap/operating-system. 
286 To safely handle concurrency, a single heap uses locking to provide mutual exclusion. 287 Whether using a single lock for all heap operations or fine-grained locking for different operations, a single heap may be a significant source of contention for programs with a large amount of memory allocation. 288 289 \begin{figure} 290 \centering 291 \subfigure[T:1]{ 292 % \input{SingleHeap.pstex_t} 293 \input{SingleHeap} 294 \label{f:SingleHeap} 295 } % subfigure 296 \vrule 297 \subfigure[T:H]{ 298 % \input{MultipleHeaps.pstex_t} 299 \input{SharedHeaps} 300 \label{f:SharedHeaps} 301 } % subfigure 302 \vrule 303 \subfigure[1:1]{ 304 % \input{MultipleHeapsGlobal.pstex_t} 305 \input{PerThreadHeap} 306 \label{f:PerThreadHeap} 307 } % subfigure 308 \caption{Multiple Heaps, Thread:Heap Relationship} 309 \end{figure} 310 311 312 \paragraph{T:H model} where each thread allocates storage from several heaps depending on certain criteria, with the goal of reducing contention by spreading allocations/deallocations across the heaps. 313 The decision on when to create a new heap and which heap a thread allocates from depends on the allocator design. 314 The performance goal is to reduce the ratio of heaps to threads. 315 In general, locking is required, since more than one thread may concurrently access a heap during its lifetime, but contention is reduced because fewer threads access a specific heap. 316 317 For example, multiple heaps are managed in a pool, starting with a single or a fixed number of heaps that increase\-/decrease depending on contention\-/space issues. 318 At creation, a thread is associated with a heap from the pool. 319 When the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool. 320 If an unlocked heap is found, the thread changes its association and uses that heap. 321 If all heaps are locked, the thread may create a new heap, use it, and then place the new heap into the pool; 322 or the thread can block waiting for a heap to become available. 323 While the heap-pool approach often minimizes the number of extant heaps, the worse case can result in more heaps than threads; 324 \eg if the number of threads is large at startup with many allocations creating a large number of heaps and then the number of threads reduces. 325 326 Threads using multiple heaps need to determine the specific heap to access for an allocation/deallocation, \ie association of thread to heap. 327 A number of techniques are used to establish this association. 328 The simplest approach is for each thread to have a pointer to its associated heap (or to administrative information that points to the heap), and this pointer changes if the association changes. 329 For threading systems with thread-local storage, the heap pointer is created using this mechanism; 330 otherwise, the heap routines must simulate thread-local storage using approaches like hashing the thread's stack-pointer or thread-id to find its associated heap. 331 332 The storage management for multiple heaps is more complex than for a single heap (see \VRef[Figure]{f:AllocatorComponents}). 333 \VRef[Figure]{f:MultipleHeapStorage} illustrates the general storage layout for multiple heaps. 334 Allocated and free objects are labelled by the thread or heap they are associated with. 335 (Links between free objects are removed for simplicity.) 336 The management information in the static zone must be able to locate all heaps in the dynamic zone. 
337 The management information for the heaps must reside in the dynamic-allocation zone if there are a variable number. 338 Each heap in the dynamic zone is composed of a list of a free objects and a pointer to its reserved memory. 339 An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory. 340 Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur. 341 Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area. 342 343 \begin{figure} 344 \centering 345 \input{MultipleHeapsStorage} 346 \caption{Multiple-Heap Storage} 347 \label{f:MultipleHeapStorage} 348 \end{figure} 349 350 Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup. 351 The external fragmentation experienced by a program with a single heap is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory. 352 Additionally, objects freed by one heap cannot be reused by other threads, except indirectly by returning free memory to the operating system, which can be expensive. 353 (Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.) 354 In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused. 355 356 Adding a \newterm{global heap} (G) attempts to reduce the cost of obtaining/returning memory among heaps (sharing) by buffering storage within the application address-space. 357 Now, each heap obtains and returns storage to/from the global heap rather than the operating system. 358 Storage is obtained from the global heap only when a heap allocation cannot be fulfilled, and returned to the global heap when a heap's free memory exceeds some threshold. 359 Similarly, the global heap buffers this memory, obtaining and returning storage to/from the operating system as necessary. 360 The global heap does not have its own thread and makes no internal allocation requests; 361 instead, it uses the application thread, which called one of the multiple heaps and then the global heap, to perform operations. 362 Hence, the worst-case cost of a memory operation includes all these steps. 363 With respect to heap blowup, the global heap provides an indirect mechanism to move free memory among heaps, which usually has a much lower cost than interacting with the operating system to achieve the same goal and is independent of the mechanism used by the operating system to present dynamic memory to an address space. 364 365 However, since any thread may indirectly perform a memory operation on the global heap, it is a shared resource that requires locking. 366 A single lock can be used to protect the global heap or fine-grained locking can be used to reduce contention. 367 In general, the cost is minimal since the majority of memory operations are completed without the use of the global heap. 368 369 370 \paragraph{1:1 model (thread heaps)} where each thread has its own heap, which eliminates most contention and locking because threads seldom accesses another thread's heap (see ownership in \VRef{s:Ownership}). 
371 An additional benefit of thread heaps is improved locality due to better memory layout. 372 As each thread only allocates from its heap, all objects for a thread are consolidated in the storage area for that heap, better utilizing each CPUs cache and accessing fewer pages. 373 In contrast, the T:H model spreads each thread's objects over a larger area in different heaps. 374 Thread heaps can also eliminate allocator-induced active false-sharing, if memory is acquired so it does not overlap at crucial boundaries with memory for another thread's heap. 375 For example, assume page boundaries coincide with cache line boundaries, then if a thread heap always acquires pages of memory, no two threads share a page or cache line unless pointers are passed among them. 376 Hence, allocator-induced active false-sharing in \VRef[Figure]{f:AllocatorInducedActiveFalseSharing} cannot occur because the memory for thread heaps never overlaps. 377 378 When a thread terminates, there are two options for handling its heap. 379 First is to free all objects in the heap to the global heap and destroy the thread heap. 380 Second is to place the thread heap on a list of available heaps and reuse it for a new thread in the future. 381 Destroying the thread heap immediately may reduce external fragmentation sooner, since all free objects are freed to the global heap and may be reused by other threads. 382 Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap. 383 384 385 \subsection{User-Level Threading} 386 387 It is possible to use any of the heap models with user-level (M:N) threading. 388 However, an important goal of user-level threading is for fast operations (creation/termination/context-switching) by not interacting with the operating system, which allows the ability to create large numbers of high-performance interacting threads ($>$ 10,000). 389 It is difficult to retain this goal, if the user-threading model is directly involved with the heap model. 390 \VRef[Figure]{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model provided by the language runtime. 391 Hence, a user thread allocates/deallocates from/to the heap of the kernel thread on which it is currently executing. 392 393 \begin{figure} 394 \centering 395 \input{UserKernelHeaps} 396 \caption{User-Level Kernel Heaps} 397 \label{f:UserLevelKernelHeaps} 398 \end{figure} 399 400 Adopting this model results in a subtle problem with shared heaps. 401 With kernel threading, an operation that is started by a kernel thread is always completed by that thread. 402 For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap even if preempted. 403 Any correctness locking associated with the shared heap is preserved across preemption. 404 405 However, this correctness property is not preserved for user-level threading. 406 A user thread can start an allocation/deallocation on one kernel thread, be preempted (time slice), and continue running on a different kernel thread to complete the operation~\cite{Dice02}. 407 When the user thread continues on the new kernel thread, it may have pointers into the previous kernel-thread's heap and hold locks associated with it. 
408 To get the same kernel-thread safety, time slicing must be disabled/\-enabled around these operations, so the user thread cannot jump to another kernel thread. 409 However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is rare (10--100 milliseconds). 410 Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically. 411 Occasionally ignoring a preemption should be benign. 412 413 414 \begin{figure} 415 \centering 416 \subfigure[Ownership]{ 417 \input{MultipleHeapsOwnership} 418 } % subfigure 419 \hspace{0.25in} 420 \subfigure[No Ownership]{ 421 \input{MultipleHeapsNoOwnership} 422 } % subfigure 423 \caption{Heap Ownership} 424 \label{f:HeapsOwnership} 425 \end{figure} 426 427 428 \subsection{Ownership} 429 \label{s:Ownership} 430 431 \newterm{Ownership} defines which heap an object is returned-to on deallocation. 432 If a thread returns an object to the heap it was originally allocated from, the heap has ownership of its objects. 433 Alternatively, a thread can return an object to the heap it is currently allocating from, which can be any heap accessible during a thread's lifetime. 434 \VRef[Figure]{f:HeapsOwnership} shows an example of multiple heaps (minus the global heap) with and without ownership. 435 Again, the arrows indicate the direction memory conceptually moves for each kind of operation. 436 For the 1:1 thread:heap relationship, a thread only allocates from its own heap, and without ownership, a thread only frees objects to its own heap, which means the heap is private to its owner thread and does not require any locking, called a \newterm{private heap}. 437 For the T:1/T:H models with or without ownership or the 1:1 model with ownership, a thread may free objects to different heaps, which makes each heap publicly accessible to all threads, called a \newterm{public heap}. 438 439 \VRef[Figure]{f:MultipleHeapStorageOwnership} shows the effect of ownership on storage layout. 440 (For simplicity assume the heaps all use the same size of reserves storage.) 441 In contrast to \VRef[Figure]{f:MultipleHeapStorage}, each reserved area used by a heap only contains free storage for that particular heap because threads must return free objects back to the owner heap. 442 Again, because multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur. 443 The exception is for the 1:1 model if reserved memory does not overlap a cache-line because all allocated storage within a used area is associated with a single thread. 444 In this case, there is no allocator-induced active false-sharing (see \VRef[Figure]{f:AllocatorInducedActiveFalseSharing}) because two adjacent allocated objects used by different threads cannot share a cache-line. 445 As well, there is no allocator-induced passive false-sharing (see \VRef[Figure]{f:AllocatorInducedActiveFalseSharing}) because two adjacent allocated objects used by different threads cannot occur because free objects are returned to the owner heap. 446 % Passive false-sharing may still occur, if delayed ownership is used (see below). 
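A minimal C sketch of the deallocation path implied by ownership is shown below; the @Heap@ and @Header@ layouts and the field names are hypothetical, standing in for whatever a real allocator maintains, and it assumes each object is preceded by a header recording its owner heap. Only the remote path needs the lock; a batch list, as discussed below, would replace the locked push.
\begin{lstlisting}
#include <pthread.h>

// Hypothetical layouts for this sketch only.
typedef struct Header { struct Heap * owner; struct Header * next; } Header;
typedef struct Heap {
	Header * freeList;            // private free-list, touched only by the owner thread
	Header * publicFreeList;      // objects returned by other threads (requires locking)
	pthread_mutex_t lock;
} Heap;

static void heap_free( Heap * mine, void * addr ) {
	Header * h = (Header *)addr - 1;                  // header precedes the object
	Heap * owner = h->owner;
	if ( owner == mine ) {                            // local free: lock-free push
		h->next = mine->freeList;  mine->freeList = h;
	} else {                                          // remote free: return to the owner heap
		pthread_mutex_lock( &owner->lock );
		h->next = owner->publicFreeList;  owner->publicFreeList = h;
		pthread_mutex_unlock( &owner->lock );
	}
}
\end{lstlisting}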
447 448 \begin{figure} 449 \centering 450 \input{MultipleHeapsOwnershipStorage.pstex_t} 451 \caption{Multiple-Heap Storage with Ownership} 452 \label{f:MultipleHeapStorageOwnership} 453 \end{figure} 454 455 The main advantage of ownership is preventing heap blowup by returning storage for reuse by the owner heap. 456 Ownership prevents the classical problem where one thread performs allocations from one heap, passes the object to another thread, and the receiving thread deallocates the object to another heap, hence draining the initial heap of storage. 457 As well, allocator-induced passive false-sharing is eliminated because returning an object to its owner heap means it can never be allocated to another thread. 458 For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, the deallocation by Task$_2$ returns Object$_2$ back to Task$_1$'s heap; 459 hence a subsequent allocation by Task$_2$ cannot return this storage. 460 The disadvantage of ownership is deallocating to another task's heap so heaps are no longer private and require locks to provide safe concurrent access. 461 462 Object ownership can be immediate or delayed, meaning free objects may be batched on a separate free list either by the returning or receiving thread. 463 While the returning thread can batch objects, batching across multiple heaps is complex and there is no obvious time when to push back to the owner heap. 464 It is better for returning threads to immediately return to the receiving thread's batch list as the receiving thread has better knowledge when to incorporate the batch list into its free pool. 465 Batching leverages the fact that most allocation patterns use the contention-free fast-path so locking on the batch list is rare for both the returning and receiving threads. 466 467 It is possible for heaps to steal objects rather than return them and reallocating these objects when storage runs out on a heap. 468 However, stealing can result in passive false-sharing. 469 For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, Object$_2$ may be deallocated to Task$_2$'s heap initially. 470 If Task$_2$ reallocates Object$_2$ before it is returned to its owner heap, then passive false-sharing may occur. 471 472 473 \section{Object Containers} 474 \label{s:ObjectContainers} 475 476 Bracketing every allocation with headers/trailers can result in significant internal fragmentation, as shown in \VRef[Figure]{f:ObjectHeaders}. 477 Especially if the headers contain redundant management information, \eg object size may be the same for many objects because programs only allocate a small set of object sizes. 478 As well, it can result in poor cache usage, since only a portion of the cache line is holding useful information from the program's perspective. 479 Spatial locality can also be negatively affected leading to poor cache locality~\cite{Feng05}: 480 while the header and object are together in memory, they are generally not accessed together; 481 \eg the object is accessed by the program when it is allocated, while the header is accessed by the allocator when the object is free. 
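The cost of per-object headers is easy to quantify with a small sketch; the @ObjectHeader@ layout below is hypothetical, but any header of comparable size doubles the footprint of an 8-byte object, as noted earlier.
\begin{lstlisting}
#include <stdio.h>
#include <stddef.h>

// Hypothetical per-object header: size and owner are duplicated before every object.
typedef struct ObjectHeader { size_t size; struct Heap * owner; } ObjectHeader;

int main() {
	// For an 8-byte object, a 16-byte header doubles the memory consumed, and the
	// header shares a cache line with data the program never accesses together with it.
	printf( "header %zu bytes + object 8 bytes = %zu bytes total\n",
	        sizeof(ObjectHeader), sizeof(ObjectHeader) + 8 );
}
\end{lstlisting}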
482 483 \begin{figure} 484 \centering 485 \subfigure[Object Headers]{ 486 \input{ObjectHeaders} 487 \label{f:ObjectHeaders} 488 } % subfigure 489 \subfigure[Object Container]{ 490 \input{Container} 491 \label{f:ObjectContainer} 492 } % subfigure 493 \caption{Header Placement} 494 \label{f:HeaderPlacement} 495 \end{figure} 496 497 An alternative approach factors common header/trailer information to a separate location in memory and organizes associated free storage into blocks called \newterm{object containers} (\newterm{superblocks} in~\cite{Berger00}), as in \VRef[Figure]{f:ObjectContainer}. 498 The header for the container holds information necessary for all objects in the container; 499 a trailer may also be used at the end of the container. 500 Similar to the approach described for thread heaps in \VRef{s:MultipleHeaps}, if container boundaries do not overlap with memory of another container at crucial boundaries and all objects in a container are allocated to the same thread, allocator-induced active false-sharing is avoided. 501 502 The difficulty with object containers lies in finding the object header/trailer given only the object address, since that is normally the only information passed to the deallocation operation. 503 One way to do this is to start containers on aligned addresses in memory, then truncate the lower bits of the object address to obtain the header address (or round up and subtract the trailer size to obtain the trailer address). 504 For example, if an object at address 0xFC28\,EF08 is freed and containers are aligned on 64\,KB (0x0001\,0000) addresses, then the container header is at 0xFC28\,0000. 505 506 Normally, a container has homogeneous objects of fixed size, with fixed information in the header that applies to all container objects (\eg object size and ownership). 507 This approach greatly reduces internal fragmentation since far fewer headers are required, and potentially increases spatial locality as a cache line or page holds more objects since the objects are closer together due to the lack of headers. 508 However, although similar objects are close spatially within the same container, different sized objects are further apart in separate containers. 509 Depending on the program, this may or may not improve locality. 510 If the program uses several objects from a small number of containers in its working set, then locality is improved since fewer cache lines and pages are required. 511 If the program uses many containers, there is poor locality, as both caching and paging increase. 512 Another drawback is that external fragmentation may be increased since containers reserve space for objects that may never be allocated by the program, \ie there are often multiple containers for each size only partially full. 513 However, external fragmentation can be reduced by using small containers. 514 515 Containers with heterogeneous objects implies different headers describing them, which complicates the problem of locating a specific header solely by an address. 516 A couple of solutions can be used to implement containers with heterogeneous objects. 517 However, the problem with allowing objects of different sizes is that the number of objects, and therefore headers, in a single container is unpredictable. 518 One solution allocates headers at one end of the container, while allocating objects from the other end of the container; 519 when the headers meet the objects, the container is full. 
520 Freed objects cannot be split or coalesced since this causes the number of headers to change. 521 The difficulty in this strategy remains in finding the header for a specific object; 522 in general, a search is necessary to find the object's header among the container headers. 523 A second solution combines the use of container headers and individual object headers. 524 Each object header stores the object's heterogeneous information, such as its size, while the container header stores the homogeneous information, such as the owner when using ownership. 525 This approach allows containers to hold different types of objects, but does not completely separate headers from objects. 526 The benefit of the container in this case is to reduce some redundant information that is factored into the container header. 527 528 In summary, object containers trade off internal fragmentation for external fragmentation by isolating common administration information to remove/reduce internal fragmentation, but at the cost of external fragmentation as some portion of a container may not be used and this portion is unusable for other kinds of allocations. 529 A consequence of this tradeoff is its effect on spatial locality, which can produce positive or negative results depending on program access-patterns. 530 531 532 \subsection{Container Ownership} 533 \label{s:ContainerOwnership} 534 535 Without ownership, objects in a container are deallocated to the heap currently associated with the thread that frees the object. 536 Thus, different objects in a container may be on different heap free-lists (see \VRef[Figure]{f:ContainerNoOwnershipFreelist}). 537 With ownership, all objects in a container belong to the same heap (see \VRef[Figure]{f:ContainerOwnershipFreelist}), so ownership of an object is determined by the container owner. 538 If multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur. 539 Only with the 1:1 model and ownership is active and passive false-sharing avoided (see \VRef{s:Ownership}). 540 Passive false-sharing may still occur, if delayed ownership is used. 541 542 \begin{figure} 543 \centering 544 \subfigure[No Ownership]{ 545 \input{ContainerNoOwnershipFreelist} 546 \label{f:ContainerNoOwnershipFreelist} 547 } % subfigure 548 \vrule 549 \subfigure[Ownership]{ 550 \input{ContainerOwnershipFreelist} 551 \label{f:ContainerOwnershipFreelist} 552 } % subfigure 553 \caption{Free-list Structure with Container Ownership} 554 \end{figure} 555 556 A fragmented heap has multiple containers that may be partially or completely free. 557 A completely free container can become reserved storage and be reset to allocate objects of a new size. 558 When a heap reaches a threshold of free objects, it moves some free storage to the global heap for reuse to prevent heap blowup. 559 Without ownership, when a heap frees objects to the global heap, individual objects must be passed, and placed on the global-heap's free-list. 560 Containers cannot be freed to the global heap unless completely free, because a partially full container still holds objects allocated to the program. 561 562 When a container changes ownership, the ownership of all objects within it changes as well. 563 Moving a container involves moving all objects on the heap's free-list in that container to the new owner. 564 This approach can reduce contention for the global heap, since each request for objects from the global heap returns a container rather than individual objects.
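Because ownership of an object is determined by its container, a deallocation only needs the constant-time header lookup described earlier: truncating the low-order bits of the object address. The sketch below is illustrative only; the @ContainerHeader@ fields are hypothetical and the 64\,KB container size matches the earlier example.
\begin{lstlisting}
#include <stdint.h>
#include <stddef.h>

#define CONTAINER_SIZE 0x10000u        // 64 KB containers, aligned on 64 KB boundaries

// Hypothetical container header: information shared by all objects in the container.
typedef struct ContainerHeader {
	size_t objectSize;                 // homogeneous objects => one size field
	struct Heap * owner;               // ownership recorded once per container
	struct FreeNode * freeList;        // local free-list (see container free-lists below)
} ContainerHeader;

// Truncate the low-order bits of any object address to reach its container header,
// e.g., 0xFC28EF08 & ~0xFFFF == 0xFC280000, matching the example above.
static inline ContainerHeader * containerOf( void * addr ) {
	return (ContainerHeader *)( (uintptr_t)addr & ~(uintptr_t)(CONTAINER_SIZE - 1) );
}
\end{lstlisting}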
565 566 Additional restrictions may be applied to the movement of containers to prevent active false-sharing. 567 For example, in \VRef[Figure]{f:ContainerFalseSharing1}, a container being used by Task$_1$ changes ownership, through the global heap. 568 In \VRef[Figure]{f:ContainerFalseSharing2}, when Task$_2$ allocates an object from the newly acquired container it is actively false-sharing even though no objects are passed among threads. 569 Note, once the object is freed by Task$_1$, no more false sharing can occur until the container changes ownership again. 570 To prevent this form of false sharing, container movement may be restricted to when all objects in the container are free. 571 One implementation approach that increases the freedom to return a free container to the operating system involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area. 572 573 \begin{figure} 574 \centering 575 \subfigure[]{ 576 \input{ContainerFalseSharing1} 577 \label{f:ContainerFalseSharing1} 578 } % subfigure 579 \subfigure[]{ 580 \input{ContainerFalseSharing2} 581 \label{f:ContainerFalseSharing2} 582 } % subfigure 583 \caption{Active False-Sharing using Containers} 584 \label{f:ActiveFalseSharingContainers} 585 \end{figure} 586 587 Using containers with ownership increases external fragmentation since a new container for a requested object size must be allocated separately for each thread requesting it. 588 In \VRef[Figure]{f:ExternalFragmentationContainerOwnership}, using object ownership allocates 80\% more space than without ownership. 589 590 \begin{figure} 591 \centering 592 \subfigure[No Ownership]{ 593 \input{ContainerNoOwnership} 594 } % subfigure 595 \\ 596 \subfigure[Ownership]{ 597 \input{ContainerOwnership} 598 } % subfigure 599 \caption{External Fragmentation with Container Ownership} 600 \label{f:ExternalFragmentationContainerOwnership} 601 \end{figure} 602 603 604 \subsection{Container Size} 605 \label{s:ContainerSize} 606 607 One way to control the external fragmentation caused by allocating a large container for a small number of requested objects is to vary the size of the container. 608 As described earlier, container boundaries need to be aligned on addresses that are a power of two to allow easy location of the header (by truncating lower bits). 609 Aligning containers in this manner also determines the size of the container. 610 However, the size of the container has different implications for the allocator. 611 612 The larger the container, the fewer containers are needed, and hence, the fewer headers need to be maintained in memory, improving both internal fragmentation and potentially performance. 613 However, with more objects in a container, there may be more objects that are unallocated, increasing external fragmentation. 614 With smaller containers, not only are there more containers, but a second new problem arises where objects are larger than the container. 615 In general, large objects, \eg greater than 64\,KB, are allocated directly from the operating system and are returned immediately to the operating system to reduce long-term external fragmentation. 616 If the container size is small, \eg 1\,KB, then a 1.5\,KB object is treated as a large object, which is likely to be inappropriate. 
617 Ideally, it is best to use smaller containers for smaller objects, and larger containers for medium objects, which leads to the issue of locating the container header. 618 619 In order to find the container header when using different sized containers, a super container is used (see~\VRef[Figure]{f:SuperContainers}). 620 The super container spans several containers, contains a header with information for finding each container header, and starts on an aligned address. 621 Super-container headers are found using the same method used to find container headers by dropping the lower bits of an object address. 622 The containers within a super container may be different sizes or all the same size. 623 If the containers in the super container are different sizes, then the super-container header must be searched to determine the specific container for an object given its address. 624 If all containers in the super container are the same size, \eg 16KB, then a specific container header can be found by a simple calculation. 625 The free space at the end of a super container is used to allocate new containers. 626 627 \begin{figure} 628 \centering 629 \input{SuperContainers} 630 % \includegraphics{diagrams/supercontainer.eps} 631 \caption{Super Containers} 632 \label{f:SuperContainers} 633 \end{figure} 634 635 Minimal internal and external fragmentation is achieved by having as few containers as possible, each being as full as possible. 636 It is also possible to achieve additional benefit by using larger containers for popular small sizes, as it reduces the number of containers with associated headers. 637 However, this approach assumes it is possible for an allocator to determine in advance which sizes are popular. 638 Keeping statistics on requested sizes allows the allocator to make a dynamic decision about which sizes are popular. 639 For example, after receiving a number of allocation requests for a particular size, that size is considered a popular request size and larger containers are allocated for that size. 640 If the decision is incorrect, larger containers than necessary are allocated that remain mostly unused. 641 A programmer may be able to inform the allocator about popular object sizes, using a mechanism like @mallopt@, in order to select an appropriate container size for each object size. 642 643 644 \subsection{Container Free-Lists} 645 \label{s:containersfreelists} 646 647 The container header allows an alternate approach for managing the heap's free-list. 648 Rather than maintain a global free-list throughout the heap (see~\VRef[Figure]{f:GlobalFreeListAmongContainers}), the containers are linked through their headers and only the local free objects within a container are linked together (see~\VRef[Figure]{f:LocalFreeListWithinContainers}). 649 Note, maintaining free lists within a container assumes all free objects in the container are associated with the same heap; 650 thus, this approach only applies to containers with ownership. 651 652 This alternate free-list approach can greatly reduce the complexity of moving all freed objects belonging to a container to another heap. 653 To move a container using a global free-list, as in \VRef[Figure]{f:GlobalFreeListAmongContainers}, the free list is first searched to find all objects within the container. 654 Each object is then removed from the free list and linked together to form a local free-list for the move to the new heap. 
655 With local free-lists in containers, as in \VRef[Figure]{f:LocalFreeListWithinContainers}, the container is simply removed from one heap's free list and placed on the new heap's free list.
656 Thus, when using local free-lists, the operation of moving containers is reduced from $O(N)$ to $O(1)$.
657 The cost is adding information to a header, which increases the header size, and therefore internal fragmentation.
658 
659 \begin{figure}
660 \centering
661 \subfigure[Global Free-List Among Containers]{
662 \input{FreeListAmongContainers}
663 \label{f:GlobalFreeListAmongContainers}
664 } % subfigure
665 \hspace{0.25in}
666 \subfigure[Local Free-List Within Containers]{
667 \input{FreeListWithinContainers}
668 \label{f:LocalFreeListWithinContainers}
669 } % subfigure
670 \caption{Container Free-List Structure}
671 \label{f:ContainerFreeListStructure}
672 \end{figure}
673 
674 When all objects in the container are the same size, a single free-list is sufficient.
675 However, when objects in the container are different sizes, the header needs a free list for each size class when using a binning allocation algorithm, which can be a significant increase in the container-header size.
676 The alternative is to use a different allocation algorithm with a single free-list, such as a sequential-fit allocation-algorithm.
677 
678 
679 \subsection{Hybrid Private/Public Heap}
680 \label{s:HybridPrivatePublicHeap}
681 
682 Section~\Vref{s:Ownership} discusses advantages and disadvantages of public heaps (T:H model and with ownership) and private heaps (thread heaps with ownership).
683 For thread heaps with ownership, it is possible to combine these approaches into a hybrid approach with both private and public heaps (see~\VRef[Figure]{f:HybridPrivatePublicHeap}).
684 The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup.
685 In the hybrid approach, a task first allocates from its private heap and second from its public heap if no free memory exists in the private heap.
686 Similarly, a task first deallocates an object to its private heap, and second to the public heap.
687 Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps.
688 Note, deallocation from the private to the public (dashed line) is unlikely because there is no obvious advantage unless the public heap provides the only interface to the global heap.
689 Finally, when a task frees an object it does not own, the object is either freed immediately to its owner's public heap or put in the freeing task's private heap for delayed ownership, which allows the freeing task to temporarily reuse an object before returning it to its owner or batch objects for an owner heap into a single return.
690 
691 \begin{figure}
692 \centering
693 \input{PrivatePublicHeaps.pstex_t}
694 \caption{Hybrid Private/Public Heap for Per-thread Heaps}
695 \label{f:HybridPrivatePublicHeap}
696 % \vspace{10pt}
697 % \input{RemoteFreeList.pstex_t}
698 % \caption{Remote Free-List}
699 % \label{f:RemoteFreeList}
700 \end{figure}
701 
702 As mentioned, an implementation may have only one heap deal with the global heap, so the other heap can be simplified.
703 For example, if only the private heap interacts with the global heap, the public heap can be reduced to a lock-protected free-list of objects deallocated by other threads due to ownership, called a \newterm{remote free-list}.
704 To avoid heap blowup, the private heap allocates from the remote free-list when it reaches some threshold or it has no free storage.
705 Since the remote free-list is occasionally cleared during an allocation, this clearing adds to the allocation cost.
706 Clearing the remote free-list is $O(1)$ if the list can simply be added to the end of the private-heap's free-list, or $O(N)$ if some action must be performed for each freed object.
707 
708 If only the public heap interacts with other threads and the global heap, the private heap can handle thread-local allocations and deallocations without locking.
709 In this scenario, the private heap must deallocate storage to the public heap after reaching a certain threshold (and the public heap eventually to the global heap) or heap blowup can occur.
710 If the public heap does the major management, the private heap can be simplified to provide high-performance thread-local allocations and deallocations.
711 
712 The main disadvantage of each thread having both a private and public heap is the complexity of managing two heaps and their interactions in an allocator.
713 Interestingly, heap implementations often focus on either a private or public heap, giving the impression a single versus a hybrid approach is being used.
714 In many cases, the hybrid approach is actually being used, but the simpler heap is just folded into the complex heap, even though the operations logically belong in separate heaps.
715 For example, a remote free-list is actually a simple public-heap, but may be implemented as an integral component of the complex private-heap in an allocator, masking the presence of a hybrid approach.
716 
717 
718 \section{Allocation Buffer}
719 \label{s:AllocationBuffer}
720 
721 An allocation buffer is reserved memory (see~\VRef{s:AllocatorComponents}) not yet allocated to the program, and is used for allocating objects when the free list is empty.
722 That is, rather than requesting new storage for a single object, an entire buffer is requested from which multiple objects are allocated later.
723 Both thread and global heaps may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or operating system, respectively.
724 The allocation buffer reduces contention and the number of global/operating-system calls.
725 For coalescing, a buffer is split into smaller objects by allocations, and recomposed into larger buffer areas during deallocations.
726 
727 Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts.
728 Furthermore, to prevent heap blowup, objects should be reused before allocating a new allocation buffer.
729 Thus, allocation buffers are often allocated more frequently at program/thread start, and then their use often diminishes.
730 
731 Using an allocation buffer with a thread heap avoids active false-sharing, since all objects in the allocation buffer are allocated to the same thread.
732 For example, if all objects sharing a cache line come from the same allocation buffer, then these objects are allocated to the same thread, avoiding active false-sharing.
733 Active false-sharing may still occur if objects are freed to the global heap and reused by another heap.
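The fast path of an allocation buffer is a bump pointer; the following minimal sketch (illustrative names, no locking, not the thesis implementation) carves objects out of the current buffer and requests a fresh buffer only when the current one is exhausted.
\begin{cfa}
#include <stddef.h>
#include <stdlib.h>

#define BUFFER_SIZE (64 * 1024)

struct alloc_buffer {
	char * next;								// next free byte in the buffer
	char * end;									// one past the last byte of the buffer
};

static void * buffer_alloc( struct alloc_buffer * buf, size_t size ) {
	size = (size + 15) & ~(size_t)15;			// keep 16-byte alignment
	if ( size > BUFFER_SIZE ) return NULL;		// large objects handled elsewhere
	if ( buf->next == NULL || (size_t)(buf->end - buf->next) < size ) {
		// buffer exhausted: abandon the unused remainder (external fragmentation) and
		// request a fresh buffer; this call stands in for a global-heap / OS request
		char * fresh = aligned_alloc( 16, BUFFER_SIZE );
		if ( fresh == NULL ) return NULL;
		buf->next = fresh;
		buf->end  = fresh + BUFFER_SIZE;
	}
	void * obj = buf->next;						// bump-pointer allocation
	buf->next += size;
	return obj;
}
\end{cfa}
The only per-object cost is the pointer bump, which is why an allocation buffer handles the burst of allocations at thread start so cheaply.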
734 
735 Allocation buffers may increase external fragmentation, since some memory in the allocation buffer may never be allocated.
736 A smaller allocation buffer reduces the amount of external fragmentation, but increases the number of calls to the global heap or operating system.
737 The allocation buffer also slightly increases internal fragmentation, since a pointer is necessary to locate the next free object in the buffer.
738 
739 The unused part of a container, neither allocated nor freed, is an allocation buffer.
740 For example, when a container is created, rather than placing all objects within the container on the free list, the objects form an allocation buffer and are allocated from the buffer as allocation requests are made.
741 This lazy method of constructing objects is beneficial in terms of paging and caching.
742 For example, although an entire container, possibly spanning several pages, is allocated from the operating system, only a small part of the container is used in the working set of the allocator, reducing the number of pages and cache lines that are brought into higher levels of cache.
743 
744 
745 \section{Lock-Free Operations}
746 \label{s:LockFreeOperations}
747 
748 A lock-free algorithm guarantees safe concurrent-access to a data structure, so that at least one thread can make progress in the system, but an individual task has no bound on its execution, and hence, may starve~\cite[pp.~745--746]{Herlihy93}.
749 % A wait-free algorithm puts a finite bound on the number of steps any thread takes to complete an operation, so an individual task cannot starve
750 Lock-free operations can be used in an allocator to reduce or eliminate the use of locks.
751 Locks are a problem under high contention or if the thread holding the lock is preempted and other threads attempt to use that lock.
752 With respect to the heap, these situations are unlikely unless all threads make extremely high use of dynamic-memory allocation, which can be an indication of poor design.
753 Nevertheless, lock-free algorithms can reduce the number of context switches, since a thread does not yield/block while waiting for a lock;
754 on the other hand, a thread may busy-wait for an unbounded period.
755 Finally, lock-free implementations have greater complexity and hardware dependency.
756 Lock-free algorithms can be applied most easily to simple free-lists, \eg remote free-list, to allow lock-free insertion and removal from the head of a stack.
757 Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is correspondingly more difficult.
758 Michael~\cite{Michael04} and Gidenstam \etal \cite{Gidenstam05} have created lock-free variations of the Hoard allocator.
-
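As an illustration of the simple free-list case above, the following sketch (C11 atomics; illustrative only, not taken from any cited allocator) implements a remote free-list as a lock-free Treiber stack: any thread can push a freed object, and the owner heap detaches the whole list in a single exchange, which is the $O(1)$ clearing mentioned earlier and also sidesteps the ABA problem of popping individual nodes.
\begin{cfa}
#include <stdatomic.h>
#include <stddef.h>

struct free_node { struct free_node * next; };

struct remote_list { _Atomic(struct free_node *) head; };

// any thread may push a freed object without taking a lock
static void remote_push( struct remote_list * list, struct free_node * n ) {
	struct free_node * old = atomic_load_explicit( &list->head, memory_order_relaxed );
	do {
		n->next = old;
	} while ( ! atomic_compare_exchange_weak_explicit( &list->head, &old, n,
	              memory_order_release, memory_order_relaxed ) );
}

// the owner heap detaches the entire list at once and splices it onto its private free-list
static struct free_node * remote_clear( struct remote_list * list ) {
	return atomic_exchange_explicit( &list->head, NULL, memory_order_acquire );
}
\end{cfa}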
doc/theses/mubeen_zulfiqar_MMath/benchmarks.tex
ref3c383 rd672350
 41 41 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 42 42 
 43 
 44 \section{Benchmarks}
 45 There are multiple benchmarks that are built individually and evaluate different aspects of a memory allocator; however, there is no standard set of benchmarks that can be used to evaluate multiple aspects of memory allocators.
 46 
 47 \paragraph{threadtest}
 48 (FIX ME: cite benchmark and hoard) Each thread repeatedly allocates and then deallocates 100,000 objects (see the sketch below). The runtime of the benchmark is used to evaluate the allocator's efficiency.
 49 
 50 \paragraph{shbench}
 51 (FIX ME: cite benchmark and hoard) Each thread allocates and randomly frees a number of random-sized objects. It is a stress test that also uses runtime to determine the efficiency of the allocator.
 52 
 53 \paragraph{larson}
 54 (FIX ME: cite benchmark and hoard) Larson simulates a server environment. Multiple threads are created, where each thread allocates and frees a number of objects within a size range. Some objects are passed from parent threads to child threads to be freed. It calculates memory operations per second as an indicator of the memory allocator's performance.
 55 
 56 
 43 57 \section{Performance Matrices of Memory Allocators}
 44 58 
-
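A minimal pthreads sketch of the threadtest-style worker loop referenced above; the constants and structure are assumptions for illustration, and the actual benchmark differs in details such as object size, rounds, and timing.
\begin{cfa}
#include <pthread.h>
#include <stdlib.h>

enum { NOBJS = 100000, OBJ_SIZE = 8, ROUNDS = 100, NTHREADS = 4 };

static void * worker( void * arg ) {
	(void)arg;
	void ** objs = malloc( NOBJS * sizeof(void *) );	// bookkeeping array, not part of the measurement
	for ( int r = 0; r < ROUNDS; r += 1 ) {
		for ( int i = 0; i < NOBJS; i += 1 ) objs[i] = malloc( OBJ_SIZE );	// allocate a batch
		for ( int i = 0; i < NOBJS; i += 1 ) free( objs[i] );				// then free it all
	}
	free( objs );
	return NULL;
}

int main( void ) {
	pthread_t th[NTHREADS];
	for ( int t = 0; t < NTHREADS; t += 1 ) pthread_create( &th[t], NULL, worker, NULL );
	for ( int t = 0; t < NTHREADS; t += 1 ) pthread_join( th[t], NULL );
	return 0;									// wall-clock time of the run is the figure of merit
}
\end{cfa}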
doc/theses/mubeen_zulfiqar_MMath/intro.tex
ref3c383 rd672350 1 1 \chapter{Introduction} 2 2 3 % Shared-memory multi-processor computers are ubiquitous and important for improving application performance. 4 % However, writing programs that take advantage of multiple processors is not an easy task~\cite{Alexandrescu01b}, \eg shared resources can become a bottleneck when increasing (scaling) threads. 5 % One crucial shared resource is program memory, since it is used by all threads in a shared-memory concurrent-program~\cite{Berger00}. 6 % Therefore, providing high-performance, scalable memory-management is important for virtually all shared-memory multi-threaded programs. 7 8 \vspace*{-23pt} 9 Memory management takes a sequence of program generated allocation/deallocation requests and attempts to satisfy them within a fixed-sized block of memory while minimizing the total amount of memory used. 10 A general-purpose dynamic-allocation algorithm cannot anticipate future allocation requests so its output is rarely optimal. 11 However, memory allocators do take advantage of regularities in allocation patterns for typical programs to produce excellent results, both in time and space (similar to LRU paging). 12 In general, allocators use a number of similar techniques, each optimizing specific allocation patterns. 13 Nevertheless, memory allocators are a series of compromises, occasionally with some static or dynamic tuning parameters to optimize specific program-request patterns. 14 15 16 \section{Memory Structure} 17 \label{s:MemoryStructure} 18 19 \VRef[Figure]{f:ProgramAddressSpace} shows the typical layout of a program's address space divided into the following zones (right to left): static code/data, dynamic allocation, dynamic code/data, and stack, with free memory surrounding the dynamic code/data~\cite{memlayout}. 20 Static code and data are placed into memory at load time from the executable and are fixed-sized at runtime. 21 Dynamic-allocation memory starts empty and grows/shrinks as the program dynamically creates/deletes variables with independent lifetime. 22 The programming-language's runtime manages this area, where management complexity is a function of the mechanism for deleting variables. 23 Dynamic code/data memory is managed by the dynamic loader for libraries loaded at runtime, which is complex especially in a multi-threaded program~\cite{Huang06}. 24 However, changes to the dynamic code/data space are typically infrequent, many occurring at program startup, and are largely outside of a program's control. 25 Stack memory is managed by the program call-mechanism using a simple LIFO technique, which works well for sequential programs. 26 For multi-threaded programs (and coroutines), a new stack is created for each thread; 27 these thread stacks are commonly created in dynamic-allocation memory. 28 This thesis focuses on management of the dynamic-allocation memory. 29 30 \begin{figure} 31 \centering 32 \input{AddressSpace} 33 \vspace{-5pt} 34 \caption{Program Address Space Divided into Zones} 35 \label{f:ProgramAddressSpace} 36 \end{figure} 37 38 39 \section{Dynamic Memory-Management} 40 \label{s:DynamicMemoryManagement} 41 42 Modern programming languages manage dynamic-allocation memory in different ways. 43 Some languages, such as Lisp~\cite{CommonLisp}, Java~\cite{Java}, Haskell~\cite{Haskell}, Go~\cite{Go}, provide explicit allocation but \emph{implicit} deallocation of data through garbage collection~\cite{Wilson92}. 
44 In general, garbage collection supports memory compaction, where dynamic (live) data is moved during runtime to better utilize space.
 45 However, moving data requires finding pointers to it and updating them to reflect new data locations.
 46 Programming languages such as C~\cite{C}, \CC~\cite{C++}, and Rust~\cite{Rust} provide the programmer with explicit allocation \emph{and} deallocation of data.
 47 These languages cannot find and subsequently move live data because pointers can be created to any storage zone, including internal components of allocated objects, and may contain temporary invalid values generated by pointer arithmetic.
 48 Attempts have been made to perform quasi garbage collection in C/\CC~\cite{Boehm88}, but it is a compromise.
 49 This thesis only examines dynamic memory-management with \emph{explicit} deallocation.
 50 While garbage collection and compaction are not part of this work, many of the results are applicable to the allocation phase in any memory-management approach.
 51 
 52 Most programs use a general-purpose allocator, often the one provided implicitly by the programming-language's runtime.
 53 When this allocator proves inadequate, programmers often write specialized allocators for specific needs.
 54 C and \CC allow easy replacement of the default memory allocator with an alternative specialized or general-purpose memory-allocator.
 55 (Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.)
 56 However, high-performance memory-allocators for kernel and user multi-threaded programs are still being designed and improved.
 57 For this reason, several alternative general-purpose allocators have been written for C/\CC with the goal of scaling in a multi-threaded program~\cite{Berger00,mtmalloc,streamflow,tcmalloc}.
 58 This thesis examines the design of high-performance allocators for use by kernel and user multi-threaded applications written in C/\CC.
 59 
 60 
 61 \section{Contributions}
 62 \label{s:Contributions}
 63 
 64 This work provides the following contributions in the area of concurrent dynamic allocation:
 65 \begin{enumerate}[leftmargin=*]
 66 \item
 67 Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
 68 
 69 \item
 70 Adopt returning @nullptr@ for a zero-sized allocation, rather than an actual memory address, both of which can be passed to @free@.
 71 
 72 \item
 73 Extend the standard C heap functionality by preserving with each allocation its original request size versus the amount allocated, if an allocation is zero fill, and the allocation alignment.
 74 
 75 \item
 76 Use the zero fill and alignment as \emph{sticky} properties for @realloc@, to realign existing storage, or preserve existing zero-fill and alignment when storage is copied.
 77 Without this extension, it is unsafe to @realloc@ storage initially allocated with zero-fill/alignment as these properties are not preserved when copying.
 78 This silent generation of a problem is unintuitive to programmers and difficult to locate because it is transient.
 79 
 80 \item
 81 Provide additional heap operations to complete programmer expectation with respect to accessing different allocation properties.
82 \begin{itemize} 83 \item 84 @resize( oaddr, size )@ re-purpose an old allocation for a new type \emph{without} preserving fill or alignment. 85 \item 86 @resize( oaddr, alignment, size )@ re-purpose an old allocation with new alignment but \emph{without} preserving fill. 87 \item 88 @realloc( oaddr, alignment, size )@ same as previous @realloc@ but adding or changing alignment. 89 \item 90 @aalloc( dim, elemSize )@ same as @calloc@ except memory is \emph{not} zero filled. 91 \item 92 @amemalign( alignment, dim, elemSize )@ same as @aalloc@ with memory alignment. 93 \item 94 @cmemalign( alignment, dim, elemSize )@ same as @calloc@ with memory alignment. 95 \end{itemize} 96 97 \item 98 Provide additional heap wrapper functions in \CFA to provide a complete orthogonal set of allocation operations and properties. 99 100 \item 101 Provide additional query operations to access information about an allocation: 102 \begin{itemize} 103 \item 104 @malloc_alignment( addr )@ returns the alignment of the allocation pointed-to by @addr@. 105 If the allocation is not aligned or @addr@ is the @nulladdr@, the minimal alignment is returned. 106 \item 107 @malloc_zero_fill( addr )@ returns a boolean result indicating if the memory pointed-to by @addr@ is allocated with zero fill, e.g., by @calloc@/@cmemalign@. 108 \item 109 @malloc_size( addr )@ returns the size of the memory allocation pointed-to by @addr@. 110 \item 111 @malloc_usable_size( addr )@ returns the usable size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@. 112 \end{itemize} 113 114 \item 115 Provide mostly contention-free allocation and free operations via a heap-per-kernel-thread implementation. 116 117 \item 118 Provide complete, fast, and contention-free allocation statistics to help understand program behaviour: 119 \begin{itemize} 120 \item 121 @malloc_stats()@ print memory-allocation statistics on the file-descriptor set by @malloc_stats_fd@. 122 \item 123 @malloc_info( options, stream )@ print memory-allocation statistics as an XML string on the specified file-descriptor set by @malloc_stats_fd@. 124 \item 125 @malloc_stats_fd( fd )@ set file-descriptor number for printing memory-allocation statistics (default @STDERR_FILENO@). 126 This file descriptor is used implicitly by @malloc_stats@ and @malloc_info@. 127 \end{itemize} 128 129 \item 130 Provide extensive runtime checks to valid allocation operations and identify the amount of unfreed storage at program termination. 131 132 \item 133 Build 4 different versions of the allocator: 134 \begin{itemize} 135 \item 136 static or dynamic linking 137 \item 138 statistic/debugging (testing) or no statistic/debugging (performance) 139 \end{itemize} 140 A program may link to any of these 4 versions of the allocator often without recompilation. 141 (It is possible to separate statistics and debugging, giving 8 different versions.) 142 143 \item 144 A micro-benchmark test-suite for comparing allocators rather than relying on a suite of arbitrary programs. 145 These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs 146 \end{enumerate} 147 148 \begin{comment} 3 149 \noindent 4 150 ==================== … … 26 172 27 173 \section{Introduction} 28 Dynamic memory allocation and management is one of the core features of C. It gives programmer the freedom to allocate, free, use, and manage dynamic memory himself. 
The programmer is not given the complete control of the dynamic memory management instead an interface of memory allocator is given to the progr mmer that can be used to allocate/free dynamic memory for the application's use.29 30 Memory allocator is a layer between th rprogrammer and the system. Allocator gets dynamic memory from the system in heap/mmap area of application storage and manages it for programmer's use.31 32 GNU C Library (FIX ME: cite this) provides an interchangeable memory allocator that can be replaced with a custom memory allocator that supports required features and fulfills application's custom needs. It also allows others to innovate in memory allocation and design their own memory allocator. GNU C Library has set guidelines that should be followed when designing a stand alone memory allocator. GNU C Library requires new memory allocators to have atlease following set of functions in their allocator's interface:174 Dynamic memory allocation and management is one of the core features of C. It gives programmer the freedom to allocate, free, use, and manage dynamic memory himself. The programmer is not given the complete control of the dynamic memory management instead an interface of memory allocator is given to the programmer that can be used to allocate/free dynamic memory for the application's use. 175 176 Memory allocator is a layer between the programmer and the system. Allocator gets dynamic memory from the system in heap/mmap area of application storage and manages it for programmer's use. 177 178 GNU C Library (FIX ME: cite this) provides an interchangeable memory allocator that can be replaced with a custom memory allocator that supports required features and fulfills application's custom needs. It also allows others to innovate in memory allocation and design their own memory allocator. GNU C Library has set guidelines that should be followed when designing a stand-alone memory allocator. GNU C Library requires new memory allocators to have at lease following set of functions in their allocator's interface: 33 179 34 180 \begin{itemize} … … 43 189 \end{itemize} 44 190 45 In addition to the above functions, GNU C Library also provides some more functions to increase the usability of the dynamic memory allocator. Most stand alone allocators also provide all or some of the above additional functions.191 In addition to the above functions, GNU C Library also provides some more functions to increase the usability of the dynamic memory allocator. Most stand-alone allocators also provide all or some of the above additional functions. 46 192 47 193 \begin{itemize} … … 60 206 \end{itemize} 61 207 62 With the rise of concurrent applications, memory allocators should be able to fulfill dynamic memory requests from multiple threads in parallel without causing contention on shared resources. There needs to be a set of a standard benchmarks that can be used to evaluate an allocator's performance in different scen erios.208 With the rise of concurrent applications, memory allocators should be able to fulfill dynamic memory requests from multiple threads in parallel without causing contention on shared resources. There needs to be a set of a standard benchmarks that can be used to evaluate an allocator's performance in different scenarios. 63 209 64 210 \section{Research Objectives} … … 69 215 Design a lightweight concurrent memory allocator with added features and usability that are currently not present in the other memory allocators. 
70 216 \item 71 Design a suite of benchmarks to evalu te multiple aspects of a memory allocator.217 Design a suite of benchmarks to evaluate multiple aspects of a memory allocator. 72 218 \end{itemize} 73 219 74 220 \section{An outline of the thesis} 75 221 LAST FIX ME: add outline at the end 222 \end{comment} -
doc/theses/mubeen_zulfiqar_MMath/performance.tex
ref3c383 rd672350
 18 18 \noindent
 19 19 ====================
 20 
 21 \section{Machine Specification}
 22 
 23 The performance experiments were run on three different multicore systems to determine if there is consistency across platforms:
 24 \begin{itemize}
 25 \item
 26 AMD EPYC 7662, 64-core socket $\times$ 2, 2.0 GHz
 27 \item
 28 Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz
 29 \item
 30 Intel Xeon Gold 5220R, 48-core socket $\times$ 2, 2.20GHz
 31 \end{itemize}
 32 
 33 
 34 \section{Existing Memory Allocators}
 35 With dynamic allocation being an important feature of C, there are many stand-alone memory allocators that have been designed for different purposes. For this thesis, we chose 7 of the most popular and widely used memory allocators.
 36 
 37 \paragraph{dlmalloc}
 38 dlmalloc (FIX ME: cite allocator) is a thread-safe allocator that uses a single heap shared by all threads. dlmalloc maintains free-lists of different sizes to store freed dynamic memory. (FIX ME: cite wasik)
 39 
 40 \paragraph{hoard}
 41 Hoard (FIX ME: cite allocator) is a thread-safe, multi-threaded allocator built using a heap-layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap. (FIX ME: cite wasik)
 42 
 43 \paragraph{jemalloc}
 44 jemalloc (FIX ME: cite allocator) is a thread-safe allocator that uses multiple arenas. Each thread is assigned an arena. Each arena has chunks that contain contiguous memory regions of the same size. An arena has multiple chunks that contain regions of multiple sizes.
 45 
 46 \paragraph{ptmalloc}
 47 ptmalloc (FIX ME: cite allocator) is a modification of dlmalloc. It is a thread-safe multi-threaded memory allocator that uses multiple heaps. A ptmalloc heap has a similar design to dlmalloc's heap.
 48 
 49 \paragraph{rpmalloc}
 50 rpmalloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses per-thread heaps. Each heap has multiple size-classes and each size-class contains memory regions of the relevant size.
 51 
 52 \paragraph{tbb malloc}
 53 tbb malloc (FIX ME: cite allocator) is a thread-safe allocator that is multi-threaded and uses a private heap for each thread. Each private-heap has multiple bins of different sizes. Each bin contains free regions of the same size.
 54 
 55 \paragraph{tc malloc}
 56 tcmalloc (FIX ME: cite allocator) is a thread-safe allocator. It uses a per-thread cache to store free objects, which prevents contention on shared resources in multi-threaded applications. A central free-list is used to refill a per-thread cache when it becomes empty (see the sketch below).
 57 
 20 58 
 21 59 \section{Memory Allocators}
-
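Several of the allocators above (tcmalloc, tbb malloc, rpmalloc) share the per-thread-cache idea; the following rough sketch (a generic pattern, not any allocator's actual code) shows the essential fast path: allocation pops from a thread-local list and only takes the central lock to refill the cache in a batch.
\begin{cfa}
#include <pthread.h>
#include <stddef.h>

struct node { struct node * next; };

static struct node * central_list;					// shared pool of free objects
static pthread_mutex_t central_lock = PTHREAD_MUTEX_INITIALIZER;
static _Thread_local struct node * local_cache;		// per-thread cache, accessed without locking

enum { BATCH = 32 };

static void * cached_alloc( void ) {
	if ( local_cache == NULL ) {					// slow path: refill from the central free-list
		pthread_mutex_lock( &central_lock );
		for ( int i = 0; i < BATCH && central_list != NULL; i += 1 ) {
			struct node * n = central_list;
			central_list = n->next;
			n->next = local_cache;
			local_cache = n;
		}
		pthread_mutex_unlock( &central_lock );
		if ( local_cache == NULL ) return NULL;		// central list empty: caller must grow the heap
	}
	struct node * n = local_cache;					// fast path: no locks or atomic operations
	local_cache = n->next;
	return n;
}
\end{cfa}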
doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.bib
ref3c383 rd672350 34 34 year = "2008" 35 35 } 36 37 @article{Sleator85, 38 author = {Sleator, Daniel Dominic and Tarjan, Robert Endre}, 39 title = {Self-Adjusting Binary Search Trees}, 40 journal = jacm, 41 volume = 32, 42 number = 3, 43 year = 1985, 44 issn = {0004-5411}, 45 pages = {652-686}, 46 doi = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/3828.3835}, 47 address = {New York, NY, USA}, 48 } 49 50 @article{Berger00, 51 author = {Emery D. Berger and Kathryn S. McKinley and Robert D. Blumofe and Paul R. Wilson}, 52 title = {Hoard: A Scalable Memory Allocator for Multithreaded Applications}, 53 booktitle = {International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX)}, 54 journal = sigplan, 55 volume = 35, 56 number = 11, 57 month = nov, 58 year = 2000, 59 pages = {117-128}, 60 note = {International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX)}, 61 } 62 63 @inproceedings{berger02reconsidering, 64 author = {Emery D. Berger and Benjamin G. Zorn and Kathryn S. McKinley}, 65 title = {Reconsidering Custom Memory Allocation}, 66 booktitle = {Proceedings of the 17th ACM SIGPLAN Conference on Object-Oriented Programming: Systems, Languages, and Applications (OOPSLA) 2002}, 67 month = nov, 68 year = 2002, 69 location = {Seattle, Washington, USA}, 70 publisher = {ACM}, 71 address = {New York, NY, USA}, 72 } 73 74 @article{larson99memory, 75 author = {Per-{\AA}ke Larson and Murali Krishnan}, 76 title = {Memory Allocation for Long-Running Server Applications}, 77 journal = sigplan, 78 volume = 34, 79 number = 3, 80 pages = {176-185}, 81 year = 1999, 82 url = {http://citeseer.ist.psu.edu/article/larson98memory.html} 83 } 84 85 @techreport{gidpt04, 86 author = {Anders Gidenstam and Marina Papatriantafilou and Philippas Tsigas}, 87 title = {Allocating Memory in a Lock-Free Manner}, 88 number = {2004-04}, 89 institution = {Computing Science}, 90 address = {Chalmers University of Technology}, 91 year = 2004, 92 url = {http://citeseer.ist.psu.edu/gidenstam04allocating.html} 93 } 94 95 @phdthesis{berger02thesis, 96 author = {Emery Berger}, 97 title = {Memory Management for High-Performance Applications}, 98 school = {The University of Texas at Austin}, 99 year = 2002, 100 month = aug, 101 url = {http://citeseer.ist.psu.edu/article/berger02memory.html} 102 } 103 104 @misc{sgimisc, 105 author = {SGI}, 106 title = {The Standard Template Library for {C++}}, 107 note = {\textsf{www.sgi.com/\-tech/\-stl/\-Allocators.html}}, 108 } 109 110 @misc{dlmalloc, 111 author = {Doug Lea}, 112 title = {dlmalloc version 2.8.4}, 113 month = may, 114 year = 2009, 115 note = {\textsf{ftp://g.oswego.edu/\-pub/\-misc/\-malloc.c}}, 116 } 117 118 @misc{ptmalloc2, 119 author = {Wolfram Gloger}, 120 title = {ptmalloc version 2}, 121 month = jun, 122 year = 2006, 123 note = {\textsf{http://www.malloc.de/\-malloc/\-ptmalloc2-current.tar.gz}}, 124 } 125 126 @misc{nedmalloc, 127 author = {Niall Douglas}, 128 title = {nedmalloc version 1.06 Beta}, 129 month = jan, 130 year = 2010, 131 note = {\textsf{http://\-prdownloads.\-sourceforge.\-net/\-nedmalloc/\-nedmalloc\_v1.06beta1\_svn1151.zip}}, 132 } 133 134 @misc{hoard, 135 author = {Emery D. 
Berger}, 136 title = {hoard version 3.8}, 137 month = nov, 138 year = 2009, 139 note = {\textsf{http://www.cs.umass.edu/\-$\sim$emery/\-hoard/\-hoard-3.8/\-source/hoard-38.tar.gz}}, 140 } 141 142 @comment{mtmalloc, 143 author = {Greg Nakhimovsky}, 144 title = {Improving Scalability of Multithreaded Dynamic Memory Allocation}, 145 journal = {Dr. Dobb's}, 146 month = jul, 147 year = 2001, 148 url = {http://www.ddj.com/mobile/184404685?pgno=1} 149 } 150 151 @misc{mtmalloc, 152 key = {mtmalloc}, 153 title = {mtmalloc.c}, 154 year = 2009, 155 note = {\textsf{http://src.opensolaris.org/\-source/\-xref/\-onnv/\-onnv-gate/\-usr/\-src/\-lib/\-libmtmalloc/\-common/\-mtmalloc.c}}, 156 } 157 158 @misc{tcmalloc, 159 author = {Sanjay Ghemawat and Paul Menage}, 160 title = {tcmalloc version 1.5}, 161 month = jan, 162 year = 2010, 163 note = {\textsf{http://google-perftools.\-googlecode.\-com/\-files/\-google-perftools-1.5.tar.gz}}, 164 } 165 166 @inproceedings{streamflow, 167 author = {Scott Schneider and Christos D. Antonopoulos and Dimitrios S. Nikolopoulos}, 168 title = {Scalable Locality-Conscious Multithreaded Memory Allocation}, 169 booktitle = {International Symposium on Memory Management (ISSM'06)}, 170 month = jun, 171 year = 2006, 172 pages = {84-94}, 173 location = {Ottawa, Ontario, Canada}, 174 publisher = {ACM}, 175 address = {New York, NY, USA}, 176 } 177 178 @misc{streamflowweb, 179 author = {Scott Schneider and Christos Antonopoulos and Dimitrios Nikolopoulos}, 180 title = {Streamflow}, 181 note = {\textsf{http://people.cs.vt.edu/\-\char`\~scschnei/\-streamflow}}, 182 } 183 184 @inproceedings{Blumofe94, 185 author = {R. Blumofe and C. Leiserson}, 186 title = {Scheduling Multithreaded Computations by Work Stealing}, 187 booktitle = {Proceedings of the 35th Annual Symposium on Foundations of Computer Science, Santa Fe, New Mexico.}, 188 pages = {356-368}, 189 year = 1994, 190 month = nov, 191 url = {http://citeseer.ist.psu.edu/article/blumofe94scheduling.html} 192 } 193 194 @article{Johnstone99, 195 author = {Mark S. Johnstone and Paul R. Wilson}, 196 title = {The Memory Fragmentation Problem: Solved?}, 197 journal = sigplan, 198 volume = 34, 199 number = 3, 200 pages = {26-36}, 201 year = 1999, 202 } 203 204 @inproceedings{Grunwald93, 205 author = {Dirk Grunwald and Benjamin G. Zorn and Robert Henderson}, 206 title = {Improving the Cache Locality of Memory Allocation}, 207 booktitle = {{SIGPLAN} Conference on Programming Language Design and Implementation}, 208 pages = {177-186}, 209 year = 1993, 210 url = {http://citeseer.ist.psu.edu/grunwald93improving.html} 211 } 212 213 @inproceedings{Wilson95, 214 author = {Wilson, Paul R. and Johnstone, Mark S. and Neely, Michael and Boles, David}, 215 title = {Dynamic Storage Allocation: A Survey and Critical Review}, 216 booktitle = {Proc. Int. 
Workshop on Memory Management}, 217 address = {Kinross Scotland, UK}, 218 year = 1995, 219 url = {http://citeseer.ist.psu.edu/wilson95dynamic.html} 220 } 221 222 @inproceedings{Siebert00, 223 author = {Fridtjof Siebert}, 224 title = {Eliminating External Fragmentation in a Non-moving Garbage Collector for Java}, 225 booktitle = {CASES '00: Proceedings of the 2000 international conference on Compilers, architecture, and synthesis for embedded systems}, 226 year = 2000, 227 isbn = {1-58113-338-3}, 228 pages = {9-17}, 229 location = {San Jose, California, United States}, 230 doi = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/354880.354883}, 231 publisher = {ACM Press}, 232 address = {New York, NY, USA} 233 } 234 235 @inproceedings{Lim98, 236 author = {Tian F. Lim and Przemyslaw Pardyak and Brian N. Bershad}, 237 title = {A Memory-Efficient Real-Time Non-copying Garbage Collector}, 238 booktitle = {ISMM '98: Proceedings of the 1st international symposium on Memory management}, 239 year = 1998, 240 isbn = {1-58113-114-3}, 241 pages = {118-129}, 242 location = {Vancouver, British Columbia, Canada}, 243 doi = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/286860.286873}, 244 publisher = {ACM Press}, 245 address = {New York, NY, USA} 246 } 247 248 @article{Chang01, 249 author = {J. Morris Chang and Woo Hyong Lee and Witawas Srisa-an}, 250 title = {A Study of the Allocation Behavior of {C++} Programs}, 251 journal = {J. Syst. Softw.}, 252 volume = 57, 253 number = 2, 254 year = 2001, 255 issn = {0164-1212}, 256 pages = {107-118}, 257 doi = {http://dx.doi.org/10.1016/S0164-1212(00)00122-9}, 258 publisher = {Elsevier Science Inc.}, 259 address = {New York, NY, USA} 260 } 261 262 @article{Herlihy93, 263 author = {Maurice Herlihy}, 264 title = {A Methodology for Implementing Highly Concurrent Data Objects}, 265 journal = toplas, 266 volume = 15, 267 number = 5, 268 year = 1993, 269 issn = {0164-0925}, 270 pages = {745-770}, 271 doi = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/161468.161469}, 272 publisher = {ACM Press}, 273 address = {New York, NY, USA} 274 } 275 276 @article{Denning05, 277 author = {Peter J. Denning}, 278 title = {The Locality Principle}, 279 journal = cacm, 280 volume = 48, 281 number = 7, 282 year = 2005, 283 issn = {0001-0782}, 284 pages = {19-24}, 285 doi = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/1070838.1070856}, 286 publisher = {ACM Press}, 287 address = {New York, NY, USA} 288 } 289 290 @misc{wilson-locality, 291 author = {Paul R. Wilson}, 292 title = {Locality of Reference, Patterns in Program Behavior, Memory Management, and Memory Hierarchies}, 293 url = {http://citeseer.ist.psu.edu/337869.html} 294 } 295 296 @inproceedings{Feng05, 297 author = {Yi Feng and Emery D. 
Berger}, 298 title = {A Locality-Improving Dynamic Memory Allocator}, 299 booktitle = {Proceedings of the 2005 Workshop on Memory System Performance}, 300 location = {Chicago, Illinois}, 301 publisher = {ACM}, 302 address = {New York, NY, USA}, 303 month = jun, 304 year = 2005, 305 pages = {68-77}, 306 } 307 308 @inproceedings{grunwald-locality, 309 author = {Dirk Grunwald and Benjamin Zorn and Robert Henderson}, 310 title = {Improving the Cache Locality of Memory Allocation}, 311 booktitle = {PLDI '93: Proceedings of the ACM SIGPLAN 1993 conference on Programming language design and implementation}, 312 year = 1993, 313 isbn = {0-89791-598-4}, 314 pages = {177-186}, 315 location = {Albuquerque, New Mexico, United States}, 316 doi = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/155090.155107}, 317 publisher = {ACM Press}, 318 address = {New York, NY, USA} 319 } 320 321 @article{Alexandrescu01b, 322 author = {Andrei Alexandrescu}, 323 title = {{volatile} -- Multithreaded Programmer's Best Friend}, 324 journal = {Dr. Dobb's}, 325 month = feb, 326 year = 2001, 327 url = {http://www.ddj.com/cpp/184403766} 328 } 329 330 @article{Attardi03, 331 author = {Joseph Attardi and Neelakanth Nadgir}, 332 title = {A Comparison of Memory Allocators in Multiprocessors}, 333 journal = {Sun Developer Network}, 334 month = jun, 335 year = 2003, 336 note = {\textsf{http://developers.sun.com/\-solaris/\-articles/\-multiproc/\-multiproc.html}}, 337 } 338 339 @unpublished{memlayout, 340 author = {Peter Jay Salzman}, 341 title = {Memory Layout and the Stack}, 342 journal = {Using GNU's GDB Debugger}, 343 note = {\textsf{http://dirac.org/\-linux/\-gdb/\-02a-Memory\_Layout\_And\_The\_Stack.php}}, 344 } 345 346 @unpublished{Ferguson07, 347 author = {Justin N. Ferguson}, 348 title = {Understanding the Heap by Breaking It}, 349 note = {\textsf{https://www.blackhat.com/\-presentations/\-bh-usa-07/Ferguson/\-Whitepaper/\-bh-usa-07-ferguson-WP.pdf}}, 350 } 351 352 @inproceedings{Huang06, 353 author = {Xianglong Huang and Brian T Lewis and Kathryn S McKinley}, 354 title = {Dynamic Code Management: Improving Whole Program Code Locality in Managed Runtimes}, 355 booktitle = {VEE '06: Proceedings of the 2nd international conference on Virtual execution environments}, 356 year = 2006, 357 isbn = {1-59593-332-6}, 358 pages = {133-143}, 359 location = {Ottawa, Ontario, Canada}, 360 doi = {http://doi.acm.org/10.1145/1134760.1134779}, 361 publisher = {ACM Press}, 362 address = {New York, NY, USA} 363 } 364 365 @inproceedings{Herlihy03, 366 author = {M. Herlihy and V. Luchangco and M. Moir}, 367 title = {Obstruction-free Synchronization: Double-ended Queues as an Example}, 368 booktitle = {Proceedings of the 23rd IEEE International Conference on Distributed Computing Systems}, 369 year = 2003, 370 month = may, 371 url = {http://www.cs.brown.edu/~mph/publications.html} 372 } 373 374 @techreport{Detlefs93, 375 author = {David L. Detlefs and Al Dosser and Benjamin Zorn}, 376 title = {Memory Allocation Costs in Large {C} and {C++} Programs}, 377 number = {CU-CS-665-93}, 378 institution = {University of Colorado}, 379 address = {130 Lytton Avenue, Palo Alto, CA 94301 and Campus Box 430, Boulder, CO 80309}, 380 year = 1993, 381 url = {http://citeseer.ist.psu.edu/detlefs93memory.html} 382 } 383 384 @inproceedings{Oyama99, 385 author = {Y. Oyama and K. Taura and A. 
Yonezawa}, 386 title = {Executing Parallel Programs With Synchronization Bottlenecks Efficiently}, 387 booktitle = {Proceedings of International Workshop on Parallel and Distributed Computing for Symbolic and Irregular Applications (PDSIA '99)}, 388 year = {1999}, 389 pages = {182--204}, 390 publisher = {World Scientific}, 391 address = {Sendai, Japan}, 392 } 393 394 @inproceedings{Dice02, 395 author = {Dave Dice and Alex Garthwaite}, 396 title = {Mostly Lock-Free Malloc}, 397 booktitle = {Proceedings of the 3rd international symposium on Memory management (ISMM'02)}, 398 month = jun, 399 year = 2002, 400 pages = {163-174}, 401 location = {Berlin, Germany}, 402 publisher = {ACM}, 403 address = {New York, NY, USA}, 404 } -
doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex
ref3c383 rd672350 85 85 \usepackage{comment} % Removes large sections of the document. 86 86 \usepackage{tabularx} 87 \usepackage{subfigure} 88 89 \usepackage{algorithm} 90 \usepackage{algpseudocode} 87 91 88 92 % Hyperlinks make it very easy to navigate an electronic document. … … 168 172 %\usepackageinput{common} 169 173 \CFAStyle % CFA code-style for all languages 170 \lstset{basicstyle=\linespread{0.9}\tt} % CFA typewriter font 174 \lstset{basicstyle=\linespread{0.9}\sf} % CFA typewriter font 175 \newcommand{\uC}{$\mu$\CC} 171 176 \newcommand{\PAB}[1]{{\color{red}PAB: #1}} 172 177 … … 224 229 \addcontentsline{toc}{chapter}{\textbf{References}} 225 230 226 \bibliography{ uw-ethesis,pl}231 \bibliography{pl,uw-ethesis} 227 232 % Tip: You can create multiple .bib files to organize your references. 228 233 % Just list them all in the \bibliogaphy command, separated by commas (no spaces). -
doc/theses/thierry_delisle_PhD/thesis/text/existing.tex
ref3c383 rd672350
 1 1 \chapter{Previous Work}\label{existing}
 2 Scheduling is a topic with a very long history, predating its use in computer science. As such, early work in computed science was inspired from other fields and focused principally on solving scheduling upfront rather that as the system is running.
 2 Scheduling is the process of assigning resources to incoming requests.
 3 A very common form of this is assigning available workers to work-requests.
 4 The need for scheduling is very common in Computer Science, \eg Operating Systems and Hypervisors schedule available CPUs and NICs schedule available bandwidth, but it is also common in other fields.
 5 For example, assembly lines are an example of scheduling, where parts needing assembly are assigned to line workers.
 6 
 7 In all these cases, the choice of a scheduling algorithm generally depends first and foremost on how much information is available to the scheduler.
 8 Workloads that are well-known, consistent, and homogeneous can benefit from a scheduler that is optimized to use this information, while ill-defined, inconsistent, heterogeneous workloads require general algorithms.
 9 A secondary aspect is how much information can be gathered versus how much information must be given as part of the input.
 10 There is therefore a spectrum of scheduling algorithms, going from static schedulers that are well informed from the start, to schedulers that gather most of the information needed, to schedulers that can only rely on very limited information.
 11 Note that this description includes both information about each request, \eg time to complete or resources needed, and information about the relationships between requests, \eg whether or not some request must be completed before another request starts.
 12 
 13 Scheduling physical resources, for example in assembly lines, is generally amenable to well-informed scheduling, since information can be gathered much faster than the physical resources can be assigned and workloads are likely to stay stable for long periods of time.
 14 When a faster pace is needed and changes are much more frequent, gathering information on workloads, up-front or live, can become much more limiting, and more general schedulers are needed.
 3 15 
 4 16 \section{Naming Convention}
…
 6 18 
 7 19 \section{Static Scheduling}
 8 Static schedulers require that programmers explicitly and exhaustively specify dependencies among tasks in order to schedule them. The scheduler then processes this input ahead of time and producess a \newterm{schedule} to which the system can later adhere. An example application for these schedulers
 9 20 Static schedulers require that tasks have their dependencies and costs explicitly and exhaustively specified prior to scheduling.
 21 The scheduler then processes this input ahead of time and produces a \newterm{schedule} to which the system can later adhere.
 22 This approach is generally popular in real-time systems, since the need for strong guarantees justifies the cost of supplying this information.
 23 In general, static schedulers are less relevant to this project since they require input from the programmers that \CFA does not have as part of its concurrency semantics.
 24 Specifying this information explicitly can add a significant burden on the programmers and reduces flexibility; for this reason, the \CFA scheduler does not require this information.
12 25 
 13 26 
 14 27 \section{Dynamic Scheduling}
 15 It may be difficult to fulfill the requirements of static scheduler if dependencies are beconditionnal. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of halting or suspending a task with unfulfilled dependencies and adding one or more new task(s) to the system. The new task(s) have the responsability of adding the dependent task back in the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only tasks we no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}.
 28 It may be difficult to fulfill the requirements of a static scheduler if dependencies are conditional. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of halting or suspending a task with unfulfilled dependencies and adding one or more new task(s) to the system (see the sketch below). The new task(s) have the responsibility of adding the dependent task back in the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only tasks with no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}.
 16 29 
 17 30 \subsection{Explicitly Informed Dynamic Schedulers}
…
 29 42 \subsubsection{Feedback Scheduling}
 30 43 As mentionned, Schedulers may also gather information about each tasks to direct their decisions. This design effectively moves the scheduler to some extent into the realm of \newterm{Control Theory}\cite{wiki:controltheory}. This gathering does not generally involve programmers and as such does not increase programmer burden the same way explicitly provided information may. However, some feedback schedulers do offer the option to programmers to offer additionnal information on certain tasks, in order to direct scheduling decision. The important distinction being whether or not the scheduler can function without this additionnal information.
 31 
 32 Feedback scheduler
 33 44 
 34 45 
-
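A minimal pthreads sketch of the suspend/re-add mechanism described in the paragraph above (illustrative only; the \CFA runtime uses its own park/unpark primitives rather than condition variables): a task that finds its dependency unfulfilled suspends itself, and the task that fulfills the dependency is responsible for waking it back up.
\begin{cfa}
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ready = PTHREAD_COND_INITIALIZER;
static bool fulfilled = false;

static void * dependent_task( void * arg ) {
	(void)arg;
	pthread_mutex_lock( &lock );
	while ( ! fulfilled )						// dependency detected as unfulfilled at runtime
		pthread_cond_wait( &ready, &lock );		// suspend: the task leaves the scheduler's view
	pthread_mutex_unlock( &lock );
	// ... proceed now that the dependency is satisfied ...
	return NULL;
}

static void * fulfilling_task( void * arg ) {
	(void)arg;
	// ... produce whatever the dependent task is waiting for ...
	pthread_mutex_lock( &lock );
	fulfilled = true;
	pthread_cond_signal( &ready );				// responsible for adding the dependent task back
	pthread_mutex_unlock( &lock );
	return NULL;
}
\end{cfa}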
doc/theses/thierry_delisle_PhD/thesis/text/io.tex
ref3c383 rd672350 1 1 \chapter{User Level \io} 2 2 As mentioned in Section~\ref{prev:io}, User-Level \io requires multiplexing the \io operations of many \glspl{thrd} onto fewer \glspl{proc} using asynchronous \io operations. 3 Different operating systems offer various forms of asynchronous operations and as mentioned in Chapter~\ref{intro}, this work is exclusively focused on the Linux operating-system.3 Different operating systems offer various forms of asynchronous operations and, as mentioned in Chapter~\ref{intro}, this work is exclusively focused on the Linux operating-system. 4 4 5 5 \section{Kernel Interface} … … 178 178 Since completions are sent to the instance where requests were submitted, all instances with pending operations must be polled continously 179 179 \footnote{As will be described in Chapter~\ref{practice}, this does not translate into constant cpu usage.}. 180 Note that once an operation completes, there is nothing that ties it to the @io_uring@ instance that handled it. 181 There is nothing preventing a new operation with, for example, the same file descriptors to a different @io_uring@ instance. 180 182 181 183 A complicating aspect of submission is @io_uring@'s support for chains of operations, where the completion of an operation triggers the submission of the next operation on the link. … … 240 242 To remove this requirement, a \gls{thrd} would need the ability to ``yield to a specific \gls{proc}'', \ie, park with the promise that it will be run next on a specific \gls{proc}, the \gls{proc} attached to the correct ring.} 241 243 , greatly simplifying both allocation and submission. 242 In this design, allocation and submission form a ringpartitionned ring buffer as shown in Figure~\ref{fig:pring}.244 In this design, allocation and submission form a partitionned ring buffer as shown in Figure~\ref{fig:pring}. 243 245 Once added to the ring buffer, the attached \gls{proc} has a significant amount of flexibility with regards to when to do the system call. 244 Possible options are: when the \gls{proc} runs out of \glspl{thrd} to run, after running a given number of threads\glspl{thrd}, etc.246 Possible options are: when the \gls{proc} runs out of \glspl{thrd} to run, after running a given number of \glspl{thrd}, etc. 245 247 246 248 \begin{figure} -
doc/user/user.tex
ref3c383 rd672350 11 11 %% Created On : Wed Apr 6 14:53:29 2016 12 12 %% Last Modified By : Peter A. Buhr 13 %% Last Modified On : Sat Feb 12 17:04:03202214 %% Update Count : 53 7613 %% Last Modified On : Mon Feb 14 17:20:39 2022 14 %% Update Count : 5382 15 15 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 16 16 … … 8223 8223 Random numbers are values generated independently, i.e., new values do not depend on previous values (independent trials), \eg lottery numbers, shuffled cards, dice roll, coin flip. 8224 8224 While a primary goal of programming is computing values that are \emph{not} random, random values are useful in simulation, cryptography, games, etc. 8225 A random-number generator is an algorithm computingindependent values.8226 If the algorithm uses deterministic computation ( predictable sequence of values), it generates \emph{pseudo} random numbers versus \emph{true} random numbers.8225 A random-number generator is an algorithm that computes independent values. 8226 If the algorithm uses deterministic computation (a predictable sequence of values), it generates \emph{pseudo} random numbers versus \emph{true} random numbers. 8227 8227 8228 8228 All \newterm{pseudo random-number generators} (\newterm{PRNG}) involve some technique to scramble bits of a value, \eg multiplicative recurrence: … … 8249 8249 Finally, a PRNG usually generates a range of large values, \eg ©[0, UINT_MAX]©, which are scaled using the modulus operator, \eg ©prng() % 5© produces random values in the range 0--4. 8250 8250 8251 \CFA provides a sequential and concurrent PRNGs.8251 \CFA provides a sequential PRNG type only accessible by a single thread (not thread-safe) and a set of global and companion thread PRNG functions accessible by multiple threads without contention. 8252 8252 \begin{itemize} 8253 8253 \item 8254 For sequential programs, like coroutining, the PRNG is used to randomize behaviour or values during execution, \eg in games, a character makes a random move or an object takes on a random value. 8254 The ©PRNG© type is for sequential programs, like coroutining: 8255 8255 \begin{cfa} 8256 8256 struct PRNG { ... }; $\C[3.75in]{// opaque type}$ … … 8264 8264 uint32_t calls( PRNG & prng ); $\C{// number of calls}\CRT$ 8265 8265 \end{cfa} 8266 Sequential execution is repeatable given the same starting seeds for all ©PRNG©s. 8267 In this scenario, it is useful to have multiple ©PRNG©, \eg one per player or object so a type is provided to generate multiple instances. 8266 A ©PRNG© object is used to randomize behaviour or values during execution, \eg in games, a character makes a random move or an object takes on a random value. 8267 In this scenario, it is useful to have multiple ©PRNG© objects, \eg one per player or object. 8268 However, sequential execution is still repeatable given the same starting seeds for all ©PRNG©s. 8268 8269 \VRef[Figure]{f:SequentialPRNG} shows an example that creates two sequential ©PRNG©s, sets both to the same seed (1009), and illustrates the three forms for generating random values, where both ©PRNG©s generate the same sequence of values. 8269 8270 … … 8307 8308 \end{tabular} 8308 8309 \end{cquote} 8309 \vspace{-10pt}8310 8310 \caption{Sequential PRNG} 8311 8311 \label{f:SequentialPRNG} … … 8313 8313 8314 8314 \item 8315 For concurrent programs, it is important the PRNG is thread-safe and not a point of contention. 
8316 A PRNG in concurrent programs is often used to randomize execution in short-running programs, \eg ©yield( prng() % 5 )©. 8317 8318 Because concurrent execution is non-deterministic, seeding the concurrent PRNG is less important, as repeatable execution is impossible. 8319 Hence, there is one system-wide PRNG (global seed) but each \CFA thread has its own non-contended PRNG state. 8320 If the global seed is set, threads start with this seed, until it is reset and than threads start with the reset seed. 8321 Hence, these threads generate the same sequence of random numbers from their specific starting seed. 8322 If the global seed is \emph{not} set, threads start with a random seed, until the global seed is set. 8323 Hence, these threads generate different sequences of random numbers. 8324 If each thread needs its own seed, use a sequential ©PRNG© in each thread. 8325 8326 There are two versions of the PRNG functions to manipulate the thread-local PRNG-state, which are differentiated by performance. 8315 The PRNG global and companion thread functions are for concurrent programming, such as randomizing execution in short-running programs, \eg ©yield( prng() % 5 )©. 8327 8316 \begin{cfa} 8328 8317 void set_seed( uint32_t seed ); $\C[3.75in]{// set global seed}$ … … 8337 8326 uint32_t prng( $thread\LstStringStyle{\textdollar}$ & th, uint32_t l, uint32_t u ); $\C{// [l,u]}\CRT$ 8338 8327 \end{cfa} 8339 The slower ©prng© functions call ©active_thread© internally to access the thread-local PRNG-state, while the faster ©prng© functions are passed a pointer to the active thread. 8340 If the thread pointer is known, \eg in a thread ©main©, eliminating the call to ©active_thread© significantly reduces the cost for accessing the thread's PRNG state. 8328 The only difference between the two sets of ©prng© routines is performance. 8329 8330 Because concurrent execution is non-deterministic, seeding the concurrent PRNG is less important, as repeatable execution is impossible. 8331 Hence, there is one system-wide PRNG (global seed) but each \CFA thread has its own non-contended PRNG state. 8332 If the global seed is set, threads start with this seed, until it is reset and then threads start with the reset seed. 8333 Hence, these threads generate the same sequence of random numbers from their specific starting seed. 8334 If the global seed is \emph{not} set, threads start with a random seed, until the global seed is set. 8335 Hence, these threads generate different sequences of random numbers. 8336 If each thread needs its own seed, use a sequential ©PRNG© in each thread. 8337 The slower ©prng© functions \emph{without} a thread argument call ©active_thread© internally to indirectly access the current thread's PRNG state, while the faster ©prng© functions \emph{with} a thread argument directly access the thread through the thread parameter. 8338 If a thread pointer is available, \eg in thread main, eliminating the call to ©active_thread© significantly reduces the cost of accessing the thread's PRNG state. 8341 8339 \VRef[Figure]{f:ConcurrentPRNG} shows an example using the slower/faster concurrent PRNG in the program main and a thread. 8342 8340 -
driver/cc1.cc
ref3c383 rd672350 10 10 // Created On : Fri Aug 26 14:23:51 2005 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Jul 21 09:46:24 202113 // Update Count : 4 1912 // Last Modified On : Thu Feb 17 18:04:23 2022 13 // Update Count : 422 14 14 // 15 15 … … 61 61 static string __CFA_FLAGPREFIX__( "__CFA_FLAG" ); // "__CFA_FLAG__=" suffix 62 62 63 static void checkEnv1( const char * args[], int & nargs ) {// stage 163 static void checkEnv1() { // stage 1 64 64 extern char ** environ; 65 65 … … 155 155 cerr << "Stage1" << endl; 156 156 #endif // __DEBUG_H__ 157 checkEnv1( args, nargs );// arguments passed via environment variables157 checkEnv1(); // arguments passed via environment variables 158 158 #ifdef __DEBUG_H__ 159 159 for ( int i = 1; i < argc; i += 1 ) { -
libcfa/src/Makefile.am
ref3c383 rd672350 63 63 containers/queueLockFree.hfa \ 64 64 containers/stackLockFree.hfa \ 65 containers/string_sharectx.hfa \ 65 66 containers/vector2.hfa \ 66 67 vec/vec.hfa \ … … 118 119 concurrency/exception.hfa \ 119 120 concurrency/kernel.hfa \ 121 concurrency/kernel/cluster.hfa \ 120 122 concurrency/locks.hfa \ 121 123 concurrency/monitor.hfa \ … … 133 135 concurrency/io/call.cfa \ 134 136 concurrency/iofwd.hfa \ 135 concurrency/kernel _private.hfa \137 concurrency/kernel/private.hfa \ 136 138 concurrency/kernel/startup.cfa \ 137 139 concurrency/preemption.cfa \ -
libcfa/src/concurrency/coroutine.cfa
ref3c383 rd672350 27 27 #include <unwind.h> 28 28 29 #include "kernel _private.hfa"29 #include "kernel/private.hfa" 30 30 #include "exception.hfa" 31 31 #include "math.hfa" -
libcfa/src/concurrency/io.cfa
ref3c383 rd672350 41 41 #include "kernel.hfa" 42 42 #include "kernel/fwd.hfa" 43 #include "kernel _private.hfa"43 #include "kernel/private.hfa" 44 44 #include "io/types.hfa" 45 45 … … 93 93 extern void __kernel_unpark( thread$ * thrd, unpark_hint ); 94 94 95 bool __cfa_io_drain( processor * proc) {95 bool __cfa_io_drain( $io_context * ctx ) { 96 96 /* paranoid */ verify( ! __preemption_enabled() ); 97 97 /* paranoid */ verify( ready_schedule_islocked() ); 98 /* paranoid */ verify( proc ); 99 /* paranoid */ verify( proc->io.ctx ); 98 /* paranoid */ verify( ctx ); 100 99 101 100 // Drain the queue 102 $io_context * ctx = proc->io.ctx;103 101 unsigned head = *ctx->cq.head; 104 102 unsigned tail = *ctx->cq.tail; … … 110 108 if(count == 0) return false; 111 109 110 if(!__atomic_try_acquire(&ctx->cq.lock)) { 111 return false; 112 } 113 112 114 for(i; count) { 113 115 unsigned idx = (head + i) & mask; … … 130 132 /* paranoid */ verify( ready_schedule_islocked() ); 131 133 /* paranoid */ verify( ! __preemption_enabled() ); 134 135 __atomic_unlock(&ctx->cq.lock); 132 136 133 137 return true; … … 175 179 /* paranoid */ verify( ! __preemption_enabled() ); 176 180 177 ctx.proc->io.pending = false;181 __atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED); 178 182 } 179 183 180 184 ready_schedule_lock(); 181 bool ret = __cfa_io_drain( proc);185 bool ret = __cfa_io_drain( &ctx ); 182 186 ready_schedule_unlock(); 183 187 return ret; … … 287 291 //============================================================================================= 288 292 // submission 289 static inline void __submit ( struct $io_context * ctx, __u32 idxs[], __u32 have, bool lazy) {293 static inline void __submit_only( struct $io_context * ctx, __u32 idxs[], __u32 have) { 290 294 // We can proceed to the fast path 291 295 // Get the right objects … … 304 308 sq.to_submit += have; 305 309 306 ctx->proc->io.pending = true; 307 ctx->proc->io.dirty = true; 310 __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED); 311 __atomic_store_n(&ctx->proc->io.dirty , true, __ATOMIC_RELAXED); 312 } 313 314 static inline void __submit( struct $io_context * ctx, __u32 idxs[], __u32 have, bool lazy) { 315 __sub_ring_t & sq = ctx->sq; 316 __submit_only(ctx, idxs, have); 317 308 318 if(sq.to_submit > 30) { 309 319 __tls_stats()->io.flush.full++; … … 402 412 // I/O Arbiter 403 413 //============================================================================================= 404 static inline void block(__outstanding_io_queue & queue, __outstanding_io & item) { 414 static inline bool enqueue(__outstanding_io_queue & queue, __outstanding_io & item) { 415 bool was_empty; 416 405 417 // Lock the list, it's not thread safe 406 418 lock( queue.lock __cfaabi_dbg_ctx2 ); 407 419 { 420 was_empty = empty(queue.queue); 421 408 422 // Add our request to the list 409 423 add( queue.queue, item ); … … 414 428 unlock( queue.lock ); 415 429 416 wait( item.sem );430 return was_empty; 417 431 } 418 432 … … 432 446 pa.want = want; 433 447 434 block(this.pending, (__outstanding_io&)pa); 448 enqueue(this.pending, (__outstanding_io&)pa); 449 450 wait( pa.sem ); 435 451 436 452 return pa.ctx; … … 485 501 ei.lazy = lazy; 486 502 487 block(ctx->ext_sq, (__outstanding_io&)ei); 503 bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei); 504 505 __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST); 506 507 if( we ) { 508 sigval_t value = { PREEMPT_IO }; 509 pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value); 510 } 511 512 wait( ei.sem ); 
488 513 489 514 __cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have); … … 501 526 __external_io & ei = (__external_io&)drop( ctx.ext_sq.queue ); 502 527 503 __submit (&ctx, ei.idxs, ei.have, ei.lazy);528 __submit_only(&ctx, ei.idxs, ei.have); 504 529 505 530 post( ei.sem ); -
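For context, a minimal sketch (not part of this changeset) of the non-blocking try-acquire/drain/release pattern that __cfa_io_drain now applies around the completion queue via cq.lock, written with plain gcc atomic builtins. The drain name and the consume_one_entry helper are hypothetical stand-ins for processing completion entries.

#include <stdbool.h>

static volatile bool cq_lock = false;
static bool consume_one_entry( void ) { return false; }        // stand-in: nothing left to consume

bool drain( void ) {
	// if another processor is already draining, give up rather than wait
	if ( __atomic_exchange_n( &cq_lock, true, __ATOMIC_ACQUIRE ) ) return false;
	while ( consume_one_entry() ) {}                           // drain whatever is currently available
	__atomic_store_n( &cq_lock, false, __ATOMIC_RELEASE );     // let a later caller drain again
	return true;
}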
libcfa/src/concurrency/io/setup.cfa
ref3c383 rd672350 39 39 40 40 #else 41 #pragma GCC diagnostic push 42 #pragma GCC diagnostic ignored "-Waddress-of-packed-member" 41 43 #include <errno.h> 42 44 #include <stdint.h> … … 56 58 57 59 #include "bitmanip.hfa" 58 #include "kernel_private.hfa" 60 #include "fstream.hfa" 61 #include "kernel/private.hfa" 59 62 #include "thread.hfa" 63 #pragma GCC diagnostic pop 60 64 61 65 void ?{}(io_context_params & this) { … … 111 115 this.ext_sq.empty = true; 112 116 (this.ext_sq.queue){}; 113 __io_uring_setup( this, cl.io.params, proc->idle_ fd );117 __io_uring_setup( this, cl.io.params, proc->idle_wctx.evfd ); 114 118 __cfadbg_print_safe(io_core, "Kernel I/O : Created ring for io_context %u (%p)\n", this.fd, &this); 115 119 } … … 121 125 __cfadbg_print_safe(io_core, "Kernel I/O : Destroyed ring for io_context %u\n", this.fd); 122 126 } 123 124 extern void __disable_interrupts_hard();125 extern void __enable_interrupts_hard();126 127 127 128 static void __io_uring_setup( $io_context & this, const io_context_params & params_in, int procfd ) { … … 213 214 214 215 // completion queue 216 cq.lock = 0; 215 217 cq.head = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.head); 216 218 cq.tail = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail); … … 226 228 __cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd); 227 229 228 __disable_interrupts_hard();229 230 230 int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1); 231 231 if (ret < 0) { 232 232 abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno)); 233 233 } 234 235 __enable_interrupts_hard();236 234 237 235 __cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd); … … 258 256 struct __sub_ring_t & sq = this.sq; 259 257 struct __cmp_ring_t & cq = this.cq; 258 { 259 __u32 fhead = sq.free_ring.head; 260 __u32 ftail = sq.free_ring.tail; 261 262 __u32 total = *sq.num; 263 __u32 avail = ftail - fhead; 264 265 if(avail != total) abort | "Processor (" | (void*)this.proc | ") tearing down ring with" | (total - avail) | "entries allocated but not submitted, out of" | total; 266 } 260 267 261 268 // unmap the submit queue entries -
libcfa/src/concurrency/io/types.hfa
ref3c383 rd672350 23 23 #include "bits/locks.hfa" 24 24 #include "bits/queue.hfa" 25 #include "iofwd.hfa" 25 26 #include "kernel/fwd.hfa" 26 27 … … 77 78 78 79 struct __cmp_ring_t { 80 volatile bool lock; 81 79 82 // Head and tail of the ring 80 83 volatile __u32 * head; … … 170 173 // void __ioctx_prepare_block($io_context & ctx); 171 174 #endif 172 173 //-----------------------------------------------------------------------174 // IO user data175 struct io_future_t {176 future_t self;177 __s32 result;178 };179 180 static inline {181 thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {182 this.result = result;183 return fulfil(this.self, do_unpark);184 }185 186 // Wait for the future to be fulfilled187 bool wait ( io_future_t & this ) { return wait (this.self); }188 void reset ( io_future_t & this ) { return reset (this.self); }189 bool available( io_future_t & this ) { return available(this.self); }190 } -
libcfa/src/concurrency/iofwd.hfa
ref3c383 rd672350 19 19 extern "C" { 20 20 #include <asm/types.h> 21 #include <sys/stat.h> // needed for mode_t 21 22 #if CFA_HAVE_LINUX_IO_URING_H 22 23 #include <linux/io_uring.h> … … 24 25 } 25 26 #include "bits/defs.hfa" 27 #include "kernel/fwd.hfa" 26 28 #include "time.hfa" 27 29 … … 47 49 48 50 struct cluster; 49 struct io_future_t;50 51 struct $io_context; 51 52 … … 57 58 58 59 struct io_uring_sqe; 60 61 //----------------------------------------------------------------------- 62 // IO user data 63 struct io_future_t { 64 future_t self; 65 __s32 result; 66 }; 67 68 static inline { 69 thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) { 70 this.result = result; 71 return fulfil(this.self, do_unpark); 72 } 73 74 // Wait for the future to be fulfilled 75 bool wait ( io_future_t & this ) { return wait (this.self); } 76 void reset ( io_future_t & this ) { return reset (this.self); } 77 bool available( io_future_t & this ) { return available(this.self); } 78 } 59 79 60 80 //---------- … … 133 153 // Check if a function is blocks a only the user thread 134 154 bool has_user_level_blocking( fptr_t func ); 155 156 #if CFA_HAVE_LINUX_IO_URING_H 157 static inline void zero_sqe(struct io_uring_sqe * sqe) { 158 sqe->flags = 0; 159 sqe->ioprio = 0; 160 sqe->fd = 0; 161 sqe->off = 0; 162 sqe->addr = 0; 163 sqe->len = 0; 164 sqe->fsync_flags = 0; 165 sqe->__pad2[0] = 0; 166 sqe->__pad2[1] = 0; 167 sqe->__pad2[2] = 0; 168 sqe->fd = 0; 169 sqe->off = 0; 170 sqe->addr = 0; 171 sqe->len = 0; 172 } 173 #endif -
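For context, a minimal sketch (not part of this changeset) of how a submission entry might be filled in after zero_sqe, assuming iofwd.hfa (which defines zero_sqe and pulls in linux/io_uring.h) is already included. The prep_read helper and its parameters are hypothetical; only standard io_uring_sqe fields are touched, and IORING_OP_READ requires Linux 5.6 or later.

#include <stdint.h>
#if CFA_HAVE_LINUX_IO_URING_H
// assumes iofwd.hfa has been included for zero_sqe and struct io_uring_sqe
static void prep_read( struct io_uring_sqe * sqe, int fd, void * buf, unsigned len, uint64_t off, uint64_t token ) {
	zero_sqe( sqe );                          // clear the reused, kernel-shared entry first
	sqe->opcode    = IORING_OP_READ;
	sqe->fd        = fd;
	sqe->addr      = (uint64_t)(uintptr_t)buf;
	sqe->len       = len;
	sqe->off       = off;
	sqe->user_data = token;                   // echoed back in the matching completion entry
}
#endif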
libcfa/src/concurrency/kernel.cfa
ref3c383 rd672350 19 19 // #define __CFA_DEBUG_PRINT_RUNTIME_CORE__ 20 20 21 #pragma GCC diagnostic push 22 #pragma GCC diagnostic ignored "-Waddress-of-packed-member" 23 21 24 //C Includes 22 25 #include <errno.h> … … 25 28 #include <signal.h> 26 29 #include <unistd.h> 30 27 31 extern "C" { 28 32 #include <sys/eventfd.h> … … 31 35 32 36 //CFA Includes 33 #include "kernel _private.hfa"37 #include "kernel/private.hfa" 34 38 #include "preemption.hfa" 35 39 #include "strstream.hfa" … … 40 44 #define __CFA_INVOKE_PRIVATE__ 41 45 #include "invoke.h" 46 #pragma GCC diagnostic pop 42 47 43 48 #if !defined(__CFA_NO_STATISTICS__) … … 131 136 static void mark_awake(__cluster_proc_list & idles, processor & proc); 132 137 133 extern void __cfa_io_start( processor * ); 134 extern bool __cfa_io_drain( processor * ); 138 extern bool __cfa_io_drain( $io_context * ); 135 139 extern bool __cfa_io_flush( processor *, int min_comp ); 136 extern void __cfa_io_stop ( processor * );137 140 static inline bool __maybe_io_drain( processor * ); 138 141 … … 159 162 verify(this); 160 163 161 io_future_t future; // used for idle sleep when io_uring is present 162 future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not 163 eventfd_t idle_val; 164 iovec idle_iovec = { &idle_val, sizeof(idle_val) }; 165 166 __cfa_io_start( this ); 164 /* paranoid */ verify( this->idle_wctx.ftr != 0p ); 165 /* paranoid */ verify( this->idle_wctx.rdbuf != 0p ); 166 167 // used for idle sleep when io_uring is present 168 // mark it as already fulfilled so we know if there is a pending request or not 169 this->idle_wctx.ftr->self.ptr = 1p; 170 iovec idle_iovec = { this->idle_wctx.rdbuf, sizeof(eventfd_t) }; 167 171 168 172 __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this); … … 231 235 } 232 236 233 idle_sleep( this, future, idle_iovec );237 idle_sleep( this, *this->idle_wctx.ftr, idle_iovec ); 234 238 235 239 // We were woken up, remove self from idle … … 251 255 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP; 252 256 253 if( this->io.pending && !this->io.dirty) {257 if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) { 254 258 __IO_STATS__(true, io.flush.dirty++; ) 255 259 __cfa_io_flush( this, 0 ); … … 259 263 __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this); 260 264 } 261 262 for(int i = 0; !available(future); i++) {263 if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has bin spinning on a flush after exit loop.\n", 60);264 __cfa_io_flush( this, 1 );265 }266 267 __cfa_io_stop( this );268 265 269 266 post( this->terminated ); … … 634 631 635 632 int fd = 1; 636 if( __atomic_load_n(&fdp-> fd, __ATOMIC_SEQ_CST) != 1 ) {637 fd = __atomic_exchange_n(&fdp-> fd, 1, __ATOMIC_RELAXED);633 if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) { 634 fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED); 638 635 } 639 636 … … 677 674 __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this); 678 675 679 this->idle_wctx. fd= 1;676 this->idle_wctx.sem = 1; 680 677 681 678 eventfd_t val; 682 679 val = 1; 683 eventfd_write( this->idle_ fd, val );680 eventfd_write( this->idle_wctx.evfd, val ); 684 681 685 682 /* paranoid */ verify( ! __preemption_enabled() ); … … 689 686 // Tell everyone we are ready to go do sleep 690 687 for() { 691 int expected = this->idle_wctx. 
fd;688 int expected = this->idle_wctx.sem; 692 689 693 690 // Someone already told us to wake-up! No time for a nap. … … 695 692 696 693 // Try to mark that we are going to sleep 697 if(__atomic_compare_exchange_n(&this->idle_wctx. fd, &expected, this->idle_fd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {694 if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) { 698 695 // Every one agreed, taking a nap 699 696 break; … … 713 710 { 714 711 eventfd_t val; 715 ssize_t ret = read( this->idle_ fd, &val, sizeof(val) );712 ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) ); 716 713 if(ret < 0) { 717 714 switch((int)errno) { … … 740 737 reset(future); 741 738 742 __kernel_read(this, future, iov, this->idle_ fd );739 __kernel_read(this, future, iov, this->idle_wctx.evfd ); 743 740 } 744 741 … … 750 747 __STATS__(true, ready.sleep.halts++; ) 751 748 752 proc.idle_wctx. fd= 0;749 proc.idle_wctx.sem = 0; 753 750 754 751 /* paranoid */ verify( ! __preemption_enabled() ); … … 842 839 if(head == tail) return false; 843 840 ready_schedule_lock(); 844 ret = __cfa_io_drain( proc);841 ret = __cfa_io_drain( ctx ); 845 842 ready_schedule_unlock(); 846 843 #endif -
libcfa/src/concurrency/kernel.hfa
ref3c383 rd672350 48 48 extern struct cluster * mainCluster; 49 49 50 // Processor id, required for scheduling threads 51 52 50 // Coroutine used by processors for the 2-step context switch 53 51 coroutine processorCtx_t { 54 52 struct processor * proc; 55 53 }; 56 54 57 55 struct io_future_t; 56 57 // Information needed for idle sleep 58 58 struct __fd_waitctx { 59 volatile int fd; 59 // semaphore/future like object 60 // values can be 0, 1 or some file descriptor. 61 // 0 - is the default state 62 // 1 - means the proc should wake-up immediately 63 // FD - means the proc is going asleep and should be woken by writing to the FD. 64 volatile int sem; 65 66 // The event FD that corresponds to this processor 67 int evfd; 68 69 // buffer into which the proc will read from evfd 70 // unused if not using io_uring for idle sleep 71 void * rdbuf; 72 73 // future use to track the read of the eventfd 74 // unused if not using io_uring for idle sleep 75 io_future_t * ftr; 60 76 }; 61 77 … 92 108 struct { 93 109 $io_context * ctx; 94 bool pending; 95 bool dirty; 110 unsigned id; 111 unsigned target; 112 volatile bool pending; 113 volatile bool dirty; 96 114 } io; 97 115 … 103 121 bool pending_preemption; 104 122 105 // Idle lock (kernel semaphore) 106 int idle_fd; 107 108 // Idle waitctx 123 // context for idle sleep 109 124 struct __fd_waitctx idle_wctx; 110 125 … 155 170 void ^?{}(__intrusive_lane_t & this); 156 171 157 // Aligned timestamps which are used by the relaxed ready queue 172 // Aligned timestamps which are used by the ready queue and io subsystem 158 173 struct __attribute__((aligned(128))) __timestamp_t { 159 174 volatile unsigned long long tv; … 161 176 }; 162 177 178 static inline void ?{}(__timestamp_t & this) { this.tv = 0; this.ma = 0; } 179 static inline void ^?{}(__timestamp_t &) {} 180 181 163 182 struct __attribute__((aligned(16))) __cache_id_t { 164 183 volatile unsigned id; 165 184 }; 166 167 // Aligned timestamps which are used by the relaxed ready queue 168 struct __attribute__((aligned(128))) __help_cnts_t { 169 volatile unsigned long long src; 170 volatile unsigned long long dst; 171 volatile unsigned long long tri; 172 }; 173 174 static inline void ?{}(__timestamp_t & this) { this.tv = 0; this.ma = 0; } 175 static inline void ^?{}(__timestamp_t &) {} 176 177 struct __attribute__((aligned(128))) __ready_queue_caches_t; 178 void ?{}(__ready_queue_caches_t & this); 179 void ^?{}(__ready_queue_caches_t & this); 180 181 //TODO adjust cache size to ARCHITECTURE 182 // Structure holding the ready queue 183 struct __ready_queue_t { 184 // Data tracking the actual lanes 185 // On a seperate cacheline from the used struct since 186 // used can change on each push/pop but this data 187 // only changes on shrink/grow 188 struct { 189 // Arary of lanes 190 __intrusive_lane_t * volatile data; 191 192 // Array of times 193 __timestamp_t * volatile tscs; 194 195 __cache_id_t * volatile caches; 196 197 // Array of stats 198 __help_cnts_t * volatile help; 199 200 // Number of lanes (empty or not) 201 volatile size_t count; 202 } lanes; 203 }; 204 205 void ?{}(__ready_queue_t & this); 206 void ^?{}(__ready_queue_t & this); 207 #if !defined(__CFA_NO_STATISTICS__) 208 unsigned cnt(const __ready_queue_t & this, unsigned idx); 209 #endif 210 185 211 186 // Idle Sleep … 233 208 // Cluster 234 209 struct __attribute__((aligned(128))) cluster { 235 // Ready queue for threads 236 __ready_queue_t ready_queue; 210 struct { 211 struct { 212 // Array of subqueues 213 __intrusive_lane_t * data; 214 215 // Time since subqueues
were processed 216 __timestamp_t * tscs; 217 218 // Number of subqueue / timestamps 219 size_t count; 220 } readyQ; 221 222 struct { 223 // Array of $io_ 224 $io_context ** data; 225 226 // Time since subqueues were processed 227 __timestamp_t * tscs; 228 229 // Number of I/O subqueues 230 size_t count; 231 } io; 232 233 // Cache each kernel thread belongs to 234 __cache_id_t * caches; 235 } sched; 236 237 // // Ready queue for threads 238 // __ready_queue_t ready_queue; 237 239 238 240 // Name of the cluster -
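For context, a minimal sketch (not part of this changeset) of the sleep/wake protocol the __fd_waitctx comments describe: sem is 0 by default, 1 once a wake-up has been requested, and the eventfd value while the processor is parked. The waitctx, wake and sleep_on names are stand-ins and error handling is omitted.

#include <sys/eventfd.h>

struct waitctx { volatile int sem; int evfd; };

static void wake( struct waitctx * w ) {
	w->sem = 1;                                   // record the wake-up first
	eventfd_t val = 1;
	eventfd_write( w->evfd, val );                // then poke the eventfd in case the proc parked
}

static void sleep_on( struct waitctx * w ) {
	int expected = w->sem;
	if ( expected == 1 ) { w->sem = 0; return; }  // wake-up already pending: skip the nap
	if ( __atomic_compare_exchange_n( &w->sem, &expected, w->evfd,
	                                  false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
		eventfd_t val;
		eventfd_read( w->evfd, &val );            // park until someone writes the eventfd
		w->sem = 0;
	}
}

int main( void ) {
	struct waitctx w = { 0, eventfd( 0, 0 ) };
	wake( &w );                                   // the wake-up arrives before the nap
	sleep_on( &w );                               // sees sem == 1 and returns immediately
	return 0;
}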
libcfa/src/concurrency/kernel/fwd.hfa
ref3c383 rd672350 347 347 struct oneshot * want = expected == 0p ? 1p : 2p; 348 348 if(__atomic_compare_exchange_n(&this.ptr, &expected, want, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 349 if( expected == 0p ) { /* paranoid */ verify( this.ptr == 1p);return 0p; }349 if( expected == 0p ) { return 0p; } 350 350 thread$ * ret = post( *expected, do_unpark ); 351 351 __atomic_store_n( &this.ptr, 1p, __ATOMIC_SEQ_CST); -
libcfa/src/concurrency/kernel/private.hfa
ref3c383 rd672350 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // kernel _private.hfa --7 // kernel/private.hfa -- 8 8 // 9 9 // Author : Thierry Delisle … … 17 17 18 18 #if !defined(__cforall_thread__) 19 #error kernel _private.hfa should only be included in libcfathread source19 #error kernel/private.hfa should only be included in libcfathread source 20 20 #endif 21 21 … … 33 33 #else 34 34 #ifndef _GNU_SOURCE 35 #error kernel _private requires gnu_source35 #error kernel/private requires gnu_source 36 36 #endif 37 37 #include <sched.h> … … 59 59 60 60 extern bool __preemption_enabled(); 61 62 enum { 63 PREEMPT_NORMAL = 0, 64 PREEMPT_TERMINATE = 1, 65 PREEMPT_IO = 2, 66 }; 61 67 62 68 static inline void __disable_interrupts_checked() { … … 359 365 void ready_queue_shrink(struct cluster * cltr); 360 366 367 //----------------------------------------------------------------------- 368 // Decrease the width of the ready queue (number of lanes) by 4 369 void ready_queue_close(struct cluster * cltr); 361 370 362 371 // Local Variables: // -
libcfa/src/concurrency/kernel/startup.cfa
ref3c383 rd672350 18 18 19 19 // C Includes 20 #include <errno.h> 20 #include <errno.h> // errno 21 21 #include <signal.h> 22 #include <string.h> 23 #include <unistd.h> 22 #include <string.h> // strerror 23 #include <unistd.h> // sysconf 24 24 25 25 extern "C" { 26 #include <limits.h> 27 #include <unistd.h> 28 #include <sys/eventfd.h> 29 #include <sys/mman.h> 30 #include <sys/resource.h> 26 #include <limits.h> // PTHREAD_STACK_MIN 27 #include <unistd.h> // syscall 28 #include <sys/eventfd.h> // eventfd 29 #include <sys/mman.h> // mprotect 30 #include <sys/resource.h> // getrlimit 31 31 } 32 32 33 33 // CFA Includes 34 #include "kernel_private.hfa" 35 #include "startup.hfa" // STARTUP_PRIORITY_XXX 34 #include "kernel/private.hfa" 35 #include "iofwd.hfa" 36 #include "startup.hfa" // STARTUP_PRIORITY_XXX 36 37 #include "limits.hfa" 37 38 #include "math.hfa" … … 97 98 extern void __kernel_alarm_startup(void); 98 99 extern void __kernel_alarm_shutdown(void); 100 extern void __cfa_io_start( processor * ); 101 extern void __cfa_io_stop ( processor * ); 99 102 100 103 //----------------------------------------------------------------------------- … … 111 114 KERNEL_STORAGE(__stack_t, mainThreadCtx); 112 115 KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock); 116 KERNEL_STORAGE(eventfd_t, mainIdleEventFd); 117 KERNEL_STORAGE(io_future_t, mainIdleFuture); 113 118 #if !defined(__CFA_NO_STATISTICS__) 114 119 KERNEL_STORAGE(__stats_t, mainProcStats); … … 224 229 (*mainProcessor){}; 225 230 231 mainProcessor->idle_wctx.rdbuf = &storage_mainIdleEventFd; 232 mainProcessor->idle_wctx.ftr = (io_future_t*)&storage_mainIdleFuture; 233 /* paranoid */ verify( sizeof(storage_mainIdleEventFd) == sizeof(eventfd_t) ); 234 226 235 register_tls( mainProcessor ); 236 __cfa_io_start( mainProcessor ); 227 237 228 238 // Start by initializing the main thread … … 304 314 mainProcessor->local_data = 0p; 305 315 316 __cfa_io_stop( mainProcessor ); 306 317 unregister_tls( mainProcessor ); 307 318 … … 355 366 register_tls( proc ); 356 367 368 __cfa_io_start( proc ); 369 370 // used for idle sleep when io_uring is present 371 io_future_t future; 372 eventfd_t idle_buf; 373 proc->idle_wctx.ftr = &future; 374 proc->idle_wctx.rdbuf = &idle_buf; 375 376 357 377 // SKULLDUGGERY: We want to create a context for the processor coroutine 358 378 // which is needed for the 2-step context switch. However, there is no reason … … 381 401 // Main routine of the core returned, the core is now fully terminated 382 402 __cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner); 403 404 __cfa_io_stop( proc ); 383 405 384 406 #if !defined(__CFA_NO_STATISTICS__) … … 515 537 this.rdq.its = 0; 516 538 this.rdq.itr = 0; 517 this.rdq.id = MAX;539 this.rdq.id = 0; 518 540 this.rdq.target = MAX; 519 541 this.rdq.last = MAX; … … 532 554 this.local_data = 0p; 533 555 534 this.idle_fd = eventfd(0, 0);535 if (idle_ fd < 0) {556 idle_wctx.evfd = eventfd(0, 0); 557 if (idle_wctx.evfd < 0) { 536 558 abort("KERNEL ERROR: PROCESSOR EVENTFD - %s\n", strerror(errno)); 537 559 } 538 560 539 this.idle_wctx.fd= 0;561 idle_wctx.sem = 0; 540 562 541 563 // I'm assuming these two are reserved for standard input and output 542 564 // so I'm using them as sentinels with idle_wctx. 
543 /* paranoid */ verify( this.idle_fd != 0 );544 /* paranoid */ verify( this.idle_fd != 1 );565 /* paranoid */ verify( idle_wctx.evfd != 0 ); 566 /* paranoid */ verify( idle_wctx.evfd != 1 ); 545 567 546 568 #if !defined(__CFA_NO_STATISTICS__) … … 554 576 // Not a ctor, it just preps the destruction but should not destroy members 555 577 static void deinit(processor & this) { 556 close(this.idle_ fd);578 close(this.idle_wctx.evfd); 557 579 } 558 580 … … 605 627 this.name = name; 606 628 this.preemption_rate = preemption_rate; 607 ready_queue{}; 629 this.sched.readyQ.data = 0p; 630 this.sched.readyQ.tscs = 0p; 631 this.sched.readyQ.count = 0; 632 this.sched.io.tscs = 0p; 633 this.sched.caches = 0p; 608 634 609 635 #if !defined(__CFA_NO_STATISTICS__) … … 644 670 // Unlock the RWlock 645 671 ready_mutate_unlock( last_size ); 672 673 ready_queue_close( &this ); 674 /* paranoid */ verify( this.sched.readyQ.data == 0p ); 675 /* paranoid */ verify( this.sched.readyQ.tscs == 0p ); 676 /* paranoid */ verify( this.sched.readyQ.count == 0 ); 677 /* paranoid */ verify( this.sched.io.tscs == 0p ); 678 /* paranoid */ verify( this.sched.caches == 0p ); 679 646 680 enable_interrupts( false ); // Don't poll, could be in main cluster 681 647 682 648 683 #if !defined(__CFA_NO_STATISTICS__) … … 736 771 check( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute 737 772 738 size_t stacksize = DEFAULT_STACK_SIZE;773 size_t stacksize = max( PTHREAD_STACK_MIN, DEFAULT_STACK_SIZE ); 739 774 740 775 void * stack; -
libcfa/src/concurrency/locks.cfa
ref3c383 rd672350 19 19 20 20 #include "locks.hfa" 21 #include "kernel _private.hfa"21 #include "kernel/private.hfa" 22 22 23 23 #include <kernel.hfa> -
libcfa/src/concurrency/locks.hfa
ref3c383 rd672350 164 164 } 165 165 166 static inline boollock(linear_backoff_then_block_lock & this) with(this) {166 static inline void lock(linear_backoff_then_block_lock & this) with(this) { 167 167 // if owner just return 168 if (active_thread() == owner) return true;168 if (active_thread() == owner) return; 169 169 size_t compare_val = 0; 170 170 int spin = spin_start; … … 172 172 for( ;; ) { 173 173 compare_val = 0; 174 if (internal_try_lock(this, compare_val)) return true;174 if (internal_try_lock(this, compare_val)) return; 175 175 if (2 == compare_val) break; 176 176 for (int i = 0; i < spin; i++) Pause(); … … 179 179 } 180 180 181 if(2 != compare_val && try_lock_contention(this)) return true;181 if(2 != compare_val && try_lock_contention(this)) return; 182 182 // block until signalled 183 while (block(this)) if(try_lock_contention(this)) return true; 184 185 // this should never be reached as block(this) always returns true 186 return false; 183 while (block(this)) if(try_lock_contention(this)) return; 187 184 } 188 185 -
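For context, a minimal sketch (not part of this changeset) of the spin-then-block shape that lock( linear_backoff_then_block_lock & ) keeps after its return type changes to void. The try_acquire and block_until_signalled helpers, the Pause stand-in, and the backoff bounds are illustrative only, not the real lock internals.

#include <stdbool.h>

static int attempts = 0;
static bool try_acquire( void )           { return ++attempts > 3; }  // stand-in: succeeds on the 4th try
static bool block_until_signalled( void ) { return true; }            // stand-in: signalled immediately

void lock_sketch( void ) {
	for ( int spin = 4; spin <= 64; spin += 4 ) {                     // assumed backoff bounds
		if ( try_acquire() ) return;                                  // fast path when uncontended
		for ( int i = 0; i < spin; i += 1 )
			__asm__ volatile( "" ::: "memory" );                      // stand-in for Pause()
	}
	while ( block_until_signalled() )                                 // give up spinning and block
		if ( try_acquire() ) return;                                  // retry once woken
}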
libcfa/src/concurrency/monitor.cfa
ref3c383 rd672350 22 22 #include <inttypes.h> 23 23 24 #include "kernel _private.hfa"24 #include "kernel/private.hfa" 25 25 26 26 #include "bits/algorithm.hfa" -
libcfa/src/concurrency/mutex.cfa
ref3c383 rd672350 21 21 #include "mutex.hfa" 22 22 23 #include "kernel _private.hfa"23 #include "kernel/private.hfa" 24 24 25 25 //----------------------------------------------------------------------------- -
libcfa/src/concurrency/mutex_stmt.hfa
ref3c383 rd672350 12 12 }; 13 13 14 15 struct __mutex_stmt_lock_guard { 16 void ** lockarr; 17 __lock_size_t count; 18 }; 19 20 static inline void ?{}( __mutex_stmt_lock_guard & this, void * lockarr [], __lock_size_t count ) { 21 this.lockarr = lockarr; 22 this.count = count; 23 24 // Sort locks based on address 25 __libcfa_small_sort(this.lockarr, count); 26 27 // acquire locks in order 28 // for ( size_t i = 0; i < count; i++ ) { 29 // lock(*this.lockarr[i]); 30 // } 31 } 32 33 static inline void ^?{}( __mutex_stmt_lock_guard & this ) with(this) { 34 // for ( size_t i = count; i > 0; i-- ) { 35 // unlock(*lockarr[i - 1]); 36 // } 37 } 38 14 39 forall(L & | is_lock(L)) { 15 16 struct __mutex_stmt_lock_guard {17 L ** lockarr;18 __lock_size_t count;19 };20 21 static inline void ?{}( __mutex_stmt_lock_guard(L) & this, L * lockarr [], __lock_size_t count ) {22 this.lockarr = lockarr;23 this.count = count;24 25 // Sort locks based on address26 __libcfa_small_sort(this.lockarr, count);27 28 // acquire locks in order29 for ( size_t i = 0; i < count; i++ ) {30 lock(*this.lockarr[i]);31 }32 }33 34 static inline void ^?{}( __mutex_stmt_lock_guard(L) & this ) with(this) {35 for ( size_t i = count; i > 0; i-- ) {36 unlock(*lockarr[i - 1]);37 }38 }39 40 40 41 struct scoped_lock { … … 51 52 } 52 53 53 static inline L * __get_ptr( L & this ) {54 static inline void * __get_mutexstmt_lock_ptr( L & this ) { 54 55 return &this; 55 56 } 56 57 57 static inline L __get_ type( L & this );58 static inline L __get_mutexstmt_lock_type( L & this ); 58 59 59 static inline L __get_ type( L * this );60 static inline L __get_mutexstmt_lock_type( L * this ); 60 61 } -
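For context, a minimal sketch (not part of this changeset) of why __mutex_stmt_lock_guard sorts its lock array by address with __libcfa_small_sort: acquiring multiple locks in one global order prevents two tasks from deadlocking by taking the same pair in opposite orders. The lock_one/unlock_one helpers and qsort comparator are stand-ins for the real acquire/release.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void lock_one  ( void * l ) { printf( "lock   %p\n", l ); }    // stand-in for a real acquire
static void unlock_one( void * l ) { printf( "unlock %p\n", l ); }    // stand-in for a real release

static int by_address( const void * a, const void * b ) {
	uintptr_t x = (uintptr_t)*(void * const *)a, y = (uintptr_t)*(void * const *)b;
	return (x > y) - (x < y);
}

void lock_all( void * locks[], size_t count ) {
	qsort( locks, count, sizeof(void *), by_address );                 // the guard uses __libcfa_small_sort
	for ( size_t i = 0; i < count; i += 1 ) lock_one( locks[i] );      // always lowest address first
}
void unlock_all( void * locks[], size_t count ) {
	for ( size_t i = count; i > 0; i -= 1 ) unlock_one( locks[i - 1] ); // release in reverse order
}

int main( void ) {
	int a, b, c;
	void * locks[] = { &b, &c, &a };
	lock_all( locks, 3 );                  // same order no matter how the caller listed the locks
	unlock_all( locks, 3 );
}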
libcfa/src/concurrency/preemption.cfa
ref3c383 rd672350 10 10 // Created On : Mon Jun 5 14:20:42 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Nov 6 07:42:13 202013 // Update Count : 5 412 // Last Modified On : Thu Feb 17 11:18:57 2022 13 // Update Count : 59 14 14 // 15 15 … … 31 31 #include "bits/debug.hfa" 32 32 #include "bits/signal.hfa" 33 #include "kernel _private.hfa"33 #include "kernel/private.hfa" 34 34 35 35 … … 97 97 } 98 98 99 enum {100 PREEMPT_NORMAL = 0,101 PREEMPT_TERMINATE = 1,102 };103 104 99 //============================================================================================= 105 100 // Kernel Preemption logic … … 243 238 //---------- 244 239 // special case for preemption since used often 245 bool __preemption_enabled() {240 __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() { 246 241 // create a assembler label before 247 242 // marked as clobber all to avoid movement … … 664 659 choose(sfp->si_value.sival_int) { 665 660 case PREEMPT_NORMAL : ;// Normal case, nothing to do here 661 case PREEMPT_IO : ;// I/O asked to stop spinning, nothing to do here 666 662 case PREEMPT_TERMINATE: verify( __atomic_load_n( &__cfaabi_tls.this_processor->do_terminate, __ATOMIC_SEQ_CST ) ); 667 663 default: -
libcfa/src/concurrency/ready_queue.cfa
ref3c383 rd672350 20 20 21 21 22 // #define USE_RELAXED_FIFO23 // #define USE_WORK_STEALING24 // #define USE_CPU_WORK_STEALING25 22 #define USE_AWARE_STEALING 26 23 27 24 #include "bits/defs.hfa" 28 25 #include "device/cpu.hfa" 29 #include "kernel _private.hfa"30 31 #include "stdlib.hfa" 26 #include "kernel/cluster.hfa" 27 #include "kernel/private.hfa" 28 32 29 #include "limits.hfa" 33 #include "math.hfa" 34 35 #include <errno.h> 36 #include <unistd.h> 37 38 extern "C" { 39 #include <sys/syscall.h> // __NR_xxx 40 } 30 31 // #include <errno.h> 32 // #include <unistd.h> 41 33 42 34 #include "ready_subqueue.hfa" … … 50 42 #endif 51 43 52 // No overriden function, no environment variable, no define53 // fall back to a magic number54 #ifndef __CFA_MAX_PROCESSORS__55 #define __CFA_MAX_PROCESSORS__ 102456 #endif57 58 #if defined(USE_AWARE_STEALING)59 #define READYQ_SHARD_FACTOR 260 #define SEQUENTIAL_SHARD 261 #elif defined(USE_CPU_WORK_STEALING)62 #define READYQ_SHARD_FACTOR 263 #elif defined(USE_RELAXED_FIFO)64 #define BIAS 465 #define READYQ_SHARD_FACTOR 466 #define SEQUENTIAL_SHARD 167 #elif defined(USE_WORK_STEALING)68 #define READYQ_SHARD_FACTOR 269 #define SEQUENTIAL_SHARD 270 #else71 #error no scheduling strategy selected72 #endif73 74 44 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)); 75 45 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)); 76 46 static inline struct thread$ * search(struct cluster * cltr); 77 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);78 79 80 // returns the maximum number of processors the RWLock support81 __attribute__((weak)) unsigned __max_processors() {82 const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");83 if(!max_cores_s) {84 __cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");85 return __CFA_MAX_PROCESSORS__;86 }87 88 char * endptr = 0p;89 long int max_cores_l = strtol(max_cores_s, &endptr, 10);90 if(max_cores_l < 1 || max_cores_l > 65535) {91 __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);92 return __CFA_MAX_PROCESSORS__;93 }94 if('\0' != *endptr) {95 __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);96 return __CFA_MAX_PROCESSORS__;97 }98 99 return max_cores_l;100 }101 102 #if defined(CFA_HAVE_LINUX_LIBRSEQ)103 // No forward declaration needed104 #define __kernel_rseq_register rseq_register_current_thread105 #define __kernel_rseq_unregister rseq_unregister_current_thread106 #elif defined(CFA_HAVE_LINUX_RSEQ_H)107 static void __kernel_raw_rseq_register (void);108 static void __kernel_raw_rseq_unregister(void);109 110 #define __kernel_rseq_register __kernel_raw_rseq_register111 #define __kernel_rseq_unregister __kernel_raw_rseq_unregister112 #else113 // No forward declaration needed114 // No initialization needed115 static inline void noop(void) {}116 117 #define __kernel_rseq_register noop118 #define __kernel_rseq_unregister noop119 #endif120 121 //=======================================================================122 // Cluster wide reader-writer lock123 //=======================================================================124 void ?{}(__scheduler_RWLock_t & this) {125 this.max = __max_processors();126 this.alloc = 0;127 this.ready = 0;128 this.data = alloc(this.max);129 this.write_lock = false;130 131 /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), 
&this.alloc));132 /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));133 134 }135 void ^?{}(__scheduler_RWLock_t & this) {136 free(this.data);137 }138 139 140 //=======================================================================141 // Lock-Free registering/unregistering of threads142 unsigned register_proc_id( void ) with(*__scheduler_lock) {143 __kernel_rseq_register();144 145 bool * handle = (bool *)&kernelTLS().sched_lock;146 147 // Step - 1 : check if there is already space in the data148 uint_fast32_t s = ready;149 150 // Check among all the ready151 for(uint_fast32_t i = 0; i < s; i++) {152 bool * volatile * cell = (bool * volatile *)&data[i]; // Cforall is bugged and the double volatiles causes problems153 /* paranoid */ verify( handle != *cell );154 155 bool * null = 0p; // Re-write every loop since compare thrashes it156 if( __atomic_load_n(cell, (int)__ATOMIC_RELAXED) == null157 && __atomic_compare_exchange_n( cell, &null, handle, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {158 /* paranoid */ verify(i < ready);159 /* paranoid */ verify( (kernelTLS().sched_id = i, true) );160 return i;161 }162 }163 164 if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->max);165 166 // Step - 2 : F&A to get a new spot in the array.167 uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);168 if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->max);169 170 // Step - 3 : Mark space as used and then publish it.171 data[n] = handle;172 while() {173 unsigned copy = n;174 if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n175 && __atomic_compare_exchange_n(&ready, ©, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))176 break;177 Pause();178 }179 180 // Return new spot.181 /* paranoid */ verify(n < ready);182 /* paranoid */ verify( (kernelTLS().sched_id = n, true) );183 return n;184 }185 186 void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {187 /* paranoid */ verify(id < ready);188 /* paranoid */ verify(id == kernelTLS().sched_id);189 /* paranoid */ verify(data[id] == &kernelTLS().sched_lock);190 191 bool * volatile * cell = (bool * volatile *)&data[id]; // Cforall is bugged and the double volatiles causes problems192 193 __atomic_store_n(cell, 0p, __ATOMIC_RELEASE);194 195 __kernel_rseq_unregister();196 }197 198 //-----------------------------------------------------------------------199 // Writer side : acquire when changing the ready queue, e.g. adding more200 // queues or removing them.201 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {202 /* paranoid */ verify( ! __preemption_enabled() );203 204 // Step 1 : lock global lock205 // It is needed to avoid processors that register mid Critical-Section206 // to simply lock their own lock and enter.207 __atomic_acquire( &write_lock );208 209 // Make sure we won't deadlock ourself210 // Checking before acquiring the writer lock isn't safe211 // because someone else could have locked us.212 /* paranoid */ verify( ! kernelTLS().sched_lock );213 214 // Step 2 : lock per-proc lock215 // Processors that are currently being registered aren't counted216 // but can't be in read_lock or in the critical section.217 // All other processors are counted218 uint_fast32_t s = ready;219 for(uint_fast32_t i = 0; i < s; i++) {220 volatile bool * llock = data[i];221 if(llock) __atomic_acquire( llock );222 }223 224 /* paranoid */ verify( ! 
__preemption_enabled() );225 return s;226 }227 228 void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {229 /* paranoid */ verify( ! __preemption_enabled() );230 231 // Step 1 : release local locks232 // This must be done while the global lock is held to avoid233 // threads that where created mid critical section234 // to race to lock their local locks and have the writer235 // immidiately unlock them236 // Alternative solution : return s in write_lock and pass it to write_unlock237 for(uint_fast32_t i = 0; i < last_s; i++) {238 volatile bool * llock = data[i];239 if(llock) __atomic_store_n(llock, (bool)false, __ATOMIC_RELEASE);240 }241 242 // Step 2 : release global lock243 /*paranoid*/ assert(true == write_lock);244 __atomic_store_n(&write_lock, (bool)false, __ATOMIC_RELEASE);245 246 /* paranoid */ verify( ! __preemption_enabled() );247 }248 249 //=======================================================================250 // caches handling251 252 struct __attribute__((aligned(128))) __ready_queue_caches_t {253 // Count States:254 // - 0 : No one is looking after this cache255 // - 1 : No one is looking after this cache, BUT it's not empty256 // - 2+ : At least one processor is looking after this cache257 volatile unsigned count;258 };259 260 void ?{}(__ready_queue_caches_t & this) { this.count = 0; }261 void ^?{}(__ready_queue_caches_t & this) {}262 263 static inline void depart(__ready_queue_caches_t & cache) {264 /* paranoid */ verify( cache.count > 1);265 __atomic_fetch_add(&cache.count, -1, __ATOMIC_SEQ_CST);266 /* paranoid */ verify( cache.count != 0);267 /* paranoid */ verify( cache.count < 65536 ); // This verify assumes no cluster will have more than 65000 kernel threads mapped to a single cache, which could be correct but is super weird.268 }269 270 static inline void arrive(__ready_queue_caches_t & cache) {271 // for() {272 // unsigned expected = cache.count;273 // unsigned desired = 0 == expected ? 2 : expected + 1;274 // }275 }276 47 277 48 //======================================================================= 278 49 // Cforall Ready Queue used for scheduling 279 50 //======================================================================= 280 unsigned long long moving_average(unsigned long long currtsc, unsigned long long instsc, unsigned long long old_avg) { 281 /* paranoid */ verifyf( currtsc < 45000000000000000, "Suspiciously large current time: %'llu (%llx)\n", currtsc, currtsc ); 282 /* paranoid */ verifyf( instsc < 45000000000000000, "Suspiciously large insert time: %'llu (%llx)\n", instsc, instsc ); 283 /* paranoid */ verifyf( old_avg < 15000000000000, "Suspiciously large previous average: %'llu (%llx)\n", old_avg, old_avg ); 284 285 const unsigned long long new_val = currtsc > instsc ? 
currtsc - instsc : 0; 286 const unsigned long long total_weight = 16; 287 const unsigned long long new_weight = 4; 288 const unsigned long long old_weight = total_weight - new_weight; 289 const unsigned long long ret = ((new_weight * new_val) + (old_weight * old_avg)) / total_weight; 290 return ret; 291 } 292 293 void ?{}(__ready_queue_t & this) with (this) { 294 #if defined(USE_CPU_WORK_STEALING) 295 lanes.count = cpu_info.hthrd_count * READYQ_SHARD_FACTOR; 296 lanes.data = alloc( lanes.count ); 297 lanes.tscs = alloc( lanes.count ); 298 lanes.help = alloc( cpu_info.hthrd_count ); 299 300 for( idx; (size_t)lanes.count ) { 301 (lanes.data[idx]){}; 302 lanes.tscs[idx].tv = rdtscl(); 303 lanes.tscs[idx].ma = rdtscl(); 304 } 305 for( idx; (size_t)cpu_info.hthrd_count ) { 306 lanes.help[idx].src = 0; 307 lanes.help[idx].dst = 0; 308 lanes.help[idx].tri = 0; 309 } 310 #else 311 lanes.data = 0p; 312 lanes.tscs = 0p; 313 lanes.caches = 0p; 314 lanes.help = 0p; 315 lanes.count = 0; 316 #endif 317 } 318 319 void ^?{}(__ready_queue_t & this) with (this) { 320 #if !defined(USE_CPU_WORK_STEALING) 321 verify( SEQUENTIAL_SHARD == lanes.count ); 322 #endif 323 324 free(lanes.data); 325 free(lanes.tscs); 326 free(lanes.caches); 327 free(lanes.help); 328 } 329 330 //----------------------------------------------------------------------- 331 #if defined(USE_AWARE_STEALING) 332 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) { 333 processor * const proc = kernelTLS().this_processor; 334 const bool external = (!proc) || (cltr != proc->cltr); 335 const bool remote = hint == UNPARK_REMOTE; 336 337 unsigned i; 338 if( external || remote ) { 339 // Figure out where thread was last time and make sure it's valid 340 /* paranoid */ verify(thrd->preferred >= 0); 341 if(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count) { 342 /* paranoid */ verify(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count); 343 unsigned start = thrd->preferred * READYQ_SHARD_FACTOR; 344 do { 345 unsigned r = __tls_rand(); 346 i = start + (r % READYQ_SHARD_FACTOR); 347 /* paranoid */ verify( i < lanes.count ); 348 // If we can't lock it retry 349 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 350 } else { 351 do { 352 i = __tls_rand() % lanes.count; 353 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 354 } 51 // void ?{}(__ready_queue_t & this) with (this) { 52 // lanes.data = 0p; 53 // lanes.tscs = 0p; 54 // lanes.caches = 0p; 55 // lanes.count = 0; 56 // } 57 58 // void ^?{}(__ready_queue_t & this) with (this) { 59 // free(lanes.data); 60 // free(lanes.tscs); 61 // free(lanes.caches); 62 // } 63 64 //----------------------------------------------------------------------- 65 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->sched) { 66 processor * const proc = kernelTLS().this_processor; 67 const bool external = (!proc) || (cltr != proc->cltr); 68 const bool remote = hint == UNPARK_REMOTE; 69 const size_t lanes_count = readyQ.count; 70 71 /* paranoid */ verify( __shard_factor.readyq > 0 ); 72 /* paranoid */ verify( lanes_count > 0 ); 73 74 unsigned i; 75 if( external || remote ) { 76 // Figure out where thread was last time and make sure it's valid 77 /* paranoid */ verify(thrd->preferred >= 0); 78 unsigned start = thrd->preferred * __shard_factor.readyq; 79 if(start < lanes_count) { 80 do { 81 unsigned r = __tls_rand(); 82 i = start + (r % __shard_factor.readyq); 83 /* paranoid */ verify( i < 
lanes_count ); 84 // If we can't lock it retry 85 } while( !__atomic_try_acquire( &readyQ.data[i].lock ) ); 355 86 } else { 356 87 do { 357 unsigned r = proc->rdq.its++; 358 i = proc->rdq.id + (r % READYQ_SHARD_FACTOR); 359 /* paranoid */ verify( i < lanes.count ); 360 // If we can't lock it retry 361 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 362 } 363 364 // Actually push it 365 push(lanes.data[i], thrd); 366 367 // Unlock and return 368 __atomic_unlock( &lanes.data[i].lock ); 369 370 #if !defined(__CFA_NO_STATISTICS__) 371 if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 372 else __tls_stats()->ready.push.local.success++; 373 #endif 374 } 375 376 static inline unsigned long long calc_cutoff(const unsigned long long ctsc, const processor * proc, __ready_queue_t & rdq) { 377 unsigned start = proc->rdq.id; 378 unsigned long long max = 0; 379 for(i; READYQ_SHARD_FACTOR) { 380 unsigned long long ptsc = ts(rdq.lanes.data[start + i]); 381 if(ptsc != -1ull) { 382 /* paranoid */ verify( start + i < rdq.lanes.count ); 383 unsigned long long tsc = moving_average(ctsc, ptsc, rdq.lanes.tscs[start + i].ma); 384 if(tsc > max) max = tsc; 385 } 386 } 387 return (max + 2 * max) / 2; 388 } 389 390 __attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 391 /* paranoid */ verify( lanes.count > 0 ); 392 /* paranoid */ verify( kernelTLS().this_processor ); 393 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count ); 394 395 processor * const proc = kernelTLS().this_processor; 396 unsigned this = proc->rdq.id; 397 /* paranoid */ verify( this < lanes.count ); 398 __cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this); 399 400 // Figure out the current cpu and make sure it is valid 401 const int cpu = __kernel_getcpu(); 402 /* paranoid */ verify(cpu >= 0); 403 /* paranoid */ verify(cpu < cpu_info.hthrd_count); 404 unsigned this_cache = cpu_info.llc_map[cpu].cache; 405 406 // Super important: don't write the same value over and over again 407 // We want to maximise our chances that his particular values stays in cache 408 if(lanes.caches[this / READYQ_SHARD_FACTOR].id != this_cache) 409 __atomic_store_n(&lanes.caches[this / READYQ_SHARD_FACTOR].id, this_cache, __ATOMIC_RELAXED); 410 411 const unsigned long long ctsc = rdtscl(); 412 413 if(proc->rdq.target == MAX) { 414 uint64_t chaos = __tls_rand(); 415 unsigned ext = chaos & 0xff; 416 unsigned other = (chaos >> 8) % (lanes.count); 417 418 if(ext < 3 || __atomic_load_n(&lanes.caches[other / READYQ_SHARD_FACTOR].id, __ATOMIC_RELAXED) == this_cache) { 419 proc->rdq.target = other; 420 } 421 } 422 else { 423 const unsigned target = proc->rdq.target; 424 __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, lanes.tscs[target].tv); 425 /* paranoid */ verify( lanes.tscs[target].tv != MAX ); 426 if(target < lanes.count) { 427 const unsigned long long cutoff = calc_cutoff(ctsc, proc, cltr->ready_queue); 428 const unsigned long long age = moving_average(ctsc, lanes.tscs[target].tv, lanes.tscs[target].ma); 429 __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? 
"yes" : "no"); 430 if(age > cutoff) { 431 thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help)); 432 if(t) return t; 433 } 434 } 435 proc->rdq.target = MAX; 436 } 437 438 for(READYQ_SHARD_FACTOR) { 439 unsigned i = this + (proc->rdq.itr++ % READYQ_SHARD_FACTOR); 440 if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 441 } 442 443 // All lanes where empty return 0p 444 return 0p; 445 446 } 447 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 448 unsigned i = __tls_rand() % lanes.count; 449 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 450 } 451 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) { 452 return search(cltr); 453 } 454 #endif 455 #if defined(USE_CPU_WORK_STEALING) 456 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) { 457 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 458 459 processor * const proc = kernelTLS().this_processor; 460 const bool external = (!proc) || (cltr != proc->cltr); 461 462 // Figure out the current cpu and make sure it is valid 463 const int cpu = __kernel_getcpu(); 464 /* paranoid */ verify(cpu >= 0); 465 /* paranoid */ verify(cpu < cpu_info.hthrd_count); 466 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count); 467 468 // Figure out where thread was last time and make sure it's 469 /* paranoid */ verify(thrd->preferred >= 0); 470 /* paranoid */ verify(thrd->preferred < cpu_info.hthrd_count); 471 /* paranoid */ verify(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count); 472 const int prf = thrd->preferred * READYQ_SHARD_FACTOR; 473 474 const cpu_map_entry_t & map; 475 choose(hint) { 476 case UNPARK_LOCAL : &map = &cpu_info.llc_map[cpu]; 477 case UNPARK_REMOTE: &map = &cpu_info.llc_map[prf]; 478 } 479 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count); 480 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count); 481 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR); 482 483 const int start = map.self * READYQ_SHARD_FACTOR; 484 unsigned i; 88 i = __tls_rand() % lanes_count; 89 } while( !__atomic_try_acquire( &readyQ.data[i].lock ) ); 90 } 91 } else { 485 92 do { 486 unsigned r; 487 if(unlikely(external)) { r = __tls_rand(); } 488 else { r = proc->rdq.its++; } 489 choose(hint) { 490 case UNPARK_LOCAL : i = start + (r % READYQ_SHARD_FACTOR); 491 case UNPARK_REMOTE: i = prf + (r % READYQ_SHARD_FACTOR); 492 } 93 unsigned r = proc->rdq.its++; 94 i = proc->rdq.id + (r % __shard_factor.readyq); 95 /* paranoid */ verify( i < lanes_count ); 493 96 // If we can't lock it retry 494 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 495 496 // Actually push it 497 push(lanes.data[i], thrd); 498 499 // Unlock and return 500 __atomic_unlock( &lanes.data[i].lock ); 501 502 #if !defined(__CFA_NO_STATISTICS__) 503 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 504 else __tls_stats()->ready.push.local.success++; 505 #endif 506 507 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 508 509 } 510 511 // Pop from the ready queue from a given cluster 512 __attribute__((hot)) thread$ * pop_fast(struct 
cluster * cltr) with (cltr->ready_queue) { 513 /* paranoid */ verify( lanes.count > 0 ); 514 /* paranoid */ verify( kernelTLS().this_processor ); 515 516 processor * const proc = kernelTLS().this_processor; 517 const int cpu = __kernel_getcpu(); 518 /* paranoid */ verify(cpu >= 0); 519 /* paranoid */ verify(cpu < cpu_info.hthrd_count); 520 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count); 521 522 const cpu_map_entry_t & map = cpu_info.llc_map[cpu]; 523 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count); 524 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count); 525 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR); 526 527 const int start = map.self * READYQ_SHARD_FACTOR; 528 const unsigned long long ctsc = rdtscl(); 529 530 // Did we already have a help target 531 if(proc->rdq.target == MAX) { 532 unsigned long long max = 0; 533 for(i; READYQ_SHARD_FACTOR) { 534 unsigned long long tsc = moving_average(ctsc, ts(lanes.data[start + i]), lanes.tscs[start + i].ma); 535 if(tsc > max) max = tsc; 536 } 537 // proc->rdq.cutoff = (max + 2 * max) / 2; 538 /* paranoid */ verify(lanes.count < 65536); // The following code assumes max 65536 cores. 539 /* paranoid */ verify(map.count < 65536); // The following code assumes max 65536 cores. 540 541 if(0 == (__tls_rand() % 100)) { 542 proc->rdq.target = __tls_rand() % lanes.count; 543 } else { 544 unsigned cpu_chaos = map.start + (__tls_rand() % map.count); 545 proc->rdq.target = (cpu_chaos * READYQ_SHARD_FACTOR) + (__tls_rand() % READYQ_SHARD_FACTOR); 546 /* paranoid */ verify(proc->rdq.target >= (map.start * READYQ_SHARD_FACTOR)); 547 /* paranoid */ verify(proc->rdq.target < ((map.start + map.count) * READYQ_SHARD_FACTOR)); 548 } 549 550 /* paranoid */ verify(proc->rdq.target != MAX); 551 } 552 else { 553 unsigned long long max = 0; 554 for(i; READYQ_SHARD_FACTOR) { 555 unsigned long long tsc = moving_average(ctsc, ts(lanes.data[start + i]), lanes.tscs[start + i].ma); 556 if(tsc > max) max = tsc; 557 } 558 const unsigned long long cutoff = (max + 2 * max) / 2; 559 { 560 unsigned target = proc->rdq.target; 561 proc->rdq.target = MAX; 562 lanes.help[target / READYQ_SHARD_FACTOR].tri++; 563 if(moving_average(ctsc, lanes.tscs[target].tv, lanes.tscs[target].ma) > cutoff) { 564 thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help)); 565 proc->rdq.last = target; 566 if(t) return t; 567 } 568 proc->rdq.target = MAX; 569 } 570 571 unsigned last = proc->rdq.last; 572 if(last != MAX && moving_average(ctsc, lanes.tscs[last].tv, lanes.tscs[last].ma) > cutoff) { 573 thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help)); 574 if(t) return t; 575 } 576 else { 577 proc->rdq.last = MAX; 578 } 579 } 580 581 for(READYQ_SHARD_FACTOR) { 582 unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR); 583 if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 584 } 585 586 // All lanes where empty return 0p 587 return 0p; 588 } 589 590 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 591 processor * const proc = kernelTLS().this_processor; 592 unsigned last = proc->rdq.last; 593 if(last != MAX) { 594 struct thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal)); 595 if(t) return t; 596 proc->rdq.last = MAX; 597 } 598 599 unsigned i = __tls_rand() % 
lanes.count; 600 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 601 } 602 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) { 603 return search(cltr); 604 } 605 #endif 606 #if defined(USE_RELAXED_FIFO) 607 //----------------------------------------------------------------------- 608 // get index from random number with or without bias towards queues 609 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) { 610 unsigned i; 611 bool local; 612 unsigned rlow = r % BIAS; 613 unsigned rhigh = r / BIAS; 614 if((0 != rlow) && preferred >= 0) { 615 // (BIAS - 1) out of BIAS chances 616 // Use perferred queues 617 i = preferred + (rhigh % READYQ_SHARD_FACTOR); 618 local = true; 619 } 620 else { 621 // 1 out of BIAS chances 622 // Use all queues 623 i = rhigh; 624 local = false; 625 } 626 return [i, local]; 627 } 628 629 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) { 630 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 631 632 const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr); 633 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count ); 634 635 bool local; 636 int preferred = external ? -1 : kernelTLS().this_processor->rdq.id; 637 638 // Try to pick a lane and lock it 639 unsigned i; 640 do { 641 // Pick the index of a lane 642 unsigned r = __tls_rand_fwd(); 643 [i, local] = idx_from_r(r, preferred); 644 645 i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); 646 647 #if !defined(__CFA_NO_STATISTICS__) 648 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED); 649 else if(local) __tls_stats()->ready.push.local.attempt++; 650 else __tls_stats()->ready.push.share.attempt++; 651 #endif 652 653 // If we can't lock it retry 654 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 655 656 // Actually push it 657 push(lanes.data[i], thrd); 658 659 // Unlock and return 660 __atomic_unlock( &lanes.data[i].lock ); 661 662 // Mark the current index in the tls rng instance as having an item 663 __tls_rand_advance_bck(); 664 665 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 666 667 // Update statistics 668 #if !defined(__CFA_NO_STATISTICS__) 669 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 670 else if(local) __tls_stats()->ready.push.local.success++; 671 else __tls_stats()->ready.push.share.success++; 672 #endif 673 } 674 675 // Pop from the ready queue from a given cluster 676 __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 677 /* paranoid */ verify( lanes.count > 0 ); 678 /* paranoid */ verify( kernelTLS().this_processor ); 679 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count ); 680 681 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); 682 int preferred = kernelTLS().this_processor->rdq.id; 683 684 685 // As long as the list is not empty, try finding a lane that isn't empty and pop from it 686 for(25) { 687 // Pick two lists at random 688 unsigned ri = __tls_rand_bck(); 689 unsigned rj = __tls_rand_bck(); 690 691 unsigned i, j; 692 __attribute__((unused)) bool locali, localj; 693 [i, locali] = idx_from_r(ri, preferred); 694 [j, localj] = 
idx_from_r(rj, preferred); 695 696 i %= count; 697 j %= count; 698 699 // try popping from the 2 picked lists 700 struct thread$ * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help))); 701 if(thrd) { 702 return thrd; 703 } 704 } 705 706 // All lanes where empty return 0p 707 return 0p; 708 } 709 710 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) { return pop_fast(cltr); } 711 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) { 712 return search(cltr); 713 } 714 #endif 715 #if defined(USE_WORK_STEALING) 716 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) { 717 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 718 719 // #define USE_PREFERRED 720 #if !defined(USE_PREFERRED) 721 const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr); 722 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count ); 723 #else 724 unsigned preferred = thrd->preferred; 725 const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || preferred == MAX || thrd->curr_cluster != cltr; 726 /* paranoid */ verifyf(external || preferred < lanes.count, "Invalid preferred queue %u for %u lanes", preferred, lanes.count ); 727 728 unsigned r = preferred % READYQ_SHARD_FACTOR; 729 const unsigned start = preferred - r; 730 #endif 731 732 // Try to pick a lane and lock it 733 unsigned i; 734 do { 735 #if !defined(__CFA_NO_STATISTICS__) 736 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED); 737 else __tls_stats()->ready.push.local.attempt++; 738 #endif 739 740 if(unlikely(external)) { 741 i = __tls_rand() % lanes.count; 742 } 743 else { 744 #if !defined(USE_PREFERRED) 745 processor * proc = kernelTLS().this_processor; 746 unsigned r = proc->rdq.its++; 747 i = proc->rdq.id + (r % READYQ_SHARD_FACTOR); 748 #else 749 i = start + (r++ % READYQ_SHARD_FACTOR); 750 #endif 751 } 752 // If we can't lock it retry 753 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 754 755 // Actually push it 756 push(lanes.data[i], thrd); 757 758 // Unlock and return 759 __atomic_unlock( &lanes.data[i].lock ); 760 761 #if !defined(__CFA_NO_STATISTICS__) 762 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 763 else __tls_stats()->ready.push.local.success++; 764 #endif 765 766 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 767 } 768 769 // Pop from the ready queue from a given cluster 770 __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 771 /* paranoid */ verify( lanes.count > 0 ); 772 /* paranoid */ verify( kernelTLS().this_processor ); 773 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count ); 774 775 processor * proc = kernelTLS().this_processor; 776 777 if(proc->rdq.target == MAX) { 778 unsigned long long min = ts(lanes.data[proc->rdq.id]); 779 for(int i = 0; i < READYQ_SHARD_FACTOR; i++) { 780 unsigned long long tsc = ts(lanes.data[proc->rdq.id + i]); 781 if(tsc < min) min = tsc; 782 } 783 proc->rdq.cutoff = min; 784 proc->rdq.target = __tls_rand() % lanes.count; 785 } 786 else { 787 unsigned target = proc->rdq.target; 788 proc->rdq.target = MAX; 789 
const unsigned long long bias = 0; //2_500_000_000; 790 const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff; 791 if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) { 97 } while( !__atomic_try_acquire( &readyQ.data[i].lock ) ); 98 } 99 100 // Actually push it 101 push(readyQ.data[i], thrd); 102 103 // Unlock and return 104 __atomic_unlock( &readyQ.data[i].lock ); 105 106 #if !defined(__CFA_NO_STATISTICS__) 107 if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 108 else __tls_stats()->ready.push.local.success++; 109 #endif 110 } 111 112 __attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->sched) { 113 const size_t lanes_count = readyQ.count; 114 115 /* paranoid */ verify( __shard_factor.readyq > 0 ); 116 /* paranoid */ verify( lanes_count > 0 ); 117 /* paranoid */ verify( kernelTLS().this_processor ); 118 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes_count ); 119 120 processor * const proc = kernelTLS().this_processor; 121 unsigned this = proc->rdq.id; 122 /* paranoid */ verify( this < lanes_count ); 123 __cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this); 124 125 // Figure out the current cache is 126 const unsigned this_cache = cache_id(cltr, this / __shard_factor.readyq); 127 const unsigned long long ctsc = rdtscl(); 128 129 if(proc->rdq.target == MAX) { 130 uint64_t chaos = __tls_rand(); 131 unsigned ext = chaos & 0xff; 132 unsigned other = (chaos >> 8) % (lanes_count); 133 134 if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.readyq].id, __ATOMIC_RELAXED) == this_cache) { 135 proc->rdq.target = other; 136 } 137 } 138 else { 139 const unsigned target = proc->rdq.target; 140 __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, readyQ.tscs[target].tv); 141 /* paranoid */ verify( readyQ.tscs[target].tv != MAX ); 142 if(target < lanes_count) { 143 const unsigned long long cutoff = calc_cutoff(ctsc, proc, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq); 144 const unsigned long long age = moving_average(ctsc, readyQ.tscs[target].tv, readyQ.tscs[target].ma); 145 __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? 
"yes" : "no"); 146 if(age > cutoff) { 792 147 thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help)); 793 148 if(t) return t; 794 149 } 795 150 } 796 797 for(READYQ_SHARD_FACTOR) { 798 unsigned i = proc->rdq.id + (proc->rdq.itr++ % READYQ_SHARD_FACTOR); 799 if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 800 } 801 return 0p; 802 } 803 804 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 805 unsigned i = __tls_rand() % lanes.count; 806 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 807 } 808 809 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) with (cltr->ready_queue) { 810 return search(cltr); 811 } 812 #endif 151 proc->rdq.target = MAX; 152 } 153 154 for(__shard_factor.readyq) { 155 unsigned i = this + (proc->rdq.itr++ % __shard_factor.readyq); 156 if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 157 } 158 159 // All lanes where empty return 0p 160 return 0p; 161 162 } 163 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) { 164 unsigned i = __tls_rand() % (cltr->sched.readyQ.count); 165 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 166 } 167 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) { 168 return search(cltr); 169 } 813 170 814 171 //======================================================================= … … 820 177 //----------------------------------------------------------------------- 821 178 // try to pop from a lane given by index w 822 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr-> ready_queue) {823 /* paranoid */ verify( w < lanes.count );179 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) { 180 /* paranoid */ verify( w < readyQ.count ); 824 181 __STATS( stats.attempt++; ) 825 182 826 183 // Get relevant elements locally 827 __intrusive_lane_t & lane = lanes.data[w];184 __intrusive_lane_t & lane = readyQ.data[w]; 828 185 829 186 // If list looks empty retry … … 845 202 // Actually pop the list 846 203 struct thread$ * thrd; 847 #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING) 848 unsigned long long tsc_before = ts(lane); 849 #endif 204 unsigned long long tsc_before = ts(lane); 850 205 unsigned long long tsv; 851 206 [thrd, tsv] = pop(lane); … … 861 216 __STATS( stats.success++; ) 862 217 863 #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING) 864 if (tsv != MAX) { 865 unsigned long long now = rdtscl(); 866 unsigned long long pma = __atomic_load_n(&lanes.tscs[w].ma, __ATOMIC_RELAXED); 867 __atomic_store_n(&lanes.tscs[w].tv, tsv, __ATOMIC_RELAXED); 868 __atomic_store_n(&lanes.tscs[w].ma, moving_average(now, tsc_before, pma), __ATOMIC_RELAXED); 869 } 870 #endif 871 872 #if defined(USE_AWARE_STEALING) || defined(USE_CPU_WORK_STEALING) 873 thrd->preferred = w / READYQ_SHARD_FACTOR; 874 #else 875 thrd->preferred = w; 876 #endif 218 if (tsv != MAX) { 219 unsigned long long now = rdtscl(); 220 unsigned long long pma = __atomic_load_n(&readyQ.tscs[w].ma, __ATOMIC_RELAXED); 221 __atomic_store_n(&readyQ.tscs[w].tv, tsv, __ATOMIC_RELAXED); 222 __atomic_store_n(&readyQ.tscs[w].ma, moving_average(now, tsc_before, pma), __ATOMIC_RELAXED); 223 } 224 225 thrd->preferred = w / 
__shard_factor.readyq; 877 226 878 227 // return the popped thread … … 883 232 // try to pop from any lanes making sure you don't miss any threads push 884 233 // before the start of the function 885 static inline struct thread$ * search(struct cluster * cltr) with (cltr->ready_queue) { 886 /* paranoid */ verify( lanes.count > 0 ); 887 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); 234 static inline struct thread$ * search(struct cluster * cltr) { 235 const size_t lanes_count = cltr->sched.readyQ.count; 236 /* paranoid */ verify( lanes_count > 0 ); 237 unsigned count = __atomic_load_n( &lanes_count, __ATOMIC_RELAXED ); 888 238 unsigned offset = __tls_rand(); 889 239 for(i; count) { … … 902 252 // get preferred ready for new thread 903 253 unsigned ready_queue_new_preferred() { 904 unsigned pref = 0;254 unsigned pref = MAX; 905 255 if(struct thread$ * thrd = publicTLS_get( this_thread )) { 906 256 pref = thrd->preferred; 907 257 } 908 else {909 #if defined(USE_CPU_WORK_STEALING)910 pref = __kernel_getcpu();911 #endif912 }913 914 #if defined(USE_CPU_WORK_STEALING)915 /* paranoid */ verify(pref >= 0);916 /* paranoid */ verify(pref < cpu_info.hthrd_count);917 #endif918 258 919 259 return pref; … … 921 261 922 262 //----------------------------------------------------------------------- 923 // Check that all the intrusive queues in the data structure are still consistent924 static void check( __ready_queue_t & q ) with (q) {925 #if defined(__CFA_WITH_VERIFY__)926 {927 for( idx ; lanes.count ) {928 __intrusive_lane_t & sl = lanes.data[idx];929 assert(!lanes.data[idx].lock);930 931 if(is_empty(sl)) {932 assert( sl.anchor.next == 0p );933 assert( sl.anchor.ts == -1llu );934 assert( mock_head(sl) == sl.prev );935 } else {936 assert( sl.anchor.next != 0p );937 assert( sl.anchor.ts != -1llu );938 assert( mock_head(sl) != sl.prev );939 }940 }941 }942 #endif943 }944 945 //-----------------------------------------------------------------------946 263 // Given 2 indexes, pick the list with the oldest push an try to pop from it 947 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr-> ready_queue) {264 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) { 948 265 // Pick the bet list 949 266 int w = i; 950 if( __builtin_expect(!is_empty( lanes.data[j]), true) ) {951 w = (ts( lanes.data[i]) < ts(lanes.data[j])) ? i : j;267 if( __builtin_expect(!is_empty(readyQ.data[j]), true) ) { 268 w = (ts(readyQ.data[i]) < ts(readyQ.data[j])) ? 
i : j; 952 269 } 953 270 954 271 return try_pop(cltr, w __STATS(, stats)); 955 272 } 956 957 // Call this function of the intrusive list was moved using memcpy958 // fixes the list so that the pointers back to anchors aren't left dangling959 static inline void fix(__intrusive_lane_t & ll) {960 if(is_empty(ll)) {961 verify(ll.anchor.next == 0p);962 ll.prev = mock_head(ll);963 }964 }965 966 static void assign_list(unsigned & value, dlist(processor) & list, unsigned count) {967 processor * it = &list`first;968 for(unsigned i = 0; i < count; i++) {969 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);970 it->rdq.id = value;971 it->rdq.target = MAX;972 value += READYQ_SHARD_FACTOR;973 it = &(*it)`next;974 }975 }976 977 static void reassign_cltr_id(struct cluster * cltr) {978 unsigned preferred = 0;979 assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);980 assign_list(preferred, cltr->procs.idles , cltr->procs.idle );981 }982 983 static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {984 #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING)985 lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);986 for(i; lanes.count) {987 lanes.tscs[i].tv = rdtscl();988 lanes.tscs[i].ma = 0;989 }990 #endif991 }992 993 #if defined(USE_CPU_WORK_STEALING)994 // ready_queue size is fixed in this case995 void ready_queue_grow(struct cluster * cltr) {}996 void ready_queue_shrink(struct cluster * cltr) {}997 #else998 // Grow the ready queue999 void ready_queue_grow(struct cluster * cltr) {1000 size_t ncount;1001 int target = cltr->procs.total;1002 1003 /* paranoid */ verify( ready_mutate_islocked() );1004 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");1005 1006 // Make sure that everything is consistent1007 /* paranoid */ check( cltr->ready_queue );1008 1009 // grow the ready queue1010 with( cltr->ready_queue ) {1011 // Find new count1012 // Make sure we always have atleast 1 list1013 if(target >= 2) {1014 ncount = target * READYQ_SHARD_FACTOR;1015 } else {1016 ncount = SEQUENTIAL_SHARD;1017 }1018 1019 // Allocate new array (uses realloc and memcpies the data)1020 lanes.data = alloc( ncount, lanes.data`realloc );1021 1022 // Fix the moved data1023 for( idx; (size_t)lanes.count ) {1024 fix(lanes.data[idx]);1025 }1026 1027 // Construct new data1028 for( idx; (size_t)lanes.count ~ ncount) {1029 (lanes.data[idx]){};1030 }1031 1032 // Update original1033 lanes.count = ncount;1034 1035 lanes.caches = alloc( target, lanes.caches`realloc );1036 }1037 1038 fix_times(cltr);1039 1040 reassign_cltr_id(cltr);1041 1042 // Make sure that everything is consistent1043 /* paranoid */ check( cltr->ready_queue );1044 1045 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");1046 1047 /* paranoid */ verify( ready_mutate_islocked() );1048 }1049 1050 // Shrink the ready queue1051 void ready_queue_shrink(struct cluster * cltr) {1052 /* paranoid */ verify( ready_mutate_islocked() );1053 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");1054 1055 // Make sure that everything is consistent1056 /* paranoid */ check( cltr->ready_queue );1057 1058 int target = cltr->procs.total;1059 1060 with( cltr->ready_queue ) {1061 // Remember old count1062 size_t ocount = lanes.count;1063 1064 // Find new count1065 // Make sure we always have atleast 1 list1066 lanes.count = target >= 2 ? 
target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD;1067 /* paranoid */ verify( ocount >= lanes.count );1068 /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );1069 1070 // for printing count the number of displaced threads1071 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)1072 __attribute__((unused)) size_t displaced = 0;1073 #endif1074 1075 // redistribute old data1076 for( idx; (size_t)lanes.count ~ ocount) {1077 // Lock is not strictly needed but makes checking invariants much easier1078 __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);1079 verify(locked);1080 1081 // As long as we can pop from this lane to push the threads somewhere else in the queue1082 while(!is_empty(lanes.data[idx])) {1083 struct thread$ * thrd;1084 unsigned long long _;1085 [thrd, _] = pop(lanes.data[idx]);1086 1087 push(cltr, thrd, true);1088 1089 // for printing count the number of displaced threads1090 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)1091 displaced++;1092 #endif1093 }1094 1095 // Unlock the lane1096 __atomic_unlock(&lanes.data[idx].lock);1097 1098 // TODO print the queue statistics here1099 1100 ^(lanes.data[idx]){};1101 }1102 1103 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);1104 1105 // Allocate new array (uses realloc and memcpies the data)1106 lanes.data = alloc( lanes.count, lanes.data`realloc );1107 1108 // Fix the moved data1109 for( idx; (size_t)lanes.count ) {1110 fix(lanes.data[idx]);1111 }1112 1113 lanes.caches = alloc( target, lanes.caches`realloc );1114 }1115 1116 fix_times(cltr);1117 1118 1119 reassign_cltr_id(cltr);1120 1121 // Make sure that everything is consistent1122 /* paranoid */ check( cltr->ready_queue );1123 1124 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");1125 /* paranoid */ verify( ready_mutate_islocked() );1126 }1127 #endif1128 1129 #if !defined(__CFA_NO_STATISTICS__)1130 unsigned cnt(const __ready_queue_t & this, unsigned idx) {1131 /* paranoid */ verify(this.lanes.count > idx);1132 return this.lanes.data[idx].cnt;1133 }1134 #endif1135 1136 1137 #if defined(CFA_HAVE_LINUX_LIBRSEQ)1138 // No definition needed1139 #elif defined(CFA_HAVE_LINUX_RSEQ_H)1140 1141 #if defined( __x86_64 ) || defined( __i386 )1142 #define RSEQ_SIG 0x530530531143 #elif defined( __ARM_ARCH )1144 #ifdef __ARMEB__1145 #define RSEQ_SIG 0xf3def5e7 /* udf #24035 ; 0x5de3 (ARMv6+) */1146 #else1147 #define RSEQ_SIG 0xe7f5def3 /* udf #24035 ; 0x5de3 */1148 #endif1149 #endif1150 1151 extern void __disable_interrupts_hard();1152 extern void __enable_interrupts_hard();1153 1154 static void __kernel_raw_rseq_register (void) {1155 /* paranoid */ verify( __cfaabi_rseq.cpu_id == RSEQ_CPU_ID_UNINITIALIZED );1156 1157 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, (sigset_t *)0p, _NSIG / 8);1158 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, RSEQ_SIG);1159 if(ret != 0) {1160 int e = errno;1161 switch(e) {1162 case EINVAL: abort("KERNEL ERROR: rseq register invalid argument");1163 case ENOSYS: abort("KERNEL ERROR: rseq register no supported");1164 case EFAULT: abort("KERNEL ERROR: rseq register with invalid argument");1165 case EBUSY : abort("KERNEL ERROR: rseq register already registered");1166 case EPERM : abort("KERNEL ERROR: rseq register sig argument on unregistration does not match the signature received on registration");1167 default: abort("KERNEL ERROR: 
rseq register unexpected return %d", e); 1168 } 1169 } 1170 } 1171 1172 static void __kernel_raw_rseq_unregister(void) { 1173 /* paranoid */ verify( __cfaabi_rseq.cpu_id >= 0 ); 1174 1175 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, (sigset_t *)0p, _NSIG / 8); 1176 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, RSEQ_SIG); 1177 if(ret != 0) { 1178 int e = errno; 1179 switch(e) { 1180 case EINVAL: abort("KERNEL ERROR: rseq unregister invalid argument"); 1181 case ENOSYS: abort("KERNEL ERROR: rseq unregister not supported"); 1182 case EFAULT: abort("KERNEL ERROR: rseq unregister with invalid argument"); 1183 case EBUSY : abort("KERNEL ERROR: rseq unregister already registered"); 1184 case EPERM : abort("KERNEL ERROR: rseq unregister sig argument on unregistration does not match the signature received on registration"); 1185 default: abort("KERNEL ERROR: rseq unregister unexpected return %d", e); 1186 } 1187 } 1188 } 1189 #else 1190 // No definition needed 1191 #endif -
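The pop_fast hunks above age each lane with an exponential moving average of push timestamps and only help another lane when its age exceeds a cutoff derived from the processor's own lanes. A minimal C-style sketch of that idea follows; the 7/8 weighting, the helper names ema_age and should_help, and the 1.5x cutoff are illustrative assumptions, since moving_average and calc_cutoff are not defined in this hunk.

#include <stdint.h>

// Sketch only: exponential moving average of a lane's age in timestamp-counter
// ticks. The 7/8 : 1/8 weighting is an assumed value, not the CFA definition.
static inline uint64_t ema_age( uint64_t now, uint64_t last_push, uint64_t prev_ma ) {
    uint64_t sample = now > last_push ? now - last_push : 0;
    return (7 * prev_ma + sample) / 8;
}

// Sketch only: help a victim lane when its average age exceeds a cutoff taken
// from the oldest local lane (1.5x, mirroring the (max + 2 * max) / 2 expression above).
static inline int should_help( uint64_t victim_age, uint64_t local_max_age ) {
    uint64_t cutoff = (local_max_age + 2 * local_max_age) / 2;
    return victim_age > cutoff;
}
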
libcfa/src/concurrency/ready_subqueue.hfa
ref3c383 rd672350 25 25 ); 26 26 return rhead; 27 }28 29 // Ctor30 void ?{}( __intrusive_lane_t & this ) {31 this.lock = false;32 this.prev = mock_head(this);33 this.anchor.next = 0p;34 this.anchor.ts = -1llu;35 #if !defined(__CFA_NO_STATISTICS__)36 this.cnt = 0;37 #endif38 39 // We add a boat-load of assertions here because the anchor code is very fragile40 /* paranoid */ _Static_assert( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );41 /* paranoid */ verify( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );42 /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.anchor) );43 /* paranoid */ verify( &mock_head(this)->link.next == &this.anchor.next );44 /* paranoid */ verify( &mock_head(this)->link.ts == &this.anchor.ts );45 /* paranoid */ verify( mock_head(this)->link.next == 0p );46 /* paranoid */ verify( mock_head(this)->link.ts == -1llu );47 /* paranoid */ verify( mock_head(this) == this.prev );48 /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 );49 /* paranoid */ verify( __alignof__(this) == 128 );50 /* paranoid */ verifyf( ((intptr_t)(&this) % 128) == 0, "Expected address to be aligned %p %% 128 == %zd", &this, ((intptr_t)(&this) % 128) );51 }52 53 // Dtor is trivial54 void ^?{}( __intrusive_lane_t & this ) {55 // Make sure the list is empty56 /* paranoid */ verify( this.anchor.next == 0p );57 /* paranoid */ verify( this.anchor.ts == -1llu );58 /* paranoid */ verify( mock_head(this) == this.prev );59 27 } 60 28 -
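The constructor and destructor removed above spell out the invariants behind mock_head: a lane stores only the head's link fields (the anchor), and the pretend head node is recovered by subtracting the offset of the link member. A rough C sketch of that layout trick, using simplified stand-in types (thread_stub, lane_stub) rather than the real thread$ and __intrusive_lane_t:

#include <stddef.h>

struct thread_stub {                       // stand-in for thread$
    void * stack;                          // ... unrelated fields ...
    struct { struct thread_stub * next; unsigned long long ts; } link;
};

struct lane_stub {                         // stand-in for __intrusive_lane_t
    volatile _Bool lock;
    struct thread_stub * prev;             // last element, or the mock head when empty
    struct { struct thread_stub * next; unsigned long long ts; } anchor;
};

// The anchor sits exactly where a node's link would be, so push/pop can treat
// an empty lane uniformly; the returned pointer is only ever used through its
// link fields and is never dereferenced as a whole thread_stub.
static inline struct thread_stub * mock_head( struct lane_stub * l ) {
    return (struct thread_stub *)((char *)&l->anchor - offsetof(struct thread_stub, link));
}
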
libcfa/src/concurrency/thread.cfa
ref3c383 rd672350 19 19 #include "thread.hfa" 20 20 21 #include "kernel _private.hfa"21 #include "kernel/private.hfa" 22 22 #include "exception.hfa" 23 23 -
libcfa/src/containers/string.cfa
ref3c383 rd672350 92 92 } 93 93 94 string ?=?(string & this, string other) {94 string & ?=?(string & this, string & other) { //// <---- straw man change 95 95 (*this.inner) = (*other.inner); 96 96 return this; … … 235 235 int find(const string &s, const char* search, size_t searchsize) { 236 236 return find( *s.inner, search, searchsize); 237 } 238 239 int findFrom(const string &s, size_t fromPos, char search) { 240 return findFrom( *s.inner, fromPos, search ); 241 } 242 243 int findFrom(const string &s, size_t fromPos, const string &search) { 244 return findFrom( *s.inner, fromPos, *search.inner ); 245 } 246 247 int findFrom(const string &s, size_t fromPos, const char* search) { 248 return findFrom( *s.inner, fromPos, search ); 249 } 250 251 int findFrom(const string &s, size_t fromPos, const char* search, size_t searchsize) { 252 return findFrom( *s.inner, fromPos, search, searchsize ); 237 253 } 238 254 -
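The straw-man change above makes string assignment take and return its operands by reference, which the header comments tie to avoiding calls to the autogenerated copy routines. A hypothetical Cforall sketch of what the reference return enables (the include path is an assumption):

#include <containers/string.hfa>           // assumed include path for the string wrapper

int main() {
    string a, b, c = "hello";
    // ?=?(b, c) now yields a string&, so the outer assignment reuses it directly;
    // a by-value return would construct, copy, and destroy a temporary string per link
    a = b = c;
}
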
libcfa/src/containers/string.hfa
ref3c383 rd672350 41 41 void ?=?(string &s, const string &other); 42 42 void ?=?(string &s, char other); 43 string ?=?(string &s, string other); // string tolerates memcpys; still saw calls to autogen44 43 string & ?=?(string &s, string &other); // surprising ret seems to help avoid calls to autogen 44 //string ?=?( string &, string ) = void; 45 45 void ^?{}(string &s); 46 46 … … 93 93 int find(const string &s, const char* search, size_t searchsize); 94 94 95 int findFrom(const string &s, size_t fromPos, char search); 96 int findFrom(const string &s, size_t fromPos, const string &search); 97 int findFrom(const string &s, size_t fromPos, const char* search); 98 int findFrom(const string &s, size_t fromPos, const char* search, size_t searchsize); 99 95 100 bool includes(const string &s, const string &search); 96 101 bool includes(const string &s, const char* search); -
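The findFrom overloads declared above let a search resume from a given position. A hypothetical Cforall usage sketch, assuming the include path, a size routine for string, and the string_res convention that a failed search returns the string's length:

#include <fstream.hfa>
#include <containers/string.hfa>           // assumed include path for the string wrapper

int main() {
    string s = "mississippi";
    // visit every 's'; on a miss findFrom is assumed to return size(s), ending the loop
    for ( int pos = find( s, 's' ); pos < size( s ); pos = findFrom( s, pos + 1, 's' ) ) {
        sout | "'s' at index" | pos;
    }
}
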
libcfa/src/containers/string_res.cfa
ref3c383 rd672350 15 15 16 16 #include "string_res.hfa" 17 #include <stdlib.hfa> // e.g. malloc 18 #include <string.h> // e.g. strlen 17 #include "string_sharectx.hfa" 18 #include "stdlib.hfa" 19 20 // Workaround for observed performance penalty from calling CFA's alloc. 21 // Workaround is: EndVbyte = TEMP_ALLOC(char, CurrSize) 22 // Should be: EndVbyte = alloc(CurrSize) 23 #define TEMP_ALLOC(T, n) (( T* ) malloc( n * sizeof( T ) )) 24 25 #include <assert.h> 19 26 20 27 //######################### VbyteHeap "header" ######################### 21 22 23 24 25 26 27 28 29 // DON'T COMMIT:30 // #define VbyteDebug31 32 33 34 35 28 36 29 #ifdef VbyteDebug … … 54 47 55 48 56 static inlinevoid compaction( VbyteHeap & ); // compaction of the byte area57 static inline void garbage( VbyteHeap &); // garbage collect the byte area58 static inlinevoid extend( VbyteHeap &, int ); // extend the size of the byte area59 static inlinevoid reduce( VbyteHeap &, int ); // reduce the size of the byte area60 61 static inline void ?{}( VbyteHeap &, int = 1000 );62 static inlinevoid ^?{}( VbyteHeap & );63 static inline void ByteCopy( VbyteHeap &, char *, int, int, char *, int, int ); // copy a block of bytes from one location in the heap to another 64 static in line int ByteCmp( VbyteHeap &,char *, int, int, char *, int, int ); // compare 2 blocks of bytes65 static inlinechar *VbyteAlloc( VbyteHeap &, int ); // allocate a block bytes in the heap66 67 68 static inlinevoid AddThisAfter( HandleNode &, HandleNode & );69 static inlinevoid DeleteNode( HandleNode & );70 static inlinevoid MoveThisAfter( HandleNode &, const HandleNode & ); // move current handle after parameter handle49 static void compaction( VbyteHeap & ); // compaction of the byte area 50 static void garbage( VbyteHeap &, int ); // garbage collect the byte area 51 static void extend( VbyteHeap &, int ); // extend the size of the byte area 52 static void reduce( VbyteHeap &, int ); // reduce the size of the byte area 53 54 static void ?{}( VbyteHeap &, size_t = 1000 ); 55 static void ^?{}( VbyteHeap & ); 56 57 static int ByteCmp( char *, int, int, char *, int, int ); // compare 2 blocks of bytes 58 static char *VbyteAlloc( VbyteHeap &, int ); // allocate a block bytes in the heap 59 static char *VbyteTryAdjustLast( VbyteHeap &, int ); 60 61 static void AddThisAfter( HandleNode &, HandleNode & ); 62 static void DeleteNode( HandleNode & ); 63 static void MoveThisAfter( HandleNode &, const HandleNode & ); // move current handle after parameter handle 71 64 72 65 73 66 // Allocate the storage for the variable sized area and intialize the heap variables. 74 67 75 static inline void ?{}( VbyteHeap & this, int Size ) with(this) {68 static void ?{}( VbyteHeap & this, size_t Size ) with(this) { 76 69 #ifdef VbyteDebug 77 70 serr | "enter:VbyteHeap::VbyteHeap, this:" | &this | " Size:" | Size; … … 79 72 NoOfCompactions = NoOfExtensions = NoOfReductions = 0; 80 73 InitSize = CurrSize = Size; 81 StartVbyte = EndVbyte = alloc(CurrSize);74 StartVbyte = EndVbyte = TEMP_ALLOC(char, CurrSize); 82 75 ExtVbyte = (void *)( StartVbyte + CurrSize ); 83 76 Header.flink = Header.blink = &Header; 77 Header.ulink = & this; 84 78 #ifdef VbyteDebug 85 79 HeaderPtr = &Header; … … 91 85 // Release the dynamically allocated storage for the byte area. 92 86 93 static inlinevoid ^?{}( VbyteHeap & this ) with(this) {87 static void ^?{}( VbyteHeap & this ) with(this) { 94 88 free( StartVbyte ); 95 89 } // ~VbyteHeap … … 102 96 // creator. 
103 97 104 void ?{}( HandleNode & this ) with(this) {98 static void ?{}( HandleNode & this ) with(this) { 105 99 #ifdef VbyteDebug 106 100 serr | "enter:HandleNode::HandleNode, this:" | &this; … … 117 111 // collection. 118 112 119 void ?{}( HandleNode & this, VbyteHeap & vh ) with(this) {113 static void ?{}( HandleNode & this, VbyteHeap & vh ) with(this) { 120 114 #ifdef VbyteDebug 121 115 serr | "enter:HandleNode::HandleNode, this:" | &this; … … 123 117 s = 0; 124 118 lnth = 0; 119 ulink = &vh; 125 120 AddThisAfter( this, *vh.Header.blink ); 126 121 #ifdef VbyteDebug … … 133 128 // is the responsibility of the creator to destroy it. 134 129 135 void ^?{}( HandleNode & this ) with(this) {130 static void ^?{}( HandleNode & this ) with(this) { 136 131 #ifdef VbyteDebug 137 132 serr | "enter:HandleNode::~HandleNode, this:" | & this; … … 149 144 } // ~HandleNode 150 145 146 147 //######################### String Sharing Context ######################### 148 149 static string_sharectx * ambient_string_sharectx; // fickle top of stack 150 static string_sharectx default_string_sharectx = {NEW_SHARING}; // stable bottom of stack 151 152 void ?{}( string_sharectx & this, StringSharectx_Mode mode ) with( this ) { 153 (older){ ambient_string_sharectx }; 154 if ( mode == NEW_SHARING ) { 155 (activeHeap){ new( (size_t) 1000 ) }; 156 } else { 157 verify( mode == NO_SHARING ); 158 (activeHeap){ 0p }; 159 } 160 ambient_string_sharectx = & this; 161 } 162 163 void ^?{}( string_sharectx & this ) with( this ) { 164 if ( activeHeap ) delete( activeHeap ); 165 166 // unlink this from older-list starting from ambient_string_sharectx 167 // usually, this==ambient_string_sharectx and the loop runs zero times 168 string_sharectx *& c = ambient_string_sharectx; 169 while ( c != &this ) &c = &c->older; // find this 170 c = this.older; // unlink 171 } 172 151 173 //######################### String Resource ######################### 152 174 153 175 154 VbyteHeap HeapArea; 155 156 VbyteHeap * DEBUG_string_heap = & HeapArea; 176 VbyteHeap * DEBUG_string_heap() { 177 assert( ambient_string_sharectx->activeHeap && "No sharing context is active" ); 178 return ambient_string_sharectx->activeHeap; 179 } 157 180 158 181 size_t DEBUG_string_bytes_avail_until_gc( VbyteHeap * heap ) { … … 160 183 } 161 184 185 size_t DEBUG_string_bytes_in_heap( VbyteHeap * heap ) { 186 return heap->CurrSize; 187 } 188 162 189 const char * DEBUG_string_heap_start( VbyteHeap * heap ) { 163 190 return heap->StartVbyte; 164 191 } 165 166 192 167 193 // Returns the size of the string in bytes … … 187 213 // Store auto-newline state so it can be restored 188 214 bool anl = getANL$(out); 189 nlOff(out); 190 for (size_t i = 0; i < s.Handle.lnth; i++) { 191 // Need to re-apply on the last output operator, for whole-statement version 192 if (anl && i == s.Handle.lnth-1) nlOn(out); 193 out | s[i]; 194 } 195 return out; 215 if( s.Handle.lnth == 0 ) { 216 sout | ""; 217 } else { 218 nlOff(out); 219 for (size_t i = 0; i < s.Handle.lnth; i++) { 220 // Need to re-apply on the last output operator, for whole-statement version 221 if (anl && i == s.Handle.lnth-1) nlOn(out); 222 out | s[i]; 223 } 224 } 196 225 } 197 226 198 227 // Empty constructor 199 228 void ?{}(string_res &s) with(s) { 200 (Handle){ HeapArea }; 229 if( ambient_string_sharectx->activeHeap ) { 230 (Handle){ * ambient_string_sharectx->activeHeap }; 231 (shareEditSet_owns_ulink){ false }; 232 verify( Handle.s == 0p && Handle.lnth == 0 ); 233 } else { 234 (Handle){ * new( (size_t) 10 ) }; // TODO: 
can I lazily avoid allocating for empty string 235 (shareEditSet_owns_ulink){ true }; 236 Handle.s = Handle.ulink->StartVbyte; 237 verify( Handle.lnth == 0 ); 238 } 201 239 s.shareEditSet_prev = &s; 202 240 s.shareEditSet_next = &s; 203 241 } 204 242 243 static void eagerCopyCtorHelper(string_res &s, const char* rhs, size_t rhslnth) with(s) { 244 if( ambient_string_sharectx->activeHeap ) { 245 (Handle){ * ambient_string_sharectx->activeHeap }; 246 (shareEditSet_owns_ulink){ false }; 247 } else { 248 (Handle){ * new( rhslnth ) }; 249 (shareEditSet_owns_ulink){ true }; 250 } 251 Handle.s = VbyteAlloc(*Handle.ulink, rhslnth); 252 Handle.lnth = rhslnth; 253 memmove( Handle.s, rhs, rhslnth ); 254 s.shareEditSet_prev = &s; 255 s.shareEditSet_next = &s; 256 } 257 205 258 // Constructor from a raw buffer and size 206 259 void ?{}(string_res &s, const char* rhs, size_t rhslnth) with(s) { 207 (Handle){ HeapArea }; 208 Handle.s = VbyteAlloc(HeapArea, rhslnth); 260 eagerCopyCtorHelper(s, rhs, rhslnth); 261 } 262 263 // private ctor (not in header): use specified heap (ignore ambient) and copy chars in 264 void ?{}( string_res &s, VbyteHeap & heap, const char* rhs, size_t rhslnth ) with(s) { 265 (Handle){ heap }; 266 Handle.s = VbyteAlloc(*Handle.ulink, rhslnth); 209 267 Handle.lnth = rhslnth; 210 for ( int i = 0; i < rhslnth; i += 1 ) { // copy characters 211 Handle.s[i] = rhs[i]; 212 } // for 268 (s.shareEditSet_owns_ulink){ false }; 269 memmove( Handle.s, rhs, rhslnth ); 213 270 s.shareEditSet_prev = &s; 214 271 s.shareEditSet_next = &s; 215 272 } 216 273 217 // String literal constructor218 void ?{}(string_res &s, const char* rhs) {219 (s){ rhs, strlen(rhs) };220 }221 222 274 // General copy constructor 223 275 void ?{}(string_res &s, const string_res & s2, StrResInitMode mode, size_t start, size_t end ) { 224 276 225 (s.Handle){ HeapArea }; 226 s.Handle.s = s2.Handle.s + start; 227 s.Handle.lnth = end - start; 228 MoveThisAfter(s.Handle, s2.Handle ); // insert this handle after rhs handle 229 // ^ bug? 
skip others at early point in string 230 231 if (mode == COPY_VALUE) { 232 // make s alone in its shareEditSet 233 s.shareEditSet_prev = &s; 234 s.shareEditSet_next = &s; 277 verify( start <= end && end <= s2.Handle.lnth ); 278 279 if (s2.Handle.ulink != ambient_string_sharectx->activeHeap && mode == COPY_VALUE) { 280 // crossing heaps (including private): copy eagerly 281 eagerCopyCtorHelper(s, s2.Handle.s + start, end - start); 282 verify(s.shareEditSet_prev == &s); 283 verify(s.shareEditSet_next == &s); 235 284 } else { 236 assert( mode == SHARE_EDITS ); 237 238 // s2 is logically const but not implementation const 239 string_res & s2mod = (string_res &) s2; 240 241 // insert s after s2 on shareEditSet 242 s.shareEditSet_next = s2mod.shareEditSet_next; 243 s.shareEditSet_prev = &s2mod; 244 s.shareEditSet_next->shareEditSet_prev = &s; 245 s.shareEditSet_prev->shareEditSet_next = &s; 246 } 247 } 248 249 void assign(string_res &this, const char* buffer, size_t bsize) { 250 251 // traverse the incumbent share-edit set (SES) to recover the range of a base string to which `this` belongs 252 string_res * shareEditSetStartPeer = & this; 253 string_res * shareEditSetEndPeer = & this; 254 for (string_res * editPeer = this.shareEditSet_next; editPeer != &this; editPeer = editPeer->shareEditSet_next) { 255 if ( editPeer->Handle.s < shareEditSetStartPeer->Handle.s ) { 256 shareEditSetStartPeer = editPeer; 285 (s.Handle){}; 286 s.Handle.s = s2.Handle.s + start; 287 s.Handle.lnth = end - start; 288 s.Handle.ulink = s2.Handle.ulink; 289 290 AddThisAfter(s.Handle, s2.Handle ); // insert this handle after rhs handle 291 // ^ bug? skip others at early point in string 292 293 if (mode == COPY_VALUE) { 294 verify(s2.Handle.ulink == ambient_string_sharectx->activeHeap); 295 // requested logical copy in same heap: defer copy until write 296 297 (s.shareEditSet_owns_ulink){ false }; 298 299 // make s alone in its shareEditSet 300 s.shareEditSet_prev = &s; 301 s.shareEditSet_next = &s; 302 } else { 303 verify( mode == SHARE_EDITS ); 304 // sharing edits with source forces same heap as source (ignore context) 305 306 (s.shareEditSet_owns_ulink){ s2.shareEditSet_owns_ulink }; 307 308 // s2 is logically const but not implementation const 309 string_res & s2mod = (string_res &) s2; 310 311 // insert s after s2 on shareEditSet 312 s.shareEditSet_next = s2mod.shareEditSet_next; 313 s.shareEditSet_prev = &s2mod; 314 s.shareEditSet_next->shareEditSet_prev = &s; 315 s.shareEditSet_prev->shareEditSet_next = &s; 257 316 } 258 if ( shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth < editPeer->Handle.s + editPeer->Handle.lnth) { 259 shareEditSetEndPeer = editPeer; 260 } 261 } 262 263 // full string is from start of shareEditSetStartPeer thru end of shareEditSetEndPeer 264 // `this` occurs in the middle of it, to be replaced 265 // build up the new text in `pasting` 266 267 string_res pasting = { 268 shareEditSetStartPeer->Handle.s, // start of SES 269 this.Handle.s - shareEditSetStartPeer->Handle.s }; // length of SES, before this 270 append( pasting, 271 buffer, // start of replacement for this 272 bsize ); // length of replacement for this 273 append( pasting, 274 this.Handle.s + this.Handle.lnth, // start of SES after this 275 shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth - 276 (this.Handle.s + this.Handle.lnth) ); // length of SES, after this 277 278 // The above string building can trigger compaction. 
279 // The reference points (that are arguments of the string building) may move during that building. 280 // From this point on, they are stable. 281 // So now, capture their values for use in the overlap cases, below. 282 // Do not factor these definitions with the arguments used above. 317 } 318 } 319 320 static void assignEditSet(string_res & this, string_res * shareEditSetStartPeer, string_res * shareEditSetEndPeer, 321 char * resultSesStart, 322 size_t resultSesLnth, 323 HandleNode * resultPadPosition, size_t bsize ) { 283 324 284 325 char * beforeBegin = shareEditSetStartPeer->Handle.s; … … 290 331 size_t oldLnth = this.Handle.lnth; 291 332 292 this.Handle.s = pasting.Handle.s+ beforeLen;333 this.Handle.s = resultSesStart + beforeLen; 293 334 this.Handle.lnth = bsize; 294 MoveThisAfter( this.Handle, pasting.Handle ); 335 if (resultPadPosition) 336 MoveThisAfter( this.Handle, *resultPadPosition ); 295 337 296 338 // adjust all substring string and handle locations, and check if any substring strings are outside the new base string 297 char *limit = pasting.Handle.s + pasting.Handle.lnth;339 char *limit = resultSesStart + resultSesLnth; 298 340 for (string_res * p = this.shareEditSet_next; p != &this; p = p->shareEditSet_next) { 299 assert(p->Handle.s >= beforeBegin);341 verify (p->Handle.s >= beforeBegin); 300 342 if ( p->Handle.s >= afterBegin ) { 301 assert( p->Handle.s <= afterBegin + afterLen );302 assert( p->Handle.s + p->Handle.lnth <= afterBegin + afterLen );343 verify ( p->Handle.s <= afterBegin + afterLen ); 344 verify ( p->Handle.s + p->Handle.lnth <= afterBegin + afterLen ); 303 345 // p starts after the edit 304 346 // take start and end as end-anchored … … 318 360 } else { 319 361 // p ends after the edit 320 assert( p->Handle.s + p->Handle.lnth <= afterBegin + afterLen );362 verify ( p->Handle.s + p->Handle.lnth <= afterBegin + afterLen ); 321 363 // take end as end-anchored 322 364 // stretch-shrink p according to the edit … … 326 368 // take start as start-anchored 327 369 size_t startOffsetFromStart = p->Handle.s - beforeBegin; 328 p->Handle.s = pasting.Handle.s+ startOffsetFromStart;370 p->Handle.s = resultSesStart + startOffsetFromStart; 329 371 } else { 330 assert( p->Handle.s < afterBegin );372 verify ( p->Handle.s < afterBegin ); 331 373 // p starts during the edit 332 assert( p->Handle.s + p->Handle.lnth >= beforeBegin + beforeLen );374 verify( p->Handle.s + p->Handle.lnth >= beforeBegin + beforeLen ); 333 375 if ( p->Handle.s + p->Handle.lnth < afterBegin ) { 334 376 // p ends during the edit; p does not include the last character replaced … … 344 386 } 345 387 } 346 MoveThisAfter( p->Handle, pasting.Handle ); // move substring handle to maintain sorted order by string position 347 } 348 } 349 350 void ?=?(string_res &s, const char* other) { 351 assign(s, other, strlen(other)); 352 } 353 354 void ?=?(string_res &s, char other) { 355 assign(s, &other, 1); 388 if (resultPadPosition) 389 MoveThisAfter( p->Handle, *resultPadPosition ); // move substring handle to maintain sorted order by string position 390 } 391 } 392 393 static string_res & assign_(string_res &this, const char* buffer, size_t bsize, const string_res & valSrc) { 394 395 // traverse the incumbent share-edit set (SES) to recover the range of a base string to which `this` belongs 396 string_res * shareEditSetStartPeer = & this; 397 string_res * shareEditSetEndPeer = & this; 398 for (string_res * editPeer = this.shareEditSet_next; editPeer != &this; editPeer = editPeer->shareEditSet_next) { 399 if ( 
editPeer->Handle.s < shareEditSetStartPeer->Handle.s ) { 400 shareEditSetStartPeer = editPeer; 401 } 402 if ( shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth < editPeer->Handle.s + editPeer->Handle.lnth) { 403 shareEditSetEndPeer = editPeer; 404 } 405 } 406 407 verify( shareEditSetEndPeer->Handle.s >= shareEditSetStartPeer->Handle.s ); 408 size_t origEditSetLength = shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth - shareEditSetStartPeer->Handle.s; 409 verify( origEditSetLength >= this.Handle.lnth ); 410 411 if ( this.shareEditSet_owns_ulink ) { // assigning to private context 412 // ok to overwrite old value within LHS 413 char * prefixStartOrig = shareEditSetStartPeer->Handle.s; 414 int prefixLen = this.Handle.s - prefixStartOrig; 415 char * suffixStartOrig = this.Handle.s + this.Handle.lnth; 416 int suffixLen = shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth - suffixStartOrig; 417 418 int delta = bsize - this.Handle.lnth; 419 if ( char * oldBytes = VbyteTryAdjustLast( *this.Handle.ulink, delta ) ) { 420 // growing: copy from old to new 421 char * dest = VbyteAlloc( *this.Handle.ulink, origEditSetLength + delta ); 422 char *destCursor = dest; memcpy(destCursor, prefixStartOrig, prefixLen); 423 destCursor += prefixLen; memcpy(destCursor, buffer , bsize ); 424 destCursor += bsize; memcpy(destCursor, suffixStartOrig, suffixLen); 425 assignEditSet(this, shareEditSetStartPeer, shareEditSetEndPeer, 426 dest, 427 origEditSetLength + delta, 428 0p, bsize); 429 free( oldBytes ); 430 } else { 431 // room is already allocated in-place: bubble suffix and overwite middle 432 memmove( suffixStartOrig + delta, suffixStartOrig, suffixLen ); 433 memcpy( this.Handle.s, buffer, bsize ); 434 435 assignEditSet(this, shareEditSetStartPeer, shareEditSetEndPeer, 436 shareEditSetStartPeer->Handle.s, 437 origEditSetLength + delta, 438 0p, bsize); 439 } 440 441 } else if ( // assigning to shared context 442 this.Handle.lnth == origEditSetLength && // overwriting entire run of SES 443 & valSrc && // sourcing from a managed string 444 valSrc.Handle.ulink == this.Handle.ulink ) { // sourcing from same heap 445 446 // SES's result will only use characters from the source string => reuse source 447 assignEditSet(this, shareEditSetStartPeer, shareEditSetEndPeer, 448 valSrc.Handle.s, 449 valSrc.Handle.lnth, 450 &((string_res&)valSrc).Handle, bsize); 451 452 } else { 453 // overwriting a proper substring of some string: mash characters from old and new together (copy on write) 454 // OR we are importing characters: need to copy eagerly (can't refer to source) 455 456 // full string is from start of shareEditSetStartPeer thru end of shareEditSetEndPeer 457 // `this` occurs in the middle of it, to be replaced 458 // build up the new text in `pasting` 459 460 string_res pasting = { 461 * this.Handle.ulink, // maintain same heap, regardless of context 462 shareEditSetStartPeer->Handle.s, // start of SES 463 this.Handle.s - shareEditSetStartPeer->Handle.s }; // length of SES, before this 464 append( pasting, 465 buffer, // start of replacement for this 466 bsize ); // length of replacement for this 467 append( pasting, 468 this.Handle.s + this.Handle.lnth, // start of SES after this 469 shareEditSetEndPeer->Handle.s + shareEditSetEndPeer->Handle.lnth - 470 (this.Handle.s + this.Handle.lnth) ); // length of SES, after this 471 472 // The above string building can trigger compaction. 
473 // The reference points (that are arguments of the string building) may move during that building. 474 // From this point on, they are stable. 475 476 assignEditSet(this, shareEditSetStartPeer, shareEditSetEndPeer, 477 pasting.Handle.s, 478 pasting.Handle.lnth, 479 &pasting.Handle, bsize); 480 } 481 482 return this; 483 } 484 485 string_res & assign(string_res &this, const char* buffer, size_t bsize) { 486 return assign_(this, buffer, bsize, *0p); 487 } 488 489 string_res & ?=?(string_res &s, char other) { 490 return assign(s, &other, 1); 356 491 } 357 492 358 493 // Copy assignment operator 359 void?=?(string_res & this, const string_res & rhs) with( this ) {360 assign(this, rhs.Handle.s, rhs.Handle.lnth);361 } 362 363 void?=?(string_res & this, string_res & rhs) with( this ) {494 string_res & ?=?(string_res & this, const string_res & rhs) with( this ) { 495 return assign_(this, rhs.Handle.s, rhs.Handle.lnth, rhs); 496 } 497 498 string_res & ?=?(string_res & this, string_res & rhs) with( this ) { 364 499 const string_res & rhs2 = rhs; 365 this = rhs2;500 return this = rhs2; 366 501 } 367 502 … … 374 509 s.shareEditSet_prev->shareEditSet_next = s.shareEditSet_next; 375 510 s.shareEditSet_next->shareEditSet_prev = s.shareEditSet_prev; 376 s.shareEditSet_next = &s; 377 s.shareEditSet_prev = &s; 511 // s.shareEditSet_next = &s; 512 // s.shareEditSet_prev = &s; 513 514 if (shareEditSet_owns_ulink && s.shareEditSet_next == &s) { // last one out 515 delete( s.Handle.ulink ); 516 } 378 517 } 379 518 … … 387 526 } 388 527 528 void assignAt(const string_res &s, size_t index, char val) { 529 string_res editZone = { s, SHARE_EDITS, index, index+1 }; 530 assign(editZone, &val, 1); 531 } 532 389 533 390 534 /////////////////////////////////////////////////////////////////// … … 392 536 393 537 void append(string_res &str1, const char * buffer, size_t bsize) { 394 size_t clnth = s ize(str1)+ bsize;395 if ( str1.Handle.s + s ize(str1)== buffer ) { // already juxtapose ?538 size_t clnth = str1.Handle.lnth + bsize; 539 if ( str1.Handle.s + str1.Handle.lnth == buffer ) { // already juxtapose ? 396 540 // no-op 397 541 } else { // must copy some text 398 if ( str1.Handle.s + s ize(str1) == VbyteAlloc(HeapArea, 0) ) { // str1 at end of string area ?399 VbyteAlloc( HeapArea, bsize); // create room for 2nd part at the end of string area542 if ( str1.Handle.s + str1.Handle.lnth == VbyteAlloc(*str1.Handle.ulink, 0) ) { // str1 at end of string area ? 
543 VbyteAlloc( *str1.Handle.ulink, bsize ); // create room for 2nd part at the end of string area 400 544 } else { // copy the two parts 401 char * str1oldBuf = str1.Handle.s; 402 str1.Handle.s = VbyteAlloc( HeapArea, clnth ); 403 ByteCopy( HeapArea, str1.Handle.s, 0, str1.Handle.lnth, str1oldBuf, 0, str1.Handle.lnth); 545 char * str1newBuf = VbyteAlloc( *str1.Handle.ulink, clnth ); 546 char * str1oldBuf = str1.Handle.s; // must read after VbyteAlloc call in case it gs's 547 str1.Handle.s = str1newBuf; 548 memcpy( str1.Handle.s, str1oldBuf, str1.Handle.lnth ); 404 549 } // if 405 ByteCopy( HeapArea, str1.Handle.s, str1.Handle.lnth, bsize, (char*)buffer, 0, (int)bsize); 406 // VbyteHeap & this, char *Dst, int DstStart, int DstLnth, char *Src, int SrcStart, int SrcLnth 550 memcpy( str1.Handle.s + str1.Handle.lnth, buffer, bsize ); 407 551 } // if 408 552 str1.Handle.lnth = clnth; … … 417 561 } 418 562 419 void ?+=?(string_res &s, const char* other) {420 append( s, other, strlen(other) );421 }422 563 423 564 … … 429 570 430 571 bool ?==?(const string_res &s1, const string_res &s2) { 431 return ByteCmp( HeapArea,s1.Handle.s, 0, s1.Handle.lnth, s2.Handle.s, 0, s2.Handle.lnth) == 0;572 return ByteCmp( s1.Handle.s, 0, s1.Handle.lnth, s2.Handle.s, 0, s2.Handle.lnth) == 0; 432 573 } 433 574 … … 455 596 456 597 int find(const string_res &s, char search) { 457 for (i; size(s)) { 458 if (s[i] == search) return i; 459 } 460 return size(s); 461 } 598 return findFrom(s, 0, search); 599 } 600 601 int findFrom(const string_res &s, size_t fromPos, char search) { 602 // FIXME: This paricular overload (find of single char) is optimized to use memchr. 603 // The general overload (find of string, memchr applying to its first character) and `contains` should be adjusted to match. 604 char * searchFrom = s.Handle.s + fromPos; 605 size_t searchLnth = s.Handle.lnth - fromPos; 606 int searchVal = search; 607 char * foundAt = (char *) memchr(searchFrom, searchVal, searchLnth); 608 if (foundAt == 0p) return s.Handle.lnth; 609 else return foundAt - s.Handle.s; 610 } 611 612 int find(const string_res &s, const string_res &search) { 613 return findFrom(s, 0, search); 614 } 615 616 int findFrom(const string_res &s, size_t fromPos, const string_res &search) { 617 return findFrom(s, fromPos, search.Handle.s, search.Handle.lnth); 618 } 619 620 int find(const string_res &s, const char* search) { 621 return findFrom(s, 0, search); 622 } 623 int findFrom(const string_res &s, size_t fromPos, const char* search) { 624 return findFrom(s, fromPos, search, strlen(search)); 625 } 626 627 int find(const string_res &s, const char* search, size_t searchsize) { 628 return findFrom(s, 0, search, searchsize); 629 } 630 631 int findFrom(const string_res &s, size_t fromPos, const char* search, size_t searchsize) { 462 632 463 633 /* Remaining implementations essentially ported from Sunjay's work */ 464 634 465 int find(const string_res &s, const string_res &search) { 466 return find(s, search.Handle.s, search.Handle.lnth); 467 } 468 469 int find(const string_res &s, const char* search) { 470 return find(s, search, strlen(search)); 471 } 472 473 int find(const string_res &s, const char* search, size_t searchsize) { 635 474 636 // FIXME: This is a naive algorithm. We probably want to switch to someting 475 637 // like Boyer-Moore in the future. 
… … 481 643 } 482 644 483 for (size_t i = 0; i < s.Handle.lnth; i++) {645 for (size_t i = fromPos; i < s.Handle.lnth; i++) { 484 646 size_t remaining = s.Handle.lnth - i; 485 647 // Never going to find the search string if the remaining string is … … 596 758 // Add a new HandleNode node n after the current HandleNode node. 597 759 598 static inlinevoid AddThisAfter( HandleNode & this, HandleNode & n ) with(this) {760 static void AddThisAfter( HandleNode & this, HandleNode & n ) with(this) { 599 761 #ifdef VbyteDebug 600 762 serr | "enter:AddThisAfter, this:" | &this | " n:" | &n; 601 763 #endif // VbyteDebug 764 // Performance note: we are on the critical path here. MB has ensured that the verifies don't contribute to runtime (are compiled away, like they're supposed to be). 765 verify( n.ulink != 0p ); 766 verify( this.ulink == n.ulink ); 602 767 flink = n.flink; 603 768 blink = &n; … … 624 789 // Delete the current HandleNode node. 625 790 626 static inlinevoid DeleteNode( HandleNode & this ) with(this) {791 static void DeleteNode( HandleNode & this ) with(this) { 627 792 #ifdef VbyteDebug 628 793 serr | "enter:DeleteNode, this:" | &this; … … 638 803 639 804 // Allocates specified storage for a string from byte-string area. If not enough space remains to perform the 640 // allocation, the garbage collection routine is called and a second attempt is made to allocate the space. If the 641 // second attempt fails, a further attempt is made to create a new, larger byte-string area. 642 643 static inline char * VbyteAlloc( VbyteHeap & this, int size ) with(this) { 805 // allocation, the garbage collection routine is called. 806 807 static char * VbyteAlloc( VbyteHeap & this, int size ) with(this) { 644 808 #ifdef VbyteDebug 645 809 serr | "enter:VbyteAlloc, size:" | size; … … 650 814 NoBytes = ( uintptr_t )EndVbyte + size; 651 815 if ( NoBytes > ( uintptr_t )ExtVbyte ) { // enough room for new byte-string ? 652 garbage( this ); // firer up the garbage collector 653 NoBytes = ( uintptr_t )EndVbyte + size; // try again 654 if ( NoBytes > ( uintptr_t )ExtVbyte ) { // enough room for new byte-string ? 655 assert( 0 && "need to implement actual growth" ); 656 // extend( size ); // extend the byte-string area 657 } // if 816 garbage( this, size ); // firer up the garbage collector 817 verify( (( uintptr_t )EndVbyte + size) <= ( uintptr_t )ExtVbyte && "garbage run did not free up required space" ); 658 818 } // if 659 819 r = EndVbyte; … … 666 826 667 827 828 // Adjusts the last allocation in this heap by delta bytes, or resets this heap to be able to offer 829 // new allocations of its original size + delta bytes. Positive delta means bigger; 830 // negative means smaller. A null return indicates that the original heap location has room for 831 // the requested growth. A non-null return indicates that copying to a new location is required 832 // but has not been done; the returned value is the old heap storage location; `this` heap is 833 // modified to reference the new location. In the copy-requred case, the caller should use 834 // VbyteAlloc to claim the new space, while doing optimal copying from old to new, then free old. 
835 836 static char * VbyteTryAdjustLast( VbyteHeap & this, int delta ) with(this) { 837 838 if ( ( uintptr_t )EndVbyte + delta <= ( uintptr_t )ExtVbyte ) { 839 // room available 840 EndVbyte += delta; 841 return 0p; 842 } 843 844 char *oldBytes = StartVbyte; 845 846 NoOfExtensions += 1; 847 CurrSize *= 2; 848 StartVbyte = EndVbyte = TEMP_ALLOC(char, CurrSize); 849 ExtVbyte = StartVbyte + CurrSize; 850 851 return oldBytes; 852 } 853 854 668 855 // Move an existing HandleNode node h somewhere after the current HandleNode node so that it is in ascending order by 669 856 // the address in the byte string area. 670 857 671 static inlinevoid MoveThisAfter( HandleNode & this, const HandleNode & h ) with(this) {858 static void MoveThisAfter( HandleNode & this, const HandleNode & h ) with(this) { 672 859 #ifdef VbyteDebug 673 860 serr | "enter:MoveThisAfter, this:" | & this | " h:" | & h; 674 861 #endif // VbyteDebug 862 verify( h.ulink != 0p ); 863 verify( this.ulink == h.ulink ); 675 864 if ( s < h.s ) { // check argument values 676 865 // serr | "VbyteSM: Error - Cannot move byte string starting at:" | s | " after byte string starting at:" 677 866 // | ( h->s ) | " and keep handles in ascending order"; 678 867 // exit(-1 ); 679 assert( 0 && "VbyteSM: Error - Cannot move byte strings as requested and keep handles in ascending order");868 verify( 0 && "VbyteSM: Error - Cannot move byte strings as requested and keep handles in ascending order"); 680 869 } // if 681 870 … … 709 898 //######################### VbyteHeap ######################### 710 899 711 // Move characters from one location in the byte-string area to another. The routine handles the following situations:712 //713 // if the |Src| > |Dst| => truncate714 // if the |Dst| > |Src| => pad Dst with blanks715 716 void ByteCopy( VbyteHeap & this, char *Dst, int DstStart, int DstLnth, char *Src, int SrcStart, int SrcLnth ) {717 for ( int i = 0; i < DstLnth; i += 1 ) {718 if ( i == SrcLnth ) { // |Dst| > |Src|719 for ( ; i < DstLnth; i += 1 ) { // pad Dst with blanks720 Dst[DstStart + i] = ' ';721 } // for722 break;723 } // exit724 Dst[DstStart + i] = Src[SrcStart + i];725 } // for726 } // ByteCopy727 728 900 // Compare two byte strings in the byte-string area. The routine returns the following values: 729 901 // … … 732 904 // -1 => Src1-byte-string < Src2-byte-string 733 905 734 int ByteCmp( VbyteHeap & this, char *Src1, int Src1Start, int Src1Lnth, char *Src2, int Src2Start, int Src2Lnth ) with(this){906 int ByteCmp( char *Src1, int Src1Start, int Src1Lnth, char *Src2, int Src2Start, int Src2Lnth ) { 735 907 #ifdef VbyteDebug 736 908 serr | "enter:ByteCmp, Src1Start:" | Src1Start | " Src1Lnth:" | Src1Lnth | " Src2Start:" | Src2Start | " Src2Lnth:" | Src2Lnth; … … 789 961 h = Header.flink; // ignore header node 790 962 for (;;) { 791 ByteCopy( this, EndVbyte, 0, h->lnth, h->s, 0, h->lnth );963 memmove( EndVbyte, h->s, h->lnth ); 792 964 obase = h->s; 793 965 h->s = EndVbyte; … … 810 982 811 983 984 static double heap_expansion_freespace_threshold = 0.1; // default inherited from prior work: expand heap when less than 10% "free" (i.e. garbage) 985 // probably an unreasonable default, but need to assess early-round tests on changing it 986 987 void TUNING_set_string_heap_liveness_threshold( double val ) { 988 heap_expansion_freespace_threshold = 1.0 - val; 989 } 990 991 812 992 // Garbage determines the amount of free space left in the heap and then reduces, leave the same, or extends the size of 813 993 // the heap. 
The heap is then compacted in the existing heap or into the newly allocated heap. 814 994 815 void garbage(VbyteHeap & this ) with(this) {995 void garbage(VbyteHeap & this, int minreq ) with(this) { 816 996 #ifdef VbyteDebug 817 997 serr | "enter:garbage"; … … 837 1017 AmountFree = ( uintptr_t )ExtVbyte - ( uintptr_t )StartVbyte - AmountUsed; 838 1018 839 if ( AmountFree < ( int )( CurrSize * 0.1 )) { // free space less than 10% ? 840 841 assert( 0 && "need to implement actual growth" ); 842 // extend( CurrSize ); // extend the heap 1019 if ( ( double ) AmountFree < ( CurrSize * heap_expansion_freespace_threshold ) || AmountFree < minreq ) { // free space less than threshold or not enough to serve cur request 1020 1021 extend( this, max( CurrSize, minreq ) ); // extend the heap 843 1022 844 1023 // Peter says, "This needs work before it should be used." … … 846 1025 // reduce(( AmountFree / CurrSize - 3 ) * CurrSize ); // reduce the memory 847 1026 848 } // if 849 compaction(this); // compact the byte area, in the same or new heap area 1027 // `extend` implies a `compaction` during the copy 1028 1029 } else { 1030 compaction(this); // in-place 1031 }// if 850 1032 #ifdef VbyteDebug 851 1033 { … … 867 1049 #undef VbyteDebug 868 1050 869 //WIP870 #if 0871 1051 872 1052 … … 874 1054 // area is deleted. 875 1055 876 void VbyteHeap::extend( int size) {1056 void extend( VbyteHeap & this, int size ) with (this) { 877 1057 #ifdef VbyteDebug 878 1058 serr | "enter:extend, size:" | size; … … 884 1064 885 1065 CurrSize += size > InitSize ? size : InitSize; // minimum extension, initial size 886 StartVbyte = EndVbyte = new char[CurrSize];1066 StartVbyte = EndVbyte = TEMP_ALLOC(char, CurrSize); 887 1067 ExtVbyte = (void *)( StartVbyte + CurrSize ); 888 compaction( ); // copy from old heap to new & adjust pointers to new heap889 delete OldStartVbyte; // release old heap1068 compaction(this); // copy from old heap to new & adjust pointers to new heap 1069 free( OldStartVbyte ); // release old heap 890 1070 #ifdef VbyteDebug 891 1071 serr | "exit:extend, CurrSize:" | CurrSize; … … 893 1073 } // extend 894 1074 1075 //WIP 1076 #if 0 895 1077 896 1078 // Extend the size of the byte-string area by creating a new area and copying the old area into it. The old byte-string -
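garbage() above now chooses between in-place compaction and extension based on a tunable liveness threshold (TUNING_set_string_heap_liveness_threshold) and the size of the current request. The toy C sketch below shows the same grow-when-too-little-free decision for a plain bump allocator; the byte_heap type, the doubling policy, and treating everything below the bump pointer as live are simplifying assumptions, and the real code compacts by walking the handle list rather than copying the raw region.

#include <stdlib.h>
#include <string.h>

struct byte_heap {
    char * start, * bump, * end;           // [start,bump) in use, [bump,end) free
    double free_threshold;                 // e.g. 0.1: act when less than 10% would stay free
};

static char * heap_alloc( struct byte_heap * h, size_t n ) {
    size_t freespace = (size_t)(h->end - h->bump);
    size_t cursize   = (size_t)(h->end - h->start);
    if ( freespace < n || (double)(freespace - n) < cursize * h->free_threshold ) {
        // grow: at least double, and always enough for the current request
        size_t newsize = cursize * 2 > cursize + n ? cursize * 2 : cursize + n;
        char * nstart  = malloc( newsize );
        // the real heap compacts live handles while copying; this sketch just moves the block
        memcpy( nstart, h->start, (size_t)(h->bump - h->start) );
        free( h->start );
        h->bump  = nstart + (h->bump - h->start);
        h->end   = nstart + newsize;
        h->start = nstart;
    }
    char * r = h->bump;
    h->bump += n;
    return r;
}
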
libcfa/src/containers/string_res.hfa
ref3c383 rd672350 17 17 18 18 #include <fstream.hfa> 19 #include <string.h> // e.g. strlen 19 20 20 21 … … 27 28 HandleNode *flink; // forward link 28 29 HandleNode *blink; // backward link 30 VbyteHeap *ulink; // upward link 29 31 30 32 char *s; // pointer to byte string … … 32 34 }; // HandleNode 33 35 34 void ?{}( HandleNode & ); // constructor for header node 35 36 void ?{}( HandleNode &, VbyteHeap & ); // constructor for nodes in the handle list 37 void ^?{}( HandleNode & ); // destructor for handle nodes 38 39 extern VbyteHeap * DEBUG_string_heap; 36 VbyteHeap * DEBUG_string_heap(); 37 size_t DEBUG_string_bytes_in_heap( VbyteHeap * heap ); 40 38 size_t DEBUG_string_bytes_avail_until_gc( VbyteHeap * heap ); 41 39 const char * DEBUG_string_heap_start( VbyteHeap * heap ); 42 40 41 void TUNING_set_string_heap_liveness_threshold( double val ); 43 42 44 43 //######################### String ######################### … … 47 46 struct string_res { 48 47 HandleNode Handle; // chars, start, end, global neighbours 48 bool shareEditSet_owns_ulink; 49 49 string_res * shareEditSet_prev; 50 50 string_res * shareEditSet_next; … … 74 74 // Constructors, Assignment Operators, Destructor 75 75 void ?{}(string_res &s); // empty string 76 void ?{}(string_res &s, const char* initial); // copy from string literal (NULL-terminated)77 76 void ?{}(string_res &s, const char* buffer, size_t bsize); // copy specific length from buffer 77 static inline void ?{}(string_res &s, const char* rhs) { // copy from string literal (NULL-terminated) 78 (s){ rhs, strlen(rhs) }; 79 } 78 80 79 81 void ?{}(string_res &s, const string_res & s2) = void; … … 86 88 } 87 89 88 void assign(string_res &s, const char* buffer, size_t bsize); // copy specific length from buffer 89 void ?=?(string_res &s, const char* other); // copy from string literal (NULL-terminated) 90 void ?=?(string_res &s, const string_res &other); 91 void ?=?(string_res &s, string_res &other); 92 void ?=?(string_res &s, char other); 90 string_res & assign(string_res &s, const char* buffer, size_t bsize); // copy specific length from buffer 91 static inline string_res & ?=?(string_res &s, const char* other) { // copy from string literal (NULL-terminated) 92 return assign(s, other, strlen(other)); 93 } 94 string_res & ?=?(string_res &s, const string_res &other); 95 string_res & ?=?(string_res &s, string_res &other); 96 string_res & ?=?(string_res &s, char other); 93 97 94 98 void ^?{}(string_res &s); … … 99 103 100 104 // Concatenation 105 void append(string_res &s, const char* buffer, size_t bsize); 101 106 void ?+=?(string_res &s, char other); // append a character 102 107 void ?+=?(string_res &s, const string_res &s2); // append-concatenate to first string 103 void ?+=?(string_res &s, const char* other); 104 void append(string_res &s, const char* buffer, size_t bsize); 108 static inline void ?+=?(string_res &s, const char* other) { 109 append( s, other, strlen(other) ); 110 } 105 111 106 112 // Character access 113 void assignAt(const string_res &s, size_t index, char val); 107 114 char ?[?](const string_res &s, size_t index); // Mike changed to ret by val from Sunjay's ref, to match Peter's 108 115 //char codePointAt(const string_res &s, size_t index); // revisit under Unicode … … 121 128 int find(const string_res &s, const char* search); 122 129 int find(const string_res &s, const char* search, size_t searchsize); 130 131 int findFrom(const string_res &s, size_t fromPos, char search); 132 int findFrom(const string_res &s, size_t fromPos, const string_res 
&search); 133 int findFrom(const string_res &s, size_t fromPos, const char* search); 134 int findFrom(const string_res &s, size_t fromPos, const char* search, size_t searchsize); 123 135 124 136 bool includes(const string_res &s, const string_res &search); -
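Editorial note on string_res.hfa: with <string.h> now included, the literal-based constructor, ?=?, and ?+=? become static inline wrappers that measure the literal once with strlen and delegate to the buffer-plus-length routines, and assignment now returns string_res & so calls can be chained. A rough C++ rendering of that wrapper pattern (Str and assign are hypothetical stand-ins, not the library API):

    #include <cstring>
    #include <cstddef>

    struct Str { /* ... */ };                                   // stand-in for string_res

    Str & assign( Str & s, const char * buf, std::size_t n );   // the one real implementation

    // Thin inline wrapper: the NUL-terminated overload funnels into the
    // length-based routine, so every copy path shares a single code body.
    inline Str & assign( Str & s, const char * rhs ) {
        return assign( s, rhs, std::strlen( rhs ) );
    }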
libcfa/src/math.trait.hfa
ref3c383 rd672350 16 16 #pragma once 17 17 18 trait Not( T) {19 void ?{}( T&, zero_t );20 int !?( T);18 trait Not( U ) { 19 void ?{}( U &, zero_t ); 20 int !?( U ); 21 21 }; // Not 22 22 … … 26 26 }; // Equality 27 27 28 trait Relational( T | Equality( T) ) {29 int ?<?( T, T);30 int ?<=?( T, T);31 int ?>?( T, T);32 int ?>=?( T, T);28 trait Relational( U | Equality( U ) ) { 29 int ?<?( U, U ); 30 int ?<=?( U, U ); 31 int ?>?( U, U ); 32 int ?>=?( U, U ); 33 33 }; // Relational 34 34 … … 39 39 }; // Signed 40 40 41 trait Additive( T | Signed( T) ) {42 T ?+?( T, T);43 T ?-?( T, T);44 T ?+=?( T &, T);45 T ?-=?( T &, T);41 trait Additive( U | Signed( U ) ) { 42 U ?+?( U, U ); 43 U ?-?( U, U ); 44 U ?+=?( U &, U ); 45 U ?-=?( U &, U ); 46 46 }; // Additive 47 47 … … 49 49 void ?{}( T &, one_t ); 50 50 // T ?++( T & ); 51 // T ++?( T & );51 // T ++?( T & ); 52 52 // T ?--( T & ); 53 53 // T --?( T & ); 54 54 }; // Incdec 55 55 56 trait Multiplicative( T | Incdec( T) ) {57 T ?*?( T, T);58 T ?/?( T, T);59 T ?%?( T, T);60 T ?/=?( T &, T);56 trait Multiplicative( U | Incdec( U ) ) { 57 U ?*?( U, U ); 58 U ?/?( U, U ); 59 U ?%?( U, U ); 60 U ?/=?( U &, U ); 61 61 }; // Multiplicative 62 62 -
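Editorial note on math.trait.hfa: the renames give each refining trait a parameter name (U) distinct from the parameter of the trait it asserts, e.g. Relational( U | Equality( U ) ); presumably this keeps the outer parameter from colliding with the T used inside the asserted trait's own declaration. For readers more familiar with C++, a loose C++20 concepts analogue of one trait refining another under its own parameter name (only an analogy; CFA traits are not concepts):

    #include <concepts>

    template<typename T>
    concept Equality = requires( T a, T b ) {
        { a == b } -> std::convertible_to<int>;
        { a != b } -> std::convertible_to<int>;
    };

    // The refinement uses its own parameter name U and asserts Equality on it,
    // the same shape as trait Relational( U | Equality( U ) ).
    template<typename U>
    concept Relational = Equality<U> && requires( U a, U b ) {
        { a <  b } -> std::convertible_to<int>;
        { a <= b } -> std::convertible_to<int>;
    };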
src/AST/Convert.cpp
ref3c383 rd672350 9 9 // Author : Thierry Delisle 10 10 // Created On : Thu May 09 15::37::05 2019 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Wed Feb 2 13:19:22202213 // Update Count : 4 111 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Mar 16 15:01:00 2022 13 // Update Count : 42 14 14 // 15 15 … … 49 49 //================================================================================================ 50 50 namespace ast { 51 52 // This is to preserve the FindSpecialDecls hack. It does not (and perhaps should not) 53 // allow us to use the same stratagy in the new ast. 54 // xxx - since convert back pass works, this concern seems to be unnecessary. 55 56 // these need to be accessed in new FixInit now 57 ast::ptr<ast::Type> sizeType = nullptr; 58 const ast::FunctionDecl * dereferenceOperator = nullptr; 59 const ast::StructDecl * dtorStruct = nullptr; 60 const ast::FunctionDecl * dtorStructDestroy = nullptr; 51 // These are the shared local information used by ConverterNewToOld and 52 // ConverterOldToNew to update the global information in the two versions. 53 54 static ast::ptr<ast::Type> sizeType = nullptr; 55 static const ast::FunctionDecl * dereferenceOperator = nullptr; 56 static const ast::StructDecl * dtorStruct = nullptr; 57 static const ast::FunctionDecl * dtorStructDestroy = nullptr; 61 58 62 59 } -
src/AST/Decl.cpp
ref3c383 rd672350 39 39 if ( uniqueId ) return; // ensure only set once 40 40 uniqueId = ++lastUniqueId; 41 idMap[ uniqueId ] = this; 41 // The extra readonly pointer is causing some reference counting issues. 42 // idMap[ uniqueId ] = this; 42 43 } 43 44 44 45 readonly<Decl> Decl::fromId( UniqueId id ) { 46 // Right now this map is always empty, so don't use it. 47 assert( false ); 45 48 IdMapType::const_iterator i = idMap.find( id ); 46 49 if ( i != idMap.end() ) return i->second; -
src/AST/Fwd.hpp
ref3c383 rd672350 141 141 142 142 class TranslationUnit; 143 // TODO: Get from the TranslationUnit: 144 extern ptr<Type> sizeType; 145 extern const FunctionDecl * dereferenceOperator; 146 extern const StructDecl * dtorStruct; 147 extern const FunctionDecl * dtorStructDestroy; 143 class TranslationGlobal; 148 144 149 145 } -
src/AST/GenericSubstitution.cpp
ref3c383 rd672350 45 45 visit_children = false; 46 46 const AggregateDecl * aggr = ty->aggr(); 47 sub = TypeSubstitution { aggr->params.begin(), aggr->params.end(), ty->params.begin() };47 sub = TypeSubstitution( aggr->params, ty->params ); 48 48 } 49 49 -
src/AST/TranslationUnit.hpp
ref3c383 rd672350 10 10 // Created On : Tue Jun 11 15:30:00 2019 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tue Jun 11 15:42:00 201913 // Update Count : 012 // Last Modified On : Tue Mar 11 11:19:00 2022 13 // Update Count : 1 14 14 // 15 15 … … 23 23 namespace ast { 24 24 25 class TranslationGlobal { 26 public: 27 std::map< UniqueId, Decl * > idMap; 28 29 ptr<Type> sizeType; 30 const FunctionDecl * dereference; 31 const StructDecl * dtorStruct; 32 const FunctionDecl * dtorDestroy; 33 }; 34 25 35 class TranslationUnit { 26 36 public: 27 37 std::list< ptr< Decl > > decls; 28 29 struct Global { 30 std::map< UniqueId, Decl * > idMap; 31 32 ptr<Type> sizeType; 33 const FunctionDecl * dereference; 34 const StructDecl * dtorStruct; 35 const FunctionDecl * dtorDestroy; 36 } global; 38 TranslationGlobal global; 37 39 }; 38 40 -
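Editorial note on TranslationUnit.hpp: hoisting the former nested TranslationUnit::Global into the top-level TranslationGlobal class is what lets Fwd.hpp (above) replace the loose extern globals with a plain forward declaration, since a nested class cannot be forward-declared from outside its enclosing class. A tiny illustration of the distinction (the helper function is hypothetical):

    namespace ast { class TranslationGlobal; }   // legal: top-level class, declaration only

    // class ast::TranslationUnit::Global;       // ill-formed: a nested type cannot be
                                                 // forward-declared from the outside

    // The declaration alone suffices wherever only a reference or pointer is needed,
    // e.g. a hypothetical helper living in another header:
    void recordSpecialDecls( ast::TranslationGlobal & global );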
src/AST/TypeSubstitution.hpp
ref3c383 rd672350 37 37 public: 38 38 TypeSubstitution(); 39 template< typename FormalContainer, typename ActualContainer > 40 TypeSubstitution( FormalContainer formals, ActualContainer actuals ); 39 41 template< typename FormalIterator, typename ActualIterator > 40 42 TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ); … … 76 78 bool empty() const; 77 79 80 template< typename FormalContainer, typename ActualContainer > 81 void addAll( FormalContainer formals, ActualContainer actuals ); 78 82 template< typename FormalIterator, typename ActualIterator > 79 void add ( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin );83 void addAll( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ); 80 84 81 85 /// create a new TypeSubstitution using bindings from env containing all of the type variables in expr … … 112 116 }; 113 117 118 template< typename FormalContainer, typename ActualContainer > 119 TypeSubstitution::TypeSubstitution( FormalContainer formals, ActualContainer actuals ) { 120 assert( formals.size() == actuals.size() ); 121 addAll( formals.begin(), formals.end(), actuals.begin() ); 122 } 123 124 template< typename FormalIterator, typename ActualIterator > 125 TypeSubstitution::TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) { 126 addAll( formalBegin, formalEnd, actualBegin ); 127 } 128 129 template< typename FormalContainer, typename ActualContainer > 130 void TypeSubstitution::addAll( FormalContainer formals, ActualContainer actuals ) { 131 assert( formals.size() == actuals.size() ); 132 addAll( formals.begin(), formals.end(), actuals.begin() ); 133 } 134 114 135 // this is the only place where type parameters outside a function formal may be substituted. 115 136 template< typename FormalIterator, typename ActualIterator > 116 void TypeSubstitution::add ( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {137 void TypeSubstitution::addAll( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) { 117 138 // FormalIterator points to a TypeDecl 118 139 // ActualIterator points to a Type … … 129 150 } // if 130 151 } else { 131 152 // Is this an error? 132 153 } // if 133 154 } // for 134 155 } 135 136 137 138 template< typename FormalIterator, typename ActualIterator >139 TypeSubstitution::TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {140 add( formalBegin, formalEnd, actualBegin );141 }142 143 156 144 157 } // namespace ast -
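Editorial note on TypeSubstitution.hpp: the container-taking constructor and addAll overload assert that the formal and actual lists have the same length and then delegate to the iterator version (renamed from add to addAll), which is what lets GenericSubstitution.cpp above write sub = TypeSubstitution( aggr->params, ty->params ) directly. A simplified sketch of the same delegation shape, using a hypothetical string-to-string substitution in place of the real AST types:

    #include <cassert>
    #include <map>
    #include <string>

    // Hypothetical, simplified stand-in for a formal -> actual mapping.
    struct SimpleSubstitution {
        std::map<std::string, std::string> bindings;

        SimpleSubstitution() = default;

        // Container version checks the sizes once, then delegates.
        template<typename FormalC, typename ActualC>
        SimpleSubstitution( const FormalC & formals, const ActualC & actuals ) {
            addAll( formals, actuals );
        }

        template<typename FormalC, typename ActualC>
        void addAll( const FormalC & formals, const ActualC & actuals ) {
            assert( formals.size() == actuals.size() );
            addAll( formals.begin(), formals.end(), actuals.begin() );
        }

        // Iterator version does the real work.
        template<typename FormalIt, typename ActualIt>
        void addAll( FormalIt fBegin, FormalIt fEnd, ActualIt aBegin ) {
            for ( ; fBegin != fEnd; ++fBegin, ++aBegin ) bindings[*fBegin] = *aBegin;
        }
    };

    // Usage mirrors the GenericSubstitution change:
    //   SimpleSubstitution sub( declParams, instParams );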
src/AST/module.mk
ref3c383 rd672350 16 16 17 17 SRC_AST = \ 18 AST/AssertAcyclic.cpp \19 AST/AssertAcyclic.hpp \20 18 AST/Attribute.cpp \ 21 19 AST/Attribute.hpp \ … … 64 62 AST/TypeSubstitution.cpp \ 65 63 AST/TypeSubstitution.hpp \ 64 AST/Util.cpp \ 65 AST/Util.hpp \ 66 66 AST/Visitor.hpp 67 67 -
src/Common/CodeLocationTools.cpp
ref3c383 rd672350 9 9 // Author : Andrew Beach 10 10 // Created On : Fri Dec 4 15:42:00 2020 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Tue Feb 1 09:14:39202213 // Update Count : 311 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Mar 14 15:14:00 2022 13 // Update Count : 4 14 14 // 15 15 … … 239 239 }; 240 240 241 class LocalFillCore : public ast::WithGuards { 242 CodeLocation const * parent; 243 public: 244 LocalFillCore( CodeLocation const & location ) : parent( &location ) { 245 assert( location.isSet() ); 246 } 247 248 template<typename node_t> 249 auto previsit( node_t const * node ) 250 -> typename std::enable_if<has_code_location<node_t>::value, node_t const *>::type { 251 if ( node->location.isSet() ) { 252 GuardValue( parent ) = &node->location; 253 return node; 254 } else { 255 node_t * mut = ast::mutate( node ); 256 mut->location = *parent; 257 return mut; 258 } 259 } 260 }; 261 241 262 } // namespace 242 263 … … 278 299 ast::Pass<FillCore>::run( unit ); 279 300 } 301 302 ast::Node const * localFillCodeLocations( 303 CodeLocation const & location , ast::Node const * node ) { 304 ast::Pass<LocalFillCore> visitor( location ); 305 return node->accept( visitor ); 306 } -
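Editorial note on CodeLocationTools.cpp: LocalFillCore walks a subtree, gives every node with an unset location the location of its nearest located ancestor, and relies on GuardValue so the "current parent" is restored on the way back up; localFillCodeLocations is the entry point that seeds it with a caller-supplied base location. The same idea on a hypothetical miniature tree (deliberately ignoring the real ast::Pass machinery and mutate-on-write):

    #include <string>
    #include <vector>

    // Hypothetical node: an empty location string means "unset".
    struct Node {
        std::string location;
        std::vector<Node> children;
    };

    // Unset nodes inherit from the nearest ancestor whose location is set,
    // mirroring what LocalFillCore does with GuardValue( parent ).
    void localFill( Node & node, const std::string & parentLocation ) {
        if ( node.location.empty() ) node.location = parentLocation;
        for ( Node & child : node.children )
            localFill( child, node.location );   // this node is the parent for its children
    }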
src/Common/CodeLocationTools.hpp
ref3c383 rd672350 10 10 // Created On : Fri Dec 4 15:35:00 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Dec 9 9:53:00 202013 // Update Count : 112 // Last Modified On : Mon Mar 14 15:14:00 2022 13 // Update Count : 2 14 14 // 15 15 16 16 #pragma once 17 17 18 struct CodeLocation; 18 19 namespace ast { 20 class Node; 19 21 class TranslationUnit; 20 22 } … … 28 30 // Assign a nearby code-location to any unset code locations in the forest. 29 31 void forceFillCodeLocations( ast::TranslationUnit & unit ); 32 33 // Fill in code-locations with a parent code location, 34 // using the provided CodeLocation as the base. 35 ast::Node const * 36 localFillCodeLocations( CodeLocation const &, ast::Node const * ); -
src/Common/Examine.cc
ref3c383 rd672350 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // Examine. h --7 // Examine.cc -- Helpers for examining AST code. 8 8 // 9 9 // Author : Andrew Beach 10 10 // Created On : Wed Sept 2 14:02 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Sep 8 12:15 202013 // Update Count : 012 // Last Modified On : Fri Dec 10 10:27 2021 13 // Update Count : 1 14 14 // 15 15 16 16 #include "Common/Examine.h" 17 17 18 #include "AST/Type.hpp" 18 19 #include "CodeGen/OperatorTable.h" 20 #include "InitTweak/InitTweak.h" 19 21 20 22 DeclarationWithType * isMainFor( FunctionDecl * func, AggregateDecl::Aggregate kind ) { … … 36 38 37 39 namespace { 40 41 // getTypeofThis but does some extra checks used in this module. 42 const ast::Type * getTypeofThisSolo( const ast::FunctionDecl * func ) { 43 if ( 1 != func->params.size() ) { 44 return nullptr; 45 } 46 auto ref = func->type->params.front().as<ast::ReferenceType>(); 47 return (ref) ? ref->base : nullptr; 48 } 49 50 } 51 52 const ast::DeclWithType * isMainFor( 53 const ast::FunctionDecl * func, ast::AggregateDecl::Aggregate kind ) { 54 if ( "main" != func->name ) return nullptr; 55 if ( 1 != func->params.size() ) return nullptr; 56 57 auto param = func->params.front(); 58 59 auto type = dynamic_cast<const ast::ReferenceType *>( param->get_type() ); 60 if ( !type ) return nullptr; 61 62 auto obj = type->base.as<ast::StructInstType>(); 63 if ( !obj ) return nullptr; 64 65 if ( kind != obj->base->kind ) return nullptr; 66 67 return param; 68 } 69 70 namespace { 38 71 Type * getDestructorParam( FunctionDecl * func ) { 39 72 if ( !CodeGen::isDestructor( func->name ) ) return nullptr; … … 48 81 return nullptr; 49 82 } 83 84 const ast::Type * getDestructorParam( const ast::FunctionDecl * func ) { 85 if ( !CodeGen::isDestructor( func->name ) ) return nullptr; 86 //return InitTweak::getParamThis( func )->type; 87 return getTypeofThisSolo( func ); 88 } 89 50 90 } 51 91 … … 57 97 return false; 58 98 } 99 100 bool isDestructorFor( 101 const ast::FunctionDecl * func, const ast::StructDecl * type_decl ) { 102 if ( const ast::Type * type = getDestructorParam( func ) ) { 103 auto stype = dynamic_cast<const ast::StructInstType *>( type ); 104 return stype && stype->base.get() == type_decl; 105 } 106 return false; 107 } -
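Editorial note on Examine.cc: the new-AST overloads of isMainFor and isDestructorFor let ported passes (such as the concurrency keywords below) recognize main( T & ) and destructors without converting back to the old AST. A sketch of the intended call pattern; the surrounding function is hypothetical, but the two helpers and their signatures are the ones added here:

    #include "AST/Decl.hpp"
    #include "Common/Examine.h"

    // Hypothetical pass helper: classify a function declaration against a thread type.
    void classify( const ast::FunctionDecl * func, const ast::StructDecl * threadType ) {
        if ( const ast::DeclWithType * param =
                 isMainFor( func, ast::AggregateDecl::Thread ) ) {
            // func is main( T & ) where T is a struct declared with the thread kind;
            // param is that reference parameter.
            (void)param;
        }
        if ( isDestructorFor( func, threadType ) ) {
            // func is a destructor whose parameter type is exactly threadType.
        }
    }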
src/Common/Examine.h
ref3c383 rd672350 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // Examine.h -- 7 // Examine.h -- Helpers for examining AST code. 8 8 // 9 9 // Author : Andrew Beach 10 10 // Created On : Wed Sept 2 13:57 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Sep 8 12:08 202013 // Update Count : 012 // Last Modified On : Fri Dec 10 10:28 2021 13 // Update Count : 1 14 14 // 15 15 16 #include "AST/Decl.hpp" 16 17 #include "SynTree/Declaration.h" 17 18 18 19 /// Check if this is a main function for a type of an aggregate kind. 19 20 DeclarationWithType * isMainFor( FunctionDecl * func, AggregateDecl::Aggregate kind ); 21 const ast::DeclWithType * isMainFor( 22 const ast::FunctionDecl * func, ast::AggregateDecl::Aggregate kind ); 20 23 // Returns a pointer to the parameter if true, nullptr otherwise. 21 24 22 25 /// Check if this function is a destructor for the given structure. 23 26 bool isDestructorFor( FunctionDecl * func, StructDecl * type_decl ); 27 bool isDestructorFor( 28 const ast::FunctionDecl * func, const ast::StructDecl * type ); -
src/Concurrency/Keywords.cc
ref3c383 rd672350 422 422 ; 423 423 else if ( auto param = isMainFor( decl, cast_target ) ) { 424 // This should never trigger. 425 assert( vtable_decl ); 424 if ( !vtable_decl ) { 425 SemanticError( decl, context_error ); 426 } 426 427 // Should be safe because of isMainFor. 427 428 StructInstType * struct_type = static_cast<StructInstType *>( … … 1203 1204 //new TypeofType( noQualifiers, args.front()->clone() ) 1204 1205 new TypeofType( noQualifiers, new UntypedExpr( 1205 new NameExpr( "__get_ type" ),1206 new NameExpr( "__get_mutexstmt_lock_type" ), 1206 1207 { args.front()->clone() } 1207 1208 ) … … 1215 1216 map_range < std::list<Initializer*> > ( args, [](Expression * var ){ 1216 1217 return new SingleInit( new UntypedExpr( 1217 new NameExpr( "__get_ ptr" ),1218 new NameExpr( "__get_mutexstmt_lock_ptr" ), 1218 1219 { var } 1219 1220 ) ); … … 1226 1227 TypeExpr * lock_type_expr = new TypeExpr( 1227 1228 new TypeofType( noQualifiers, new UntypedExpr( 1228 new NameExpr( "__get_ type" ),1229 new NameExpr( "__get_mutexstmt_lock_type" ), 1229 1230 { args.front()->clone() } 1230 1231 ) -
src/Concurrency/KeywordsNew.cpp
ref3c383 rd672350 10 10 // Created On : Tue Nov 16 9:53:00 2021 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Dec 1 11:24:00 202113 // Update Count : 112 // Last Modified On : Fri Mar 11 10:40:00 2022 13 // Update Count : 2 14 14 // 15 16 #include <iostream> 15 17 16 18 #include "Concurrency/Keywords.h" … … 18 20 #include "AST/Copy.hpp" 19 21 #include "AST/Decl.hpp" 22 #include "AST/Expr.hpp" 20 23 #include "AST/Pass.hpp" 21 24 #include "AST/Stmt.hpp" 25 #include "AST/DeclReplacer.hpp" 22 26 #include "AST/TranslationUnit.hpp" 23 27 #include "CodeGen/OperatorTable.h" 28 #include "Common/Examine.h" 24 29 #include "Common/utility.h" 30 #include "Common/UniqueName.h" 31 #include "ControlStruct/LabelGeneratorNew.hpp" 25 32 #include "InitTweak/InitTweak.h" 33 #include "Virtual/Tables.h" 26 34 27 35 namespace Concurrency { … … 29 37 namespace { 30 38 31 inline static bool isThread( const ast::DeclWithType * decl ) { 39 // -------------------------------------------------------------------------- 40 // Loose Helper Functions: 41 42 /// Detect threads constructed with the keyword thread. 43 bool isThread( const ast::DeclWithType * decl ) { 32 44 auto baseType = decl->get_type()->stripDeclarator(); 33 45 auto instType = dynamic_cast<const ast::StructInstType *>( baseType ); … … 36 48 } 37 49 50 /// Get the virtual type id if given a type name. 51 std::string typeIdType( std::string const & exception_name ) { 52 return exception_name.empty() ? std::string() 53 : Virtual::typeIdType( exception_name ); 54 } 55 56 /// Get the vtable type name if given a type name. 57 std::string vtableTypeName( std::string const & exception_name ) { 58 return exception_name.empty() ? std::string() 59 : Virtual::vtableTypeName( exception_name ); 60 } 61 62 static ast::Type * mutate_under_references( ast::ptr<ast::Type>& type ) { 63 ast::Type * mutType = type.get_and_mutate(); 64 for ( ast::ReferenceType * mutRef 65 ; (mutRef = dynamic_cast<ast::ReferenceType *>( mutType )) 66 ; mutType = mutRef->base.get_and_mutate() ); 67 return mutType; 68 } 69 70 // Describe that it adds the generic parameters and the uses of the generic 71 // parameters on the function and first "this" argument. 72 ast::FunctionDecl * fixupGenerics( 73 const ast::FunctionDecl * func, const ast::StructDecl * decl ) { 74 const CodeLocation & location = decl->location; 75 // We have to update both the declaration 76 auto mutFunc = ast::mutate( func ); 77 auto mutType = mutFunc->type.get_and_mutate(); 78 79 if ( decl->params.empty() ) { 80 return mutFunc; 81 } 82 83 assert( 0 != mutFunc->params.size() ); 84 assert( 0 != mutType->params.size() ); 85 86 // Add the "forall" clause information. 
87 for ( const ast::ptr<ast::TypeDecl> & typeParam : decl->params ) { 88 auto typeDecl = ast::deepCopy( typeParam ); 89 mutFunc->type_params.push_back( typeDecl ); 90 mutType->forall.push_back( 91 new ast::TypeInstType( typeDecl->name, typeDecl ) ); 92 for ( auto & assertion : typeDecl->assertions ) { 93 mutFunc->assertions.push_back( assertion ); 94 mutType->assertions.emplace_back( 95 new ast::VariableExpr( location, assertion ) ); 96 } 97 typeDecl->assertions.clear(); 98 } 99 100 // Even chain_mutate is not powerful enough for this: 101 ast::ptr<ast::Type>& paramType = strict_dynamic_cast<ast::ObjectDecl *>( 102 mutFunc->params[0].get_and_mutate() )->type; 103 auto paramTypeInst = strict_dynamic_cast<ast::StructInstType *>( 104 mutate_under_references( paramType ) ); 105 auto typeParamInst = strict_dynamic_cast<ast::StructInstType *>( 106 mutate_under_references( mutType->params[0] ) ); 107 108 for ( const ast::ptr<ast::TypeDecl> & typeDecl : mutFunc->type_params ) { 109 paramTypeInst->params.push_back( 110 new ast::TypeExpr( location, 111 new ast::TypeInstType( typeDecl->name, typeDecl ) ) ); 112 typeParamInst->params.push_back( 113 new ast::TypeExpr( location, 114 new ast::TypeInstType( typeDecl->name, typeDecl ) ) ); 115 } 116 117 return mutFunc; 118 } 119 38 120 // -------------------------------------------------------------------------- 39 struct MutexKeyword final { 121 struct ConcurrentSueKeyword : public ast::WithDeclsToAdd<> { 122 ConcurrentSueKeyword( 123 std::string&& type_name, std::string&& field_name, 124 std::string&& getter_name, std::string&& context_error, 125 std::string&& exception_name, 126 bool needs_main, ast::AggregateDecl::Aggregate cast_target 127 ) : 128 type_name( type_name ), field_name( field_name ), 129 getter_name( getter_name ), context_error( context_error ), 130 exception_name( exception_name ), 131 typeid_name( typeIdType( exception_name ) ), 132 vtable_name( vtableTypeName( exception_name ) ), 133 needs_main( needs_main ), cast_target( cast_target ) 134 {} 135 136 virtual ~ConcurrentSueKeyword() {} 137 138 const ast::Decl * postvisit( const ast::StructDecl * decl ); 139 const ast::DeclWithType * postvisit( const ast::FunctionDecl * decl ); 140 const ast::Expr * postvisit( const ast::KeywordCastExpr * expr ); 141 142 struct StructAndField { 143 const ast::StructDecl * decl; 144 const ast::ObjectDecl * field; 145 }; 146 147 const ast::StructDecl * handleStruct( const ast::StructDecl * ); 148 void handleMain( const ast::FunctionDecl *, const ast::StructInstType * ); 149 void addTypeId( const ast::StructDecl * ); 150 void addVtableForward( const ast::StructDecl * ); 151 const ast::FunctionDecl * forwardDeclare( const ast::StructDecl * ); 152 StructAndField addField( const ast::StructDecl * ); 153 void addGetRoutines( const ast::ObjectDecl *, const ast::FunctionDecl * ); 154 void addLockUnlockRoutines( const ast::StructDecl * ); 155 156 private: 157 const std::string type_name; 158 const std::string field_name; 159 const std::string getter_name; 160 const std::string context_error; 161 const std::string exception_name; 162 const std::string typeid_name; 163 const std::string vtable_name; 164 const bool needs_main; 165 const ast::AggregateDecl::Aggregate cast_target; 166 167 const ast::StructDecl * type_decl = nullptr; 168 const ast::FunctionDecl * dtor_decl = nullptr; 169 const ast::StructDecl * except_decl = nullptr; 170 const ast::StructDecl * typeid_decl = nullptr; 171 const ast::StructDecl * vtable_decl = nullptr; 172 173 }; 174 175 // Handles thread 
type declarations: 176 // 177 // thread Mythread { struct MyThread { 178 // int data; int data; 179 // a_struct_t more_data; a_struct_t more_data; 180 // => thread$ __thrd_d; 181 // }; }; 182 // static inline thread$ * get_thread( MyThread * this ) { return &this->__thrd_d; } 183 // 184 struct ThreadKeyword final : public ConcurrentSueKeyword { 185 ThreadKeyword() : ConcurrentSueKeyword( 186 "thread$", 187 "__thrd", 188 "get_thread", 189 "thread keyword requires threads to be in scope, add #include <thread.hfa>\n", 190 "ThreadCancelled", 191 true, 192 ast::AggregateDecl::Thread ) 193 {} 194 195 virtual ~ThreadKeyword() {} 196 }; 197 198 // Handles coroutine type declarations: 199 // 200 // coroutine MyCoroutine { struct MyCoroutine { 201 // int data; int data; 202 // a_struct_t more_data; a_struct_t more_data; 203 // => coroutine$ __cor_d; 204 // }; }; 205 // static inline coroutine$ * get_coroutine( MyCoroutine * this ) { return &this->__cor_d; } 206 // 207 struct CoroutineKeyword final : public ConcurrentSueKeyword { 208 CoroutineKeyword() : ConcurrentSueKeyword( 209 "coroutine$", 210 "__cor", 211 "get_coroutine", 212 "coroutine keyword requires coroutines to be in scope, add #include <coroutine.hfa>\n", 213 "CoroutineCancelled", 214 true, 215 ast::AggregateDecl::Coroutine ) 216 {} 217 218 virtual ~CoroutineKeyword() {} 219 }; 220 221 // Handles monitor type declarations: 222 // 223 // monitor MyMonitor { struct MyMonitor { 224 // int data; int data; 225 // a_struct_t more_data; a_struct_t more_data; 226 // => monitor$ __mon_d; 227 // }; }; 228 // static inline monitor$ * get_coroutine( MyMonitor * this ) { 229 // return &this->__cor_d; 230 // } 231 // void lock(MyMonitor & this) { 232 // lock(get_monitor(this)); 233 // } 234 // void unlock(MyMonitor & this) { 235 // unlock(get_monitor(this)); 236 // } 237 // 238 struct MonitorKeyword final : public ConcurrentSueKeyword { 239 MonitorKeyword() : ConcurrentSueKeyword( 240 "monitor$", 241 "__mon", 242 "get_monitor", 243 "monitor keyword requires monitors to be in scope, add #include <monitor.hfa>\n", 244 "", 245 false, 246 ast::AggregateDecl::Monitor ) 247 {} 248 249 virtual ~MonitorKeyword() {} 250 }; 251 252 // Handles generator type declarations: 253 // 254 // generator MyGenerator { struct MyGenerator { 255 // int data; int data; 256 // a_struct_t more_data; a_struct_t more_data; 257 // => int __generator_state; 258 // }; }; 259 // 260 struct GeneratorKeyword final : public ConcurrentSueKeyword { 261 GeneratorKeyword() : ConcurrentSueKeyword( 262 "generator$", 263 "__generator_state", 264 "get_generator", 265 "Unable to find builtin type generator$\n", 266 "", 267 true, 268 ast::AggregateDecl::Generator ) 269 {} 270 271 virtual ~GeneratorKeyword() {} 272 }; 273 274 const ast::Decl * ConcurrentSueKeyword::postvisit( 275 const ast::StructDecl * decl ) { 276 if ( !decl->body ) { 277 return decl; 278 } else if ( cast_target == decl->kind ) { 279 return handleStruct( decl ); 280 } else if ( type_name == decl->name ) { 281 assert( !type_decl ); 282 type_decl = decl; 283 } else if ( exception_name == decl->name ) { 284 assert( !except_decl ); 285 except_decl = decl; 286 } else if ( typeid_name == decl->name ) { 287 assert( !typeid_decl ); 288 typeid_decl = decl; 289 } else if ( vtable_name == decl->name ) { 290 assert( !vtable_decl ); 291 vtable_decl = decl; 292 } 293 return decl; 294 } 295 296 // Try to get the full definition, but raise an error on conflicts. 
297 const ast::FunctionDecl * getDefinition( 298 const ast::FunctionDecl * old_decl, 299 const ast::FunctionDecl * new_decl ) { 300 if ( !new_decl->stmts ) { 301 return old_decl; 302 } else if ( !old_decl->stmts ) { 303 return new_decl; 304 } else { 305 assert( !old_decl->stmts || !new_decl->stmts ); 306 return nullptr; 307 } 308 } 309 310 const ast::DeclWithType * ConcurrentSueKeyword::postvisit( 311 const ast::FunctionDecl * decl ) { 312 if ( type_decl && isDestructorFor( decl, type_decl ) ) { 313 // Check for forward declarations, try to get the full definition. 314 dtor_decl = (dtor_decl) ? getDefinition( dtor_decl, decl ) : decl; 315 } else if ( !vtable_name.empty() && decl->has_body() ) { 316 if (const ast::DeclWithType * param = isMainFor( decl, cast_target )) { 317 if ( !vtable_decl ) { 318 SemanticError( decl, context_error ); 319 } 320 // Should be safe because of isMainFor. 321 const ast::StructInstType * struct_type = 322 static_cast<const ast::StructInstType *>( 323 static_cast<const ast::ReferenceType *>( 324 param->get_type() )->base.get() ); 325 326 handleMain( decl, struct_type ); 327 } 328 } 329 return decl; 330 } 331 332 const ast::Expr * ConcurrentSueKeyword::postvisit( 333 const ast::KeywordCastExpr * expr ) { 334 if ( cast_target == expr->target ) { 335 // Convert `(thread &)ex` to `(thread$ &)*get_thread(ex)`, etc. 336 if ( !type_decl || !dtor_decl ) { 337 SemanticError( expr, context_error ); 338 } 339 assert( nullptr == expr->result ); 340 auto cast = ast::mutate( expr ); 341 cast->result = new ast::ReferenceType( new ast::StructInstType( type_decl ) ); 342 cast->concrete_target.field = field_name; 343 cast->concrete_target.getter = getter_name; 344 return cast; 345 } 346 return expr; 347 } 348 349 const ast::StructDecl * ConcurrentSueKeyword::handleStruct( 350 const ast::StructDecl * decl ) { 351 assert( decl->body ); 352 353 if ( !type_decl || !dtor_decl ) { 354 SemanticError( decl, context_error ); 355 } 356 357 if ( !exception_name.empty() ) { 358 if( !typeid_decl || !vtable_decl ) { 359 SemanticError( decl, context_error ); 360 } 361 addTypeId( decl ); 362 addVtableForward( decl ); 363 } 364 365 const ast::FunctionDecl * func = forwardDeclare( decl ); 366 StructAndField addFieldRet = addField( decl ); 367 decl = addFieldRet.decl; 368 const ast::ObjectDecl * field = addFieldRet.field; 369 370 addGetRoutines( field, func ); 371 // Add routines to monitors for use by mutex stmt. 
372 if ( ast::AggregateDecl::Monitor == cast_target ) { 373 addLockUnlockRoutines( decl ); 374 } 375 376 return decl; 377 } 378 379 void ConcurrentSueKeyword::handleMain( 380 const ast::FunctionDecl * decl, const ast::StructInstType * type ) { 381 assert( vtable_decl ); 382 assert( except_decl ); 383 384 const CodeLocation & location = decl->location; 385 386 std::vector<ast::ptr<ast::Expr>> poly_args = { 387 new ast::TypeExpr( location, type ), 388 }; 389 ast::ObjectDecl * vtable_object = Virtual::makeVtableInstance( 390 location, 391 "_default_vtable_object_declaration", 392 new ast::StructInstType( vtable_decl, copy( poly_args ) ), 393 type, 394 nullptr 395 ); 396 declsToAddAfter.push_back( vtable_object ); 397 declsToAddAfter.push_back( 398 new ast::ObjectDecl( 399 location, 400 Virtual::concurrentDefaultVTableName(), 401 new ast::ReferenceType( vtable_object->type, ast::CV::Const ), 402 new ast::SingleInit( location, 403 new ast::VariableExpr( location, vtable_object ) ), 404 ast::Storage::Classes(), 405 ast::Linkage::Cforall 406 ) 407 ); 408 declsToAddAfter.push_back( Virtual::makeGetExceptionFunction( 409 location, 410 vtable_object, 411 new ast::StructInstType( except_decl, copy( poly_args ) ) 412 ) ); 413 } 414 415 void ConcurrentSueKeyword::addTypeId( const ast::StructDecl * decl ) { 416 assert( typeid_decl ); 417 const CodeLocation & location = decl->location; 418 419 ast::StructInstType * typeid_type = 420 new ast::StructInstType( typeid_decl, ast::CV::Const ); 421 typeid_type->params.push_back( 422 new ast::TypeExpr( location, new ast::StructInstType( decl ) ) ); 423 declsToAddBefore.push_back( 424 Virtual::makeTypeIdInstance( location, typeid_type ) ); 425 // If the typeid_type is going to be kept, the other reference will have 426 // been made by now, but we also get to avoid extra mutates. 427 ast::ptr<ast::StructInstType> typeid_cleanup = typeid_type; 428 } 429 430 void ConcurrentSueKeyword::addVtableForward( const ast::StructDecl * decl ) { 431 assert( vtable_decl ); 432 const CodeLocation& location = decl->location; 433 434 std::vector<ast::ptr<ast::Expr>> poly_args = { 435 new ast::TypeExpr( location, new ast::StructInstType( decl ) ), 436 }; 437 declsToAddBefore.push_back( Virtual::makeGetExceptionForward( 438 location, 439 new ast::StructInstType( vtable_decl, copy( poly_args ) ), 440 new ast::StructInstType( except_decl, copy( poly_args ) ) 441 ) ); 442 ast::ObjectDecl * vtable_object = Virtual::makeVtableForward( 443 location, 444 "_default_vtable_object_declaration", 445 new ast::StructInstType( vtable_decl, std::move( poly_args ) ) 446 ); 447 declsToAddBefore.push_back( vtable_object ); 448 declsToAddBefore.push_back( 449 new ast::ObjectDecl( 450 location, 451 Virtual::concurrentDefaultVTableName(), 452 new ast::ReferenceType( vtable_object->type, ast::CV::Const ), 453 nullptr, 454 ast::Storage::Extern, 455 ast::Linkage::Cforall 456 ) 457 ); 458 } 459 460 const ast::FunctionDecl * ConcurrentSueKeyword::forwardDeclare( 461 const ast::StructDecl * decl ) { 462 const CodeLocation & location = decl->location; 463 464 ast::StructDecl * forward = ast::deepCopy( decl ); 465 { 466 // If removing members makes ref-count go to zero, do not free. 
467 ast::ptr<ast::StructDecl> forward_ptr = forward; 468 forward->body = false; 469 forward->members.clear(); 470 forward_ptr.release(); 471 } 472 473 ast::ObjectDecl * this_decl = new ast::ObjectDecl( 474 location, 475 "this", 476 new ast::ReferenceType( new ast::StructInstType( decl ) ), 477 nullptr, 478 ast::Storage::Classes(), 479 ast::Linkage::Cforall 480 ); 481 482 ast::ObjectDecl * ret_decl = new ast::ObjectDecl( 483 location, 484 "ret", 485 new ast::PointerType( new ast::StructInstType( type_decl ) ), 486 nullptr, 487 ast::Storage::Classes(), 488 ast::Linkage::Cforall 489 ); 490 491 ast::FunctionDecl * get_decl = new ast::FunctionDecl( 492 location, 493 getter_name, 494 {}, // forall 495 { this_decl }, // params 496 { ret_decl }, // returns 497 nullptr, // stmts 498 ast::Storage::Static, 499 ast::Linkage::Cforall, 500 { new ast::Attribute( "const" ) }, 501 ast::Function::Inline 502 ); 503 get_decl = fixupGenerics( get_decl, decl ); 504 505 ast::FunctionDecl * main_decl = nullptr; 506 if ( needs_main ) { 507 // `this_decl` is copied here because the original was used above. 508 main_decl = new ast::FunctionDecl( 509 location, 510 "main", 511 {}, 512 { ast::deepCopy( this_decl ) }, 513 {}, 514 nullptr, 515 ast::Storage::Classes(), 516 ast::Linkage::Cforall 517 ); 518 main_decl = fixupGenerics( main_decl, decl ); 519 } 520 521 declsToAddBefore.push_back( forward ); 522 if ( needs_main ) declsToAddBefore.push_back( main_decl ); 523 declsToAddBefore.push_back( get_decl ); 524 525 return get_decl; 526 } 527 528 ConcurrentSueKeyword::StructAndField ConcurrentSueKeyword::addField( 529 const ast::StructDecl * decl ) { 530 const CodeLocation & location = decl->location; 531 532 ast::ObjectDecl * field = new ast::ObjectDecl( 533 location, 534 field_name, 535 new ast::StructInstType( type_decl ), 536 nullptr, 537 ast::Storage::Classes(), 538 ast::Linkage::Cforall 539 ); 540 541 auto mutDecl = ast::mutate( decl ); 542 mutDecl->members.push_back( field ); 543 544 return {mutDecl, field}; 545 } 546 547 void ConcurrentSueKeyword::addGetRoutines( 548 const ast::ObjectDecl * field, const ast::FunctionDecl * forward ) { 549 // Say it is generated at the "same" places as the forward declaration. 550 const CodeLocation & location = forward->location; 551 552 const ast::DeclWithType * param = forward->params.front(); 553 ast::Stmt * stmt = new ast::ReturnStmt( location, 554 new ast::AddressExpr( location, 555 new ast::MemberExpr( location, 556 field, 557 new ast::CastExpr( location, 558 new ast::VariableExpr( location, param ), 559 ast::deepCopy( param->get_type()->stripReferences() ), 560 ast::ExplicitCast 561 ) 562 ) 563 ) 564 ); 565 566 ast::FunctionDecl * decl = ast::deepCopy( forward ); 567 decl->stmts = new ast::CompoundStmt( location, { stmt } ); 568 declsToAddAfter.push_back( decl ); 569 } 570 571 void ConcurrentSueKeyword::addLockUnlockRoutines( 572 const ast::StructDecl * decl ) { 573 // This should only be used on monitors. 574 assert( ast::AggregateDecl::Monitor == cast_target ); 575 576 const CodeLocation & location = decl->location; 577 578 // The parameter for both routines. 579 ast::ObjectDecl * this_decl = new ast::ObjectDecl( 580 location, 581 "this", 582 new ast::ReferenceType( new ast::StructInstType( decl ) ), 583 nullptr, 584 ast::Storage::Classes(), 585 ast::Linkage::Cforall 586 ); 587 588 ast::FunctionDecl * lock_decl = new ast::FunctionDecl( 589 location, 590 "lock", 591 { /* forall */ }, 592 { 593 // Copy the declaration of this. 
594 ast::deepCopy( this_decl ), 595 }, 596 { /* returns */ }, 597 nullptr, 598 ast::Storage::Static, 599 ast::Linkage::Cforall, 600 { /* attributes */ }, 601 ast::Function::Inline 602 ); 603 lock_decl = fixupGenerics( lock_decl, decl ); 604 605 lock_decl->stmts = new ast::CompoundStmt( location, { 606 new ast::ExprStmt( location, 607 new ast::UntypedExpr( location, 608 new ast::NameExpr( location, "lock" ), 609 { 610 new ast::UntypedExpr( location, 611 new ast::NameExpr( location, "get_monitor" ), 612 { new ast::VariableExpr( location, 613 InitTweak::getParamThis( lock_decl ) ) } 614 ) 615 } 616 ) 617 ) 618 } ); 619 620 ast::FunctionDecl * unlock_decl = new ast::FunctionDecl( 621 location, 622 "unlock", 623 { /* forall */ }, 624 { 625 // Last use, consume the declaration of this. 626 this_decl, 627 }, 628 { /* returns */ }, 629 nullptr, 630 ast::Storage::Static, 631 ast::Linkage::Cforall, 632 { /* attributes */ }, 633 ast::Function::Inline 634 ); 635 unlock_decl = fixupGenerics( unlock_decl, decl ); 636 637 unlock_decl->stmts = new ast::CompoundStmt( location, { 638 new ast::ExprStmt( location, 639 new ast::UntypedExpr( location, 640 new ast::NameExpr( location, "unlock" ), 641 { 642 new ast::UntypedExpr( location, 643 new ast::NameExpr( location, "get_monitor" ), 644 { new ast::VariableExpr( location, 645 InitTweak::getParamThis( unlock_decl ) ) } 646 ) 647 } 648 ) 649 ) 650 } ); 651 652 declsToAddAfter.push_back( lock_decl ); 653 declsToAddAfter.push_back( unlock_decl ); 654 } 655 656 657 // -------------------------------------------------------------------------- 658 struct SuspendKeyword final : 659 public ast::WithStmtsToAdd<>, public ast::WithGuards { 660 SuspendKeyword() = default; 661 virtual ~SuspendKeyword() = default; 662 663 void previsit( const ast::FunctionDecl * ); 664 const ast::DeclWithType * postvisit( const ast::FunctionDecl * ); 665 const ast::Stmt * postvisit( const ast::SuspendStmt * ); 666 667 private: 668 bool is_real_suspend( const ast::FunctionDecl * ); 669 670 const ast::Stmt * make_generator_suspend( const ast::SuspendStmt * ); 671 const ast::Stmt * make_coroutine_suspend( const ast::SuspendStmt * ); 672 673 struct LabelPair { 674 ast::Label obj; 675 int idx; 676 }; 677 678 LabelPair make_label(const ast::Stmt * stmt ) { 679 labels.push_back( ControlStruct::newLabel( "generator", stmt ) ); 680 return { labels.back(), int(labels.size()) }; 681 } 682 683 const ast::DeclWithType * in_generator = nullptr; 684 const ast::FunctionDecl * decl_suspend = nullptr; 685 std::vector<ast::Label> labels; 686 }; 687 688 void SuspendKeyword::previsit( const ast::FunctionDecl * decl ) { 689 GuardValue( in_generator ); in_generator = nullptr; 690 691 // If it is the real suspend, grab it if we don't have one already. 692 if ( is_real_suspend( decl ) ) { 693 decl_suspend = decl_suspend ? decl_suspend : decl; 694 return; 695 } 696 697 // Otherwise check if this is a generator main and, if so, handle it. 698 auto param = isMainFor( decl, ast::AggregateDecl::Generator ); 699 if ( !param ) return; 700 701 if ( 0 != decl->returns.size() ) { 702 SemanticError( decl->location, "Generator main must return void" ); 703 } 704 705 in_generator = param; 706 GuardValue( labels ); labels.clear(); 707 } 708 709 const ast::DeclWithType * SuspendKeyword::postvisit( 710 const ast::FunctionDecl * decl ) { 711 // Only modify a full definition of a generator with states. 
712 if ( !decl->stmts || !in_generator || labels.empty() ) return decl; 713 714 const CodeLocation & location = decl->location; 715 716 // Create a new function body: 717 // static void * __generator_labels[] = {&&s0, &&s1, ...}; 718 // void * __generator_label = __generator_labels[GEN.__generator_state]; 719 // goto * __generator_label; 720 // s0: ; 721 // OLD_BODY 722 723 // This is the null statement inserted right before the body. 724 ast::NullStmt * noop = new ast::NullStmt( location ); 725 noop->labels.push_back( ControlStruct::newLabel( "generator", noop ) ); 726 const ast::Label & first_label = noop->labels.back(); 727 728 // Add each label to the init, starting with the first label. 729 std::vector<ast::ptr<ast::Init>> inits = { 730 new ast::SingleInit( location, 731 new ast::LabelAddressExpr( location, copy( first_label ) ) ) }; 732 // Then go through all the stored labels, and clear the store. 733 for ( auto && label : labels ) { 734 inits.push_back( new ast::SingleInit( label.location, 735 new ast::LabelAddressExpr( label.location, std::move( label ) 736 ) ) ); 737 } 738 labels.clear(); 739 // Then construct the initializer itself. 740 auto init = new ast::ListInit( location, std::move( inits ) ); 741 742 ast::ObjectDecl * generatorLabels = new ast::ObjectDecl( 743 location, 744 "__generator_labels", 745 new ast::ArrayType( 746 new ast::PointerType( new ast::VoidType() ), 747 nullptr, 748 ast::FixedLen, 749 ast::DynamicDim 750 ), 751 init, 752 ast::Storage::Classes(), 753 ast::Linkage::AutoGen 754 ); 755 756 ast::ObjectDecl * generatorLabel = new ast::ObjectDecl( 757 location, 758 "__generator_label", 759 new ast::PointerType( new ast::VoidType() ), 760 new ast::SingleInit( location, 761 new ast::UntypedExpr( location, 762 new ast::NameExpr( location, "?[?]" ), 763 { 764 // TODO: Could be a variable expr. 765 new ast::NameExpr( location, "__generator_labels" ), 766 new ast::UntypedMemberExpr( location, 767 new ast::NameExpr( location, "__generator_state" ), 768 new ast::VariableExpr( location, in_generator ) 769 ) 770 } 771 ) 772 ), 773 ast::Storage::Classes(), 774 ast::Linkage::AutoGen 775 ); 776 777 ast::BranchStmt * theGoTo = new ast::BranchStmt( 778 location, new ast::VariableExpr( location, generatorLabel ) 779 ); 780 781 // The noop goes here in order. 782 783 ast::CompoundStmt * body = new ast::CompoundStmt( location, { 784 { new ast::DeclStmt( location, generatorLabels ) }, 785 { new ast::DeclStmt( location, generatorLabel ) }, 786 { theGoTo }, 787 { noop }, 788 { decl->stmts }, 789 } ); 790 791 auto mutDecl = ast::mutate( decl ); 792 mutDecl->stmts = body; 793 return mutDecl; 794 } 795 796 const ast::Stmt * SuspendKeyword::postvisit( const ast::SuspendStmt * stmt ) { 797 switch ( stmt->type ) { 798 case ast::SuspendStmt::None: 799 // Use the context to determain the implicit target. 800 if ( in_generator ) { 801 return make_generator_suspend( stmt ); 802 } else { 803 return make_coroutine_suspend( stmt ); 804 } 805 case ast::SuspendStmt::Coroutine: 806 return make_coroutine_suspend( stmt ); 807 case ast::SuspendStmt::Generator: 808 // Generator suspends must be directly in a generator. 809 if ( !in_generator ) SemanticError( stmt->location, "'suspend generator' must be used inside main of generator type." ); 810 return make_generator_suspend( stmt ); 811 } 812 assert( false ); 813 return stmt; 814 } 815 816 /// Find the real/official suspend declaration. 
817 bool SuspendKeyword::is_real_suspend( const ast::FunctionDecl * decl ) { 818 return ( !decl->linkage.is_mangled 819 && 0 == decl->params.size() 820 && 0 == decl->returns.size() 821 && "__cfactx_suspend" == decl->name ); 822 } 823 824 const ast::Stmt * SuspendKeyword::make_generator_suspend( 825 const ast::SuspendStmt * stmt ) { 826 assert( in_generator ); 827 // Target code is: 828 // GEN.__generator_state = X; 829 // THEN 830 // return; 831 // __gen_X:; 832 833 const CodeLocation & location = stmt->location; 834 835 LabelPair label = make_label( stmt ); 836 837 // This is the context saving statement. 838 stmtsToAddBefore.push_back( new ast::ExprStmt( location, 839 new ast::UntypedExpr( location, 840 new ast::NameExpr( location, "?=?" ), 841 { 842 new ast::UntypedMemberExpr( location, 843 new ast::NameExpr( location, "__generator_state" ), 844 new ast::VariableExpr( location, in_generator ) 845 ), 846 ast::ConstantExpr::from_int( location, label.idx ), 847 } 848 ) 849 ) ); 850 851 // The THEN component is conditional (return is not). 852 if ( stmt->then ) { 853 stmtsToAddBefore.push_back( stmt->then.get() ); 854 } 855 stmtsToAddBefore.push_back( new ast::ReturnStmt( location, nullptr ) ); 856 857 // The null statement replaces the old suspend statement. 858 return new ast::NullStmt( location, { label.obj } ); 859 } 860 861 const ast::Stmt * SuspendKeyword::make_coroutine_suspend( 862 const ast::SuspendStmt * stmt ) { 863 // The only thing we need from the old statement is the location. 864 const CodeLocation & location = stmt->location; 865 866 if ( !decl_suspend ) { 867 SemanticError( location, "suspend keyword applied to coroutines requires coroutines to be in scope, add #include <coroutine.hfa>\n" ); 868 } 869 if ( stmt->then ) { 870 SemanticError( location, "Compound statement following coroutines is not implemented." 
); 871 } 872 873 return new ast::ExprStmt( location, 874 new ast::UntypedExpr( location, 875 ast::VariableExpr::functionPointer( location, decl_suspend ) ) 876 ); 877 } 878 879 // -------------------------------------------------------------------------- 880 struct MutexKeyword final : public ast::WithDeclsToAdd<> { 40 881 const ast::FunctionDecl * postvisit( const ast::FunctionDecl * decl ); 41 882 void postvisit( const ast::StructDecl * decl ); … … 50 891 ast::CompoundStmt * addStatements( const ast::CompoundStmt * body, const std::vector<ast::ptr<ast::Expr>> & args ); 51 892 ast::CompoundStmt * addThreadDtorStatements( const ast::FunctionDecl* func, const ast::CompoundStmt * body, const std::vector<const ast::DeclWithType *> & args ); 52 893 ast::ExprStmt * genVirtLockUnlockExpr( const std::string & fnName, ast::ptr<ast::Expr> expr, const CodeLocation & location, ast::Expr * param); 894 ast::IfStmt * genTypeDiscrimLockUnlock( const std::string & fnName, const std::vector<ast::ptr<ast::Expr>> & args, const CodeLocation & location, ast::UntypedExpr * thisParam ); 53 895 private: 54 896 const ast::StructDecl * monitor_decl = nullptr; … … 59 901 60 902 static ast::ptr<ast::Type> generic_func; 903 904 UniqueName mutex_func_namer = UniqueName("__lock_unlock_curr"); 61 905 }; 62 906 … … 160 1004 161 1005 const ast::Stmt * MutexKeyword::postvisit( const ast::MutexStmt * stmt ) { 1006 if ( !lock_guard_decl ) { 1007 SemanticError( stmt->location, "mutex stmt requires a header, add #include <mutex_stmt.hfa>\n" ); 1008 } 162 1009 ast::CompoundStmt * body = 163 1010 new ast::CompoundStmt( stmt->location, { stmt->stmt } ); 164 addStatements( body, stmt->mutexObjs );165 return body;1011 1012 return addStatements( body, stmt->mutexObjs );; 166 1013 } 167 1014 … … 251 1098 { 252 1099 new ast::SingleInit( location, 253 new ast::AddressExpr( 1100 new ast::AddressExpr( location, 254 1101 new ast::VariableExpr( location, monitor ) ) ), 255 1102 new ast::SingleInit( location, … … 358 1205 } 359 1206 1207 // generates a cast to the void ptr to the appropriate lock type and dereferences it before calling lock or unlock on it 1208 // used to undo the type erasure done by storing all the lock pointers as void 1209 ast::ExprStmt * MutexKeyword::genVirtLockUnlockExpr( const std::string & fnName, ast::ptr<ast::Expr> expr, const CodeLocation & location, ast::Expr * param ) { 1210 return new ast::ExprStmt( location, 1211 new ast::UntypedExpr( location, 1212 new ast::NameExpr( location, fnName ), { 1213 ast::UntypedExpr::createDeref( 1214 location, 1215 new ast::CastExpr( location, 1216 param, 1217 new ast::PointerType( new ast::TypeofType( new ast::UntypedExpr( 1218 expr->location, 1219 new ast::NameExpr( expr->location, "__get_mutexstmt_lock_type" ), 1220 { expr } 1221 ) ) ), 1222 ast::GeneratedFlag::ExplicitCast 1223 ) 1224 ) 1225 } 1226 ) 1227 ); 1228 } 1229 1230 ast::IfStmt * MutexKeyword::genTypeDiscrimLockUnlock( const std::string & fnName, const std::vector<ast::ptr<ast::Expr>> & args, const CodeLocation & location, ast::UntypedExpr * thisParam ) { 1231 ast::IfStmt * outerLockIf = nullptr; 1232 ast::IfStmt * lastLockIf = nullptr; 1233 1234 //adds an if/elif clause for each lock to assign type from void ptr based on ptr address 1235 for ( long unsigned int i = 0; i < args.size(); i++ ) { 1236 1237 ast::UntypedExpr * ifCond = new ast::UntypedExpr( location, 1238 new ast::NameExpr( location, "?==?" 
), { 1239 ast::deepCopy( thisParam ), 1240 new ast::CastExpr( location, new ast::AddressExpr( location, args.at(i) ), new ast::PointerType( new ast::VoidType() )) 1241 } 1242 ); 1243 1244 ast::IfStmt * currLockIf = new ast::IfStmt( 1245 location, 1246 ifCond, 1247 genVirtLockUnlockExpr( fnName, args.at(i), location, ast::deepCopy( thisParam ) ) 1248 ); 1249 1250 if ( i == 0 ) { 1251 outerLockIf = currLockIf; 1252 } else { 1253 // add ifstmt to else of previous stmt 1254 lastLockIf->else_ = currLockIf; 1255 } 1256 1257 lastLockIf = currLockIf; 1258 } 1259 return outerLockIf; 1260 } 1261 360 1262 ast::CompoundStmt * MutexKeyword::addStatements( 361 1263 const ast::CompoundStmt * body, 362 1264 const std::vector<ast::ptr<ast::Expr>> & args ) { 363 ast::CompoundStmt * mutBody = ast::mutate( body );364 1265 365 1266 // Code is generated near the beginning of the compound statement. 366 const CodeLocation & location = mutBody->location; 1267 const CodeLocation & location = body->location; 1268 1269 // final body to return 1270 ast::CompoundStmt * newBody = new ast::CompoundStmt( location ); 1271 1272 // std::string lockFnName = mutex_func_namer.newName(); 1273 // std::string unlockFnName = mutex_func_namer.newName(); 367 1274 368 1275 // Make pointer to the monitors. … … 372 1279 new ast::ArrayType( 373 1280 new ast::PointerType( 374 new ast::TypeofType( 375 new ast::UntypedExpr( 376 location, 377 new ast::NameExpr( location, "__get_type" ), 378 { args.front() } 379 ) 380 ) 1281 new ast::VoidType() 381 1282 ), 382 1283 ast::ConstantExpr::from_ulong( location, args.size() ), … … 392 1293 new ast::UntypedExpr( 393 1294 expr->location, 394 new ast::NameExpr( expr->location, "__get_ ptr" ),1295 new ast::NameExpr( expr->location, "__get_mutexstmt_lock_ptr" ), 395 1296 { expr } 396 1297 ) … … 405 1306 ast::StructInstType * lock_guard_struct = 406 1307 new ast::StructInstType( lock_guard_decl ); 407 ast::TypeExpr * lock_type_expr = new ast::TypeExpr( 408 location, 409 new ast::TypeofType( 410 new ast::UntypedExpr( 411 location, 412 new ast::NameExpr( location, "__get_type" ), 413 { args.front() } 414 ) 415 ) 416 ); 417 418 lock_guard_struct->params.push_back( lock_type_expr ); 419 420 // In reverse order: 1308 1309 // use try stmts to lock and finally to unlock 1310 ast::TryStmt * outerTry = nullptr; 1311 ast::TryStmt * currentTry; 1312 ast::CompoundStmt * lastBody = nullptr; 1313 1314 // adds a nested try stmt for each lock we are locking 1315 for ( long unsigned int i = 0; i < args.size(); i++ ) { 1316 ast::UntypedExpr * innerAccess = new ast::UntypedExpr( 1317 location, 1318 new ast::NameExpr( location,"?[?]" ), { 1319 new ast::NameExpr( location, "__monitors" ), 1320 ast::ConstantExpr::from_int( location, i ) 1321 } 1322 ); 1323 1324 // make the try body 1325 ast::CompoundStmt * currTryBody = new ast::CompoundStmt( location ); 1326 ast::IfStmt * lockCall = genTypeDiscrimLockUnlock( "lock", args, location, innerAccess ); 1327 currTryBody->push_back( lockCall ); 1328 1329 // make the finally stmt 1330 ast::CompoundStmt * currFinallyBody = new ast::CompoundStmt( location ); 1331 ast::IfStmt * unlockCall = genTypeDiscrimLockUnlock( "unlock", args, location, innerAccess ); 1332 currFinallyBody->push_back( unlockCall ); 1333 1334 // construct the current try 1335 currentTry = new ast::TryStmt( 1336 location, 1337 currTryBody, 1338 {}, 1339 new ast::FinallyStmt( location, currFinallyBody ) 1340 ); 1341 if ( i == 0 ) outerTry = currentTry; 1342 else { 1343 // pushback try into the body of the outer try 
1344 lastBody->push_back( currentTry ); 1345 } 1346 lastBody = currTryBody; 1347 } 1348 1349 // push body into innermost try body 1350 if ( lastBody != nullptr ) { 1351 lastBody->push_back( body ); 1352 newBody->push_front( outerTry ); 1353 } 1354 421 1355 // monitor_guard_t __guard = { __monitors, # }; 422 mutBody->push_front(1356 newBody->push_front( 423 1357 new ast::DeclStmt( 424 1358 location, … … 447 1381 448 1382 // monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) }; 449 mutBody->push_front( new ast::DeclStmt( location, monitors ) ); 450 451 return mutBody; 1383 newBody->push_front( new ast::DeclStmt( location, monitors ) ); 1384 1385 // // The parameter for both __lock_curr/__unlock_curr routines. 1386 // ast::ObjectDecl * this_decl = new ast::ObjectDecl( 1387 // location, 1388 // "this", 1389 // new ast::PointerType( new ast::VoidType() ), 1390 // nullptr, 1391 // {}, 1392 // ast::Linkage::Cforall 1393 // ); 1394 1395 // ast::FunctionDecl * lock_decl = new ast::FunctionDecl( 1396 // location, 1397 // lockFnName, 1398 // { /* forall */ }, 1399 // { 1400 // // Copy the declaration of this. 1401 // this_decl, 1402 // }, 1403 // { /* returns */ }, 1404 // nullptr, 1405 // 0, 1406 // ast::Linkage::Cforall, 1407 // { /* attributes */ }, 1408 // ast::Function::Inline 1409 // ); 1410 1411 // ast::FunctionDecl * unlock_decl = new ast::FunctionDecl( 1412 // location, 1413 // unlockFnName, 1414 // { /* forall */ }, 1415 // { 1416 // // Copy the declaration of this. 1417 // ast::deepCopy( this_decl ), 1418 // }, 1419 // { /* returns */ }, 1420 // nullptr, 1421 // 0, 1422 // ast::Linkage::Cforall, 1423 // { /* attributes */ }, 1424 // ast::Function::Inline 1425 // ); 1426 1427 // ast::IfStmt * outerLockIf = nullptr; 1428 // ast::IfStmt * outerUnlockIf = nullptr; 1429 // ast::IfStmt * lastLockIf = nullptr; 1430 // ast::IfStmt * lastUnlockIf = nullptr; 1431 1432 // //adds an if/elif clause for each lock to assign type from void ptr based on ptr address 1433 // for ( long unsigned int i = 0; i < args.size(); i++ ) { 1434 // ast::VariableExpr * thisParam = new ast::VariableExpr( location, InitTweak::getParamThis( lock_decl ) ); 1435 // ast::UntypedExpr * ifCond = new ast::UntypedExpr( location, 1436 // new ast::NameExpr( location, "?==?" 
), { 1437 // thisParam, 1438 // new ast::CastExpr( location, new ast::AddressExpr( location, args.at(i) ), new ast::PointerType( new ast::VoidType() )) 1439 // } 1440 // ); 1441 1442 // ast::IfStmt * currLockIf = new ast::IfStmt( 1443 // location, 1444 // ast::deepCopy( ifCond ), 1445 // genVirtLockUnlockExpr( "lock", args.at(i), location, ast::deepCopy( thisParam ) ) 1446 // ); 1447 1448 // ast::IfStmt * currUnlockIf = new ast::IfStmt( 1449 // location, 1450 // ifCond, 1451 // genVirtLockUnlockExpr( "unlock", args.at(i), location, ast::deepCopy( thisParam ) ) 1452 // ); 1453 1454 // if ( i == 0 ) { 1455 // outerLockIf = currLockIf; 1456 // outerUnlockIf = currUnlockIf; 1457 // } else { 1458 // // add ifstmt to else of previous stmt 1459 // lastLockIf->else_ = currLockIf; 1460 // lastUnlockIf->else_ = currUnlockIf; 1461 // } 1462 1463 // lastLockIf = currLockIf; 1464 // lastUnlockIf = currUnlockIf; 1465 // } 1466 1467 // // add pointer typing if/elifs to body of routines 1468 // lock_decl->stmts = new ast::CompoundStmt( location, { outerLockIf } ); 1469 // unlock_decl->stmts = new ast::CompoundStmt( location, { outerUnlockIf } ); 1470 1471 // // add routines to scope 1472 // declsToAddBefore.push_back( lock_decl ); 1473 // declsToAddBefore.push_back( unlock_decl ); 1474 1475 // newBody->push_front(new ast::DeclStmt( location, lock_decl )); 1476 // newBody->push_front(new ast::DeclStmt( location, unlock_decl )); 1477 1478 return newBody; 452 1479 } 453 1480 … … 564 1591 565 1592 // -------------------------------------------------------------------------- 1593 // Interface Functions: 566 1594 567 1595 void implementKeywords( ast::TranslationUnit & translationUnit ) { 568 (void)translationUnit; 569 assertf(false, "Apply Keywords not implemented." ); 1596 ast::Pass<ThreadKeyword>::run( translationUnit ); 1597 ast::Pass<CoroutineKeyword>::run( translationUnit ); 1598 ast::Pass<MonitorKeyword>::run( translationUnit ); 1599 ast::Pass<GeneratorKeyword>::run( translationUnit ); 1600 ast::Pass<SuspendKeyword>::run( translationUnit ); 570 1601 } 571 1602 -
src/ControlStruct/ExceptTranslateNew.cpp
ref3c383 rd672350 9 9 // Author : Andrew Beach 10 10 // Created On : Mon Nov 8 11:53:00 2021 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Mon Jan 31 18:49:58202213 // Update Count : 111 // Last Modified By : Andrew Beach 12 // Last Modified On : Fri Mar 11 17:51:00 2022 13 // Update Count : 2 14 14 // 15 15 … … 27 27 28 28 typedef std::list<ast::CatchStmt*> CatchList; 29 30 void split( CatchList& allHandlers, CatchList& terHandlers,31 CatchList& resHandlers ) {32 while ( !allHandlers.empty() ) {33 ast::CatchStmt * stmt = allHandlers.front();34 allHandlers.pop_front();35 if (stmt->kind == ast::ExceptionKind::Terminate) {36 terHandlers.push_back(stmt);37 } else {38 resHandlers.push_back(stmt);39 }40 }41 }42 29 43 30 void appendDeclStmt( ast::CompoundStmt * block, ast::DeclWithType * item ) { … … 171 158 ast::Stmt * create_resume_rethrow( const ast::ThrowStmt * throwStmt ); 172 159 173 // Types used in translation, make sure to use clone. 160 // Types used in translation, first group are internal. 161 ast::ObjectDecl * make_index_object( CodeLocation const & ) const; 162 ast::ObjectDecl * make_exception_object( CodeLocation const & ) const; 163 ast::ObjectDecl * make_bool_object( CodeLocation const & ) const; 164 ast::ObjectDecl * make_voidptr_object( CodeLocation const & ) const; 165 ast::ObjectDecl * make_unused_index_object( CodeLocation const & ) const; 174 166 // void (*function)(); 175 ast::FunctionDecl * try_func_t;167 ast::FunctionDecl * make_try_function( CodeLocation const & ) const; 176 168 // void (*function)(int, exception); 177 ast::FunctionDecl * catch_func_t;169 ast::FunctionDecl * make_catch_function( CodeLocation const & ) const; 178 170 // int (*function)(exception); 179 ast::FunctionDecl * ma tch_func_t;171 ast::FunctionDecl * make_match_function( CodeLocation const & ) const; 180 172 // bool (*function)(exception); 181 ast::FunctionDecl * handle_func_t;173 ast::FunctionDecl * make_handle_function( CodeLocation const & ) const; 182 174 // void (*function)(__attribute__((unused)) void *); 183 ast::FunctionDecl * finally_func_t; 184 185 ast::StructInstType * create_except_type() { 186 assert( except_decl ); 187 return new ast::StructInstType( except_decl ); 188 } 189 void init_func_types(); 175 ast::FunctionDecl * make_finally_function( CodeLocation const & ) const; 190 176 191 177 public: … … 199 185 }; 200 186 201 void TryMutatorCore::init_func_types() { 187 ast::ObjectDecl * TryMutatorCore::make_index_object( 188 CodeLocation const & location ) const { 189 return new ast::ObjectDecl( 190 location, 191 "__handler_index", 192 new ast::BasicType(ast::BasicType::SignedInt), 193 nullptr, //init 194 ast::Storage::Classes{}, 195 ast::Linkage::Cforall 196 ); 197 } 198 199 ast::ObjectDecl * TryMutatorCore::make_exception_object( 200 CodeLocation const & location ) const { 202 201 assert( except_decl ); 203 204 ast::ObjectDecl index_obj( 205 {}, 206 "__handler_index", 207 new ast::BasicType(ast::BasicType::SignedInt) 208 ); 209 ast::ObjectDecl exception_obj( 210 {}, 202 return new ast::ObjectDecl( 203 location, 211 204 "__exception_inst", 212 205 new ast::PointerType( 213 206 new ast::StructInstType( except_decl ) 214 207 ), 215 NULL 216 ); 217 ast::ObjectDecl bool_obj( 218 {}, 208 nullptr, //init 209 ast::Storage::Classes{}, 210 ast::Linkage::Cforall 211 ); 212 } 213 214 ast::ObjectDecl * TryMutatorCore::make_bool_object( 215 CodeLocation const & location ) const { 216 return new ast::ObjectDecl( 217 location, 219 218 "__ret_bool", 220 219 new ast::BasicType( 
ast::BasicType::Bool ), … … 225 224 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 226 225 ); 227 ast::ObjectDecl voidptr_obj( 228 {}, 226 } 227 228 ast::ObjectDecl * TryMutatorCore::make_voidptr_object( 229 CodeLocation const & location ) const { 230 return new ast::ObjectDecl( 231 location, 229 232 "__hook", 230 233 new ast::PointerType( … … 237 240 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 238 241 ); 239 240 ast::ObjectDecl unused_index_obj( 241 {}, 242 } 243 244 ast::ObjectDecl * TryMutatorCore::make_unused_index_object( 245 CodeLocation const & location ) const { 246 return new ast::ObjectDecl( 247 location, 242 248 "__handler_index", 243 249 new ast::BasicType(ast::BasicType::SignedInt), … … 248 254 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 249 255 ); 250 //unused_index_obj->attributes.push_back( new Attribute( "unused" ) ); 251 252 try_func_t = new ast::FunctionDecl( 253 {}, 256 } 257 258 ast::FunctionDecl * TryMutatorCore::make_try_function( 259 CodeLocation const & location ) const { 260 return new ast::FunctionDecl( 261 location, 254 262 "try", 255 263 {}, //forall … … 260 268 ast::Linkage::Cforall 261 269 ); 262 263 catch_func_t = new ast::FunctionDecl( 264 {}, 270 } 271 272 ast::FunctionDecl * TryMutatorCore::make_catch_function( 273 CodeLocation const & location ) const { 274 return new ast::FunctionDecl( 275 location, 265 276 "catch", 266 277 {}, //forall 267 { ast::deepCopy(&index_obj), ast::deepCopy(&exception_obj)},//param278 { make_index_object( location ), make_exception_object( location ) }, 268 279 {}, //return void 269 280 nullptr, … … 271 282 ast::Linkage::Cforall 272 283 ); 273 274 match_func_t = new ast::FunctionDecl( 275 {}, 284 } 285 286 ast::FunctionDecl * TryMutatorCore::make_match_function( 287 CodeLocation const & location ) const { 288 return new ast::FunctionDecl( 289 location, 276 290 "match", 277 291 {}, //forall 278 { ast::deepCopy(&exception_obj)},279 { ast::deepCopy(&unused_index_obj)},292 { make_exception_object( location ) }, 293 { make_unused_index_object( location ) }, 280 294 nullptr, 281 295 ast::Storage::Classes{}, 282 296 ast::Linkage::Cforall 283 297 ); 284 285 handle_func_t = new ast::FunctionDecl( 286 {}, 298 } 299 300 ast::FunctionDecl * TryMutatorCore::make_handle_function( 301 CodeLocation const & location ) const { 302 return new ast::FunctionDecl( 303 location, 287 304 "handle", 288 305 {}, //forall 289 { ast::deepCopy(&exception_obj)},290 { ast::deepCopy(&bool_obj)},306 { make_exception_object( location ) }, 307 { make_bool_object( location ) }, 291 308 nullptr, 292 309 ast::Storage::Classes{}, 293 310 ast::Linkage::Cforall 294 311 ); 295 296 finally_func_t = new ast::FunctionDecl( 297 {}, 312 } 313 314 ast::FunctionDecl * TryMutatorCore::make_finally_function( 315 CodeLocation const & location ) const { 316 return new ast::FunctionDecl( 317 location, 298 318 "finally", 299 319 {}, //forall 300 { ast::deepCopy(&voidptr_obj)},320 { make_voidptr_object( location ) }, 301 321 {}, //return void 302 322 nullptr, … … 304 324 ast::Linkage::Cforall 305 325 ); 306 307 //catch_func_t.get_parameters().push_back( index_obj.clone() );308 //catch_func_t.get_parameters().push_back( exception_obj.clone() );309 //match_func_t.get_returnVals().push_back( unused_index_obj );310 //match_func_t.get_parameters().push_back( exception_obj.clone() );311 //handle_func_t.get_returnVals().push_back( bool_obj.clone() );312 //handle_func_t.get_parameters().push_back( 
exception_obj.clone() );313 //finally_func_t.get_parameters().push_back( voidptr_obj.clone() );314 326 } 315 327 316 328 // TryStmt Mutation Helpers 317 318 /*319 ast::CompoundStmt * TryMutatorCore::take_try_block( ast::TryStmt *tryStmt ) {320 ast::CompoundStmt * block = tryStmt->body;321 tryStmt->body = nullptr;322 return block;323 }324 */325 329 326 330 ast::FunctionDecl * TryMutatorCore::create_try_wrapper( 327 331 const ast::CompoundStmt *body ) { 328 332 329 ast::FunctionDecl * ret = ast::deepCopy(try_func_t);333 ast::FunctionDecl * ret = make_try_function( body->location ); 330 334 ret->stmts = body; 331 335 return ret; … … 339 343 const CodeLocation loc = handlers.front()->location; 340 344 341 ast::FunctionDecl * func_t = ast::deepCopy(catch_func_t);345 ast::FunctionDecl * func_t = make_catch_function( loc ); 342 346 const ast::DeclWithType * index_obj = func_t->params.front(); 343 347 const ast::DeclWithType * except_obj = func_t->params.back(); … … 386 390 // handler->body = nullptr; 387 391 388 handler_wrappers.push_back( new ast::CaseStmt(loc, 392 handler_wrappers.push_back( new ast::CaseStmt(loc, 389 393 ast::ConstantExpr::from_int(loc, index) , 390 394 { block, new ast::ReturnStmt( loc, nullptr ) } … … 393 397 // TODO: Some sort of meaningful error on default perhaps? 394 398 395 /* 396 std::list<Statement*> stmt_handlers; 397 while ( !handler_wrappers.empty() ) { 398 stmt_handlers.push_back( handler_wrappers.front() ); 399 handler_wrappers.pop_front(); 400 } 401 */ 402 403 ast::SwitchStmt * handler_lookup = new ast::SwitchStmt(loc, 399 ast::SwitchStmt * handler_lookup = new ast::SwitchStmt( loc, 404 400 new ast::VariableExpr( loc, index_obj ), 405 401 std::move(handler_wrappers) 406 402 ); 407 ast::CompoundStmt * body = new ast::CompoundStmt(loc, 408 {handler_lookup}); 403 ast::CompoundStmt * body = new ast::CompoundStmt( loc, {handler_lookup} ); 409 404 410 405 func_t->stmts = body; … … 433 428 434 429 // Check for type match. 435 ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc, 430 ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc, 436 431 new ast::VariableExpr(loc, except_obj ), 437 432 local_except->get_type() … … 445 440 } 446 441 // Construct the match condition. 
447 block->push_back( new ast::IfStmt(loc, 442 block->push_back( new ast::IfStmt(loc, 448 443 cond, modded_handler->body, nullptr ) ); 449 444 450 // xxx - how does this work in new ast451 //modded_handler->set_decl( nullptr );452 //modded_handler->set_cond( nullptr );453 //modded_handler->set_body( nullptr );454 //delete modded_handler;455 445 return block; 456 446 } … … 467 457 ast::CompoundStmt * body = new ast::CompoundStmt(loc); 468 458 469 ast::FunctionDecl * func_t = ast::deepCopy(match_func_t);459 ast::FunctionDecl * func_t = make_match_function( loc ); 470 460 const ast::DeclWithType * except_obj = func_t->params.back(); 471 461 … … 490 480 } 491 481 492 body->push_back( new ast::ReturnStmt(loc, 482 body->push_back( new ast::ReturnStmt(loc, 493 483 ast::ConstantExpr::from_int( loc, 0 ) )); 494 484 … … 525 515 ast::CompoundStmt * body = new ast::CompoundStmt(loc); 526 516 527 ast::FunctionDecl * func_t = ast::deepCopy(handle_func_t);517 ast::FunctionDecl * func_t = make_handle_function( loc ); 528 518 const ast::DeclWithType * except_obj = func_t->params.back(); 529 519 … … 535 525 ast::CompoundStmt * handling_code; 536 526 if (handler->body.as<ast::CompoundStmt>()) { 537 handling_code = 538 strict_dynamic_cast<ast::CompoundStmt*>(handler->body.get_and_mutate() );527 handling_code = strict_dynamic_cast<ast::CompoundStmt*>( 528 handler->body.get_and_mutate() ); 539 529 } else { 540 530 handling_code = new ast::CompoundStmt(loc); … … 600 590 const ast::CompoundStmt * body = finally->body; 601 591 602 ast::FunctionDecl * func_t = ast::deepCopy(finally_func_t);592 ast::FunctionDecl * func_t = make_finally_function( tryStmt->location ); 603 593 func_t->stmts = body; 604 594 605 // finally->set_block( nullptr );606 // delete finally;607 595 tryStmt->finally = nullptr; 608 609 596 610 597 return func_t; … … 617 604 618 605 const CodeLocation loc = finally_wrapper->location; 619 // Make Cleanup Attribute.620 /*621 std::list< ast::Attribute * > attributes;622 {623 std::list< > attr_params;624 attr_params.push_back( nameOf( finally_wrapper ) );625 attributes.push_back( new Attribute( "cleanup", attr_params ) );626 }627 */628 629 606 return new ast::ObjectDecl( 630 607 loc, … … 644 621 // return false; 645 622 const CodeLocation loc = throwStmt->location; 646 ast::Stmt * result = new ast::ReturnStmt(loc, 623 ast::Stmt * result = new ast::ReturnStmt(loc, 647 624 ast::ConstantExpr::from_bool( loc, false ) 648 625 ); 649 626 result->labels = throwStmt->labels; 650 // delete throwStmt; done by postvisit651 627 return result; 652 628 } … … 660 636 assert( nullptr == except_decl ); 661 637 except_decl = structDecl; 662 init_func_types();663 638 } else if ( structDecl->name == "__cfaehm_try_resume_node" ) { 664 639 assert( nullptr == node_decl ); … … 706 681 } 707 682 } 708 // split( mutStmt->handlers,709 // termination_handlers, resumption_handlers );710 683 711 684 if ( resumption_handlers.size() ) { -
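Note: the rewrite above replaces the cached prototype declarations (try_func_t and friends, previously deep-copied at each use) with make_* factories that stamp every generated wrapper with the caller's CodeLocation. A minimal sketch of that factory shape, using simplified stand-in types rather than the real ast nodes:

#include <memory>
#include <string>

struct CodeLocation {                      // simplified stand-in
	std::string file;
	int line;
};

struct FunctionDecl {                      // simplified stand-in for ast::FunctionDecl
	CodeLocation location;
	std::string name;
};

// Mirrors the shape of TryMutatorCore::make_try_function( CodeLocation const & ):
// build a fresh declaration carrying the caller's location instead of deep-copying
// a shared prototype that only has an empty location.
std::unique_ptr<FunctionDecl> make_try_function( CodeLocation const & location ) {
	return std::unique_ptr<FunctionDecl>( new FunctionDecl{ location, "try" } );
}

int main() {
	CodeLocation loc{ "example.cfa", 42 };
	auto tryWrapper = make_try_function( loc );      // each wrapper gets a real location
	return tryWrapper->location.line == 42 ? 0 : 1;
}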
src/InitTweak/FixGlobalInit.cc
ref3c383 rd672350 113 113 accept_all(translationUnit, fixer); 114 114 115 // Say these magic declarations come at the end of the file. 116 CodeLocation const & location = translationUnit.decls.back()->location; 117 115 118 if ( !fixer.core.initStmts.empty() ) { 116 119 std::vector<ast::ptr<ast::Expr>> ctorParams; 117 if (inLibrary) ctorParams.emplace_back(ast::ConstantExpr::from_int({}, 200)); 118 auto initFunction = new ast::FunctionDecl({}, "__global_init__", {}, {}, {}, new ast::CompoundStmt({}, std::move(fixer.core.initStmts)), 119 ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("constructor", std::move(ctorParams))}); 120 if (inLibrary) ctorParams.emplace_back(ast::ConstantExpr::from_int(location, 200)); 121 auto initFunction = new ast::FunctionDecl(location, 122 "__global_init__", {}, {}, {}, 123 new ast::CompoundStmt(location, std::move(fixer.core.initStmts)), 124 ast::Storage::Static, ast::Linkage::C, 125 {new ast::Attribute("constructor", std::move(ctorParams))}); 120 126 121 127 translationUnit.decls.emplace_back( initFunction ); … … 124 130 if ( !fixer.core.destroyStmts.empty() ) { 125 131 std::vector<ast::ptr<ast::Expr>> dtorParams; 126 if (inLibrary) dtorParams.emplace_back(ast::ConstantExpr::from_int({}, 200)); 127 auto destroyFunction = new ast::FunctionDecl({}, "__global_destroy__", {}, {}, {}, new ast::CompoundStmt({}, std::move(fixer.core.destroyStmts)), 128 ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("destructor", std::move(dtorParams))}); 132 if (inLibrary) dtorParams.emplace_back(ast::ConstantExpr::from_int(location, 200)); 133 auto destroyFunction = new ast::FunctionDecl( location, 134 "__global_destroy__", {}, {}, {}, 135 new ast::CompoundStmt(location, std::move(fixer.core.destroyStmts)), 136 ast::Storage::Static, ast::Linkage::C, 137 {new ast::Attribute("destructor", std::move(dtorParams))}); 129 138 130 139 translationUnit.decls.emplace_back(destroyFunction); -
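Note: the visible change here is giving __global_init__/__global_destroy__ a real source location; the functions themselves are emitted as GNU constructor/destructor routines. Roughly the shape of the generated code, as a standalone and deliberately simplified sketch (the body and the use of priority 200 are illustrative only):

#include <cstdio>

static int counter;

__attribute__((constructor(200)))          // runs before main, like __global_init__
static void __global_init__() {
	counter = 42;                          // stands in for the collected init statements
}

__attribute__((destructor(200)))           // runs after main, like __global_destroy__
static void __global_destroy__() {
	std::printf( "tear-down, counter was %d\n", counter );
}

int main() {
	return counter == 42 ? 0 : 1;          // the constructor has already run
}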
src/InitTweak/FixInitNew.cpp
ref3c383 rd672350 16 16 #include "CodeGen/GenType.h" // for genPrettyType 17 17 #include "CodeGen/OperatorTable.h" 18 #include "Common/CodeLocationTools.hpp" 18 19 #include "Common/PassVisitor.h" // for PassVisitor, WithStmtsToAdd 19 20 #include "Common/SemanticError.h" // for SemanticError … … 85 86 /// generate/resolve copy construction expressions for each, and generate/resolve destructors for both 86 87 /// arguments and return value temporaries 87 struct ResolveCopyCtors final : public ast::WithGuards, public ast::WithStmtsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithVisitorRef<ResolveCopyCtors> {88 struct ResolveCopyCtors final : public ast::WithGuards, public ast::WithStmtsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithVisitorRef<ResolveCopyCtors>, public ast::WithConstTranslationUnit { 88 89 const ast::Expr * postvisit( const ast::ImplicitCopyCtorExpr * impCpCtorExpr ); 89 90 const ast::StmtExpr * previsit( const ast::StmtExpr * stmtExpr ); … … 189 190 /// for any member that is missing a corresponding ctor/dtor call. 190 191 /// error if a member is used before constructed 191 struct GenStructMemberCalls final : public ast::WithGuards, public ast::WithShortCircuiting, public ast::WithSymbolTable, public ast::WithVisitorRef<GenStructMemberCalls> {192 struct GenStructMemberCalls final : public ast::WithGuards, public ast::WithShortCircuiting, public ast::WithSymbolTable, public ast::WithVisitorRef<GenStructMemberCalls>, public ast::WithConstTranslationUnit { 192 193 void previsit( const ast::FunctionDecl * funcDecl ); 193 194 const ast::DeclWithType * postvisit( const ast::FunctionDecl * funcDecl ); … … 214 215 215 216 /// expands ConstructorExpr nodes into comma expressions, using a temporary for the first argument 216 struct FixCtorExprs final : public ast::WithDeclsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting {217 struct FixCtorExprs final : public ast::WithDeclsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithConstTranslationUnit { 217 218 const ast::Expr * postvisit( const ast::ConstructorExpr * ctorExpr ); 218 219 }; … … 509 510 // (VariableExpr and already resolved expression) 510 511 CP_CTOR_PRINT( std::cerr << "ResolvingCtorDtor " << untyped << std::endl; ) 511 ast::ptr<ast::Expr> resolved = ResolvExpr::findVoidExpression(untyped, symtab);512 ast::ptr<ast::Expr> resolved = ResolvExpr::findVoidExpression(untyped, { symtab, transUnit().global } ); 512 513 assert( resolved ); 513 514 if ( resolved->env ) { … … 553 554 ast::ptr<ast::Expr> guard = mutArg; 554 555 555 ast::ptr<ast::ObjectDecl> tmp = new ast::ObjectDecl( {}, "__tmp", mutResult, nullptr );556 ast::ptr<ast::ObjectDecl> tmp = new ast::ObjectDecl(loc, "__tmp", mutResult, nullptr ); 556 557 557 558 // create and resolve copy constructor … … 587 588 588 589 ast::Expr * ResolveCopyCtors::destructRet( const ast::ObjectDecl * ret, const ast::Expr * arg ) { 590 auto global = transUnit().global; 589 591 // TODO: refactor code for generating cleanup attribute, since it's common and reused in ~3-4 places 590 592 // check for existing cleanup attribute before adding another(?) 591 593 // need to add __Destructor for _tmp_cp variables as well 592 594 593 assertf( ast::dtorStruct, "Destructor generation requires __Destructor definition." );594 assertf( ast::dtorStruct->members.size() == 2, "__Destructor definition does not have expected fields." 
);595 assertf( ast::dtorStructDestroy, "Destructor generation requires __destroy_Destructor." );595 assertf( global.dtorStruct, "Destructor generation requires __Destructor definition." ); 596 assertf( global.dtorStruct->members.size() == 2, "__Destructor definition does not have expected fields." ); 597 assertf( global.dtorDestroy, "Destructor generation requires __destroy_Destructor." ); 596 598 597 599 const CodeLocation loc = ret->location; … … 610 612 auto dtorFunc = getDtorFunc( ret, new ast::ExprStmt(loc, dtor ), stmtsToAddBefore ); 611 613 612 auto dtorStructType = new ast::StructInstType( ast::dtorStruct);614 auto dtorStructType = new ast::StructInstType( global.dtorStruct ); 613 615 614 616 // what does this do??? … … 622 624 static UniqueName namer( "_ret_dtor" ); 623 625 auto retDtor = new ast::ObjectDecl(loc, namer.newName(), dtorStructType, new ast::ListInit(loc, { new ast::SingleInit(loc, ast::ConstantExpr::null(loc) ), new ast::SingleInit(loc, new ast::CastExpr( new ast::VariableExpr(loc, dtorFunc ), dtorType ) ) } ) ); 624 retDtor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr(loc, ast::dtorStructDestroy ) } ) );626 retDtor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr(loc, global.dtorDestroy ) } ) ); 625 627 stmtsToAddBefore.push_back( new ast::DeclStmt(loc, retDtor ) ); 626 628 627 629 if ( arg ) { 628 auto member = new ast::MemberExpr(loc, ast::dtorStruct->members.front().strict_as<ast::DeclWithType>(), new ast::VariableExpr(loc, retDtor ) );630 auto member = new ast::MemberExpr(loc, global.dtorStruct->members.front().strict_as<ast::DeclWithType>(), new ast::VariableExpr(loc, retDtor ) ); 629 631 auto object = new ast::CastExpr( new ast::AddressExpr( new ast::VariableExpr(loc, ret ) ), new ast::PointerType(new ast::VoidType() ) ); 630 632 ast::Expr * assign = createBitwiseAssignment( member, object ); … … 799 801 // to prevent warnings ('_unq0' may be used uninitialized in this function), 800 802 // insert an appropriate zero initializer for UniqueExpr temporaries. 
801 ast::Init * makeInit( const ast::Type * t ) {803 ast::Init * makeInit( const ast::Type * t, CodeLocation const & loc ) { 802 804 if ( auto inst = dynamic_cast< const ast::StructInstType * >( t ) ) { 803 805 // initizer for empty struct must be empty 804 if ( inst->base->members.empty() ) return new ast::ListInit({}, {}); 806 if ( inst->base->members.empty() ) { 807 return new ast::ListInit( loc, {} ); 808 } 805 809 } else if ( auto inst = dynamic_cast< const ast::UnionInstType * >( t ) ) { 806 810 // initizer for empty union must be empty 807 if ( inst->base->members.empty() ) return new ast::ListInit({}, {}); 808 } 809 810 return new ast::ListInit( {}, { new ast::SingleInit( {}, ast::ConstantExpr::from_int({}, 0) ) } ); 811 if ( inst->base->members.empty() ) { 812 return new ast::ListInit( loc, {} ); 813 } 814 } 815 816 return new ast::ListInit( loc, { 817 new ast::SingleInit( loc, ast::ConstantExpr::from_int( loc, 0 ) ) 818 } ); 811 819 } 812 820 … … 832 840 } else { 833 841 // expr isn't a call expr, so create a new temporary variable to use to hold the value of the unique expression 834 mutExpr->object = new ast::ObjectDecl( mutExpr->location, toString("_unq", mutExpr->id), mutExpr->result, makeInit( mutExpr->result ) );842 mutExpr->object = new ast::ObjectDecl( mutExpr->location, toString("_unq", mutExpr->id), mutExpr->result, makeInit( mutExpr->result, mutExpr->location ) ); 835 843 mutExpr->var = new ast::VariableExpr( mutExpr->location, mutExpr->object ); 836 844 } … … 1172 1180 auto guard = makeFuncGuard( [this]() { symtab.enterScope(); }, [this]() { symtab.leaveScope(); } ); 1173 1181 symtab.addFunction( function ); 1182 auto global = transUnit().global; 1174 1183 1175 1184 // need to iterate through members in reverse in order for … … 1217 1226 1218 1227 static UniqueName memberDtorNamer = { "__memberDtor" }; 1219 assertf( ast::dtorStruct, "builtin __Destructor not found." );1220 assertf( ast::dtorStructDestroy, "builtin __destroy_Destructor not found." );1228 assertf( global.dtorStruct, "builtin __Destructor not found." ); 1229 assertf( global.dtorDestroy, "builtin __destroy_Destructor not found." 
); 1221 1230 1222 1231 ast::Expr * thisExpr = new ast::CastExpr( new ast::AddressExpr( new ast::VariableExpr(loc, thisParam ) ), new ast::PointerType( new ast::VoidType(), ast::CV::Qualifiers() ) ); … … 1228 1237 auto dtorType = new ast::PointerType( dtorFtype ); 1229 1238 1230 auto destructor = new ast::ObjectDecl(loc, memberDtorNamer.newName(), new ast::StructInstType( ast::dtorStruct ), new ast::ListInit(loc, { new ast::SingleInit(loc, thisExpr ), new ast::SingleInit(loc, new ast::CastExpr( dtorExpr, dtorType ) ) } ) );1231 destructor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr( {}, ast::dtorStructDestroy ) } ) );1239 auto destructor = new ast::ObjectDecl(loc, memberDtorNamer.newName(), new ast::StructInstType( global.dtorStruct ), new ast::ListInit(loc, { new ast::SingleInit(loc, thisExpr ), new ast::SingleInit(loc, new ast::CastExpr( dtorExpr, dtorType ) ) } ) ); 1240 destructor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr( loc, global.dtorDestroy ) } ) ); 1232 1241 mutStmts->push_front( new ast::DeclStmt(loc, destructor ) ); 1233 1242 mutStmts->kids.splice( mutStmts->kids.begin(), stmtsToAdd ); … … 1323 1332 1324 1333 const ast::Expr * GenStructMemberCalls::postvisit( const ast::UntypedExpr * untypedExpr ) { 1325 // Expression * newExpr = untypedExpr;1326 1334 // xxx - functions returning ast::ptr seems wrong... 1327 auto res = ResolvExpr::findVoidExpression( untypedExpr, symtab ); 1328 return res.release(); 1329 // return newExpr; 1335 auto res = ResolvExpr::findVoidExpression( untypedExpr, { symtab, transUnit().global } ); 1336 // Fix CodeLocation (at least until resolver is fixed). 1337 auto fix = localFillCodeLocations( untypedExpr->location, res.release() ); 1338 return strict_dynamic_cast<const ast::Expr *>( fix ); 1330 1339 } 1331 1340 … … 1361 1370 1362 1371 // resolve assignment and dispose of new env 1363 auto resolved = ResolvExpr::findVoidExpression( assign, symtab);1372 auto resolved = ResolvExpr::findVoidExpression( assign, { symtab, transUnit().global } ); 1364 1373 auto mut = resolved.get_and_mutate(); 1365 1374 assertf(resolved.get() == mut, "newly resolved expression must be unique"); -
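Note: much of the destructor wiring above hangs generated __Destructor objects off GCC/Clang's cleanup variable attribute, which calls a named function with a pointer to the variable when it leaves scope. A standalone illustration of that mechanism using our own names (not the library's __Destructor/__destroy_Destructor machinery):

#include <cstdio>

struct Guard {
	const char * name;
};

// Called automatically with &g when g leaves scope, the same hook the
// generated __Destructor objects rely on through their cleanup attribute.
static void run_cleanup( Guard * g ) {
	std::printf( "cleaning up %s\n", g->name );
}

int main() {
	__attribute__((cleanup(run_cleanup))) Guard g = { "return temporary" };
	std::printf( "body runs first for %s\n", g.name );
	return 0;                              // run_cleanup( &g ) fires here
}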
src/Parser/parser.yy
ref3c383 rd672350 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Feb 11 14:26:15202213 // Update Count : 5 17412 // Last Modified On : Mon Mar 14 16:35:29 2022 13 // Update Count : 5276 14 14 // 15 15 … … 610 610 // | RESUME '(' comma_expression ')' compound_statement 611 611 // { SemanticError( yylloc, "Resume expression is currently unimplemented." ); $$ = nullptr; } 612 | IDENTIFIER IDENTIFIER // syntax error 613 { 614 SemanticError( yylloc, ::toString( "Adjacent identifiers are not meaningful in an expression. " 615 "Possible problem is identifier \"", *$1.str, 616 "\" is a misspelled typename or an incorrectly specified type name, " 617 "e.g., missing generic parameter or missing struct/union/enum before typename." ) ); 618 $$ = nullptr; 619 } 620 | IDENTIFIER direct_type // syntax error 621 { 622 SemanticError( yylloc, ::toString( "Identifier \"", *$1.str, "\" cannot appear before a type. " 623 "Possible problem is misspelled storage or CV qualifier." ) ); 624 $$ = nullptr; 625 } 612 626 ; 613 627 … … 638 652 // Historic, transitional: Disallow commas in subscripts. 639 653 // Switching to this behaviour may help check if a C compatibilty case uses comma-exprs in subscripts. 640 // { SemanticError( yylloc, "New array subscript is currently unimplemented." ); $$ = nullptr; }641 654 // Current: Commas in subscripts make tuples. 642 655 { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, new ExpressionNode( build_tuple( (ExpressionNode *)($3->set_last( $5 ) ) )) ) ); } … … 647 660 // equivalent to the old x[i,j]. 648 661 { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, $3 ) ); } 662 | constant '[' assignment_expression ']' // 3[a], 'a'[a], 3.5[a] 663 { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, $3 ) ); } 664 | string_literal '[' assignment_expression ']' // "abc"[3], 3["abc"] 665 { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, new ExpressionNode( $1 ), $3 ) ); } 649 666 | postfix_expression '{' argument_expression_list_opt '}' // CFA, constructor call 650 667 { … … 1052 1069 identifier_or_type_name ':' attribute_list_opt statement 1053 1070 { $$ = $4->add_label( $1, $3 ); } 1071 | identifier_or_type_name ':' attribute_list_opt error // syntax error 1072 { 1073 SemanticError( yylloc, ::toString( "Label \"", *$1.str, "\" must be associated with a statement, " 1074 "where a declaration, case, or default is not a statement. " 1075 "Move the label or terminate with a semi-colon." ) ); 1076 $$ = nullptr; 1077 } 1054 1078 ; 1055 1079 … … 1086 1110 | statement_list_nodecl statement 1087 1111 { assert( $1 ); $1->set_last( $2 ); $$ = $1; } 1112 | statement_list_nodecl error // syntax error 1113 { SemanticError( yylloc, "Declarations only allowed at the start of the switch body, i.e., after the '{'." ); $$ = nullptr; } 1088 1114 ; 1089 1115 … … 1093 1119 | MUTEX '(' ')' comma_expression ';' 1094 1120 { $$ = new StatementNode( build_mutex( nullptr, new StatementNode( build_expr( $4 ) ) ) ); } 1095 // { SemanticError( yylloc, "Mutex expression is currently unimplemented." ); $$ = nullptr; }1096 1121 ; 1097 1122 … … 1113 1138 $$ = $7 ? new StatementNode( build_compound( (StatementNode *)((new StatementNode( $7 ))->set_last( sw )) ) ) : sw; 1114 1139 } 1140 | SWITCH '(' comma_expression ')' '{' error '}' // CFA, syntax error 1141 { SemanticError( yylloc, "Only declarations can appear before the list of case clauses." 
); $$ = nullptr; } 1115 1142 | CHOOSE '(' comma_expression ')' case_clause // CFA 1116 1143 { $$ = new StatementNode( build_switch( false, $3, $5 ) ); } … … 1120 1147 $$ = $7 ? new StatementNode( build_compound( (StatementNode *)((new StatementNode( $7 ))->set_last( sw )) ) ) : sw; 1121 1148 } 1149 | CHOOSE '(' comma_expression ')' '{' error '}' // CFA, syntax error 1150 { SemanticError( yylloc, "Only declarations can appear before the list of case clauses." ); $$ = nullptr; } 1122 1151 ; 1123 1152 … … 1158 1187 1159 1188 case_label: // CFA 1160 CASE case_value_list ':' { $$ = $2; } 1189 CASE error // syntax error 1190 { SemanticError( yylloc, "Missing case list after case." ); $$ = nullptr; } 1191 | CASE case_value_list ':' { $$ = $2; } 1192 | CASE case_value_list error // syntax error 1193 { SemanticError( yylloc, "Missing colon after case list." ); $$ = nullptr; } 1161 1194 | DEFAULT ':' { $$ = new StatementNode( build_default() ); } 1162 1195 // A semantic check is required to ensure only one default clause per switch/choose statement. 1163 ; 1164 1165 //label_list_opt: 1166 // // empty 1167 // | identifier_or_type_name ':' 1168 // | label_list_opt identifier_or_type_name ':' 1169 // ; 1196 | DEFAULT error // syntax error 1197 { SemanticError( yylloc, "Missing colon after default." ); $$ = nullptr; } 1198 ; 1170 1199 1171 1200 case_label_list: // CFA … … 1403 1432 | when_clause_opt ELSE statement 1404 1433 { $$ = build_waitfor_timeout( nullptr, maybe_build_compound( $3 ), $1 ); } 1405 1406 | when_clause_opt timeout statement WOR ELSE statement 1434 // "else" must be conditional after timeout or timeout is never triggered (i.e., it is meaningless) 1435 | when_clause_opt timeout statement WOR ELSE statement // syntax error 1407 1436 { SemanticError( yylloc, "else clause must be conditional after timeout or timeout never triggered." ); $$ = nullptr; } 1408 1437 | when_clause_opt timeout statement WOR when_clause ELSE statement -
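Note: among the grammar additions above are postfix rules letting a subscript apply directly to a constant or string literal. In plain C/C++ the same spellings already work because a[i] is defined as *(a + i), so either operand may play the role of the array; a quick standalone check of both forms:

#include <cassert>

int main() {
	assert( "abc"[2] == 'c' );             // ordinary subscript on a string literal
	assert( 2["abc"] == 'c' );             // swapped operands: *(2 + "abc")
	return 0;
}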
src/ResolvExpr/CandidateFinder.cpp
ref3c383 rd672350 10 10 // Created On : Wed Jun 5 14:30:00 2019 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tue Oct 1 14:55:00 201913 // Update Count : 212 // Last Modified On : Wed Mar 16 11:58:00 2022 13 // Update Count : 3 14 14 // 15 15 … … 595 595 /// Actually visits expressions to find their candidate interpretations 596 596 class Finder final : public ast::WithShortCircuiting { 597 const ResolveContext & context; 597 598 const ast::SymbolTable & symtab; 598 599 public: … … 618 619 619 620 Finder( CandidateFinder & f ) 620 : symtab( f.localSyms ), selfFinder( f ), candidates( f.candidates ), tenv( f.env),621 targetType( f.targetType ) {}621 : context( f.context ), symtab( context.symtab ), selfFinder( f ), 622 candidates( f.candidates ), tenv( f.env ), targetType( f.targetType ) {} 622 623 623 624 void previsit( const ast::Node * ) { visit_children = false; } … … 872 873 Tuples::handleTupleAssignment( selfFinder, untypedExpr, argCandidates ); 873 874 874 CandidateFinder funcFinder { symtab, tenv };875 CandidateFinder funcFinder( context, tenv ); 875 876 if (auto nameExpr = untypedExpr->func.as<ast::NameExpr>()) { 876 877 auto kind = ast::SymbolTable::getSpecialFunctionKind(nameExpr->name); … … 918 919 // find function operators 919 920 ast::ptr< ast::Expr > opExpr = new ast::NameExpr{ untypedExpr->location, "?()" }; 920 CandidateFinder opFinder { symtab, tenv };921 CandidateFinder opFinder( context, tenv ); 921 922 // okay if there aren't any function operations 922 923 opFinder.find( opExpr, ResolvMode::withoutFailFast() ); … … 1059 1060 1060 1061 void postvisit( const ast::AddressExpr * addressExpr ) { 1061 CandidateFinder finder { symtab, tenv };1062 CandidateFinder finder( context, tenv ); 1062 1063 finder.find( addressExpr->arg ); 1063 1064 … … 1079 1080 ast::ptr< ast::Type > toType = castExpr->result; 1080 1081 assert( toType ); 1081 toType = resolveTypeof( toType, symtab);1082 toType = resolveTypeof( toType, context ); 1082 1083 // toType = SymTab::validateType( castExpr->location, toType, symtab ); 1083 1084 toType = adjustExprType( toType, tenv, symtab ); 1084 1085 1085 CandidateFinder finder { symtab, tenv, toType };1086 CandidateFinder finder( context, tenv, toType ); 1086 1087 finder.find( castExpr->arg, ResolvMode::withAdjustment() ); 1087 1088 … … 1136 1137 void postvisit( const ast::VirtualCastExpr * castExpr ) { 1137 1138 assertf( castExpr->result, "Implicit virtual cast targets not yet supported." 
); 1138 CandidateFinder finder { symtab, tenv };1139 CandidateFinder finder( context, tenv ); 1139 1140 // don't prune here, all alternatives guaranteed to have same type 1140 1141 finder.find( castExpr->arg, ResolvMode::withoutPrune() ); … … 1153 1154 auto target = inst->base.get(); 1154 1155 1155 CandidateFinder finder { symtab, tenv };1156 CandidateFinder finder( context, tenv ); 1156 1157 1157 1158 auto pick_alternatives = [target, this](CandidateList & found, bool expect_ref) { … … 1202 1203 1203 1204 void postvisit( const ast::UntypedMemberExpr * memberExpr ) { 1204 CandidateFinder aggFinder { symtab, tenv };1205 CandidateFinder aggFinder( context, tenv ); 1205 1206 aggFinder.find( memberExpr->aggregate, ResolvMode::withAdjustment() ); 1206 1207 for ( CandidateRef & agg : aggFinder.candidates ) { … … 1287 1288 addCandidate( 1288 1289 new ast::SizeofExpr{ 1289 sizeofExpr->location, resolveTypeof( sizeofExpr->type, symtab) },1290 sizeofExpr->location, resolveTypeof( sizeofExpr->type, context ) }, 1290 1291 tenv ); 1291 1292 } else { 1292 1293 // find all candidates for the argument to sizeof 1293 CandidateFinder finder { symtab, tenv };1294 CandidateFinder finder( context, tenv ); 1294 1295 finder.find( sizeofExpr->expr ); 1295 1296 // find the lowest-cost candidate, otherwise ambiguous … … 1311 1312 addCandidate( 1312 1313 new ast::AlignofExpr{ 1313 alignofExpr->location, resolveTypeof( alignofExpr->type, symtab) },1314 alignofExpr->location, resolveTypeof( alignofExpr->type, context ) }, 1314 1315 tenv ); 1315 1316 } else { 1316 1317 // find all candidates for the argument to alignof 1317 CandidateFinder finder { symtab, tenv };1318 CandidateFinder finder( context, tenv ); 1318 1319 finder.find( alignofExpr->expr ); 1319 1320 // find the lowest-cost candidate, otherwise ambiguous … … 1354 1355 1355 1356 void postvisit( const ast::LogicalExpr * logicalExpr ) { 1356 CandidateFinder finder1 { symtab, tenv };1357 CandidateFinder finder1( context, tenv ); 1357 1358 finder1.find( logicalExpr->arg1, ResolvMode::withAdjustment() ); 1358 1359 if ( finder1.candidates.empty() ) return; 1359 1360 1360 CandidateFinder finder2 { symtab, tenv };1361 CandidateFinder finder2( context, tenv ); 1361 1362 finder2.find( logicalExpr->arg2, ResolvMode::withAdjustment() ); 1362 1363 if ( finder2.candidates.empty() ) return; … … 1384 1385 void postvisit( const ast::ConditionalExpr * conditionalExpr ) { 1385 1386 // candidates for condition 1386 CandidateFinder finder1 { symtab, tenv };1387 CandidateFinder finder1( context, tenv ); 1387 1388 finder1.find( conditionalExpr->arg1, ResolvMode::withAdjustment() ); 1388 1389 if ( finder1.candidates.empty() ) return; 1389 1390 1390 1391 // candidates for true result 1391 CandidateFinder finder2 { symtab, tenv };1392 CandidateFinder finder2( context, tenv ); 1392 1393 finder2.find( conditionalExpr->arg2, ResolvMode::withAdjustment() ); 1393 1394 if ( finder2.candidates.empty() ) return; 1394 1395 1395 1396 // candidates for false result 1396 CandidateFinder finder3 { symtab, tenv };1397 CandidateFinder finder3( context, tenv ); 1397 1398 finder3.find( conditionalExpr->arg3, ResolvMode::withAdjustment() ); 1398 1399 if ( finder3.candidates.empty() ) return; … … 1445 1446 void postvisit( const ast::CommaExpr * commaExpr ) { 1446 1447 ast::TypeEnvironment env{ tenv }; 1447 ast::ptr< ast::Expr > arg1 = resolveInVoidContext( commaExpr->arg1, symtab, env );1448 1449 CandidateFinder finder2 { symtab, env };1448 ast::ptr< ast::Expr > arg1 = resolveInVoidContext( 
commaExpr->arg1, context, env ); 1449 1450 CandidateFinder finder2( context, env ); 1450 1451 finder2.find( commaExpr->arg2, ResolvMode::withAdjustment() ); 1451 1452 … … 1460 1461 1461 1462 void postvisit( const ast::ConstructorExpr * ctorExpr ) { 1462 CandidateFinder finder { symtab, tenv };1463 CandidateFinder finder( context, tenv ); 1463 1464 finder.find( ctorExpr->callExpr, ResolvMode::withoutPrune() ); 1464 1465 for ( CandidateRef & r : finder.candidates ) { … … 1469 1470 void postvisit( const ast::RangeExpr * rangeExpr ) { 1470 1471 // resolve low and high, accept candidates where low and high types unify 1471 CandidateFinder finder1 { symtab, tenv };1472 CandidateFinder finder1( context, tenv ); 1472 1473 finder1.find( rangeExpr->low, ResolvMode::withAdjustment() ); 1473 1474 if ( finder1.candidates.empty() ) return; 1474 1475 1475 CandidateFinder finder2 { symtab, tenv };1476 CandidateFinder finder2( context, tenv ); 1476 1477 finder2.find( rangeExpr->high, ResolvMode::withAdjustment() ); 1477 1478 if ( finder2.candidates.empty() ) return; … … 1549 1550 1550 1551 void postvisit( const ast::UniqueExpr * unqExpr ) { 1551 CandidateFinder finder { symtab, tenv };1552 CandidateFinder finder( context, tenv ); 1552 1553 finder.find( unqExpr->expr, ResolvMode::withAdjustment() ); 1553 1554 for ( CandidateRef & r : finder.candidates ) { … … 1558 1559 1559 1560 void postvisit( const ast::StmtExpr * stmtExpr ) { 1560 addCandidate( resolveStmtExpr( stmtExpr, symtab), tenv );1561 addCandidate( resolveStmtExpr( stmtExpr, context ), tenv ); 1561 1562 } 1562 1563 … … 1570 1571 for ( const ast::InitAlternative & initAlt : initExpr->initAlts ) { 1571 1572 // calculate target type 1572 const ast::Type * toType = resolveTypeof( initAlt.type, symtab);1573 const ast::Type * toType = resolveTypeof( initAlt.type, context ); 1573 1574 // toType = SymTab::validateType( initExpr->location, toType, symtab ); 1574 1575 toType = adjustExprType( toType, tenv, symtab ); … … 1576 1577 // types are not bound to the initialization type, since return type variables are 1577 1578 // only open for the duration of resolving the UntypedExpr. 1578 CandidateFinder finder { symtab, tenv, toType };1579 CandidateFinder finder( context, tenv, toType ); 1579 1580 finder.find( initExpr->expr, ResolvMode::withAdjustment() ); 1580 1581 for ( CandidateRef & cand : finder.candidates ) { … … 1693 1694 } 1694 1695 else { 1695 satisfyAssertions(candidate, localSyms, satisfied, errors);1696 satisfyAssertions(candidate, context.symtab, satisfied, errors); 1696 1697 needRecomputeKey = true; 1697 1698 } … … 1855 1856 r->expr = ast::mutate_field( 1856 1857 r->expr.get(), &ast::Expr::result, 1857 adjustExprType( r->expr->result, r->env, localSyms) );1858 adjustExprType( r->expr->result, r->env, context.symtab ) ); 1858 1859 } 1859 1860 } … … 1873 1874 1874 1875 for ( const auto & x : xs ) { 1875 out.emplace_back( localSyms, env );1876 out.emplace_back( context, env ); 1876 1877 out.back().find( x, ResolvMode::withAdjustment() ); 1877 1878 -
src/ResolvExpr/CandidateFinder.hpp
ref3c383 rd672350 10 10 // Created On : Wed Jun 5 14:30:00 2019 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tue Oct 1 9:51:00 201913 // Update Count : 212 // Last Modified On : Wed Mar 16 15:22:00 2022 13 // Update Count : 3 14 14 // 15 15 … … 25 25 namespace ResolvExpr { 26 26 27 struct ResolveContext; 28 27 29 /// Data to perform expression resolution 28 30 struct CandidateFinder { 29 31 CandidateList candidates; ///< List of candidate resolutions 30 const ast::SymbolTable & localSyms; ///< Symbol table to lookup candidates32 const ResolveContext & context; ///< Information about where the canditates are being found. 31 33 const ast::TypeEnvironment & env; ///< Substitutions performed in this resolution 32 34 ast::ptr< ast::Type > targetType; ///< Target type for resolution … … 34 36 35 37 CandidateFinder( 36 const ast::SymbolTable & syms, const ast::TypeEnvironment & env,38 const ResolveContext & context, const ast::TypeEnvironment & env, 37 39 const ast::Type * tt = nullptr ) 38 : candidates(), localSyms( syms), env( env ), targetType( tt ) {}40 : candidates(), context( context ), env( env ), targetType( tt ) {} 39 41 40 42 /// Fill candidates with feasible resolutions for `expr` -
src/ResolvExpr/CandidatePrinter.cpp
ref3c383 rd672350 10 10 // Created On : Tue Nov 9 9:54:00 2021 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tue Nov 9 15:47:00 202113 // Update Count : 012 // Last Modified On : Wed Mar 16 13:56:00 2022 13 // Update Count : 1 14 14 // 15 15 … … 22 22 #include "AST/TranslationUnit.hpp" 23 23 #include "ResolvExpr/CandidateFinder.hpp" 24 #include "ResolvExpr/Resolver.h" 24 25 25 26 #include <iostream> … … 29 30 namespace { 30 31 31 class CandidatePrintCore : public ast::WithSymbolTable { 32 class CandidatePrintCore : public ast::WithSymbolTable, 33 public ast::WithConstTranslationUnit { 32 34 std::ostream & os; 33 35 public: … … 36 38 void postvisit( const ast::ExprStmt * stmt ) { 37 39 ast::TypeEnvironment env; 38 CandidateFinder finder( symtab, env );40 CandidateFinder finder( { symtab, transUnit().global }, env ); 39 41 finder.find( stmt->expr, ResolvMode::withAdjustment() ); 40 42 int count = 1; -
src/ResolvExpr/ResolveTypeof.cc
ref3c383 rd672350 9 9 // Author : Richard C. Bilson 10 10 // Created On : Sun May 17 12:12:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Tue May 19 16:49:04 201513 // Update Count : 311 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Mar 16 16:09:00 2022 13 // Update Count : 4 14 14 // 15 15 … … 22 22 #include "AST/Node.hpp" 23 23 #include "AST/Pass.hpp" 24 #include "AST/TranslationUnit.hpp" 24 25 #include "AST/Type.hpp" 25 26 #include "AST/TypeEnvironment.hpp" … … 119 120 namespace { 120 121 struct ResolveTypeof_new : public ast::WithShortCircuiting { 121 const ast::SymbolTable & localSymtab; 122 123 ResolveTypeof_new( const ast::SymbolTable & syms ) : localSymtab( syms ) {} 122 const ResolveContext & context; 123 124 ResolveTypeof_new( const ResolveContext & context ) : 125 context( context ) {} 124 126 125 127 void previsit( const ast::TypeofType * ) { visit_children = false; } … … 137 139 ast::TypeEnvironment dummy; 138 140 ast::ptr< ast::Expr > newExpr = 139 resolveInVoidContext( typeofType->expr, localSymtab, dummy );141 resolveInVoidContext( typeofType->expr, context, dummy ); 140 142 assert( newExpr->result && ! newExpr->result->isVoid() ); 141 143 newType = newExpr->result; … … 161 163 } // anonymous namespace 162 164 163 const ast::Type * resolveTypeof( const ast::Type * type , const ast::SymbolTable & symtab) {164 ast::Pass< ResolveTypeof_new > mutator { symtab };165 const ast::Type * resolveTypeof( const ast::Type * type , const ResolveContext & context ) { 166 ast::Pass< ResolveTypeof_new > mutator( context ); 165 167 return type->accept( mutator ); 166 168 } … … 168 170 struct FixArrayDimension { 169 171 // should not require a mutable symbol table - prevent pass template instantiation 170 const ast::SymbolTable & _symtab;171 FixArrayDimension(const ast::SymbolTable & symtab): _symtab(symtab) {}172 const ResolveContext & context; 173 FixArrayDimension(const ResolveContext & context) : context( context ) {} 172 174 173 175 const ast::ArrayType * previsit (const ast::ArrayType * arrayType) { 174 176 if (!arrayType->dimension) return arrayType; 175 177 auto mutType = mutate(arrayType); 176 ast::ptr<ast::Type> sizetype = ast::sizeType ? ast::sizeType : new ast::BasicType(ast::BasicType::LongUnsignedInt); 177 mutType->dimension = findSingleExpression(arrayType->dimension, sizetype, _symtab); 178 auto globalSizeType = context.global.sizeType; 179 ast::ptr<ast::Type> sizetype = globalSizeType ? 
globalSizeType : new ast::BasicType(ast::BasicType::LongUnsignedInt); 180 mutType->dimension = findSingleExpression(arrayType->dimension, sizetype, context ); 178 181 179 182 if (InitTweak::isConstExpr(mutType->dimension)) { … … 187 190 }; 188 191 189 const ast::Type * fixArrayType( const ast::Type * type, const ast::SymbolTable & symtab) {190 ast::Pass<FixArrayDimension> visitor {symtab};192 const ast::Type * fixArrayType( const ast::Type * type, const ResolveContext & context ) { 193 ast::Pass<FixArrayDimension> visitor(context); 191 194 return type->accept(visitor); 192 195 } 193 196 194 const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ast::SymbolTable & symtab) {195 if (!decl->isTypeFixed) { 197 const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ResolveContext & context ) { 198 if (!decl->isTypeFixed) { 196 199 auto mutDecl = mutate(decl); 197 auto resolvedType = resolveTypeof(decl->type, symtab);198 resolvedType = fixArrayType(resolvedType, symtab);200 auto resolvedType = resolveTypeof(decl->type, context); 201 resolvedType = fixArrayType(resolvedType, context); 199 202 mutDecl->type = resolvedType; 200 203 -
src/ResolvExpr/ResolveTypeof.h
ref3c383 rd672350 9 9 // Author : Richard C. Bilson 10 10 // Created On : Sun May 17 12:14:53 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Sat Jul 22 09:38:35 201713 // Update Count : 311 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Mar 16 11:33:00 2022 13 // Update Count : 4 14 14 // 15 15 … … 22 22 namespace ast { 23 23 class Type; 24 class SymbolTable;25 24 class ObjectDecl; 26 25 } 27 26 28 27 namespace ResolvExpr { 28 struct ResolveContext; 29 29 30 Type *resolveTypeof( Type*, const SymTab::Indexer &indexer ); 30 const ast::Type * resolveTypeof( const ast::Type *, const ast::SymbolTable& );31 const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ast::SymbolTable & symtab);31 const ast::Type * resolveTypeof( const ast::Type *, const ResolveContext & ); 32 const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ResolveContext & ); 32 33 } // namespace ResolvExpr 33 34 -
src/ResolvExpr/Resolver.cc
ref3c383 rd672350 9 9 // Author : Aaron B. Moss 10 10 // Created On : Sun May 17 12:17:01 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Tue Feb 1 16:27:14202213 // Update Count : 24 511 // Last Modified By : Andrew Beach 12 // Last Modified On : Fri Mar 18 10:41:00 2022 13 // Update Count : 247 14 14 // 15 15 … … 997 997 /// Calls the CandidateFinder and finds the single best candidate 998 998 CandidateRef findUnfinishedKindExpression( 999 const ast::Expr * untyped, const ast::SymbolTable & symtab, const std::string & kind,999 const ast::Expr * untyped, const ResolveContext & context, const std::string & kind, 1000 1000 std::function<bool(const Candidate &)> pred = anyCandidate, ResolvMode mode = {} 1001 1001 ) { … … 1007 1007 ++recursion_level; 1008 1008 ast::TypeEnvironment env; 1009 CandidateFinder finder { symtab, env };1009 CandidateFinder finder( context, env ); 1010 1010 finder.find( untyped, recursion_level == 1 ? mode.atTopLevel() : mode ); 1011 1011 --recursion_level; … … 1129 1129 1130 1130 ast::ptr< ast::Expr > resolveInVoidContext( 1131 const ast::Expr * expr, const ast::SymbolTable & symtab, ast::TypeEnvironment & env 1131 const ast::Expr * expr, const ResolveContext & context, 1132 ast::TypeEnvironment & env 1132 1133 ) { 1133 1134 assertf( expr, "expected a non-null expression" ); … … 1136 1137 ast::ptr< ast::CastExpr > untyped = new ast::CastExpr{ expr }; 1137 1138 CandidateRef choice = findUnfinishedKindExpression( 1138 untyped, symtab, "", anyCandidate, ResolvMode::withAdjustment() );1139 untyped, context, "", anyCandidate, ResolvMode::withAdjustment() ); 1139 1140 1140 1141 // a cast expression has either 0 or 1 interpretations (by language rules); … … 1149 1150 /// context. 1150 1151 ast::ptr< ast::Expr > findVoidExpression( 1151 const ast::Expr * untyped, const ast::SymbolTable & symtab1152 const ast::Expr * untyped, const ResolveContext & context 1152 1153 ) { 1153 1154 ast::TypeEnvironment env; 1154 ast::ptr< ast::Expr > newExpr = resolveInVoidContext( untyped, symtab, env );1155 ast::ptr< ast::Expr > newExpr = resolveInVoidContext( untyped, context, env ); 1155 1156 finishExpr( newExpr, env, untyped->env ); 1156 1157 return newExpr; … … 1163 1164 /// lowest cost, returning the resolved version 1164 1165 ast::ptr< ast::Expr > findKindExpression( 1165 const ast::Expr * untyped, const ast::SymbolTable & symtab,1166 const ast::Expr * untyped, const ResolveContext & context, 1166 1167 std::function<bool(const Candidate &)> pred = anyCandidate, 1167 1168 const std::string & kind = "", ResolvMode mode = {} … … 1169 1170 if ( ! 
untyped ) return {}; 1170 1171 CandidateRef choice = 1171 findUnfinishedKindExpression( untyped, symtab, kind, pred, mode );1172 findUnfinishedKindExpression( untyped, context, kind, pred, mode ); 1172 1173 ResolvExpr::finishExpr( choice->expr, choice->env, untyped->env ); 1173 1174 return std::move( choice->expr ); … … 1176 1177 /// Resolve `untyped` to the single expression whose candidate is the best match 1177 1178 ast::ptr< ast::Expr > findSingleExpression( 1178 const ast::Expr * untyped, const ast::SymbolTable & symtab1179 const ast::Expr * untyped, const ResolveContext & context 1179 1180 ) { 1180 1181 Stats::ResolveTime::start( untyped ); 1181 auto res = findKindExpression( untyped, symtab);1182 auto res = findKindExpression( untyped, context ); 1182 1183 Stats::ResolveTime::stop(); 1183 1184 return res; … … 1186 1187 1187 1188 ast::ptr< ast::Expr > findSingleExpression( 1188 const ast::Expr * untyped, const ast::Type * type, const ast::SymbolTable & symtab 1189 const ast::Expr * untyped, const ast::Type * type, 1190 const ResolveContext & context 1189 1191 ) { 1190 1192 assert( untyped && type ); 1191 1193 ast::ptr< ast::Expr > castExpr = new ast::CastExpr{ untyped, type }; 1192 ast::ptr< ast::Expr > newExpr = findSingleExpression( castExpr, symtab);1193 removeExtraneousCast( newExpr, symtab );1194 ast::ptr< ast::Expr > newExpr = findSingleExpression( castExpr, context ); 1195 removeExtraneousCast( newExpr, context.symtab ); 1194 1196 return newExpr; 1195 1197 } … … 1217 1219 /// Resolve `untyped` as an integral expression, returning the resolved version 1218 1220 ast::ptr< ast::Expr > findIntegralExpression( 1219 const ast::Expr * untyped, const ast::SymbolTable & symtab1221 const ast::Expr * untyped, const ResolveContext & context 1220 1222 ) { 1221 return findKindExpression( untyped, symtab, hasIntegralType, "condition" );1223 return findKindExpression( untyped, context, hasIntegralType, "condition" ); 1222 1224 } 1223 1225 … … 1249 1251 // for work previously in GenInit 1250 1252 static InitTweak::ManagedTypes_new managedTypes; 1253 ResolveContext context; 1251 1254 1252 1255 bool inEnumDecl = false; … … 1254 1257 public: 1255 1258 static size_t traceId; 1256 Resolver_new() = default; 1257 Resolver_new( const ast::SymbolTable & syms ) { symtab = syms; } 1259 Resolver_new( const ast::TranslationGlobal & global ) : 1260 context{ symtab, global } {} 1261 Resolver_new( const ResolveContext & context ) : 1262 ast::WithSymbolTable{ context.symtab }, 1263 context{ symtab, context.global } {} 1258 1264 1259 1265 const ast::FunctionDecl * previsit( const ast::FunctionDecl * ); … … 1272 1278 const ast::AsmStmt * previsit( const ast::AsmStmt * ); 1273 1279 const ast::IfStmt * previsit( const ast::IfStmt * ); 1274 const ast::WhileDoStmt * 1280 const ast::WhileDoStmt * previsit( const ast::WhileDoStmt * ); 1275 1281 const ast::ForStmt * previsit( const ast::ForStmt * ); 1276 1282 const ast::SwitchStmt * previsit( const ast::SwitchStmt * ); … … 1299 1305 1300 1306 void resolve( ast::TranslationUnit& translationUnit ) { 1301 ast::Pass< Resolver_new >::run( translationUnit );1307 ast::Pass< Resolver_new >::run( translationUnit, translationUnit.global ); 1302 1308 } 1303 1309 1304 1310 ast::ptr< ast::Init > resolveCtorInit( 1305 const ast::ConstructorInit * ctorInit, const ast::SymbolTable & symtab1311 const ast::ConstructorInit * ctorInit, const ResolveContext & context 1306 1312 ) { 1307 1313 assert( ctorInit ); 1308 ast::Pass< Resolver_new > resolver { symtab };1314 ast::Pass< Resolver_new 
> resolver( context ); 1309 1315 return ctorInit->accept( resolver ); 1310 1316 } 1311 1317 1312 1318 const ast::Expr * resolveStmtExpr( 1313 const ast::StmtExpr * stmtExpr, const ast::SymbolTable & symtab1319 const ast::StmtExpr * stmtExpr, const ResolveContext & context 1314 1320 ) { 1315 1321 assert( stmtExpr ); 1316 ast::Pass< Resolver_new > resolver { symtab };1322 ast::Pass< Resolver_new > resolver( context ); 1317 1323 auto ret = mutate(stmtExpr->accept(resolver)); 1318 1324 strict_dynamic_cast< ast::StmtExpr * >( ret )->computeResult(); … … 1321 1327 1322 1328 namespace { 1323 const ast::Attribute * handleAttribute(const CodeLocation & loc, const ast::Attribute * attr, const ast::SymbolTable & symtab) {1329 const ast::Attribute * handleAttribute(const CodeLocation & loc, const ast::Attribute * attr, const ResolveContext & context) { 1324 1330 std::string name = attr->normalizedName(); 1325 1331 if (name == "constructor" || name == "destructor") { 1326 1332 if (attr->params.size() == 1) { 1327 1333 auto arg = attr->params.front(); 1328 auto resolved = ResolvExpr::findSingleExpression( arg, new ast::BasicType( ast::BasicType::LongLongSignedInt ), symtab);1334 auto resolved = ResolvExpr::findSingleExpression( arg, new ast::BasicType( ast::BasicType::LongLongSignedInt ), context ); 1329 1335 auto result = eval(arg); 1330 1336 … … 1369 1375 1370 1376 for (auto & attr: mutDecl->attributes) { 1371 attr = handleAttribute(mutDecl->location, attr, symtab);1377 attr = handleAttribute(mutDecl->location, attr, context ); 1372 1378 } 1373 1379 … … 1382 1388 } 1383 1389 for (auto & asst : mutDecl->assertions) { 1384 asst = fixObjectType(asst.strict_as<ast::ObjectDecl>(), symtab);1390 asst = fixObjectType(asst.strict_as<ast::ObjectDecl>(), context); 1385 1391 symtab.addId(asst); 1386 1392 mutType->assertions.emplace_back(new ast::VariableExpr(functionDecl->location, asst)); … … 1394 1400 1395 1401 for (auto & param : mutDecl->params) { 1396 param = fixObjectType(param.strict_as<ast::ObjectDecl>(), symtab);1402 param = fixObjectType(param.strict_as<ast::ObjectDecl>(), context); 1397 1403 symtab.addId(param); 1398 1404 paramTypes.emplace_back(param->get_type()); 1399 1405 } 1400 1406 for (auto & ret : mutDecl->returns) { 1401 ret = fixObjectType(ret.strict_as<ast::ObjectDecl>(), symtab);1407 ret = fixObjectType(ret.strict_as<ast::ObjectDecl>(), context); 1402 1408 returnTypes.emplace_back(ret->get_type()); 1403 1409 } … … 1470 1476 // enumerator initializers should not use the enum type to initialize, since the 1471 1477 // enum type is still incomplete at this point. Use `int` instead. 1472 objectDecl = fixObjectType(objectDecl, symtab);1478 objectDecl = fixObjectType(objectDecl, context); 1473 1479 currentObject = ast::CurrentObject{ 1474 1480 objectDecl->location, new ast::BasicType{ ast::BasicType::SignedInt } }; … … 1476 1482 else { 1477 1483 if (!objectDecl->isTypeFixed) { 1478 auto newDecl = fixObjectType(objectDecl, symtab);1484 auto newDecl = fixObjectType(objectDecl, context); 1479 1485 auto mutDecl = mutate(newDecl); 1480 1486 … … 1507 1513 // nested type decls are hoisted already. 
no need to do anything 1508 1514 if (auto obj = member.as<ast::ObjectDecl>()) { 1509 member = fixObjectType(obj, symtab);1515 member = fixObjectType(obj, context); 1510 1516 } 1511 1517 } … … 1530 1536 return ast::mutate_field( 1531 1537 assertDecl, &ast::StaticAssertDecl::cond, 1532 findIntegralExpression( assertDecl->cond, symtab) );1538 findIntegralExpression( assertDecl->cond, context ) ); 1533 1539 } 1534 1540 1535 1541 template< typename PtrType > 1536 const PtrType * handlePtrType( const PtrType * type, const ast::SymbolTable & symtab) {1542 const PtrType * handlePtrType( const PtrType * type, const ResolveContext & context ) { 1537 1543 if ( type->dimension ) { 1538 ast::ptr< ast::Type > sizeType = ast::sizeType;1544 ast::ptr< ast::Type > sizeType = context.global.sizeType; 1539 1545 ast::mutate_field( 1540 1546 type, &PtrType::dimension, 1541 findSingleExpression( type->dimension, sizeType, symtab) );1547 findSingleExpression( type->dimension, sizeType, context ) ); 1542 1548 } 1543 1549 return type; … … 1545 1551 1546 1552 const ast::ArrayType * Resolver_new::previsit( const ast::ArrayType * at ) { 1547 return handlePtrType( at, symtab);1553 return handlePtrType( at, context ); 1548 1554 } 1549 1555 1550 1556 const ast::PointerType * Resolver_new::previsit( const ast::PointerType * pt ) { 1551 return handlePtrType( pt, symtab);1557 return handlePtrType( pt, context ); 1552 1558 } 1553 1559 … … 1557 1563 1558 1564 return ast::mutate_field( 1559 exprStmt, &ast::ExprStmt::expr, findVoidExpression( exprStmt->expr, symtab) );1565 exprStmt, &ast::ExprStmt::expr, findVoidExpression( exprStmt->expr, context ) ); 1560 1566 } 1561 1567 … … 1564 1570 1565 1571 asmExpr = ast::mutate_field( 1566 asmExpr, &ast::AsmExpr::operand, findVoidExpression( asmExpr->operand, symtab) );1572 asmExpr, &ast::AsmExpr::operand, findVoidExpression( asmExpr->operand, context ) ); 1567 1573 1568 1574 return asmExpr; … … 1578 1584 const ast::IfStmt * Resolver_new::previsit( const ast::IfStmt * ifStmt ) { 1579 1585 return ast::mutate_field( 1580 ifStmt, &ast::IfStmt::cond, findIntegralExpression( ifStmt->cond, symtab) );1586 ifStmt, &ast::IfStmt::cond, findIntegralExpression( ifStmt->cond, context ) ); 1581 1587 } 1582 1588 1583 1589 const ast::WhileDoStmt * Resolver_new::previsit( const ast::WhileDoStmt * whileDoStmt ) { 1584 1590 return ast::mutate_field( 1585 whileDoStmt, &ast::WhileDoStmt::cond, findIntegralExpression( whileDoStmt->cond, symtab) );1591 whileDoStmt, &ast::WhileDoStmt::cond, findIntegralExpression( whileDoStmt->cond, context ) ); 1586 1592 } 1587 1593 … … 1589 1595 if ( forStmt->cond ) { 1590 1596 forStmt = ast::mutate_field( 1591 forStmt, &ast::ForStmt::cond, findIntegralExpression( forStmt->cond, symtab) );1597 forStmt, &ast::ForStmt::cond, findIntegralExpression( forStmt->cond, context ) ); 1592 1598 } 1593 1599 1594 1600 if ( forStmt->inc ) { 1595 1601 forStmt = ast::mutate_field( 1596 forStmt, &ast::ForStmt::inc, findVoidExpression( forStmt->inc, symtab) );1602 forStmt, &ast::ForStmt::inc, findVoidExpression( forStmt->inc, context ) ); 1597 1603 } 1598 1604 … … 1604 1610 switchStmt = ast::mutate_field( 1605 1611 switchStmt, &ast::SwitchStmt::cond, 1606 findIntegralExpression( switchStmt->cond, symtab) );1612 findIntegralExpression( switchStmt->cond, context ) ); 1607 1613 currentObject = ast::CurrentObject{ switchStmt->location, switchStmt->cond->result }; 1608 1614 return switchStmt; … … 1617 1623 ast::ptr< ast::Expr > untyped = 1618 1624 new ast::CastExpr{ caseStmt->location, 
caseStmt->cond, initAlts.front().type }; 1619 ast::ptr< ast::Expr > newExpr = findSingleExpression( untyped, symtab);1625 ast::ptr< ast::Expr > newExpr = findSingleExpression( untyped, context ); 1620 1626 1621 1627 // case condition cannot have a cast in C, so it must be removed here, regardless of … … 1638 1644 branchStmt = ast::mutate_field( 1639 1645 branchStmt, &ast::BranchStmt::computedTarget, 1640 findSingleExpression( branchStmt->computedTarget, target, symtab) );1646 findSingleExpression( branchStmt->computedTarget, target, context ) ); 1641 1647 } 1642 1648 return branchStmt; … … 1648 1654 returnStmt = ast::mutate_field( 1649 1655 returnStmt, &ast::ReturnStmt::expr, 1650 findSingleExpression( returnStmt->expr, functionReturn, symtab) );1656 findSingleExpression( returnStmt->expr, functionReturn, context ) ); 1651 1657 } 1652 1658 return returnStmt; … … 1663 1669 throwStmt = ast::mutate_field( 1664 1670 throwStmt, &ast::ThrowStmt::expr, 1665 findSingleExpression( throwStmt->expr, exceptType, symtab) );1671 findSingleExpression( throwStmt->expr, exceptType, context ) ); 1666 1672 } 1667 1673 return throwStmt; … … 1707 1713 1708 1714 ast::TypeEnvironment env; 1709 CandidateFinder funcFinder { symtab, env };1715 CandidateFinder funcFinder( context, env ); 1710 1716 1711 1717 // Find all candidates for a function in canonical form … … 1921 1927 ); 1922 1928 1923 clause2.target.args.emplace_back( findSingleExpression( init, symtab) );1929 clause2.target.args.emplace_back( findSingleExpression( init, context ) ); 1924 1930 } 1925 1931 1926 1932 // Resolve the conditions as if it were an IfStmt, statements normally 1927 clause2.cond = findSingleExpression( clause.cond, symtab);1933 clause2.cond = findSingleExpression( clause.cond, context ); 1928 1934 clause2.stmt = clause.stmt->accept( *visitor ); 1929 1935 … … 1940 1946 ast::ptr< ast::Type > target = 1941 1947 new ast::BasicType{ ast::BasicType::LongLongUnsignedInt }; 1942 timeout2.time = findSingleExpression( stmt->timeout.time, target, symtab);1943 timeout2.cond = findSingleExpression( stmt->timeout.cond, symtab);1948 timeout2.time = findSingleExpression( stmt->timeout.time, target, context ); 1949 timeout2.cond = findSingleExpression( stmt->timeout.cond, context ); 1944 1950 timeout2.stmt = stmt->timeout.stmt->accept( *visitor ); 1945 1951 … … 1954 1960 ast::WaitForStmt::OrElse orElse2; 1955 1961 1956 orElse2.cond = findSingleExpression( stmt->orElse.cond, symtab);1962 orElse2.cond = findSingleExpression( stmt->orElse.cond, context ); 1957 1963 orElse2.stmt = stmt->orElse.stmt->accept( *visitor ); 1958 1964 … … 1975 1981 for (auto & expr : exprs) { 1976 1982 // only struct- and union-typed expressions are viable candidates 1977 expr = findKindExpression( expr, symtab, structOrUnion, "with expression" );1983 expr = findKindExpression( expr, context, structOrUnion, "with expression" ); 1978 1984 1979 1985 // if with expression might be impure, create a temporary so that it is evaluated once … … 2001 2007 ast::ptr< ast::Expr > untyped = new ast::UntypedInitExpr{ 2002 2008 singleInit->location, singleInit->value, currentObject.getOptions() }; 2003 ast::ptr<ast::Expr> newExpr = findSingleExpression( untyped, symtab);2009 ast::ptr<ast::Expr> newExpr = findSingleExpression( untyped, context ); 2004 2010 const ast::InitExpr * initExpr = newExpr.strict_as< ast::InitExpr >(); 2005 2011 -
src/ResolvExpr/Resolver.h
ref3c383 rd672350 9 9 // Author : Richard C. Bilson 10 10 // Created On : Sun May 17 12:18:34 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Mon Feb 18 20:40:38 201913 // Update Count : 411 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Mar 16 11:32:00 2022 13 // Update Count : 5 14 14 // 15 15 … … 23 23 class Declaration; 24 24 class Expression; 25 class DeletedExpr; 25 26 class StmtExpr; 27 class Type; 26 28 namespace SymTab { 27 29 class Indexer; … … 35 37 class StmtExpr; 36 38 class SymbolTable; 39 class TranslationGlobal; 37 40 class TranslationUnit; 38 41 class Type; … … 55 58 void resolveWithExprs( std::list< Declaration * > & translationUnit ); 56 59 60 /// Helper Type: Passes around information between various sub-calls. 61 struct ResolveContext { 62 const ast::SymbolTable & symtab; 63 const ast::TranslationGlobal & global; 64 }; 65 57 66 /// Checks types and binds syntactic constructs to typed representations 58 67 void resolve( ast::TranslationUnit& translationUnit ); … … 62 71 /// context. 63 72 ast::ptr< ast::Expr > resolveInVoidContext( 64 const ast::Expr * expr, const ast::SymbolTable & symtab, ast::TypeEnvironment & env );73 const ast::Expr * expr, const ResolveContext &, ast::TypeEnvironment & env ); 65 74 /// Resolve `untyped` to the single expression whose candidate is the best match for the 66 75 /// given type. 67 76 ast::ptr< ast::Expr > findSingleExpression( 68 const ast::Expr * untyped, const ast::Type * type, const ast::SymbolTable & symtab);77 const ast::Expr * untyped, const ast::Type * type, const ResolveContext & ); 69 78 ast::ptr< ast::Expr > findVoidExpression( 70 const ast::Expr * untyped, const ast::SymbolTable & symtab);79 const ast::Expr * untyped, const ResolveContext & ); 71 80 /// Resolves a constructor init expression 72 81 ast::ptr< ast::Init > resolveCtorInit( 73 const ast::ConstructorInit * ctorInit, const ast::SymbolTable & symtab);82 const ast::ConstructorInit * ctorInit, const ResolveContext & context ); 74 83 /// Resolves a statement expression 75 84 const ast::Expr * resolveStmtExpr( 76 const ast::StmtExpr * stmtExpr, const ast::SymbolTable & symtab);85 const ast::StmtExpr * stmtExpr, const ResolveContext & context ); 77 86 } // namespace ResolvExpr 78 87 -
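The changes above thread a new ResolveContext through the resolver in place of a bare symbol table, so that per-pass state such as the translation unit's size type comes from TranslationGlobal rather than from mutable file-scope globals. A minimal sketch of how a caller might assemble the context, assuming the new-AST SymbolTable and TranslationUnit headers (the wrapper name and the include paths are illustrative, not part of the changeset):

	// Sketch: build a ResolveContext from a symbol table plus the
	// translation unit's globals and hand it to a resolver entry point.
	// Include paths are assumptions; adjust to the actual tree layout.
	#include "AST/SymbolTable.hpp"
	#include "AST/TranslationUnit.hpp"
	#include "ResolvExpr/Resolver.h"

	ast::ptr< ast::Expr > resolveAsVoid(
			const ast::Expr * expr,
			const ast::SymbolTable & symtab,
			const ast::TranslationUnit & unit ) {
		// Both members are references, so the context is cheap to pass around.
		ResolvExpr::ResolveContext context{ symtab, unit.global };
		return ResolvExpr::findVoidExpression( expr, context );
	}

Because the context is just a pair of references, sub-finders can take it by value or reference with no copying of the symbol table, which is how the Resolver.cc and TupleAssignment.cc hunks above construct CandidateFinder.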
src/ResolvExpr/Unify.cc
ref3c383 rd672350 943 943 // check that the other type is compatible and named the same 944 944 auto otherInst = dynamic_cast< const XInstType * >( other ); 945 this->result = otherInst && inst->name == otherInst->name;945 if (otherInst && inst->name == otherInst->name) this->result = otherInst; 946 946 return otherInst; 947 947 } -
src/SymTab/Validate.cc
ref3c383 rd672350 194 194 }; 195 195 196 // These structs are the sub-sub-passes of ForallPointerDecay_old. 197 198 struct TraitExpander_old final { 199 void previsit( FunctionType * ); 200 void previsit( StructDecl * ); 201 void previsit( UnionDecl * ); 202 }; 203 204 struct AssertionFixer_old final { 205 void previsit( FunctionType * ); 206 void previsit( StructDecl * ); 207 void previsit( UnionDecl * ); 208 }; 209 210 struct CheckOperatorTypes_old final { 211 void previsit( ObjectDecl * ); 212 }; 213 214 struct FixUniqueIds_old final { 215 void previsit( DeclarationWithType * ); 216 }; 217 196 218 struct ReturnChecker : public WithGuards { 197 219 /// Checks that return statements return nothing if their return type is void … … 386 408 387 409 void validate_D( std::list< Declaration * > & translationUnit ) { 388 PassVisitor<ForallPointerDecay_old> fpd;389 410 { 390 411 Stats::Heap::newPass("validate-D"); … … 394 415 }); 395 416 Stats::Time::TimeBlock("Forall Pointer Decay", [&]() { 396 acceptAll( translationUnit, fpd); // must happen before autogenerateRoutines, after Concurrency::applyKeywords because uniqueIds must be set on declaration before resolution417 decayForallPointers( translationUnit ); // must happen before autogenerateRoutines, after Concurrency::applyKeywords because uniqueIds must be set on declaration before resolution 397 418 }); 398 419 Stats::Time::TimeBlock("Hoist Control Declarations", [&]() { … … 454 475 455 476 void decayForallPointers( std::list< Declaration * > & translationUnit ) { 456 PassVisitor<ForallPointerDecay_old> fpd; 457 acceptAll( translationUnit, fpd ); 477 PassVisitor<TraitExpander_old> te; 478 acceptAll( translationUnit, te ); 479 PassVisitor<AssertionFixer_old> af; 480 acceptAll( translationUnit, af ); 481 PassVisitor<CheckOperatorTypes_old> cot; 482 acceptAll( translationUnit, cot ); 483 PassVisitor<FixUniqueIds_old> fui; 484 acceptAll( translationUnit, fui ); 485 } 486 487 void decayForallPointersA( std::list< Declaration * > & translationUnit ) { 488 PassVisitor<TraitExpander_old> te; 489 acceptAll( translationUnit, te ); 490 } 491 void decayForallPointersB( std::list< Declaration * > & translationUnit ) { 492 PassVisitor<AssertionFixer_old> af; 493 acceptAll( translationUnit, af ); 494 } 495 void decayForallPointersC( std::list< Declaration * > & translationUnit ) { 496 PassVisitor<CheckOperatorTypes_old> cot; 497 acceptAll( translationUnit, cot ); 498 } 499 void decayForallPointersD( std::list< Declaration * > & translationUnit ) { 500 PassVisitor<FixUniqueIds_old> fui; 501 acceptAll( translationUnit, fui ); 458 502 } 459 503 … … 470 514 PassVisitor<EnumAndPointerDecay_old> epc; 471 515 PassVisitor<LinkReferenceToTypes_old> lrt( indexer ); 472 PassVisitor<ForallPointerDecay_old> fpd; 516 PassVisitor<TraitExpander_old> te; 517 PassVisitor<AssertionFixer_old> af; 518 PassVisitor<CheckOperatorTypes_old> cot; 519 PassVisitor<FixUniqueIds_old> fui; 473 520 type->accept( epc ); 474 521 type->accept( lrt ); 475 type->accept( fpd ); 522 type->accept( te ); 523 type->accept( af ); 524 type->accept( cot ); 525 type->accept( fui ); 476 526 } 477 527 … … 972 1022 } 973 1023 1024 /// Replace all traits in assertion lists with their assertions. 
1025 void expandTraits( std::list< TypeDecl * > & forall ) { 1026 for ( TypeDecl * type : forall ) { 1027 std::list< DeclarationWithType * > asserts; 1028 asserts.splice( asserts.end(), type->assertions ); 1029 // expand trait instances into their members 1030 for ( DeclarationWithType * assertion : asserts ) { 1031 if ( TraitInstType * traitInst = dynamic_cast< TraitInstType * >( assertion->get_type() ) ) { 1032 // expand trait instance into all of its members 1033 expandAssertions( traitInst, back_inserter( type->assertions ) ); 1034 delete traitInst; 1035 } else { 1036 // pass other assertions through 1037 type->assertions.push_back( assertion ); 1038 } // if 1039 } // for 1040 } 1041 } 1042 1043 /// Fix each function in the assertion list and check for invalid void type. 1044 void fixAssertions( 1045 std::list< TypeDecl * > & forall, BaseSyntaxNode * node ) { 1046 for ( TypeDecl * type : forall ) { 1047 for ( DeclarationWithType *& assertion : type->assertions ) { 1048 bool isVoid = fixFunction( assertion ); 1049 if ( isVoid ) { 1050 SemanticError( node, "invalid type void in assertion of function " ); 1051 } // if 1052 } // for 1053 } 1054 } 1055 974 1056 void ForallPointerDecay_old::previsit( ObjectDecl * object ) { 975 1057 // ensure that operator names only apply to functions or function pointers … … 994 1076 void ForallPointerDecay_old::previsit( UnionDecl * aggrDecl ) { 995 1077 forallFixer( aggrDecl->parameters, aggrDecl ); 1078 } 1079 1080 void TraitExpander_old::previsit( FunctionType * ftype ) { 1081 expandTraits( ftype->forall ); 1082 } 1083 1084 void TraitExpander_old::previsit( StructDecl * aggrDecl ) { 1085 expandTraits( aggrDecl->parameters ); 1086 } 1087 1088 void TraitExpander_old::previsit( UnionDecl * aggrDecl ) { 1089 expandTraits( aggrDecl->parameters ); 1090 } 1091 1092 void AssertionFixer_old::previsit( FunctionType * ftype ) { 1093 fixAssertions( ftype->forall, ftype ); 1094 } 1095 1096 void AssertionFixer_old::previsit( StructDecl * aggrDecl ) { 1097 fixAssertions( aggrDecl->parameters, aggrDecl ); 1098 } 1099 1100 void AssertionFixer_old::previsit( UnionDecl * aggrDecl ) { 1101 fixAssertions( aggrDecl->parameters, aggrDecl ); 1102 } 1103 1104 void CheckOperatorTypes_old::previsit( ObjectDecl * object ) { 1105 // ensure that operator names only apply to functions or function pointers 1106 if ( CodeGen::isOperator( object->name ) && ! dynamic_cast< FunctionType * >( object->type->stripDeclarator() ) ) { 1107 SemanticError( object->location, toCString( "operator ", object->name.c_str(), " is not a function or function pointer." ) ); 1108 } 1109 } 1110 1111 void FixUniqueIds_old::previsit( DeclarationWithType * decl ) { 1112 decl->fixUniqueId(); 996 1113 } 997 1114 -
src/SymTab/Validate.h
ref3c383 rd672350 43 43 void validate_F( std::list< Declaration * > &translationUnit ); 44 44 void decayForallPointers( std::list< Declaration * > & translationUnit ); 45 void decayForallPointersA( std::list< Declaration * > & translationUnit ); 46 void decayForallPointersB( std::list< Declaration * > & translationUnit ); 47 void decayForallPointersC( std::list< Declaration * > & translationUnit ); 48 void decayForallPointersD( std::list< Declaration * > & translationUnit ); 45 49 46 50 const ast::Type * validateType( -
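The decayForallPointersA/B/C/D entry points declared above split the old monolithic pass into its four sub-passes so a driver can schedule them separately. A rough sketch of what running them in sequence amounts to; the wrapper function is hypothetical, and the stage comments follow the sub-pass visitors added in Validate.cc above:

	// Sketch only: mirrors what decayForallPointers() itself does, but lets a
	// driver interleave other work between the stages.
	#include <list>
	#include "SymTab/Validate.h"

	void decayForallPointersInStages( std::list< Declaration * > & translationUnit ) {
		SymTab::decayForallPointersA( translationUnit );  // expand trait instances into their member assertions
		SymTab::decayForallPointersB( translationUnit );  // fix assertion functions; reject void-typed assertions
		SymTab::decayForallPointersC( translationUnit );  // operator names must be functions or function pointers
		SymTab::decayForallPointersD( translationUnit );  // assign unique ids to declarations
	}

The order matters: assertions produced by the trait-expansion stage must exist before the assertion-fixing stage can rewrite them.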
src/Tuples/TupleAssignment.cc
ref3c383 rd672350 9 9 // Author : Rodolfo G. Esteves 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Fri Dec 13 23:45:33 201913 // Update Count : 911 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Mar 16 14:06:00 2022 13 // Update Count : 10 14 14 // 15 15 … … 465 465 // resolve ctor/dtor for the new object 466 466 ast::ptr< ast::Init > ctorInit = ResolvExpr::resolveCtorInit( 467 InitTweak::genCtorInit( location, ret ), spotter.crntFinder. localSyms);467 InitTweak::genCtorInit( location, ret ), spotter.crntFinder.context ); 468 468 // remove environments from subexpressions of stmtExpr 469 469 ast::Pass< EnvRemover > rm{ env }; … … 560 560 // resolve the cast expression so that rhsCand return type is bound by the cast 561 561 // type as needed, and transfer the resulting environment 562 ResolvExpr::CandidateFinder finder { spotter.crntFinder.localSyms, env };562 ResolvExpr::CandidateFinder finder( spotter.crntFinder.context, env ); 563 563 finder.find( rhsCand->expr, ResolvExpr::ResolvMode::withAdjustment() ); 564 564 assert( finder.candidates.size() == 1 ); … … 609 609 // explode the LHS so that each field of a tuple-valued expr is assigned 610 610 ResolvExpr::CandidateList lhs; 611 explode( *lhsCand, crntFinder. localSyms, back_inserter(lhs), true );611 explode( *lhsCand, crntFinder.context.symtab, back_inserter(lhs), true ); 612 612 for ( ResolvExpr::CandidateRef & cand : lhs ) { 613 613 // each LHS value must be a reference - some come in with a cast, if not … … 629 629 if ( isTuple( rhsCand->expr ) ) { 630 630 // multiple assignment 631 explode( *rhsCand, crntFinder. localSyms, back_inserter(rhs), true );631 explode( *rhsCand, crntFinder.context.symtab, back_inserter(rhs), true ); 632 632 matcher.reset( 633 633 new MultipleAssignMatcher{ *this, expr->location, lhs, rhs } ); … … 648 648 // multiple assignment 649 649 ResolvExpr::CandidateList rhs; 650 explode( rhsCand, crntFinder. localSyms, back_inserter(rhs), true );650 explode( rhsCand, crntFinder.context.symtab, back_inserter(rhs), true ); 651 651 matcher.reset( 652 652 new MultipleAssignMatcher{ *this, expr->location, lhs, rhs } ); … … 678 678 ) 679 679 680 ResolvExpr::CandidateFinder finder { crntFinder.localSyms, matcher->env };680 ResolvExpr::CandidateFinder finder( crntFinder.context, matcher->env ); 681 681 682 682 try { -
src/Validate/FindSpecialDeclsNew.cpp
ref3c383 rd672350 30 30 31 31 struct FindDeclsCore : public ast::WithShortCircuiting { 32 ast::Translation Unit::Global & global;33 FindDeclsCore( ast::Translation Unit::Global & g ) : global( g ) {}32 ast::TranslationGlobal & global; 33 FindDeclsCore( ast::TranslationGlobal & g ) : global( g ) {} 34 34 35 35 void previsit( const ast::Decl * decl ); … … 74 74 ast::Pass<FindDeclsCore>::run( translationUnit, translationUnit.global ); 75 75 76 // TODO: When everything gets the globals from the translation unit,77 // remove these.78 ast::dereferenceOperator = translationUnit.global.dereference;79 ast::dtorStruct = translationUnit.global.dtorStruct;80 ast::dtorStructDestroy = translationUnit.global.dtorDestroy;81 82 76 // TODO: conditionally generate 'fake' declarations for missing features, 83 77 // so that translation can proceed in the event that builtins, prelude, -
src/Validate/module.mk
ref3c383 rd672350 20 20 Validate/CompoundLiteral.cpp \ 21 21 Validate/CompoundLiteral.hpp \ 22 Validate/ForallPointerDecay.cpp \ 23 Validate/ForallPointerDecay.hpp \ 22 24 Validate/HandleAttributes.cc \ 23 25 Validate/HandleAttributes.h \ -
src/Virtual/Tables.cc
ref3c383 rd672350 10 10 // Created On : Mon Aug 31 11:11:00 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Apr 21 15:36:00 2021 13 // Update Count : 2 14 // 15 12 // Last Modified On : Fri Mar 11 10:40:00 2022 13 // Update Count : 3 14 // 15 16 #include "AST/Attribute.hpp" 17 #include "AST/Copy.hpp" 18 #include "AST/Decl.hpp" 19 #include "AST/Expr.hpp" 20 #include "AST/Init.hpp" 21 #include "AST/Stmt.hpp" 22 #include "AST/Type.hpp" 16 23 #include <SynTree/Attribute.h> 17 24 #include <SynTree/Declaration.h> … … 77 84 } 78 85 86 static ast::ObjectDecl * makeVtableDeclaration( 87 CodeLocation const & location, std::string const & name, 88 ast::StructInstType const * type, ast::Init const * init ) { 89 ast::Storage::Classes storage; 90 if ( nullptr == init ) { 91 storage.is_extern = true; 92 } 93 return new ast::ObjectDecl( 94 location, 95 name, 96 type, 97 init, 98 storage, 99 ast::Linkage::Cforall 100 ); 101 } 102 79 103 ObjectDecl * makeVtableForward( std::string const & name, StructInstType * type ) { 80 104 assert( type ); 81 105 return makeVtableDeclaration( name, type, nullptr ); 106 } 107 108 ast::ObjectDecl * makeVtableForward( 109 CodeLocation const & location, std::string const & name, 110 ast::StructInstType const * vtableType ) { 111 assert( vtableType ); 112 return makeVtableDeclaration( location, name, vtableType, nullptr ); 82 113 } 83 114 … … 123 154 } 124 155 156 static std::vector<ast::ptr<ast::Init>> buildInits( 157 CodeLocation const & location, 158 //std::string const & name, 159 ast::StructInstType const * vtableType, 160 ast::Type const * objectType ) { 161 ast::StructDecl const * vtableStruct = vtableType->base; 162 163 std::vector<ast::ptr<ast::Init>> inits; 164 inits.reserve( vtableStruct->members.size() ); 165 166 // This is designed to run before the resolver. 167 for ( auto field : vtableStruct->members ) { 168 if ( std::string( "parent" ) == field->name ) { 169 // This will not work with polymorphic state. 
170 auto oField = field.strict_as<ast::ObjectDecl>(); 171 auto fieldType = oField->type.strict_as<ast::PointerType>(); 172 auto parentType = fieldType->base.strict_as<ast::StructInstType>(); 173 std::string const & parentInstance = instanceName( parentType->name ); 174 inits.push_back( 175 new ast::SingleInit( location, new ast::AddressExpr( new ast::NameExpr( location, parentInstance ) ) ) ); 176 } else if ( std::string( "__cfavir_typeid" ) == field->name ) { 177 std::string const & baseType = baseTypeName( vtableType->name ); 178 std::string const & typeId = typeIdName( baseType ); 179 inits.push_back( new ast::SingleInit( location, new ast::AddressExpr( new ast::NameExpr( location, typeId ) ) ) ); 180 } else if ( std::string( "size" ) == field->name ) { 181 inits.push_back( new ast::SingleInit( location, new ast::SizeofExpr( location, objectType ) 182 ) ); 183 } else if ( std::string( "align" ) == field->name ) { 184 inits.push_back( new ast::SingleInit( location, 185 new ast::AlignofExpr( location, objectType ) 186 ) ); 187 } else { 188 inits.push_back( new ast::SingleInit( location, 189 new ast::NameExpr( location, field->name ) 190 ) ); 191 } 192 //ast::Expr * expr = buildInitExpr(...); 193 //inits.push_back( new ast::SingleInit( location, expr ) ) 194 } 195 196 return inits; 197 } 198 199 ast::ObjectDecl * makeVtableInstance( 200 CodeLocation const & location, 201 std::string const & name, 202 ast::StructInstType const * vtableType, 203 ast::Type const * objectType, 204 ast::Init const * init ) { 205 assert( vtableType ); 206 assert( objectType ); 207 208 // Build the initialization. 209 if ( nullptr == init ) { 210 init = new ast::ListInit( location, 211 buildInits( location, vtableType, objectType ) ); 212 213 // The provided init should initialize everything except the parent 214 // pointer, the size-of and align-of fields. These should be inserted. 215 } else { 216 // Except this is not yet supported. 
217 assert(false); 218 } 219 return makeVtableDeclaration( location, name, vtableType, init ); 220 } 221 125 222 namespace { 126 223 std::string const functionName = "get_exception_vtable"; … … 140 237 new ReferenceType( noQualifiers, vtableType ), 141 238 nullptr, 142 239 { new Attribute("unused") } 143 240 ) ); 144 241 type->parameters.push_back( new ObjectDecl( … … 157 254 type, 158 255 nullptr 256 ); 257 } 258 259 ast::FunctionDecl * makeGetExceptionForward( 260 CodeLocation const & location, 261 ast::Type const * vtableType, 262 ast::Type const * exceptType ) { 263 assert( vtableType ); 264 assert( exceptType ); 265 return new ast::FunctionDecl( 266 location, 267 functionName, 268 { /* forall */ }, 269 { new ast::ObjectDecl( 270 location, 271 "__unused", 272 new ast::PointerType( exceptType ) 273 ) }, 274 { new ast::ObjectDecl( 275 location, 276 "_retvalue", 277 new ast::ReferenceType( vtableType ) 278 ) }, 279 nullptr, 280 ast::Storage::Classes(), 281 ast::Linkage::Cforall, 282 { new ast::Attribute( "unused" ) } 159 283 ); 160 284 } … … 172 296 } 173 297 298 ast::FunctionDecl * makeGetExceptionFunction( 299 CodeLocation const & location, 300 ast::ObjectDecl const * vtableInstance, ast::Type const * exceptType ) { 301 assert( vtableInstance ); 302 assert( exceptType ); 303 ast::FunctionDecl * func = makeGetExceptionForward( 304 location, ast::deepCopy( vtableInstance->type ), exceptType ); 305 func->stmts = new ast::CompoundStmt( location, { 306 new ast::ReturnStmt( location, new ast::VariableExpr( location, vtableInstance ) ) 307 } ); 308 return func; 309 } 310 174 311 ObjectDecl * makeTypeIdInstance( StructInstType const * typeIdType ) { 175 312 assert( typeIdType ); … … 191 328 } 192 329 193 } 330 ast::ObjectDecl * makeTypeIdInstance( 331 CodeLocation const & location, 332 ast::StructInstType const * typeIdType ) { 333 assert( typeIdType ); 334 ast::StructInstType * type = ast::mutate( typeIdType ); 335 type->set_const( true ); 336 std::string const & typeid_name = typeIdTypeToInstance( typeIdType->name ); 337 return new ast::ObjectDecl( 338 location, 339 typeid_name, 340 type, 341 new ast::ListInit( location, { 342 new ast::SingleInit( location, 343 new ast::AddressExpr( location, 344 new ast::NameExpr( location, "__cfatid_exception_t" ) ) ) 345 } ), 346 ast::Storage::Classes(), 347 ast::Linkage::Cforall, 348 nullptr, 349 { new ast::Attribute( "cfa_linkonce" ) } 350 ); 351 } 352 353 } -
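Taken together, the new-AST overloads above give an exception-lowering pass the pieces it needs to emit a virtual table: a forward declaration, the instance whose initializer is synthesized from the vtable layout, and the get_exception_vtable accessor bound to that instance. A hedged sketch of that composition; the function name, include list, and the assumption that these helpers live in the Virtual namespace are illustrative only:

	// Sketch only: composes the helpers declared in Virtual/Tables.h; the
	// surrounding pass normally supplies vtableType, exceptType and the name.
	#include <string>
	#include <vector>
	#include "AST/Decl.hpp"
	#include "AST/Type.hpp"
	#include "Virtual/Tables.h"

	std::vector< ast::ptr< ast::Decl > > buildExceptionVtable(
			CodeLocation const & loc,
			ast::StructInstType const * vtableType,
			ast::Type const * exceptType,
			std::string const & instName ) {
		std::vector< ast::ptr< ast::Decl > > decls;
		// extern declaration so uses can appear before the definition
		decls.emplace_back( Virtual::makeVtableForward( loc, instName, vtableType ) );
		// the instance itself; with no init, the fields are filled from the vtable layout
		ast::ObjectDecl * instance =
			Virtual::makeVtableInstance( loc, instName, vtableType, exceptType );
		decls.emplace_back( instance );
		// accessor tying the exception type to this vtable instance
		decls.emplace_back( Virtual::makeGetExceptionFunction( loc, instance, exceptType ) );
		return decls;
	}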
src/Virtual/Tables.h
ref3c383 rd672350 10 10 // Created On : Mon Aug 31 11:07:00 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : We d Apr 21 10:30:00 202113 // Update Count : 212 // Last Modified On : Wec Dec 8 16:58:00 2021 13 // Update Count : 3 14 14 // 15 15 16 16 #include <list> // for list 17 17 18 #include <string> 19 #include "AST/Fwd.hpp" 18 20 class Declaration; 19 21 class StructDecl; … … 35 37 * vtableType node is consumed. 36 38 */ 39 ast::ObjectDecl * makeVtableForward( 40 CodeLocation const & location, std::string const & name, 41 ast::StructInstType const * vtableType ); 37 42 38 43 ObjectDecl * makeVtableInstance( … … 43 48 * vtableType and init (if provided) nodes are consumed. 44 49 */ 50 ast::ObjectDecl * makeVtableInstance( 51 CodeLocation const & location, 52 std::string const & name, 53 ast::StructInstType const * vtableType, 54 ast::Type const * objectType, 55 ast::Init const * init = nullptr ); 45 56 46 57 // Some special code for how exceptions interact with virtual tables. … … 49 60 * linking the vtableType to the exceptType. Both nodes are consumed. 50 61 */ 62 ast::FunctionDecl * makeGetExceptionForward( 63 CodeLocation const & location, 64 ast::Type const * vtableType, 65 ast::Type const * exceptType ); 51 66 52 67 FunctionDecl * makeGetExceptionFunction( … … 55 70 * exceptType node is consumed. 56 71 */ 72 ast::FunctionDecl * makeGetExceptionFunction( 73 CodeLocation const & location, 74 ast::ObjectDecl const * vtableInstance, ast::Type const * exceptType ); 57 75 58 76 ObjectDecl * makeTypeIdInstance( StructInstType const * typeIdType ); … … 60 78 * TODO: Should take the parent type. Currently locked to the exception_t. 61 79 */ 80 ast::ObjectDecl * makeTypeIdInstance( 81 const CodeLocation & location, ast::StructInstType const * typeIdType ); 62 82 63 83 } -
src/main.cc
ref3c383 rd672350 10 10 // Created On : Fri May 15 23:12:02 2015 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Jan 26 14:09:00 202213 // Update Count : 67 012 // Last Modified On : Fri Mar 11 10:39:00 2022 13 // Update Count : 671 14 14 // 15 15 … … 76 76 #include "Validate/Autogen.hpp" // for autogenerateRoutines 77 77 #include "Validate/FindSpecialDecls.h" // for findGlobalDecls 78 #include "Validate/ForallPointerDecay.hpp" // for decayForallPointers 78 79 #include "Validate/CompoundLiteral.hpp" // for handleCompoundLiterals 79 80 #include "Validate/InitializerLength.hpp" // for setLengthFromInitializer … … 331 332 332 333 if( useNewAST ) { 333 PASS( "Apply Concurrent Keywords", Concurrency::applyKeywords( translationUnit ) );334 PASS( "Forall Pointer Decay", SymTab::decayForallPointers( translationUnit ) );335 334 CodeTools::fillLocations( translationUnit ); 336 335 … … 342 341 343 342 forceFillCodeLocations( transUnit ); 343 344 PASS( "Implement Concurrent Keywords", Concurrency::implementKeywords( transUnit ) ); 345 346 // Must be after implement concurrent keywords; because uniqueIds 347 // must be set on declaration before resolution. 348 // Must happen before autogen routines are added. 349 PASS( "Forall Pointer Decay", Validate::decayForallPointers( transUnit ) ); 344 350 345 351 // Must happen before autogen routines are added. … … 487 493 PASS( "Translate Tries" , ControlStruct::translateTries( translationUnit ) ); 488 494 } 489 490 491 495 492 496 PASS( "Gen Waitfor" , Concurrency::generateWaitFor( translationUnit ) ); -
tests/.expect/declarationSpecifier.arm64.txt
ref3c383 rd672350 1132 1132 char **_X13cfa_args_argvPPc_1; 1133 1133 char **_X13cfa_args_envpPPc_1; 1134 signed int _X17cfa_main_returnedi_1 = ((signed int )0);1134 __attribute__ ((weak)) extern signed int _X17cfa_main_returnedi_1; 1135 1135 signed int main(signed int _X4argci_1, char **_X4argvPPc_1, char **_X4envpPPc_1){ 1136 1136 __attribute__ ((unused)) signed int _X12_retval_maini_1; … … 1149 1149 signed int _tmp_cp_ret6; 1150 1150 signed int _X3reti_2 = (((void)(_tmp_cp_ret6=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret6); 1151 { 1152 ((void)(_X17cfa_main_returnedi_1=((signed int )1))); 1151 if ( ((&_X17cfa_main_returnedi_1)!=((signed int *)0)) ) { 1152 { 1153 ((void)(_X17cfa_main_returnedi_1=((signed int )1))); 1154 } 1155 1153 1156 } 1154 1157 -
tests/.expect/gccExtensions.arm64.txt
ref3c383 rd672350 324 324 char **_X13cfa_args_argvPPc_1; 325 325 char **_X13cfa_args_envpPPc_1; 326 signed int _X17cfa_main_returnedi_1 = ((signed int )0);326 __attribute__ ((weak)) extern signed int _X17cfa_main_returnedi_1; 327 327 signed int main(signed int _X4argci_1, char **_X4argvPPc_1, char **_X4envpPPc_1){ 328 328 __attribute__ ((unused)) signed int _X12_retval_maini_1; … … 341 341 signed int _tmp_cp_ret6; 342 342 signed int _X3reti_2 = (((void)(_tmp_cp_ret6=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret6); 343 { 344 ((void)(_X17cfa_main_returnedi_1=((signed int )1))); 343 if ( ((&_X17cfa_main_returnedi_1)!=((signed int *)0)) ) { 344 { 345 ((void)(_X17cfa_main_returnedi_1=((signed int )1))); 346 } 347 345 348 } 346 349 -
tests/.expect/random.arm64.txt
ref3c383 rd672350 1 1 õ 2 2 = 3 V 3 K 4 4 -911259971 5 5 6 6 -4 6 11 7 7 1232105397 8 8 0 9 1 89 11 10 10 -914096085 11 11 1 12 15 12 20 13 13 2077092859 14 14 1 15 1 115 12 16 16 0.677254 17 17 0.678106775246139 -
tests/Makefile.am
ref3c383 rd672350 66 66 PRETTY_PATH=mkdir -p $(dir $(abspath ${@})) && cd ${srcdir} && 67 67 68 .PHONY: list .validate 69 .INTERMEDIATE: .validate .validate.cfa 68 .PHONY: list .validate .test_makeflags 69 .INTERMEDIATE: .validate .validate.cfa .test_makeflags 70 70 EXTRA_PROGRAMS = avl_test linkonce .dummy_hack # build but do not install 71 71 EXTRA_DIST = test.py \ … … 123 123 @+${TEST_PY} --list ${concurrent} 124 124 125 .test_makeflags: 126 @echo "${MAKEFLAGS}" 127 125 128 .validate: .validate.cfa 126 129 $(CFACOMPILE) .validate.cfa -fsyntax-only -Wall -Wextra -Werror -
tests/collections/.expect/string-api-coverage.txt
ref3c383 rd672350 1 1 hello hello hello 2 3 hello 2 4 true false 3 5 true false -
tests/collections/.expect/string-gc.txt
ref3c383 rd672350 38 38 x from 5 to 15 39 39 y from 5 to 15 40 ======================== fillNoCompact 41 about to expand, a = aaa 42 expanded, a = aaa 43 about to expand, a = aaa 44 expanded, a = aaa 45 about to expand, a = aaa 46 expanded, a = aaa 47 about to expand, a = aaa 48 expanded, a = aaa 49 about to expand, a = aaa 50 expanded, a = aaa -
tests/collections/.expect/vector-err-pass-perm-it-byval.txt
ref3c383 rd672350 1 error: Unique best alternative includes deleted identifier in Generated Cast of:1 collections/vector-demo.cfa:95:1 error: Unique best alternative includes deleted identifier in Generated Cast of: 2 2 Application of 3 3 Deleted Expression -
tests/collections/string-api-coverage.cfa
ref3c383 rd672350 1 1 #include <containers/string.hfa> 2 #include <string_sharectx.hfa> 2 3 3 4 void assertWellFormedHandleList( int maxLen ) { // with(HeapArea) … … 25 26 26 27 int main () { 28 29 #ifdef STRING_SHARING_OFF 30 string_sharectx c = { NO_SHARING }; 31 #endif 32 27 33 string s = "hello"; 28 34 string s2 = "hello"; … … 31 37 32 38 // IO operator, x2 33 sout | s | s | s; 39 sout | s | s | s; // hello hello hello 40 41 // empty ctor then assign 42 string sxx; 43 sout | sxx; // (blank line) 44 sxx = s; 45 sout | sxx; // hello 34 46 35 47 // Comparisons -
tests/collections/string-gc.cfa
ref3c383 rd672350 2 2 3 3 size_t bytesRemaining() { 4 return DEBUG_string_bytes_avail_until_gc( DEBUG_string_heap );4 return DEBUG_string_bytes_avail_until_gc( DEBUG_string_heap() ); 5 5 } 6 6 7 7 size_t heapOffsetStart( string_res & s ) { 8 const char * startByte = DEBUG_string_heap_start( DEBUG_string_heap );8 const char * startByte = DEBUG_string_heap_start( DEBUG_string_heap() ); 9 9 assert( s.Handle.s >= startByte ); 10 10 return s.Handle.s - startByte; … … 120 120 } 121 121 122 void fillNoCompact() { 123 // show that allocating in a heap filled with mostly live strings (no collectable garbage) causes heap growth 124 125 sout | "======================== fillNoCompact"; 126 127 size_t lastTimeBytesAvail = bytesRemaining(); 128 assert( lastTimeBytesAvail >= 200 ); // starting this test with nontrivial room 129 130 // mostly fill the pad 131 string_res a = "aaa"; // will have to be moved 132 string_res z = "zzz"; 133 for (i; 5) { 134 while ( bytesRemaining() > 10 ) { 135 z += "."; 136 } 137 sout | "about to expand, a = " | a; 138 while ( bytesRemaining() <= 10 ) { 139 z += "."; 140 } 141 sout | "expanded, a = " | a; 142 143 // each growth gives more usable space than the last 144 assert( bytesRemaining() > lastTimeBytesAvail ); 145 lastTimeBytesAvail = bytesRemaining(); 146 } 147 } 148 122 149 int main() { 123 150 basicFillCompact(); 124 151 fillCompact_withSharedEdits(); 152 fillNoCompact(); 125 153 } -
tests/collections/string-overwrite.cfa
ref3c383 rd672350 1 1 #include <containers/string.hfa> 2 #include <string_sharectx.hfa> 2 3 3 4 /* … … 11 12 WE = witness end 12 13 13 The dest does:14 The test does: 14 15 starts with the entire string being, initially, the alphabet; prints this entire alphabet 15 16 sets up modifier and witness as ranges within it, and prints a visualization of those ranges … … 24 25 This API's convention has Start positions being inclusive and end positions being exclusive. 25 26 27 v Case number in output 26 28 With 1 equivalence class: 27 29 MS = ME = WS = WE 1 … … 118 120 struct { int ms; int me; int ws; int we; char *replaceWith; char *label; } cases[] = { 119 121 { 12, 14, 10, 20, "xxxxx", "warmup" }, 120 // { 12, 14, 12, 14, "xxxxx", "" }, // the bug that got me into this test (should be a dup with case 6)121 122 { 10, 10, 10, 10, "=====", "1" }, 122 123 { 10, 10, 10, 10, "==" , "" }, … … 223 224 { 12, 14, 10, 16, "=" , "" }, 224 225 { 12, 14, 10, 16, "" , "" }, 225 /*226 { , , , , "=====", "NN" },227 { "==" , "" },228 { "=" , "" },229 { "" , "" },230 */231 226 }; 232 227 for ( i; sizeof(cases)/sizeof(cases[0]) ) { … … 238 233 239 234 240 // void f( string & s, string & toEdit ) {241 242 // sout | s | "|" | toEdit | "|";243 244 // s(14, 16) = "-";245 // sout | s | "|" | toEdit | "|";246 // }247 248 235 int main() { 236 237 #ifdef STRING_SHARING_OFF 238 string_sharectx c = { NO_SHARING }; 239 #endif 240 241 249 242 // 0 1 2 250 243 // 01234567890123456789012345 -
tests/concurrent/mutexstmt/.expect/locks.txt
ref3c383 rd672350 3 3 Start Test: multi lock deadlock/mutual exclusion 4 4 End Test: multi lock deadlock/mutual exclusion 5 Start Test: single scoped lock mutual exclusion 6 End Test: single scoped lock mutual exclusion 7 Start Test: multi scoped lock deadlock/mutual exclusion 8 End Test: multi scoped lock deadlock/mutual exclusion 5 Start Test: multi polymorphic lock deadlock/mutual exclusion 6 End Test: multi polymorphic lock deadlock/mutual exclusion -
tests/concurrent/mutexstmt/locks.cfa
ref3c383 rd672350 3 3 4 4 const unsigned int num_times = 10000; 5 6 Duration default_preemption() { return 0; } 5 7 6 8 single_acquisition_lock m1, m2, m3, m4, m5; … … 22 24 } 23 25 26 void refTest( single_acquisition_lock & m ) { 27 mutex ( m ) { 28 assert(!insideFlag); 29 insideFlag = true; 30 assert(insideFlag); 31 insideFlag = false; 32 } 33 } 34 24 35 thread T_Multi {}; 25 36 26 37 void main( T_Multi & this ) { 27 38 for (unsigned int i = 0; i < num_times; i++) { 39 refTest( m1 ); 28 40 mutex ( m1 ) { 29 41 assert(!insideFlag); … … 59 71 } 60 72 61 thread T_Mutex_Scoped {}; 73 single_acquisition_lock l1; 74 linear_backoff_then_block_lock l2; 75 owner_lock l3; 62 76 63 void main( T_Mutex_Scoped & this ) { 77 monitor monitor_t {}; 78 79 monitor_t l4; 80 81 thread T_Multi_Poly {}; 82 83 void main( T_Multi_Poly & this ) { 64 84 for (unsigned int i = 0; i < num_times; i++) { 65 { 66 scoped_lock(single_acquisition_lock) s{m1}; 67 count++; 68 } 69 { 70 scoped_lock(single_acquisition_lock) s{m1}; 85 refTest( l1 ); 86 mutex ( l1, l4 ) { 71 87 assert(!insideFlag); 72 88 insideFlag = true; … … 74 90 insideFlag = false; 75 91 } 76 } 77 } 78 79 thread T_Multi_Scoped {}; 80 81 void main( T_Multi_Scoped & this ) { 82 for (unsigned int i = 0; i < num_times; i++) { 83 { 84 scoped_lock(single_acquisition_lock) s{m1}; 92 mutex ( l1, l2, l3 ) { 85 93 assert(!insideFlag); 86 94 insideFlag = true; … … 88 96 insideFlag = false; 89 97 } 90 { 91 scoped_lock(single_acquisition_lock) s1{m1}; 92 scoped_lock(single_acquisition_lock) s2{m2}; 93 scoped_lock(single_acquisition_lock) s3{m3}; 94 scoped_lock(single_acquisition_lock) s4{m4}; 95 scoped_lock(single_acquisition_lock) s5{m5}; 98 mutex ( l3, l1, l4 ) { 96 99 assert(!insideFlag); 97 100 insideFlag = true; … … 99 102 insideFlag = false; 100 103 } 101 { 102 scoped_lock(single_acquisition_lock) s1{m1}; 103 scoped_lock(single_acquisition_lock) s3{m3}; 104 assert(!insideFlag); 105 insideFlag = true; 106 assert(insideFlag); 107 insideFlag = false; 108 } 109 { 110 scoped_lock(single_acquisition_lock) s1{m1}; 111 scoped_lock(single_acquisition_lock) s2{m2}; 112 scoped_lock(single_acquisition_lock) s4{m4}; 113 assert(!insideFlag); 114 insideFlag = true; 115 assert(insideFlag); 116 insideFlag = false; 117 } 118 { 119 scoped_lock(single_acquisition_lock) s1{m1}; 120 scoped_lock(single_acquisition_lock) s3{m3}; 121 scoped_lock(single_acquisition_lock) s4{m4}; 122 scoped_lock(single_acquisition_lock) s5{m5}; 104 mutex ( l1, l2, l4 ) { 123 105 assert(!insideFlag); 124 106 insideFlag = true; … … 131 113 int num_tasks = 10; 132 114 int main() { 133 processor p[ 10];115 processor p[num_tasks - 1]; 134 116 135 117 printf("Start Test: single lock mutual exclusion\n"); 136 118 { 137 T_Mutex t[ 10];119 T_Mutex t[num_tasks]; 138 120 } 139 121 assert(count == num_tasks * num_times); … … 141 123 printf("Start Test: multi lock deadlock/mutual exclusion\n"); 142 124 { 143 T_Multi t[ 10];125 T_Multi t[num_tasks]; 144 126 } 145 127 printf("End Test: multi lock deadlock/mutual exclusion\n"); 146 147 count = 0; 148 printf("Start Test: single scoped lock mutual exclusion\n"); 128 printf("Start Test: multi polymorphic lock deadlock/mutual exclusion\n"); 149 129 { 150 T_Mu tex_Scoped t[10];130 T_Multi_Poly t[num_tasks]; 151 131 } 152 assert(count == num_tasks * num_times); 153 printf("End Test: single scoped lock mutual exclusion\n"); 154 printf("Start Test: multi scoped lock deadlock/mutual exclusion\n"); 155 { 156 T_Multi_Scoped t[10]; 157 } 158 printf("End Test: multi scoped lock 
deadlock/mutual exclusion\n"); 132 printf("End Test: multi polymorphic lock deadlock/mutual exclusion\n"); 159 133 } -
tests/io/many_read.cfa
ref3c383 rd672350 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // many_read.cfa -- Make sure that multiple concurrent reads tomess up.7 // many_read.cfa -- Make sure that multiple concurrent reads don't mess up. 8 8 // 9 9 // Author : Thierry Delisle -
tests/meta/dumpable.cfa
ref3c383 rd672350 72 72 } 73 73 74 if((buf.f_bsize * buf.f_bavail) < 536870912) { 75 serr | "Available diskspace is less than ~500Mb: " | (buf.f_bsize * buf.f_bavail); 74 uint64_t avail = buf.f_bavail; 75 avail *= buf.f_bsize; 76 if(avail < 536870912_l64u) { 77 serr | "Available diskspace is less than ~500Mb: " | avail; 76 78 } 77 79 -
tests/pybin/settings.py
ref3c383 rd672350 155 155 global generating 156 156 global make 157 global make_jobfds 157 158 global output_width 158 159 global timeout … … 168 169 generating = options.regenerate_expected 169 170 make = ['make'] 171 make_jobfds = [] 170 172 output_width = 24 171 173 timeout = Timeouts(options.timeout, options.global_timeout) … … 177 179 os.putenv('DISTCC_LOG', os.path.join(BUILDDIR, 'distcc_error.log')) 178 180 179 def update_make_cmd(f orce, jobs):181 def update_make_cmd(flags): 180 182 global make 181 182 make = ['make'] if not force else ['make', "-j%i" % jobs] 183 make = ['make', *flags] 184 185 def update_make_fds(r, w): 186 global make_jobfds 187 make_jobfds = (r, w) 183 188 184 189 def validate(): … … 187 192 global distcc 188 193 distcc = "DISTCC_CFA_PATH=~/.cfadistcc/%s/cfa" % tools.config_hash() 189 errf = os.path.join(BUILDDIR, ".validate.err") 190 make_ret, out = tools.make( ".validate", error_file = errf, output_file=subprocess.DEVNULL, error=subprocess.DEVNULL ) 194 make_ret, out, err = tools.make( ".validate", output_file=subprocess.PIPE, error=subprocess.PIPE ) 191 195 if make_ret != 0: 192 with open (errf, "r") as myfile:193 error=myfile.read()194 196 print("ERROR: Invalid configuration %s:%s" % (arch.string, debug.string), file=sys.stderr) 195 print(" verify returned : \n%s" % error, file=sys.stderr) 196 tools.rm(errf) 197 print(" verify returned : \n%s" % err, file=sys.stderr) 197 198 sys.exit(1) 198 199 tools.rm(errf)200 199 201 200 def prep_output(tests): -
tests/pybin/tools.py
ref3c383 rd672350 23 23 24 24 # helper functions to run terminal commands 25 def sh(*cmd, timeout = False, output_file = None, input_file = None, input_text = None, error = subprocess.STDOUT, ignore_dry_run = False ):25 def sh(*cmd, timeout = False, output_file = None, input_file = None, input_text = None, error = subprocess.STDOUT, ignore_dry_run = False, pass_fds = []): 26 26 try: 27 27 cmd = list(cmd) … … 65 65 **({'input' : bytes(input_text, encoding='utf-8')} if input_text else {'stdin' : input_file}), 66 66 stdout = output_file, 67 stderr = error 67 stderr = error, 68 pass_fds = pass_fds 68 69 ) as proc: 69 70 70 71 try: 71 out, _= proc.communicate(72 out, errout = proc.communicate( 72 73 timeout = settings.timeout.single if timeout else None 73 74 ) 74 75 75 return proc.returncode, out.decode("latin-1") if out else None 76 return proc.returncode, out.decode("latin-1") if out else None, errout.decode("latin-1") if errout else None 76 77 except subprocess.TimeoutExpired: 77 78 if settings.timeout2gdb: 78 79 print("Process {} timeout".format(proc.pid)) 79 80 proc.communicate() 80 return 124, str(None) 81 return 124, str(None), "Subprocess Timeout 2 gdb" 81 82 else: 82 83 proc.send_signal(signal.SIGABRT) 83 84 proc.communicate() 84 return 124, str(None) 85 return 124, str(None), "Subprocess Timeout 2 gdb" 85 86 86 87 except Exception as ex: … … 105 106 return (False, "No file") 106 107 107 code, out = sh("file", fname, output_file=subprocess.PIPE)108 code, out, err = sh("file", fname, output_file=subprocess.PIPE) 108 109 if code != 0: 109 return (False, "'file EXPECT' failed with code {} ".format(code))110 return (False, "'file EXPECT' failed with code {} '{}'".format(code, err)) 110 111 111 112 match = re.search(".*: (.*)", out) … … 190 191 ] 191 192 cmd = [s for s in cmd if s] 192 return sh(*cmd, output_file=output_file, error=error )193 return sh(*cmd, output_file=output_file, error=error, pass_fds=settings.make_jobfds) 193 194 194 195 def make_recon(target): … … 241 242 # move a file 242 243 def mv(source, dest): 243 ret, _ = sh("mv", source, dest)244 ret, _, _ = sh("mv", source, dest) 244 245 return ret 245 246 246 247 # cat one file into the other 247 248 def cat(source, dest): 248 ret, _ = sh("cat", source, output_file=dest)249 ret, _, _ = sh("cat", source, output_file=dest) 249 250 return ret 250 251 … … 289 290 # system 290 291 ################################################################################ 292 def jobserver_version(): 293 make_ret, out, err = sh('make', '.test_makeflags', '-j2', output_file=subprocess.PIPE, error=subprocess.PIPE) 294 if make_ret != 0: 295 print("ERROR: cannot find Makefile jobserver version", file=sys.stderr) 296 print(" test returned : {} '{}'".format(make_ret, err), file=sys.stderr) 297 sys.exit(1) 298 299 re_jobs = re.search("--jobserver-(auth|fds)", out) 300 if not re_jobs: 301 print("ERROR: cannot find Makefile jobserver version", file=sys.stderr) 302 print(" MAKEFLAGS are : '{}'".format(out), file=sys.stderr) 303 sys.exit(1) 304 305 return "--jobserver-{}".format(re_jobs.group(1)) 306 307 def prep_recursive_make(N): 308 if N < 2: 309 return [] 310 311 # create the pipe 312 (r, w) = os.pipe() 313 314 # feel it with N-1 tokens, (Why N-1 and not N, I don't know it's in the manpage for make) 315 os.write(w, b'+' * (N - 1)); 316 317 # prep the flags for make 318 make_flags = ["-j{}".format(N), "--jobserver-auth={},{}".format(r, w)] 319 320 # tell make about the pipes 321 os.environ["MAKEFLAGS"] = os.environ["MFLAGS"] = " ".join(make_flags) 
322 323 # make sure pass the pipes to our children 324 settings.update_make_fds(r, w) 325 326 return make_flags 327 328 def prep_unlimited_recursive_make(): 329 # prep the flags for make 330 make_flags = ["-j"] 331 332 # tell make about the pipes 333 os.environ["MAKEFLAGS"] = os.environ["MFLAGS"] = "-j" 334 335 return make_flags 336 337 338 def eval_hardware(): 339 # we can create as many things as we want 340 # how much hardware do we have? 341 if settings.distribute: 342 # remote hardware is allowed 343 # how much do we have? 344 ret, jstr, _ = sh("distcc", "-j", output_file=subprocess.PIPE, ignore_dry_run=True) 345 return int(jstr.strip()) if ret == 0 else multiprocessing.cpu_count() 346 else: 347 # remote isn't allowed, use local cpus 348 return multiprocessing.cpu_count() 349 291 350 # count number of jobs to create 292 def job_count( options , tests):351 def job_count( options ): 293 352 # check if the user already passed in a number of jobs for multi-threading 294 if not options.jobs: 295 make_flags = os.environ.get('MAKEFLAGS') 296 force = bool(make_flags) 297 make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None 298 if make_jobs_fds : 299 tokens = os.read(int(make_jobs_fds.group(2)), 1024) 300 options.jobs = len(tokens) 301 os.write(int(make_jobs_fds.group(3)), tokens) 302 else : 303 if settings.distribute: 304 ret, jstr = sh("distcc", "-j", output_file=subprocess.PIPE, ignore_dry_run=True) 305 if ret == 0: 306 options.jobs = int(jstr.strip()) 307 else : 308 options.jobs = multiprocessing.cpu_count() 309 else: 310 options.jobs = multiprocessing.cpu_count() 353 make_env = os.environ.get('MAKEFLAGS') 354 make_flags = make_env.split() if make_env else None 355 jobstr = jobserver_version() 356 357 if options.jobs and make_flags: 358 print('WARNING: -j options should not be specified when called form Make', file=sys.stderr) 359 360 # Top level make is calling the shots, just follow 361 if make_flags: 362 # do we have -j and --jobserver-... 363 jobopt = None 364 exists_fds = None 365 for f in make_flags: 366 jobopt = f if f.startswith("-j") else jobopt 367 exists_fds = f if f.startswith(jobstr) else exists_fds 368 369 # do we have limited parallelism? 370 if exists_fds : 371 try: 372 rfd, wfd = tuple(exists_fds.split('=')[1].split(',')) 373 except: 374 print("ERROR: jobserver has unrecoginzable format, was '{}'".format(exists_fds), file=sys.stderr) 375 sys.exit(1) 376 377 # read the token pipe to count number of available tokens and restore the pipe 378 # this assumes the test suite script isn't invoked in parellel with something else 379 tokens = os.read(int(rfd), 65536) 380 os.write(int(wfd), tokens) 381 382 # the number of tokens is off by one for obscure but well documented reason 383 # see man make for more details 384 options.jobs = len(tokens) + 1 385 386 # do we have unlimited parallelism? 
387 elif jobopt and jobopt != "-j1": 388 # check that this actually make sense 389 if jobopt != "-j": 390 print("ERROR: -j option passed by make but no {}, was '{}'".format(jobstr, jobopt), file=sys.stderr) 391 sys.exit(1) 392 393 options.jobs = eval_hardware() 394 flags = prep_unlimited_recursive_make() 395 396 397 # then no parallelism 398 else: 399 options.jobs = 1 400 401 # keep all flags make passed along, except the weird 'w' which is about subdirectories 402 flags = [f for f in make_flags if f != 'w'] 403 404 # Arguments are calling the shots, fake the top level make 405 elif options.jobs : 406 407 # make sure we have a valid number of jobs that corresponds to user input 408 if options.jobs < 0 : 409 print('ERROR: Invalid number of jobs', file=sys.stderr) 410 sys.exit(1) 411 412 flags = prep_recursive_make(options.jobs) 413 414 # Arguments are calling the shots, fake the top level make, but 0 is a special case 415 elif options.jobs == 0: 416 options.jobs = eval_hardware() 417 flags = prep_unlimited_recursive_make() 418 419 # No one says to run in parallel, then don't 311 420 else : 312 force = True313 314 # make sure we have a valid number of jobs that corresponds to user input 315 if options.jobs <= 0 :316 print('ERROR: Invalid number of jobs', file=sys.stderr)317 sys.exit(1) 318 319 return min( options.jobs, len(tests) ), force421 options.jobs = 1 422 flags = [] 423 424 # Make sure we call make as expected 425 settings.update_make_cmd( flags ) 426 427 # return the job count 428 return options.jobs 320 429 321 430 # enable core dumps for all the test children … … 334 443 distcc_hash = os.path.join(settings.SRCDIR, '../tools/build/distcc_hash') 335 444 config = "%s-%s" % (settings.arch.target, settings.debug.path) 336 _, out = sh(distcc_hash, config, output_file=subprocess.PIPE, ignore_dry_run=True)445 _, out, _ = sh(distcc_hash, config, output_file=subprocess.PIPE, ignore_dry_run=True) 337 446 return out.strip() 338 447 … … 374 483 375 484 if not os.path.isfile(core): 376 return 1, "ERR No core dump (limit soft: {} hard: {})".format(*resource.getrlimit(resource.RLIMIT_CORE))485 return 1, "ERR No core dump, expected '{}' (limit soft: {} hard: {})".format(core, *resource.getrlimit(resource.RLIMIT_CORE)) 377 486 378 487 try: 379 return sh('gdb', '-n', path, core, '-batch', '-x', cmd, output_file=subprocess.PIPE) 488 ret, out, err = sh('gdb', '-n', path, core, '-batch', '-x', cmd, output_file=subprocess.PIPE) 489 if ret == 0: 490 return 0, out 491 else: 492 return 1, err 380 493 except: 381 494 return 1, "ERR Could not read core with gdb" -
tests/test.py
ref3c383 rd672350 140 140 parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tets, can be used with --all option', action='store_true') 141 141 parser.add_argument('--archive-errors', help='If called with a valid path, on test crashes the test script will copy the core dump and the executable to the specified path.', type=str, default='') 142 parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously ', type=int)142 parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously, 0 (default) for unlimited', nargs='?', const=0, type=int) 143 143 parser.add_argument('--list-comp', help='List all valide arguments', action='store_true') 144 144 parser.add_argument('--list-dist', help='List all tests for distribution', action='store_true') … … 195 195 # build, skipping to next test on error 196 196 with Timed() as comp_dur: 197 make_ret, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )197 make_ret, _, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file ) 198 198 199 199 # ---------- … … 208 208 if settings.dry_run or is_exe(exe_file): 209 209 # run test 210 retcode, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)210 retcode, _, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True) 211 211 else : 212 212 # simply cat the result into the output … … 226 226 else : 227 227 # fetch return code and error from the diff command 228 retcode, error = diff(cmp_file, out_file)228 retcode, error, _ = diff(cmp_file, out_file) 229 229 230 230 else: … … 366 366 print(os.path.relpath(t.expect(), settings.SRCDIR), end=' ') 367 367 print(os.path.relpath(t.input() , settings.SRCDIR), end=' ') 368 code, out = make_recon(t.target())368 code, out, err = make_recon(t.target()) 369 369 370 370 if code != 0: 371 print('ERROR: recond failed for test {} '.format(t.target()), file=sys.stderr)371 print('ERROR: recond failed for test {}: {} \'{}\''.format(t.target(), code, err), file=sys.stderr) 372 372 sys.exit(1) 373 373 … … 417 417 if is_empty(t.expect()): 418 418 print('WARNING: test "{}" has empty .expect file'.format(t.target()), file=sys.stderr) 419 420 options.jobs = job_count( options ) 419 421 420 422 # for each build configurations, run the test … … 430 432 local_tests = settings.ast.filter( tests ) 431 433 local_tests = settings.arch.filter( local_tests ) 432 options.jobs, forceJobs = job_count( options, local_tests )433 settings.update_make_cmd(forceJobs, options.jobs)434 434 435 435 # check the build configuration works 436 436 settings.validate() 437 jobs = min(options.jobs, len(local_tests)) 437 438 438 439 # print configuration … … 440 441 'Regenerating' if settings.generating else 'Running', 441 442 len(local_tests), 442 options.jobs,443 jobs, 443 444 settings.ast.string, 444 445 settings.arch.string, … … 450 451 451 452 # otherwise run all tests and make sure to return the correct error code 452 failed = run_tests(local_tests, options.jobs)453 failed = run_tests(local_tests, jobs) 453 454 if failed: 454 455 if not settings.continue_: