Changeset 2e9b59b
- Timestamp: Apr 19, 2022, 3:00:04 PM
- Branches: ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children: 5b84a321
- Parents: ba897d21 (diff), bb7c77d (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Files:
  - 70 added
  - 135 edited
  - 6 moved
Jenkinsfile
rba897d21 r2e9b59b

@@ line 108 @@
 
 		// Configure libcfa
-		sh 'make -j 8 --no-print-directory configure-libcfa'
+		sh 'make -j $(nproc) --no-print-directory configure-libcfa'
 	}
 }
@@ line 116 @@
 	dir (BuildDir) {
 		// Build driver
-		sh 'make -j 8 --no-print-directory -C driver'
+		sh 'make -j $(nproc) --no-print-directory -C driver'
 
 		// Build translator
-		sh 'make -j 8 --no-print-directory -C src'
+		sh 'make -j $(nproc) --no-print-directory -C src'
 	}
 }
@@ line 126 @@
 	// Build outside of the src tree to ease cleaning
 	dir (BuildDir) {
-		sh "make -j 8 --no-print-directory -C libcfa/${Settings.Architecture.name}-debug"
+		sh "make -j \$(nproc) --no-print-directory -C libcfa/${Settings.Architecture.name}-debug"
 	}
 }
@@ line 133 @@
 	// Build outside of the src tree to ease cleaning
 	dir (BuildDir) {
-		sh "make -j 8 --no-print-directory -C libcfa/${Settings.Architecture.name}-nodebug"
+		sh "make -j \$(nproc) --no-print-directory -C libcfa/${Settings.Architecture.name}-nodebug"
 	}
 }
@@ line 140 @@
 	// Build outside of the src tree to ease cleaning
 	dir (BuildDir) {
-		sh "make -j 8 --no-print-directory install"
+		sh 'make -j $(nproc) --no-print-directory install'
 	}
 }
@@ line 161 @@
 Tools.BuildStage('Test: full', Settings.RunAllTests) {
 	dir (BuildDir) {
-		jopt = ""
+		jopt = '-j $(nproc)'
 		if( Settings.Architecture.node == 'x86' ) {
-			jopt = "-j2"
+			jopt = '-j2'
 		}
 		//Run the tests from the tests directory
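The recurring change above replaces a hard-coded parallelism level with the build machine's core count. As a rough illustration only (not part of the changeset; the core count and target name are placeholders):

    nproc                       # GNU coreutils: prints the number of available cores, e.g. 64
    make -j 8 target            # old: always at most 8 parallel jobs
    make -j $(nproc) target     # new: one make job per available core on the build host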
benchmark/io/http/main.cfa
rba897d21 r2e9b59b 33 33 //=============================================================================================' 34 34 35 thread StatsPrinter {}; 35 thread StatsPrinter { 36 Worker * workers; 37 int worker_cnt; 38 }; 36 39 37 40 void ?{}( StatsPrinter & this, cluster & cl ) { 38 41 ((thread&)this){ "Stats Printer Thread", cl }; 42 this.worker_cnt = 0; 39 43 } 40 44 41 45 void ^?{}( StatsPrinter & mutex this ) {} 46 47 #define eng3(X) (ws(3, 3, unit(eng( X )))) 42 48 43 49 void main(StatsPrinter & this) { … … 51 57 52 58 print_stats_now( *active_cluster(), CFA_STATS_READY_Q | CFA_STATS_IO ); 59 if(this.worker_cnt != 0) { 60 uint64_t tries = 0; 61 uint64_t calls = 0; 62 uint64_t header = 0; 63 uint64_t splcin = 0; 64 uint64_t splcot = 0; 65 struct { 66 volatile uint64_t calls; 67 volatile uint64_t bytes; 68 } avgrd[zipf_cnts]; 69 memset(avgrd, 0, sizeof(avgrd)); 70 71 for(i; this.worker_cnt) { 72 tries += this.workers[i].stats.sendfile.tries; 73 calls += this.workers[i].stats.sendfile.calls; 74 header += this.workers[i].stats.sendfile.header; 75 splcin += this.workers[i].stats.sendfile.splcin; 76 splcot += this.workers[i].stats.sendfile.splcot; 77 for(j; zipf_cnts) { 78 avgrd[j].calls += this.workers[i].stats.sendfile.avgrd[j].calls; 79 avgrd[j].bytes += this.workers[i].stats.sendfile.avgrd[j].bytes; 80 } 81 } 82 83 double ratio = ((double)tries) / calls; 84 85 sout | "----- Worker Stats -----"; 86 sout | "sendfile : " | calls | "calls," | tries | "tries (" | ratio | " try/call)"; 87 sout | " " | header | "header," | splcin | "splice in," | splcot | "splice out"; 88 sout | " - zipf sizes:"; 89 for(i; zipf_cnts) { 90 double written = avgrd[i].calls > 0 ? ((double)avgrd[i].bytes) / avgrd[i].calls : 0; 91 sout | " " | zipf_sizes[i] | "bytes," | avgrd[i].calls | "shorts," | written | "written"; 92 } 93 } 94 else { 95 sout | "No Workers!"; 96 } 53 97 } 54 98 } … … 218 262 { 219 263 Worker * workers = anew(options.clopts.nworkers); 264 cl[0].prnt->workers = workers; 265 cl[0].prnt->worker_cnt = options.clopts.nworkers; 220 266 for(i; options.clopts.nworkers) { 221 267 // if( options.file_cache.fixed_fds ) { … … 311 357 } 312 358 } 359 360 const size_t zipf_sizes[] = { 102, 204, 307, 409, 512, 614, 716, 819, 921, 1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192, 9216, 10240, 20480, 30720, 40960, 51200, 61440, 71680, 81920, 92160, 102400, 204800, 307200, 409600, 512000, 614400, 716800, 819200, 921600 }; 361 static_assert(zipf_cnts == sizeof(zipf_sizes) / sizeof(zipf_sizes[0])); -
benchmark/io/http/parhttperf
rba897d21 r2e9b59b

@@ line 6 @@
 
 mkdir -p out
-rm -v out/*
+rm out/*
+echo "httperf --client [0-$(($NTHREADS - 1))]/$NTHREADS $@ > out/result.[0-$(($NTHREADS - 1))].out"
 for ((i=0; i<$NTHREADS; i++))
 do
-	# echo "httperf --client $i/$NTHREADS $@ > out/result.$i.out"
 	httperf --client $i/$NTHREADS $@ > out/result.$i.out &
 done
benchmark/io/http/protocol.cfa
rba897d21 r2e9b59b 24 24 25 25 #include "options.hfa" 26 #include "worker.hfa" 26 27 27 28 #define PLAINTEXT_1WRITE … … 156 157 157 158 count -= ret; 158 offset += ret;159 159 size_t in_pipe = ret; 160 160 SPLICE2: while(in_pipe > 0) { … … 249 249 } 250 250 251 static inline int wait_and_process(header_g & this ) {251 static inline int wait_and_process(header_g & this, sendfile_stats_t & stats) { 252 252 wait(this.f); 253 253 … … 278 278 } 279 279 280 stats.header++; 281 280 282 // It must be a Short read 281 283 this.len -= this.f.result; … … 289 291 io_future_t f; 290 292 int fd; int pipe; size_t len; off_t off; 293 short zipf_idx; 291 294 FSM_Result res; 292 295 }; … … 297 300 this.len = len; 298 301 this.off = 0; 302 this.zipf_idx = -1; 303 STATS: for(i; zipf_cnts) { 304 if(len <= zipf_sizes[i]) { 305 this.zipf_idx = i; 306 break STATS; 307 } 308 } 309 if(this.zipf_idx < 0) mutex(serr) serr | "SPLICE IN" | len | " greated than biggest zipf file"; 299 310 } 300 311 … … 312 323 } 313 324 314 static inline int wait_and_process(splice_in_t & this ) {325 static inline int wait_and_process(splice_in_t & this, sendfile_stats_t & stats ) { 315 326 wait(this.f); 316 327 … … 328 339 return error(this.res, -ECONNRESET); 329 340 } 341 mutex(serr) serr | "SPLICE IN got" | error | ", WTF!"; 342 return error(this.res, -ECONNRESET); 330 343 } 331 344 … … 340 353 return done(this.res); 341 354 } 355 356 stats.splcin++; 357 stats.avgrd[this.zipf_idx].calls++; 358 stats.avgrd[this.zipf_idx].bytes += this.f.result; 342 359 343 360 // It must be a Short read … … 381 398 } 382 399 383 static inline void wait_and_process(splice_out_g & this ) {400 static inline void wait_and_process(splice_out_g & this, sendfile_stats_t & stats ) { 384 401 wait(this.f); 385 402 … … 397 414 return error(this, -ECONNRESET); 398 415 } 416 mutex(serr) serr | "SPLICE OUT got" | error | ", WTF!"; 417 return error(this, -ECONNRESET); 399 418 } 400 419 … … 411 430 412 431 SHORT_WRITE: 432 stats.splcot++; 433 413 434 // It must be a Short Write 414 435 this.len -= this.f.result; … … 417 438 } 418 439 419 int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t fsize ) { 440 int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t fsize, sendfile_stats_t & stats ) { 441 stats.calls++; 420 442 #if defined(LINKED_IO) 421 443 char buffer[512]; … … 426 448 427 449 RETRY_LOOP: for() { 450 stats.tries++; 428 451 int have = need(header.res) + need(splice_in.res) + 1; 429 452 int idx = 0; … … 444 467 // we may need to kill the connection if it fails 445 468 // If it already completed, this is a no-op 446 wait_and_process(splice_in );469 wait_and_process(splice_in, stats); 447 470 448 471 if(is_error(splice_in.res)) { … … 452 475 453 476 // Process the other 2 454 wait_and_process(header );455 wait_and_process(splice_out );477 wait_and_process(header, stats); 478 wait_and_process(splice_out, stats); 456 479 457 480 if(is_done(splice_out.res)) { … … 473 496 return len + fsize; 474 497 #else 498 stats.tries++; 475 499 int ret = answer_header(fd, fsize); 476 500 if( ret < 0 ) { close(fd); return ret; } -
benchmark/io/http/protocol.hfa
rba897d21 r2e9b59b

@@ line 1 @@
 #pragma once
+
+struct sendfile_stats_t;
 
 enum HttpCode {
@@ line 18 @@
 int answer_plaintext( int fd );
 int answer_empty( int fd );
-int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t count );
+int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t count, struct sendfile_stats_t & );
 
 [HttpCode code, bool closed, * const char file, size_t len] http_read(int fd, []char buffer, size_t len);
benchmark/io/http/worker.cfa
rba897d21 r2e9b59b

@@ line 23 @@
 	this.pipe[1] = -1;
 	this.done = false;
+
+	this.stats.sendfile.calls = 0;
+	this.stats.sendfile.tries = 0;
+	this.stats.sendfile.header = 0;
+	this.stats.sendfile.splcin = 0;
+	this.stats.sendfile.splcot = 0;
+	for(i; zipf_cnts) {
+		this.stats.sendfile.avgrd[i].calls = 0;
+		this.stats.sendfile.avgrd[i].bytes = 0;
+	}
 }
 
@@ line 123 @@
 
 		// Send the desired file
-		int ret = answer_sendfile( this.pipe, fd, ans_fd, count );
+		int ret = answer_sendfile( this.pipe, fd, ans_fd, count, this.stats.sendfile );
 		if( ret == -ECONNRESET ) break REQUEST;
 
benchmark/io/http/worker.hfa
rba897d21 r2e9b59b

@@ line 11 @@
 //=============================================================================================
 
+extern const size_t zipf_sizes[];
+enum { zipf_cnts = 36, };
+
+struct sendfile_stats_t {
+	volatile uint64_t calls;
+	volatile uint64_t tries;
+	volatile uint64_t header;
+	volatile uint64_t splcin;
+	volatile uint64_t splcot;
+	struct {
+		volatile uint64_t calls;
+		volatile uint64_t bytes;
+	} avgrd[zipf_cnts];
+};
+
 thread Worker {
 	int pipe[2];
@@ line 18 @@
 	int flags;
 	volatile bool done;
+	struct {
+		sendfile_stats_t sendfile;
+	} stats;
 };
 void ?{}( Worker & this);
benchmark/plot.py
rba897d21 r2e9b59b

@@ line 40 @@
 }
 
-def plot(data, x, y):
+def plot(data, x, y, out):
 	fig, ax = plt.subplots()
 	colors = itertools.cycle(['#0095e3','#006cb4','#69df00','#0aa000','#fb0300','#e30002','#fd8f00','#ff7f00','#8f00d6','#4b009a','#ffff00','#b13f00'])
@@ line 67 @@
 	ax.yaxis.set_major_formatter( EngFormatter(unit=field_names[y].unit) )
 	plt.legend(loc='upper left')
-	plt.show()
+	if out:
+		plt.savefig(out)
+	else:
+		plt.show()
 
 
@@ line 75 @@
 	parser = parser = argparse.ArgumentParser(description='Python Script to draw R.M.I.T. results')
 	parser.add_argument('-f', '--file', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
+	parser.add_argument('-o', '--out', nargs='?', type=str, default=None)
+	parser.add_argument('-y', nargs='?', type=str, default="")
 
 	try:
@@ line 103 @@
 			fields.add(label)
 
-	print(series)
-	print("fields")
-	for f in fields:
-		print("{}".format(f))
+	if not options.out :
+		print(series)
+		print("fields")
+		for f in fields:
+			print("{}".format(f))
 
-	plot(data, "Number of processors", "ns per ops")
+	if options.y and options.y in field_names.keys():
+		plot(data, "Number of processors", options.y, options.out)
+	else:
+		if options.y:
+			print("Could not find key '{}', defaulting to 'ns per ops'".format(options.y))
+		plot(data, "Number of processors", "ns per ops", options.out)
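For reference, a hypothetical invocation of the updated plotting script (the input file name is a placeholder; the -y value must match a key of field_names, otherwise the script warns and falls back to "ns per ops"):

    python3 plot.py -f results.json                               # default "ns per ops" field, shown in a window
    python3 plot.py -f results.json -y "ns per ops" -o plot.png   # with -o, the figure is saved to a file instead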
doc/LaTeXmacros/common.sty
rba897d21 r2e9b59b

@@ line 11 @@
 %% Created On : Sat Apr 9 10:06:17 2016
 %% Last Modified By : Peter A. Buhr
-%% Last Modified On : Mon Feb 7 23:00:46 2022
-%% Update Count : 569
+%% Last Modified On : Sat Apr 2 17:35:23 2022
+%% Update Count : 570
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
@@ line 301 @@
 {=>}{$\Rightarrow$}2
 {->}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex\textgreater}2,
-defaultdialect={CFA},
 }% lstset
 }% CFAStyle
doc/LaTeXmacros/common.tex
rba897d21 r2e9b59b

@@ line 11 @@
 %% Created On : Sat Apr 9 10:06:17 2016
 %% Last Modified By : Peter A. Buhr
-%% Last Modified On : Mon Feb 7 23:00:08 2022
-%% Update Count : 552
+%% Last Modified On : Sat Apr 2 16:42:31 2022
+%% Update Count : 553
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
@@ line 306 @@
 {=>}{$\Rightarrow$}2
 {->}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex\textgreater}2,
-defaultdialect={CFA},
 }% lstset
 }% CFAStyle
doc/theses/mubeen_zulfiqar_MMath/Makefile
rba897d21 r2e9b59b 1 # directory for latex clutter files 1 # Configuration variables 2 2 3 Build = build 3 4 Figures = figures 4 5 Pictures = pictures 6 7 LaTMac = ../../LaTeXmacros 8 BibRep = ../../bibliography 9 5 10 TeXSRC = ${wildcard *.tex} 6 11 FigSRC = ${notdir ${wildcard ${Figures}/*.fig}} 7 12 PicSRC = ${notdir ${wildcard ${Pictures}/*.fig}} 8 BIBSRC = ${wildcard *.bib} 9 TeXLIB = .:../../LaTeXmacros:${Build}: # common latex macros 10 BibLIB = .:../../bibliography # common citation repository 13 BibSRC = ${wildcard *.bib} 14 15 TeXLIB = .:${LaTMac}:${Build}: 16 BibLIB = .:${BibRep}: 11 17 12 18 MAKEFLAGS = --no-print-directory # --silent 13 19 VPATH = ${Build} ${Figures} ${Pictures} # extra search path for file names used in document 14 20 15 ### Special Rules: 21 DOCUMENT = uw-ethesis.pdf 22 BASE = ${basename ${DOCUMENT}} # remove suffix 16 23 17 .PHONY: all clean 18 .PRECIOUS: %.dvi %.ps # do not delete intermediate files 19 20 ### Commands: 24 # Commands 21 25 22 26 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build} 23 BibTeX = BIBINPUTS=${BibLIB} bibtex27 BibTeX = BIBINPUTS=${BibLIB} && export BIBINPUTS && bibtex 24 28 #Glossary = INDEXSTYLE=${Build} makeglossaries-lite 25 29 26 # ## Rules and Recipes:30 # Rules and Recipes 27 31 28 DOC = uw-ethesis.pdf 29 BASE = ${DOC:%.pdf=%} # remove suffix 32 .PHONY : all clean # not file names 33 .PRECIOUS: %.dvi %.ps # do not delete intermediate files 34 .ONESHELL : 30 35 31 all : ${DOC}36 all : ${DOCUMENT} 32 37 33 clean :34 @rm -frv ${DOC } ${Build}38 clean : 39 @rm -frv ${DOCUMENT} ${Build} 35 40 36 # File Dependencies #41 # File Dependencies 37 42 38 ${Build}/%.dvi : ${TeXSRC} ${FigSRC:%.fig=%.tex} ${PicSRC:%.fig=%.pstex} ${BIBSRC}Makefile | ${Build}43 %.dvi : ${TeXSRC} ${FigSRC:%.fig=%.tex} ${PicSRC:%.fig=%.pstex} ${BibSRC} ${BibRep}/pl.bib ${LaTMac}/common.tex Makefile | ${Build} 39 44 ${LaTeX} ${BASE} 40 45 ${BibTeX} ${Build}/${BASE} 41 46 ${LaTeX} ${BASE} 42 # if ne dded, run latex again to get citations47 # if needed, run latex again to get citations 43 48 if fgrep -s "LaTeX Warning: Citation" ${basename $@}.log ; then ${LaTeX} ${BASE} ; fi 44 49 # ${Glossary} ${Build}/${BASE} … … 46 51 47 52 ${Build}: 48 mkdir $@53 mkdir -p $@ 49 54 50 55 %.pdf : ${Build}/%.ps | ${Build} -
doc/theses/mubeen_zulfiqar_MMath/allocator.tex
rba897d21 r2e9b59b 1 1 \chapter{Allocator} 2 2 3 \section{uHeap} 4 uHeap is a lightweight memory allocator. The objective behind uHeap is to design a minimal concurrent memory allocator that has new features and also fulfills GNU C Library requirements (FIX ME: cite requirements). 5 6 The objective of uHeap's new design was to fulfill following requirements: 7 \begin{itemize} 8 \item It should be concurrent and thread-safe for multi-threaded programs. 9 \item It should avoid global locks, on resources shared across all threads, as much as possible. 10 \item It's performance (FIX ME: cite performance benchmarks) should be comparable to the commonly used allocators (FIX ME: cite common allocators). 11 \item It should be a lightweight memory allocator. 12 \end{itemize} 3 This chapter presents a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code), called llheap (low-latency heap), for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading). 4 The new allocator fulfills the GNU C Library allocator API~\cite{GNUallocAPI}. 5 6 7 \section{llheap} 8 9 The primary design objective for llheap is low-latency across all allocator calls independent of application access-patterns and/or number of threads, \ie very seldom does the allocator have a delay during an allocator call. 10 (Large allocations requiring initialization, \eg zero fill, and/or copying are not covered by the low-latency objective.) 11 A direct consequence of this objective is very simple or no storage coalescing; 12 hence, llheap's design is willing to use more storage to lower latency. 13 This objective is apropos because systems research and industrial applications are striving for low latency and computers have huge amounts of RAM memory. 14 Finally, llheap's performance should be comparable with the current best allocators (see performance comparison in \VRef[Chapter]{c:Performance}). 15 16 % The objective of llheap's new design was to fulfill following requirements: 17 % \begin{itemize} 18 % \item It should be concurrent and thread-safe for multi-threaded programs. 19 % \item It should avoid global locks, on resources shared across all threads, as much as possible. 20 % \item It's performance (FIX ME: cite performance benchmarks) should be comparable to the commonly used allocators (FIX ME: cite common allocators). 21 % \item It should be a lightweight memory allocator. 22 % \end{itemize} 13 23 14 24 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 15 25 26 <<<<<<< HEAD 16 27 \section{Design choices for uHeap}\label{sec:allocatorSec} 17 28 uHeap's design was reviewed and changed to fulfill new requirements (FIX ME: cite allocator philosophy). For this purpose, following two designs of uHeapLmm were proposed: 18 19 \paragraph{Design 1: Centralized} 20 One heap, but lower bucket sizes are N-shared across KTs. 21 This design leverages the fact that 95\% of allocation requests are less than 512 bytes and there are only 3--5 different request sizes. 22 When KTs $\le$ N, the important bucket sizes are uncontented. 23 When KTs $>$ N, the free buckets are contented. 24 Therefore, threads are only contending for a small number of buckets, which are distributed among them to reduce contention. 
25 \begin{cquote} 29 ======= 30 \section{Design Choices} 31 >>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961 32 33 llheap's design was reviewed and changed multiple times throughout the thesis. 34 Some of the rejected designs are discussed because they show the path to the final design (see discussion in \VRef{s:MultipleHeaps}). 35 Note, a few simples tests for a design choice were compared with the current best allocators to determine the viability of a design. 36 37 38 \subsection{Allocation Fastpath} 39 \label{s:AllocationFastpath} 40 41 These designs look at the allocation/free \newterm{fastpath}, \ie when an allocation can immediately return free storage or returned storage is not coalesced. 42 \paragraph{T:1 model} 43 \VRef[Figure]{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are N-shared across KTs. 44 This design leverages the fact that 95\% of allocation requests are less than 1024 bytes and there are only 3--5 different request sizes. 45 When KTs $\le$ N, the common bucket sizes are uncontented; 46 when KTs $>$ N, the free buckets are contented and latency increases significantly. 47 In all cases, a KT must acquire/release a lock, contented or uncontented, along the fast allocation path because a bucket is shared. 48 Therefore, while threads are contending for a small number of buckets sizes, the buckets are distributed among them to reduce contention, which lowers latency; 49 however, picking N is workload specific. 50 51 \begin{figure} 52 \centering 53 \input{AllocDS1} 54 \caption{T:1 with Shared Buckets} 55 \label{f:T1SharedBuckets} 56 \end{figure} 57 58 Problems: 59 \begin{itemize} 60 \item 61 Need to know when a KT is created/destroyed to assign/unassign a shared bucket-number from the memory allocator. 62 \item 63 When no thread is assigned a bucket number, its free storage is unavailable. 64 \item 65 All KTs contend for the global-pool lock for initial allocations, before free-lists get populated. 66 \end{itemize} 67 Tests showed having locks along the allocation fast-path produced a significant increase in allocation costs and any contention among KTs produces a significant spike in latency. 68 69 \paragraph{T:H model} 70 \VRef[Figure]{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded across the KTs. 71 A KT can point directly to its assigned heap or indirectly through the corresponding heap bucket. 72 When KT $\le$ N, the heaps are uncontented; 73 when KTs $>$ N, the heaps are contented. 74 In all cases, a KT must acquire/release a lock, contented or uncontented along the fast allocation path because a heap is shared. 75 By adjusting N upwards, this approach reduces contention but increases storage (time versus space); 76 however, picking N is workload specific. 77 78 \begin{figure} 26 79 \centering 27 80 \input{AllocDS2} 28 \end{cquote} 29 Problems: need to know when a kernel thread (KT) is created and destroyed to know when to assign a shared bucket-number. 30 When no thread is assigned a bucket number, its free storage is unavailable. All KTs will be contended for one lock on sbrk for their initial allocations (before free-lists gets populated). 31 32 \paragraph{Design 2: Decentralized N Heaps} 33 Fixed number of heaps: shard the heap into N heaps each with a bump-area allocated from the @sbrk@ area. 34 Kernel threads (KT) are assigned to the N heaps. 35 When KTs $\le$ N, the heaps are uncontented. 
36 When KTs $>$ N, the heaps are contented. 37 By adjusting N, this approach reduces storage at the cost of speed due to contention. 38 In all cases, a thread acquires/releases a lock, contented or uncontented. 39 \begin{cquote} 40 \centering 41 \input{AllocDS1} 42 \end{cquote} 43 Problems: need to know when a KT is created and destroyed to know when to assign/un-assign a heap to the KT. 44 45 \paragraph{Design 3: Decentralized Per-thread Heaps} 46 Design 3 is similar to design 2 but instead of having an M:N model, it uses a 1:1 model. So, instead of having N heaos and sharing them among M KTs, Design 3 has one heap for each KT. 47 Dynamic number of heaps: create a thread-local heap for each kernel thread (KT) with a bump-area allocated from the @sbrk@ area. 48 Each KT will have its own exclusive thread-local heap. Heap will be uncontended between KTs regardless how many KTs have been created. 49 Operations on @sbrk@ area will still be protected by locks. 50 %\begin{cquote} 51 %\centering 52 %\input{AllocDS3} FIXME add figs 53 %\end{cquote} 54 Problems: We cannot destroy the heap when a KT exits because our dynamic objects have ownership and they are returned to the heap that created them when the program frees a dynamic object. All dynamic objects point back to their owner heap. If a thread A creates an object O, passes it to another thread B, and A itself exits. When B will free object O, O should return to A's heap so A's heap should be preserved for the lifetime of the whole program as their might be objects in-use of other threads that were allocated by A. Also, we need to know when a KT is created and destroyed to know when to create/destroy a heap for the KT. 55 56 \paragraph{Design 4: Decentralized Per-CPU Heaps} 57 Design 4 is similar to Design 3 but instead of having a heap for each thread, it creates a heap for each CPU. 58 Fixed number of heaps for a machine: create a heap for each CPU with a bump-area allocated from the @sbrk@ area. 59 Each CPU will have its own CPU-local heap. When the program does a dynamic memory operation, it will be entertained by the heap of the CPU where the process is currently running on. 60 Each CPU will have its own exclusive heap. Just like Design 3(FIXME cite), heap will be uncontended between KTs regardless how many KTs have been created. 61 Operations on @sbrk@ area will still be protected by locks. 62 To deal with preemtion during a dynamic memory operation, librseq(FIXME cite) will be used to make sure that the whole dynamic memory operation completes on one CPU. librseq's restartable sequences can make it possible to re-run a critical section and undo the current writes if a preemption happened during the critical section's execution. 63 %\begin{cquote} 64 %\centering 65 %\input{AllocDS4} FIXME add figs 66 %\end{cquote} 67 68 Problems: This approach was slower than the per-thread model. Also, librseq does not provide such restartable sequences to detect preemtions in user-level threading system which is important to us as CFA(FIXME cite) has its own threading system that we want to support. 69 70 Out of the four designs, Design 3 was chosen because of the following reasons. 71 \begin{itemize} 72 \item 73 Decentralized designes are better in general as compared to centralized design because their concurrency is better across all bucket-sizes as design 1 shards a few buckets of selected sizes while other designs shards all the buckets. Decentralized designes shard the whole heap which has all the buckets with the addition of sharding sbrk area. 
So Design 1 was eliminated. 74 \item 75 Design 2 was eliminated because it has a possibility of contention in-case of KT > N while Design 3 and 4 have no contention in any scenerio. 76 \item 77 Design 4 was eliminated because it was slower than Design 3 and it provided no way to achieve user-threading safety using librseq. We had to use CFA interruption handling to achive user-threading safety which has some cost to it. Desing 4 was already slower than Design 3, adding cost of interruption handling on top of that would have made it even slower. 78 \end{itemize} 79 80 81 \subsection{Advantages of distributed design} 82 83 The distributed design of uHeap is concurrent to work in multi-threaded applications. 84 85 Some key benefits of the distributed design of uHeap are as follows: 86 87 \begin{itemize} 88 \item 89 The bump allocation is concurrent as memory taken from sbrk is sharded across all heaps as bump allocation reserve. The call to sbrk will be protected using locks but bump allocation (on memory taken from sbrk) will not be contended once the sbrk call has returned. 90 \item 91 Low or almost no contention on heap resources. 92 \item 93 It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty. 94 \item 95 Distributed design avoids unnecassry locks on resources shared across all KTs. 96 \end{itemize} 81 \caption{T:H with Shared Heaps} 82 \label{f:THSharedHeaps} 83 \end{figure} 84 85 Problems: 86 \begin{itemize} 87 \item 88 Need to know when a KT is created/destroyed to assign/unassign a heap from the memory allocator. 89 \item 90 When no thread is assigned to a heap, its free storage is unavailable. 91 \item 92 Ownership issues arise (see \VRef{s:Ownership}). 93 \item 94 All KTs contend for the local/global-pool lock for initial allocations, before free-lists get populated. 95 \end{itemize} 96 Tests showed having locks along the allocation fast-path produced a significant increase in allocation costs and any contention among KTs produces a significant spike in latency. 97 98 \paragraph{T:H model, H = number of CPUs} 99 This design is the T:H model but H is set to the number of CPUs on the computer or the number restricted to an application, \eg via @taskset@. 100 (See \VRef[Figure]{f:THSharedHeaps} but with a heap bucket per CPU.) 101 Hence, each CPU logically has its own private heap and local pool. 102 A memory operation is serviced from the heap associated with the CPU executing the operation. 103 This approach removes fastpath locking and contention, regardless of the number of KTs mapped across the CPUs, because only one KT is running on each CPU at a time (modulo operations on the global pool and ownership). 104 This approach is essentially an M:N approach where M is the number if KTs and N is the number of CPUs. 105 106 Problems: 107 \begin{itemize} 108 \item 109 Need to know when a CPU is added/removed from the @taskset@. 110 \item 111 Need a fast way to determine the CPU a KT is executing on to access the appropriate heap. 112 \item 113 Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}. 
114 \begin{quote} 115 A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable} 116 \end{quote} 117 If a KT is preempted during an allocation operation, the operating system can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness. 118 Note, the serially-reusable problem can occur in sequential programs with preemption, if the signal handler calls the preempted function, unless the function is serially reusable. 119 Essentially, the serially-reusable problem is a race condition on an unprotected critical section, where the operating system is providing the second thread via the signal handler. 120 121 Library @librseq@~\cite{librseq} was used to perform a fast determination of the CPU and to ensure all memory operations complete on one CPU using @librseq@'s restartable sequences, which restart the critical section after undoing its writes, if the critical section is preempted. 122 \end{itemize} 123 Tests showed that @librseq@ can determine the particular CPU quickly but setting up the restartable critical-section along the allocation fast-path produced a significant increase in allocation costs. 124 Also, the number of undoable writes in @librseq@ is limited and restartable sequences cannot deal with user-level thread (UT) migration across KTs. 125 For example, UT$_1$ is executing a memory operation by KT$_1$ on CPU$_1$ and a time-slice preemption occurs. 126 The signal handler context switches UT$_1$ onto the user-level ready-queue and starts running UT$_2$ on KT$_1$, which immediately calls a memory operation. 127 Since KT$_1$ is still executing on CPU$_1$, @librseq@ takes no action because it assumes KT$_1$ is still executing the same critical section. 128 Then UT$_1$ is scheduled onto KT$_2$ by the user-level scheduler, and its memory operation continues in parallel with UT$_2$ using references into the heap associated with CPU$_1$, which corrupts CPU$_1$'s heap. 129 If @librseq@ had an @rseq_abort@ which: 130 \begin{enumerate} 131 \item 132 Marked the current restartable critical-section as cancelled so it restarts when attempting to commit. 133 \item 134 Do nothing if there is no current restartable critical section in progress. 135 \end{enumerate} 136 Then @rseq_abort@ could be called on the backside of a user-level context-switching. 137 A feature similar to this idea might exist for hardware transactional-memory. 138 A significant effort was made to make this approach work but its complexity, lack of robustness, and performance costs resulted in its rejection. 139 140 \paragraph{1:1 model} 141 This design is the T:H model with T = H, where there is one thread-local heap for each KT. 142 (See \VRef[Figure]{f:THSharedHeaps} but with a heap bucket per KT and no bucket or local-pool lock.) 143 Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted. 144 Heaps are uncontended for a KTs memory operations to its heap (modulo operations on the global pool and ownership). 145 146 Problems: 147 \begin{itemize} 148 \item 149 Need to know when a KT is starts/terminates to create/delete its heap. 150 151 \noindent 152 It is possible to leverage constructors/destructors for thread-local objects to get a general handle on when a KT starts/terminates. 
153 \item 154 There is a classic \newterm{memory-reclamation} problem for ownership because storage passed to another thread can be returned to a terminated heap. 155 156 \noindent 157 The classic solution only deletes a heap after all referents are returned, which is complex. 158 The cheap alternative is for heaps to persist for program duration to handle outstanding referent frees. 159 If old referents return storage to a terminated heap, it is handled in the same way as an active heap. 160 To prevent heap blowup, terminated heaps can be reused by new KTs, where a reused heap may be populated with free storage from a prior KT (external fragmentation). 161 In most cases, heap blowup is not a problem because programs have a small allocation set-size, so the free storage from a prior KT is apropos for a new KT. 162 \item 163 There can be significant external fragmentation as the number of KTs increases. 164 165 \noindent 166 In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs. 167 Since the number of CPUs is relatively small, >~1024, and a heap relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs. 168 \item 169 There is the same serially-reusable problem with UTs migrating across KTs. 170 \end{itemize} 171 Tests showed this design produced the closest performance match with the best current allocators, and code inspection showed most of these allocators use different variations of this approach. 172 173 174 \vspace{5pt} 175 \noindent 176 The conclusion from this design exercise is: any atomic fence, atomic instruction (lock free), or lock along the allocation fastpath produces significant slowdown. 177 For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps maybe shared by multiple threads, even when KTs $\le$ N. 178 For the T:H=CPU and 1:1 models, locking is eliminated along the allocation fastpath. 179 However, T:H=CPU has poor operating-system support to determine the CPU id (heap id) and prevent the serially-reusable problem for KTs. 180 More operating system support is required to make this model viable, but there is still the serially-reusable problem with user-level threading. 181 Leaving the 1:1 model with no atomic actions along the fastpath and no special operating-system support required. 182 The 1:1 model still has the serially-reusable problem with user-level threading, which is addressed in \VRef{s:UserlevelThreadingSupport}, and the greatest potential for heap blowup for certain allocation patterns. 183 184 185 % \begin{itemize} 186 % \item 187 % A decentralized design is better to centralized design because their concurrency is better across all bucket-sizes as design 1 shards a few buckets of selected sizes while other designs shards all the buckets. Decentralized designs shard the whole heap which has all the buckets with the addition of sharding @sbrk@ area. So Design 1 was eliminated. 188 % \item 189 % Design 2 was eliminated because it has a possibility of contention in-case of KT > N while Design 3 and 4 have no contention in any scenario. 190 % \item 191 % Design 3 was eliminated because it was slower than Design 4 and it provided no way to achieve user-threading safety using librseq. We had to use CFA interruption handling to achieve user-threading safety which has some cost to it. 
192 % that because of 4 was already slower than Design 3, adding cost of interruption handling on top of that would have made it even slower. 193 % \end{itemize} 194 % Of the four designs for a low-latency memory allocator, the 1:1 model was chosen for the following reasons: 195 196 % \subsection{Advantages of distributed design} 197 % 198 % The distributed design of llheap is concurrent to work in multi-threaded applications. 199 % Some key benefits of the distributed design of llheap are as follows: 200 % \begin{itemize} 201 % \item 202 % The bump allocation is concurrent as memory taken from @sbrk@ is sharded across all heaps as bump allocation reserve. The call to @sbrk@ will be protected using locks but bump allocation (on memory taken from @sbrk@) will not be contended once the @sbrk@ call has returned. 203 % \item 204 % Low or almost no contention on heap resources. 205 % \item 206 % It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty. 207 % \item 208 % Distributed design avoids unnecessary locks on resources shared across all KTs. 209 % \end{itemize} 210 211 \subsection{Allocation Latency} 212 213 A primary goal of llheap is low latency. 214 Two forms of latency are internal and external. 215 Internal latency is the time to perform an allocation, while external latency is time to obtain/return storage from/to the operating system. 216 Ideally latency is $O(1)$ with a small constant. 217 218 To obtain $O(1)$ internal latency means no searching on the allocation fastpath, largely prohibits coalescing, which leads to external fragmentation. 219 The mitigating factor is that most programs have well behaved allocation patterns, where the majority of allocation operations can be $O(1)$, and heap blowup does not occur without coalescing (although the allocation footprint may be slightly larger). 220 221 To obtain $O(1)$ external latency means obtaining one large storage area from the operating system and subdividing it across all program allocations, which requires a good guess at the program storage high-watermark and potential large external fragmentation. 222 Excluding real-time operating-systems, operating-system operations are unbounded, and hence some external latency is unavoidable. 223 The mitigating factor is that operating-system calls can often be reduced if a programmer has a sense of the storage high-watermark and the allocator is capable of using this information (see @malloc_expansion@ \VPageref{p:malloc_expansion}). 224 Furthermore, while operating-system calls are unbounded, many are now reasonably fast, so their latency is tolerable and infrequent. 225 97 226 98 227 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 99 228 100 \section{uHeap Structure} 101 102 As described in (FIXME cite 2.4) uHeap uses following features of multi-threaded memory allocators. 103 \begin{itemize} 104 \item 105 uHeap has multiple heaps without a global heap and uses 1:1 model. (FIXME cite 2.5 1:1 model) 106 \item 107 uHeap uses object ownership. (FIXME cite 2.5.2) 108 \item 109 uHeap does not use object containers (FIXME cite 2.6) or any coalescing technique. Instead each dynamic object allocated by uHeap has a header than contains bookkeeping information. 110 \item 111 Each thread-local heap in uHeap has its own allocation buffer that is taken from the system using sbrk() call. 
(FIXME cite 2.7) 112 \item 113 Unless a heap is freeing an object that is owned by another thread's heap or heap is using sbrk() system call, uHeap is mostly lock-free which eliminates most of the contention on shared resources. (FIXME cite 2.8) 114 \end{itemize} 115 116 As uHeap uses a heap per-thread model to reduce contention on heap resources, we manage a list of heaps (heap-list) that can be used by threads. The list is empty at the start of the program. When a kernel thread (KT) is created, we check if heap-list is empty. If no then a heap is removed from the heap-list and is given to this new KT to use exclusively. If yes then a new heap object is created in dynamic memory and is given to this new KT to use exclusively. When a KT exits, its heap is not destroyed but instead its heap is put on the heap-list and is ready to be reused by new KTs. 117 118 This reduces the memory footprint as the objects on free-lists of a KT that has exited can be reused by a new KT. Also, we preserve all the heaps that were created during the lifetime of the program till the end of the program. uHeap uses object ownership where an object is freed to the free-buckets of the heap that allocated it. Even after a KT A has exited, its heap has to be preserved as there might be objects in-use of other threads that were initially allocated by A and the passed to other threads. 229 \section{llheap Structure} 230 231 \VRef[Figure]{f:llheapStructure} shows the design of llheap, which uses the following features: 232 \begin{itemize} 233 \item 234 1:1 multiple-heap model to minimize the fastpath, 235 \item 236 can be built with or without heap ownership, 237 \item 238 headers per allocation versus containers, 239 \item 240 no coalescing to minimize latency, 241 \item 242 global heap memory (pool) obtained from the operating system using @mmap@ to create and reuse heaps needed by threads, 243 \item 244 local reserved memory (pool) per heap obtained from global pool, 245 \item 246 global reserved memory (pool) obtained from the operating system using @sbrk@ call, 247 \item 248 optional fast-lookup table for converting allocation requests into bucket sizes, 249 \item 250 optional statistic-counters table for accumulating counts of allocation operations. 251 \end{itemize} 119 252 120 253 \begin{figure} 121 254 \centering 255 <<<<<<< HEAD 122 256 \includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps} 123 257 \caption{uHeap Structure} 124 258 \label{fig:heapStructureFig} 259 ======= 260 % \includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps} 261 \input{llheap} 262 \caption{llheap Structure} 263 \label{f:llheapStructure} 264 >>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961 125 265 \end{figure} 126 266 127 Each heap uses seggregated free-buckets that have free objects of a specific size. Each free-bucket of a specific size has following 2 lists in it: 128 \begin{itemize} 129 \item 130 Free list is used when a thread is freeing an object that is owned by its own heap so free list does not use any locks/atomic-operations as it is only used by the owner KT. 131 \item 132 Away list is used when a thread A is freeing an object that is owned by another KT B's heap. This object should be freed to the owner heap (B's heap) so A will place the object on the away list of B. Away list is lock protected as it is shared by all other threads. 133 \end{itemize} 134 135 When a dynamic object of a size S is requested. The thread-local heap will check if S is greater than or equal to the mmap threshhold. 
Any request larger than the mmap threshhold is fulfilled by allocating an mmap area of that size and such requests are not allocated on sbrk area. The value of this threshhold can be changed using mallopt routine but the new value should not be larger than our biggest free-bucket size. 136 137 Algorithm~\ref{alg:heapObjectAlloc} briefly shows how an allocation request is fulfilled. 138 139 \begin{algorithm} 140 \caption{Dynamic object allocation of size S}\label{alg:heapObjectAlloc} 267 llheap starts by creating an array of $N$ global heaps from storage obtained using @mmap@, where $N$ is the number of computer cores, that persists for program duration. 268 There is a global bump-pointer to the next free heap in the array. 269 When this array is exhausted, another array is allocated. 270 There is a global top pointer for a heap intrusive link to chain free heaps from terminated threads. 271 When statistics are turned on, there is a global top pointer for a heap intrusive link to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@. 272 273 When a KT starts, a heap is allocated from the current array for exclusive use by the KT. 274 When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of heaps. 275 The free heaps is a stack so hot storage is reused first. 276 Preserving all heaps created during the program lifetime, solves the storage lifetime problem, when ownership is used. 277 This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially. 278 llheap can be configured with object ownership, where an object is freed to the heap from which it is allocated, or object no-ownership, where an object is freed to the KT's current heap. 279 280 Each heap uses segregated free-buckets that have free objects distributed across 91 different sizes from 16 to 4M. 281 The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the operating system. 282 Each free bucket of a specific size has the following two lists: 283 \begin{itemize} 284 \item 285 A free stack used solely by the KT heap-owner, so push/pop operations do not require locking. 286 The free objects are a stack so hot storage is reused first. 287 \item 288 For ownership, a shared away-stack for KTs to return storage allocated by other KTs, so push/pop operations require locking. 289 When the free stack is empty, the entire ownership stack is removed and becomes the head of the corresponding free stack. 290 \end{itemize} 291 292 Algorithm~\ref{alg:heapObjectAlloc} shows the allocation outline for an object of size $S$. 293 First, the allocation is divided into small (@sbrk@) or large (@mmap@). 294 For large allocations, the storage is mapped directly from the operating system. 295 For small allocations, $S$ is quantized into a bucket size. 296 Quantizing is performed using a binary search over the ordered bucket array. 297 An optional optimization is fast lookup $O(1)$ for sizes < 64K from a 64K array of type @char@, where each element has an index to the corresponding bucket. 298 (Type @char@ restricts the number of bucket sizes to 256.) 299 For $S$ > 64K, a binary search is used. 
300 Then, the allocation storage is obtained from the following locations (in order), with increasing latency. 301 \begin{enumerate}[topsep=0pt,itemsep=0pt,parsep=0pt] 302 \item 303 bucket's free stack, 304 \item 305 bucket's away stack, 306 \item 307 heap's local pool 308 \item 309 global pool 310 \item 311 operating system (@sbrk@) 312 \end{enumerate} 313 314 \begin{figure} 315 \vspace*{-10pt} 316 \begin{algorithm}[H] 317 \small 318 \caption{Dynamic object allocation of size $S$}\label{alg:heapObjectAlloc} 141 319 \begin{algorithmic}[1] 142 320 \State $\textit{O} \gets \text{NULL}$ 143 \If {$S < \textit{mmap-threshhold}$} 144 \State $\textit{B} \gets (\text{smallest free-bucket} \geq S)$ 321 \If {$S >= \textit{mmap-threshhold}$} 322 \State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$ 323 \Else 324 \State $\textit{B} \gets \text{smallest free-bucket} \geq S$ 145 325 \If {$\textit{B's free-list is empty}$} 146 326 \If {$\textit{B's away-list is empty}$} 147 327 \If {$\textit{heap's allocation buffer} < S$} 148 \State $\text{get allocation buffer using system call sbrk()}$328 \State $\text{get allocation from global pool (which might call \lstinline{sbrk})}$ 149 329 \EndIf 150 330 \State $\textit{O} \gets \text{bump allocate an object of size S from allocation buffer}$ … … 157 337 \EndIf 158 338 \State $\textit{O's owner} \gets \text{B}$ 159 \Else160 \State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$161 339 \EndIf 162 340 \State $\Return \textit{ O}$ … … 164 342 \end{algorithm} 165 343 344 <<<<<<< HEAD 166 345 Algorithm~\ref{alg:heapObjectFreeOwn} shows how a free request is fulfilled if object ownership is turned on. Algorithm~\ref{alg:heapObjectFreeNoOwn} shows how the same free request is fulfilled without object ownership. 167 346 … … 171 350 \If {$\textit{A was mmap-ed}$} 172 351 \State $\text{return A's dynamic memory to system using system call munmap}$ 352 ======= 353 \vspace*{-15pt} 354 \begin{algorithm}[H] 355 \small 356 \caption{Dynamic object free at address $A$ with object ownership}\label{alg:heapObjectFreeOwn} 357 \begin{algorithmic}[1] 358 \If {$\textit{A mapped allocation}$} 359 \State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$ 360 >>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961 173 361 \Else 174 362 \State $\text{B} \gets \textit{O's owner}$ … … 181 369 \end{algorithmic} 182 370 \end{algorithm} 371 <<<<<<< HEAD 183 372 184 373 \begin{algorithm} … … 199 388 \end{algorithm} 200 389 390 ======= 391 >>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961 392 393 \vspace*{-15pt} 394 \begin{algorithm}[H] 395 \small 396 \caption{Dynamic object free at address $A$ without object ownership}\label{alg:heapObjectFreeNoOwn} 397 \begin{algorithmic}[1] 398 \If {$\textit{A mapped allocation}$} 399 \State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$ 400 \Else 401 \State $\text{B} \gets \textit{O's owner}$ 402 \If {$\textit{B is thread-local heap's bucket}$} 403 \State $\text{push A to B's free-list}$ 404 \Else 405 \State $\text{C} \gets \textit{thread local heap's bucket with same size as B}$ 406 \State $\text{push A to C's free-list}$ 407 \EndIf 408 \EndIf 409 \end{algorithmic} 410 \end{algorithm} 411 \end{figure} 412 413 Algorithm~\ref{alg:heapObjectFreeOwn} shows the de-allocation (free) outline for an object at address $A$ with ownership. 414 First, the address is divided into small (@sbrk@) or large (@mmap@). 
415 For large allocations, the storage is unmapped back to the operating system. 416 For small allocations, the bucket associated with the request size is retrieved. 417 If the bucket is local to the thread, the allocation is pushed onto the thread's associated bucket. 418 If the bucket is not local to the thread, the allocation is pushed onto the owning thread's associated away stack. 419 420 Algorithm~\ref{alg:heapObjectFreeNoOwn} shows the de-allocation (free) outline for an object at address $A$ without ownership. 421 The algorithm is the same as for ownership except if the bucket is not local to the thread. 422 Then the corresponding bucket of the owner thread is computed for the deallocating thread, and the allocation is pushed onto the deallocating thread's bucket. 423 424 Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through routines @malloc@/@free@, which are the only routines to directly access and manage the internal data structures of the heap. 425 Other allocation operations, \eg @calloc@, @memalign@, and @realloc@, are composed of calls to @malloc@ and possibly @free@, and may manipulate header information after storage is allocated. 426 This design simplifies heap-management code during development and maintenance. 427 428 429 \subsection{Alignment} 430 431 All dynamic memory allocations must have a minimum storage alignment for the contained object(s). 432 Often the minimum memory alignment, M, is the bus width (32 or 64-bit) or the largest register (double, long double) or largest atomic instruction (DCAS) or vector data (MMMX). 433 In general, the minimum storage alignment is 8/16-byte boundary on 32/64-bit computers. 434 For consistency, the object header is normally aligned at this same boundary. 435 Larger alignments must be a power of 2, such page alignment (4/8K). 436 Any alignment request, N, $\le$ the minimum alignment is handled as a normal allocation with minimal alignment. 437 438 For alignments greater than the minimum, the obvious approach for aligning to address @A@ is: compute the next address that is a multiple of @N@ after the current end of the heap, @E@, plus room for the header before @A@ and the size of the allocation after @A@, moving the end of the heap to @E'@. 439 \begin{center} 440 \input{Alignment1} 441 \end{center} 442 The storage between @E@ and @H@ is chained onto the appropriate free list for future allocations. 443 This approach is also valid within any sufficiently large free block, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage. 444 In this approach, the aligned address @A@ is the same as the allocated storage address @P@, \ie @P@ $=$ @A@ for all allocation routines, which simplifies deallocation. 445 However, if there are a large number of aligned requests, this approach leads to memory fragmentation from the small free areas around the aligned object. 446 As well, it does not work for large allocations, where many memory allocators switch from program @sbrk@ to operating-system @mmap@. 447 The reason is that @mmap@ only starts on a page boundary, and it is difficult to reuse the storage before the alignment boundary for other requests. 448 Finally, this approach is incompatible with allocator designs that funnel allocation requests through @malloc@ as it directly manipulates management information within the allocator to optimize the space/time of a request. 
449 450 Instead, llheap alignment is accomplished by making a \emph{pessimistically} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg: 451 \begin{center} 452 \input{Alignment2} 453 \end{center} 454 The amount of storage necessary is @alignment - M + size@, which ensures there is an address, @A@, after the storage returned from @malloc@, @P@, that is a multiple of @alignment@ followed by sufficient storage for the data object. 455 The approach is pessimistic because if @P@ already has the correct alignment @N@, the initial allocation has already requested sufficient space to move to the next multiple of @N@. 456 For this special case, there is @alignment - M@ bytes of unused storage after the data object, which subsequently can be used by @realloc@. 457 458 Note, the address returned is @A@, which is subsequently returned to @free@. 459 However, to correctly free the allocated object, the value @P@ must be computable, since that is the value generated by @malloc@ and returned within @memalign@. 460 Hence, there must be a mechanism to detect when @P@ $\neq$ @A@ and how to compute @P@ from @A@. 461 462 The llheap approach uses two headers: 463 the \emph{original} header associated with a memory allocation from @malloc@, and a \emph{fake} header within this storage before the alignment boundary @A@, which is returned from @memalign@, e.g.: 464 \begin{center} 465 \input{Alignment2Impl} 466 \end{center} 467 Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments of @M@ or greater. 468 When @P@ $\neq$ @A@, the minimum distance between @P@ and @A@ is @M@ bytes, due to the pessimistic storage-allocation. 469 Therefore, there is always room for an @M@-byte fake header before @A@. 470 471 The fake header must supply an indicator to distinguish it from a normal header and the location of address @P@ generated by @malloc@. 472 This information is encoded as an offset from A to P and the initialize alignment (discussed in \VRef{s:ReallocStickyProperties}). 473 To distinguish a fake header from a normal header, the least-significant bit of the alignment is used because the offset participates in multiple calculations, while the alignment is just remembered data. 474 \begin{center} 475 \input{FakeHeader} 476 \end{center} 477 478 479 \subsection{\lstinline{realloc} and Sticky Properties} 480 \label{s:ReallocStickyProperties} 481 482 Allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually. 483 \begin{flushleft} 484 \begin{tabular}{ll} 485 \multicolumn{1}{c}{\textbf{realloc pattern}} & \multicolumn{1}{c}{\textbf{manually}} \\ 486 \begin{lstlisting} 487 T * naddr = realloc( oaddr, newSize ); 488 489 490 491 \end{lstlisting} 492 & 493 \begin{lstlisting} 494 T * naddr = (T *)malloc( newSize ); $\C[2.4in]{// new storage}$ 495 memcpy( naddr, addr, oldSize ); $\C{// copy old bytes}$ 496 free( addr ); $\C{// free old storage}$ 497 addr = naddr; $\C{// change pointer}\CRT$ 498 \end{lstlisting} 499 \end{tabular} 500 \end{flushleft} 501 The realloc pattern leverages available storage at the end of an allocation due to bucket sizes, possibly eliminating a new allocation and copying. 502 This pattern is not used enough to reduce storage management costs. 
503 In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the pattern. 504 505 The hidden problem for this pattern is the effect of zero fill and alignment with respect to reallocation. 506 Are these properties transient or persistent (``sticky'')? 507 For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size. 508 That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are initial allocation properties preserve? 509 Currently, allocation properties are not preserved, so subsequent use of @realloc@ storage may cause inefficient execution or errors due to lack of zero fill or alignment. 510 This silent problem is unintuitive to programmers and difficult to locate because it is transient. 511 To prevent these problems, llheap preserves initial allocation properties for the lifetime of an allocation and the semantics of @realloc@ are augmented to preserve these properties, with additional query routines. 512 This change makes the realloc pattern efficient and safe. 513 514 515 \subsection{Header} 516 517 To preserve allocation properties requires storing additional information with an allocation, 518 The only available location is the header, where \VRef[Figure]{f:llheapNormalHeader} shows the llheap storage layout. 519 The header has two data field sized appropriately for 32/64-bit alignment requirements. 520 The first field is a union of three values: 521 \begin{description} 522 \item[bucket pointer] 523 is for allocated storage and points back to the bucket associated with this storage requests (see \VRef[Figure]{f:llheapStructure} for the fields accessible in a bucket). 524 \item[mapped size] 525 is for mapped storage and is the storage size for use in unmapping. 526 \item[next free block] 527 is for free storage and is an intrusive pointer chaining same-size free blocks onto a bucket's free stack. 528 \end{description} 529 The second field remembers the request size versus the allocation (bucket) size, \eg request 42 bytes which is rounded up to 64 bytes. 530 Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors. 531 532 \begin{figure} 533 \centering 534 \input{Header} 535 \caption{llheap Normal Header} 536 \label{f:llheapNormalHeader} 537 \end{figure} 538 539 The low-order 3-bits of the first field are \emph{unused} for any stored values, whereas the second field may use all of its bits. 540 The 3 unused bits are used to represent mapped allocation, zero filled, and alignment, respectively. 541 Note, the alignment bit is not used in the normal header and the zero-filled/mapped bits are not used in the fake header. 542 This implementation allows a fast test if any of the lower 3-bits are on (@&@ and compare). 543 If no bits are on, it implies a basic allocation, which is handled quickly; 544 otherwise, the bits are analysed and appropriate actions are taken for the complex cases. 545 Since most allocations are basic, this implementation results in a significant performance gain along the allocation and free fastpath. 546 547 548 \section{Statistics and Debugging} 549 550 llheap can be built to accumulate fast and largely contention-free allocation statistics to help understand allocation behaviour. 
548 \section{Statistics and Debugging}
549
550 llheap can be built to accumulate fast and largely contention-free allocation statistics to help understand allocation behaviour.
551 Incrementing statistic counters must occur along the allocation fastpath.
552 As noted, any atomic operation along the fastpath produces a significant increase in allocation costs.
553 To make statistics performant enough for use on running systems, each heap has its own set of statistic counters, so heap operations do not require atomic operations.
554
555 To locate all statistic counters, heaps are linked together in statistics mode, and this list is locked and traversed to sum all counters across heaps.
556 Note, the list is locked to prevent errors traversing an active list;
557 the statistics counters are not locked and can flicker during accumulation, which is not an issue given atomic read/write of the individual counters.
558 \VRef[Figure]{f:StatisticsOutput} shows an example of statistics output, which covers all allocation operations and information about deallocating storage not owned by a thread.
559 No other memory allocator studied provides as comprehensive statistical information.
560 Finally, these statistics were invaluable during the development of this thesis for debugging and verifying correctness, and hence, should be equally valuable to application developers.
561
562 \begin{figure}
563 \begin{lstlisting}
564 Heap statistics: (storage request / allocation)
565 malloc >0 calls 2,766; 0 calls 2,064; storage 12,715 / 13,367 bytes
566 aalloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
567 calloc >0 calls 6; 0 calls 0; storage 1,008 / 1,104 bytes
568 memalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
569 amemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
570 cmemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
571 resize >0 calls 0; 0 calls 0; storage 0 / 0 bytes
572 realloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
573 free !null calls 2,766; null calls 4,064; storage 12,715 / 13,367 bytes
574 away pulls 0; pushes 0; storage 0 / 0 bytes
575 sbrk calls 1; storage 10,485,760 bytes
576 mmap calls 10,000; storage 10,000 / 10,035 bytes
577 munmap calls 10,000; storage 10,000 / 10,035 bytes
578 threads started 4; exited 3
579 heaps new 4; reused 0
580 \end{lstlisting}
581 \caption{Statistics Output}
582 \label{f:StatisticsOutput}
583 \end{figure}
584
585 llheap can also be built with debug checking, which inserts many asserts along all allocation paths.
586 These assertions detect incorrect allocation usage, like double frees, unfreed storage, or memory corruption where internal values (like header fields) are overwritten.
587 These checks are best effort as opposed to complete allocation checking as in @valgrind@.
588 Nevertheless, the checks detect many allocation problems.
589 There is an unfortunate problem in detecting unfreed storage because some library routines assume their allocations have program-lifetime duration, and hence, do not free their storage.
590 For example, @printf@ allocates a 1024-byte buffer on first call and never deletes this buffer.
591 To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ \VPageref{p:malloc_unfreed}), and it is subtracted from the total allocate/free difference.
592 Determining the amount of never-freed storage is annoying, but once done, any warnings of unfreed storage are application related.
593
594 Tests indicate only a 30\% performance cost when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistics is mitigated by limited calls, often only one at the end of the program.
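A minimal C sketch of this design follows: each heap increments its own plain (non-atomic) counters, and a locked, global list of heaps is traversed only when totals are requested.
The structure, field, and lock names are illustrative assumptions rather than llheap's actual code.
\begin{lstlisting}
#include <pthread.h>
#include <stddef.h>

struct HeapStats { size_t mallocCalls, mallocBytes, freeCalls; /* ... */ };

struct Heap {
	struct HeapStats stats;					// private counters => no atomics on the fastpath
	struct Heap * nextHeap;					// link for statistics-mode traversal
	// ... buckets, reserved storage, etc.
};

static struct Heap * heapList;				// all heaps, linked in statistics mode
static pthread_mutex_t heapListLock = PTHREAD_MUTEX_INITIALIZER;

// Called rarely (e.g., by malloc_stats): the list is locked so heaps cannot be
// added or removed mid-traversal, but the counters themselves are not locked
// and may change (flicker) while being summed.
struct HeapStats sumStats( void ) {
	struct HeapStats total = { 0, 0, 0 };
	pthread_mutex_lock( &heapListLock );
	for ( struct Heap * h = heapList; h != NULL; h = h->nextHeap ) {
		total.mallocCalls += h->stats.mallocCalls;
		total.mallocBytes += h->stats.mallocBytes;
		total.freeCalls += h->stats.freeCalls;
	} // for
	pthread_mutex_unlock( &heapListLock );
	return total;
}
\end{lstlisting}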
595 596 597 \section{User-level Threading Support}
598 \label{s:UserlevelThreadingSupport}
599
600 The serially-reusable problem (see \VRef{s:AllocationFastpath}) occurs for kernel threads in the ``T:H, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
601 The solution is to prevent interrupts that can result in a CPU or KT change during operations that are logically critical sections.
602 Locking these critical sections negates any attempt for a quick fastpath and results in high contention.
603 For user-level threading, the serially-reusable problem appears with time slicing for preemptable scheduling, as the signal handler context switches to another user-level thread.
604 Without time slicing, a user thread performing a long computation can prevent the execution of (starve) other threads.
605 To prevent starvation for an allocation-active thread, \ie the time slice always triggers in an allocation critical-section for one thread, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
606 The rollforward flag is tested at the end of each allocation funnel routine (see \VPageref{p:FunnelRoutine}), and if set, it is reset and a voluntary yield (context switch) is performed to allow other threads to execute.
607
608 llheap uses two techniques to detect when execution is in an allocation operation or a routine called from an allocation operation, to abort any time slice during this period.
609 On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting thread-local flags so the signal handler aborts immediately.
610 On the fastpath, disabling/enabling interrupts is too expensive as accessing thread-local storage can be expensive and is not thread-safe.
611 For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing.
612 Hence, there is a window between loading the thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.
613
614 The fast technique defines a special code section and places all non-interruptible routines in this section.
615 The linker places all code in this section into a contiguous block of memory, but the order of routines within the block is unspecified.
616 Then, the signal handler compares the program counter at the point of interrupt with the start and end address of the non-interruptible section, and if executing within this section, aborts the time slice and sets the rollforward flag.
617 This technique is fragile because any calls in the non-interruptible code outside of the non-interruptible section (like @sbrk@) must be bracketed with disable/enable interrupts and these calls must be along the slowpath.
618 Hence, for correctness, this approach requires inspection of the generated assembler code for routines placed in the non-interruptible section.
619 This issue is mitigated by the llheap funnel design so only funnel routines and a few statistics routines are placed in the non-interruptible section and their assembler code examined.
620 These techniques are used in both the \uC and \CFA versions of llheap, where both of these systems have user-level threading.
621
622
623 \section{Bootstrapping}
624
625 There are problems bootstrapping a memory allocator.
626 \begin{enumerate}
627 \item
628 Programs can be statically or dynamically linked.
629 \item
630 The order the linker schedules startup code is poorly supported.
631 \item
632 Knowing a KT's start and end independently from the KT code is difficult.
633 \end{enumerate}
634
635 For static linking, the allocator is loaded with the program.
636 Hence, allocation calls immediately invoke the allocator operation defined by the loaded allocation library and there is only one memory allocator used in the program.
637 This approach allows allocator substitution by placing an allocation library before any other in the linked/load path.
638
639 Allocator substitution is similar for dynamic linking, but the problem is that the dynamic loader starts first and needs to perform dynamic allocations \emph{before} the substitution allocator is loaded.
640 As a result, the dynamic loader uses a default allocator until the substitution allocator is loaded, after which all allocation operations are handled by the substitution allocator, including those from the dynamic loader.
641 Hence, some part of the @sbrk@ area may be used by the default allocator and statistics about allocation operations cannot be correct.
642 Furthermore, dynamic linking goes through trampolines, so there is an additional cost along the allocator fastpath for all allocation operations.
643 Testing showed up to a 5\% performance decrease for dynamic linking compared to static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.
644
645 All allocator libraries need to perform startup code to initialize data structures, such as the heap array for llheap.
646 The problem is getting this initialization done before the first allocator call.
647 However, there does not seem to be a mechanism to tell either the static or dynamic loader to first perform initialization code before any calls to a loaded library.
648 As a result, calls to allocation routines can occur without initialization.
649 To deal with this problem, it is necessary to put a conditional initialization check along the allocation fastpath to trigger initialization (singleton pattern).
650
651 Two other important execution points are program startup and termination, which include prologue and epilogue code to bootstrap a program, of which programmers are unaware.
652 For example, dynamic-memory allocations before/after the application starts should not be considered in statistics because the application does not make these calls.
653 llheap establishes these two points using routines:
654 \begin{lstlisting}
655 __attribute__(( constructor( 100 ) )) static void startup( void ) {
656 	// clear statistic counters
657 	// reset allocUnfreed counter
658 }
659 __attribute__(( destructor( 100 ) )) static void shutdown( void ) {
660 	// sum allocUnfreed for all heaps
661 	// subtract global unfreed storage
662 	// if allocUnfreed > 0 then print warning message
663 }
664 \end{lstlisting}
665 which use global constructor/destructor priority 100, where the linker calls these routines at program prologue/epilogue in increasing/decreasing order of priority.
666 Application programs may only use global constructor/destructor priorities greater than 100.
667 Hence, @startup@ is called after the program prologue but before the application starts, and @shutdown@ is called after the application terminates but before the program epilogue.
668 By resetting counters in @startup@, prologue allocations are ignored, and checking unfreed storage in @shutdown@ checks only application memory management, ignoring the program epilogue.
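The conditional-initialization (singleton) check mentioned above can be sketched in C as follows; the flag and routine names, and the helper @doMalloc@, are illustrative assumptions rather than llheap's actual identifiers.
\begin{lstlisting}
#include <stdbool.h>
#include <stddef.h>

static __thread bool heapBootFlag = false;		// per-KT: is this KT's heap initialized ?

static void heapBoot( void ) {					// slowpath: one-time heap/KT initialization
	// allocate or reuse a heap for this KT, register it on the heap list, ...
	heapBootFlag = true;
}

extern void * doMalloc( size_t size );			// assumed: normal fastpath allocation

void * malloc( size_t size ) {
	if ( __builtin_expect( ! heapBootFlag, 0 ) ) {	// singleton pattern: conditional check
		heapBoot();								// triggered at most once per KT
	} // if
	return doMalloc( size );					// continue along the normal fastpath
}
\end{lstlisting}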
669 670 While @startup@/@shutdown@ apply to the program KT, a concurrent program creates additional KTs that do not trigger these routines.
671 However, it is essential for the allocator to know when each KT is started/terminated.
672 One approach is to create a thread-local object with a constructor/destructor, which are triggered after a new KT starts and before it terminates, respectively.
673 \begin{lstlisting}
674 struct ThreadManager {
675 	volatile bool pgm_thread;
676 	ThreadManager() {} // unusable
677 	~ThreadManager() { if ( pgm_thread ) heapManagerDtor(); }
678 };
679 static thread_local ThreadManager threadManager;
680 \end{lstlisting}
681 Unfortunately, thread-local variables are created lazily, \ie on the first dereference of @threadManager@, which then triggers its constructor.
682 Therefore, the constructor is useless for knowing when a KT starts because the KT must reference it, and the allocator does not control the application KT.
683 Fortunately, the singleton pattern needed for initializing the program KT also triggers KT allocator initialization, which can then reference @pgm_thread@ to trigger @threadManager@'s constructor; otherwise, its destructor is never called.
684 Now when a KT terminates, @~ThreadManager@ is called to chain its heap onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
685 The conditional destructor call prevents closing down the program heap, which must remain available because epilogue code may free more storage.
686
687 Finally, there is a recursive problem when the singleton pattern dereferences @pgm_thread@ to initialize the thread-local object, because its initialization calls @atExit@, which immediately calls @malloc@ to obtain storage.
688 This recursion is handled with another thread-local flag to prevent double initialization.
689 A similar problem exists when the KT terminates and calls member @~ThreadManager@, because immediately afterwards, the terminating KT calls @free@ to deallocate the storage obtained from the @atExit@.
690 In the meantime, the terminated heap has been put on the global-heap free-stack, and may be in use by a new KT, so the @atExit@ free is handled as a free to another heap and put onto the away list using locking.
691
692 For user-threading systems, the KTs are controlled by the runtime, and hence, start/end points are known and the runtime interacts directly with the llheap allocator for \uC and \CFA, which eliminates or simplifies several of these problems.
693 The following API was created to provide interaction between the language runtime and the allocator.
694 \begin{lstlisting}
695 void startTask(); $\C{// KT starts}$
696 void finishTask(); $\C{// KT ends}$
697 void startup(); $\C{// when application code starts}$
698 void shutdown(); $\C{// when application code ends}$
699 bool traceHeap(); $\C{// enable allocation/free printing for debugging}$
700 bool traceHeapOn(); $\C{// start printing allocation/free calls}$
701 bool traceHeapOff(); $\C{// stop printing allocation/free calls}$
702 \end{lstlisting}
703 This kind of API is necessary to allow concurrent runtime systems to interact with different memory allocators in a consistent way.
201 704
202 705 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
203 706
204 707 \section{Added Features and Methods}
205 To improve the uHeap allocator (FIX ME: cite uHeap) interface and make it more user friendly, we added a few more routines to the C allocator.
Also, we built a \CFA (FIX ME: cite cforall) interface on top of C interface to increase the usability of the allocator. 206 207 \subsection{C Interface} 208 We added a few more features and routines to the allocator's C interface that can make the allocator more usable to the programmers. THese features will programmer more control on the dynamic memory allocation. 708 709 The C dynamic-allocation API (see \VRef[Figure]{f:CDynamicAllocationAPI}) is neither orthogonal nor complete. 710 For example, 711 \begin{itemize} 712 \item 713 It is possible to zero fill or align an allocation but not both. 714 \item 715 It is \emph{only} possible to zero fill an array allocation. 716 \item 717 It is not possible to resize a memory allocation without data copying. 718 \item 719 @realloc@ does not preserve initial allocation properties. 720 \end{itemize} 721 As a result, programmers must provide these options, which is error prone, resulting in blaming the entire programming language for a poor dynamic-allocation API. 722 Furthermore, newer programming languages have better type systems that can provide safer and more powerful APIs for memory allocation. 723 724 \begin{figure} 725 \begin{lstlisting} 726 void * malloc( size_t size ); 727 void * calloc( size_t nmemb, size_t size ); 728 void * realloc( void * ptr, size_t size ); 729 void * reallocarray( void * ptr, size_t nmemb, size_t size ); 730 void free( void * ptr ); 731 void * memalign( size_t alignment, size_t size ); 732 void * aligned_alloc( size_t alignment, size_t size ); 733 int posix_memalign( void ** memptr, size_t alignment, size_t size ); 734 void * valloc( size_t size ); 735 void * pvalloc( size_t size ); 736 737 struct mallinfo mallinfo( void ); 738 int mallopt( int param, int val ); 739 int malloc_trim( size_t pad ); 740 size_t malloc_usable_size( void * ptr ); 741 void malloc_stats( void ); 742 int malloc_info( int options, FILE * fp ); 743 \end{lstlisting} 744 \caption{C Dynamic-Allocation API} 745 \label{f:CDynamicAllocationAPI} 746 \end{figure} 747 748 The following presents design and API changes for C, \CC (\uC), and \CFA, all of which are implemented in llheap. 749 209 750 210 751 \subsection{Out of Memory} … … 212 753 Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory; 213 754 hence the need to return an alternate value for a zero-sized allocation. 214 The alternative is to abort a program when out of memory. 215 In theory, notifying the programmer allows recovery; 216 in practice, it is almost impossible to gracefully when out of memory, so the cheaper approach of returning @nullptr@ for a zero-sized allocation is chosen. 217 218 219 \subsection{\lstinline{void * aalloc( size_t dim, size_t elemSize )}} 220 @aalloc@ is an extension of malloc. It allows programmer to allocate a dynamic array of objects without calculating the total size of array explicitly. The only alternate of this routine in the other allocators is calloc but calloc also fills the dynamic memory with 0 which makes it slower for a programmer who only wants to dynamically allocate an array of objects without filling it with 0. 221 \paragraph{Usage} 755 A different approach allowed by the C API is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation. 756 In theory, notifying the programmer of memory failure allows recovery; 757 in practice, it is almost impossible to gracefully recover when out of memory. 
758 Hence, the cheaper approach of returning @nullptr@ for a zero-sized allocation is chosen because no pseudo allocation is necessary. 759 760 761 \subsection{C Interface} 762 763 For C, it is possible to increase functionality and orthogonality of the dynamic-memory API to make allocation better for programmers. 764 765 For existing C allocation routines: 766 \begin{itemize} 767 \item 768 @calloc@ sets the sticky zero-fill property. 769 \item 770 @memalign@, @aligned_alloc@, @posix_memalign@, @valloc@ and @pvalloc@ set the sticky alignment property. 771 \item 772 @realloc@ and @reallocarray@ preserve sticky properties. 773 \end{itemize} 774 775 The C dynamic-memory API is extended with the following routines: 776 777 \paragraph{\lstinline{void * aalloc( size_t dim, size_t elemSize )}} 778 extends @calloc@ for allocating a dynamic array of objects without calculating the total size of array explicitly but \emph{without} zero-filling the memory. 779 @aalloc@ is significantly faster than @calloc@, which is the only alternative. 780 781 \noindent\textbf{Usage} 222 782 @aalloc@ takes two parameters. 223 224 \begin{itemize} 225 \item 226 @dim@: number of objects in the array 227 \item 228 @elemSize@: size of the object in the array. 229 \end{itemize} 230 It returns address of dynamic object allocatoed on heap that can contain dim number of objects of the size elemSize. On failure, it returns a @NULL@ pointer. 231 232 \subsection{\lstinline{void * resize( void * oaddr, size_t size )}} 233 @resize@ is an extension of relloc. It allows programmer to reuse a cuurently allocated dynamic object with a new size requirement. Its alternate in the other allocators is @realloc@ but relloc also copy the data in old object to the new object which makes it slower for the programmer who only wants to reuse an old dynamic object for a new size requirement but does not want to preserve the data in the old object to the new object. 234 \paragraph{Usage} 783 \begin{itemize} 784 \item 785 @dim@: number of array objects 786 \item 787 @elemSize@: size of array object 788 \end{itemize} 789 It returns the address of the dynamic array or @NULL@ if either @dim@ or @elemSize@ are zero. 790 791 \paragraph{\lstinline{void * resize( void * oaddr, size_t size )}} 792 extends @realloc@ for resizing an existing allocation \emph{without} copying previous data into the new allocation or preserving sticky properties. 793 @resize@ is significantly faster than @realloc@, which is the only alternative. 794 795 \noindent\textbf{Usage} 235 796 @resize@ takes two parameters. 236 237 \begin{itemize} 238 \item 239 @oaddr@: the address of the old object that needs to be resized. 240 \item 241 @size@: the new size requirement of the to which the old object needs to be resized. 242 \end{itemize} 243 It returns an object that is of the size given but it does not preserve the data in the old object. On failure, it returns a @NULL@ pointer. 244 245 \subsection{\lstinline{void * resize( void * oaddr, size_t nalign, size_t size )}} 246 This @resize@ is an extension of the above @resize@ (FIX ME: cite above resize). In addition to resizing the size of of an old object, it can also realign the old object to a new alignment requirement. 247 \paragraph{Usage} 248 This resize takes three parameters. It takes an additional parameter of nalign as compared to the above resize (FIX ME: cite above resize). 249 250 \begin{itemize} 251 \item 252 @oaddr@: the address of the old object that needs to be resized. 
253 \item 254 @nalign@: the new alignment to which the old object needs to be realigned. 255 \item 256 @size@: the new size requirement of the to which the old object needs to be resized. 257 \end{itemize} 258 It returns an object with the size and alignment given in the parameters. On failure, it returns a @NULL@ pointer. 259 260 \subsection{\lstinline{void * amemalign( size_t alignment, size_t dim, size_t elemSize )}} 261 amemalign is a hybrid of memalign and aalloc. It allows programmer to allocate an aligned dynamic array of objects without calculating the total size of the array explicitly. It frees the programmer from calculating the total size of the array. 262 \paragraph{Usage} 263 amemalign takes three parameters. 264 265 \begin{itemize} 266 \item 267 @alignment@: the alignment to which the dynamic array needs to be aligned. 268 \item 269 @dim@: number of objects in the array 270 \item 271 @elemSize@: size of the object in the array. 272 \end{itemize} 273 It returns a dynamic array of objects that has the capacity to contain dim number of objects of the size of elemSize. The returned dynamic array is aligned to the given alignment. On failure, it returns a @NULL@ pointer. 274 275 \subsection{\lstinline{void * cmemalign( size_t alignment, size_t dim, size_t elemSize )}} 276 cmemalign is a hybrid of amemalign and calloc. It allows programmer to allocate an aligned dynamic array of objects that is 0 filled. The current way to do this in other allocators is to allocate an aligned object with memalign and then fill it with 0 explicitly. This routine provides both features of aligning and 0 filling, implicitly. 277 \paragraph{Usage} 278 cmemalign takes three parameters. 279 280 \begin{itemize} 281 \item 282 @alignment@: the alignment to which the dynamic array needs to be aligned. 283 \item 284 @dim@: number of objects in the array 285 \item 286 @elemSize@: size of the object in the array. 287 \end{itemize} 288 It returns a dynamic array of objects that has the capacity to contain dim number of objects of the size of elemSize. The returned dynamic array is aligned to the given alignment and is 0 filled. On failure, it returns a @NULL@ pointer. 289 290 \subsection{\lstinline{size_t malloc_alignment( void * addr )}} 291 @malloc_alignment@ returns the alignment of a currently allocated dynamic object. It allows the programmer in memory management and personal bookkeeping. It helps the programmer in verofying the alignment of a dynamic object especially in a scenerio similar to prudcer-consumer where a producer allocates a dynamic object and the consumer needs to assure that the dynamic object was allocated with the required alignment. 292 \paragraph{Usage} 293 @malloc_alignment@ takes one parameters. 294 295 \begin{itemize} 296 \item 297 @addr@: the address of the currently allocated dynamic object. 298 \end{itemize} 299 @malloc_alignment@ returns the alignment of the given dynamic object. On failure, it return the value of default alignment of the uHeap allocator. 300 301 \subsection{\lstinline{bool malloc_zero_fill( void * addr )}} 302 @malloc_zero_fill@ returns whether a currently allocated dynamic object was initially zero filled at the time of allocation. It allows the programmer in memory management and personal bookkeeping. 
It helps the programmer in verifying the zero filled property of a dynamic object especially in a scenerio similar to prudcer-consumer where a producer allocates a dynamic object and the consumer needs to assure that the dynamic object was zero filled at the time of allocation. 303 \paragraph{Usage} 797 \begin{itemize} 798 \item 799 @oaddr@: address to be resized 800 \item 801 @size@: new allocation size (smaller or larger than previous) 802 \end{itemize} 803 It returns the address of the old or new storage with the specified new size or @NULL@ if @size@ is zero. 804 805 \paragraph{\lstinline{void * amemalign( size_t alignment, size_t dim, size_t elemSize )}} 806 extends @aalloc@ and @memalign@ for allocating an aligned dynamic array of objects. 807 Sets sticky alignment property. 808 809 \noindent\textbf{Usage} 810 @amemalign@ takes three parameters. 811 \begin{itemize} 812 \item 813 @alignment@: alignment requirement 814 \item 815 @dim@: number of array objects 816 \item 817 @elemSize@: size of array object 818 \end{itemize} 819 It returns the address of the aligned dynamic-array or @NULL@ if either @dim@ or @elemSize@ are zero. 820 821 \paragraph{\lstinline{void * cmemalign( size_t alignment, size_t dim, size_t elemSize )}} 822 extends @amemalign@ with zero fill and has the same usage as @amemalign@. 823 Sets sticky zero-fill and alignment property. 824 It returns the address of the aligned, zero-filled dynamic-array or @NULL@ if either @dim@ or @elemSize@ are zero. 825 826 \paragraph{\lstinline{size_t malloc_alignment( void * addr )}} 827 returns the alignment of the dynamic object for use in aligning similar allocations. 828 829 \noindent\textbf{Usage} 830 @malloc_alignment@ takes one parameter. 831 \begin{itemize} 832 \item 833 @addr@: address of an allocated object. 834 \end{itemize} 835 It returns the alignment of the given object, where objects not allocated with alignment return the minimal allocation alignment. 836 837 \paragraph{\lstinline{bool malloc_zero_fill( void * addr )}} 838 returns true if the object has the zero-fill sticky property for use in zero filling similar allocations. 839 840 \noindent\textbf{Usage} 304 841 @malloc_zero_fill@ takes one parameters. 305 842 306 843 \begin{itemize} 307 844 \item 308 @addr@: the address of the currently allocated dynamic object. 309 \end{itemize} 310 @malloc_zero_fill@ returns true if the dynamic object was initially zero filled and return false otherwise. On failure, it returns false. 311 312 \subsection{\lstinline{size_t malloc_size( void * addr )}} 313 @malloc_size@ returns the allocation size of a currently allocated dynamic object. It allows the programmer in memory management and personal bookkeeping. It helps the programmer in verofying the alignment of a dynamic object especially in a scenerio similar to prudcer-consumer where a producer allocates a dynamic object and the consumer needs to assure that the dynamic object was allocated with the required size. Its current alternate in the other allocators is @malloc_usable_size@. But, @malloc_size@ is different from @malloc_usable_size@ as @malloc_usabe_size@ returns the total data capacity of dynamic object including the extra space at the end of the dynamic object. On the other hand, @malloc_size@ returns the size that was given to the allocator at the allocation of the dynamic object. This size is updated when an object is realloced, resized, or passed through a similar allocator routine. 314 \paragraph{Usage} 845 @addr@: address of an allocated object. 
846 \end{itemize} 847 It returns true if the zero-fill sticky property is set and false otherwise. 848 849 \paragraph{\lstinline{size_t malloc_size( void * addr )}} 850 returns the request size of the dynamic object (updated when an object is resized) for use in similar allocations. 851 See also @malloc_usable_size@. 852 853 \noindent\textbf{Usage} 315 854 @malloc_size@ takes one parameters. 316 317 \begin{itemize} 318 \item 319 @addr@: the address of the currently allocated dynamic object. 320 \end{itemize} 321 @malloc_size@ returns the allocation size of the given dynamic object. On failure, it return zero. 322 323 \subsection{\lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}} 324 This @realloc@ is an extension of the default @realloc@ (FIX ME: cite default @realloc@). In addition to reallocating an old object and preserving the data in old object, it can also realign the old object to a new alignment requirement. 325 \paragraph{Usage} 326 This @realloc@ takes three parameters. It takes an additional parameter of nalign as compared to the default @realloc@. 327 328 \begin{itemize} 329 \item 330 @oaddr@: the address of the old object that needs to be reallocated. 331 \item 332 @nalign@: the new alignment to which the old object needs to be realigned. 333 \item 334 @size@: the new size requirement of the to which the old object needs to be resized. 335 \end{itemize} 336 It returns an object with the size and alignment given in the parameters that preserves the data in the old object. On failure, it returns a @NULL@ pointer. 337 338 \subsection{\CFA Malloc Interface} 339 We added some routines to the malloc interface of \CFA. These routines can only be used in \CFA and not in our standalone uHeap allocator as these routines use some features that are only provided by \CFA and not by C. It makes the allocator even more usable to the programmers. 340 \CFA provides the liberty to know the returned type of a call to the allocator. So, mainly in these added routines, we removed the object size parameter from the routine as allocator can calculate the size of the object from the returned type. 341 342 \subsection{\lstinline{T * malloc( void )}} 343 This malloc is a simplified polymorphic form of defualt malloc (FIX ME: cite malloc). It does not take any parameter as compared to default malloc that takes one parameter. 344 \paragraph{Usage} 345 This malloc takes no parameters. 346 It returns a dynamic object of the size of type @T@. On failure, it returns a @NULL@ pointer. 347 348 \subsection{\lstinline{T * aalloc( size_t dim )}} 349 This aalloc is a simplified polymorphic form of above aalloc (FIX ME: cite aalloc). It takes one parameter as compared to the above aalloc that takes two parameters. 350 \paragraph{Usage} 351 aalloc takes one parameters. 352 353 \begin{itemize} 354 \item 355 @dim@: required number of objects in the array. 356 \end{itemize} 357 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer. 358 359 \subsection{\lstinline{T * calloc( size_t dim )}} 360 This calloc is a simplified polymorphic form of defualt calloc (FIX ME: cite calloc). It takes one parameter as compared to the default calloc that takes two parameters. 361 \paragraph{Usage} 362 This calloc takes one parameter. 363 364 \begin{itemize} 365 \item 366 @dim@: required number of objects in the array. 
367 \end{itemize} 368 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer. 369 370 \subsection{\lstinline{T * resize( T * ptr, size_t size )}} 371 This resize is a simplified polymorphic form of above resize (FIX ME: cite resize with alignment). It takes two parameters as compared to the above resize that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation as \CFA provides gives allocator the liberty to get the alignment of the returned type. 372 \paragraph{Usage} 373 This resize takes two parameters. 374 375 \begin{itemize} 376 \item 377 @ptr@: address of the old object. 378 \item 379 @size@: the required size of the new object. 380 \end{itemize} 381 It returns a dynamic object of the size given in paramters. The returned object is aligned to the alignemtn of type @T@. On failure, it returns a @NULL@ pointer. 382 383 \subsection{\lstinline{T * realloc( T * ptr, size_t size )}} 384 This @realloc@ is a simplified polymorphic form of defualt @realloc@ (FIX ME: cite @realloc@ with align). It takes two parameters as compared to the above @realloc@ that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation as \CFA provides gives allocator the liberty to get the alignment of the returned type. 385 \paragraph{Usage} 386 This @realloc@ takes two parameters. 387 388 \begin{itemize} 389 \item 390 @ptr@: address of the old object. 391 \item 392 @size@: the required size of the new object. 393 \end{itemize} 394 It returns a dynamic object of the size given in paramters that preserves the data in the given object. The returned object is aligned to the alignemtn of type @T@. On failure, it returns a @NULL@ pointer. 395 396 \subsection{\lstinline{T * memalign( size_t align )}} 397 This memalign is a simplified polymorphic form of defualt memalign (FIX ME: cite memalign). It takes one parameters as compared to the default memalign that takes two parameters. 398 \paragraph{Usage} 399 memalign takes one parameters. 400 401 \begin{itemize} 402 \item 403 @align@: the required alignment of the dynamic object. 404 \end{itemize} 405 It returns a dynamic object of the size of type @T@ that is aligned to given parameter align. On failure, it returns a @NULL@ pointer. 406 407 \subsection{\lstinline{T * amemalign( size_t align, size_t dim )}} 408 This amemalign is a simplified polymorphic form of above amemalign (FIX ME: cite amemalign). It takes two parameter as compared to the above amemalign that takes three parameters. 409 \paragraph{Usage} 410 amemalign takes two parameters. 411 412 \begin{itemize} 413 \item 414 @align@: required alignment of the dynamic array. 415 \item 416 @dim@: required number of objects in the array. 417 \end{itemize} 418 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. The returned object is aligned to the given parameter align. On failure, it returns a @NULL@ pointer. 419 420 \subsection{\lstinline{T * cmemalign( size_t align, size_t dim )}} 421 This cmemalign is a simplified polymorphic form of above cmemalign (FIX ME: cite cmemalign). It takes two parameter as compared to the above cmemalign that takes three parameters. 422 \paragraph{Usage} 423 cmemalign takes two parameters. 424 425 \begin{itemize} 426 \item 427 @align@: required alignment of the dynamic array. 
428 \item 429 @dim@: required number of objects in the array. 430 \end{itemize} 431 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. The returned object is aligned to the given parameter align and is zero filled. On failure, it returns a @NULL@ pointer. 432 433 \subsection{\lstinline{T * aligned_alloc( size_t align )}} 434 This @aligned_alloc@ is a simplified polymorphic form of defualt @aligned_alloc@ (FIX ME: cite @aligned_alloc@). It takes one parameter as compared to the default @aligned_alloc@ that takes two parameters. 435 \paragraph{Usage} 436 This @aligned_alloc@ takes one parameter. 437 438 \begin{itemize} 439 \item 440 @align@: required alignment of the dynamic object. 441 \end{itemize} 442 It returns a dynamic object of the size of type @T@ that is aligned to the given parameter. On failure, it returns a @NULL@ pointer. 443 444 \subsection{\lstinline{int posix_memalign( T ** ptr, size_t align )}} 445 This @posix_memalign@ is a simplified polymorphic form of defualt @posix_memalign@ (FIX ME: cite @posix_memalign@). It takes two parameters as compared to the default @posix_memalign@ that takes three parameters. 446 \paragraph{Usage} 447 This @posix_memalign@ takes two parameter. 448 449 \begin{itemize} 450 \item 451 @ptr@: variable address to store the address of the allocated object. 452 \item 453 @align@: required alignment of the dynamic object. 454 \end{itemize} 455 456 It stores address of the dynamic object of the size of type @T@ in given parameter ptr. This object is aligned to the given parameter. On failure, it returns a @NULL@ pointer. 457 458 \subsection{\lstinline{T * valloc( void )}} 459 This @valloc@ is a simplified polymorphic form of defualt @valloc@ (FIX ME: cite @valloc@). It takes no parameters as compared to the default @valloc@ that takes one parameter. 460 \paragraph{Usage} 461 @valloc@ takes no parameters. 462 It returns a dynamic object of the size of type @T@ that is aligned to the page size. On failure, it returns a @NULL@ pointer. 463 464 \subsection{\lstinline{T * pvalloc( void )}} 465 \paragraph{Usage} 466 @pvalloc@ takes no parameters. 467 It returns a dynamic object of the size that is calcutaed by rouding the size of type @T@. The returned object is also aligned to the page size. On failure, it returns a @NULL@ pointer. 468 469 \subsection{Alloc Interface} 470 In addition to improve allocator interface both for \CFA and our standalone allocator uHeap in C. We also added a new alloc interface in \CFA that increases usability of dynamic memory allocation. 471 This interface helps programmers in three major ways. 472 473 \begin{itemize} 474 \item 475 Routine Name: alloc interfce frees programmers from remmebring different routine names for different kind of dynamic allocations. 476 \item 477 Parametre Positions: alloc interface frees programmers from remembering parameter postions in call to routines. 478 \item 479 Object Size: alloc interface does not require programmer to mention the object size as \CFA allows allocator to determince the object size from returned type of alloc call. 480 \end{itemize} 481 482 Alloc interface uses polymorphism, backtick routines (FIX ME: cite backtick) and ttype parameters of \CFA (FIX ME: cite ttype) to provide a very simple dynamic memory allocation interface to the programmers. The new interfece has just one routine name alloc that can be used to perform a wide range of dynamic allocations. 
The parameters use backtick functions to provide a similar-to named parameters feature for our alloc interface so that programmers do not have to remember parameter positions in alloc call except the position of dimension (dim) parameter. 483 484 \subsection{Routine: \lstinline{T * alloc( ... )}} 485 Call to alloc wihout any parameter returns one object of size of type @T@ allocated dynamically. 486 Only the dimension (dim) parameter for array allocation has the fixed position in the alloc routine. If programmer wants to allocate an array of objects that the required number of members in the array has to be given as the first parameter to the alloc routine. 487 alocc routine accepts six kinds of arguments. Using different combinations of tha parameters, different kind of allocations can be performed. Any combincation of parameters can be used together except @`realloc@ and @`resize@ that should not be used simultanously in one call to routine as it creates ambiguity about whether to reallocate or resize a currently allocated dynamic object. If both @`resize@ and @`realloc@ are used in a call to alloc then the latter one will take effect or unexpected resulted might be produced. 488 489 \paragraph{Dim} 490 This is the only parameter in the alloc routine that has a fixed-position and it is also the only parameter that does not use a backtick function. It has to be passed at the first position to alloc call in-case of an array allocation of objects of type @T@. 491 It represents the required number of members in the array allocation as in \CFA's aalloc (FIX ME: cite aalloc). 492 This parameter should be of type @size_t@. 493 494 Example: @int a = alloc( 5 )@ 495 This call will return a dynamic array of five integers. 496 497 \paragraph{Align} 498 This parameter is position-free and uses a backtick routine align (@`align@). The parameter passed with @`align@ should be of type @size_t@. If the alignment parameter is not a power of two or is less than the default alignment of the allocator (that can be found out using routine libAlign in \CFA) then the passed alignment parameter will be rejected and the default alignment will be used. 499 500 Example: @int b = alloc( 5 , 64`align )@ 501 This call will return a dynamic array of five integers. It will align the allocated object to 64. 502 503 \paragraph{Fill} 504 This parameter is position-free and uses a backtick routine fill (@`fill@). In case of @realloc@, only the extra space after copying the data in the old object will be filled with given parameter. 505 Three types of parameters can be passed using `fill. 506 507 \begin{itemize} 508 \item 509 @char@: A char can be passed with @`fill@ to fill the whole dynamic allocation with the given char recursively till the end of required allocation. 510 \item 511 Object of returned type: An object of type of returned type can be passed with @`fill@ to fill the whole dynamic allocation with the given object recursively till the end of required allocation. 512 \item 513 Dynamic object of returned type: A dynamic object of type of returned type can be passed with @`fill@ to fill the dynamic allocation with the given dynamic object. In this case, the allocated memory is not filled recursively till the end of allocation. The filling happen untill the end object passed to @`fill@ or the end of requested allocation reaches. 514 \end{itemize} 515 516 Example: @int b = alloc( 5 , 'a'`fill )@ 517 This call will return a dynamic array of five integers. 
It will fill the allocated object with character 'a' recursively till the end of requested allocation size. 518 519 Example: @int b = alloc( 5 , 4`fill )@ 520 This call will return a dynamic array of five integers. It will fill the allocated object with integer 4 recursively till the end of requested allocation size. 521 522 Example: @int b = alloc( 5 , a`fill )@ where @a@ is a pointer of int type 523 This call will return a dynamic array of five integers. It will copy data in a to the returned object non-recursively untill end of a or the newly allocated object is reached. 524 525 \paragraph{Resize} 526 This parameter is position-free and uses a backtick routine resize (@`resize@). It represents the old dynamic object (oaddr) that the programmer wants to 527 \begin{itemize} 528 \item 529 resize to a new size. 530 \item 531 realign to a new alignment 532 \item 533 fill with something. 534 \end{itemize} 535 The data in old dynamic object will not be preserved in the new object. The type of object passed to @`resize@ and the returned type of alloc call can be different. 536 537 Example: @int b = alloc( 5 , a`resize )@ 538 This call will resize object a to a dynamic array that can contain 5 integers. 539 540 Example: @int b = alloc( 5 , a`resize , 32`align )@ 541 This call will resize object a to a dynamic array that can contain 5 integers. The returned object will also be aligned to 32. 542 543 Example: @int b = alloc( 5 , a`resize , 32`align , 2`fill )@ 544 This call will resize object a to a dynamic array that can contain 5 integers. The returned object will also be aligned to 32 and will be filled with 2. 545 546 \paragraph{Realloc} 547 This parameter is position-free and uses a backtick routine @realloc@ (@`realloc@). It represents the old dynamic object (oaddr) that the programmer wants to 548 \begin{itemize} 549 \item 550 realloc to a new size. 551 \item 552 realign to a new alignment 553 \item 554 fill with something. 555 \end{itemize} 556 The data in old dynamic object will be preserved in the new object. The type of object passed to @`realloc@ and the returned type of alloc call cannot be different. 557 558 Example: @int b = alloc( 5 , a`realloc )@ 559 This call will realloc object a to a dynamic array that can contain 5 integers. 560 561 Example: @int b = alloc( 5 , a`realloc , 32`align )@ 562 This call will realloc object a to a dynamic array that can contain 5 integers. The returned object will also be aligned to 32. 563 564 Example: @int b = alloc( 5 , a`realloc , 32`align , 2`fill )@ 565 This call will resize object a to a dynamic array that can contain 5 integers. The returned object will also be aligned to 32. The extra space after copying data of a to the returned object will be filled with 2. 855 \begin{itemize} 856 \item 857 @addr@: address of an allocated object. 858 \end{itemize} 859 It returns the request size or zero if @addr@ is @NULL@. 860 861 \paragraph{\lstinline{int malloc_stats_fd( int fd )}} 862 changes the file descriptor where @malloc_stats@ writes statistics (default @stdout@). 863 864 \noindent\textbf{Usage} 865 @malloc_stats_fd@ takes one parameters. 866 \begin{itemize} 867 \item 868 @fd@: files description. 869 \end{itemize} 870 It returns the previous file descriptor. 871 872 \paragraph{\lstinline{size_t malloc_expansion()}} 873 \label{p:malloc_expansion} 874 set the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation request. 
875 It returns the heap extension size used throughout a program, \ie called once at heap initialization.
876
877 \paragraph{\lstinline{size_t malloc_mmap_start()}}
878 sets the crossover between allocations occurring in the @sbrk@ area or separately mapped.
879 It returns the crossover point used throughout a program, \ie called once at heap initialization.
880
881 \paragraph{\lstinline{size_t malloc_unfreed()}}
882 \label{p:malloc_unfreed}
883 sets the amount subtracted to adjust for unfreed program storage (debug only).
884 It returns the new subtraction amount and is called by @malloc_stats@.
885
886
887 \subsection{\CC Interface}
888
889 The following extensions take advantage of overload polymorphism in the \CC type-system.
890
891 \paragraph{\lstinline{void * resize( void * oaddr, size_t nalign, size_t size )}}
892 extends @resize@ with an alignment re\-quirement.
893
894 \noindent\textbf{Usage}
895 takes three parameters.
896 \begin{itemize}
897 \item
898 @oaddr@: address to be resized
899 \item
900 @nalign@: alignment requirement
901 \item
902 @size@: new allocation size (smaller or larger than previous)
903 \end{itemize}
904 It returns the address of the old or new storage with the specified new size and alignment, or @NULL@ if @size@ is zero.
905
906 \paragraph{\lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}}
907 extends @realloc@ with an alignment re\-quirement and has the same usage as aligned @resize@.
908
909
910 \subsection{\CFA Interface}
911
912 The following extensions take advantage of overload polymorphism in the \CFA type-system.
913 The key safety advantage of the \CFA type system is using the return type to select overloads;
914 hence, a polymorphic routine knows the returned type and its size.
915 This capability is used to remove the object size parameter and correctly cast the return storage to match the result type.
916 For example, the following is the \CFA wrapper for C @malloc@:
917 \begin{cfa}
918 forall( T & | sized(T) ) {
919 	T * malloc( void ) {
920 		if ( _Alignof(T) <= libAlign() ) return @(T *)@malloc( @sizeof(T)@ ); // C allocation
921 		else return @(T *)@memalign( @_Alignof(T)@, @sizeof(T)@ ); // C allocation
922 	} // malloc
923 \end{cfa}
924 and is used as follows:
925 \begin{lstlisting}
926 int * i = malloc();
927 double * d = malloc();
928 struct Spinlock { ... } __attribute__(( aligned(128) ));
929 Spinlock * sl = malloc();
930 \end{lstlisting}
931 where each @malloc@ call provides the return type as @T@, which is used with @sizeof@, @_Alignof@, and casting the storage to the correct type.
932 This interface removes many of the common allocation errors in C programs.
933 \VRef[Figure]{f:CFADynamicAllocationAPI} shows the \CFA wrappers for the equivalent C/\CC allocation routines with the same semantic behaviour.
934 935 \begin{figure}
936 \begin{lstlisting}
937 T * malloc( void );
938 T * aalloc( size_t dim );
939 T * calloc( size_t dim );
940 T * resize( T * ptr, size_t size );
941 T * realloc( T * ptr, size_t size );
942 T * memalign( size_t align );
943 T * amemalign( size_t align, size_t dim );
944 T * cmemalign( size_t align, size_t dim );
945 T * aligned_alloc( size_t align );
946 int posix_memalign( T ** ptr, size_t align );
947 T * valloc( void );
948 T * pvalloc( void );
949 \end{lstlisting}
950 \caption{\CFA C-Style Dynamic-Allocation API}
951 \label{f:CFADynamicAllocationAPI}
952 \end{figure}
953
954 In addition to the \CFA C-style allocator interface, a new allocator interface is provided to further increase orthogonality and usability of dynamic-memory allocation.
955 This interface helps programmers in three ways.
956 \begin{itemize}
957 \item
958 naming: \CFA regular and @ttype@ polymorphism is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
959 \item
960 named arguments: individual allocation properties are specified using postfix function call, so programmers do not have to remember parameter positions in allocation calls.
961 \item
962 object size: like the \CFA C-style interface, programmers do not have to specify object size or cast allocation results.
963 \end{itemize}
964 Note, postfix function call is an alternative call syntax, using backtick @`@, where the argument appears before the function name, \eg
965 \begin{cfa}
966 duration ?@`@h( int h ); // ? denotes the position of the function operand
967 duration ?@`@m( int m );
968 duration ?@`@s( int s );
969 duration dur = 3@`@h + 42@`@m + 17@`@s;
970 \end{cfa}
971 @ttype@ polymorphism is similar to \CC variadic templates.
972
973 \paragraph{\lstinline{T * alloc( ... )} or \lstinline{T * alloc( size_t dim, ... )}}
974 is overloaded with a variable number of specific allocation routines, or an integer dimension parameter followed by a variable number of specific allocation routines.
975 A call without parameters returns a dynamically allocated object of type @T@ (@malloc@).
976 A call with only the dimension (dim) parameter returns a dynamically allocated array of objects of type @T@ (@aalloc@).
977 The variable number of arguments consists of allocation properties, which can be combined to produce different kinds of allocations.
978 The only restriction is for properties @realloc@ and @resize@, which cannot be combined.
979
980 The allocation property functions are:
981 \subparagraph{\lstinline{T_align ?`align( size_t alignment )}}
982 to align the allocation.
983 The alignment parameter must be $\ge$ the default alignment (@libAlign()@ in \CFA) and a power of two, \eg:
984 \begin{cfa}
985 int * i0 = alloc( @4096`align@ ); sout | i0 | nl;
986 int * i1 = alloc( 3, @4096`align@ ); sout | i1; for (i; 3 ) sout | &i1[i]; sout | nl;
987
988 0x555555572000
989 0x555555574000 0x555555574000 0x555555574004 0x555555574008
990 \end{cfa}
991 returns a dynamic object and object array aligned on a 4096-byte boundary.
992
993 \subparagraph{\lstinline{S_fill(T) ?`fill ( /* various types */ )}}
994 to initialize storage.
995 There are three ways to fill storage:
996 \begin{enumerate}
997 \item
998 A char fills each byte of each object.
999 \item
1000 An object of the returned type fills each object.
1001 \item
1002 An object array pointer fills some or all of the corresponding object array.
1003 \end{enumerate} 1004 For example: 1005 \begin{cfa}[numbers=left] 1006 int * i0 = alloc( @0n`fill@ ); sout | *i0 | nl; // disambiguate 0 1007 int * i1 = alloc( @5`fill@ ); sout | *i1 | nl; 1008 int * i2 = alloc( @'\xfe'`fill@ ); sout | hex( *i2 ) | nl; 1009 int * i3 = alloc( 5, @5`fill@ ); for ( i; 5 ) sout | i3[i]; sout | nl; 1010 int * i4 = alloc( 5, @0xdeadbeefN`fill@ ); for ( i; 5 ) sout | hex( i4[i] ); sout | nl; 1011 int * i5 = alloc( 5, @i3`fill@ ); for ( i; 5 ) sout | i5[i]; sout | nl; 1012 int * i6 = alloc( 5, @[i3, 3]`fill@ ); for ( i; 5 ) sout | i6[i]; sout | nl; 1013 \end{cfa} 1014 \begin{lstlisting}[numbers=left] 1015 0 1016 5 1017 0xfefefefe 1018 5 5 5 5 5 1019 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 1020 5 5 5 5 5 1021 5 5 5 -555819298 -555819298 // two undefined values 1022 \end{lstlisting} 1023 Examples 1 to 3, fill an object with a value or characters. 1024 Examples 4 to 7, fill an array of objects with values, another array, or part of an array. 1025 1026 \subparagraph{\lstinline{S_resize(T) ?`resize( void * oaddr )}} 1027 used to resize, realign, and fill, where the old object data is not copied to the new object. 1028 The old object type may be different from the new object type, since the values are not used. 1029 For example: 1030 \begin{cfa}[numbers=left] 1031 int * i = alloc( @5`fill@ ); sout | i | *i; 1032 i = alloc( @i`resize@, @256`align@, @7`fill@ ); sout | i | *i; 1033 double * d = alloc( @i`resize@, @4096`align@, @13.5`fill@ ); sout | d | *d; 1034 \end{cfa} 1035 \begin{lstlisting}[numbers=left] 1036 0x55555556d5c0 5 1037 0x555555570000 7 1038 0x555555571000 13.5 1039 \end{lstlisting} 1040 Examples 2 to 3 change the alignment, fill, and size for the initial storage of @i@. 1041 1042 \begin{cfa}[numbers=left] 1043 int * ia = alloc( 5, @5`fill@ ); for ( i; 5 ) sout | ia[i]; sout | nl; 1044 ia = alloc( 10, @ia`resize@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl; 1045 sout | ia; ia = alloc( 5, @ia`resize@, @512`align@, @13`fill@ ); sout | ia; for ( i; 5 ) sout | ia[i]; sout | nl;; 1046 ia = alloc( 3, @ia`resize@, @4096`align@, @2`fill@ ); sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl; 1047 \end{cfa} 1048 \begin{lstlisting}[numbers=left] 1049 5 5 5 5 5 1050 7 7 7 7 7 7 7 7 7 7 1051 0x55555556d560 0x555555571a00 13 13 13 13 13 1052 0x555555572000 0x555555572000 2 0x555555572004 2 0x555555572008 2 1053 \end{lstlisting} 1054 Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@. 1055 1056 \subparagraph{\lstinline{S_realloc(T) ?`realloc( T * a ))}} 1057 used to resize, realign, and fill, where the old object data is copied to the new object. 1058 The old object type must be the same as the new object type, since the values used. 1059 Note, for @fill@, only the extra space after copying the data from the old object is filled with the given parameter. 1060 For example: 1061 \begin{cfa}[numbers=left] 1062 int * i = alloc( @5`fill@ ); sout | i | *i; 1063 i = alloc( @i`realloc@, @256`align@ ); sout | i | *i; 1064 i = alloc( @i`realloc@, @4096`align@, @13`fill@ ); sout | i | *i; 1065 \end{cfa} 1066 \begin{lstlisting}[numbers=left] 1067 0x55555556d5c0 5 1068 0x555555570000 5 1069 0x555555571000 5 1070 \end{lstlisting} 1071 Examples 2 to 3 change the alignment for the initial storage of @i@. 1072 The @13`fill@ for example 3 does nothing because no extra space is added. 
1073 1074 \begin{cfa}[numbers=left] 1075 int * ia = alloc( 5, @5`fill@ ); for ( i; 5 ) sout | ia[i]; sout | nl; 1076 ia = alloc( 10, @ia`realloc@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl; 1077 sout | ia; ia = alloc( 1, @ia`realloc@, @512`align@, @13`fill@ ); sout | ia; for ( i; 1 ) sout | ia[i]; sout | nl;; 1078 ia = alloc( 3, @ia`realloc@, @4096`align@, @2`fill@ ); sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl; 1079 \end{cfa} 1080 \begin{lstlisting}[numbers=left] 1081 5 5 5 5 5 1082 5 5 5 5 5 7 7 7 7 7 1083 0x55555556c560 0x555555570a00 5 1084 0x555555571000 0x555555571000 5 0x555555571004 2 0x555555571008 2 1085 \end{lstlisting} 1086 Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@. 1087 The @13`fill@ for example 3 does nothing because no extra space is added. 1088 1089 These \CFA allocation features are used extensively in the development of the \CFA runtime. -
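To complement the \CFA examples above, the following short C program sketches typical use of the extended C-style routines documented earlier (@aalloc@, @resize@, @cmemalign@, and the sticky-property queries); the prototypes are repeated here only for self-containment and error handling is elided.
\begin{lstlisting}
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

// Extended C API, as documented above.
void * aalloc( size_t dim, size_t elemSize );
void * resize( void * oaddr, size_t size );
void * cmemalign( size_t alignment, size_t dim, size_t elemSize );
size_t malloc_alignment( void * addr );
bool malloc_zero_fill( void * addr );
size_t malloc_size( void * addr );

int main( void ) {
	int * a = aalloc( 100, sizeof(int) );				// array allocation, not zero-filled (unlike calloc)
	a = resize( a, 200 * sizeof(int) );					// change size without copying or preserving old values
	double * d = cmemalign( 64, 10, sizeof(double) );	// zero-filled, 64-byte-aligned array
	size_t align = malloc_alignment( d );				// 64 (sticky alignment property)
	bool zeroed = malloc_zero_fill( d );				// true (sticky zero-fill property)
	size_t request = malloc_size( a );					// request size, not bucket size
	(void)align; (void)zeroed; (void)request;
	free( a );
	free( d );
	return 0;
}
\end{lstlisting}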
doc/theses/mubeen_zulfiqar_MMath/background.tex
rba897d21 r2e9b59b 34 34 \VRef[Figure]{f:AllocatorComponents} shows the two important data components for a memory allocator, management and storage, collectively called the \newterm{heap}. 35 35 The \newterm{management data} is a data structure located at a known memory address and contains all information necessary to manage the storage data. 36 The management data starts with fixed-sized information in the static-data memory that flows intothe dynamic-allocation memory.36 The management data starts with fixed-sized information in the static-data memory that references components in the dynamic-allocation memory. 37 37 The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}. 38 Allocated objects ( white) are variable sized, and allocated and maintained by the program;38 Allocated objects (light grey) are variable sized, and allocated and maintained by the program; 39 39 \ie only the program knows the location of allocated storage, not the memory allocator. 40 40 \begin{figure}[h] … … 44 44 \label{f:AllocatorComponents} 45 45 \end{figure} 46 Freed objects ( light grey) are memory deallocated by the program, which are linked into one or more lists facilitating easy location fornew allocations.46 Freed objects (white) represent memory deallocated by the program, which are linked into one or more lists facilitating easy location of new allocations. 47 47 Often the free list is chained internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks. 48 48 Reserved memory (dark grey) is one or more blocks of memory obtained from the operating system but not yet allocated to the program; … … 54 54 The trailer may be used to simplify an allocation implementation, \eg coalescing, and/or for security purposes to mark the end of an object. 55 55 An object may be preceded by padding to ensure proper alignment. 56 Some algorithms quantize allocation requests into distinct sizes resulting in additional spacing after objects less than the quantized value. 56 Some algorithms quantize allocation requests into distinct sizes, called \newterm{buckets}, resulting in additional spacing after objects less than the quantized value. 57 (Note, the buckets are often organized as an array of ascending bucket sizes for fast searching, \eg binary search, and the array is stored in the heap management-area, where each bucket is a top point to the freed objects of that size.) 57 58 When padding and spacing are necessary, neither can be used to satisfy a future allocation request while the current allocation exists. 58 59 A free object also contains management data, \eg size, chaining, etc. … … 81 82 Fragmentation is memory requested from the operating system but not used by the program; 82 83 hence, allocated objects are not fragmentation. 83 \VRef[Figure]{f:InternalExternalFragmentation} )shows fragmentation is divided into two forms: internal or external.84 \VRef[Figure]{f:InternalExternalFragmentation} shows fragmentation is divided into two forms: internal or external. 84 85 85 86 \begin{figure} … … 96 97 An allocator should strive to keep internal management information to a minimum. 
97 98 98 \newterm{External fragmentation} is all memory space reserved from the operating system but not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes freed objects, all external management data, and reserved memory.99 \newterm{External fragmentation} is all memory space reserved from the operating system but not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes all external management data, freed objects, and reserved memory. 99 100 This memory is problematic in two ways: heap blowup and highly fragmented memory. 100 101 \newterm{Heap blowup} occurs when memory freed by the program is not reused for future allocations leading to potentially unbounded external fragmentation growth~\cite{Berger00}. … … 125 126 \end{figure} 126 127 127 For a single-threaded memory allocator, three basic approaches for controlling fragmentation have beenidentified~\cite{Johnstone99}.128 For a single-threaded memory allocator, three basic approaches for controlling fragmentation are identified~\cite{Johnstone99}. 128 129 The first approach is a \newterm{sequential-fit algorithm} with one list of free objects that is searched for a block large enough to fit a requested object size. 129 130 Different search policies determine the free object selected, \eg the first free object large enough or closest to the requested size. … … 132 133 133 134 The second approach is a \newterm{segregated} or \newterm{binning algorithm} with a set of lists for different sized freed objects. 134 When an object is allocated, the requested size is rounded up to the nearest bin-size, possibly withspacing after the object.135 When an object is allocated, the requested size is rounded up to the nearest bin-size, often leading to spacing after the object. 135 136 A binning algorithm is fast at finding free memory of the appropriate size and allocating it, since the first free object on the free list is used. 136 137 The fewer bin-sizes, the fewer lists need to be searched and maintained; … … 158 159 Temporal locality commonly occurs during an iterative computation with a fix set of disjoint variables, while spatial locality commonly occurs when traversing an array. 159 160 160 Hardware takes advantage of temporal and spatial locality through multiple levels of caching (\ie memory hierarchy).161 Hardware takes advantage of temporal and spatial locality through multiple levels of caching, \ie memory hierarchy. 161 162 When an object is accessed, the memory physically located around the object is also cached with the expectation that the current and nearby objects will be referenced within a short period of time. 162 163 For example, entire cache lines are transferred between memory and cache and entire virtual-memory pages are transferred between disk and memory. … … 171 172 172 173 There are a number of ways a memory allocator can degrade locality by increasing the working set. 173 For example, a memory allocator may access multiple free objects before finding one to satisfy an allocation request (\eg sequential-fit algorithm).174 For example, a memory allocator may access multiple free objects before finding one to satisfy an allocation request, \eg sequential-fit algorithm. 174 175 If there are a (large) number of objects accessed in very different areas of memory, the allocator may perturb the program's memory hierarchy causing multiple cache or page misses~\cite{Grunwald93}. 175 176 Another way locality can be degraded is by spatially separating related data. 
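The binning (segregated) approach described above can be sketched in a few lines of C: round a request up to the nearest bucket by binary search over an ascending array of bucket sizes, matching the bucket-array organization mentioned earlier. The bucket sizes and names here are invented for illustration.
\begin{lstlisting}
#include <stddef.h>

// Ascending bucket sizes stored in the heap management area (values invented).
static const size_t bucket_sizes[] = { 16, 32, 48, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096 };
enum { NBUCKETS = sizeof( bucket_sizes ) / sizeof( bucket_sizes[0] ) };

// Round a request up to the nearest bucket: lower-bound binary search.
// Returns NBUCKETS when the request is too large for any bucket (e.g. handled by mmap).
static size_t find_bucket( size_t request ) {
	size_t lo = 0, hi = NBUCKETS;
	while ( lo < hi ) {
		size_t mid = lo + ( hi - lo ) / 2;
		if ( bucket_sizes[mid] < request ) lo = mid + 1;
		else hi = mid;
	}
	return lo;		// spacing after the object = bucket_sizes[lo] - request (when lo < NBUCKETS)
}
\end{lstlisting}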
… … 181 182 182 183 A multi-threaded memory-allocator does not run any threads itself, but is used by a multi-threaded program. 183 In addition to single-threaded design issues of locality and fragmentation, a multi-threaded allocator may besimultaneously accessed by multiple threads, and hence, must deal with concurrency issues such as mutual exclusion, false sharing, and additional forms of heap blowup.184 In addition to single-threaded design issues of fragmentation and locality, a multi-threaded allocator is simultaneously accessed by multiple threads, and hence, must deal with concurrency issues such as mutual exclusion, false sharing, and additional forms of heap blowup. 184 185 185 186 … … 192 193 Second is when multiple threads contend for a shared resource simultaneously, and hence, some threads must wait until the resource is released. 193 194 Contention can be reduced in a number of ways: 195 \begin{itemize}[itemsep=0pt] 196 \item 194 197 using multiple fine-grained locks versus a single lock, spreading the contention across a number of locks; 198 \item 195 199 using trylock and generating new storage if the lock is busy, yielding a classic space versus time tradeoff; 200 \item 196 201 using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Oyama99}. 197 However, all of these approaches have degenerate cases where contention occurs. 202 \end{itemize} 203 However, all of these approaches have degenerate cases where program contention is high, which occurs outside of the allocator. 198 204 199 205 … … 275 281 \label{s:MultipleHeaps} 276 282 277 A single-threaded allocator has at most one thread and heap, while amulti-threaded allocator has potentially multiple threads and heaps.283 A multi-threaded allocator has potentially multiple threads and heaps. 278 284 The multiple threads cause complexity, and multiple heaps are a mechanism for dealing with the complexity. 279 285 The spectrum ranges from multiple threads using a single heap, denoted as T:1 (see \VRef[Figure]{f:SingleHeap}), to multiple threads sharing multiple heaps, denoted as T:H (see \VRef[Figure]{f:SharedHeaps}), to one thread per heap, denoted as 1:1 (see \VRef[Figure]{f:PerThreadHeap}), which is almost back to a single-threaded allocator. … … 339 345 An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory. 340 346 Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur. 341 Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area .347 Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area, pushing part of the storage management complexity back to the operating system. 342 348 343 349 \begin{figure} … … 368 374 369 375 370 \paragraph{1:1 model (thread heaps)} where each thread has its own heap , which eliminates most contention and locking because threads seldom accesses another thread's heap (see ownership in \VRef{s:Ownership}).376 \paragraph{1:1 model (thread heaps)} where each thread has its own heap eliminating most contention and locking because threads seldom access another thread's heap (see ownership in \VRef{s:Ownership}). 
371 377 An additional benefit of thread heaps is improved locality due to better memory layout. 372 378 As each thread only allocates from its heap, all objects for a thread are consolidated in the storage area for that heap, better utilizing each CPUs cache and accessing fewer pages. … … 380 386 Second is to place the thread heap on a list of available heaps and reuse it for a new thread in the future. 381 387 Destroying the thread heap immediately may reduce external fragmentation sooner, since all free objects are freed to the global heap and may be reused by other threads. 382 Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap .388 Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible.. 383 389 384 390 … … 388 394 However, an important goal of user-level threading is for fast operations (creation/termination/context-switching) by not interacting with the operating system, which allows the ability to create large numbers of high-performance interacting threads ($>$ 10,000). 389 395 It is difficult to retain this goal, if the user-threading model is directly involved with the heap model. 390 \VRef[Figure]{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model provided by the language runtime.396 \VRef[Figure]{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model is provided by the language runtime. 391 397 Hence, a user thread allocates/deallocates from/to the heap of the kernel thread on which it is currently executing. 392 398 … … 400 406 Adopting this model results in a subtle problem with shared heaps. 401 407 With kernel threading, an operation that is started by a kernel thread is always completed by that thread. 402 For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap even if preempted. 403 Any correctness locking associated with the shared heap is preserved across preemption. 408 For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap even if preempted, \ie any locking correctness associated with the shared heap is preserved across preemption. 404 409 405 410 However, this correctness property is not preserved for user-level threading. … … 409 414 However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is rare (10--100 milliseconds). 410 415 Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically. 411 Occasionally ignoring a preemption should be benign .416 Occasionally ignoring a preemption should be benign, but a persistent lack of preemption can result in both short and long term starvation. 412 417 413 418 … … 430 435 431 436 \newterm{Ownership} defines which heap an object is returned-to on deallocation. 
432 If a thread returns an object to the heap it was originally allocated from, theheap has ownership of its objects.433 Alternatively, a thread can return an object to the heap it is currently a llocating from, which can be any heap accessible during a thread's lifetime.437 If a thread returns an object to the heap it was originally allocated from, a heap has ownership of its objects. 438 Alternatively, a thread can return an object to the heap it is currently associated with, which can be any heap accessible during a thread's lifetime. 434 439 \VRef[Figure]{f:HeapsOwnership} shows an example of multiple heaps (minus the global heap) with and without ownership. 435 440 Again, the arrows indicate the direction memory conceptually moves for each kind of operation. … … 539 544 Only with the 1:1 model and ownership is active and passive false-sharing avoided (see \VRef{s:Ownership}). 540 545 Passive false-sharing may still occur, if delayed ownership is used. 546 Finally, a completely free container can become reserved storage and be reset to allocate objects of a new size or freed to the global heap. 541 547 542 548 \begin{figure} … … 553 559 \caption{Free-list Structure with Container Ownership} 554 560 \end{figure} 555 556 A fragmented heap has multiple containers that may be partially or completely free.557 A completely free container can become reserved storage and be reset to allocate objects of a new size.558 When a heap reaches a threshold of free objects, it moves some free storage to the global heap for reuse to prevent heap blowup.559 Without ownership, when a heap frees objects to the global heap, individual objects must be passed, and placed on the global-heap's free-list.560 Containers cannot be freed to the global heap unless completely free because561 561 562 562 When a container changes ownership, the ownership of all objects within it change as well. … … 569 569 Note, once the object is freed by Task$_1$, no more false sharing can occur until the container changes ownership again. 570 570 To prevent this form of false sharing, container movement may be restricted to when all objects in the container are free. 571 One implementation approach that increases the freedom to return a free container to the operating system involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area .571 One implementation approach that increases the freedom to return a free container to the operating system involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area, again pushing storage management complexity back to the operating system. 572 572 573 573 \begin{figure} … … 700 700 \end{figure} 701 701 702 As mentioned, an implementation may have only one heap dealwith the global heap, so the other heap can be simplified.702 As mentioned, an implementation may have only one heap interact with the global heap, so the other heap can be simplified. 703 703 For example, if only the private heap interacts with the global heap, the public heap can be reduced to a lock-protected free-list of objects deallocated by other threads due to ownership, called a \newterm{remote free-list}. 704 704 To avoid heap blowup, the private heap allocates from the remote free-list when it reaches some threshold or it has no free storage. 
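To ground the remote free-list just described, here is a minimal lock-protected C sketch (illustrative only; the structure and helper names are invented, and a real implementation could instead use the lock-free techniques discussed later).
\begin{lstlisting}
#include <pthread.h>
#include <stddef.h>

struct FreeObject { struct FreeObject * next; };   // link stored in the unused memory itself

struct RemoteList {
	pthread_mutex_t lock;                          // initialize with PTHREAD_MUTEX_INITIALIZER
	struct FreeObject * head;
};

// Called by a *non-owner* thread returning storage to the owning heap.
void remote_free( struct RemoteList * rl, void * storage ) {
	struct FreeObject * obj = storage;
	pthread_mutex_lock( &rl->lock );
	obj->next = rl->head;
	rl->head = obj;
	pthread_mutex_unlock( &rl->lock );
}

// Called by the owner when its private free-list is empty or below a threshold;
// returns the whole chain so it can be spliced into the private free-list.
struct FreeObject * remote_drain( struct RemoteList * rl ) {
	pthread_mutex_lock( &rl->lock );
	struct FreeObject * chain = rl->head;
	rl->head = NULL;
	pthread_mutex_unlock( &rl->lock );
	return chain;
}
\end{lstlisting}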
… … 721 721 An allocation buffer is reserved memory (see~\VRef{s:AllocatorComponents}) not yet allocated to the program, and is used for allocating objects when the free list is empty. 722 722 That is, rather than requesting new storage for a single object, an entire buffer is requested from which multiple objects are allocated later. 723 Both any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or operating system, respectively.723 Any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or operating system, respectively. 724 724 The allocation buffer reduces contention and the number of global/operating-system calls. 725 725 For coalescing, a buffer is split into smaller objects by allocations, and recomposed into larger buffer areas during deallocations. 726 726 727 Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts .727 Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts (simple bump allocation). 728 728 Furthermore, to prevent heap blowup, objects should be reused before allocating a new allocation buffer. 729 Thus, allocation buffers are often allocated more frequently at program/thread start, and then their use often diminishes.729 Thus, allocation buffers are often allocated more frequently at program/thread start, and then allocations often diminish. 730 730 731 731 Using an allocation buffer with a thread heap avoids active false-sharing, since all objects in the allocation buffer are allocated to the same thread. … … 746 746 \label{s:LockFreeOperations} 747 747 748 A lock-free algorithm guarantees safe concurrent-access to a data structure, so that at least one thread can make progress in the system, but an individual task has no bound to execution, and hence,may starve~\cite[pp.~745--746]{Herlihy93}.749 % A wait-free algorithm puts a finite bound on the number of steps any thread takes to complete an operation, so an individual task cannot starve 748 A \newterm{lock-free algorithm} guarantees safe concurrent-access to a data structure, so that at least one thread makes progress, but an individual task has no execution bound and may starve~\cite[pp.~745--746]{Herlihy93}. 749 (A \newterm{wait-free algorithm} puts a bound on the number of steps any thread takes to complete an operation to prevent starvation.) 750 750 Lock-free operations can be used in an allocator to reduce or eliminate the use of locks. 751 Locks are a problem for high contention or if the thread holding the lock is preempted and other threads attempt to use that lock.752 With respect to the heap, these situations are unlikely unless all threads make sextremely high use of dynamic-memory allocation, which can be an indication of poor design.751 While locks and lock-free data-structures often have equal performance, lock-free has the advantage of not holding a lock across preemption so other threads can continue to make progress. 752 With respect to the heap, these situations are unlikely unless all threads make extremely high use of dynamic-memory allocation, which can be an indication of poor design. 
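Picking up the allocation-buffer paragraph above, the "simple bump allocation" it mentions can be sketched in a few lines of C; alignment handling is simplified (power-of-two alignment assumed) and the names are invented.
\begin{lstlisting}
#include <stddef.h>
#include <stdint.h>

struct AllocBuffer {
	char * next;		// first unused byte of the reserved buffer
	char * end;			// one past the last byte of the buffer
};

// Carve the next object out of the buffer; returns NULL when the buffer is
// exhausted, at which point the heap requests a new buffer (not shown).
static void * bump_alloc( struct AllocBuffer * buf, size_t size, size_t align ) {
	uintptr_t p = ( (uintptr_t)buf->next + ( align - 1 ) ) & ~( (uintptr_t)align - 1 );
	if ( p + size > (uintptr_t)buf->end ) return NULL;
	buf->next = (char *)( p + size );
	return (void *)p;
}
\end{lstlisting}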
753 753 Nevertheless, lock-free algorithms can reduce the number of context switches, since a thread does not yield/block while waiting for a lock; 754 on the other hand, a thread may busy-wait for an unbounded period .754 on the other hand, a thread may busy-wait for an unbounded period holding a processor. 755 755 Finally, lock-free implementations have greater complexity and hardware dependency. 756 756 Lock-free algorithms can be applied most easily to simple free-lists, \eg remote free-list, to allow lock-free insertion and removal from the head of a stack. 757 Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is more complex.757 Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is correspondingly more complex. 758 758 Michael~\cite{Michael04} and Gidenstam \etal \cite{Gidenstam05} have created lock-free variations of the Hoard allocator. 759 759 -
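As a concrete example of the lock-free free-list operations mentioned above, here is a Treiber-stack sketch using C11 atomics. It is illustrative only, not the Hoard or \CFA implementation, and a production version must still deal with the ABA problem (e.g. pointer tagging, or restricting pops to the owning thread as with a remote free-list).
\begin{lstlisting}
#include <stdatomic.h>
#include <stddef.h>

struct Node { struct Node * next; };

struct LFStack { _Atomic(struct Node *) head; };

void lf_push( struct LFStack * s, struct Node * n ) {
	struct Node * old = atomic_load_explicit( &s->head, memory_order_relaxed );
	do {
		n->next = old;
	} while ( ! atomic_compare_exchange_weak_explicit(
				&s->head, &old, n, memory_order_release, memory_order_relaxed ) );
}

struct Node * lf_pop( struct LFStack * s ) {
	struct Node * old = atomic_load_explicit( &s->head, memory_order_acquire );
	while ( old != NULL &&
			! atomic_compare_exchange_weak_explicit(
				&s->head, &old, old->next, memory_order_acquire, memory_order_relaxed ) ) {
		// 'old' is refreshed by the failed CAS; retry
	}
	return old;		// NULL when the stack is empty
}
\end{lstlisting}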
doc/theses/mubeen_zulfiqar_MMath/figures/AllocDS1.fig
rba897d21 r2e9b59b 8 8 -2 9 9 1200 2 10 6 4200 1575 4500 172511 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4275 1650 20 20 4275 1650 4295 165012 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4350 1650 20 20 4350 1650 4370 165013 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4425 1650 20 20 4425 1650 4445 165010 6 2850 2100 3150 2250 11 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 2925 2175 20 20 2925 2175 2945 2175 12 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3000 2175 20 20 3000 2175 3020 2175 13 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3075 2175 20 20 3075 2175 3095 2175 14 14 -6 15 6 2850 2475 3150 2850 15 6 4050 2100 4350 2250 16 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4125 2175 20 20 4125 2175 4145 2175 17 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 2175 20 20 4200 2175 4220 2175 18 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4275 2175 20 20 4275 2175 4295 2175 19 -6 20 6 4650 2100 4950 2250 21 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4725 2175 20 20 4725 2175 4745 2175 22 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4800 2175 20 20 4800 2175 4820 2175 23 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4875 2175 20 20 4875 2175 4895 2175 24 -6 25 6 3450 2100 3750 2250 26 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3525 2175 20 20 3525 2175 3545 2175 27 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3600 2175 20 20 3600 2175 3620 2175 28 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3675 2175 20 20 3675 2175 3695 2175 29 -6 30 6 3300 2175 3600 2550 16 31 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 17 32 1 1 1.00 45.00 90.00 18 2925 2475 2925 270033 3375 2175 3375 2400 19 34 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 20 2850 2700 3150 2700 3150 2850 2850 2850 2850 270035 3300 2400 3600 2400 3600 2550 3300 2550 3300 2400 21 36 -6 22 6 4350 2475 4650 2850 37 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 38 3150 1800 3150 2250 39 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 40 2850 1800 2850 2250 41 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 42 4650 1800 4650 2250 43 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 44 4950 1800 4950 2250 45 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 46 4500 1725 4500 2250 47 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 48 5100 1725 5100 2250 49 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 50 3450 1800 3450 2250 51 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 52 3750 1800 3750 2250 53 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 54 3300 1725 3300 2250 55 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 56 3900 1725 3900 2250 57 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 58 5250 1800 5250 2250 59 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 60 5400 1800 5400 2250 61 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 62 5550 1800 5550 2250 63 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 64 5700 1800 5700 2250 65 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 66 5850 1800 5850 2250 67 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 68 2700 1725 2700 2250 23 69 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 24 70 1 1 1.00 45.00 90.00 25 4425 2475 4425 2700 26 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 27 4350 2700 4650 2700 4650 2850 4350 2850 4350 2700 28 -6 29 6 3600 2475 3825 3150 71 3375 1275 3375 1575 30 72 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 31 73 1 1 1.00 45.00 90.00 32 3675 2475 3675 2700 33 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 34 3600 2700 3825 2700 3825 2850 3600 2850 3600 2700 35 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 36 3600 3000 3825 3000 3825 3150 3600 3150 3600 3000 74 2700 1275 2700 1575 75 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 2 76 1 1 1.00 45.00 90.00 77 2775 1275 2775 1575 37 78 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 38 79 1 1 1.00 45.00 90.00 39 3675 2775 3675 3000 40 -6 41 6 4875 3600 5175 3750 42 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4950 3675 20 20 4950 
3675 4970 3675 43 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5025 3675 20 20 5025 3675 5045 3675 44 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5100 3675 20 20 5100 3675 5120 3675 45 -6 46 6 4875 2325 5175 2475 47 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4950 2400 20 20 4950 2400 4970 2400 48 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5025 2400 20 20 5025 2400 5045 2400 49 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5100 2400 20 20 5100 2400 5120 2400 50 -6 51 6 5625 2325 5925 2475 52 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5700 2400 20 20 5700 2400 5720 2400 53 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5775 2400 20 20 5775 2400 5795 2400 54 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5850 2400 20 20 5850 2400 5870 2400 55 -6 56 6 5625 3600 5925 3750 57 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5700 3675 20 20 5700 3675 5720 3675 58 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5775 3675 20 20 5775 3675 5795 3675 59 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5850 3675 20 20 5850 3675 5870 3675 60 -6 61 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 62 2400 2100 2400 2550 63 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 64 2550 2100 2550 2550 65 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 66 2700 2100 2700 2550 67 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 68 2850 2100 2850 2550 69 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 70 3000 2100 3000 2550 71 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 72 3600 2100 3600 2550 73 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 74 3900 2100 3900 2550 75 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 76 4050 2100 4050 2550 77 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 78 4200 2100 4200 2550 79 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 80 4350 2100 4350 2550 81 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 82 4500 2100 4500 2550 83 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 84 3300 1500 3300 1800 85 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 86 3600 1500 3600 1800 87 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 88 3900 1500 3900 1800 89 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 90 3000 1500 4800 1500 4800 1800 3000 1800 3000 1500 80 5175 1275 5175 1575 81 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 82 1 1 1.00 45.00 90.00 83 5625 1275 5625 1575 84 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 85 1 1 1.00 45.00 90.00 86 3750 1275 3750 1575 91 87 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 2 92 88 1 1 1.00 45.00 90.00 93 3225 1650 2625 2100 89 3825 1275 3825 1575 90 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 91 2700 1950 6000 1950 92 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 93 2700 2100 6000 2100 94 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 95 2700 1800 6000 1800 6000 2250 2700 2250 2700 1800 94 96 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 95 97 1 1 1.00 45.00 90.00 96 3150 1650 2550 210098 2775 2175 2775 2400 97 99 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 98 100 1 1 1.00 45.00 90.00 99 3450 1650 4050 2100 100 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 2 101 1 1 1.00 45.00 90.00 102 3375 1650 3975 2100 103 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 104 2100 2100 2100 2550 105 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 106 1950 2250 3150 2250 107 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 108 3450 2250 4650 2250 101 2775 2475 2775 2700 109 102 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 110 1950 2100 3150 2100 3150 2550 1950 2550 1950 2100103 2700 2700 2850 2700 2850 2850 2700 2850 2700 2700 111 104 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 112 3450 2100 4650 2100 4650 2550 3450 2550 3450 2100 113 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 114 2250 2100 2250 2550 115 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 116 3750 2100 3750 2550 105 2700 2400 2850 2400 2850 2550 2700 2550 2700 2400 117 106 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 118 107 1 1 
1.00 45.00 90.00 119 2025 2475 2025 2700 120 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 121 1 1 1.00 45.00 90.00 122 2025 2775 2025 3000 108 4575 2175 4575 2400 123 109 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 124 1950 3000 2100 3000 2100 3150 1950 3150 1950 3000 125 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 126 1950 2700 2100 2700 2100 2850 1950 2850 1950 2700 110 4500 2400 5025 2400 5025 2550 4500 2550 4500 2400 127 111 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3 128 112 1 1 1.00 45.00 90.00 129 1950 3750 2700 3750 2700 3525113 3600 3375 4350 3375 4350 3150 130 114 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 131 1950 3525 3150 3525 3150 3900 1950 3900 1950 3525 132 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3 133 1 1 1.00 45.00 90.00 134 3450 3750 4200 3750 4200 3525 135 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 136 3450 3525 4650 3525 4650 3900 3450 3900 3450 3525 137 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3 138 1 1 1.00 45.00 90.00 139 3150 4650 4200 4650 4200 4275 140 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 141 3150 4275 4650 4275 4650 4875 3150 4875 3150 4275 142 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 143 1950 2400 3150 2400 144 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 145 3450 2400 4650 2400 146 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 147 5400 2100 5400 3900 148 4 2 0 50 -1 0 11 0.0000 2 120 300 1875 2250 lock\001 149 4 1 0 50 -1 0 12 0.0000 2 135 1935 3900 1425 N kernel-thread buckets\001 150 4 1 0 50 -1 0 12 0.0000 2 195 810 4425 2025 heap$_2$\001 151 4 1 0 50 -1 0 12 0.0000 2 195 810 2175 2025 heap$_1$\001 152 4 2 0 50 -1 0 11 0.0000 2 120 270 1875 2400 size\001 153 4 2 0 50 -1 0 11 0.0000 2 120 270 1875 2550 free\001 154 4 1 0 50 -1 0 12 0.0000 2 180 825 2550 3450 local pool\001 155 4 0 0 50 -1 0 12 0.0000 2 135 360 3525 3700 lock\001 156 4 0 0 50 -1 0 12 0.0000 2 135 360 3225 4450 lock\001 157 4 2 0 50 -1 0 12 0.0000 2 135 600 1875 3000 free list\001 158 4 1 0 50 -1 0 12 0.0000 2 180 825 4050 3450 local pool\001 159 4 1 0 50 -1 0 12 0.0000 2 180 1455 3900 4200 global pool (sbrk)\001 160 4 0 0 50 -1 0 12 0.0000 2 135 360 2025 3700 lock\001 161 4 1 0 50 -1 0 12 0.0000 2 180 720 6450 3150 free pool\001 162 4 1 0 50 -1 0 12 0.0000 2 180 390 6450 2925 heap\001 115 3600 3150 5100 3150 5100 3525 3600 3525 3600 3150 116 4 2 0 50 -1 0 11 0.0000 2 135 300 2625 1950 lock\001 117 4 1 0 50 -1 0 11 0.0000 2 150 1155 3000 1725 N$\\times$S$_1$\001 118 4 1 0 50 -1 0 11 0.0000 2 150 1155 3600 1725 N$\\times$S$_2$\001 119 4 1 0 50 -1 0 12 0.0000 2 180 390 4425 1500 heap\001 120 4 2 0 50 -1 0 12 0.0000 2 135 1140 2550 1425 kernel threads\001 121 4 2 0 50 -1 0 11 0.0000 2 120 270 2625 2100 size\001 122 4 2 0 50 -1 0 11 0.0000 2 120 270 2625 2250 free\001 123 4 2 0 50 -1 0 12 0.0000 2 135 600 2625 2700 free list\001 124 4 0 0 50 -1 0 12 0.0000 2 135 360 3675 3325 lock\001 125 4 1 0 50 -1 0 12 0.0000 2 180 1455 4350 3075 global pool (sbrk)\001 126 4 1 0 50 -1 0 11 0.0000 2 150 1110 4800 1725 N$\\times$S$_t$\001 -
doc/theses/mubeen_zulfiqar_MMath/figures/AllocDS2.fig
rba897d21 r2e9b59b 8 8 -2 9 9 1200 2 10 6 2850 2100 3150 2250 11 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 2925 2175 20 20 2925 2175 2945 2175 12 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3000 2175 20 20 3000 2175 3020 2175 13 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3075 2175 20 20 3075 2175 3095 2175 14 -6 15 6 4050 2100 4350 2250 16 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4125 2175 20 20 4125 2175 4145 2175 17 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 2175 20 20 4200 2175 4220 2175 18 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4275 2175 20 20 4275 2175 4295 2175 19 -6 20 6 4650 2100 4950 2250 21 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4725 2175 20 20 4725 2175 4745 2175 22 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4800 2175 20 20 4800 2175 4820 2175 23 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4875 2175 20 20 4875 2175 4895 2175 24 -6 25 6 3450 2100 3750 2250 26 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3525 2175 20 20 3525 2175 3545 2175 27 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3600 2175 20 20 3600 2175 3620 2175 28 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3675 2175 20 20 3675 2175 3695 2175 29 -6 30 6 3300 2175 3600 2550 10 6 2850 2475 3150 2850 31 11 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 32 12 1 1 1.00 45.00 90.00 33 3375 2175 3375 240013 2925 2475 2925 2700 34 14 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 35 3300 2400 3600 2400 3600 2550 3300 2550 3300 2400 15 2850 2700 3150 2700 3150 2850 2850 2850 2850 2700 16 -6 17 6 4350 2475 4650 2850 18 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 19 1 1 1.00 45.00 90.00 20 4425 2475 4425 2700 21 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 22 4350 2700 4650 2700 4650 2850 4350 2850 4350 2700 23 -6 24 6 3600 2475 3825 3150 25 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 26 1 1 1.00 45.00 90.00 27 3675 2475 3675 2700 28 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 29 3600 2700 3825 2700 3825 2850 3600 2850 3600 2700 30 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 31 3600 3000 3825 3000 3825 3150 3600 3150 3600 3000 32 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 33 1 1 1.00 45.00 90.00 34 3675 2775 3675 3000 35 -6 36 6 1950 3525 3150 3900 37 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3 38 1 1 1.00 45.00 90.00 39 1950 3750 2700 3750 2700 3525 40 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 41 1950 3525 3150 3525 3150 3900 1950 3900 1950 3525 42 4 0 0 50 -1 0 12 0.0000 2 135 360 2025 3700 lock\001 43 -6 44 6 4050 1575 4350 1725 45 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4125 1650 20 20 4125 1650 4145 1650 46 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 1650 20 20 4200 1650 4220 1650 47 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4275 1650 20 20 4275 1650 4295 1650 48 -6 49 6 4875 2325 6150 3750 50 6 4875 2325 5175 2475 51 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4950 2400 20 20 4950 2400 4970 2400 52 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5025 2400 20 20 5025 2400 5045 2400 53 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5100 2400 20 20 5100 2400 5120 2400 54 -6 55 6 4875 3600 5175 3750 56 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4950 3675 20 20 4950 3675 4970 3675 57 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5025 3675 20 20 5025 3675 5045 3675 58 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5100 3675 20 20 5100 3675 5120 3675 59 -6 60 4 1 0 50 -1 0 12 0.0000 2 180 900 5700 3150 local pools\001 61 4 1 0 50 -1 0 12 0.0000 2 180 465 5700 2925 heaps\001 62 -6 63 6 3600 4050 5100 4650 64 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3 65 1 1 1.00 45.00 90.00 66 3600 4500 4350 4500 4350 4275 67 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 68 3600 4275 5100 4275 5100 4650 3600 4650 3600 4275 69 4 1 0 50 -1 0 12 0.0000 2 180 1455 4350 4200 global pool (sbrk)\001 70 4 0 0 50 -1 0 12 0.0000 2 135 360 3675 4450 
lock\001 36 71 -6 37 72 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 38 3150 1800 3150 225073 2400 2100 2400 2550 39 74 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 40 2 850 1800 2850 225075 2550 2100 2550 2550 41 76 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 42 4650 1800 4650 225077 2700 2100 2700 2550 43 78 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 44 4950 1800 4950 2250 45 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 46 4500 1725 4500 2250 47 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 48 5100 1725 5100 2250 79 2850 2100 2850 2550 49 80 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 50 3 450 1800 3450 225081 3000 2100 3000 2550 51 82 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 52 3750 1800 3750 2250 53 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 54 3300 1725 3300 2250 55 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 56 3900 1725 3900 2250 83 3600 2100 3600 2550 57 84 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 58 5250 1800 5250 225085 3900 2100 3900 2550 59 86 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 60 5400 1800 5400 225087 4050 2100 4050 2550 61 88 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 62 5550 1800 5550 225089 4200 2100 4200 2550 63 90 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 64 5700 1800 5700 225091 4350 2100 4350 2550 65 92 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 66 5850 1800 5850 2250 67 2 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 68 2700 1725 2700 2250 93 4500 2100 4500 2550 94 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 95 3300 1500 3300 1800 96 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 97 3600 1500 3600 1800 98 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 99 3000 1500 4800 1500 4800 1800 3000 1800 3000 1500 69 100 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 70 101 1 1 1.00 45.00 90.00 71 3 375 1275 3375 1575102 3150 1650 2550 2100 72 103 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 73 104 1 1 1.00 45.00 90.00 74 2700 1275 2700 1575 75 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 2 76 1 1 1.00 45.00 90.00 77 2775 1275 2775 1575 105 3450 1650 4050 2100 106 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 107 2100 2100 2100 2550 108 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 109 1950 2250 3150 2250 110 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 111 3450 2250 4650 2250 112 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 113 1950 2100 3150 2100 3150 2550 1950 2550 1950 2100 114 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 115 3450 2100 4650 2100 4650 2550 3450 2550 3450 2100 116 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 117 2250 2100 2250 2550 118 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 119 3750 2100 3750 2550 78 120 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 79 121 1 1 1.00 45.00 90.00 80 5175 1275 5175 1575122 2025 2475 2025 2700 81 123 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 82 124 1 1 1.00 45.00 90.00 83 5625 1275 5625 1575 84 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 85 1 1 1.00 45.00 90.00 86 3750 1275 3750 1575 87 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 2 88 1 1 1.00 45.00 90.00 89 3825 1275 3825 1575 90 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 91 2700 1950 6000 1950 92 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 93 2700 2100 6000 2100 125 2025 2775 2025 3000 94 126 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 95 2700 1800 6000 1800 6000 2250 2700 2250 2700 1800 96 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 97 1 1 1.00 45.00 90.00 98 2775 2175 2775 2400 99 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 100 1 1 1.00 45.00 90.00 101 2775 2475 2775 2700 127 1950 3000 2100 3000 2100 3150 1950 3150 1950 3000 102 128 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 103 2700 2700 2850 2700 2850 2850 2700 2850 2700 2700 104 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 105 2700 2400 2850 2400 2850 2550 2700 2550 2700 2400 106 2 1 0 1 
0 7 50 -1 -1 0.000 0 0 -1 1 0 2 107 1 1 1.00 45.00 90.00 108 4575 2175 4575 2400 109 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 110 4500 2400 5025 2400 5025 2550 4500 2550 4500 2400 129 1950 2700 2100 2700 2100 2850 1950 2850 1950 2700 111 130 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3 112 131 1 1 1.00 45.00 90.00 113 3 600 3525 4650 3525 4650 3150132 3450 3750 4200 3750 4200 3525 114 133 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 115 3600 3150 5100 3150 5100 3750 3600 3750 3600 3150 116 4 2 0 50 -1 0 11 0.0000 2 120 300 2625 1950 lock\001 117 4 1 0 50 -1 0 10 0.0000 2 150 1155 3000 1725 N$\\times$S$_1$\001 118 4 1 0 50 -1 0 10 0.0000 2 150 1155 3600 1725 N$\\times$S$_2$\001 119 4 1 0 50 -1 0 12 0.0000 2 180 390 4425 1500 heap\001 120 4 2 0 50 -1 0 12 0.0000 2 135 1140 2550 1425 kernel threads\001 121 4 2 0 50 -1 0 11 0.0000 2 120 270 2625 2100 size\001 122 4 2 0 50 -1 0 11 0.0000 2 120 270 2625 2250 free\001 123 4 2 0 50 -1 0 12 0.0000 2 135 600 2625 2700 free list\001 124 4 0 0 50 -1 0 12 0.0000 2 135 360 3675 3325 lock\001 125 4 1 0 50 -1 0 12 0.0000 2 180 1455 4350 3075 global pool (sbrk)\001 126 4 1 0 50 -1 0 10 0.0000 2 150 1110 4800 1725 N$\\times$S$_t$\001 134 3450 3525 4650 3525 4650 3900 3450 3900 3450 3525 135 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 136 1950 2400 3150 2400 137 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 138 3450 2400 4650 2400 139 4 2 0 50 -1 0 11 0.0000 2 135 300 1875 2250 lock\001 140 4 1 0 50 -1 0 12 0.0000 2 180 1245 3900 1425 H heap buckets\001 141 4 1 0 50 -1 0 12 0.0000 2 180 810 4425 2025 heap$_2$\001 142 4 1 0 50 -1 0 12 0.0000 2 180 810 2175 2025 heap$_1$\001 143 4 2 0 50 -1 0 11 0.0000 2 120 270 1875 2400 size\001 144 4 2 0 50 -1 0 11 0.0000 2 120 270 1875 2550 free\001 145 4 1 0 50 -1 0 12 0.0000 2 180 825 2550 3450 local pool\001 146 4 0 0 50 -1 0 12 0.0000 2 135 360 3525 3700 lock\001 147 4 2 0 50 -1 0 12 0.0000 2 135 600 1875 3000 free list\001 148 4 1 0 50 -1 0 12 0.0000 2 180 825 4050 3450 local pool\001 -
doc/theses/mubeen_zulfiqar_MMath/intro.tex
rba897d21 r2e9b59b 48 48 Attempts have been made to perform quasi garbage collection in C/\CC~\cite{Boehm88}, but it is a compromise. 49 49 This thesis only examines dynamic memory-management with \emph{explicit} deallocation. 50 While garbage collection and compaction are not part this work, many of the results are applicable to the allocation phase in any memory-management approach.50 While garbage collection and compaction are not part this work, many of the work's results are applicable to the allocation phase in any memory-management approach. 51 51 52 52 Most programs use a general-purpose allocator, often the one provided implicitly by the programming-language's runtime. … … 65 65 \begin{enumerate}[leftmargin=*] 66 66 \item 67 Implementation of a new stand-lone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading). 68 69 \item 70 Adopt returning of @nullptr@ for a zero-sized allocation, rather than an actual memory address, both of which can be passed to @free@. 71 72 \item 73 Extended the standard C heap functionality by preserving with each allocation its original request size versus the amount allocated, if an allocation is zero fill, and the allocation alignment. 74 75 \item 76 Use the zero fill and alignment as \emph{sticky} properties for @realloc@, to realign existing storage, or preserve existing zero-fill and alignment when storage is copied. 67 Implementation of a new stand-lone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading). 68 69 \item 70 Adopt @nullptr@ return for a zero-sized allocation, rather than an actual memory address, which can be passed to @free@. 71 72 \item 73 Extend the standard C heap functionality by preserving with each allocation: 74 \begin{itemize}[itemsep=0pt] 75 \item 76 its request size plus the amount allocated, 77 \item 78 whether an allocation is zero fill, 79 \item 80 and allocation alignment. 81 \end{itemize} 82 83 \item 84 Use the preserved zero fill and alignment as \emph{sticky} properties for @realloc@ to zero-fill and align when storage is extended or copied. 77 85 Without this extension, it is unsafe to @realloc@ storage initially allocated with zero-fill/alignment as these properties are not preserved when copying. 78 86 This silent generation of a problem is unintuitive to programmers and difficult to locate because it is transient. … … 86 94 @resize( oaddr, alignment, size )@ re-purpose an old allocation with new alignment but \emph{without} preserving fill. 87 95 \item 88 @realloc( oaddr, alignment, size )@ same as previous@realloc@ but adding or changing alignment.96 @realloc( oaddr, alignment, size )@ same as @realloc@ but adding or changing alignment. 89 97 \item 90 98 @aalloc( dim, elemSize )@ same as @calloc@ except memory is \emph{not} zero filled. … … 96 104 97 105 \item 98 Provide additional heap wrapper functions in \CFA to provide a completeorthogonal set of allocation operations and properties.106 Provide additional heap wrapper functions in \CFA creating an orthogonal set of allocation operations and properties. 
99 107 100 108 \item … … 109 117 @malloc_size( addr )@ returns the size of the memory allocation pointed-to by @addr@. 110 118 \item 111 @malloc_usable_size( addr )@ returns the usable size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.119 @malloc_usable_size( addr )@ returns the usable (total) size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@. 112 120 \end{itemize} 113 121 … … 116 124 117 125 \item 118 Provide complete, fast, and contention-free allocation statistics to help understand programbehaviour:126 Provide complete, fast, and contention-free allocation statistics to help understand allocation behaviour: 119 127 \begin{itemize} 120 128 \item -
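A short usage sketch of the routines itemized above (written as \CFA-style C); it relies only on the behaviours described in this section, and the sizes are invented for illustration.
\begin{lstlisting}
int main() {
	int * p = aalloc( 10, sizeof(int) );		// like calloc( 10, sizeof(int) ), but storage is not zero filled
	size_t req = malloc_size( p );				// request size recorded with the allocation
	size_t usable = malloc_usable_size( p );	// bucket size actually reserved, so req <= usable
	p = realloc( p, 4096, 20 * sizeof(int) );	// grow and align to 4096; zero-fill/alignment are sticky
	p = resize( p, 64, 10 * sizeof(int) );		// re-purpose storage with new alignment, fill not preserved
	free( p );
}
\end{lstlisting}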
doc/theses/mubeen_zulfiqar_MMath/performance.tex
rba897d21 r2e9b59b 1 1 \chapter{Performance} 2 \label{c:Performance} 2 3 3 4 \section{Machine Specification} -
doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.bib
rba897d21 r2e9b59b 124 124 } 125 125 126 @misc{nedmalloc, 127 author = {Niall Douglas}, 128 title = {nedmalloc version 1.06 Beta}, 129 month = jan, 130 year = 2010, 131 note = {\textsf{http://\-prdownloads.\-sourceforge.\-net/\-nedmalloc/\-nedmalloc\_v1.06beta1\_svn1151.zip}}, 126 @misc{ptmalloc2, 127 author = {Wolfram Gloger}, 128 title = {ptmalloc version 2}, 129 month = jun, 130 year = 2006, 131 note = {\href{http://www.malloc.de/malloc/ptmalloc2-current.tar.gz}{http://www.malloc.de/\-malloc/\-ptmalloc2-current.tar.gz}}, 132 } 133 134 @misc{GNUallocAPI, 135 author = {GNU}, 136 title = {Summary of malloc-Related Functions}, 137 year = 2020, 138 note = {\href{https://www.gnu.org/software/libc/manual/html\_node/Summary-of-Malloc.html}{https://www.gnu.org/\-software/\-libc/\-manual/\-html\_node/\-Summary-of-Malloc.html}}, 139 } 140 141 @misc{SeriallyReusable, 142 author = {IBM}, 143 title = {Serially reusable programs}, 144 month = mar, 145 year = 2021, 146 note = {\href{https://www.ibm.com/docs/en/ztpf/1.1.0.15?topic=structures-serially-reusable-programs}{https://www.ibm.com/\-docs/\-en/\-ztpf/\-1.1.0.15?\-topic=structures-serially-reusable-programs}}, 147 } 148 149 @misc{librseq, 150 author = {Mathieu Desnoyers}, 151 title = {Library for Restartable Sequences}, 152 month = mar, 153 year = 2022, 154 note = {\href{https://github.com/compudj/librseq}{https://github.com/compudj/librseq}}, 132 155 } 133 156 -
doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex
rba897d21 r2e9b59b 60 60 % For hyperlinked PDF, suitable for viewing on a computer, use this: 61 61 \documentclass[letterpaper,12pt,titlepage,oneside,final]{book} 62 \usepackage[T1]{fontenc} % Latin-1 => 256-bit characters, => | not dash, <> not Spanish question marks 62 63 63 64 % For PDF, suitable for double-sided printing, change the PrintVersion variable below to "true" and use this \documentclass line instead of the one above: … … 94 95 % Use the "hyperref" package 95 96 % N.B. HYPERREF MUST BE THE LAST PACKAGE LOADED; ADD ADDITIONAL PKGS ABOVE 96 \usepackage[pagebackref=true]{hyperref} % with basic options 97 \usepackage{url} 98 \usepackage[dvips,pagebackref=true]{hyperref} % with basic options 97 99 %\usepackage[pdftex,pagebackref=true]{hyperref} 98 100 % N.B. pagebackref=true provides links back from the References to the body text. This can cause trouble for printing. … … 113 115 citecolor=blue, % color of links to bibliography 114 116 filecolor=magenta, % color of file links 115 urlcolor=blue % color of external links 117 urlcolor=blue, % color of external links 118 breaklinks=true 116 119 } 117 120 \ifthenelse{\boolean{PrintVersion}}{ % for improved print quality, change some hyperref options … … 122 125 urlcolor=black 123 126 }}{} % end of ifthenelse (no else) 127 %\usepackage[dvips,plainpages=false,pdfpagelabels,pdfpagemode=UseNone,pagebackref=true,breaklinks=true,colorlinks=true,linkcolor=blue,citecolor=blue,urlcolor=blue]{hyperref} 128 \usepackage{breakurl} 129 \urlstyle{sf} 124 130 125 131 %\usepackage[automake,toc,abbreviations]{glossaries-extra} % Exception to the rule of hyperref being the last add-on package … … 171 177 \input{common} 172 178 %\usepackageinput{common} 173 \CFAStyle % CFA code-style for all languages 179 \CFAStyle % CFA code-style 180 \lstset{language=CFA} % default language 174 181 \lstset{basicstyle=\linespread{0.9}\sf} % CFA typewriter font 175 182 \newcommand{\uC}{$\mu$\CC} -
doc/theses/thierry_delisle_PhD/thesis/Makefile
rba897d21 r2e9b59b 29 29 PICTURES = ${addsuffix .pstex, \ 30 30 base \ 31 base_avg \ 32 cache-share \ 33 cache-noshare \ 31 34 empty \ 32 35 emptybit \ … … 38 41 system \ 39 42 cycle \ 43 result.cycle.jax.ops \ 40 44 } 41 45 … … 112 116 python3 $< $@ 113 117 118 build/result.%.ns.svg : data/% | ${Build} 119 ../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops" 120 121 build/result.%.ops.svg : data/% | ${Build} 122 ../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second" 123 114 124 ## pstex with inverted colors 115 125 %.dark.pstex : fig/%.fig Makefile | ${Build} -
doc/theses/thierry_delisle_PhD/thesis/fig/base.fig
rba897d21 r2e9b59b 89 89 5700 5210 5550 4950 5250 4950 5100 5210 5250 5470 5550 5470 90 90 5700 5210 91 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2 92 3600 5700 3600 1200 93 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2 94 4800 5700 4800 1200 95 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2 96 6000 5700 6000 1200 91 97 4 2 -1 50 -1 0 12 0.0000 2 135 630 2100 3075 Threads\001 92 98 4 2 -1 50 -1 0 12 0.0000 2 165 450 2100 2850 Ready\001 -
doc/theses/thierry_delisle_PhD/thesis/glossary.tex
rba897d21 r2e9b59b 101 101 102 102 \longnewglossaryentry{at} 103 {name={fred}} 103 {name={task}} 104 104 { 105 105 Abstract object representing a unit of work. Systems will offer one or more concrete implementations of this concept (\eg \gls{kthrd}, \gls{job}), however, most scheduling concepts are independent of the particular implementation of the work representation. For this reason, this document uses the term \Gls{at} to mean any representation and not one in particular. -
doc/theses/thierry_delisle_PhD/thesis/local.bib
rba897d21 r2e9b59b 685 685 note = "[Online; accessed 9-February-2021]" 686 686 } 687 688 @misc{wiki:rcu, 689 author = "{Wikipedia contributors}", 690 title = "Read-copy-update --- {W}ikipedia{,} The Free Encyclopedia", 691 year = "2022", 692 url = "https://en.wikipedia.org/wiki/Linear_congruential_generator", 693 note = "[Online; accessed 12-April-2022]" 694 } 695 696 @misc{wiki:rwlock, 697 author = "{Wikipedia contributors}", 698 title = "Readers-writer lock --- {W}ikipedia{,} The Free Encyclopedia", 699 year = "2021", 700 url = "https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock", 701 note = "[Online; accessed 12-April-2022]" 702 } -
doc/theses/thierry_delisle_PhD/thesis/text/core.tex
rba897d21 r2e9b59b 3 3 Before discussing scheduling in general, where it is important to address systems that are changing states, this document discusses scheduling in a somewhat ideal scenario, where the system has reached a steady state. For this purpose, a steady state is loosely defined as a state where there are always \glspl{thrd} ready to run and the system has the resources necessary to accomplish the work, \eg, enough workers. In short, the system is neither overloaded nor underloaded. 4 4 5 I believe it is important to discuss the steady state first because it is the easiest case to handle and, relatedly, the case in which the best performance is to be expected. As such, when the system is either overloaded or underloaded, a common approach is to try to adapt the system to this new load and return to the steady state, \eg, by adding or removing workers. Therefore, flaws in scheduling the steady state can be pervasive in all states.5 It is important to discuss the steady state first because it is the easiest case to handle and, relatedly, the case in which the best performance is to be expected. As such, when the system is either overloaded or underloaded, a common approach is to try to adapt the system to this new load and return to the steady state, \eg, by adding or removing workers. Therefore, flaws in scheduling the steady state tend to be pervasive in all states. 6 6 7 7 \section{Design Goals} … … 25 25 It is important to note that these guarantees are expected only up to a point. \Glspl{thrd} that are ready to run should not be prevented to do so, but they still share the limited hardware resources. Therefore, the guarantee is considered respected if a \gls{thrd} gets access to a \emph{fair share} of the hardware resources, even if that share is very small. 26 26 27 Similarly the performance guarantee, the lack of interference among threads, is only relevant up to a point. Ideally, the cost of running and blocking should be constant regardless of contention, but the guarantee is considered satisfied if the cost is not \emph{too high} with or without contention. How much is an acceptable cost is obviously highly variable. For this document, the performance experimentation attempts to show the cost of scheduling is at worst equivalent to existing algorithms used in popular languages. This demonstration can be made by comparing applications built in \CFA to applications built with other languages or other models. Recall programmer expectation is that the impact of the scheduler can be ignored. Therefore, if the cost of scheduling is equivalent to or lower than other popular languages, I consider the guarantee achieved.27 Similarly the performance guarantee, the lack of interference among threads, is only relevant up to a point. Ideally, the cost of running and blocking should be constant regardless of contention, but the guarantee is considered satisfied if the cost is not \emph{too high} with or without contention. How much is an acceptable cost is obviously highly variable. For this document, the performance experimentation attempts to show the cost of scheduling is at worst equivalent to existing algorithms used in popular languages. This demonstration can be made by comparing applications built in \CFA to applications built with other languages or other models. Recall programmer expectation is that the impact of the scheduler can be ignored. Therefore, if the cost of scheduling is competitive with other popular languages, the guarantee will be considered achieved. 
28 28 29 29 More precisely the scheduler should be: … … 33 33 34 34 35 \subsection{Fairness vs Scheduler Locality} 35 \subsection{Fairness Goals} 36 For this work fairness will be considered as having two strongly related requirements: true starvation freedom and ``fast'' load balancing. 37 38 \paragraph{True starvation freedom} is more easily defined: As long as at least one \proc continues to dequeue \ats, all ready \ats should be able to run eventually. 39 In any running system, \procs can stop dequeuing \ats if they start running a \at that will simply never park. 40 Traditional work-stealing schedulers do not have starvation freedom in these cases. 41 Now this requirement raises the question, what about preemption? 42 Generally speaking preemption happens on the timescale of several milliseconds, which brings us to the next requirement: ``fast'' load balancing. 43 44 \paragraph{Fast load balancing} means that load balancing should happen faster than preemption would normally allow. 45 For interactive applications that need to run at 60, 90, 120 frames per second, \ats having to wait for several milliseconds to run are effectively starved. 46 Therefore load-balancing should be done at a faster pace, one that can detect starvation at the microsecond scale. 47 With that said, this is a much fuzzier requirement since it depends on the number of \procs, the number of \ats and the general load of the system. 48 49 \subsection{Fairness vs Scheduler Locality} \label{fairnessvlocal} 36 50 An important performance factor in modern architectures is cache locality. Waiting for data at lower levels or not present in the cache can have a major impact on performance. Having multiple \glspl{hthrd} writing to the same cache lines also leads to cache lines that must be waited on. It is therefore preferable to divide data among each \gls{hthrd}\footnote{This partitioning can be an explicit division up front or using data structures where different \glspl{hthrd} are naturally routed to different cache lines.}. 37 51 38 For a scheduler, having good locality\footnote{This section discusses \emph{internal locality}, \ie, the locality of the data used by the scheduler versus \emph{external locality}, \ie, how the data used by the application is affected by scheduling. External locality is a much more complicated subject and is discussed in part~\ref{Evaluation} on evaluation.}, \ie, having the data local to each \gls{hthrd}, generally conflicts with fairness. Indeed, good locality often requires avoiding the movement of cache lines, while fairness requires dynamically moving a \gls{thrd}, and as a consequence cache lines, to a \gls{hthrd} that is currently available.52 For a scheduler, having good locality\footnote{This section discusses \emph{internal locality}, \ie, the locality of the data used by the scheduler versus \emph{external locality}, \ie, how the data used by the application is affected by scheduling. External locality is a much more complicated subject and is discussed in the next section.}, \ie, having the data local to each \gls{hthrd}, generally conflicts with fairness. Indeed, good locality often requires avoiding the movement of cache lines, while fairness requires dynamically moving a \gls{thrd}, and as a consequence cache lines, to a \gls{hthrd} that is currently available.
39 53 40 54 However, I claim that in practice it is possible to strike a balance between fairness and performance because these goals do not necessarily overlap temporally, where Figure~\ref{fig:fair} shows a visual representation of this behaviour. As mentioned, some unfairness is acceptable; therefore it is desirable to have an algorithm that prioritizes cache locality as long as thread delay does not exceed the execution mental-model. … … 48 62 \end{figure} 49 63 50 \section{Design} 64 \subsection{Performance Challenges}\label{pref:challenge} 65 While there exist a multitude of potential scheduling algorithms, they generally always have to contend with the same performance challenges. Since these challenges are recurring themes in the design of a scheduler, it is relevant to describe the central ones here before looking at the design. 66 67 \subsubsection{Scalability} 68 The most basic performance challenge of a scheduler is scalability. 69 Given a large number of \procs and an even larger number of \ats, scalability measures how fast \procs can enqueue and dequeue \ats. 70 One could expect that doubling the number of \procs would double the rate at which \ats are dequeued, but contention on the internal data structure of the scheduler can limit the improvement. 71 While the ready-queue itself can be sharded to alleviate the main source of contention, auxiliary scheduling features, \eg counting ready \ats, can also be sources of contention. 72 73 \subsubsection{Migration Cost} 74 Another important source of latency in scheduling is migration. 75 An \at is said to have migrated if it is executed by two different \procs consecutively, which is the process discussed in \ref{fairnessvlocal}. 76 Migrations can have many different causes, but in certain programs it can be all but impossible to limit migrations. 77 Chapter~\ref{microbench}, for example, has a benchmark where any \at can potentially unblock any other \at, which can lead to \ats migrating more often than not. 78 Because of this, it is important to design the internal data structures of the scheduler to limit the latency penalty from migrations. 79 80 81 \section{Inspirations} 82 In general, a na\"{i}ve \glsxtrshort{fifo} ready-queue does not scale with increased parallelism from \glspl{hthrd}, resulting in decreased performance. The problem is adding/removing \glspl{thrd} is a single point of contention. As shown in the evaluation sections, most production schedulers do scale when adding \glspl{hthrd}. The solution to this problem is to shard the ready-queue: create multiple sub-ready-queues that multiple \glspl{hthrd} can access and modify without interfering. 52 83 53 Before going into the design of \CFA's scheduler proper, I want to discuss two sharding solutions which served as the inspiration for the scheduler in this thesis.84 Before going into the design of \CFA's scheduler proper, it is relevant to discuss two sharding solutions which served as the inspiration for the scheduler in this thesis. 54 85 55 86 \subsection{Work-Stealing} 56 87 57 As I mentioned in \ref{existing:workstealing}, a popular pattern to shard the ready-queue is work-stealing. As mentioned, in this pattern each \gls{proc} has its own ready-queue and \glspl{proc} only access each other's ready-queue if they run out of work. 58 The interesting aspect of work-stealing happens in easier scheduling cases, \ie enough work for everyone but no more and no load balancing needed. 
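To ground the work-stealing description above (which continues below), here is a deliberately simplified C sketch; the trivial locked queue and the names Proc, Task, and next_task are invented for illustration and are not the \CFA runtime's data structures.
\begin{lstlisting}
#include <pthread.h>
#include <stdlib.h>
#include <stddef.h>

#define QCAP 256
typedef void * Task;							// opaque unit of work

struct Queue {
	pthread_mutex_t lock;						// initialize with PTHREAD_MUTEX_INITIALIZER
	Task items[QCAP];
	int count;
};

int queue_push( struct Queue * q, Task t ) {	// owner enqueues newly ready work
	pthread_mutex_lock( &q->lock );
	int ok = q->count < QCAP;
	if ( ok ) q->items[q->count++] = t;
	pthread_mutex_unlock( &q->lock );
	return ok;
}

Task queue_pop( struct Queue * q ) {			// used by the owner and by thieves
	pthread_mutex_lock( &q->lock );
	Task t = q->count > 0 ? q->items[--q->count] : NULL;
	pthread_mutex_unlock( &q->lock );
	return t;
}

struct Proc { struct Queue local; };

// Fast path: the owner's queue, no migration. Slow path: steal from random
// victims, which only happens once the local queue is dry -- exactly why
// imbalance is not corrected until a processor runs out of work.
Task next_task( struct Proc * self, struct Proc * procs, int nprocs ) {
	Task t = queue_pop( &self->local );
	if ( t != NULL ) return t;
	for ( int attempt = 0; attempt < nprocs; attempt += 1 ) {
		struct Proc * victim = &procs[ rand() % nprocs ];
		if ( victim != self && ( t = queue_pop( &victim->local ) ) != NULL ) return t;
	}
	return NULL;								// no work found; caller may spin or block
}
\end{lstlisting}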
In these cases, work-stealing is close to optimal scheduling: it can achieve perfect locality and have no contention. 88 As mentioned in \ref{existing:workstealing}, a popular pattern to shard the ready-queue is work-stealing. 89 In this pattern each \gls{proc} has its own local ready-queue and \glspl{proc} only access each other's ready-queue if they run out of work on their local ready-queue. 90 The interesting aspect of work-stealing happens in the easier scheduling cases, \ie enough work for everyone but no more and no load balancing needed. 91 In these cases, work-stealing is close to optimal scheduling: it can achieve perfect locality and have no contention. 59 92 On the other hand, work-stealing schedulers only attempt to do load-balancing when a \gls{proc} runs out of work. 60 This means that the scheduler may never balance unfairness that does notresult in a \gls{proc} running out of work. 93 This means that the scheduler never balances unfair loads unless they result in a \gls{proc} running out of work. 61 94 Chapter~\ref{microbench} shows that in pathological cases this problem can lead to indefinite starvation. 62 95 63 96 64 Based on these observation, I conclude that\emph{perfect} scheduler should behave very similarly to work-stealing in the easy cases, but should have more proactive load-balancing if the need arises. 97 Based on these observations, the conclusion is that a \emph{perfect} scheduler should behave very similarly to work-stealing in the easy cases, but should have more proactive load-balancing if the need arises. 65 98 66 99 \subsection{Relaxed-Fifo} 67 100 An entirely different scheme is to create a ``relaxed-FIFO'' queue as in \todo{cite Trevor's paper}. This approach forgoes any ownership between \gls{proc} and ready-queue, and simply creates a pool of ready-queues from which the \glspl{proc} can pick. 68 101 \Glspl{proc} choose ready-queues at random, but timestamps are added to all elements of the queue and dequeues are done by picking two queues and dequeuing the oldest element. 102 All subqueues are protected by TryLocks and \procs simply pick a different subqueue if they fail to acquire the TryLock. 69 103 The result is a queue that has both decent scalability and sufficient fairness. 70 104 The lack of ownership means that as long as one \gls{proc} is still able to repeatedly dequeue elements, it is unlikely that any element will stay on the queue for much longer than any other element. … 75 109 76 110 While the fairness of this scheme is good, it does suffer in terms of performance. 77 It requires very wide sharding, \eg at least 4 queues per \gls{hthrd}, and the randomness means locality can suffer significantly and finding non-empty queues can be difficult. 78 79 \section{\CFA} 80 The \CFA is effectively attempting to merge these two approaches, keeping the best of both. 81 It is based on the 111 It requires very wide sharding, \eg at least 4 queues per \gls{hthrd}, and finding non-empty queues can be difficult if there are too few ready \ats. 112 113 \section{Relaxed-FIFO++} 114 Since it has inherent fairness qualities and decent performance in the presence of many \ats, the relaxed-FIFO queue appears to be a good candidate to form the basis of a scheduler. 115 The most obvious problem is workloads where the number of \ats is barely greater than the number of \procs. 116 In these situations, the wide sharding means most of the sub-queues from which the relaxed queue is formed will be empty.
117 The consequence is that when a dequeue operation attempts to pick a sub-queue at random, it is likely that it picks an empty sub-queue and will have to pick again. 118 This problem can repeat an unbounded number of times. 119 120 As this is the most obvious challenge, it is worth addressing first. 121 The obvious solution is to supplement each subqueue with some sharded data structure that keeps track of which subqueues are empty. 122 This data structure can take many forms, for example a simple bitmask or a binary tree that tracks which branches are empty. 123 Walking a binary tree on each pick has fairly good Big-O complexity and many modern architectures have powerful bitmask manipulation instructions. 124 However, precisely tracking which sub-queues are empty is actually fundamentally problematic. 125 The reason is that the subqueues are already a form of sharding and the sharding width has presumably already been chosen to avoid contention. 126 However, tracking which subqueues are empty is only useful if the tracking mechanism uses denser sharding than the subqueues, in which case it invariably creates a new source of contention. 127 But if the tracking mechanism is not denser than the sub-queues, then it is generally not useful because reading this new data structure risks being as costly as simply picking a sub-queue at random. 128 Early experiments with this approach have shown that even with low success rates, randomly picking a sub-queue can be faster than a simple tree walk. 129 130 The exception to this rule is using local tracking. 131 If each \proc keeps track locally of which sub-queue is empty, then this can be done with a very dense data structure without introducing a new source of contention. 132 The consequence of local tracking however, is that the information is not complete. 133 Each \proc is only aware of the last state it saw for each subqueue and has no information about freshness. 134 Even on systems with low \gls{hthrd} count, \eg 4 or 8, this can quickly lead to the local information being no better than the random pick. 135 This is due in part to the cost of maintaining this information and to its poor quality. 136 137 However, using a very low cost approach to local tracking may actually be beneficial. 138 If the local tracking is no more costly than the random pick, then \emph{any} improvement to the success rate, however small, leads to a performance benefit. 139 This leads to the following approach: 140 141 \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/} 142 The Relaxed-FIFO approach can be made to handle the case of mostly empty sub-queues by tweaking the \glsxtrlong{prng}. 143 The \glsxtrshort{prng} state can be seen as containing a list of all the future sub-queues that will be accessed. 144 While this is not particularly useful on its own, the consequence is that if the \glsxtrshort{prng} algorithm can be run \emph{backwards}, then the state also contains a list of all the subqueues that were accessed. 145 Luckily, bidirectional \glsxtrshort{prng} algorithms do exist, for example some Linear Congruential Generators\cit{https://en.wikipedia.org/wiki/Linear\_congruential\_generator} support running the algorithm backwards while offering good quality and performance. 146 This particular \glsxtrshort{prng} can be used as follows: 147 148 Each \proc maintains two \glsxtrshort{prng} states, which will be referred to as \texttt{F} and \texttt{B}.
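As a concrete illustration of such a bidirectional generator, the sketch below shows a 64-bit linear congruential generator that can be stepped forwards and backwards; it only illustrates the technique with the well-known MMIX constants and is not the generator actually used in \CFA.
\begin{lstlisting}
// Sketch: a 64-bit LCG stepped forwards and backwards.
// Any odd multiplier is invertible modulo 2^64, so the backward step always exists.
#include <stdint.h>

static const uint64_t A = 6364136223846793005ULL;  // multiplier (Knuth's MMIX constants)
static const uint64_t C = 1442695040888963407ULL;  // increment

// modular inverse of the odd multiplier modulo 2^64, via Newton's iteration
static uint64_t inverse(uint64_t a) {
	uint64_t x = a;                 // correct to 3 bits, since a*a == 1 (mod 8)
	for (int i = 0; i < 5; i++)     // each iteration doubles the number of correct bits
		x *= 2 - a * x;
	return x;
}

static inline uint64_t lcg_next(uint64_t s) { return A * s + C; }            // run forward
static inline uint64_t lcg_prev(uint64_t s) { return inverse(A) * (s - C); } // undo lcg_next
\end{lstlisting}
With both directions available at the same cost, replaying recently picked subqueues becomes essentially free.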
149 150 When a \proc attempts to dequeue a \at, it picks the subqueues by running \texttt{B} backwards. 151 When a \proc attempts to enqueue a \at, it runs \texttt{F} forward to pick the subqueue to enqueue to. 152 If the enqueue is successful, the state \texttt{B} is overwritten with the content of \texttt{F}. 153 154 The result is that each \proc will tend to dequeue \ats that it has itself enqueued. 155 When most sub-queues are empty, this technique increases the odds of finding \ats at very low cost, while also offering an improvement on locality in many cases. 156 157 However, while this approach does notably improve performance in many cases, this algorithm is still not competitive with work-stealing algorithms. 158 The fundamental problem is that the constant randomness limits how much locality the scheduler offers. 159 This becomes problematic both because the scheduler is likely to get cache misses on internal data-structures and because migrations become very frequent. 160 Therefore, since modifying the relaxed-FIFO algorithm to behave more like work stealing does not seem to pan out, the alternative is to do it the other way around. 161 162 \section{Work Stealing++} 163 Adding stronger fairness guarantees to workstealing requires a few changes. 164 First, the relaxed-FIFO algorithm has fundamentally better fairness because each \proc always monitors all subqueues. 165 Therefore the workstealing algorithm must be prepended with some monitoring. 166 Before attempting to dequeue from a \proc's local queue, the \proc must make some effort to make sure remote queues are not being neglected. 167 To make this possible, \procs must be able to determine which \at has been on the ready-queue the longest. 168 This is the second aspect that must be added. 169 The relaxed-FIFO approach uses timestamps for each \at and this is also what is done here. 170 82 171 \begin{figure} 83 172 \centering 84 173 \input{base.pstex_t} 85 \caption[Base \CFA design]{Base \CFA design \smallskip\newline A list of sub-ready queues offers the sharding, two per \glspl{proc}. However, \glspl{proc} can access any of the sub-queues.} 174 \caption[Base \CFA design]{Base \CFA design \smallskip\newline A pool of sub-ready-queues offers the sharding, two per \gls{proc}. Each \gls{proc} has local subqueues; however, \glspl{proc} can access any of the sub-queues. Each \at is timestamped when enqueued.} 86 175 \label{fig:base} 87 176 \end{figure} 88 89 90 91 % The common solution to the single point of contention is to shard the ready-queue so each \gls{hthrd} can access the ready-queue without contention, increasing performance. 92 93 % \subsection{Sharding} \label{sec:sharding} % An interesting approach to sharding a queue is presented in \cit{Trevors paper}. This algorithm presents a queue with a relaxed \glsxtrshort{fifo} guarantee using an array of strictly \glsxtrshort{fifo} sublists as shown in Figure~\ref{fig:base}. Each \emph{cell} of the array has a timestamp for the last operation and a pointer to a linked-list with a lock. Each node in the list is marked with a timestamp indicating when it is added to the list. A push operation is done by picking a random cell, acquiring the list lock, and pushing to the list. If the cell is locked, the operation is simply retried on another random cell until a lock is acquired. A pop operation is done in a similar fashion except two random cells are picked. If both cells are unlocked with non-empty lists, the operation pops the node with the oldest timestamp.
If one of the cells is unlocked and non-empty, the operation pops from that cell. If both cells are either locked or empty, the operation picks two new random cells and tries again. 95 96 % \begin{figure} 97 % \centering 98 % \input{base.pstex_t} 99 % \caption[Relaxed FIFO list]{Relaxed FIFO list \smallskip\newline List at the base of the scheduler: an array of strictly FIFO lists. The timestamp is in all nodes and cell arrays.} 100 % \label{fig:base} 101 % \end{figure} 102 103 % \subsection{Finding threads} 104 % Once threads have been distributed onto multiple queues, identifying empty queues becomes a problem. Indeed, if the number of \glspl{thrd} does not far exceed the number of queues, it is probable that several of the cell queues are empty. Figure~\ref{fig:empty} shows an example with 2 \glspl{thrd} running on 8 queues, where the chances of getting an empty queue is 75\% per pick, meaning two random picks yield a \gls{thrd} only half the time. This scenario leads to performance problems since picks that do not yield a \gls{thrd} are not useful and do not necessarily help make more informed guesses. 105 106 % \begin{figure} 107 % \centering 108 % \input{empty.pstex_t} 109 % \caption[``More empty'' Relaxed FIFO list]{``More empty'' Relaxed FIFO list \smallskip\newline Emptier state of the queue: the array contains many empty cells, that is strictly FIFO lists containing no elements.} 110 % \label{fig:empty} 111 % \end{figure} 112 113 % There are several solutions to this problem, but they ultimately all have to encode if a cell has an empty list. My results show the density and locality of this encoding is generally the dominating factor in these scheme. Classic solutions to this problem use one of three techniques to encode the information: 114 115 % \paragraph{Dense Information} Figure~\ref{fig:emptybit} shows a dense bitmask to identify the cell queues currently in use. This approach means processors can often find \glspl{thrd} in constant time, regardless of how many underlying queues are empty. Furthermore, modern x86 CPUs have extended bit manipulation instructions (BMI2) that allow searching the bitmask with very little overhead compared to the randomized selection approach for a filled ready queue, offering good performance even in cases with many empty inner queues. However, this technique has its limits: with a single word\footnote{Word refers here to however many bits can be written atomically.} bitmask, the total amount of ready-queue sharding is limited to the number of bits in the word. With a multi-word bitmask, this maximum limit can be increased arbitrarily, but the look-up time increases. Finally, a dense bitmap, either single or multi-word, causes additional contention problems that reduces performance because of cache misses after updates. This central update bottleneck also means the information in the bitmask is more often stale before a processor can use it to find an item, \ie mask read says there are available \glspl{thrd} but none on queue when the subsequent atomic check is done. 
116 117 % \begin{figure} 118 % \centering 119 % \vspace*{-5pt} 120 % {\resizebox{0.75\textwidth}{!}{\input{emptybit.pstex_t}}} 121 % \vspace*{-5pt} 122 % \caption[Underloaded queue with bitmask]{Underloaded queue with bitmask indicating array cells with items.} 123 % \label{fig:emptybit} 124 125 % \vspace*{10pt} 126 % {\resizebox{0.75\textwidth}{!}{\input{emptytree.pstex_t}}} 127 % \vspace*{-5pt} 128 % \caption[Underloaded queue with binary search-tree]{Underloaded queue with binary search-tree indicating array cells with items.} 129 % \label{fig:emptytree} 130 131 % \vspace*{10pt} 132 % {\resizebox{0.95\textwidth}{!}{\input{emptytls.pstex_t}}} 133 % \vspace*{-5pt} 134 % \caption[Underloaded queue with per processor bitmask]{Underloaded queue with per processor bitmask indicating array cells with items.} 135 % \label{fig:emptytls} 136 % \end{figure} 137 138 % \paragraph{Sparse Information} Figure~\ref{fig:emptytree} shows an approach using a hierarchical tree data-structure to reduce contention and has been shown to work in similar cases~\cite{ellen2007snzi}. However, this approach may lead to poorer performance due to the inherent pointer chasing cost while still allowing significant contention on the nodes of the tree if the tree is shallow. 139 140 % \paragraph{Local Information} Figure~\ref{fig:emptytls} shows an approach using dense information, similar to the bitmap, but each \gls{hthrd} keeps its own independent copy. While this approach can offer good scalability \emph{and} low latency, the liveliness and discovery of the information can become a problem. This case is made worst in systems with few processors where even blind random picks can find \glspl{thrd} in a few tries. 141 142 % I built a prototype of these approaches and none of these techniques offer satisfying performance when few threads are present. All of these approach hit the same 2 problems. First, randomly picking sub-queues is very fast. That speed means any improvement to the hit rate can easily be countered by a slow-down in look-up speed, whether or not there are empty lists. Second, the array is already sharded to avoid contention bottlenecks, so any denser data structure tends to become a bottleneck. In all cases, these factors meant the best cases scenario, \ie many threads, would get worst throughput, and the worst-case scenario, few threads, would get a better hit rate, but an equivalent poor throughput. As a result I tried an entirely different approach. 143 144 % \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/} 145 % In the worst-case scenario there are only few \glspl{thrd} ready to run, or more precisely given $P$ \glspl{proc}\footnote{For simplicity, this assumes there is a one-to-one match between \glspl{proc} and \glspl{hthrd}.}, $T$ \glspl{thrd} and $\epsilon$ a very small number, than the worst case scenario can be represented by $T = P + \epsilon$, with $\epsilon \ll P$. It is important to note in this case that fairness is effectively irrelevant. Indeed, this case is close to \emph{actually matching} the model of the ``Ideal multi-tasking CPU'' on page \pageref{q:LinuxCFS}. In this context, it is possible to use a purely internal-locality based approach and still meet the fairness requirements. This approach simply has each \gls{proc} running a single \gls{thrd} repeatedly. Or from the shared ready-queue viewpoint, each \gls{proc} pushes to a given sub-queue and then pops from the \emph{same} subqueue. 
The challenge is for the the scheduler to achieve good performance in both the $T = P + \epsilon$ case and the $T \gg P$ case, without affecting the fairness guarantees in the later. 146 147 % To handle this case, I use a \glsxtrshort{prng}\todo{Fix missing long form} in a novel way. There exist \glsxtrshort{prng}s that are fast, compact and can be run forward \emph{and} backwards. Linear congruential generators~\cite{wiki:lcg} are an example of \glsxtrshort{prng}s of such \glsxtrshort{prng}s. The novel approach is to use the ability to run backwards to ``replay'' the \glsxtrshort{prng}. The scheduler uses an exclusive \glsxtrshort{prng} instance per \gls{proc}, the random-number seed effectively starts an encoding that produces a list of all accessed subqueues, from latest to oldest. Replaying the \glsxtrshort{prng} to identify cells accessed recently and which probably have data still cached. 148 149 % The algorithm works as follows: 150 % \begin{itemize} 151 % \item Each \gls{proc} has two \glsxtrshort{prng} instances, $F$ and $B$. 152 % \item Push and Pop operations occur as discussed in Section~\ref{sec:sharding} with the following exceptions: 153 % \begin{itemize} 154 % \item Push operations use $F$ going forward on each try and on success $F$ is copied into $B$. 155 % \item Pop operations use $B$ going backwards on each try. 156 % \end{itemize} 157 % \end{itemize} 158 159 % The main benefit of this technique is that it basically respects the desired properties of Figure~\ref{fig:fair}. When looking for work, a \gls{proc} first looks at the last cell they pushed to, if any, and then move backwards through its accessed cells. As the \gls{proc} continues looking for work, $F$ moves backwards and $B$ stays in place. As a result, the relation between the two becomes weaker, which means that the probablisitic fairness of the algorithm reverts to normal. Chapter~\ref{proofs} discusses more formally the fairness guarantees of this algorithm. 160 161 % \section{Details} 177 The algorithm is structured as shown in Figure~\ref{fig:base}. 178 This is very similar to classic workstealing except the local queues are placed in an array so \procs can access each other's queues in constant time. 179 The sharding width can be adjusted based on need. 180 When a \proc attempts to dequeue a \at, it first picks a random remote queue and compares its timestamp to the timestamps of the local queue(s), dequeuing from the remote queue if needed. 181 182 Implemented as naively as stated above, this approach has some obvious performance problems. 183 First, it is necessary to have some damping effect on helping. 184 Random effects like cache misses and preemption can add spurious but short bursts of latency for which helping is not helpful, pun intended. 185 The effect of these bursts would be to cause more migrations than needed and slow this workstealing approach down to match the relaxed-FIFO approach. 186 187 \begin{figure} 188 \centering 189 \input{base_avg.pstex_t} 190 \caption[\CFA design with Moving Average]{\CFA design with Moving Average \smallskip\newline A moving average is added to each subqueue.} 191 \label{fig:base-ma} 192 \end{figure} 193 194 A simple solution to this problem is to compare an exponential moving average\cit{https://en.wikipedia.org/wiki/Moving\_average\#Exponential\_moving\_average} instead of the raw timestamps, as shown in Figure~\ref{fig:base-ma}.
195 Note that this is slightly more complex than it sounds because, since the \at at the head of a subqueue is still waiting, its wait time has not ended. 196 Therefore the exponential moving average is actually an exponential moving average of how long each already dequeued \at has waited. 197 To compare subqueues, the timestamp at the head must be compared to the current time, yielding the best-case wait time for the \at at the head of the queue. 198 This new wait time is then averaged with the stored average. 199 To further limit the amount of unnecessary migration, a bias can be added towards the local queue, where a remote queue is helped only if its moving average is more than \emph{X} times the local queue's average. 200 None of the experiments that I have run with these schedulers seem to indicate that the choice of the weight for the moving average or the choice of bias is particularly important. 201 Weights and biases of similar \emph{magnitudes} have similar effects. 202 203 With these additions to workstealing, scheduling can be made as fair as the relaxed-FIFO approach, while avoiding the majority of unnecessary migrations. 204 Unfortunately, the performance of this approach does suffer in cases with no risk of starvation. 205 The problem is that the constant polling of remote subqueues generally entails a cache miss. 206 To make things worse, the more active a remote subqueue is, \ie the more frequently \ats are enqueued onto and dequeued from it, the higher the chances are that polling will incur a cache miss. 207 Conversely, active subqueues do not benefit much from helping since starvation is already a non-issue. 208 This puts the algorithm in an awkward situation where it is paying a cost, but the cost itself suggests the operation was unnecessary. 209 The good news is that this problem can be mitigated. 210 211 \subsection{Redundant Timestamps} 212 The problem with polling remote queues is due to a tension between two different consistency requirements. 213 For the subqueues, correctness is critical. There must be a consensus among \procs on which subqueues hold which \ats. 214 Since the timestamps are used for fairness, it is also important to have consensus on which \at is the oldest. 215 However, when deciding if a remote subqueue is worth polling, correctness is much less of a problem. 216 Since the only requirement is that a subqueue is eventually polled, some data staleness is acceptable. 217 This leads to a tension where stale timestamps are only problematic in some cases. 218 Furthermore, stale timestamps can be somewhat desirable since lower freshness requirements mean less pressure on the cache-coherence protocol. 219 220 221 \begin{figure} 222 \centering 223 % \input{base_ts2.pstex_t} 224 \caption[\CFA design with Redundant Timestamps]{\CFA design with Redundant Timestamps \smallskip\newline An array is added containing a copy of the timestamps. These timestamps are written with relaxed atomics, without fencing, leading to fewer cache invalidations.} 225 \label{fig:base-ts2} 226 \end{figure} 227 A solution to this is to create a second array containing a copy of the timestamps and averages. 228 This copy is updated \emph{after} the subqueue's critical sections using relaxed atomics. 229 \Glspl{proc} now check if polling is needed by comparing the copy of the remote timestamp instead of the actual timestamp. 230 The result is that since there is no fencing, the writes can be buffered and cause fewer cache invalidations.
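As a rough sketch of this idea (the names and layout below are illustrative assumptions, not the actual \CFA data structures), the copy can be published with C11 relaxed atomics after the critical section and read the same way by remote \procs:
\begin{lstlisting}
// Sketch: a stale-but-cheap copy of the per-subqueue timestamp and moving average,
// published with relaxed atomics so no fence is emitted and stores can stay buffered.
#include <stdatomic.h>
#include <stdint.h>

struct ts_copy {
	_Atomic(uint64_t) ts;   // copy of the head timestamp
	_Atomic(uint64_t) avg;  // copy of the moving average
};

// called after the subqueue's critical section, with values read under the lock
static inline void publish_copy(struct ts_copy * c, uint64_t ts, uint64_t avg) {
	atomic_store_explicit(&c->ts,  ts,  memory_order_relaxed);
	atomic_store_explicit(&c->avg, avg, memory_order_relaxed);
}

// a remote proc decides whether a subqueue is worth polling using only the copy
static inline int worth_helping(struct ts_copy * c, uint64_t local_avg, uint64_t bias) {
	return atomic_load_explicit(&c->avg, memory_order_relaxed) > bias * local_avg;
}
\end{lstlisting}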
231 232 The correctness argument here is somewhat subtle. 233 The data used for deciding whether or not to poll a queue can be stale as long as it does not cause starvation. 234 Therefore, it is acceptable if stale data makes queues appear older than they really are, but not fresher. 235 For the timestamps, this means that missed writes to the timestamp are acceptable since they make the head \at look older. 236 For the moving average, as long as the operations are RW-safe, the average is guaranteed to yield a value that is between the oldest and newest values written. 237 Therefore this unprotected read of the timestamp and average satisfies the limited correctness that is required. 238 239 \begin{figure} 240 \centering 241 \input{cache-share.pstex_t} 242 \caption[CPU design with wide L3 sharing]{CPU design with wide L3 sharing \smallskip\newline A very simple CPU with 4 \glspl{hthrd}. L1 and L2 are private to each \gls{hthrd} but the L3 is shared across the entire CPU.} 243 \label{fig:cache-share} 244 \end{figure} 245 246 \begin{figure} 247 \centering 248 \input{cache-noshare.pstex_t} 249 \caption[CPU design with a narrower L3 sharing]{CPU design with a narrower L3 sharing \smallskip\newline A different CPU design, still with 4 \glspl{hthrd}. L1 and L2 are still private to each \gls{hthrd}, but the L3 is only shared by part of the CPU: there are two distinct L3 instances.} 250 \label{fig:cache-noshare} 251 \end{figure} 252 253 With redundant timestamps, this scheduling algorithm achieves both the fairness and performance requirements on some machines. 254 The problem is that the cost of polling and helping is not necessarily consistent across each \gls{hthrd}. 255 For example, on machines where the motherboard holds multiple CPUs, cache misses can be satisfied from a cache that belongs to the CPU that missed, the \emph{local} CPU, or by a different CPU, a \emph{remote} one. 256 Cache misses that are satisfied by a remote CPU have higher latency than those satisfied by the local CPU. 257 However, this is not specific to systems with multiple CPUs. 258 Depending on the cache structure, cache misses can have different latencies on the same CPU. 259 The AMD EPYC 7662 CPU that is described in Chapter~\ref{microbench} is an example of this. 260 Figure~\ref{fig:cache-share} and Figure~\ref{fig:cache-noshare} show two different cache topologies which highlight this difference. 261 In Figure~\ref{fig:cache-share}, all cache instances are either private to a \gls{hthrd} or shared by the entire system, which means the latency due to cache misses is likely fairly consistent. 262 By comparison, in Figure~\ref{fig:cache-noshare} misses in the L2 cache can be satisfied by a hit in either instance of the L3. 263 However, the memory access latency to the remote L3 instance will be notably higher than the memory access latency to the local L3. 264 The impact of these different designs on this algorithm is that scheduling scales very well on architectures similar to Figure~\ref{fig:cache-share}, but has notably worse scaling with many narrower L3 instances. 265 This is simply because as the number of L3 instances grows, so too do the chances that random helping will cause significant latency. 266 The solution is to have the scheduler be aware of the cache topology. 267 268 \subsection{Per CPU Sharding} 269 Building a scheduler that is aware of cache topology poses two main challenges: discovering the cache topology and matching \procs to cache instances.
270 Sadly, there is no standard portable way to discover cache topology in C. 271 Therefore, while this is a significant portability challenge, it is outside the scope of this thesis to design a cross-platform cache discovery mechanism. 272 The rest of this work assumes the cache topology is discovered from Linux's \texttt{/sys/devices/system/cpu} directory. 273 This leaves the challenge of matching \procs to cache instances, or more precisely, identifying which subqueues of the ready queue are local to which cache instance. 274 Once this matching is available, the helping algorithm can be changed to add a bias so that \procs more often help subqueues local to the same cache instance 275 \footnote{Note that like other biases mentioned in this section, the actual bias value does not appear to need precise tuning.}. 276 277 The obvious approach to mapping cache instances to subqueues is to statically tie subqueues to CPUs. 278 Instead of having each subqueue local to a specific \proc, the system is initialized with subqueues for each \gls{hthrd} up front. 279 Then \procs dequeue and enqueue by first asking which CPU id they are running on, in order to identify which subqueues are the local ones. 280 \Glspl{proc} can get the CPU id from \texttt{sched\_getcpu} or \texttt{librseq}. 281 282 This approach solves the performance problems on systems with topologies similar to Figure~\ref{fig:cache-noshare}. 283 However, it actually causes some subtle fairness problems in some systems, specifically systems with few \procs and many \glspl{hthrd}. 284 In these cases, the large number of subqueues and the bias against subqueues tied to different cache instances make it very unlikely that any specific subqueue is picked. 285 To make things worse, the small number of \procs means that few helping attempts are made. 286 This combination of few attempts and low chances means that a \at stranded on a subqueue that is not actively dequeued from may wait a very long time before it is randomly helped. 287 On a system with 2 \procs, 256 \glspl{hthrd} with narrow cache sharing, and a 100:1 bias, it can actually take multiple seconds for a \at to get dequeued from a remote queue. 288 Therefore, a more dynamic matching of subqueues to cache instances is needed. 289 290 \subsection{Topological Work Stealing} 291 The approach that is used in the \CFA scheduler is to have per-\proc subqueues, but have an explicit data structure track which cache instance each subqueue is tied to. 292 This requires some finesse because reading this data structure must lead to fewer cache misses than not having the data structure in the first place. 293 A key element however is that, like the timestamps for helping, reading the cache instance mapping only needs to give the correct result \emph{often enough}. 294 Therefore the algorithm can be built as follows: Before enqueuing or dequeuing a \at, each \proc queries the CPU id and the corresponding cache instance. 295 Since subqueues are tied to \procs, each \proc can then update the cache instance mapped to the local subqueue(s). 296 To avoid unnecessary cache line invalidation, the map is only written to if the mapping changes. 297 -
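A minimal sketch of this lazy map update, assuming a \texttt{cpu\_to\_cache} table filled at startup from \texttt{/sys/devices/system/cpu} (the names and layout are illustrative, not the actual \CFA code), looks as follows:
\begin{lstlisting}
// Sketch of the lazy subqueue-to-cache-instance map.
#define _GNU_SOURCE
#include <sched.h>        // sched_getcpu()
#include <stdatomic.h>

extern int cpu_to_cache[];          // assumed: CPU id -> cache instance, filled from sysfs
extern _Atomic int queue_cache[];   // cache instance currently recorded for each subqueue

// called by a proc before enqueuing to / dequeuing from its local subqueue `q`
static inline void refresh_cache_map(int q) {
	int cache = cpu_to_cache[ sched_getcpu() ];
	// only write when the mapping changed, to avoid needless cache-line invalidations
	if (atomic_load_explicit(&queue_cache[q], memory_order_relaxed) != cache)
		atomic_store_explicit(&queue_cache[q], cache, memory_order_relaxed);
}
\end{lstlisting}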
doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex
rba897d21 r2e9b59b 3 3 The first step of evaluation is always to test-out small controlled cases, to ensure that the basics are working properly. 4 4 This section presents five different experimental setups, evaluating some of the basic features of \CFA's scheduler. 5 6 \section{Benchmark Environment} 7 All of these benchmarks are run on two distinct hardware environments, an AMD and an Intel machine. 8 9 \paragraph{AMD} The AMD machine is a server with two AMD EPYC 7662 CPUs and 256GB of DDR4 RAM. 10 The server runs Ubuntu 20.04.2 LTS on top of Linux Kernel 5.8.0-55. 11 These EPYCs have 64 cores per CPU and 2 \glspl{hthrd} per core, for a total of 256 \glspl{hthrd}. 12 The CPUs each have 4 MB, 64 MB and 512 MB of L1, L2 and L3 caches respectively. 13 Each L1 and L2 instance is only shared by the \glspl{hthrd} on a given core, but each L3 instance is shared by 4 cores, therefore 8 \glspl{hthrd}. 14 15 \paragraph{Intel} The Intel machine is a server with four Intel Xeon Platinum 8160 CPUs and 384GB of DDR4 RAM. 16 The server runs Ubuntu 20.04.2 LTS on top of Linux Kernel 5.8.0-55. 17 These Xeon Platinums have 24 cores per CPU and 2 \glspl{hthrd} per core, for a total of 192 \glspl{hthrd}. 18 The CPUs each have 3 MB, 96 MB and 132 MB of L1, L2 and L3 caches respectively. 19 Each L1 and L2 instance is only shared by the \glspl{hthrd} on a given core, but each L3 instance is shared across the entire CPU, therefore 48 \glspl{hthrd}. 20 21 This limited sharing of the last-level cache on the AMD machine is markedly different from the Intel machine. Indeed, while on both architectures L2 cache misses that are served by an L3 cache on a different CPU incur a significant latency, on AMD cache misses served by a different L3 instance on the same CPU also incur high latency. 22 5 23 6 24 \section{Cycling latency} … 31 49 \end{figure} 32 50 33 \todo{check term ``idle sleep handling''} 34 51 To prevent this benchmark from being dominated by the idle-sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available. 35 52 Beyond this point, adding more rings further mitigates the idle-sleep handling. 36 This is to avoid the case where one of the worker \glspl{at} runs out of work because of the variation on the number of ready \glspl{at} mentionned above. 53 This is to avoid the case where one of the \glspl{proc} runs out of work because of the variation in the number of ready \glspl{at} mentioned above. 37 54 38 55 The actual benchmark is more complicated to handle termination, but that simply requires using a binary semaphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition. 39 56 40 \todo{code, setup, results} 41 57 \begin{lstlisting} 42 58 Thread.main() { … 52 68 \end{lstlisting} 53 69 70 \begin{figure} 71 \centering 72 \input{result.cycle.jax.ops.pstex_t} 73 \vspace*{-10pt} 74 \label{fig:cycle:ns:jax} 75 \end{figure} 54 76 55 77 \section{Yield} -
doc/theses/thierry_delisle_PhD/thesis/text/existing.tex
rba897d21 r2e9b59b 2 2 Scheduling is the process of assigning resources to incoming requests. 3 3 A very common form of this is assigning available workers to work-requests. 4 The need for scheduling is very common in Computer Science, \eg Operating Systems and Hypervisors schedule available CPUs, NICs schedule available bamdwith, but itis also common in other fields. 5 For example, assmebly lines are an example of scheduling where parts needed assembly are assigned to line workers. 4 The need for scheduling is very common in Computer Science, \eg Operating Systems and Hypervisors schedule available CPUs, NICs schedule available bandwidth, but scheduling is also common in other fields. 5 For example, in assembly lines, assigning parts in need of assembly to line workers is a form of scheduling. 6 6 7 7 In all these cases, the choice of a scheduling algorithm generally depends first and foremost on how much information is available to the scheduler. … 15 15 16 16 \section{Naming Convention} 17 Scheduling has been studied by various different communities concentrating on different incarnation of the same problems. As a result, their is no real naming convention for scheduling that is respected across these communities. For this document, I will use the term \newterm{task} to refer to the abstract objects being scheduled and the term \newterm{worker} to refer to the objects which will execute these tasks. 17 Scheduling has been studied by various different communities concentrating on different incarnations of the same problems. As a result, there is no real naming convention for scheduling that is respected across these communities. For this document, I will use the term \newterm{\Gls{at}} to refer to the abstract objects being scheduled and the term \newterm{\Gls{proc}} to refer to the objects which will execute these \glspl{at}. 18 18 19 19 \section{Static Scheduling} 20 Static schedulers require that tasks have their dependencies and costs explicitly and exhaustively specified prior schedule. 20 Static schedulers require that \glspl{at} have their dependencies and costs explicitly and exhaustively specified prior to scheduling. 21 21 The scheduler then processes this input ahead of time and produces a \newterm{schedule} to which the system can later adhere. 22 22 This approach is generally popular in real-time systems since the need for strong guarantees justifies the cost of supplying this information. … 26 26 27 27 \section{Dynamic Scheduling} 28 It may be difficult to fulfill the requirements of static scheduler if dependencies are conditionnal. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of halting or suspending a task with unfulfilled dependencies and adding one or more new task(s) to the system. The new task(s) have the responsability of adding the dependent task back in the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only taskswe no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}. 28 It may be difficult to fulfill the requirements of a static scheduler if dependencies are conditional. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of adding one or more new \glspl{at} to the system as their dependencies are resolved, as well as potentially halting or suspending a \gls{at} that dynamically detects unfulfilled dependencies.
Each \gls{at} has the responsibility of adding the dependent \glspl{at} back in the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only \glspl{at} with no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}. 29 29 30 30 \subsection{Explicitly Informed Dynamic Schedulers} 31 While dynamic schedulers do not have access to an exhaustive list of dependencies for a task, they may require to provide more or less information about each task, including for example: expected duration, required ressources, relative importance, etc. The scheduler can then use this information to direct the scheduling decisions. \cit{Examples of schedulers with more information} Precisely providing this information can be difficult for programmers, especially \emph{predicted} behaviour, and the scheduler may need to support some amount of imprecision in the provided information. For example, specifying that a tasks takes approximately 5 seconds to complete, rather than exactly 5 seconds. User provided information can also become a significant burden depending how the effort to provide the information scales with the number of tasks and there complexity. For example, providing an exhaustive list of files read by 5 tasks is an easier requirement the providing an exhaustive list of memory addresses accessed by 10'000 distinct tasks. 31 While dynamic schedulers do not have access to an exhaustive list of dependencies for a \gls{at}, they may require the programmer to provide more or less information about each \gls{at}, including for example: expected duration, required resources, relative importance, etc. The scheduler can then use this information to direct the scheduling decisions. \cit{Examples of schedulers with more information} Precisely providing this information can be difficult for programmers, especially \emph{predicted} behaviour, and the scheduler may need to support some amount of imprecision in the provided information. For example, specifying that a \gls{at} takes approximately 5 seconds to complete, rather than exactly 5 seconds. User-provided information can also become a significant burden depending on how the effort to provide the information scales with the number of \glspl{at} and their complexity. For example, providing an exhaustive list of files read by 5 \glspl{at} is an easier requirement than providing an exhaustive list of memory addresses accessed by 10'000 distinct \glspl{at}. 32 32 33 33 Since the goal of this thesis is to provide a scheduler as a replacement for \CFA's existing \emph{uninformed} scheduler, Explicitly Informed schedulers are less relevant to this project. Nevertheless, some strategies are worth mentioning. 34 34 35 35 \subsubsection{Priority Scheduling} 36 A commonly used information that schedulers used to direct the algorithm is priorities. Each Task is given a priority and higher-priority tasks are preferred to lower-priority ones. The simplest priority scheduling algorithm is to simply require that every task have a distinct pre-established priority and always run the available task with the highest priority. Asking programmers to provide an exhaustive set of unique priorities can be prohibitive when the system has a large number of tasks. It can therefore be diserable for schedulers to support tasks with identical priorities and/or automatically setting and adjusting priorites for tasks.
36 A common source of information that schedulers use to direct their algorithm is priorities. Each \gls{at} is given a priority and higher-priority \glspl{at} are preferred to lower-priority ones. The simplest priority scheduling algorithm is to simply require that every \gls{at} has a distinct pre-established priority and always run the available \gls{at} with the highest priority. Asking programmers to provide an exhaustive set of unique priorities can be prohibitive when the system has a large number of \glspl{at}. It can therefore be desirable for schedulers to support \glspl{at} with identical priorities and/or automatically setting and adjusting priorities for \glspl{at}. Most common operating systems use some variation on priorities with overlaps and dynamic priority adjustments. For example, Microsoft Windows uses a pair of priorities 37 \cit{https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities,https://docs.microsoft.com/en-us/windows/win32/taskschd/taskschedulerschema-priority-settingstype-element}, one specified by users out of ten possible options and one adjusted by the system. 38 38 39 39 \subsection{Uninformed and Self-Informed Dynamic Schedulers} 40 Several scheduling algorithms do not require programmers to provide additionnal information on each task, and instead make scheduling decisions based solely on internal state and/or information implicitly gathered by the scheduler. 40 Several scheduling algorithms do not require programmers to provide additional information on each \gls{at}, and instead make scheduling decisions based solely on internal state and/or information implicitly gathered by the scheduler. 41 41 42 42 43 43 \subsubsection{Feedback Scheduling} 44 As mentionned, Schedulers may also gather information about each tasks to direct their decisions. This design effectively moves the scheduler to some extent into the realm of \newterm{Control Theory}\cite{wiki:controltheory}. This gathering does not generally involve programmers and as such does not increase programmer burden the same way explicitly provided information may. However, some feedback schedulers do offer the option to programmers to offer additionnal information on certain tasks, in order to direct scheduling decision. The important distinction being whether or not the scheduler can function without this additionnal information. 44 As mentioned, schedulers may also gather information about each \gls{at} to direct their decisions. This design effectively moves the scheduler to some extent into the realm of \newterm{Control Theory}\cite{wiki:controltheory}. This gathering does not generally involve programmers and as such does not increase programmer burden the same way explicitly provided information may. However, some feedback schedulers do offer programmers the option to provide additional information on certain \glspl{at}, in order to direct scheduling decisions. The important distinction is whether or not the scheduler can function without this additional information. 45 45 46 46 \section{Work Stealing}\label{existing:workstealing} 47 One of the most popular scheduling algorithm in practice (see~\ref{existing:prod}) is work-stealing. This idea, introduce by \cite{DBLP:conf/fpca/BurtonS81}, effectively has each worker work on its local tasks first, but allows the possibility for other workers to steal local tasks if they run out of tasks.
\cite{DBLP:conf/focs/Blumofe94} introduced the more familiar incarnation of this, where each workers has queue of tasks to accomplish and workers without tasks steal tasks from random workers. (The Burton and Sleep algorithm had trees of tasksand stole only among neighbours). Blumofe and Leiserson also prove worst case space and time requirements for well-structured computations. 48 \cite{DBLP:conf/focs/Blumofe94} introduced the more familiar incarnation of this, where each worker has a queue of \glspl{at} to accomplish and workers without \glspl{at} steal \glspl{at} from random workers. (The Burton and Sleep algorithm had trees of \glspl{at} and stole only among neighbours). Blumofe and Leiserson also prove worst-case space and time requirements for well-structured computations. 48 49 49 50 Many variations of this algorithm have been proposed over the years\cite{DBLP:journals/ijpp/YangH18}, both optimizations of existing implementations and approaches that account for new metrics. … 51 52 \paragraph{Granularity} A significant portion of early Work Stealing research concentrated on \newterm{Implicit Parallelism}\cite{wiki:implicitpar}. Since the system was responsible for splitting the work, granularity is a challenge that cannot be left to the programmers (as opposed to \newterm{Explicit Parallelism}\cite{wiki:explicitpar} where the burden can be left to programmers). In general, fine granularity is better for load balancing and coarse granularity reduces communication overhead. The best performance generally means finding a middle ground between the two. Several methods can be employed, but I believe these are less relevant for threads, which are generally explicit and more coarse grained. 53 \paragraph{Task Placement} Since modern computers rely heavily on cache hierarchies\cit{Do I need a citation for this}, migrating tasksfrom one core to another can be . \cite{DBLP:journals/tpds/SquillanteL93} 54 \paragraph{Task Placement} Since modern computers rely heavily on cache hierarchies\cit{Do I need a citation for this}, migrating \glspl{at} from one core to another can be costly. \cite{DBLP:journals/tpds/SquillanteL93} 55 56 \todo{The survey is not great on this subject} … 58 59 59 60 \subsection{Theoretical Results} 60 There is also a large body of research on the theoretical aspects of work stealing. These evaluate, for example, the cost of migration\cite{DBLP:conf/sigmetrics/SquillanteN91,DBLP:journals/pe/EagerLZ86}, how affinity affects performance\cite{DBLP:journals/tpds/SquillanteL93,DBLP:journals/mst/AcarBB02,DBLP:journals/ipl/SuksompongLS16} and theoretical models for heterogenous systems\cite{DBLP:journals/jpdc/MirchandaneyTS90,DBLP:journals/mst/BenderR02,DBLP:conf/sigmetrics/GastG10}. \cite{DBLP:journals/jacm/BlellochGM99} examine the space bounds of Work Stealing and \cite{DBLP:journals/siamcomp/BerenbrinkFG03} show that for underloaded systems, the scheduler will complete computations in finite time, \ie is \newterm{stable}.
Others show that Work-Stealing is applicable to various scheduling contexts\cite{DBLP:journals/mst/AroraBP01,DBLP:journals/anor/TchiboukdjianGT13,DBLP:conf/isaac/TchiboukdjianGTRB10,DBLP:conf/ppopp/AgrawalLS10,DBLP:conf/spaa/AgrawalFLSSU14}. \cite{DBLP:conf/ipps/ColeR13} also studied how Randomized Work Stealing affects false sharing among tasks. 61 There is also a large body of research on the theoretical aspects of work stealing. These evaluate, for example, the cost of migration\cite{DBLP:conf/sigmetrics/SquillanteN91,DBLP:journals/pe/EagerLZ86}, how affinity affects performance\cite{DBLP:journals/tpds/SquillanteL93,DBLP:journals/mst/AcarBB02,DBLP:journals/ipl/SuksompongLS16} and theoretical models for heterogeneous systems\cite{DBLP:journals/jpdc/MirchandaneyTS90,DBLP:journals/mst/BenderR02,DBLP:conf/sigmetrics/GastG10}. \cite{DBLP:journals/jacm/BlellochGM99} examine the space bounds of Work Stealing and \cite{DBLP:journals/siamcomp/BerenbrinkFG03} show that for underloaded systems, the scheduler will complete computations in finite time, \ie is \newterm{stable}. Others show that Work-Stealing is applicable to various scheduling contexts\cite{DBLP:journals/mst/AroraBP01,DBLP:journals/anor/TchiboukdjianGT13,DBLP:conf/isaac/TchiboukdjianGTRB10,DBLP:conf/ppopp/AgrawalLS10,DBLP:conf/spaa/AgrawalFLSSU14}. \cite{DBLP:conf/ipps/ColeR13} also studied how Randomized Work Stealing affects false sharing among \glspl{at}. 62 62 63 63 However, as \cite{DBLP:journals/ijpp/YangH18} highlights, it is worth mentioning that this theoretical research has mainly focused on ``fully-strict'' computations, \ie workloads that can be fully represented with a Directed Acyclic Graph. It is unclear how well these distributions represent workloads in real-world scenarios. 64 64 65 65 \section{Preemption} 66 One last aspect of scheduling worth mentionning is preemption since many schedulers rely on it for some of their guarantees. Preemption is the idea of interrupting tasks that have been running for too long, effectively injecting suspend points in the applications. There are multiple techniques to achieve this but they all aim to have the effect of guaranteeing that suspend points in a task are never further apart than some fixed duration. While this helps schedulers guarantee that no taskswill unfairly monopolize a worker, preemption can effectively added to any scheduler. Therefore, the only interesting aspect of preemption for the design of scheduling is whether or not to require it. 66 One last aspect of scheduling worth mentioning is preemption since many schedulers rely on it for some of their guarantees. Preemption is the idea of interrupting \glspl{at} that have been running for too long, effectively injecting suspend points in the applications. There are multiple techniques to achieve this but they all aim to have the effect of guaranteeing that suspend points in a \gls{at} are never further apart than some fixed duration. While this helps schedulers guarantee that no \glspl{at} will unfairly monopolize a worker, preemption can effectively be added to any scheduler. Therefore, the only interesting aspect of preemption for the design of scheduling is whether or not to require it.
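As a minimal sketch of the mechanism (not taken from any of the schedulers discussed here; the names \texttt{should\_yield} and \texttt{check\_preemption} are invented for this example), preemption in a user-level runtime is often built from a periodic timer signal whose handler only marks the running task as due for a yield, which the runtime then checks at its injected suspend points:
\begin{lstlisting}
// Sketch: timer-based preemption for a user-level runtime.
#include <signal.h>
#include <string.h>
#include <sys/time.h>

static volatile sig_atomic_t should_yield;

static void alarm_handler(int sig) {
	(void)sig;
	should_yield = 1;                      // async-signal-safe: only set a flag
}

void start_preemption(long period_us) {
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = alarm_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);
	struct itimerval itv = {
		.it_interval = { .tv_sec = 0, .tv_usec = period_us },
		.it_value    = { .tv_sec = 0, .tv_usec = period_us },
	};
	setitimer(ITIMER_REAL, &itv, NULL);    // deliver SIGALRM every period_us
}

// polled by the runtime at injected suspend points
static inline int check_preemption(void) {
	if (!should_yield) return 0;
	should_yield = 0;
	return 1;                              // caller should yield to the scheduler
}
\end{lstlisting}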
66 67 67 68 \section{Schedulers in Production}\label{existing:prod} … 69 70 70 71 \subsection{Operating System Schedulers} 71 Operating System Schedulers tend to be fairly complex schedulers, they generally support some amount of real-time, aim to balance interactive and non-interactive tasks and support for multiple users sharing hardware without requiring these users to cooperate. Here are more details on a few schedulers used in the common operating systems: Linux, FreeBsd, Microsoft Windows and Apple's OS X. The information is less complete for operating systems behind closed source. 72 Operating System Schedulers tend to be fairly complex schedulers; they generally support some amount of real-time scheduling, aim to balance interactive and non-interactive \glspl{at}, and support multiple users sharing hardware without requiring these users to cooperate. Here are more details on a few schedulers used in the common operating systems: Linux, FreeBSD, Microsoft Windows and Apple's OS X. The information is less complete for operating systems behind closed source. 72 73 73 74 \paragraph{Linux's CFS} 74 The default scheduler used by Linux (the Completely Fair Scheduler)\cite{MAN:linux/cfs,MAN:linux/cfs2} is a feedback scheduler based on CPU time. For each processor, it constructs a Red-Black tree of tasks waiting to run, ordering them by amount of CPU time spent. The scheduler schedules the task that has spent the least CPU time. It also supports the concept of \newterm{Nice values}, which are effectively multiplicative factors on the CPU time spent. The ordering of tasks is also impacted by a group based notion of fairness, where tasks belonging to groups having spent less CPU time are preferred to tasks beloning to groups having spent more CPU time. Linux achieves load-balancing by regularly monitoring the system state\cite{MAN:linux/cfs/balancing} and using some heuristic on the load (currently CPU time spent in the last millisecond plus decayed version of the previous time slots\cite{MAN:linux/cfs/pelt}.). 75 The default scheduler used by Linux (the Completely Fair Scheduler)\cite{MAN:linux/cfs,MAN:linux/cfs2} is a feedback scheduler based on CPU time. For each processor, it constructs a Red-Black tree of \glspl{at} waiting to run, ordering them by the amount of CPU time spent. The scheduler schedules the \gls{at} that has spent the least CPU time. It also supports the concept of \newterm{Nice values}, which are effectively multiplicative factors on the CPU time spent. The ordering of \glspl{at} is also impacted by a group-based notion of fairness, where \glspl{at} belonging to groups having spent less CPU time are preferred to \glspl{at} belonging to groups having spent more CPU time. Linux achieves load-balancing by regularly monitoring the system state\cite{MAN:linux/cfs/balancing} and using some heuristic on the load (currently CPU time spent in the last millisecond plus a decayed version of the previous time slots\cite{MAN:linux/cfs/pelt}.). 75 76 76 \cite{DBLP:conf/eurosys/LoziLFGQF16} shows that Linux's CFS also does work-stealing to balance the workload of each processor, but the paper argues this aspect can be improved significantly.
The issues highlighted sem to stem from Linux's need to support fairness across tasks \emph{and} across users\footnote{Enforcing fairness across users means, for example, that given two users: one with a single task and the other with one thousand tasks, the user with a single taskdoes not receive one one thousandth of the CPU time.}, increasing the complexity. 77 \cite{DBLP:conf/eurosys/LoziLFGQF16} shows that Linux's CFS also does work-stealing to balance the workload of each processor, but the paper argues this aspect can be improved significantly. The issues highlighted seem to stem from Linux's need to support fairness across \glspl{at} \emph{and} across users\footnote{Enforcing fairness across users means, for example, that given two users, one with a single \gls{at} and the other with one thousand \glspl{at}, the user with a single \gls{at} does not receive one thousandth of the CPU time.}, increasing the complexity. 77 78 78 Linux also offers a FIFO scheduler, a real-time schedulerwhich runs the highest-priority task, and a round-robin scheduler, which is an extension of the fifo-scheduler that adds fixed time slices. \cite{MAN:linux/sched} 79 Linux also offers a FIFO scheduler, a real-time scheduler which runs the highest-priority \gls{at}, and a round-robin scheduler, which is an extension of the FIFO scheduler that adds fixed time slices. \cite{MAN:linux/sched} 79 80 80 81 \paragraph{FreeBSD} … 82 83 83 84 \paragraph{Windows(OS)} 84 Microsoft's Operating System's Scheduler\cite{MAN:windows/scheduler} is a feedback scheduler with priorities. It supports 32 levels of priorities, some of which are reserved for real-time and prviliged applications. It schedules tasks based on the highest priorities (lowest number) and how much cpu time each tasks have used. The scheduler may also temporarily adjust priorities after certain effects like the completion of I/O requests. 85 Microsoft's Operating System's Scheduler\cite{MAN:windows/scheduler} is a feedback scheduler with priorities. It supports 32 levels of priorities, some of which are reserved for real-time and privileged applications. It schedules \glspl{at} based on the highest priority (lowest number) and how much CPU time each \gls{at} has used. The scheduler may also temporarily adjust priorities after certain events like the completion of I/O requests. 85 86 86 87 \todo{load balancing} … 99 100 100 101 \subsection{User-Level Schedulers} 101 By comparison, user level schedulers tend to be simpler, gathering fewer metrics and avoid complex notions of fairness. Part of the simplicity is due to the fact that all tasks have the same user, and therefore cooperation is both feasible and probable. 102 By comparison, user-level schedulers tend to be simpler, gathering fewer metrics and avoiding complex notions of fairness. Part of the simplicity is due to the fact that all \glspl{at} have the same user, and therefore cooperation is both feasible and probable. 102 103 \paragraph{Go} 103 104 Go's scheduler uses a Randomized Work Stealing algorithm that has a global runqueue (\emph{GRQ}) and each processor (\emph{P}) has both a fixed-size runqueue (\emph{LRQ}) and a high-priority next ``chair'' holding a single element.\cite{GITHUB:go,YTUBE:go} Preemption is present, but only at function call boundaries. … 116 117 117 118 \paragraph{Intel\textregistered ~Threading Building Blocks} 118 \newterm{Thread Building Blocks}(TBB) is Intel's task parellelism\cite{wiki:taskparallel} framework. It runs tasks or \newterm{jobs}, schedulable objects that must always run to completion, on a pool of worker threads. TBB's scheduler is a variation of Randomized Work Stealing that also supports higher-priority graph-like dependencies\cite{MAN:tbb/scheduler}. It schedules tasks as follows (where \textit{t} is the last task completed):
It runs tasks or \newterm{jobs}, schedulable objects that must always run to completion, on a pool of worker threads. TBB's scheduler is a variation of Randomized Work Stealing that also supports higher-priority graph-like dependencies\cite{MAN:tbb/scheduler}. It schedules tasks as follows (where \textit{t} is the last taskcompleted):119 \newterm{Thread Building Blocks}(TBB) is Intel's task parellelism\cite{wiki:taskparallel} framework. It runs \newterm{jobs}, uninterruptable \glspl{at}, schedulable objects that must always run to completion, on a pool of worker threads. TBB's scheduler is a variation of Randomized Work Stealing that also supports higher-priority graph-like dependencies\cite{MAN:tbb/scheduler}. It schedules \glspl{at} as follows (where \textit{t} is the last \gls{at} completed): 119 120 \begin{displayquote} 120 121 \begin{enumerate} … … 136 137 137 138 \paragraph{Grand Central Dispatch} 138 This is an API produce by Apple\cit{Official GCD source} that offers task parellelism\cite{wiki:taskparallel}. Its distinctive aspect is that it uses multiple ``Dispatch Queues'', some of which are created by programmers. These queues each have their own local ordering guarantees, \eg taskson queue $A$ are executed in \emph{FIFO} order.139 This is an API produce by Apple\cit{Official GCD source} that offers task parellelism\cite{wiki:taskparallel}. Its distinctive aspect is that it uses multiple ``Dispatch Queues'', some of which are created by programmers. These queues each have their own local ordering guarantees, \eg \glspl{at} on queue $A$ are executed in \emph{FIFO} order. 139 140 140 141 \todo{load balancing and scheduling} -
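Both the Go and TBB schedulers described above share the same work-stealing core: pop from the local queue first, fall back to a shared queue, and only then steal from a randomly chosen victim. The sketch below shows that loop in plain C; the queue helpers (local_pop, global_pop, steal_from) and the worker layout are assumptions made for illustration, not the Go, TBB, or \CFA implementation.

\begin{lstlisting}
// Illustrative sketch of randomized work stealing; the queue helpers
// (local_pop, global_pop, steal_from) are assumed, not a real runtime's API.
typedef struct task task_t;

typedef struct worker {
	unsigned id;
	unsigned seed;                              // per-worker PRNG state, must be nonzero
} worker_t;

extern unsigned nworkers;                       // number of workers (assumed global)
extern task_t * local_pop (worker_t *);         // pop from this worker's own queue
extern task_t * global_pop(void);               // pop from the shared/global queue
extern task_t * steal_from(unsigned victim);    // steal from another worker's queue

static unsigned xorshift(unsigned * s) {        // cheap per-worker random numbers
	unsigned x = *s; x ^= x << 13; x ^= x >> 17; x ^= x << 5; return *s = x;
}

task_t * next_task(worker_t * w) {
	task_t * t;
	if ((t = local_pop(w)))  return t;          // 1. own run queue first
	if ((t = global_pop()))  return t;          // 2. then the global queue
	for (unsigned i = 0; i < 2 * nworkers; i++) {
		unsigned victim = xorshift(&w->seed) % nworkers;
		if (victim == w->id) continue;
		if ((t = steal_from(victim))) return t; // 3. finally, steal from a random victim
	}
	return 0;                                   // nothing found; the caller may idle
}
\end{lstlisting}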
doc/theses/thierry_delisle_PhD/thesis/text/io.tex
rba897d21 r2e9b59b 173 173 The consequence is that the amount of parallelism used to prepare submissions for the next system call is limited. 174 174 Beyond this limit, the length of the system call is the throughput limiting factor. 175 I concluded from early experiments that preparing submissions seems to take a bout as long as the system call itself, which means that with a single @io_uring@ instance, there is no benefit in terms of \io throughput to having more than two \glspl{hthrd}.175 I concluded from early experiments that preparing submissions seems to take at most as long as the system call itself, which means that with a single @io_uring@ instance, there is no benefit in terms of \io throughput to having more than two \glspl{hthrd}. 176 176 Therefore the design of the submission engine must manage multiple instances of @io_uring@ running in parallel, effectively sharding @io_uring@ instances. 177 177 Similarly to scheduling, this sharding can be done privately, \ie, one instance per \glspl{proc}, in decoupled pools, \ie, a pool of \glspl{proc} use a pool of @io_uring@ instances without one-to-one coupling between any given instance and any given \gls{proc}, or some mix of the two. … … 200 200 The only added complexity is that the number of SQEs is fixed, which means allocation can fail. 201 201 202 Allocation failures need to be pushed up to therouting algorithm: \glspl{thrd} attempting \io operations must not be directed to @io_uring@ instances without sufficient SQEs available.202 Allocation failures need to be pushed up to a routing algorithm: \glspl{thrd} attempting \io operations must not be directed to @io_uring@ instances without sufficient SQEs available. 203 203 Furthermore, the routing algorithm should block operations up-front if none of the instances have available SQEs. 204 204 … … 214 214 215 215 In the case of designating a \gls{thrd}, ideally, when multiple \glspl{thrd} attempt to submit operations to the same @io_uring@ instance, all requests would be batched together and one of the \glspl{thrd} would do the system call on behalf of the others, referred to as the \newterm{submitter}. 216 In practice however, it is important that the \io requests are not left pending indefinitely and as such, it may be required to have a current submitter and a next submitter.216 In practice however, it is important that the \io requests are not left pending indefinitely and as such, it may be required to have a ``next submitter'' that guarentees everything that is missed by the current submitter is seen by the next one. 217 217 Indeed, as long as there is a ``next'' submitter, \glspl{thrd} submitting new \io requests can move on, knowing that some future system call will include their request. 218 218 Once the system call is done, the submitter must also free SQEs so that the allocator can reused them. … … 223 223 If the submission side does not designate submitters, polling can also submit all SQEs as it is polling events. 224 224 A simple approach to polling is to allocate a \gls{thrd} per @io_uring@ instance and simply let the poller \glspl{thrd} poll their respective instances when scheduled. 225 This design is especially convenient for reasons explained in Chapter~\ref{practice}.226 225 227 226 With this pool of instances approach, the big advantage is that it is fairly flexible. 228 227 It does not impose restrictions on what \glspl{thrd} submitting \io operations can and cannot do between allocations and submissions. 
229 It also can gracefully handle srunning out of ressources, SQEs or the kernel returning @EBUSY@.228 It also can gracefully handle running out of resources, SQEs or the kernel returning @EBUSY@. 230 229 The downside to this is that many of the steps used for submitting need complex synchronization to work properly. 231 230 The routing and allocation algorithm needs to keep track of which ring instances have available SQEs, block incoming requests if no instance is available, prevent barging if \glspl{thrd} are already queued up waiting for SQEs and handle SQEs being freed. 232 231 The submission side needs to safely append SQEs to the ring buffer, correctly handle chains, make sure no SQE is dropped or left pending forever, notify the allocation side when SQEs can be reused and handle the kernel returning @EBUSY@. 233 All this synchronization may have a significant cost and, compare to the next approach presented, this synchronization is entirely overhead.232 All this synchronization may have a significant cost and, compared to the next approach presented, this synchronization is entirely overhead. 234 233 235 234 \subsubsection{Private Instances} 236 235 Another approach is to simply create one ring instance per \gls{proc}. 237 This alleviate the need for synchronization on the submissions, requiring only that \glspl{thrd} are not interrupted in between two submission steps.236 This alleviates the need for synchronization on the submissions, requiring only that \glspl{thrd} are not interrupted in between two submission steps. 238 237 This is effectively the same requirement as using @thread_local@ variables. 239 238 Since SQEs that are allocated must be submitted to the same ring, on the same \gls{proc}, this effectively forces the application to submit SQEs in allocation order … 331 330 \paragraph{Pending Allocations} can be more complicated to handle. 332 331 If the arbiter has available instances, the arbiter can attempt to directly hand over the instance and satisfy the request. 333 Otherwise 332 Otherwise it must hold onto the list of threads until SQEs are made available again. 333 This handling becomes that much more complex if pending allocations require more than one SQE, since the arbiter must make a decision between satisfying requests in FIFO ordering or satisfying requests for fewer SQEs first. 334 335 While this arbiter has the potential to solve many of the problems mentioned above, it also introduces a significant amount of complexity. 336 Tracking which processors are borrowing which instances and which instances have SQEs available ends up adding a significant synchronization prelude to any I/O operation. 337 Any submission must start with a handshake that pins the currently borrowed instance, if available. 338 An attempt to allocate is then made, but the arbiter can concurrently be attempting to allocate from the same instance from a different \gls{hthrd}. 339 Once the allocation is completed, the submission must still check that the instance is still borrowed before attempting to flush. 340 These extra synchronization steps end up having a similar cost to the multiple shared instances approach. 341 Furthermore, if the number of instances does not match the number of processors actively submitting I/O, the system can fall into a state where instances are constantly being revoked and end up cycling the processors, which leads to significant cache deterioration. 
342 Because of these reasons, this approach, which sounds promising on paper, does not improve on the private instance approach in practice. 343 344 \subsubsection{Private Instances V2} 345 334 346 335 347 … 394 406 Finally, the last important part of the \io subsystem is its interface. There are multiple approaches that can be offered to programmers, each with advantages and disadvantages. The new \io subsystem can replace the C runtime's API or extend it. In the latter case, the interface can go from very similar to vastly different. The following sections discuss some useful options using @read@ as an example. The standard Linux interface for C is: 395 408 396 @ssize_t read(int fd, void *buf, size_t count);@ .408 397 409 398 410 \subsection{Replacement} 399 Replacing the C \glsxtrshort{api} 411 Replacing the C \glsxtrshort{api} is the more intrusive and draconian approach. 412 The goal is to convince the compiler and linker to replace any calls to @read@ and direct them to the \CFA implementation instead of glibc's. 413 This has the advantage of potentially working transparently and supporting existing binaries without needing recompilation. 414 It also offers a, presumably, well-known and familiar API that C programmers can simply continue to work with. 415 However, this approach also entails a plethora of subtle technical challenges, which generally boil down to making a perfect replacement. 416 If the \CFA interface replaces only \emph{some} of the calls to glibc, then this can easily lead to esoteric concurrency bugs. 417 Since the gcc ecosystem does not offer a scheme for such a perfect replacement, this approach was rejected as being laudable but infeasible. 400 418 401 419 \subsection{Synchronous Extension} 420 Another interface option is to simply offer an interface that is different in name only. For example: 421 422 @ssize_t cfa_read(int fd, void *buf, size_t count);@ 423 424 \noindent This is much more feasible but still familiar to C programmers. 425 It comes with the caveat that any code attempting to use it must be recompiled, which can be a big problem considering the amount of existing legacy C binaries. 426 However, it has the advantage of implementation simplicity. 402 427 403 428 \subsection{Asynchronous Extension} 429 It is important to mention that there is a certain irony to using only synchronous, therefore blocking, interfaces for a feature often referred to as ``non-blocking'' \io. 430 A fairly traditional way of doing this is using futures\cit{wikipedia futures}. 431 A simple way of doing so is as follows: 432 433 @future(ssize_t) read(int fd, void *buf, size_t count);@ 434 435 \noindent Note that this approach is not necessarily the most idiomatic usage of futures. 436 The definition of read above ``returns'' the read content through an output parameter which cannot be synchronized on. 437 A more classical asynchronous API could look more like: 438 439 @future([ssize_t, void *]) read(int fd, size_t count);@ 440 441 \noindent However, this interface immediately introduces memory lifetime challenges since the call must effectively allocate a buffer to be returned. 442 Because of the performance implications of this, the first approach is considered preferable as it is more familiar to C programmers. 404 443 405 444 \subsection{Interface directly to \lstinline{io_uring}} 445 Finally, another interface that can be relevant is to simply expose the underlying \texttt{io\_uring} interface directly. 
For example: 446 447 @array(SQE, want) cfa_io_allocate(int want);@ 448 449 @void cfa_io_submit( const array(SQE, have) & );@ 450 451 \noindent This offers more flexibility to users wanting to fully use all of the \texttt{io\_uring} features. 452 However, it is not the most user-friendly option. 453 It obviously imposes a strong dependency between user code and \texttt{io\_uring}, while at the same time restricting users to usages that are compatible with how \CFA internally uses \texttt{io\_uring}. 454 455 -
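The future-returning @read@ discussed above can be approximated outside of \CFA with ordinary POSIX primitives. The sketch below only illustrates the shape of such an interface: the names (@io_future@, @async_read@) are invented here, the completion is simulated with a helper thread rather than the runtime's \texttt{io\_uring} machinery, and error handling is omitted.

\begin{lstlisting}
// Sketch of a future-based read() interface using POSIX threads.
// io_future/async_read are illustrative names, not the CFA runtime API.
#include <pthread.h>
#include <sys/types.h>
#include <unistd.h>

typedef struct {
	pthread_mutex_t lock;
	pthread_cond_t  ready;
	int             done;
	ssize_t         result;
} io_future;

static void future_fulfil(io_future * f, ssize_t r) {
	pthread_mutex_lock(&f->lock);
	f->result = r; f->done = 1;
	pthread_cond_broadcast(&f->ready);
	pthread_mutex_unlock(&f->lock);
}

static ssize_t future_wait(io_future * f) {        // block until the result is in
	pthread_mutex_lock(&f->lock);
	while (!f->done) pthread_cond_wait(&f->ready, &f->lock);
	ssize_t r = f->result;
	pthread_mutex_unlock(&f->lock);
	return r;
}

// A helper thread stands in for the I/O subsystem that would normally
// fulfil the future when the corresponding completion arrives.
struct read_req { io_future * f; int fd; void * buf; size_t count; };

static void * reader_thread(void * arg) {
	struct read_req * r = arg;
	future_fulfil(r->f, read(r->fd, r->buf, r->count));
	return 0;
}

static void async_read(io_future * f, struct read_req * r) {
	pthread_mutex_init(&f->lock, 0); pthread_cond_init(&f->ready, 0); f->done = 0;
	r->f = f;
	pthread_t t; pthread_create(&t, 0, reader_thread, r); pthread_detach(t);
}
\end{lstlisting}

A caller fills a @read_req@, calls @async_read@, keeps working, and later calls @future_wait@, mirroring the @future(ssize_t) read(...)@ shape discussed above.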
doc/theses/thierry_delisle_PhD/thesis/text/practice.tex
rba897d21 r2e9b59b 2 2 The scheduling algorithm described in Chapter~\ref{core} addresses scheduling in a stable state. 3 3 However, it does not address problems that occur when the system changes state. 4 Indeed the \CFA runtime, supports expanding and shrinking the number of KTHREAD\_place \todo{add kthrd to glossary}, both manually and, to some extentautomatically.4 Indeed the \CFA runtime supports expanding and shrinking the number of \procs, both manually and, to some extent, automatically. 5 5 This entails that the scheduling algorithm must support these transitions. 6 6 7 \section{Resizing} 7 More precisely, \CFA supports adding \procs using the RAII object @processor@. 8 These objects can be created at any time and can be destroyed at any time. 9 They are normally created as automatic stack variables, but this is not a requirement. 10 11 The consequence is that the scheduler and \io subsystems must support \procs coming in and out of existence. 12 13 \section{Manual Resizing} 14 The consequence of dynamically changing the number of \procs is that all internal arrays that are sized based on the number of \procs need to be \texttt{realloc}ed. 15 This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing because there is no guarantee the \proc causing the shrink had the highest index. Therefore indexes need to be reassigned to preserve contiguous indexes.}. 16 17 There are no performance requirements, within reason, for resizing since this is usually considered as part of setup and teardown. 18 However, this operation has strict correctness requirements since shrinking and idle sleep can easily lead to deadlocks. 19 It should also avoid as much as possible any effect on performance when the number of \procs remains constant. 20 This latter requirement prohibits simple solutions, like simply adding a global lock to these arrays. 21 22 \subsection{Read-Copy-Update} 23 One solution is to use the Read-Copy-Update\cite{wiki:rcu} pattern. 24 In this pattern, resizing is done by creating a copy of the internal data structures, updating the copy with the desired changes, and then attempting an Indiana Jones switch to replace the original with the copy. 25 This approach potentially has the advantage that it may not need any synchronization to do the switch. 26 The switch definitely implies a race where \procs could still use the previous, original, data structure after the copy was switched in. 27 The important question then becomes whether or not this race can be recovered from. 28 If the changes that arrived late can be transferred from the original to the copy then this solution works. 29 30 For linked-lists, dequeuing is somewhat of a problem. 31 Dequeuing from the original will not necessarily update the copy, which could lead to multiple \procs dequeuing the same \at. 32 Fixing this requires making the array contain pointers to subqueues rather than the subqueues themselves. 33 34 Another challenge is that the original must be kept until all \procs have witnessed the change. 35 This is a straightforward memory reclamation challenge, but it does mean that every operation will need \emph{some} form of synchronization. 36 If each of these operations does need synchronization, then it is possible a simpler solution achieves the same performance. 37 This is because, in addition to the classic challenge of memory reclamation, transferring the original data to the copy before reclaiming it poses additional challenges. 
38 Merging the subqueues while having a minimal impact on fairness and locality is especially challenging. 39 40 \subsection{Readers-Writer Lock} 41 A simpler approach would be to use a \newterm{Readers-Writer Lock}\cite{wiki:rwlock}, where the resizing requires acquiring the lock as a writer while simply enqueuing/dequeuing \ats requires acquiring the lock as a reader. 42 Using a Readers-Writer lock solves the problem of dynamically resizing and leaves the challenge of finding or building a lock with sufficiently good read-side performance. 43 Since this is not a very complex challenge and an ad-hoc solution is perfectly acceptable, building a Readers-Writer lock was the path taken. 44 45 To maximize reader scalability, the readers should not contend with each other when attempting to acquire and release the critical sections. 46 This effectively requires that each reader have its own piece of memory to mark as locked and unlocked. 47 Readers then acquire the lock by waiting for writers to finish the critical section and then acquiring their local spinlocks. 48 Writers acquire the global lock, so writers have mutual exclusion among themselves, and then acquire each of the local reader locks. 49 Acquiring all the local locks guarantees mutual exclusion between the readers and the writer, while the wait on the read side prevents readers from continuously starving the writer. 50 \todo{reference listings} 51 52 \begin{lstlisting} 53 void read_lock() { 54 // Step 1 : make sure no writers in 55 while write_lock { Pause(); } 56 57 // May need fence here 58 59 // Step 2 : acquire our local lock 60 while atomic_xchg( tls.lock ) { 61 Pause(); 62 } 63 } 64 65 void read_unlock() { 66 tls.lock = false; 67 } 68 \end{lstlisting} 69 70 \begin{lstlisting} 71 void write_lock() { 72 // Step 1 : lock global lock 73 while atomic_xchg( write_lock ) { 74 Pause(); 75 } 76 77 // Step 2 : lock per-proc locks 78 for t in all_tls { 79 while atomic_xchg( t.lock ) { 80 Pause(); 81 } 82 } 83 } 84 85 void write_unlock() { 86 // Step 1 : release local locks 87 for t in all_tls { 88 t.lock = false; 89 } 90 91 // Step 2 : release global lock 92 write_lock = false; 93 } 94 \end{lstlisting} 8 95 9 96 \section{Idle-Sleep} 97 In addition to users manually changing the number of \procs, it is desirable to support ``removing'' \procs when there are not enough \ats for all the \procs to be useful. 98 While manual resizing is expected to be rare, the number of \ats is expected to vary much more, which means \procs may need to be ``removed'' for only short periods of time. 99 Furthermore, race conditions that spuriously lead to the impression that no \ats are ready are actually common in practice. 100 Therefore \procs should not be actually \emph{removed} but simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready. 101 This state is referred to as \newterm{Idle-Sleep}. 102 103 Idle sleep effectively encompasses several challenges. 104 First, some data structure needs to keep track of all \procs that are in idle sleep. 105 Because idle sleep can be spurious, this data structure has strict performance requirements in addition to the strict correctness requirements. 106 Next, some tool must be used to block kernel threads (\glspl{kthrd}), \eg \texttt{pthread\_cond\_wait} or pthread semaphores. 107 The complexity here is to support \at parking and unparking, timers, \io operations and all other \CFA features with minimal complexity. 
108 Finally, idle sleep also includes a heuristic to determine the appropriate number of \procs to be in idle sleep at any given time. 109 This third challenge is however outside the scope of this thesis because developing a general heuristic is involved enough to justify its own work. 110 The \CFA scheduler simply follows the ``Race-to-Idle''\cit{https://doi.org/10.1137/1.9781611973099.100} approach, where a sleeping \proc is woken any time an \at becomes ready and \procs go to idle sleep anytime they run out of work. 111 112 113 \section{Tracking Sleepers} 114 Tracking which \procs are in idle sleep requires a data structure holding all the sleeping \procs, but more importantly it requires a concurrent \emph{handshake} so that no \at is stranded on a ready-queue with no active \proc. 115 The classic challenge occurs when an \at is made ready while a \proc is going to sleep: there is a race where the new \at may not see the sleeping \proc and the sleeping \proc may not see the ready \at. 116 117 Furthermore, the ``Race-to-Idle'' approach means that there is some 118 119 \section{Sleeping} 120 121 \subsection{Event FDs} 122 123 \subsection{Epoll} 124 125 \subsection{\texttt{io\_uring}} 126 127 \section{Reducing Latency} -
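The listings in the diff above give the reader-writer lock in pseudocode. One possible concrete rendering in C11 atomics is sketched below, assuming a fixed upper bound on readers and omitting the cache-line padding and registration logic a real implementation needs; it illustrates the technique, not the \CFA runtime's actual lock.

\begin{lstlisting}
// Per-reader readers-writer lock, sketched with C11 atomics.
// Fixed reader count, no padding or registration; illustrative only.
#include <stdatomic.h>
#include <stdbool.h>

#define MAX_READERS 128

static atomic_bool writer_lock;                 // global flag, writers exclude each other
static atomic_bool reader_locks[MAX_READERS];   // one flag per reader/processor

static inline void pause_(void) { /* cpu_relax / __builtin_ia32_pause() */ }

void read_lock(unsigned self) {
	// Step 1 : wait until no writer is in (or entering) the critical section
	while (atomic_load(&writer_lock)) pause_();
	// Step 2 : take our own flag; if a writer slipped in after step 1 it now
	// owns this flag, so the exchange below also waits for that writer to finish
	while (atomic_exchange(&reader_locks[self], true)) pause_();
}

void read_unlock(unsigned self) {
	atomic_store(&reader_locks[self], false);
}

void write_lock(void) {
	// Step 1 : writers exclude each other on the global flag
	while (atomic_exchange(&writer_lock, true)) pause_();
	// Step 2 : take every reader's flag, waiting out readers in their critical section
	for (unsigned i = 0; i < MAX_READERS; i++)
		while (atomic_exchange(&reader_locks[i], true)) pause_();
}

void write_unlock(void) {
	for (unsigned i = 0; i < MAX_READERS; i++)
		atomic_store(&reader_locks[i], false);
	atomic_store(&writer_lock, false);
}
\end{lstlisting}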
doc/theses/thierry_delisle_PhD/thesis/thesis.tex
rba897d21 r2e9b59b 202 202 203 203 \newcommand\io{\glsxtrshort{io}\xspace}% 204 \newcommand\at{\gls{at}\xspace}% 205 \newcommand\ats{\glspl{at}\xspace}% 206 \newcommand\proc{\gls{proc}\xspace}% 207 \newcommand\procs{\glspl{proc}\xspace}% 204 208 205 209 %====================================================================== -
libcfa/src/Makefile.am
rba897d21 r2e9b59b 58 58 bits/queue.hfa \ 59 59 bits/sequence.hfa \ 60 concurrency/iofwd.hfa \ 61 concurrency/barrier.hfa \ 60 62 containers/array.hfa \ 61 concurrency/iofwd.hfa \62 63 containers/list.hfa \ 63 64 containers/queueLockFree.hfa \ … … 119 120 concurrency/exception.hfa \ 120 121 concurrency/kernel.hfa \ 122 concurrency/kernel/cluster.hfa \ 121 123 concurrency/locks.hfa \ 122 124 concurrency/monitor.hfa \ … … 134 136 concurrency/io/call.cfa \ 135 137 concurrency/iofwd.hfa \ 136 concurrency/kernel _private.hfa \138 concurrency/kernel/private.hfa \ 137 139 concurrency/kernel/startup.cfa \ 138 140 concurrency/preemption.cfa \ -
libcfa/src/concurrency/coroutine.cfa
rba897d21 r2e9b59b 27 27 #include <unwind.h> 28 28 29 #include "kernel _private.hfa"29 #include "kernel/private.hfa" 30 30 #include "exception.hfa" 31 31 #include "math.hfa" -
libcfa/src/concurrency/io.cfa
rba897d21 r2e9b59b 41 41 #include "kernel.hfa" 42 42 #include "kernel/fwd.hfa" 43 #include "kernel_private.hfa" 43 #include "kernel/private.hfa" 44 #include "kernel/cluster.hfa" 44 45 #include "io/types.hfa" 45 46 … … 93 94 extern void __kernel_unpark( thread$ * thrd, unpark_hint ); 94 95 95 bool __cfa_io_drain( processor * proc ) { 96 /* paranoid */ verify( ! __preemption_enabled() ); 97 /* paranoid */ verify( ready_schedule_islocked() ); 98 /* paranoid */ verify( proc ); 99 /* paranoid */ verify( proc->io.ctx ); 100 101 // Drain the queue 102 $io_context * ctx = proc->io.ctx; 103 unsigned head = *ctx->cq.head; 104 unsigned tail = *ctx->cq.tail; 105 const __u32 mask = *ctx->cq.mask; 106 107 __u32 count = tail - head; 108 __STATS__( false, io.calls.drain++; io.calls.completed += count; ) 109 110 if(count == 0) return false; 111 112 for(i; count) { 113 unsigned idx = (head + i) & mask; 114 volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx]; 115 116 /* paranoid */ verify(&cqe); 117 118 struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data; 119 __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future ); 120 121 __kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL ); 122 } 123 124 __cfadbg_print_safe(io, "Kernel I/O : %u completed\n", count); 125 126 // Mark to the kernel that the cqe has been seen 127 // Ensure that the kernel only sees the new value of the head index after the CQEs have been read. 128 __atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST ); 129 130 /* paranoid */ verify( ready_schedule_islocked() ); 131 /* paranoid */ verify( ! __preemption_enabled() ); 132 133 return true; 134 } 135 136 bool __cfa_io_flush( processor * proc, int min_comp ) { 137 /* paranoid */ verify( ! __preemption_enabled() ); 138 /* paranoid */ verify( proc ); 139 /* paranoid */ verify( proc->io.ctx ); 140 141 __attribute__((unused)) cluster * cltr = proc->cltr; 142 $io_context & ctx = *proc->io.ctx; 143 144 __ioarbiter_flush( ctx ); 145 146 if(ctx.sq.to_submit != 0 || min_comp > 0) { 147 148 __STATS__( true, io.calls.flush++; ) 149 int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, min_comp > 0 ? 
IORING_ENTER_GETEVENTS : 0, (sigset_t *)0p, _NSIG / 8); 96 static void ioring_syscsll( struct $io_context & ctx, unsigned int min_comp, unsigned int flags ) { 97 __STATS__( true, io.calls.flush++; ) 98 int ret; 99 for() { 100 ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, flags, (sigset_t *)0p, _NSIG / 8); 150 101 if( ret < 0 ) { 151 102 switch((int)errno) { 103 case EINTR: 104 continue; 152 105 case EAGAIN: 153 case EINTR:154 106 case EBUSY: 155 107 // Update statistics … … 160 112 } 161 113 } 162 163 __cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd); 164 __STATS__( true, io.calls.submitted += ret; ) 165 /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num ); 166 /* paranoid */ verify( ctx.sq.to_submit >= ret ); 167 168 ctx.sq.to_submit -= ret; 169 170 /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num ); 171 172 // Release the consumed SQEs 173 __release_sqes( ctx ); 174 114 break; 115 } 116 117 __cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd); 118 __STATS__( true, io.calls.submitted += ret; ) 119 /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num ); 120 /* paranoid */ verify( ctx.sq.to_submit >= ret ); 121 122 ctx.sq.to_submit -= ret; 123 124 /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num ); 125 126 // Release the consumed SQEs 127 __release_sqes( ctx ); 128 129 /* paranoid */ verify( ! __preemption_enabled() ); 130 131 __atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED); 132 } 133 134 static bool try_acquire( $io_context * ctx ) __attribute__((nonnull(1))) { 135 /* paranoid */ verify( ! __preemption_enabled() ); 136 /* paranoid */ verify( ready_schedule_islocked() ); 137 138 139 { 140 const __u32 head = *ctx->cq.head; 141 const __u32 tail = *ctx->cq.tail; 142 143 if(head == tail) return false; 144 } 145 146 // Drain the queue 147 if(!__atomic_try_acquire(&ctx->cq.lock)) { 148 __STATS__( false, io.calls.locked++; ) 149 return false; 150 } 151 152 return true; 153 } 154 155 static bool __cfa_do_drain( $io_context * ctx, cluster * cltr ) __attribute__((nonnull(1, 2))) { 156 /* paranoid */ verify( ! __preemption_enabled() ); 157 /* paranoid */ verify( ready_schedule_islocked() ); 158 /* paranoid */ verify( ctx->cq.lock == true ); 159 160 const __u32 mask = *ctx->cq.mask; 161 unsigned long long ts_prev = ctx->cq.ts; 162 163 // re-read the head and tail in case it already changed. 164 const __u32 head = *ctx->cq.head; 165 const __u32 tail = *ctx->cq.tail; 166 const __u32 count = tail - head; 167 __STATS__( false, io.calls.drain++; io.calls.completed += count; ) 168 169 for(i; count) { 170 unsigned idx = (head + i) & mask; 171 volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx]; 172 173 /* paranoid */ verify(&cqe); 174 175 struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data; 176 // __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future ); 177 178 __kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL ); 179 } 180 181 unsigned long long ts_next = ctx->cq.ts = rdtscl(); 182 183 // Mark to the kernel that the cqe has been seen 184 // Ensure that the kernel only sees the new value of the head index after the CQEs have been read. 
185 __atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST ); 186 ctx->proc->idle_wctx.drain_time = ts_next; 187 188 __cfadbg_print_safe(io, "Kernel I/O : %u completed age %llu\n", count, ts_next); 189 /* paranoid */ verify( ready_schedule_islocked() ); 190 /* paranoid */ verify( ! __preemption_enabled() ); 191 192 __atomic_unlock(&ctx->cq.lock); 193 194 touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next ); 195 196 return true; 197 } 198 199 bool __cfa_io_drain( processor * proc ) { 200 bool local = false; 201 bool remote = false; 202 203 ready_schedule_lock(); 204 205 cluster * const cltr = proc->cltr; 206 $io_context * const ctx = proc->io.ctx; 207 /* paranoid */ verify( cltr ); 208 /* paranoid */ verify( ctx ); 209 210 with(cltr->sched) { 211 const size_t ctxs_count = io.count; 212 213 /* paranoid */ verify( ready_schedule_islocked() ); 175 214 /* paranoid */ verify( ! __preemption_enabled() ); 176 177 ctx.proc->io.pending = false; 178 } 179 180 ready_schedule_lock(); 181 bool ret = __cfa_io_drain( proc ); 215 /* paranoid */ verify( active_processor() == proc ); 216 /* paranoid */ verify( __shard_factor.io > 0 ); 217 /* paranoid */ verify( ctxs_count > 0 ); 218 /* paranoid */ verify( ctx->cq.id < ctxs_count ); 219 220 const unsigned this_cache = cache_id(cltr, ctx->cq.id / __shard_factor.io); 221 const unsigned long long ctsc = rdtscl(); 222 223 if(proc->io.target == MAX) { 224 uint64_t chaos = __tls_rand(); 225 unsigned ext = chaos & 0xff; 226 unsigned other = (chaos >> 8) % (ctxs_count); 227 228 if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) { 229 proc->io.target = other; 230 } 231 } 232 else { 233 const unsigned target = proc->io.target; 234 /* paranoid */ verify( io.tscs[target].tv != MAX ); 235 HELP: if(target < ctxs_count) { 236 const unsigned long long cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io); 237 const unsigned long long age = moving_average(ctsc, io.tscs[target].tv, io.tscs[target].ma); 238 __cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no"); 239 if(age <= cutoff) break HELP; 240 241 if(!try_acquire(io.data[target])) break HELP; 242 243 if(!__cfa_do_drain( io.data[target], cltr )) break HELP; 244 245 remote = true; 246 __STATS__( false, io.calls.helped++; ) 247 } 248 proc->io.target = MAX; 249 } 250 } 251 252 253 // Drain the local queue 254 if(try_acquire( proc->io.ctx )) { 255 local = __cfa_do_drain( proc->io.ctx, cltr ); 256 } 257 258 /* paranoid */ verify( ready_schedule_islocked() ); 259 /* paranoid */ verify( ! __preemption_enabled() ); 260 /* paranoid */ verify( active_processor() == proc ); 261 182 262 ready_schedule_unlock(); 183 return ret; 263 return local || remote; 264 } 265 266 bool __cfa_io_flush( processor * proc ) { 267 /* paranoid */ verify( ! 
__preemption_enabled() ); 268 /* paranoid */ verify( proc ); 269 /* paranoid */ verify( proc->io.ctx ); 270 271 $io_context & ctx = *proc->io.ctx; 272 273 __ioarbiter_flush( ctx ); 274 275 if(ctx.sq.to_submit != 0) { 276 ioring_syscsll(ctx, 0, 0); 277 278 } 279 280 return __cfa_io_drain( proc ); 184 281 } 185 282 … … 209 306 struct io_uring_sqe * sqes = ctx->sq.sqes; 210 307 for(i; want) { 211 __cfadbg_print_safe(io, "Kernel I/O : filling loop\n");308 // __cfadbg_print_safe(io, "Kernel I/O : filling loop\n"); 212 309 out_sqes[i] = &sqes[idxs[i]]; 213 310 } … … 227 324 // copy all the indexes we want from the available list 228 325 for(i; want) { 229 __cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");326 // __cfadbg_print_safe(io, "Kernel I/O : allocating loop\n"); 230 327 idxs[i] = sq.free_ring.array[(fhead + i) & mask]; 231 328 } … … 244 341 // sqe == &sqes[idx] 245 342 struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) { 246 __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);343 // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want); 247 344 248 345 disable_interrupts(); … … 252 349 /* paranoid */ verify( ctx ); 253 350 254 __cfadbg_print_safe(io, "Kernel I/O : attempting to fast allocation\n");351 // __cfadbg_print_safe(io, "Kernel I/O : attempting to fast allocation\n"); 255 352 256 353 // We can proceed to the fast path … … 260 357 enable_interrupts(); 261 358 262 __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);359 // __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd); 263 360 264 361 __fill( sqes, want, idxs, ctx ); … … 275 372 /* paranoid */ verify( ioarb ); 276 373 277 __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");374 // __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n"); 278 375 279 376 struct $io_context * ret = __ioarbiter_allocate(*ioarb, idxs, want); 280 377 281 __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);378 // __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd); 282 379 283 380 __fill( sqes, want, idxs,ret ); … … 296 393 // Add the sqes to the array 297 394 for( i; have ) { 298 __cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");395 // __cfadbg_print_safe(io, "Kernel I/O : __submit loop\n"); 299 396 sq.kring.array[ (tail + i) & mask ] = idxs[i]; 300 397 } … … 304 401 sq.to_submit += have; 305 402 306 ctx->proc->io.pending = true;307 ctx->proc->io.dirty = true;403 __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED); 404 __atomic_store_n(&ctx->proc->io.dirty , true, __ATOMIC_RELAXED); 308 405 } 309 406 … … 314 411 if(sq.to_submit > 30) { 315 412 __tls_stats()->io.flush.full++; 316 __cfa_io_flush( ctx->proc , 0);413 __cfa_io_flush( ctx->proc ); 317 414 } 318 415 if(!lazy) { 319 416 __tls_stats()->io.flush.eager++; 320 __cfa_io_flush( ctx->proc , 0);417 __cfa_io_flush( ctx->proc ); 321 418 } 322 419 } 323 420 324 421 void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) { 325 __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");422 // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? 
"lazy" : "eager"); 326 423 327 424 disable_interrupts(); … … 340 437 enable_interrupts(); 341 438 342 __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");439 // __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n"); 343 440 return; 344 441 } … … 348 445 enable_interrupts(); 349 446 350 __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");447 // __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n"); 351 448 352 449 __ioarbiter_submit(inctx, idxs, have, lazy); … … 392 489 // go through the range and release the sqes 393 490 for( i; count ) { 394 __cfadbg_print_safe(io, "Kernel I/O : release loop\n");491 // __cfadbg_print_safe(io, "Kernel I/O : release loop\n"); 395 492 __u32 idx = ctx.sq.kring.array[ (phead + i) & mask ]; 396 493 ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx; … … 432 529 433 530 static $io_context * __ioarbiter_allocate( $io_arbiter & this, __u32 idxs[], __u32 want ) { 434 __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");531 // __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n"); 435 532 436 533 __STATS__( false, io.alloc.block += 1; ) … … 499 596 bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei); 500 597 501 ctx->proc->io.pending = true;598 __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST); 502 599 503 600 if( we ) { … … 544 641 545 642 // We can proceed to the fast path 546 if( !__alloc(ctx, &idx, 1) ) return false; 643 if( !__alloc(ctx, &idx, 1) ) { 644 /* paranoid */ verify( false ); // for now check if this happens, next time just abort the sleep. 645 return false; 646 } 547 647 548 648 // Allocation was successful … … 574 674 575 675 /* paranoid */ verify( sqe->user_data == (uintptr_t)&future ); 576 __submit ( ctx, &idx, 1, true);676 __submit_only( ctx, &idx, 1 ); 577 677 578 678 /* paranoid */ verify( proc == __cfaabi_tls.this_processor ); … … 581 681 return true; 582 682 } 683 684 void __cfa_io_idle( processor * proc ) { 685 iovec iov; 686 __atomic_acquire( &proc->io.ctx->cq.lock ); 687 688 __attribute__((used)) volatile bool was_reset = false; 689 690 with( proc->idle_wctx) { 691 692 // Do we already have a pending read 693 if(available(*ftr)) { 694 // There is no pending read, we need to add one 695 reset(*ftr); 696 697 iov.iov_base = rdbuf; 698 iov.iov_len = sizeof(eventfd_t); 699 __kernel_read(proc, *ftr, iov, evfd ); 700 ftr->result = 0xDEADDEAD; 701 *((eventfd_t *)rdbuf) = 0xDEADDEADDEADDEAD; 702 was_reset = true; 703 } 704 } 705 706 if( !__atomic_load_n( &proc->do_terminate, __ATOMIC_SEQ_CST ) ) { 707 __ioarbiter_flush( *proc->io.ctx ); 708 proc->idle_wctx.sleep_time = rdtscl(); 709 ioring_syscsll( *proc->io.ctx, 1, IORING_ENTER_GETEVENTS); 710 } 711 712 ready_schedule_lock(); 713 __cfa_do_drain( proc->io.ctx, proc->cltr ); 714 ready_schedule_unlock(); 715 716 asm volatile ("" :: "m" (was_reset)); 717 } 583 718 #endif 584 719 #endif -
libcfa/src/concurrency/io/setup.cfa
rba897d21 r2e9b59b 32 32 33 33 void __cfa_io_start( processor * proc ) {} 34 bool __cfa_io_flush( processor * proc, int ) { return false; } 34 bool __cfa_io_flush( processor * proc ) { return false; } 35 bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1))) { return false; } 36 void __cfa_io_idle ( processor * ) __attribute__((nonnull (1))) {} 35 37 void __cfa_io_stop ( processor * proc ) {} 36 38 … … 39 41 40 42 #else 43 #pragma GCC diagnostic push 44 #pragma GCC diagnostic ignored "-Waddress-of-packed-member" 41 45 #include <errno.h> 42 46 #include <stdint.h> … … 57 61 #include "bitmanip.hfa" 58 62 #include "fstream.hfa" 59 #include "kernel_private.hfa" 63 #include "kernel/private.hfa" 64 #include "limits.hfa" 60 65 #include "thread.hfa" 66 #pragma GCC diagnostic pop 61 67 62 68 void ?{}(io_context_params & this) { … … 112 118 this.ext_sq.empty = true; 113 119 (this.ext_sq.queue){}; 114 __io_uring_setup( this, cl.io.params, proc->idle_ fd );120 __io_uring_setup( this, cl.io.params, proc->idle_wctx.evfd ); 115 121 __cfadbg_print_safe(io_core, "Kernel I/O : Created ring for io_context %u (%p)\n", this.fd, &this); 116 122 } … … 122 128 __cfadbg_print_safe(io_core, "Kernel I/O : Destroyed ring for io_context %u\n", this.fd); 123 129 } 124 125 extern void __disable_interrupts_hard();126 extern void __enable_interrupts_hard();127 130 128 131 static void __io_uring_setup( $io_context & this, const io_context_params & params_in, int procfd ) { … … 214 217 215 218 // completion queue 219 cq.lock = false; 220 cq.id = MAX; 221 cq.ts = rdtscl(); 216 222 cq.head = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.head); 217 223 cq.tail = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail); … … 227 233 __cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd); 228 234 229 __disable_interrupts_hard();230 231 235 int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1); 232 236 if (ret < 0) { 233 237 abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno)); 234 238 } 235 236 __enable_interrupts_hard();237 239 238 240 __cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd); -
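The setup code above registers the processor's event FD on the ring with a raw \texttt{IORING\_REGISTER\_EVENTFD} call so that completions can wake an idle processor. With liburing the same registration is a single call; a small hedged sketch:

\begin{lstlisting}
// Attach an eventfd to a ring so CQE arrivals make the fd readable.
// liburing equivalent of the raw IORING_REGISTER_EVENTFD syscall above.
#include <liburing.h>
#include <sys/eventfd.h>

int attach_wakeup_fd(struct io_uring * ring) {
	int efd = eventfd(0, 0);
	if (efd < 0) return -1;
	if (io_uring_register_eventfd(ring, efd) < 0) return -1;
	return efd;    // becomes readable whenever a completion is posted
}
\end{lstlisting}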
libcfa/src/concurrency/io/types.hfa
rba897d21 r2e9b59b 23 23 #include "bits/locks.hfa" 24 24 #include "bits/queue.hfa" 25 #include "iofwd.hfa" 25 26 #include "kernel/fwd.hfa" 27 #include "limits.hfa" 26 28 27 29 #if defined(CFA_HAVE_LINUX_IO_URING_H) … … 77 79 78 80 struct __cmp_ring_t { 81 volatile bool lock; 82 83 unsigned id; 84 85 unsigned long long ts; 86 79 87 // Head and tail of the ring 80 88 volatile __u32 * head; … … 128 136 }; 129 137 138 static inline unsigned long long ts($io_context *& this) { 139 const __u32 head = *this->cq.head; 140 const __u32 tail = *this->cq.tail; 141 142 if(head == tail) return MAX; 143 144 return this->cq.ts; 145 } 146 130 147 struct __pending_alloc { 131 148 inline __outstanding_io; … … 170 187 // void __ioctx_prepare_block($io_context & ctx); 171 188 #endif 172 173 //-----------------------------------------------------------------------174 // IO user data175 struct io_future_t {176 future_t self;177 __s32 result;178 };179 180 static inline {181 thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {182 this.result = result;183 return fulfil(this.self, do_unpark);184 }185 186 // Wait for the future to be fulfilled187 bool wait ( io_future_t & this ) { return wait (this.self); }188 void reset ( io_future_t & this ) { return reset (this.self); }189 bool available( io_future_t & this ) { return available(this.self); }190 } -
libcfa/src/concurrency/iofwd.hfa
rba897d21 r2e9b59b 25 25 } 26 26 #include "bits/defs.hfa" 27 #include "kernel/fwd.hfa" 27 28 #include "time.hfa" 28 29 … … 48 49 49 50 struct cluster; 50 struct io_future_t;51 51 struct $io_context; 52 52 … … 58 58 59 59 struct io_uring_sqe; 60 61 //----------------------------------------------------------------------- 62 // IO user data 63 struct io_future_t { 64 future_t self; 65 __s32 result; 66 }; 67 68 static inline { 69 thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) { 70 this.result = result; 71 return fulfil(this.self, do_unpark); 72 } 73 74 // Wait for the future to be fulfilled 75 bool wait ( io_future_t & this ) { return wait (this.self); } 76 void reset ( io_future_t & this ) { return reset (this.self); } 77 bool available( io_future_t & this ) { return available(this.self); } 78 } 60 79 61 80 //---------- -
libcfa/src/concurrency/kernel.cfa
rba897d21 r2e9b59b 19 19 // #define __CFA_DEBUG_PRINT_RUNTIME_CORE__ 20 20 21 #pragma GCC diagnostic push 22 #pragma GCC diagnostic ignored "-Waddress-of-packed-member" 23 21 24 //C Includes 22 25 #include <errno.h> … … 25 28 #include <signal.h> 26 29 #include <unistd.h> 30 27 31 extern "C" { 28 32 #include <sys/eventfd.h> … … 31 35 32 36 //CFA Includes 33 #include "kernel _private.hfa"37 #include "kernel/private.hfa" 34 38 #include "preemption.hfa" 35 39 #include "strstream.hfa" … … 40 44 #define __CFA_INVOKE_PRIVATE__ 41 45 #include "invoke.h" 46 #pragma GCC diagnostic pop 42 47 43 48 #if !defined(__CFA_NO_STATISTICS__) … … 127 132 static void __wake_one(cluster * cltr); 128 133 129 static void idle_sleep(processor * proc , io_future_t & future, iovec & iov);134 static void idle_sleep(processor * proc); 130 135 static bool mark_idle (__cluster_proc_list & idles, processor & proc); 131 136 static void mark_awake(__cluster_proc_list & idles, processor & proc); 132 137 133 extern void __cfa_io_start( processor * ); 134 extern bool __cfa_io_drain( processor * ); 135 extern bool __cfa_io_flush( processor *, int min_comp ); 136 extern void __cfa_io_stop ( processor * ); 137 static inline bool __maybe_io_drain( processor * ); 138 extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1))); 139 extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1))); 140 extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1))); 138 141 139 142 #if defined(CFA_WITH_IO_URING_IDLE) … … 159 162 verify(this); 160 163 161 io_future_t future; // used for idle sleep when io_uring is present162 future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not163 eventfd_t idle_val; 164 iovec idle_iovec = { &idle_val, sizeof(idle_val) };165 166 __cfa_io_start( this );164 /* paranoid */ verify( this->idle_wctx.ftr != 0p ); 165 /* paranoid */ verify( this->idle_wctx.rdbuf != 0p ); 166 167 // used for idle sleep when io_uring is present 168 // mark it as already fulfilled so we know if there is a pending request or not 169 this->idle_wctx.ftr->self.ptr = 1p; 167 170 168 171 __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this); … … 189 192 for() { 190 193 // Check if there is pending io 191 __ maybe_io_drain( this );194 __cfa_io_drain( this ); 192 195 193 196 // Try to get the next thread … … 195 198 196 199 if( !readyThread ) { 200 // there is no point in holding submissions if we are idle 197 201 __IO_STATS__(true, io.flush.idle++; ) 198 __cfa_io_flush( this, 0 ); 202 __cfa_io_flush( this ); 203 204 // drain again in case something showed up 205 __cfa_io_drain( this ); 199 206 200 207 readyThread = __next_thread( this->cltr ); … … 202 209 203 210 if( !readyThread ) for(5) { 211 readyThread = __next_thread_slow( this->cltr ); 212 213 if( readyThread ) break; 214 215 // It's unlikely we still I/O to submit, but the arbiter could 204 216 __IO_STATS__(true, io.flush.idle++; ) 205 206 readyThread = __next_thread_slow( this->cltr ); 207 208 if( readyThread ) break; 209 210 __cfa_io_flush( this, 0 ); 217 __cfa_io_flush( this ); 218 219 // drain again in case something showed up 220 __cfa_io_drain( this ); 211 221 } 212 222 … … 231 241 } 232 242 233 idle_sleep( this , future, idle_iovec);243 idle_sleep( this ); 234 244 235 245 // We were woken up, remove self from idle … … 251 261 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP; 252 262 253 if( this->io.pending && !this->io.dirty) {263 
if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) { 254 264 __IO_STATS__(true, io.flush.dirty++; ) 255 __cfa_io_flush( this , 0);265 __cfa_io_flush( this ); 256 266 } 257 267 } … … 259 269 __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this); 260 270 } 261 262 for(int i = 0; !available(future); i++) {263 if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has bin spinning on a flush after exit loop.\n", 60);264 __cfa_io_flush( this, 1 );265 }266 267 __cfa_io_stop( this );268 271 269 272 post( this->terminated ); … … 634 637 635 638 int fd = 1; 636 if( __atomic_load_n(&fdp-> fd, __ATOMIC_SEQ_CST) != 1 ) {637 fd = __atomic_exchange_n(&fdp-> fd, 1, __ATOMIC_RELAXED);639 if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) { 640 fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED); 638 641 } 639 642 640 643 switch(fd) { 644 __attribute__((unused)) int ret; 641 645 case 0: 642 646 // If the processor isn't ready to sleep then the exchange will already wake it up … … 656 660 // If the processor was ready to sleep, we need to wake it up with an actual write 657 661 val = 1; 658 eventfd_write( fd, val ); 662 ret = eventfd_write( fd, val ); 663 /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret ); 659 664 660 665 #if !defined(__CFA_NO_STATISTICS__) … … 677 682 __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this); 678 683 679 this->idle_wctx.fd = 1; 684 this->idle_wctx.sem = 1; 685 686 this->idle_wctx.wake__time = rdtscl(); 680 687 681 688 eventfd_t val; 682 689 val = 1; 683 eventfd_write( this->idle_fd, val ); 684 685 /* paranoid */ verify( ! __preemption_enabled() ); 686 } 687 688 static void idle_sleep(processor * this, io_future_t & future, iovec & iov) { 690 __attribute__((unused)) int ret = eventfd_write( this->idle_wctx.evfd, val ); 691 692 /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret ); 693 /* paranoid */ verify( ! __preemption_enabled() ); 694 } 695 696 static void idle_sleep(processor * this) { 697 /* paranoid */ verify( this->idle_wctx.evfd != 1 ); 698 /* paranoid */ verify( this->idle_wctx.evfd != 2 ); 699 689 700 // Tell everyone we are ready to go do sleep 690 701 for() { 691 int expected = this->idle_wctx. fd;702 int expected = this->idle_wctx.sem; 692 703 693 704 // Someone already told us to wake-up! No time for a nap. … … 695 706 696 707 // Try to mark that we are going to sleep 697 if(__atomic_compare_exchange_n(&this->idle_wctx. fd, &expected, this->idle_fd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {708 if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) { 698 709 // Every one agreed, taking a nap 699 710 break; … … 713 724 { 714 725 eventfd_t val; 715 ssize_t ret = read( this->idle_ fd, &val, sizeof(val) );726 ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) ); 716 727 if(ret < 0) { 717 728 switch((int)errno) { … … 735 746 #endif 736 747 #else 737 // Do we already have a pending read 738 if(available(future)) { 739 // There is no pending read, we need to add one 740 reset(future); 741 742 __kernel_read(this, future, iov, this->idle_fd ); 743 } 744 745 __cfa_io_flush( this, 1 ); 748 __cfa_io_idle( this ); 746 749 #endif 747 750 } … … 750 753 __STATS__(true, ready.sleep.halts++; ) 751 754 752 proc.idle_wctx. fd= 0;755 proc.idle_wctx.sem = 0; 753 756 754 757 /* paranoid */ verify( ! 
__preemption_enabled() ); … … 831 834 #endif 832 835 833 static inline bool __maybe_io_drain( processor * proc ) { 834 bool ret = false; 835 #if defined(CFA_HAVE_LINUX_IO_URING_H) 836 __cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd); 837 838 // Check if we should drain the queue 839 $io_context * ctx = proc->io.ctx; 840 unsigned head = *ctx->cq.head; 841 unsigned tail = *ctx->cq.tail; 842 if(head == tail) return false; 843 ready_schedule_lock(); 844 ret = __cfa_io_drain( proc ); 845 ready_schedule_unlock(); 846 #endif 847 return ret; 848 } 836 849 837 850 838 //----------------------------------------------------------------------------- … … 903 891 void print_stats_now( cluster & this, int flags ) { 904 892 crawl_cluster_stats( this ); 905 __print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );893 __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this ); 906 894 } 907 895 #endif -
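The wake/sleep handshake above encodes three states in @idle_wctx.sem@: 0 (default), 1 (wake immediately), or the sleeping processor's event FD. A minimal standalone rendering of that protocol with C11 atomics and an eventfd is sketched below; it omits the ready-queue recheck, statistics, and \texttt{io\_uring} integration of the real code.

\begin{lstlisting}
// Sketch of the 0 / 1 / eventfd wake protocol used for idle sleep.
// Omits the ready-queue recheck and io_uring integration of the real runtime.
#include <stdatomic.h>
#include <sys/eventfd.h>

struct waitctx {
	atomic_int sem;    // 0 = default, 1 = told to wake, otherwise = a sleeper's evfd
	int        evfd;   // this processor's eventfd (never 0 or 1)
};

void go_to_sleep(struct waitctx * w) {
	int expected = atomic_load(&w->sem);
	if (expected == 1) { atomic_store(&w->sem, 0); return; }   // already told to wake
	if (!atomic_compare_exchange_strong(&w->sem, &expected, w->evfd)) {
		atomic_store(&w->sem, 0); return;                      // lost the race, stay awake
	}
	eventfd_t val;
	eventfd_read(w->evfd, &val);                               // block until written to
	atomic_store(&w->sem, 0);
}

void wake_one(struct waitctx * w) {
	int prev = atomic_exchange(&w->sem, 1);
	if (prev > 1) eventfd_write(prev, 1);      // it was asleep on its eventfd: wake it
}
\end{lstlisting}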
libcfa/src/concurrency/kernel.hfa
rba897d21 r2e9b59b 48 48 extern struct cluster * mainCluster; 49 49 50 // Processor id, required for scheduling threads 51 52 50 // Coroutine used py processors for the 2-step context switch 53 51 coroutine processorCtx_t { 54 52 struct processor * proc; 55 53 }; 56 54 57 55 struct io_future_t; 56 57 // Information needed for idle sleep 58 58 struct __fd_waitctx { 59 volatile int fd; 59 // semaphore/future like object 60 // values can be 0, 1 or some file descriptor. 61 // 0 - is the default state 62 // 1 - means the proc should wake-up immediately 63 // FD - means the proc is going asleep and should be woken by writing to the FD. 64 volatile int sem; 65 66 // The event FD that corresponds to this processor 67 int evfd; 68 69 // buffer into which the proc will read from evfd 70 // unused if not using io_uring for idle sleep 71 void * rdbuf; 72 73 // future use to track the read of the eventfd 74 // unused if not using io_uring for idle sleep 75 io_future_t * ftr; 76 77 volatile unsigned long long wake__time; 78 volatile unsigned long long sleep_time; 79 volatile unsigned long long drain_time; 60 80 }; 61 81 … … 92 112 struct { 93 113 $io_context * ctx; 94 bool pending; 95 bool dirty; 114 unsigned target; 115 volatile bool pending; 116 volatile bool dirty; 96 117 } io; 97 118 … … 103 124 bool pending_preemption; 104 125 105 // Idle lock (kernel semaphore) 106 int idle_fd; 107 108 // Idle waitctx 126 // context for idle sleep 109 127 struct __fd_waitctx idle_wctx; 110 128 … … 155 173 void ^?{}(__intrusive_lane_t & this); 156 174 157 // Aligned timestamps which are used by the re laxed ready queue175 // Aligned timestamps which are used by the ready queue and io subsystem 158 176 struct __attribute__((aligned(128))) __timestamp_t { 159 177 volatile unsigned long long tv; … … 161 179 }; 162 180 181 static inline void ?{}(__timestamp_t & this) { this.tv = 0; this.ma = 0; } 182 static inline void ^?{}(__timestamp_t &) {} 183 184 163 185 struct __attribute__((aligned(16))) __cache_id_t { 164 186 volatile unsigned id; 165 187 }; 166 167 // Aligned timestamps which are used by the relaxed ready queue168 struct __attribute__((aligned(128))) __help_cnts_t {169 volatile unsigned long long src;170 volatile unsigned long long dst;171 volatile unsigned long long tri;172 };173 174 static inline void ?{}(__timestamp_t & this) { this.tv = 0; this.ma = 0; }175 static inline void ^?{}(__timestamp_t &) {}176 177 struct __attribute__((aligned(128))) __ready_queue_caches_t;178 void ?{}(__ready_queue_caches_t & this);179 void ^?{}(__ready_queue_caches_t & this);180 181 //TODO adjust cache size to ARCHITECTURE182 // Structure holding the ready queue183 struct __ready_queue_t {184 // Data tracking the actual lanes185 // On a seperate cacheline from the used struct since186 // used can change on each push/pop but this data187 // only changes on shrink/grow188 struct {189 // Arary of lanes190 __intrusive_lane_t * volatile data;191 192 // Array of times193 __timestamp_t * volatile tscs;194 195 __cache_id_t * volatile caches;196 197 // Array of stats198 __help_cnts_t * volatile help;199 200 // Number of lanes (empty or not)201 volatile size_t count;202 } lanes;203 };204 205 void ?{}(__ready_queue_t & this);206 void ^?{}(__ready_queue_t & this);207 #if !defined(__CFA_NO_STATISTICS__)208 unsigned cnt(const __ready_queue_t & this, unsigned idx);209 #endif210 188 211 189 // Idle Sleep … … 233 211 // Cluster 234 212 struct __attribute__((aligned(128))) cluster { 235 // Ready queue for threads 236 __ready_queue_t 
ready_queue; 213 struct { 214 struct { 215 // Arary of subqueues 216 __intrusive_lane_t * data; 217 218 // Time since subqueues were processed 219 __timestamp_t * tscs; 220 221 // Number of subqueue / timestamps 222 size_t count; 223 } readyQ; 224 225 struct { 226 // Array of $io_ 227 $io_context ** data; 228 229 // Time since subqueues were processed 230 __timestamp_t * tscs; 231 232 // Number of I/O subqueues 233 size_t count; 234 } io; 235 236 // Cache each kernel thread belongs to 237 __cache_id_t * caches; 238 } sched; 239 240 // // Ready queue for threads 241 // __ready_queue_t ready_queue; 237 242 238 243 // Name of the cluster -
libcfa/src/concurrency/kernel/fwd.hfa
rba897d21 r2e9b59b 248 248 // check if the future is available 249 249 bool available( future_t & this ) { 250 while( this.ptr == 2p ) Pause(); 250 251 return this.ptr == 1p; 251 252 } -
libcfa/src/concurrency/kernel/private.hfa
rba897d21 r2e9b59b 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // kernel _private.hfa --7 // kernel/private.hfa -- 8 8 // 9 9 // Author : Thierry Delisle … … 17 17 18 18 #if !defined(__cforall_thread__) 19 #error kernel _private.hfa should only be included in libcfathread source19 #error kernel/private.hfa should only be included in libcfathread source 20 20 #endif 21 21 … … 33 33 #else 34 34 #ifndef _GNU_SOURCE 35 #error kernel _private requires gnu_source35 #error kernel/private requires gnu_source 36 36 #endif 37 37 #include <sched.h> … … 40 40 41 41 // Defines whether or not we *want* to use io_uring_enter as the idle_sleep blocking call 42 #define CFA_WANT_IO_URING_IDLE42 // #define CFA_WANT_IO_URING_IDLE 43 43 44 44 // Defines whether or not we *can* use io_uring_enter as the idle_sleep blocking call … … 365 365 void ready_queue_shrink(struct cluster * cltr); 366 366 367 //----------------------------------------------------------------------- 368 // Decrease the width of the ready queue (number of lanes) by 4 369 void ready_queue_close(struct cluster * cltr); 367 370 368 371 // Local Variables: // -
libcfa/src/concurrency/kernel/startup.cfa
rba897d21 r2e9b59b 32 32 33 33 // CFA Includes 34 #include "kernel_private.hfa" 34 #include "kernel/private.hfa" 35 #include "iofwd.hfa" 35 36 #include "startup.hfa" // STARTUP_PRIORITY_XXX 36 37 #include "limits.hfa" … … 97 98 extern void __kernel_alarm_startup(void); 98 99 extern void __kernel_alarm_shutdown(void); 100 extern void __cfa_io_start( processor * ); 101 extern void __cfa_io_stop ( processor * ); 99 102 100 103 //----------------------------------------------------------------------------- … … 111 114 KERNEL_STORAGE(__stack_t, mainThreadCtx); 112 115 KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock); 116 KERNEL_STORAGE(eventfd_t, mainIdleEventFd); 117 KERNEL_STORAGE(io_future_t, mainIdleFuture); 113 118 #if !defined(__CFA_NO_STATISTICS__) 114 119 KERNEL_STORAGE(__stats_t, mainProcStats); … … 224 229 (*mainProcessor){}; 225 230 231 mainProcessor->idle_wctx.rdbuf = &storage_mainIdleEventFd; 232 mainProcessor->idle_wctx.ftr = (io_future_t*)&storage_mainIdleFuture; 233 /* paranoid */ verify( sizeof(storage_mainIdleEventFd) == sizeof(eventfd_t) ); 234 235 __cfa_io_start( mainProcessor ); 226 236 register_tls( mainProcessor ); 227 237 … … 305 315 306 316 unregister_tls( mainProcessor ); 317 __cfa_io_stop( mainProcessor ); 307 318 308 319 // Destroy the main processor and its context in reverse order of construction … … 353 364 proc->local_data = &__cfaabi_tls; 354 365 366 __cfa_io_start( proc ); 355 367 register_tls( proc ); 368 369 // used for idle sleep when io_uring is present 370 io_future_t future; 371 eventfd_t idle_buf; 372 proc->idle_wctx.ftr = &future; 373 proc->idle_wctx.rdbuf = &idle_buf; 374 356 375 357 376 // SKULLDUGGERY: We want to create a context for the processor coroutine … … 395 414 396 415 unregister_tls( proc ); 416 __cfa_io_stop( proc ); 397 417 398 418 return 0p; … … 515 535 this.rdq.its = 0; 516 536 this.rdq.itr = 0; 517 this.rdq.id = MAX;537 this.rdq.id = 0; 518 538 this.rdq.target = MAX; 519 539 this.rdq.last = MAX; … … 532 552 this.local_data = 0p; 533 553 534 this.idle_fd = eventfd(0, 0);535 if (idle_ fd < 0) {554 idle_wctx.evfd = eventfd(0, 0); 555 if (idle_wctx.evfd < 0) { 536 556 abort("KERNEL ERROR: PROCESSOR EVENTFD - %s\n", strerror(errno)); 537 557 } 538 558 539 this.idle_wctx.fd = 0; 559 idle_wctx.sem = 0; 560 idle_wctx.wake__time = 0; 540 561 541 562 // I'm assuming these two are reserved for standard input and output 542 563 // so I'm using them as sentinels with idle_wctx. 
543 /* paranoid */ verify( this.idle_fd != 0 );544 /* paranoid */ verify( this.idle_fd != 1 );564 /* paranoid */ verify( idle_wctx.evfd != 0 ); 565 /* paranoid */ verify( idle_wctx.evfd != 1 ); 545 566 546 567 #if !defined(__CFA_NO_STATISTICS__) … … 554 575 // Not a ctor, it just preps the destruction but should not destroy members 555 576 static void deinit(processor & this) { 556 close(this.idle_ fd);577 close(this.idle_wctx.evfd); 557 578 } 558 579 … … 605 626 this.name = name; 606 627 this.preemption_rate = preemption_rate; 607 ready_queue{}; 628 this.sched.readyQ.data = 0p; 629 this.sched.readyQ.tscs = 0p; 630 this.sched.readyQ.count = 0; 631 this.sched.io.tscs = 0p; 632 this.sched.io.data = 0p; 633 this.sched.caches = 0p; 608 634 609 635 #if !defined(__CFA_NO_STATISTICS__) … … 644 670 // Unlock the RWlock 645 671 ready_mutate_unlock( last_size ); 672 673 ready_queue_close( &this ); 674 /* paranoid */ verify( this.sched.readyQ.data == 0p ); 675 /* paranoid */ verify( this.sched.readyQ.tscs == 0p ); 676 /* paranoid */ verify( this.sched.readyQ.count == 0 ); 677 /* paranoid */ verify( this.sched.io.tscs == 0p ); 678 /* paranoid */ verify( this.sched.caches == 0p ); 679 646 680 enable_interrupts( false ); // Don't poll, could be in main cluster 681 647 682 648 683 #if !defined(__CFA_NO_STATISTICS__) -
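The startup.cfa changes give both the main processor and every runtime-created processor an idle-sleep context: an eventfd (idle_wctx.evfd) plus an io_future and read buffer for the io_uring variant, and they bracket each processor's lifetime with __cfa_io_start/__cfa_io_stop. A minimal stand-alone C sketch of the eventfd wait/wake pattern this idle sleep is built on; the helper names are illustrative, only the eventfd calls themselves are the real glibc API:

#include <sys/eventfd.h>
#include <stdint.h>

/* One eventfd per processor: the idle loop blocks reading it, and any
   thread that publishes work writes to it to wake the sleeper. */
static int idle_fd_create(void) {
    return eventfd(0, 0);          /* counter starts at 0; reads block while it is 0 */
}

static void idle_sleep(int evfd) {
    eventfd_t val;
    eventfd_read(evfd, &val);      /* blocks until a writer bumps the counter, then resets it */
}

static void idle_wake(int evfd) {
    eventfd_write(evfd, 1);        /* increments the counter and unblocks the reader */
}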
libcfa/src/concurrency/locks.cfa
rba897d21 r2e9b59b 19 19 20 20 #include "locks.hfa" 21 #include "kernel _private.hfa"21 #include "kernel/private.hfa" 22 22 23 23 #include <kernel.hfa> -
libcfa/src/concurrency/locks.hfa
rba897d21 r2e9b59b 164 164 } 165 165 166 static inline boollock(linear_backoff_then_block_lock & this) with(this) {166 static inline void lock(linear_backoff_then_block_lock & this) with(this) { 167 167 // if owner just return 168 if (active_thread() == owner) return true;168 if (active_thread() == owner) return; 169 169 size_t compare_val = 0; 170 170 int spin = spin_start; … … 172 172 for( ;; ) { 173 173 compare_val = 0; 174 if (internal_try_lock(this, compare_val)) return true;174 if (internal_try_lock(this, compare_val)) return; 175 175 if (2 == compare_val) break; 176 176 for (int i = 0; i < spin; i++) Pause(); … … 179 179 } 180 180 181 if(2 != compare_val && try_lock_contention(this)) return true;181 if(2 != compare_val && try_lock_contention(this)) return; 182 182 // block until signalled 183 while (block(this)) if(try_lock_contention(this)) return true; 184 185 // this should never be reached as block(this) always returns true 186 return false; 183 while (block(this)) if(try_lock_contention(this)) return; 187 184 } 188 185 -
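In locks.hfa, lock() on linear_backoff_then_block_lock previously returned a bool that could only ever be true; the change makes it void and deletes the unreachable failure return. The shape of the algorithm is unchanged: try the atomic fast path, spin with a linearly growing count, then block until signalled. A stand-alone C sketch of that shape (a simplification, not the library's implementation: the semaphore fallback, field names, and tuning are illustrative, and spin_start is assumed positive):

#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>

/* held starts false; waiters is sem_init'ed to 0. */
typedef struct {
    atomic_bool held;
    sem_t waiters;
    int spin_start, spin_end;
} backoff_lock;

static bool try_acquire(backoff_lock * l) {
    bool expected = false;
    return atomic_compare_exchange_strong(&l->held, &expected, true);
}

static void backoff_lock_acquire(backoff_lock * l) {
    for (int spin = l->spin_start; spin <= l->spin_end; spin += l->spin_start) {
        if (try_acquire(l)) return;                 /* fast path: grabbed the flag */
        for (volatile int i = 0; i < spin; i++) {}  /* spin, growing linearly each round */
    }
    while (!try_acquire(l)) sem_wait(&l->waiters);  /* give up spinning, block until signalled */
}

static void backoff_lock_release(backoff_lock * l) {
    atomic_store(&l->held, false);
    sem_post(&l->waiters);                          /* wake one blocked acquirer, if any */
}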
libcfa/src/concurrency/monitor.cfa
rba897d21 r2e9b59b 22 22 #include <inttypes.h> 23 23 24 #include "kernel _private.hfa"24 #include "kernel/private.hfa" 25 25 26 26 #include "bits/algorithm.hfa" -
libcfa/src/concurrency/mutex.cfa
rba897d21 r2e9b59b 21 21 #include "mutex.hfa" 22 22 23 #include "kernel _private.hfa"23 #include "kernel/private.hfa" 24 24 25 25 //----------------------------------------------------------------------------- -
libcfa/src/concurrency/mutex_stmt.hfa
rba897d21 r2e9b59b 12 12 }; 13 13 14 15 struct __mutex_stmt_lock_guard { 16 void ** lockarr; 17 __lock_size_t count; 18 }; 19 20 static inline void ?{}( __mutex_stmt_lock_guard & this, void * lockarr [], __lock_size_t count ) { 21 this.lockarr = lockarr; 22 this.count = count; 23 24 // Sort locks based on address 25 __libcfa_small_sort(this.lockarr, count); 26 27 // acquire locks in order 28 // for ( size_t i = 0; i < count; i++ ) { 29 // lock(*this.lockarr[i]); 30 // } 31 } 32 33 static inline void ^?{}( __mutex_stmt_lock_guard & this ) with(this) { 34 // for ( size_t i = count; i > 0; i-- ) { 35 // unlock(*lockarr[i - 1]); 36 // } 37 } 38 14 39 forall(L & | is_lock(L)) { 15 16 struct __mutex_stmt_lock_guard {17 L ** lockarr;18 __lock_size_t count;19 };20 21 static inline void ?{}( __mutex_stmt_lock_guard(L) & this, L * lockarr [], __lock_size_t count ) {22 this.lockarr = lockarr;23 this.count = count;24 25 // Sort locks based on address26 __libcfa_small_sort(this.lockarr, count);27 28 // acquire locks in order29 for ( size_t i = 0; i < count; i++ ) {30 lock(*this.lockarr[i]);31 }32 }33 34 static inline void ^?{}( __mutex_stmt_lock_guard(L) & this ) with(this) {35 for ( size_t i = count; i > 0; i-- ) {36 unlock(*lockarr[i - 1]);37 }38 }39 40 40 41 struct scoped_lock { … … 51 52 } 52 53 53 static inline L * __get_ptr( L & this ) {54 static inline void * __get_mutexstmt_lock_ptr( L & this ) { 54 55 return &this; 55 56 } 56 57 57 static inline L __get_ type( L & this );58 static inline L __get_mutexstmt_lock_type( L & this ); 58 59 59 static inline L __get_ type( L * this );60 static inline L __get_mutexstmt_lock_type( L * this ); 60 61 } -
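The mutex_stmt.hfa guard now keeps the locks as an untyped void * array, sorts them by address on construction, and leaves the actual acquire/release loops commented out (they presumably move into the statement expansion itself). Sorting by address is the classic way to take several locks without deadlock: every thread acquires any overlapping set in the same global order. A small self-contained C sketch of the idea using pthread mutexes; names are illustrative:

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

/* Order the lock pointers canonically (by address) so that two threads
   locking overlapping sets always acquire them in the same order and
   therefore cannot deadlock.  Release in the reverse order. */
static int cmp_addr(const void * a, const void * b) {
    uintptr_t pa = (uintptr_t)*(void * const *)a;
    uintptr_t pb = (uintptr_t)*(void * const *)b;
    return (pa > pb) - (pa < pb);
}

static void lock_all(pthread_mutex_t ** locks, size_t count) {
    qsort(locks, count, sizeof(locks[0]), cmp_addr);
    for (size_t i = 0; i < count; i++) pthread_mutex_lock(locks[i]);
}

static void unlock_all(pthread_mutex_t ** locks, size_t count) {
    for (size_t i = count; i > 0; i--) pthread_mutex_unlock(locks[i - 1]);
}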
libcfa/src/concurrency/preemption.cfa
rba897d21 r2e9b59b 31 31 #include "bits/debug.hfa" 32 32 #include "bits/signal.hfa" 33 #include "kernel _private.hfa"33 #include "kernel/private.hfa" 34 34 35 35 -
libcfa/src/concurrency/ready_queue.cfa
rba897d21 r2e9b59b 20 20 21 21 22 // #define USE_RELAXED_FIFO23 // #define USE_WORK_STEALING24 // #define USE_CPU_WORK_STEALING25 22 #define USE_AWARE_STEALING 26 23 27 24 #include "bits/defs.hfa" 28 25 #include "device/cpu.hfa" 29 #include "kernel_private.hfa" 30 31 #include "stdlib.hfa" 32 #include "limits.hfa" 33 #include "math.hfa" 34 35 #include <errno.h> 36 #include <unistd.h> 37 38 extern "C" { 39 #include <sys/syscall.h> // __NR_xxx 40 } 26 #include "kernel/cluster.hfa" 27 #include "kernel/private.hfa" 28 29 // #include <errno.h> 30 // #include <unistd.h> 41 31 42 32 #include "ready_subqueue.hfa" … … 50 40 #endif 51 41 52 // No overriden function, no environment variable, no define53 // fall back to a magic number54 #ifndef __CFA_MAX_PROCESSORS__55 #define __CFA_MAX_PROCESSORS__ 102456 #endif57 58 #if defined(USE_AWARE_STEALING)59 #define READYQ_SHARD_FACTOR 260 #define SEQUENTIAL_SHARD 261 #elif defined(USE_CPU_WORK_STEALING)62 #define READYQ_SHARD_FACTOR 263 #elif defined(USE_RELAXED_FIFO)64 #define BIAS 465 #define READYQ_SHARD_FACTOR 466 #define SEQUENTIAL_SHARD 167 #elif defined(USE_WORK_STEALING)68 #define READYQ_SHARD_FACTOR 269 #define SEQUENTIAL_SHARD 270 #else71 #error no scheduling strategy selected72 #endif73 74 42 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)); 75 43 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)); 76 44 static inline struct thread$ * search(struct cluster * cltr); 77 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);78 79 80 // returns the maximum number of processors the RWLock support81 __attribute__((weak)) unsigned __max_processors() {82 const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");83 if(!max_cores_s) {84 __cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");85 return __CFA_MAX_PROCESSORS__;86 }87 88 char * endptr = 0p;89 long int max_cores_l = strtol(max_cores_s, &endptr, 10);90 if(max_cores_l < 1 || max_cores_l > 65535) {91 __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);92 return __CFA_MAX_PROCESSORS__;93 }94 if('\0' != *endptr) {95 __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);96 return __CFA_MAX_PROCESSORS__;97 }98 99 return max_cores_l;100 }101 102 #if defined(CFA_HAVE_LINUX_LIBRSEQ)103 // No forward declaration needed104 #define __kernel_rseq_register rseq_register_current_thread105 #define __kernel_rseq_unregister rseq_unregister_current_thread106 #elif defined(CFA_HAVE_LINUX_RSEQ_H)107 static void __kernel_raw_rseq_register (void);108 static void __kernel_raw_rseq_unregister(void);109 110 #define __kernel_rseq_register __kernel_raw_rseq_register111 #define __kernel_rseq_unregister __kernel_raw_rseq_unregister112 #else113 // No forward declaration needed114 // No initialization needed115 static inline void noop(void) {}116 117 #define __kernel_rseq_register noop118 #define __kernel_rseq_unregister noop119 #endif120 121 //=======================================================================122 // Cluster wide reader-writer lock123 //=======================================================================124 void ?{}(__scheduler_RWLock_t & this) {125 this.max = __max_processors();126 this.alloc = 0;127 this.ready = 0;128 this.data = alloc(this.max);129 this.write_lock = false;130 131 /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), 
&this.alloc));132 /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));133 134 }135 void ^?{}(__scheduler_RWLock_t & this) {136 free(this.data);137 }138 139 140 //=======================================================================141 // Lock-Free registering/unregistering of threads142 unsigned register_proc_id( void ) with(*__scheduler_lock) {143 __kernel_rseq_register();144 145 bool * handle = (bool *)&kernelTLS().sched_lock;146 147 // Step - 1 : check if there is already space in the data148 uint_fast32_t s = ready;149 150 // Check among all the ready151 for(uint_fast32_t i = 0; i < s; i++) {152 bool * volatile * cell = (bool * volatile *)&data[i]; // Cforall is bugged and the double volatiles causes problems153 /* paranoid */ verify( handle != *cell );154 155 bool * null = 0p; // Re-write every loop since compare thrashes it156 if( __atomic_load_n(cell, (int)__ATOMIC_RELAXED) == null157 && __atomic_compare_exchange_n( cell, &null, handle, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {158 /* paranoid */ verify(i < ready);159 /* paranoid */ verify( (kernelTLS().sched_id = i, true) );160 return i;161 }162 }163 164 if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->max);165 166 // Step - 2 : F&A to get a new spot in the array.167 uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);168 if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->max);169 170 // Step - 3 : Mark space as used and then publish it.171 data[n] = handle;172 while() {173 unsigned copy = n;174 if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n175 && __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))176 break;177 Pause();178 }179 180 // Return new spot.181 /* paranoid */ verify(n < ready);182 /* paranoid */ verify( (kernelTLS().sched_id = n, true) );183 return n;184 }185 186 void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {187 /* paranoid */ verify(id < ready);188 /* paranoid */ verify(id == kernelTLS().sched_id);189 /* paranoid */ verify(data[id] == &kernelTLS().sched_lock);190 191 bool * volatile * cell = (bool * volatile *)&data[id]; // Cforall is bugged and the double volatiles causes problems192 193 __atomic_store_n(cell, 0p, __ATOMIC_RELEASE);194 195 __kernel_rseq_unregister();196 }197 198 //-----------------------------------------------------------------------199 // Writer side : acquire when changing the ready queue, e.g. adding more200 // queues or removing them.201 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {202 /* paranoid */ verify( ! __preemption_enabled() );203 204 // Step 1 : lock global lock205 // It is needed to avoid processors that register mid Critical-Section206 // to simply lock their own lock and enter.207 __atomic_acquire( &write_lock );208 209 // Make sure we won't deadlock ourself210 // Checking before acquiring the writer lock isn't safe211 // because someone else could have locked us.212 /* paranoid */ verify( ! kernelTLS().sched_lock );213 214 // Step 2 : lock per-proc lock215 // Processors that are currently being registered aren't counted216 // but can't be in read_lock or in the critical section.217 // All other processors are counted218 uint_fast32_t s = ready;219 for(uint_fast32_t i = 0; i < s; i++) {220 volatile bool * llock = data[i];221 if(llock) __atomic_acquire( llock );222 }223 224 /* paranoid */ verify( !
__preemption_enabled() );225 return s;226 }227 228 void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {229 /* paranoid */ verify( ! __preemption_enabled() );230 231 // Step 1 : release local locks232 // This must be done while the global lock is held to avoid233 // threads that where created mid critical section234 // to race to lock their local locks and have the writer235 // immidiately unlock them236 // Alternative solution : return s in write_lock and pass it to write_unlock237 for(uint_fast32_t i = 0; i < last_s; i++) {238 volatile bool * llock = data[i];239 if(llock) __atomic_store_n(llock, (bool)false, __ATOMIC_RELEASE);240 }241 242 // Step 2 : release global lock243 /*paranoid*/ assert(true == write_lock);244 __atomic_store_n(&write_lock, (bool)false, __ATOMIC_RELEASE);245 246 /* paranoid */ verify( ! __preemption_enabled() );247 }248 249 //=======================================================================250 // caches handling251 252 struct __attribute__((aligned(128))) __ready_queue_caches_t {253 // Count States:254 // - 0 : No one is looking after this cache255 // - 1 : No one is looking after this cache, BUT it's not empty256 // - 2+ : At least one processor is looking after this cache257 volatile unsigned count;258 };259 260 void ?{}(__ready_queue_caches_t & this) { this.count = 0; }261 void ^?{}(__ready_queue_caches_t & this) {}262 263 static inline void depart(__ready_queue_caches_t & cache) {264 /* paranoid */ verify( cache.count > 1);265 __atomic_fetch_add(&cache.count, -1, __ATOMIC_SEQ_CST);266 /* paranoid */ verify( cache.count != 0);267 /* paranoid */ verify( cache.count < 65536 ); // This verify assumes no cluster will have more than 65000 kernel threads mapped to a single cache, which could be correct but is super weird.268 }269 270 static inline void arrive(__ready_queue_caches_t & cache) {271 // for() {272 // unsigned expected = cache.count;273 // unsigned desired = 0 == expected ? 2 : expected + 1;274 // }275 }276 45 277 46 //======================================================================= 278 47 // Cforall Ready Queue used for scheduling 279 48 //======================================================================= 280 unsigned long long moving_average(unsigned long long currtsc, unsigned long long instsc, unsigned long long old_avg) { 281 /* paranoid */ verifyf( currtsc < 45000000000000000, "Suspiciously large current time: %'llu (%llx)\n", currtsc, currtsc ); 282 /* paranoid */ verifyf( instsc < 45000000000000000, "Suspiciously large insert time: %'llu (%llx)\n", instsc, instsc ); 283 /* paranoid */ verifyf( old_avg < 15000000000000, "Suspiciously large previous average: %'llu (%llx)\n", old_avg, old_avg ); 284 285 const unsigned long long new_val = currtsc > instsc ? 
currtsc - instsc : 0; 286 const unsigned long long total_weight = 16; 287 const unsigned long long new_weight = 4; 288 const unsigned long long old_weight = total_weight - new_weight; 289 const unsigned long long ret = ((new_weight * new_val) + (old_weight * old_avg)) / total_weight; 290 return ret; 291 } 292 293 void ?{}(__ready_queue_t & this) with (this) { 294 #if defined(USE_CPU_WORK_STEALING) 295 lanes.count = cpu_info.hthrd_count * READYQ_SHARD_FACTOR; 296 lanes.data = alloc( lanes.count ); 297 lanes.tscs = alloc( lanes.count ); 298 lanes.help = alloc( cpu_info.hthrd_count ); 299 300 for( idx; (size_t)lanes.count ) { 301 (lanes.data[idx]){}; 302 lanes.tscs[idx].tv = rdtscl(); 303 lanes.tscs[idx].ma = rdtscl(); 304 } 305 for( idx; (size_t)cpu_info.hthrd_count ) { 306 lanes.help[idx].src = 0; 307 lanes.help[idx].dst = 0; 308 lanes.help[idx].tri = 0; 309 } 310 #else 311 lanes.data = 0p; 312 lanes.tscs = 0p; 313 lanes.caches = 0p; 314 lanes.help = 0p; 315 lanes.count = 0; 316 #endif 317 } 318 319 void ^?{}(__ready_queue_t & this) with (this) { 320 #if !defined(USE_CPU_WORK_STEALING) 321 verify( SEQUENTIAL_SHARD == lanes.count ); 322 #endif 323 324 free(lanes.data); 325 free(lanes.tscs); 326 free(lanes.caches); 327 free(lanes.help); 328 } 329 330 //----------------------------------------------------------------------- 331 #if defined(USE_AWARE_STEALING) 332 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) { 333 processor * const proc = kernelTLS().this_processor; 334 const bool external = (!proc) || (cltr != proc->cltr); 335 const bool remote = hint == UNPARK_REMOTE; 336 337 unsigned i; 338 if( external || remote ) { 339 // Figure out where thread was last time and make sure it's valid 340 /* paranoid */ verify(thrd->preferred >= 0); 341 if(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count) { 342 /* paranoid */ verify(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count); 343 unsigned start = thrd->preferred * READYQ_SHARD_FACTOR; 344 do { 345 unsigned r = __tls_rand(); 346 i = start + (r % READYQ_SHARD_FACTOR); 347 /* paranoid */ verify( i < lanes.count ); 348 // If we can't lock it retry 349 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 350 } else { 351 do { 352 i = __tls_rand() % lanes.count; 353 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 354 } 49 // void ?{}(__ready_queue_t & this) with (this) { 50 // lanes.data = 0p; 51 // lanes.tscs = 0p; 52 // lanes.caches = 0p; 53 // lanes.count = 0; 54 // } 55 56 // void ^?{}(__ready_queue_t & this) with (this) { 57 // free(lanes.data); 58 // free(lanes.tscs); 59 // free(lanes.caches); 60 // } 61 62 //----------------------------------------------------------------------- 63 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->sched) { 64 processor * const proc = kernelTLS().this_processor; 65 const bool external = (!proc) || (cltr != proc->cltr); 66 const bool remote = hint == UNPARK_REMOTE; 67 const size_t lanes_count = readyQ.count; 68 69 /* paranoid */ verify( __shard_factor.readyq > 0 ); 70 /* paranoid */ verify( lanes_count > 0 ); 71 72 unsigned i; 73 if( external || remote ) { 74 // Figure out where thread was last time and make sure it's valid 75 /* paranoid */ verify(thrd->preferred >= 0); 76 unsigned start = thrd->preferred * __shard_factor.readyq; 77 if(start < lanes_count) { 78 do { 79 unsigned r = __tls_rand(); 80 i = start + (r % __shard_factor.readyq); 81 /* paranoid */ verify( i < 
lanes_count ); 82 // If we can't lock it retry 83 } while( !__atomic_try_acquire( &readyQ.data[i].lock ) ); 355 84 } else { 356 85 do { 357 unsigned r = proc->rdq.its++; 358 i = proc->rdq.id + (r % READYQ_SHARD_FACTOR); 359 /* paranoid */ verify( i < lanes.count ); 360 // If we can't lock it retry 361 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 362 } 363 364 // Actually push it 365 push(lanes.data[i], thrd); 366 367 // Unlock and return 368 __atomic_unlock( &lanes.data[i].lock ); 369 370 #if !defined(__CFA_NO_STATISTICS__) 371 if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 372 else __tls_stats()->ready.push.local.success++; 373 #endif 374 } 375 376 static inline unsigned long long calc_cutoff(const unsigned long long ctsc, const processor * proc, __ready_queue_t & rdq) { 377 unsigned start = proc->rdq.id; 378 unsigned long long max = 0; 379 for(i; READYQ_SHARD_FACTOR) { 380 unsigned long long ptsc = ts(rdq.lanes.data[start + i]); 381 if(ptsc != -1ull) { 382 /* paranoid */ verify( start + i < rdq.lanes.count ); 383 unsigned long long tsc = moving_average(ctsc, ptsc, rdq.lanes.tscs[start + i].ma); 384 if(tsc > max) max = tsc; 385 } 386 } 387 return (max + 2 * max) / 2; 388 } 389 390 __attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 391 /* paranoid */ verify( lanes.count > 0 ); 392 /* paranoid */ verify( kernelTLS().this_processor ); 393 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count ); 394 395 processor * const proc = kernelTLS().this_processor; 396 unsigned this = proc->rdq.id; 397 /* paranoid */ verify( this < lanes.count ); 398 __cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this); 399 400 // Figure out the current cpu and make sure it is valid 401 const int cpu = __kernel_getcpu(); 402 /* paranoid */ verify(cpu >= 0); 403 /* paranoid */ verify(cpu < cpu_info.hthrd_count); 404 unsigned this_cache = cpu_info.llc_map[cpu].cache; 405 406 // Super important: don't write the same value over and over again 407 // We want to maximise our chances that his particular values stays in cache 408 if(lanes.caches[this / READYQ_SHARD_FACTOR].id != this_cache) 409 __atomic_store_n(&lanes.caches[this / READYQ_SHARD_FACTOR].id, this_cache, __ATOMIC_RELAXED); 410 411 const unsigned long long ctsc = rdtscl(); 412 413 if(proc->rdq.target == MAX) { 414 uint64_t chaos = __tls_rand(); 415 unsigned ext = chaos & 0xff; 416 unsigned other = (chaos >> 8) % (lanes.count); 417 418 if(ext < 3 || __atomic_load_n(&lanes.caches[other / READYQ_SHARD_FACTOR].id, __ATOMIC_RELAXED) == this_cache) { 419 proc->rdq.target = other; 420 } 421 } 422 else { 423 const unsigned target = proc->rdq.target; 424 __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, lanes.tscs[target].tv); 425 /* paranoid */ verify( lanes.tscs[target].tv != MAX ); 426 if(target < lanes.count) { 427 const unsigned long long cutoff = calc_cutoff(ctsc, proc, cltr->ready_queue); 428 const unsigned long long age = moving_average(ctsc, lanes.tscs[target].tv, lanes.tscs[target].ma); 429 __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? 
"yes" : "no"); 430 if(age > cutoff) { 431 thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help)); 432 if(t) return t; 433 } 434 } 435 proc->rdq.target = MAX; 436 } 437 438 for(READYQ_SHARD_FACTOR) { 439 unsigned i = this + (proc->rdq.itr++ % READYQ_SHARD_FACTOR); 440 if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 441 } 442 443 // All lanes where empty return 0p 444 return 0p; 445 446 } 447 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 448 unsigned i = __tls_rand() % lanes.count; 449 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 450 } 451 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) { 452 return search(cltr); 453 } 454 #endif 455 #if defined(USE_CPU_WORK_STEALING) 456 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) { 457 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 458 459 processor * const proc = kernelTLS().this_processor; 460 const bool external = (!proc) || (cltr != proc->cltr); 461 462 // Figure out the current cpu and make sure it is valid 463 const int cpu = __kernel_getcpu(); 464 /* paranoid */ verify(cpu >= 0); 465 /* paranoid */ verify(cpu < cpu_info.hthrd_count); 466 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count); 467 468 // Figure out where thread was last time and make sure it's 469 /* paranoid */ verify(thrd->preferred >= 0); 470 /* paranoid */ verify(thrd->preferred < cpu_info.hthrd_count); 471 /* paranoid */ verify(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count); 472 const int prf = thrd->preferred * READYQ_SHARD_FACTOR; 473 474 const cpu_map_entry_t & map; 475 choose(hint) { 476 case UNPARK_LOCAL : &map = &cpu_info.llc_map[cpu]; 477 case UNPARK_REMOTE: &map = &cpu_info.llc_map[prf]; 478 } 479 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count); 480 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count); 481 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR); 482 483 const int start = map.self * READYQ_SHARD_FACTOR; 484 unsigned i; 86 i = __tls_rand() % lanes_count; 87 } while( !__atomic_try_acquire( &readyQ.data[i].lock ) ); 88 } 89 } else { 485 90 do { 486 unsigned r; 487 if(unlikely(external)) { r = __tls_rand(); } 488 else { r = proc->rdq.its++; } 489 choose(hint) { 490 case UNPARK_LOCAL : i = start + (r % READYQ_SHARD_FACTOR); 491 case UNPARK_REMOTE: i = prf + (r % READYQ_SHARD_FACTOR); 492 } 91 unsigned r = proc->rdq.its++; 92 i = proc->rdq.id + (r % __shard_factor.readyq); 93 /* paranoid */ verify( i < lanes_count ); 493 94 // If we can't lock it retry 494 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 495 496 // Actually push it 497 push(lanes.data[i], thrd); 498 499 // Unlock and return 500 __atomic_unlock( &lanes.data[i].lock ); 501 502 #if !defined(__CFA_NO_STATISTICS__) 503 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 504 else __tls_stats()->ready.push.local.success++; 505 #endif 506 507 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 508 509 } 510 511 // Pop from the ready queue from a given cluster 512 __attribute__((hot)) thread$ * pop_fast(struct 
cluster * cltr) with (cltr->ready_queue) { 513 /* paranoid */ verify( lanes.count > 0 ); 514 /* paranoid */ verify( kernelTLS().this_processor ); 515 516 processor * const proc = kernelTLS().this_processor; 517 const int cpu = __kernel_getcpu(); 518 /* paranoid */ verify(cpu >= 0); 519 /* paranoid */ verify(cpu < cpu_info.hthrd_count); 520 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count); 521 522 const cpu_map_entry_t & map = cpu_info.llc_map[cpu]; 523 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count); 524 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count); 525 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR); 526 527 const int start = map.self * READYQ_SHARD_FACTOR; 528 const unsigned long long ctsc = rdtscl(); 529 530 // Did we already have a help target 531 if(proc->rdq.target == MAX) { 532 unsigned long long max = 0; 533 for(i; READYQ_SHARD_FACTOR) { 534 unsigned long long tsc = moving_average(ctsc, ts(lanes.data[start + i]), lanes.tscs[start + i].ma); 535 if(tsc > max) max = tsc; 536 } 537 // proc->rdq.cutoff = (max + 2 * max) / 2; 538 /* paranoid */ verify(lanes.count < 65536); // The following code assumes max 65536 cores. 539 /* paranoid */ verify(map.count < 65536); // The following code assumes max 65536 cores. 540 541 if(0 == (__tls_rand() % 100)) { 542 proc->rdq.target = __tls_rand() % lanes.count; 543 } else { 544 unsigned cpu_chaos = map.start + (__tls_rand() % map.count); 545 proc->rdq.target = (cpu_chaos * READYQ_SHARD_FACTOR) + (__tls_rand() % READYQ_SHARD_FACTOR); 546 /* paranoid */ verify(proc->rdq.target >= (map.start * READYQ_SHARD_FACTOR)); 547 /* paranoid */ verify(proc->rdq.target < ((map.start + map.count) * READYQ_SHARD_FACTOR)); 548 } 549 550 /* paranoid */ verify(proc->rdq.target != MAX); 551 } 552 else { 553 unsigned long long max = 0; 554 for(i; READYQ_SHARD_FACTOR) { 555 unsigned long long tsc = moving_average(ctsc, ts(lanes.data[start + i]), lanes.tscs[start + i].ma); 556 if(tsc > max) max = tsc; 557 } 558 const unsigned long long cutoff = (max + 2 * max) / 2; 559 { 560 unsigned target = proc->rdq.target; 561 proc->rdq.target = MAX; 562 lanes.help[target / READYQ_SHARD_FACTOR].tri++; 563 if(moving_average(ctsc, lanes.tscs[target].tv, lanes.tscs[target].ma) > cutoff) { 564 thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help)); 565 proc->rdq.last = target; 566 if(t) return t; 567 } 568 proc->rdq.target = MAX; 569 } 570 571 unsigned last = proc->rdq.last; 572 if(last != MAX && moving_average(ctsc, lanes.tscs[last].tv, lanes.tscs[last].ma) > cutoff) { 573 thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help)); 574 if(t) return t; 575 } 576 else { 577 proc->rdq.last = MAX; 578 } 579 } 580 581 for(READYQ_SHARD_FACTOR) { 582 unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR); 583 if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 584 } 585 586 // All lanes where empty return 0p 587 return 0p; 588 } 589 590 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 591 processor * const proc = kernelTLS().this_processor; 592 unsigned last = proc->rdq.last; 593 if(last != MAX) { 594 struct thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal)); 595 if(t) return t; 596 proc->rdq.last = MAX; 597 } 598 599 unsigned i = __tls_rand() % 
lanes.count; 600 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 601 } 602 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) { 603 return search(cltr); 604 } 605 #endif 606 #if defined(USE_RELAXED_FIFO) 607 //----------------------------------------------------------------------- 608 // get index from random number with or without bias towards queues 609 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) { 610 unsigned i; 611 bool local; 612 unsigned rlow = r % BIAS; 613 unsigned rhigh = r / BIAS; 614 if((0 != rlow) && preferred >= 0) { 615 // (BIAS - 1) out of BIAS chances 616 // Use perferred queues 617 i = preferred + (rhigh % READYQ_SHARD_FACTOR); 618 local = true; 619 } 620 else { 621 // 1 out of BIAS chances 622 // Use all queues 623 i = rhigh; 624 local = false; 625 } 626 return [i, local]; 627 } 628 629 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) { 630 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 631 632 const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr); 633 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count ); 634 635 bool local; 636 int preferred = external ? -1 : kernelTLS().this_processor->rdq.id; 637 638 // Try to pick a lane and lock it 639 unsigned i; 640 do { 641 // Pick the index of a lane 642 unsigned r = __tls_rand_fwd(); 643 [i, local] = idx_from_r(r, preferred); 644 645 i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); 646 647 #if !defined(__CFA_NO_STATISTICS__) 648 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED); 649 else if(local) __tls_stats()->ready.push.local.attempt++; 650 else __tls_stats()->ready.push.share.attempt++; 651 #endif 652 653 // If we can't lock it retry 654 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 655 656 // Actually push it 657 push(lanes.data[i], thrd); 658 659 // Unlock and return 660 __atomic_unlock( &lanes.data[i].lock ); 661 662 // Mark the current index in the tls rng instance as having an item 663 __tls_rand_advance_bck(); 664 665 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 666 667 // Update statistics 668 #if !defined(__CFA_NO_STATISTICS__) 669 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 670 else if(local) __tls_stats()->ready.push.local.success++; 671 else __tls_stats()->ready.push.share.success++; 672 #endif 673 } 674 675 // Pop from the ready queue from a given cluster 676 __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 677 /* paranoid */ verify( lanes.count > 0 ); 678 /* paranoid */ verify( kernelTLS().this_processor ); 679 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count ); 680 681 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); 682 int preferred = kernelTLS().this_processor->rdq.id; 683 684 685 // As long as the list is not empty, try finding a lane that isn't empty and pop from it 686 for(25) { 687 // Pick two lists at random 688 unsigned ri = __tls_rand_bck(); 689 unsigned rj = __tls_rand_bck(); 690 691 unsigned i, j; 692 __attribute__((unused)) bool locali, localj; 693 [i, locali] = idx_from_r(ri, preferred); 694 [j, localj] = 
idx_from_r(rj, preferred); 695 696 i %= count; 697 j %= count; 698 699 // try popping from the 2 picked lists 700 struct thread$ * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help))); 701 if(thrd) { 702 return thrd; 703 } 704 } 705 706 // All lanes where empty return 0p 707 return 0p; 708 } 709 710 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) { return pop_fast(cltr); } 711 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) { 712 return search(cltr); 713 } 714 #endif 715 #if defined(USE_WORK_STEALING) 716 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) { 717 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 718 719 // #define USE_PREFERRED 720 #if !defined(USE_PREFERRED) 721 const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr); 722 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count ); 723 #else 724 unsigned preferred = thrd->preferred; 725 const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || preferred == MAX || thrd->curr_cluster != cltr; 726 /* paranoid */ verifyf(external || preferred < lanes.count, "Invalid preferred queue %u for %u lanes", preferred, lanes.count ); 727 728 unsigned r = preferred % READYQ_SHARD_FACTOR; 729 const unsigned start = preferred - r; 730 #endif 731 732 // Try to pick a lane and lock it 733 unsigned i; 734 do { 735 #if !defined(__CFA_NO_STATISTICS__) 736 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED); 737 else __tls_stats()->ready.push.local.attempt++; 738 #endif 739 740 if(unlikely(external)) { 741 i = __tls_rand() % lanes.count; 742 } 743 else { 744 #if !defined(USE_PREFERRED) 745 processor * proc = kernelTLS().this_processor; 746 unsigned r = proc->rdq.its++; 747 i = proc->rdq.id + (r % READYQ_SHARD_FACTOR); 748 #else 749 i = start + (r++ % READYQ_SHARD_FACTOR); 750 #endif 751 } 752 // If we can't lock it retry 753 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 754 755 // Actually push it 756 push(lanes.data[i], thrd); 757 758 // Unlock and return 759 __atomic_unlock( &lanes.data[i].lock ); 760 761 #if !defined(__CFA_NO_STATISTICS__) 762 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 763 else __tls_stats()->ready.push.local.success++; 764 #endif 765 766 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 767 } 768 769 // Pop from the ready queue from a given cluster 770 __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 771 /* paranoid */ verify( lanes.count > 0 ); 772 /* paranoid */ verify( kernelTLS().this_processor ); 773 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count ); 774 775 processor * proc = kernelTLS().this_processor; 776 777 if(proc->rdq.target == MAX) { 778 unsigned long long min = ts(lanes.data[proc->rdq.id]); 779 for(int i = 0; i < READYQ_SHARD_FACTOR; i++) { 780 unsigned long long tsc = ts(lanes.data[proc->rdq.id + i]); 781 if(tsc < min) min = tsc; 782 } 783 proc->rdq.cutoff = min; 784 proc->rdq.target = __tls_rand() % lanes.count; 785 } 786 else { 787 unsigned target = proc->rdq.target; 788 proc->rdq.target = MAX; 789 
const unsigned long long bias = 0; //2_500_000_000; 790 const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff; 791 if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) { 95 } while( !__atomic_try_acquire( &readyQ.data[i].lock ) ); 96 } 97 98 // Actually push it 99 push(readyQ.data[i], thrd); 100 101 // Unlock and return 102 __atomic_unlock( &readyQ.data[i].lock ); 103 104 #if !defined(__CFA_NO_STATISTICS__) 105 if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 106 else __tls_stats()->ready.push.local.success++; 107 #endif 108 } 109 110 __attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->sched) { 111 const size_t lanes_count = readyQ.count; 112 113 /* paranoid */ verify( __shard_factor.readyq > 0 ); 114 /* paranoid */ verify( lanes_count > 0 ); 115 /* paranoid */ verify( kernelTLS().this_processor ); 116 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes_count ); 117 118 processor * const proc = kernelTLS().this_processor; 119 unsigned this = proc->rdq.id; 120 /* paranoid */ verify( this < lanes_count ); 121 __cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this); 122 123 // Figure out the current cache is 124 const unsigned this_cache = cache_id(cltr, this / __shard_factor.readyq); 125 const unsigned long long ctsc = rdtscl(); 126 127 if(proc->rdq.target == MAX) { 128 uint64_t chaos = __tls_rand(); 129 unsigned ext = chaos & 0xff; 130 unsigned other = (chaos >> 8) % (lanes_count); 131 132 if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.readyq].id, __ATOMIC_RELAXED) == this_cache) { 133 proc->rdq.target = other; 134 } 135 } 136 else { 137 const unsigned target = proc->rdq.target; 138 __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, readyQ.tscs[target].tv); 139 /* paranoid */ verify( readyQ.tscs[target].tv != MAX ); 140 if(target < lanes_count) { 141 const unsigned long long cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq); 142 const unsigned long long age = moving_average(ctsc, readyQ.tscs[target].tv, readyQ.tscs[target].ma); 143 __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? 
"yes" : "no"); 144 if(age > cutoff) { 792 145 thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help)); 793 146 if(t) return t; 794 147 } 795 148 } 796 797 for(READYQ_SHARD_FACTOR) { 798 unsigned i = proc->rdq.id + (proc->rdq.itr++ % READYQ_SHARD_FACTOR); 799 if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 800 } 801 return 0p; 802 } 803 804 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 805 unsigned i = __tls_rand() % lanes.count; 806 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 807 } 808 809 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) with (cltr->ready_queue) { 810 return search(cltr); 811 } 812 #endif 149 proc->rdq.target = MAX; 150 } 151 152 for(__shard_factor.readyq) { 153 unsigned i = this + (proc->rdq.itr++ % __shard_factor.readyq); 154 if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 155 } 156 157 // All lanes where empty return 0p 158 return 0p; 159 160 } 161 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) { 162 unsigned i = __tls_rand() % (cltr->sched.readyQ.count); 163 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 164 } 165 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) { 166 return search(cltr); 167 } 813 168 814 169 //======================================================================= … … 820 175 //----------------------------------------------------------------------- 821 176 // try to pop from a lane given by index w 822 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr-> ready_queue) {823 /* paranoid */ verify( w < lanes.count );177 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) { 178 /* paranoid */ verify( w < readyQ.count ); 824 179 __STATS( stats.attempt++; ) 825 180 826 181 // Get relevant elements locally 827 __intrusive_lane_t & lane = lanes.data[w];182 __intrusive_lane_t & lane = readyQ.data[w]; 828 183 829 184 // If list looks empty retry … … 845 200 // Actually pop the list 846 201 struct thread$ * thrd; 847 #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING) 848 unsigned long long tsc_before = ts(lane); 849 #endif 850 unsigned long long tsv; 851 [thrd, tsv] = pop(lane); 202 unsigned long long ts_prev = ts(lane); 203 unsigned long long ts_next; 204 [thrd, ts_next] = pop(lane); 852 205 853 206 /* paranoid */ verify(thrd); 854 /* paranoid */ verify(ts v);207 /* paranoid */ verify(ts_next); 855 208 /* paranoid */ verify(lane.lock); 856 209 … … 861 214 __STATS( stats.success++; ) 862 215 863 #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING) 864 if (tsv != MAX) { 865 unsigned long long now = rdtscl(); 866 unsigned long long pma = __atomic_load_n(&lanes.tscs[w].ma, __ATOMIC_RELAXED); 867 __atomic_store_n(&lanes.tscs[w].tv, tsv, __ATOMIC_RELAXED); 868 __atomic_store_n(&lanes.tscs[w].ma, moving_average(now, tsc_before, pma), __ATOMIC_RELAXED); 869 } 870 #endif 871 872 #if defined(USE_AWARE_STEALING) || defined(USE_CPU_WORK_STEALING) 873 thrd->preferred = w / READYQ_SHARD_FACTOR; 874 #else 875 thrd->preferred = w; 876 #endif 216 touch_tsc(readyQ.tscs, w, ts_prev, ts_next); 217 218 thrd->preferred = w / __shard_factor.readyq; 877 219 878 220 // return the popped 
thread … … 883 225 // try to pop from any lanes making sure you don't miss any threads push 884 226 // before the start of the function 885 static inline struct thread$ * search(struct cluster * cltr) with (cltr->ready_queue) { 886 /* paranoid */ verify( lanes.count > 0 ); 887 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); 227 static inline struct thread$ * search(struct cluster * cltr) { 228 const size_t lanes_count = cltr->sched.readyQ.count; 229 /* paranoid */ verify( lanes_count > 0 ); 230 unsigned count = __atomic_load_n( &lanes_count, __ATOMIC_RELAXED ); 888 231 unsigned offset = __tls_rand(); 889 232 for(i; count) { … … 902 245 // get preferred ready for new thread 903 246 unsigned ready_queue_new_preferred() { 904 unsigned pref = 0;247 unsigned pref = MAX; 905 248 if(struct thread$ * thrd = publicTLS_get( this_thread )) { 906 249 pref = thrd->preferred; 907 250 } 908 else {909 #if defined(USE_CPU_WORK_STEALING)910 pref = __kernel_getcpu();911 #endif912 }913 914 #if defined(USE_CPU_WORK_STEALING)915 /* paranoid */ verify(pref >= 0);916 /* paranoid */ verify(pref < cpu_info.hthrd_count);917 #endif918 251 919 252 return pref; … … 921 254 922 255 //----------------------------------------------------------------------- 923 // Check that all the intrusive queues in the data structure are still consistent924 static void check( __ready_queue_t & q ) with (q) {925 #if defined(__CFA_WITH_VERIFY__)926 {927 for( idx ; lanes.count ) {928 __intrusive_lane_t & sl = lanes.data[idx];929 assert(!lanes.data[idx].lock);930 931 if(is_empty(sl)) {932 assert( sl.anchor.next == 0p );933 assert( sl.anchor.ts == -1llu );934 assert( mock_head(sl) == sl.prev );935 } else {936 assert( sl.anchor.next != 0p );937 assert( sl.anchor.ts != -1llu );938 assert( mock_head(sl) != sl.prev );939 }940 }941 }942 #endif943 }944 945 //-----------------------------------------------------------------------946 256 // Given 2 indexes, pick the list with the oldest push an try to pop from it 947 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr-> ready_queue) {257 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) { 948 258 // Pick the bet list 949 259 int w = i; 950 if( __builtin_expect(!is_empty( lanes.data[j]), true) ) {951 w = (ts( lanes.data[i]) < ts(lanes.data[j])) ? i : j;260 if( __builtin_expect(!is_empty(readyQ.data[j]), true) ) { 261 w = (ts(readyQ.data[i]) < ts(readyQ.data[j])) ? 
i : j; 952 262 } 953 263 954 264 return try_pop(cltr, w __STATS(, stats)); 955 265 } 956 957 // Call this function of the intrusive list was moved using memcpy958 // fixes the list so that the pointers back to anchors aren't left dangling959 static inline void fix(__intrusive_lane_t & ll) {960 if(is_empty(ll)) {961 verify(ll.anchor.next == 0p);962 ll.prev = mock_head(ll);963 }964 }965 966 static void assign_list(unsigned & value, dlist(processor) & list, unsigned count) {967 processor * it = &list`first;968 for(unsigned i = 0; i < count; i++) {969 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);970 it->rdq.id = value;971 it->rdq.target = MAX;972 value += READYQ_SHARD_FACTOR;973 it = &(*it)`next;974 }975 }976 977 static void reassign_cltr_id(struct cluster * cltr) {978 unsigned preferred = 0;979 assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);980 assign_list(preferred, cltr->procs.idles , cltr->procs.idle );981 }982 983 static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {984 #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING)985 lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);986 for(i; lanes.count) {987 lanes.tscs[i].tv = rdtscl();988 lanes.tscs[i].ma = 0;989 }990 #endif991 }992 993 #if defined(USE_CPU_WORK_STEALING)994 // ready_queue size is fixed in this case995 void ready_queue_grow(struct cluster * cltr) {}996 void ready_queue_shrink(struct cluster * cltr) {}997 #else998 // Grow the ready queue999 void ready_queue_grow(struct cluster * cltr) {1000 size_t ncount;1001 int target = cltr->procs.total;1002 1003 /* paranoid */ verify( ready_mutate_islocked() );1004 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");1005 1006 // Make sure that everything is consistent1007 /* paranoid */ check( cltr->ready_queue );1008 1009 // grow the ready queue1010 with( cltr->ready_queue ) {1011 // Find new count1012 // Make sure we always have atleast 1 list1013 if(target >= 2) {1014 ncount = target * READYQ_SHARD_FACTOR;1015 } else {1016 ncount = SEQUENTIAL_SHARD;1017 }1018 1019 // Allocate new array (uses realloc and memcpies the data)1020 lanes.data = alloc( ncount, lanes.data`realloc );1021 1022 // Fix the moved data1023 for( idx; (size_t)lanes.count ) {1024 fix(lanes.data[idx]);1025 }1026 1027 // Construct new data1028 for( idx; (size_t)lanes.count ~ ncount) {1029 (lanes.data[idx]){};1030 }1031 1032 // Update original1033 lanes.count = ncount;1034 1035 lanes.caches = alloc( target, lanes.caches`realloc );1036 }1037 1038 fix_times(cltr);1039 1040 reassign_cltr_id(cltr);1041 1042 // Make sure that everything is consistent1043 /* paranoid */ check( cltr->ready_queue );1044 1045 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");1046 1047 /* paranoid */ verify( ready_mutate_islocked() );1048 }1049 1050 // Shrink the ready queue1051 void ready_queue_shrink(struct cluster * cltr) {1052 /* paranoid */ verify( ready_mutate_islocked() );1053 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");1054 1055 // Make sure that everything is consistent1056 /* paranoid */ check( cltr->ready_queue );1057 1058 int target = cltr->procs.total;1059 1060 with( cltr->ready_queue ) {1061 // Remember old count1062 size_t ocount = lanes.count;1063 1064 // Find new count1065 // Make sure we always have atleast 1 list1066 lanes.count = target >= 2 ? 
target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD;1067 /* paranoid */ verify( ocount >= lanes.count );1068 /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );1069 1070 // for printing count the number of displaced threads1071 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)1072 __attribute__((unused)) size_t displaced = 0;1073 #endif1074 1075 // redistribute old data1076 for( idx; (size_t)lanes.count ~ ocount) {1077 // Lock is not strictly needed but makes checking invariants much easier1078 __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);1079 verify(locked);1080 1081 // As long as we can pop from this lane to push the threads somewhere else in the queue1082 while(!is_empty(lanes.data[idx])) {1083 struct thread$ * thrd;1084 unsigned long long _;1085 [thrd, _] = pop(lanes.data[idx]);1086 1087 push(cltr, thrd, true);1088 1089 // for printing count the number of displaced threads1090 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)1091 displaced++;1092 #endif1093 }1094 1095 // Unlock the lane1096 __atomic_unlock(&lanes.data[idx].lock);1097 1098 // TODO print the queue statistics here1099 1100 ^(lanes.data[idx]){};1101 }1102 1103 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);1104 1105 // Allocate new array (uses realloc and memcpies the data)1106 lanes.data = alloc( lanes.count, lanes.data`realloc );1107 1108 // Fix the moved data1109 for( idx; (size_t)lanes.count ) {1110 fix(lanes.data[idx]);1111 }1112 1113 lanes.caches = alloc( target, lanes.caches`realloc );1114 }1115 1116 fix_times(cltr);1117 1118 1119 reassign_cltr_id(cltr);1120 1121 // Make sure that everything is consistent1122 /* paranoid */ check( cltr->ready_queue );1123 1124 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");1125 /* paranoid */ verify( ready_mutate_islocked() );1126 }1127 #endif1128 1129 #if !defined(__CFA_NO_STATISTICS__)1130 unsigned cnt(const __ready_queue_t & this, unsigned idx) {1131 /* paranoid */ verify(this.lanes.count > idx);1132 return this.lanes.data[idx].cnt;1133 }1134 #endif1135 1136 1137 #if defined(CFA_HAVE_LINUX_LIBRSEQ)1138 // No definition needed1139 #elif defined(CFA_HAVE_LINUX_RSEQ_H)1140 1141 #if defined( __x86_64 ) || defined( __i386 )1142 #define RSEQ_SIG 0x530530531143 #elif defined( __ARM_ARCH )1144 #ifdef __ARMEB__1145 #define RSEQ_SIG 0xf3def5e7 /* udf #24035 ; 0x5de3 (ARMv6+) */1146 #else1147 #define RSEQ_SIG 0xe7f5def3 /* udf #24035 ; 0x5de3 */1148 #endif1149 #endif1150 1151 extern void __disable_interrupts_hard();1152 extern void __enable_interrupts_hard();1153 1154 static void __kernel_raw_rseq_register (void) {1155 /* paranoid */ verify( __cfaabi_rseq.cpu_id == RSEQ_CPU_ID_UNINITIALIZED );1156 1157 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, (sigset_t *)0p, _NSIG / 8);1158 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, RSEQ_SIG);1159 if(ret != 0) {1160 int e = errno;1161 switch(e) {1162 case EINVAL: abort("KERNEL ERROR: rseq register invalid argument");1163 case ENOSYS: abort("KERNEL ERROR: rseq register no supported");1164 case EFAULT: abort("KERNEL ERROR: rseq register with invalid argument");1165 case EBUSY : abort("KERNEL ERROR: rseq register already registered");1166 case EPERM : abort("KERNEL ERROR: rseq register sig argument on unregistration does not match the signature received on registration");1167 default: abort("KERNEL ERROR: 
rseq register unexpected return %d", e);1168 }1169 }1170 }1171 1172 static void __kernel_raw_rseq_unregister(void) {1173 /* paranoid */ verify( __cfaabi_rseq.cpu_id >= 0 );1174 1175 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, (sigset_t *)0p, _NSIG / 8);1176 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, RSEQ_SIG);1177 if(ret != 0) {1178 int e = errno;1179 switch(e) {1180 case EINVAL: abort("KERNEL ERROR: rseq unregister invalid argument");1181 case ENOSYS: abort("KERNEL ERROR: rseq unregister no supported");1182 case EFAULT: abort("KERNEL ERROR: rseq unregister with invalid argument");1183 case EBUSY : abort("KERNEL ERROR: rseq unregister already registered");1184 case EPERM : abort("KERNEL ERROR: rseq unregister sig argument on unregistration does not match the signature received on registration");1185 default: abort("KERNEL ERROR: rseq unregisteunexpected return %d", e);1186 }1187 }1188 }1189 #else1190 // No definition needed1191 #endif -
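The ready_queue.cfa rewrite strips out the alternative strategies (relaxed FIFO, plain work stealing, CPU work stealing) and the scheduler reader-writer-lock and rseq plumbing, which move to kernel/cluster.hfa and friends, leaving only the aware-stealing policy. That policy rests on two small pieces of arithmetic: a per-lane exponentially weighted moving average of how long work sits in the lane, and a helping cutoff of 1.5x the processor's own worst lane. A stand-alone C sketch matching the weights used in the source (4/16 new sample, 12/16 old average); the function names mirror the source, the packaging is illustrative:

#include <stdint.h>

/* Weighted moving average of lane latency: the new sample gets 4/16 of
   the weight, the previous average keeps the remaining 12/16. */
static uint64_t moving_average(uint64_t currtsc, uint64_t instsc, uint64_t old_avg) {
    const uint64_t new_val = currtsc > instsc ? currtsc - instsc : 0;
    const uint64_t total_weight = 16, new_weight = 4, old_weight = total_weight - new_weight;
    return (new_weight * new_val + old_weight * old_avg) / total_weight;
}

/* A lane is only worth helping when its age exceeds 1.5x the largest
   average age among the processor's own lanes. */
static uint64_t calc_cutoff(uint64_t max_local_avg) {
    return (max_local_avg + 2 * max_local_avg) / 2;
}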
libcfa/src/concurrency/ready_subqueue.hfa
rba897d21 r2e9b59b 3 3 #define __CFA_NO_SCHED_STATS__ 4 4 5 #include " containers/queueLockFree.hfa"5 #include "limits.hfa" 6 6 7 7 // Intrusives lanes which are used by the relaxed ready queue … … 27 27 } 28 28 29 // Ctor30 void ?{}( __intrusive_lane_t & this ) {31 this.lock = false;32 this.prev = mock_head(this);33 this.anchor.next = 0p;34 this.anchor.ts = -1llu;35 #if !defined(__CFA_NO_STATISTICS__)36 this.cnt = 0;37 #endif38 39 // We add a boat-load of assertions here because the anchor code is very fragile40 /* paranoid */ _Static_assert( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );41 /* paranoid */ verify( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );42 /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.anchor) );43 /* paranoid */ verify( &mock_head(this)->link.next == &this.anchor.next );44 /* paranoid */ verify( &mock_head(this)->link.ts == &this.anchor.ts );45 /* paranoid */ verify( mock_head(this)->link.next == 0p );46 /* paranoid */ verify( mock_head(this)->link.ts == -1llu );47 /* paranoid */ verify( mock_head(this) == this.prev );48 /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 );49 /* paranoid */ verify( __alignof__(this) == 128 );50 /* paranoid */ verifyf( ((intptr_t)(&this) % 128) == 0, "Expected address to be aligned %p %% 128 == %zd", &this, ((intptr_t)(&this) % 128) );51 }52 53 // Dtor is trivial54 void ^?{}( __intrusive_lane_t & this ) {55 // Make sure the list is empty56 /* paranoid */ verify( this.anchor.next == 0p );57 /* paranoid */ verify( this.anchor.ts == -1llu );58 /* paranoid */ verify( mock_head(this) == this.prev );59 }60 61 29 // Push a thread onto this lane 62 30 // returns true of lane was empty before push, false otherwise … … 64 32 /* paranoid */ verify( this.lock ); 65 33 /* paranoid */ verify( node->link.next == 0p ); 66 /* paranoid */ verify( node->link.ts == -1llu);34 /* paranoid */ verify( node->link.ts == MAX ); 67 35 /* paranoid */ verify( this.prev->link.next == 0p ); 68 /* paranoid */ verify( this.prev->link.ts == -1llu);36 /* paranoid */ verify( this.prev->link.ts == MAX ); 69 37 if( this.anchor.next == 0p ) { 70 38 /* paranoid */ verify( this.anchor.next == 0p ); 71 /* paranoid */ verify( this.anchor.ts == -1llu);39 /* paranoid */ verify( this.anchor.ts == MAX ); 72 40 /* paranoid */ verify( this.anchor.ts != 0 ); 73 41 /* paranoid */ verify( this.prev == mock_head( this ) ); 74 42 } else { 75 43 /* paranoid */ verify( this.anchor.next != 0p ); 76 /* paranoid */ verify( this.anchor.ts != -1llu);44 /* paranoid */ verify( this.anchor.ts != MAX ); 77 45 /* paranoid */ verify( this.anchor.ts != 0 ); 78 46 /* paranoid */ verify( this.prev != mock_head( this ) ); … … 94 62 /* paranoid */ verify( this.lock ); 95 63 /* paranoid */ verify( this.anchor.next != 0p ); 96 /* paranoid */ verify( this.anchor.ts != -1llu);64 /* paranoid */ verify( this.anchor.ts != MAX ); 97 65 /* paranoid */ verify( this.anchor.ts != 0 ); 98 66 … … 103 71 bool is_empty = this.anchor.next == 0p; 104 72 node->link.next = 0p; 105 node->link.ts = -1llu;73 node->link.ts = MAX; 106 74 #if !defined(__CFA_NO_STATISTICS__) 107 75 this.cnt--; … … 112 80 113 81 /* paranoid */ verify( node->link.next == 0p ); 114 /* paranoid */ verify( node->link.ts == -1llu);82 /* paranoid */ verify( node->link.ts == MAX ); 115 83 /* paranoid */ verify( node->link.ts != 0 ); 116 84 /* paranoid */ verify( this.anchor.ts != 0 ); -
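ready_subqueue.hfa keeps the intrusive lane layout but now includes limits.hfa and uses MAX rather than the literal -1llu as the "not on a lane" timestamp sentinel; the constructor/destructor (and their battery of layout assertions) move out of this header. A minimal stand-alone C sketch of the same anchor-plus-tail intrusive FIFO shape, with UINT64_MAX standing in for that sentinel; names are illustrative, and the real lane additionally carries a lock and statistics:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define TS_UNLINKED UINT64_MAX      /* sentinel: node is not on any lane */

struct node {
    struct node * next;
    uint64_t ts;                    /* enqueue timestamp, TS_UNLINKED when free */
};

struct lane {
    struct node anchor;             /* dummy head; anchor.next == NULL means empty */
    struct node * prev;             /* tail: last node, or &anchor when empty */
};

static void lane_init(struct lane * l) {
    l->anchor.next = NULL;
    l->anchor.ts = TS_UNLINKED;
    l->prev = &l->anchor;
}

static void lane_push(struct lane * l, struct node * n, uint64_t now) {
    assert(n->ts == TS_UNLINKED);   /* must not already be queued */
    n->next = NULL;
    n->ts = now;
    l->prev->next = n;              /* link after the current tail */
    l->prev = n;
}

static struct node * lane_pop(struct lane * l) {
    struct node * n = l->anchor.next;
    if (!n) return NULL;                        /* lane is empty */
    l->anchor.next = n->next;
    if (l->prev == n) l->prev = &l->anchor;     /* popped the tail */
    n->next = NULL;
    n->ts = TS_UNLINKED;
    return n;
}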
libcfa/src/concurrency/stats.cfa
rba897d21 r2e9b59b 55 55 stats->io.calls.drain = 0; 56 56 stats->io.calls.completed = 0; 57 stats->io.calls.locked = 0; 58 stats->io.calls.helped = 0; 57 59 stats->io.calls.errors.busy = 0; 58 60 stats->io.ops.sockread = 0; … … 123 125 tally_one( &cltr->io.calls.drain , &proc->io.calls.drain ); 124 126 tally_one( &cltr->io.calls.completed , &proc->io.calls.completed ); 127 tally_one( &cltr->io.calls.locked , &proc->io.calls.locked ); 128 tally_one( &cltr->io.calls.helped , &proc->io.calls.helped ); 125 129 tally_one( &cltr->io.calls.errors.busy, &proc->io.calls.errors.busy ); 126 130 tally_one( &cltr->io.ops.sockread , &proc->io.ops.sockread ); … … 205 209 | " sub " | eng3(io.calls.submitted) | "/" | eng3(io.calls.flush) | "(" | ws(3, 3, avgsubs) | "/flush)" 206 210 | " - cmp " | eng3(io.calls.completed) | "/" | eng3(io.calls.drain) | "(" | ws(3, 3, avgcomp) | "/drain)" 211 | " - cmp " | eng3(io.calls.locked) | "locked, " | eng3(io.calls.helped) | "helped" 207 212 | " - " | eng3(io.calls.errors.busy) | " EBUSY"; 208 213 sstr | " - sub: " | eng3(io.flush.full) | "full, " | eng3(io.flush.dirty) | "drty, " | eng3(io.flush.idle) | "idle, " | eng3(io.flush.eager) | "eagr, " | eng3(io.flush.external) | "ext"; -
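stats.cfa wires the two new io.calls counters (locked and helped) through the usual three steps: zero them at init, fold them into the cluster totals in tally, and add them to the printed summary line. The fold step is the only part that needs synchronisation; a short C sketch of the pattern, assuming a tally helper along these lines (the exact primitive the library uses is not shown in this hunk):

#include <stdatomic.h>
#include <stdint.h>

/* Each processor bumps its own counter without synchronisation on the hot
   path; only the aggregation into the cluster-wide total is atomic. */
static void tally_one(_Atomic uint64_t * cltr_field, const volatile uint64_t * proc_field) {
    atomic_fetch_add_explicit(cltr_field, *proc_field, memory_order_relaxed);
}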
libcfa/src/concurrency/stats.hfa
rba897d21 r2e9b59b 103 103 volatile uint64_t drain; 104 104 volatile uint64_t completed; 105 volatile uint64_t locked; 106 volatile uint64_t helped; 105 107 volatile uint64_t flush; 106 108 volatile uint64_t submitted; -
libcfa/src/concurrency/thread.cfa
rba897d21 r2e9b59b 19 19 #include "thread.hfa" 20 20 21 #include "kernel _private.hfa"21 #include "kernel/private.hfa" 22 22 #include "exception.hfa" 23 23 -
libcfa/src/containers/array.hfa
rba897d21 r2e9b59b 1 #include <assert.h> 1 2 2 3 … … 34 35 35 36 static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, int i ) { 37 assert( i < N ); 36 38 return (Timmed &) a.strides[i]; 37 39 } 38 40 39 41 static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, unsigned int i ) { 42 assert( i < N ); 40 43 return (Timmed &) a.strides[i]; 41 44 } 42 45 43 46 static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, long int i ) { 47 assert( i < N ); 44 48 return (Timmed &) a.strides[i]; 45 49 } 46 50 47 51 static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, unsigned long int i ) { 52 assert( i < N ); 48 53 return (Timmed &) a.strides[i]; 49 54 } -
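The array.hfa hunk adds an assert( i < N ) bounds check to each subscript overload of the checked array type. A small standalone C++ analogue of a statically sized, assert-checked subscript (checked_array is an illustrative name, not the CFA container):

#include <cassert>
#include <cstddef>

template< typename T, std::size_t N >
struct checked_array {
    T strides[N];

    // Debug-only bounds check; compiled away with -DNDEBUG.
    T & operator[]( std::size_t i ) {
        assert( i < N );
        return strides[i];
    }
    const T & operator[]( std::size_t i ) const {
        assert( i < N );
        return strides[i];
    }
};

int main() {
    checked_array<int, 4> a{};
    a[3] = 42;          // fine
    // a[4] = 0;        // would trip the assert in a debug build
    return a[3] == 42 ? 0 : 1;
}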
libcfa/src/device/cpu.hfa
rba897d21 r2e9b59b 13 13 // Update Count : 14 14 // 15 16 #pragma once 15 17 16 18 #include <stddef.h> -
libcfa/src/fstream.cfa
rba897d21 r2e9b59b 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 10 08:45:05202213 // Update Count : 51 312 // Last Modified On : Sat Apr 9 14:55:54 2022 13 // Update Count : 515 14 14 // 15 15 … … 161 161 for ( cnt; 10 ) { 162 162 errno = 0; 163 disable_interrupts(); 163 164 len = vfprintf( (FILE *)(os.file$), format, args ); 165 enable_interrupts(); 164 166 if ( len != EOF || errno != EINTR ) break; // timer interrupt ? 165 167 if ( cnt == 9 ) abort( "ofstream fmt EINTR spinning exceeded" ); … … 293 295 for () { // no check for EINTR limit waiting for keyboard input 294 296 errno = 0; 297 disable_interrupts(); 295 298 len = vfscanf( (FILE *)(is.file$), format, args ); 299 enable_interrupts(); 296 300 if ( len != EOF || errno != EINTR ) break; // timer interrupt ? 297 301 } // for -
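The fstream.cfa change brackets vfprintf/vfscanf with disable_interrupts()/enable_interrupts() so the runtime's preemption signal cannot land mid-call, while keeping the existing retry-on-EINTR loop as a backstop. Below is a hedged standalone C++ sketch of the same discipline; block_timer/unblock_timer and the use of SIGALRM with sigprocmask are assumptions standing in for the CFA runtime's own interrupt toggles, and the sketch gives up after ten tries where the real code aborts.

#include <errno.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>

// Stand-ins for disable_interrupts()/enable_interrupts(): mask the timer
// signal around the libc call (SIGALRM is an illustrative choice).
static void block_timer( sigset_t * old ) {
    sigset_t s;
    sigemptyset( &s );
    sigaddset( &s, SIGALRM );
    sigprocmask( SIG_BLOCK, &s, old );
}
static void unblock_timer( const sigset_t * old ) {
    sigprocmask( SIG_SETMASK, old, nullptr );
}

// vfprintf wrapped in the "mask the timer, retry on EINTR" pattern.
static int guarded_vfprintf( FILE * os, const char * fmt, va_list args ) {
    for ( int cnt = 0; cnt < 10; ++cnt ) {
        errno = 0;
        sigset_t old;
        block_timer( &old );
        va_list copy;
        va_copy( copy, args );             // vfprintf consumes its va_list
        int len = vfprintf( os, fmt, copy );
        va_end( copy );
        unblock_timer( &old );
        if ( len != EOF || errno != EINTR ) return len;   // done, or a real error
    }
    return EOF;                            // retries exhausted
}

static int guarded_fprintf( FILE * os, const char * fmt, ... ) {
    va_list args;
    va_start( args, fmt );
    int len = guarded_vfprintf( os, fmt, args );
    va_end( args );
    return len;
}

int main() { return guarded_fprintf( stdout, "hello %d\n", 42 ) < 0; }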
src/AST/Convert.cpp
rba897d21 r2e9b59b 9 9 // Author : Thierry Delisle 10 10 // Created On : Thu May 09 15::37::05 2019 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Wed Feb 2 13:19:22202213 // Update Count : 4 111 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Mar 16 15:01:00 2022 13 // Update Count : 42 14 14 // 15 15 … … 49 49 //================================================================================================ 50 50 namespace ast { 51 52 // This is to preserve the FindSpecialDecls hack. It does not (and perhaps should not) 53 // allow us to use the same stratagy in the new ast. 54 // xxx - since convert back pass works, this concern seems to be unnecessary. 55 56 // these need to be accessed in new FixInit now 57 ast::ptr<ast::Type> sizeType = nullptr; 58 const ast::FunctionDecl * dereferenceOperator = nullptr; 59 const ast::StructDecl * dtorStruct = nullptr; 60 const ast::FunctionDecl * dtorStructDestroy = nullptr; 51 // These are the shared local information used by ConverterNewToOld and 52 // ConverterOldToNew to update the global information in the two versions. 53 54 static ast::ptr<ast::Type> sizeType = nullptr; 55 static const ast::FunctionDecl * dereferenceOperator = nullptr; 56 static const ast::StructDecl * dtorStruct = nullptr; 57 static const ast::FunctionDecl * dtorStructDestroy = nullptr; 61 58 62 59 } … … 276 273 decl->parent = get<AggregateDecl>().accept1( node->parent ); 277 274 declPostamble( decl, node ); 278 return nullptr; 275 return nullptr; // ?? 279 276 } 280 277 … … 310 307 node->name, 311 308 get<Attribute>().acceptL( node->attributes ), 312 LinkageSpec::Spec( node->linkage.val ) 313 ); 314 return aggregatePostamble( decl, node ); 309 LinkageSpec::Spec( node->linkage.val ), 310 get<Type>().accept1(node->base) 311 ); 312 return aggregatePostamble( decl, node ); // Node info, including members, processed in aggregatePostamble 315 313 } 316 314 … … 354 352 this->node = stmt; 355 353 return nullptr; 354 } 355 356 void clausePostamble( Statement * stmt, const ast::StmtClause * node ) { 357 stmt->location = node->location; 358 this->node = stmt; 356 359 } 357 360 … … 404 407 auto stmt = new SwitchStmt( 405 408 get<Expression>().accept1( node->cond ), 406 get<Statement>().acceptL( node-> stmts )409 get<Statement>().acceptL( node->cases ) 407 410 ); 408 411 return stmtPostamble( stmt, node ); 409 412 } 410 413 411 const ast:: Stmt * visit( const ast::CaseStmt* node ) override final {414 const ast::CaseClause * visit( const ast::CaseClause * node ) override final { 412 415 if ( inCache( node ) ) return nullptr; 413 416 auto stmt = new CaseStmt( … … 416 419 node->isDefault() 417 420 ); 418 return stmtPostamble( stmt, node ); 421 clausePostamble( stmt, node ); 422 return nullptr; 419 423 } 420 424 … … 512 516 } 513 517 514 const ast:: Stmt * visit( const ast::CatchStmt* node ) override final {518 const ast::CatchClause * visit( const ast::CatchClause * node ) override final { 515 519 if ( inCache( node ) ) return nullptr; 516 520 CatchStmt::Kind kind; … … 523 527 break; 524 528 default: 525 assertf(false, "Invalid ast:: CatchStmt::Kind: %d\n", node->kind);529 assertf(false, "Invalid ast::ExceptionKind: %d\n", node->kind); 526 530 } 527 531 auto stmt = new CatchStmt( … … 531 535 get<Statement>().accept1( node->body ) 532 536 ); 533 return stmtPostamble( stmt, node );534 } 535 536 const ast:: Stmt * visit( const ast::FinallyStmt* node ) override final {537 return clausePostamble( stmt, node ), nullptr; 538 } 539 540 const ast::FinallyClause * 
visit( const ast::FinallyClause * node ) override final { 537 541 if ( inCache( node ) ) return nullptr; 538 542 auto stmt = new FinallyStmt( get<CompoundStmt>().accept1( node->body ) ); 539 return stmtPostamble( stmt, node );543 return clausePostamble( stmt, node ), nullptr; 540 544 } 541 545 … … 947 951 } 948 952 953 const ast::Expr * visit( const ast::DimensionExpr * node ) override final { 954 auto expr = visitBaseExpr( node, new DimensionExpr( node->name ) ); 955 this->node = expr; 956 return nullptr; 957 } 958 949 959 const ast::Expr * visit( const ast::AsmExpr * node ) override final { 950 960 auto expr = visitBaseExpr( node, … … 1467 1477 return strict_dynamic_cast< ast::Decl * >( node ); 1468 1478 } 1469 1479 1470 1480 ConverterOldToNew() = default; 1471 1481 ConverterOldToNew(const ConverterOldToNew &) = delete; … … 1495 1505 getAccept1< ast::type, decltype( old->child ) >( old->child ) 1496 1506 1507 1497 1508 template<typename NewT, typename OldC> 1498 1509 std::vector< ast::ptr<NewT> > getAcceptV( const OldC& old ) { … … 1509 1520 # define GET_ACCEPT_V(child, type) \ 1510 1521 getAcceptV< ast::type, decltype( old->child ) >( old->child ) 1522 1523 # define GET_ACCEPT_E(child, type) \ 1524 getAccept1< ast::type, decltype( old->base ) >( old->base ) 1511 1525 1512 1526 template<typename NewT, typename OldC> … … 1710 1724 } 1711 1725 1726 // Convert SynTree::EnumDecl to AST::EnumDecl 1712 1727 virtual void visit( const EnumDecl * old ) override final { 1713 1728 if ( inCache( old ) ) return; … … 1716 1731 old->name, 1717 1732 GET_ACCEPT_V(attributes, Attribute), 1718 { old->linkage.val } 1733 { old->linkage.val }, 1734 GET_ACCEPT_1(base, Type), 1735 old->enumValues 1719 1736 ); 1720 1737 cache.emplace( old, decl ); … … 1726 1743 decl->uniqueId = old->uniqueId; 1727 1744 decl->storage = { old->storageClasses.val }; 1728 1729 1745 this->node = decl; 1730 1746 } … … 1887 1903 old->location, 1888 1904 GET_ACCEPT_1(condition, Expr), 1889 GET_ACCEPT_V(statements, Stmt),1905 GET_ACCEPT_V(statements, CaseClause), 1890 1906 GET_LABELS_V(old->labels) 1891 1907 ); … … 1895 1911 virtual void visit( const CaseStmt * old ) override final { 1896 1912 if ( inCache( old ) ) return; 1897 this->node = new ast::Case Stmt(1913 this->node = new ast::CaseClause( 1898 1914 old->location, 1899 1915 GET_ACCEPT_1(condition, Expr), 1900 GET_ACCEPT_V(stmts, Stmt), 1901 GET_LABELS_V(old->labels) 1902 ); 1916 GET_ACCEPT_V(stmts, Stmt) 1917 ); 1918 auto labels = GET_LABELS_V(old->labels); 1919 assertf(labels.empty(), "Labels found on CaseStmt."); 1903 1920 cache.emplace( old, this->node ); 1904 1921 } … … 2008 2025 old->location, 2009 2026 GET_ACCEPT_1(block, CompoundStmt), 2010 GET_ACCEPT_V(handlers, Catch Stmt),2011 GET_ACCEPT_1(finallyBlock, Finally Stmt),2027 GET_ACCEPT_V(handlers, CatchClause), 2028 GET_ACCEPT_1(finallyBlock, FinallyClause), 2012 2029 GET_LABELS_V(old->labels) 2013 2030 ); … … 2029 2046 } 2030 2047 2031 this->node = new ast::Catch Stmt(2048 this->node = new ast::CatchClause( 2032 2049 old->location, 2033 2050 kind, 2034 2051 GET_ACCEPT_1(decl, Decl), 2035 2052 GET_ACCEPT_1(cond, Expr), 2036 GET_ACCEPT_1(body, Stmt), 2037 GET_LABELS_V(old->labels) 2038 ); 2053 GET_ACCEPT_1(body, Stmt) 2054 ); 2055 auto labels = GET_LABELS_V(old->labels); 2056 assertf(labels.empty(), "Labels found on CatchStmt."); 2039 2057 cache.emplace( old, this->node ); 2040 2058 } … … 2042 2060 virtual void visit( const FinallyStmt * old ) override final { 2043 2061 if ( inCache( old ) ) return; 2044 this->node = new 
ast::FinallyStmt( 2045 old->location, 2046 GET_ACCEPT_1(block, CompoundStmt), 2047 GET_LABELS_V(old->labels) 2048 ); 2062 this->node = new ast::FinallyClause( 2063 old->location, 2064 GET_ACCEPT_1(block, CompoundStmt) 2065 ); 2066 auto labels = GET_LABELS_V(old->labels); 2067 assertf(labels.empty(), "Labels found on FinallyStmt."); 2049 2068 cache.emplace( old, this->node ); 2050 2069 } … … 2450 2469 2451 2470 virtual void visit( const DimensionExpr * old ) override final { 2452 // DimensionExpr gets desugared away in Validate. 2453 // As long as new-AST passes don't use it, this cheap-cheerful error 2454 // detection helps ensure that these occurrences have been compiled 2455 // away, as expected. To move the DimensionExpr boundary downstream 2456 // or move the new-AST translation boundary upstream, implement 2457 // DimensionExpr in the new AST and implement a conversion. 2458 (void) old; 2459 assert(false && "DimensionExpr should not be present at new-AST boundary"); 2471 this->node = visitBaseExpr( old, 2472 new ast::DimensionExpr( old->location, old->name ) 2473 ); 2460 2474 } 2461 2475 … … 2711 2725 2712 2726 for (auto & param : foralls) { 2713 ty->forall.emplace_back(new ast::TypeInstType(param ->name, param));2727 ty->forall.emplace_back(new ast::TypeInstType(param)); 2714 2728 for (auto asst : param->assertions) { 2715 2729 ty->assertions.emplace_back(new ast::VariableExpr({}, asst)); … … 2761 2775 } 2762 2776 2763 virtual void visit( const EnumInstType * old ) override final { 2764 ast::EnumInstType * ty; 2777 virtual void visit( const EnumInstType * old ) override final { // Here is visiting the EnumInst Decl not the usage. 2778 ast::EnumInstType * ty; 2765 2779 if ( old->baseEnum ) { 2766 ty = new ast::EnumInstType{ 2780 ty = new ast::EnumInstType{ // Probably here: missing the specification of the base 2767 2781 GET_ACCEPT_1( baseEnum, EnumDecl ), 2768 2782 cv( old ), -
src/AST/Decl.cpp
rba897d21 r2e9b59b 68 68 } 69 69 for (auto & tp : this->type_params) { 70 ftype->forall.emplace_back(new TypeInstType(tp ->name, tp));70 ftype->forall.emplace_back(new TypeInstType(tp)); 71 71 for (auto & ap: tp->assertions) { 72 72 ftype->assertions.emplace_back(new VariableExpr(loc, ap)); … … 136 136 137 137 auto it = enumValues.find( enumerator->name ); 138 138 139 if ( it != enumValues.end() ) { 139 value = it->second; 140 141 // Handle typed enum by casting the value in (C++) compiler 142 // if ( base ) { // A typed enum 143 // if ( const BasicType * bt = dynamic_cast<const BasicType *>(base) ) { 144 // switch( bt->kind ) { 145 // case BasicType::Kind::Bool: value = (bool) it->second; break; 146 // case BasicType::Kind::Char: value = (char) it->second; break; 147 // case BasicType::Kind::SignedChar: value = (signed char) it->second; break; 148 // case BasicType::Kind::UnsignedChar: value = (unsigned char) it->second; break; 149 // case BasicType::Kind::ShortSignedInt: value = (short signed int) it->second; break; 150 // case BasicType::Kind::SignedInt: value = (signed int) it->second; break; 151 // case BasicType::Kind::UnsignedInt: value = (unsigned int) it->second; break; 152 // case BasicType::Kind::LongSignedInt: value = (long signed int) it->second; break; 153 // case BasicType::Kind::LongUnsignedInt: value = (long unsigned int) it->second; break; 154 // case BasicType::Kind::LongLongSignedInt: value = (long long signed int) it->second; break; 155 // case BasicType::Kind::LongLongUnsignedInt: value = (long long unsigned int) it->second; break; 156 // // TODO: value should be able to handle long long unsigned int 157 158 // default: 159 // value = it->second; 160 // } 161 // } 162 // } else { 163 value = it->second; 164 //} 165 140 166 return true; 141 167 } -
src/AST/Decl.hpp
rba897d21 r2e9b59b 302 302 class EnumDecl final : public AggregateDecl { 303 303 public: 304 ptr<Type> base; 305 304 306 EnumDecl( const CodeLocation& loc, const std::string& name, 305 std::vector<ptr<Attribute>>&& attrs = {}, Linkage::Spec linkage = Linkage::Cforall ) 306 : AggregateDecl( loc, name, std::move(attrs), linkage ), enumValues() {} 307 std::vector<ptr<Attribute>>&& attrs = {}, Linkage::Spec linkage = Linkage::Cforall, Type * base = nullptr, 308 std::unordered_map< std::string, long long > enumValues = std::unordered_map< std::string, long long >() ) 309 : AggregateDecl( loc, name, std::move(attrs), linkage ), base(base), enumValues(enumValues) {} 307 310 308 311 /// gets the integer value for this enumerator, returning true iff value found 312 // Maybe it is not used in producing the enum value 309 313 bool valueOf( const Decl * enumerator, long long& value ) const; 310 314 … … 312 316 313 317 const char * typeString() const override { return aggrString( Enum ); } 318 319 bool isTyped() {return base && base.get();} 314 320 315 321 private: -
src/AST/Expr.hpp
rba897d21 r2e9b59b 604 604 }; 605 605 606 class DimensionExpr final : public Expr { 607 public: 608 std::string name; 609 610 DimensionExpr( const CodeLocation & loc, std::string name ) 611 : Expr( loc ), name( name ) {} 612 613 const Expr * accept( Visitor & v ) const override { return v.visit( this ); } 614 private: 615 DimensionExpr * clone() const override { return new DimensionExpr{ *this }; } 616 MUTATE_FRIEND 617 }; 618 606 619 /// A GCC "asm constraint operand" used in an asm statement, e.g. `[output] "=f" (result)`. 607 620 /// https://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/Machine-Constraints.html#Machine-Constraints -
src/AST/Fwd.hpp
rba897d21 r2e9b59b 47 47 class ForStmt; 48 48 class SwitchStmt; 49 class Case Stmt;49 class CaseClause; 50 50 class BranchStmt; 51 51 class ReturnStmt; 52 52 class ThrowStmt; 53 53 class TryStmt; 54 class Catch Stmt;55 class Finally Stmt;54 class CatchClause; 55 class FinallyClause; 56 56 class SuspendStmt; 57 57 class WaitForStmt; … … 84 84 class CommaExpr; 85 85 class TypeExpr; 86 class DimensionExpr; 86 87 class AsmExpr; 87 88 class ImplicitCopyCtorExpr; … … 141 142 142 143 class TranslationUnit; 143 // TODO: Get from the TranslationUnit: 144 extern ptr<Type> sizeType; 145 extern const FunctionDecl * dereferenceOperator; 146 extern const StructDecl * dtorStruct; 147 extern const FunctionDecl * dtorStructDestroy; 144 class TranslationGlobal; 148 145 149 146 } -
src/AST/GenericSubstitution.cpp
rba897d21 r2e9b59b 45 45 visit_children = false; 46 46 const AggregateDecl * aggr = ty->aggr(); 47 sub = TypeSubstitution { aggr->params.begin(), aggr->params.end(), ty->params.begin() };47 sub = TypeSubstitution( aggr->params, ty->params ); 48 48 } 49 49 -
src/AST/Node.cpp
rba897d21 r2e9b59b 9 9 // Author : Thierry Delisle 10 10 // Created On : Thu May 16 14:16:00 2019 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Tue Feb 1 09:09:39202213 // Update Count : 311 // Last Modified By : Andrew Beach 12 // Last Modified On : Fri Mar 25 10:30:00 2022 13 // Update Count : 4 14 14 // 15 15 … … 19 19 #include <csignal> // MEMORY DEBUG -- for raise 20 20 #include <iostream> 21 #include <utility> 21 22 22 23 #include "Attribute.hpp" … … 76 77 void ast::ptr_base<node_t, ref_t>::_check() const { 77 78 // if(node) assert(node->was_ever_strong == false || node->strong_count > 0); 79 } 80 81 template< typename node_t, enum ast::Node::ref_type ref_t > 82 void ast::ptr_base<node_t, ref_t>::swap( ptr_base & other ) noexcept { 83 std::swap( this->node, other.node ); 84 _trap( this->node ); 85 _trap( other.node ); 78 86 } 79 87 … … 152 160 template class ast::ptr_base< ast::SwitchStmt, ast::Node::ref_type::weak >; 153 161 template class ast::ptr_base< ast::SwitchStmt, ast::Node::ref_type::strong >; 154 template class ast::ptr_base< ast::Case Stmt, ast::Node::ref_type::weak >;155 template class ast::ptr_base< ast::Case Stmt, ast::Node::ref_type::strong >;162 template class ast::ptr_base< ast::CaseClause, ast::Node::ref_type::weak >; 163 template class ast::ptr_base< ast::CaseClause, ast::Node::ref_type::strong >; 156 164 template class ast::ptr_base< ast::BranchStmt, ast::Node::ref_type::weak >; 157 165 template class ast::ptr_base< ast::BranchStmt, ast::Node::ref_type::strong >; … … 162 170 template class ast::ptr_base< ast::TryStmt, ast::Node::ref_type::weak >; 163 171 template class ast::ptr_base< ast::TryStmt, ast::Node::ref_type::strong >; 164 template class ast::ptr_base< ast::Catch Stmt, ast::Node::ref_type::weak >;165 template class ast::ptr_base< ast::Catch Stmt, ast::Node::ref_type::strong >;166 template class ast::ptr_base< ast::Finally Stmt, ast::Node::ref_type::weak >;167 template class ast::ptr_base< ast::Finally Stmt, ast::Node::ref_type::strong >;172 template class ast::ptr_base< ast::CatchClause, ast::Node::ref_type::weak >; 173 template class ast::ptr_base< ast::CatchClause, ast::Node::ref_type::strong >; 174 template class ast::ptr_base< ast::FinallyClause, ast::Node::ref_type::weak >; 175 template class ast::ptr_base< ast::FinallyClause, ast::Node::ref_type::strong >; 168 176 template class ast::ptr_base< ast::WaitForStmt, ast::Node::ref_type::weak >; 169 177 template class ast::ptr_base< ast::WaitForStmt, ast::Node::ref_type::strong >; -
src/AST/Node.hpp
rba897d21 r2e9b59b 10 10 // Created On : Wed May 8 10:27:04 2019 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Fri Jun 5 9:47:00 202013 // Update Count : 612 // Last Modified On : Fri Mar 25 10:33:00 2022 13 // Update Count : 7 14 14 // 15 15 … … 103 103 104 104 /// Mutate a node field (only clones if not equal to existing value) 105 template<typename node_t, typename parent_t, typename field_t, typename assn_t>106 const node_t * mutate_field( const node_t * node, field_t parent_t::* field, assn_t && val ) {105 template<typename node_t, typename super_t, typename field_t, typename assn_t> 106 const node_t * mutate_field( const node_t * node, field_t super_t::* field, assn_t && val ) { 107 107 // skip mutate if equivalent 108 108 if ( node->*field == val ) return node; … … 115 115 116 116 /// Mutate a single index of a node field (only clones if not equal to existing value) 117 template<typename node_t, typename parent_t, typename coll_t, typename ind_t, typename field_t>117 template<typename node_t, typename super_t, typename coll_t, typename ind_t, typename field_t> 118 118 const node_t * mutate_field_index( 119 const node_t * node, coll_t parent_t::* field, ind_t i, field_t && val119 const node_t * node, coll_t super_t::* field, ind_t i, field_t && val 120 120 ) { 121 121 // skip mutate if equivalent … … 129 129 130 130 /// Mutate an entire indexed collection by cloning to accepted value 131 template<typename node_t, typename parent_t, typename coll_t>132 const node_t * mutate_each( const node_t * node, coll_t parent_t::* field, Visitor & v ) {131 template<typename node_t, typename super_t, typename coll_t> 132 const node_t * mutate_each( const node_t * node, coll_t super_t::* field, Visitor & v ) { 133 133 for ( unsigned i = 0; i < (node->*field).size(); ++i ) { 134 134 node = mutate_field_index( node, field, i, (node->*field)[i]->accept( v ) ); … … 230 230 } 231 231 232 /// Swaps the nodes contained within two pointers. 233 void swap( ptr_base & other ) noexcept; 234 232 235 const node_t * get() const { _check(); return node; } 233 236 const node_t * operator->() const { _check(); return node; } … … 292 295 template< typename node_t > 293 296 using readonly = ptr_base< node_t, Node::ref_type::weak >; 297 298 /// Non-member swap that an participate in overload resolution. 299 template< typename node_t, enum Node::ref_type ref_t > 300 void swap( ptr_base< node_t, ref_t > & l, ptr_base< node_t, ref_t > & r ) { 301 l.swap( r ); 302 } 303 294 304 } 295 305 -
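Node.cpp/Node.hpp add a member swap on the smart-pointer wrapper plus a non-member overload so an unqualified swap( a, b ) is found by argument-dependent lookup instead of falling back to std::swap's copy-based version. A small standalone C++ sketch of that pattern (ref_ptr is an illustrative stand-in, not the CFA ptr_base, and it omits the reference counting):

#include <cassert>
#include <utility>

template< typename T >
class ref_ptr {
    T * node = nullptr;
public:
    explicit ref_ptr( T * n = nullptr ) : node( n ) {}

    // Member swap: exchange the raw pointers, no ref-count traffic.
    void swap( ref_ptr & other ) noexcept {
        std::swap( node, other.node );
    }

    T * get() const { return node; }
};

// Non-member overload found by ADL, so generic code that writes
// "using std::swap; swap(a, b);" (or a plain "swap(a, b)") uses it.
template< typename T >
void swap( ref_ptr<T> & l, ref_ptr<T> & r ) noexcept { l.swap( r ); }

int main() {
    int x = 1, y = 2;
    ref_ptr<int> a( &x ), b( &y );
    swap( a, b );                  // resolves to the ADL overload above
    assert( a.get() == &y && b.get() == &x );
}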
src/AST/Pass.hpp
rba897d21 r2e9b59b 149 149 const ast::Stmt * visit( const ast::ForStmt * ) override final; 150 150 const ast::Stmt * visit( const ast::SwitchStmt * ) override final; 151 const ast:: Stmt * visit( const ast::CaseStmt* ) override final;151 const ast::CaseClause * visit( const ast::CaseClause * ) override final; 152 152 const ast::Stmt * visit( const ast::BranchStmt * ) override final; 153 153 const ast::Stmt * visit( const ast::ReturnStmt * ) override final; 154 154 const ast::Stmt * visit( const ast::ThrowStmt * ) override final; 155 155 const ast::Stmt * visit( const ast::TryStmt * ) override final; 156 const ast:: Stmt * visit( const ast::CatchStmt* ) override final;157 const ast:: Stmt * visit( const ast::FinallyStmt* ) override final;156 const ast::CatchClause * visit( const ast::CatchClause * ) override final; 157 const ast::FinallyClause * visit( const ast::FinallyClause * ) override final; 158 158 const ast::Stmt * visit( const ast::SuspendStmt * ) override final; 159 159 const ast::Stmt * visit( const ast::WaitForStmt * ) override final; … … 184 184 const ast::Expr * visit( const ast::CommaExpr * ) override final; 185 185 const ast::Expr * visit( const ast::TypeExpr * ) override final; 186 const ast::Expr * visit( const ast::DimensionExpr * ) override final; 186 187 const ast::Expr * visit( const ast::AsmExpr * ) override final; 187 188 const ast::Expr * visit( const ast::ImplicitCopyCtorExpr * ) override final; -
src/AST/Pass.impl.hpp
rba897d21 r2e9b59b 354 354 // Take all the elements that are different in 'values' 355 355 // and swap them into 'container' 356 if( values[i] != nullptr ) s td::swap(container[i], values[i]);356 if( values[i] != nullptr ) swap(container[i], values[i]); 357 357 } 358 358 … … 399 399 400 400 template< typename core_t > 401 template<typename node_t, typename parent_t, typename child_t>401 template<typename node_t, typename super_t, typename field_t> 402 402 void ast::Pass< core_t >::maybe_accept( 403 403 const node_t * & parent, 404 child_t parent_t::*child404 field_t super_t::*field 405 405 ) { 406 static_assert( std::is_base_of< parent_t, node_t>::value, "Error deducing member object" );407 408 if(__pass::skip(parent->* child)) return;409 const auto & old_val = __pass::get(parent->* child, 0);406 static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" ); 407 408 if(__pass::skip(parent->*field)) return; 409 const auto & old_val = __pass::get(parent->*field, 0); 410 410 411 411 static_assert( !std::is_same<const ast::Node * &, decltype(old_val)>::value, "ERROR"); … … 417 417 if( new_val.differs ) { 418 418 auto new_parent = __pass::mutate<core_t>(parent); 419 new_val.apply(new_parent, child);419 new_val.apply(new_parent, field); 420 420 parent = new_parent; 421 421 } … … 423 423 424 424 template< typename core_t > 425 template<typename node_t, typename parent_t, typename child_t>425 template<typename node_t, typename super_t, typename field_t> 426 426 void ast::Pass< core_t >::maybe_accept_as_compound( 427 427 const node_t * & parent, 428 child_t parent_t::*child428 field_t super_t::*child 429 429 ) { 430 static_assert( std::is_base_of< parent_t, node_t>::value, "Error deducing member object" );430 static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" ); 431 431 432 432 if(__pass::skip(parent->*child)) return; … … 575 575 __pass::symtab::addId( core, 0, func ); 576 576 if ( __visit_children() ) { 577 // parameter declarations 577 maybe_accept( node, &FunctionDecl::type_params ); 578 maybe_accept( node, &FunctionDecl::assertions ); 578 579 maybe_accept( node, &FunctionDecl::params ); 579 580 maybe_accept( node, &FunctionDecl::returns ); 580 // type params and assertions 581 maybe_accept( node, &FunctionDecl::type_params ); 582 maybe_accept( node, &FunctionDecl::assertions ); 581 maybe_accept( node, &FunctionDecl::type ); 583 582 // First remember that we are now within a function. 
584 583 ValueGuard< bool > oldInFunction( inFunction ); … … 893 892 if ( __visit_children() ) { 894 893 maybe_accept( node, &SwitchStmt::cond ); 895 maybe_accept( node, &SwitchStmt:: stmts );894 maybe_accept( node, &SwitchStmt::cases ); 896 895 } 897 896 … … 900 899 901 900 //-------------------------------------------------------------------------- 902 // Case Stmt903 template< typename core_t > 904 const ast:: Stmt * ast::Pass< core_t >::visit( const ast::CaseStmt* node ) {905 VISIT_START( node ); 906 907 if ( __visit_children() ) { 908 maybe_accept( node, &Case Stmt::cond );909 maybe_accept( node, &Case Stmt::stmts );910 } 911 912 VISIT_END( Stmt, node );901 // CaseClause 902 template< typename core_t > 903 const ast::CaseClause * ast::Pass< core_t >::visit( const ast::CaseClause * node ) { 904 VISIT_START( node ); 905 906 if ( __visit_children() ) { 907 maybe_accept( node, &CaseClause::cond ); 908 maybe_accept( node, &CaseClause::stmts ); 909 } 910 911 VISIT_END( CaseClause, node ); 913 912 } 914 913 … … 964 963 965 964 //-------------------------------------------------------------------------- 966 // Catch Stmt967 template< typename core_t > 968 const ast:: Stmt * ast::Pass< core_t >::visit( const ast::CatchStmt* node ) {965 // CatchClause 966 template< typename core_t > 967 const ast::CatchClause * ast::Pass< core_t >::visit( const ast::CatchClause * node ) { 969 968 VISIT_START( node ); 970 969 … … 972 971 // catch statements introduce a level of scope (for the caught exception) 973 972 guard_symtab guard { *this }; 974 maybe_accept( node, &Catch Stmt::decl );975 maybe_accept( node, &Catch Stmt::cond );976 maybe_accept_as_compound( node, &Catch Stmt::body );977 } 978 979 VISIT_END( Stmt, node );980 } 981 982 //-------------------------------------------------------------------------- 983 // Finally Stmt984 template< typename core_t > 985 const ast:: Stmt * ast::Pass< core_t >::visit( const ast::FinallyStmt* node ) {986 VISIT_START( node ); 987 988 if ( __visit_children() ) { 989 maybe_accept( node, &Finally Stmt::body );990 } 991 992 VISIT_END( Stmt, node );973 maybe_accept( node, &CatchClause::decl ); 974 maybe_accept( node, &CatchClause::cond ); 975 maybe_accept_as_compound( node, &CatchClause::body ); 976 } 977 978 VISIT_END( CatchClause, node ); 979 } 980 981 //-------------------------------------------------------------------------- 982 // FinallyClause 983 template< typename core_t > 984 const ast::FinallyClause * ast::Pass< core_t >::visit( const ast::FinallyClause * node ) { 985 VISIT_START( node ); 986 987 if ( __visit_children() ) { 988 maybe_accept( node, &FinallyClause::body ); 989 } 990 991 VISIT_END( FinallyClause, node ); 993 992 } 994 993 … … 1054 1053 auto n = __pass::mutate<core_t>(node); 1055 1054 for(size_t i = 0; i < new_clauses.size(); i++) { 1056 if(new_clauses.at(i).target.func != nullptr) s td::swap(n->clauses.at(i).target.func, new_clauses.at(i).target.func);1055 if(new_clauses.at(i).target.func != nullptr) swap(n->clauses.at(i).target.func, new_clauses.at(i).target.func); 1057 1056 1058 1057 for(size_t j = 0; j < new_clauses.at(i).target.args.size(); j++) { 1059 if(new_clauses.at(i).target.args.at(j) != nullptr) s td::swap(n->clauses.at(i).target.args.at(j), new_clauses.at(i).target.args.at(j));1058 if(new_clauses.at(i).target.args.at(j) != nullptr) swap(n->clauses.at(i).target.args.at(j), new_clauses.at(i).target.args.at(j)); 1060 1059 } 1061 1060 1062 if(new_clauses.at(i).stmt != nullptr) s td::swap(n->clauses.at(i).stmt, new_clauses.at(i).stmt);1063 
if(new_clauses.at(i).cond != nullptr) s td::swap(n->clauses.at(i).cond, new_clauses.at(i).cond);1061 if(new_clauses.at(i).stmt != nullptr) swap(n->clauses.at(i).stmt, new_clauses.at(i).stmt); 1062 if(new_clauses.at(i).cond != nullptr) swap(n->clauses.at(i).cond, new_clauses.at(i).cond); 1064 1063 } 1065 1064 node = n; … … 1516 1515 } 1517 1516 maybe_accept( node, &TypeExpr::type ); 1517 } 1518 1519 VISIT_END( Expr, node ); 1520 } 1521 1522 //-------------------------------------------------------------------------- 1523 // DimensionExpr 1524 template< typename core_t > 1525 const ast::Expr * ast::Pass< core_t >::visit( const ast::DimensionExpr * node ) { 1526 VISIT_START( node ); 1527 1528 if ( __visit_children() ) { 1529 guard_symtab guard { *this }; 1530 maybe_accept( node, &DimensionExpr::result ); 1518 1531 } 1519 1532 … … 1859 1872 1860 1873 if ( __visit_children() ) { 1861 // xxx - should PointerType visit/mutate dimension?1874 maybe_accept( node, &PointerType::dimension ); 1862 1875 maybe_accept( node, &PointerType::base ); 1863 1876 } … … 2151 2164 2152 2165 if ( __visit_children() ) { 2153 { 2154 bool mutated = false; 2155 std::unordered_map< ast::TypeInstType::TypeEnvKey, ast::ptr< ast::Type > > new_map; 2156 for ( const auto & p : node->typeEnv ) { 2157 guard_symtab guard { *this }; 2158 auto new_node = p.second->accept( *this ); 2159 if (new_node != p.second) mutated = true; 2160 new_map.insert({ p.first, new_node }); 2161 } 2162 if (mutated) { 2163 auto new_node = __pass::mutate<core_t>( node ); 2164 new_node->typeEnv.swap( new_map ); 2165 node = new_node; 2166 } 2166 bool mutated = false; 2167 std::unordered_map< ast::TypeInstType::TypeEnvKey, ast::ptr< ast::Type > > new_map; 2168 for ( const auto & p : node->typeEnv ) { 2169 guard_symtab guard { *this }; 2170 auto new_node = p.second->accept( *this ); 2171 if (new_node != p.second) mutated = true; 2172 new_map.insert({ p.first, new_node }); 2173 } 2174 if (mutated) { 2175 auto new_node = __pass::mutate<core_t>( node ); 2176 new_node->typeEnv.swap( new_map ); 2177 node = new_node; 2167 2178 } 2168 2179 } -
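The Pass.impl.hpp hunks keep the visitor's copy-on-write idiom while renaming the deduced class parameter from parent_t to super_t: a child is visited through a pointer-to-member, and the parent node is cloned only when the returned child actually differs. A compressed standalone C++ sketch of that idiom; the Stmt/Expr types, the visit function, and the raw-pointer ownership here are simplifications, not the CFA pass machinery.

#include <cassert>
#include <memory>

struct Expr { int value = 0; };

struct Stmt {
    std::shared_ptr<const Expr> cond;
};

// A trivial "pass": returns a replacement child, or the same pointer when
// nothing changed.
static std::shared_ptr<const Expr> visit( const std::shared_ptr<const Expr> & e ) {
    if ( e && e->value < 0 ) {
        auto fixed = std::make_shared<Expr>( *e );
        fixed->value = 0;
        return fixed;
    }
    return e;
}

// maybe_accept-style helper: visit parent->*field and clone the parent only
// when the child changed (copy-on-write).
template< typename node_t, typename super_t, typename field_t >
const node_t * maybe_accept( const node_t * parent, field_t super_t::* field ) {
    auto old_child = parent->*field;
    auto new_child = visit( old_child );
    if ( new_child == old_child ) return parent;   // untouched: share the node
    auto mutated = new node_t( *parent );          // touched: clone, then patch
    mutated->*field = new_child;
    return mutated;
}

int main() {
    Stmt s;
    auto e = std::make_shared<Expr>();
    e->value = -5;
    s.cond = e;
    const Stmt * out = maybe_accept( &s, &Stmt::cond );
    assert( out != &s && out->cond->value == 0 );
    delete out;                    // ownership kept deliberately simple here
}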
src/AST/Pass.proto.hpp
rba897d21 r2e9b59b 26 26 27 27 struct PureVisitor; 28 29 template<typename node_t> 30 node_t * deepCopy( const node_t * localRoot ); 28 31 29 32 namespace __pass { … … 396 399 static inline auto addStructFwd( core_t & core, int, const ast::StructDecl * decl ) -> decltype( core.symtab.addStruct( decl ), void() ) { 397 400 ast::StructDecl * fwd = new ast::StructDecl( decl->location, decl->name ); 398 fwd->params = decl->params; 401 for ( const auto & param : decl->params ) { 402 fwd->params.push_back( deepCopy( param.get() ) ); 403 } 399 404 core.symtab.addStruct( fwd ); 400 405 } … … 405 410 template<typename core_t> 406 411 static inline auto addUnionFwd( core_t & core, int, const ast::UnionDecl * decl ) -> decltype( core.symtab.addUnion( decl ), void() ) { 407 UnionDecl * fwd = new UnionDecl( decl->location, decl->name ); 408 fwd->params = decl->params; 412 ast::UnionDecl * fwd = new ast::UnionDecl( decl->location, decl->name ); 413 for ( const auto & param : decl->params ) { 414 fwd->params.push_back( deepCopy( param.get() ) ); 415 } 409 416 core.symtab.addUnion( fwd ); 410 417 } -
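The Pass.proto.hpp hunk stops the generated struct/union forward declaration from sharing its parameter nodes with the original declaration: each param is now deepCopy'd into the stub. The toy C++ illustration below shows why the copy matters when two owners might later mutate "their" children independently; the StructDecl/TypeParam types and helper names are made up for the example.

#include <cassert>
#include <memory>
#include <string>
#include <vector>

struct TypeParam { std::string name; };

struct StructDecl {
    std::string name;
    std::vector<std::shared_ptr<TypeParam>> params;
};

// Forward declaration that *shares* the parameter nodes: edits through one
// owner become visible through the other.
static StructDecl sharedForward( const StructDecl & d ) {
    return StructDecl{ d.name, d.params };
}

// Forward declaration that *deep-copies* the parameters, analogous to the
// changeset's deepCopy( param.get() ).
static StructDecl copiedForward( const StructDecl & d ) {
    StructDecl fwd{ d.name, {} };
    for ( const auto & p : d.params )
        fwd.params.push_back( std::make_shared<TypeParam>( *p ) );
    return fwd;
}

int main() {
    StructDecl s{ "stack", { std::make_shared<TypeParam>( TypeParam{ "T" } ) } };
    StructDecl shared = sharedForward( s );
    StructDecl copied = copiedForward( s );

    s.params[0]->name = "U";                  // later mutation of the original
    assert( shared.params[0]->name == "U" );  // the shared stub changed too
    assert( copied.params[0]->name == "T" );  // the deep-copied stub did not
}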
src/AST/Print.cpp
rba897d21 r2e9b59b 210 210 } 211 211 212 auto ptrToEnum = dynamic_cast<const ast::EnumDecl *>(node); 213 if ( ! short_mode && ptrToEnum && ptrToEnum->base ) { 214 os << endl << indent << ".. with (enum) base" << endl; 215 ++indent; 216 ptrToEnum->base->accept( *this ); 217 --indent; 218 } 219 212 220 os << endl; 213 221 } … … 589 597 590 598 ++indent; 591 for ( const ast:: Stmt * stmt : node->stmts ) {599 for ( const ast::CaseClause * stmt : node->cases ) { 592 600 stmt->accept( *this ); 593 601 } … … 597 605 } 598 606 599 virtual const ast:: Stmt * visit( const ast::CaseStmt* node ) override final {607 virtual const ast::CaseClause * visit( const ast::CaseClause * node ) override final { 600 608 if ( node->isDefault() ) { 601 609 os << indent << "Default "; … … 679 687 680 688 os << indent-1 << "... and handlers:" << endl; 681 for ( const ast::Catch Stmt* stmt : node->handlers ) {689 for ( const ast::CatchClause * stmt : node->handlers ) { 682 690 os << indent; 683 691 stmt->accept( *this ); … … 693 701 } 694 702 695 virtual const ast:: Stmt * visit( const ast::CatchStmt* node ) override final {703 virtual const ast::CatchClause * visit( const ast::CatchClause * node ) override final { 696 704 os << "Catch "; 697 705 switch ( node->kind ) { … … 718 726 } 719 727 720 virtual const ast:: Stmt * visit( const ast::FinallyStmt* node ) override final {728 virtual const ast::FinallyClause * visit( const ast::FinallyClause * node ) override final { 721 729 os << "Finally Statement" << endl; 722 730 os << indent << "... with block:" << endl; … … 1088 1096 virtual const ast::Expr * visit( const ast::TypeExpr * node ) override final { 1089 1097 safe_print( node->type ); 1098 postprint( node ); 1099 1100 return node; 1101 } 1102 1103 virtual const ast::Expr * visit( const ast::DimensionExpr * node ) override final { 1104 os << "Type-Sys Value: " << node->name; 1090 1105 postprint( node ); 1091 1106 -
src/AST/Stmt.hpp
rba897d21 r2e9b59b 9 9 // Author : Aaron B. Moss 10 10 // Created On : Wed May 8 13:00:00 2019 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Wed Feb 2 20:06:41202213 // Update Count : 3 411 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Mar 28 9:50:00 2022 13 // Update Count : 35 14 14 // 15 15 … … 47 47 private: 48 48 Stmt * clone() const override = 0; 49 MUTATE_FRIEND 50 }; 51 52 // Base statement component node (only serves to group them). 53 class StmtClause : public ParseNode { 54 public: 55 // This is for non-statements that still belong with the statements, 56 // but are not statements, usually some sort of clause. Often these can 57 // (and should) be folded into the approprate parent node, but if they 58 // cannot be, they are sub-types of this type, for organization. 59 60 StmtClause( const CodeLocation & loc ) 61 : ParseNode(loc) {} 62 63 private: 64 StmtClause * clone() const override = 0; 49 65 MUTATE_FRIEND 50 66 }; … … 158 174 public: 159 175 ptr<Expr> cond; 176 std::vector<ptr<CaseClause>> cases; 177 178 SwitchStmt( const CodeLocation & loc, const Expr * cond, 179 const std::vector<ptr<CaseClause>> && cases, 180 const std::vector<Label> && labels = {} ) 181 : Stmt(loc, std::move(labels)), cond(cond), cases(std::move(cases)) {} 182 183 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 184 private: 185 SwitchStmt * clone() const override { return new SwitchStmt{ *this }; } 186 MUTATE_FRIEND 187 }; 188 189 // Case label: case ...: or default: 190 class CaseClause final : public StmtClause { 191 public: 192 // Null for the default label. 193 ptr<Expr> cond; 160 194 std::vector<ptr<Stmt>> stmts; 161 195 162 SwitchStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts, 163 const std::vector<Label> && labels = {} ) 164 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {} 165 166 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 167 private: 168 SwitchStmt * clone() const override { return new SwitchStmt{ *this }; } 169 MUTATE_FRIEND 170 }; 171 172 // Case label: case ...: or default: 173 class CaseStmt final : public Stmt { 174 public: 175 // Null for the default label. 
176 ptr<Expr> cond; 177 std::vector<ptr<Stmt>> stmts; 178 179 CaseStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts, 180 const std::vector<Label> && labels = {} ) 181 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {} 196 CaseClause( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts ) 197 : StmtClause(loc), cond(cond), stmts(std::move(stmts)) {} 182 198 183 199 bool isDefault() const { return !cond; } 184 200 185 const Stmt* accept( Visitor & v ) const override { return v.visit( this ); }186 private: 187 Case Stmt * clone() const override { return new CaseStmt{ *this }; }201 const CaseClause * accept( Visitor & v ) const override { return v.visit( this ); } 202 private: 203 CaseClause * clone() const override { return new CaseClause{ *this }; } 188 204 MUTATE_FRIEND 189 205 }; … … 298 314 public: 299 315 ptr<CompoundStmt> body; 300 std::vector<ptr<Catch Stmt>> handlers;301 ptr<Finally Stmt> finally;316 std::vector<ptr<CatchClause>> handlers; 317 ptr<FinallyClause> finally; 302 318 303 319 TryStmt( const CodeLocation & loc, const CompoundStmt * body, 304 const std::vector<ptr<Catch Stmt>> && handlers, const FinallyStmt* finally,320 const std::vector<ptr<CatchClause>> && handlers, const FinallyClause * finally, 305 321 const std::vector<Label> && labels = {} ) 306 322 : Stmt(loc, std::move(labels)), body(body), handlers(std::move(handlers)), finally(finally) {} … … 313 329 314 330 // Catch clause of try statement 315 class Catch Stmt final : public Stmt{331 class CatchClause final : public StmtClause { 316 332 public: 317 333 ptr<Decl> decl; … … 320 336 ExceptionKind kind; 321 337 322 Catch Stmt( const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond,323 const Stmt * body , const std::vector<Label> && labels = {})324 : Stmt (loc, std::move(labels)), decl(decl), cond(cond), body(body), kind(kind) {}325 326 const Stmt* accept( Visitor & v ) const override { return v.visit( this ); }327 private: 328 Catch Stmt * clone() const override { return new CatchStmt{ *this }; }338 CatchClause( const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond, 339 const Stmt * body ) 340 : StmtClause(loc), decl(decl), cond(cond), body(body), kind(kind) {} 341 342 const CatchClause * accept( Visitor & v ) const override { return v.visit( this ); } 343 private: 344 CatchClause * clone() const override { return new CatchClause{ *this }; } 329 345 MUTATE_FRIEND 330 346 }; 331 347 332 348 // Finally clause of try statement 333 class Finally Stmt final : public Stmt{349 class FinallyClause final : public StmtClause { 334 350 public: 335 351 ptr<CompoundStmt> body; 336 352 337 FinallyStmt( const CodeLocation & loc, const CompoundStmt * body, 338 std::vector<Label> && labels = {} ) 339 : Stmt(loc, std::move(labels)), body(body) {} 340 341 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 342 private: 343 FinallyStmt * clone() const override { return new FinallyStmt{ *this }; } 353 FinallyClause( const CodeLocation & loc, const CompoundStmt * body ) 354 : StmtClause(loc), body(body) {} 355 356 const FinallyClause * accept( Visitor & v ) const override { return v.visit( this ); } 357 private: 358 FinallyClause * clone() const override { return new FinallyClause{ *this }; } 344 359 MUTATE_FRIEND 345 360 }; -
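Stmt.hpp now separates full statements from clause nodes: CaseClause, CatchClause and FinallyClause derive from the new StmtClause base, carry no label list, and are owned by their SwitchStmt/TryStmt. A compact standalone C++ sketch of that shape (the classes here are simplified stand-ins with unique_ptr ownership, not the CFA AST):

#include <memory>
#include <string>
#include <vector>

struct Stmt { std::vector<std::string> labels; virtual ~Stmt() = default; };

// Clause: belongs to a statement but is not itself a statement, so it has
// no label list and cannot be a branch target.
struct StmtClause { virtual ~StmtClause() = default; };

struct CatchClause : StmtClause {
    std::string exceptionType;
    std::unique_ptr<Stmt> body;
};

struct FinallyClause : StmtClause {
    std::unique_ptr<Stmt> body;
};

struct TryStmt : Stmt {
    std::unique_ptr<Stmt> block;
    std::vector<std::unique_ptr<CatchClause>> handlers;   // zero or more catches
    std::unique_ptr<FinallyClause> finally;                // optional finally
};

int main() {
    TryStmt t;
    t.block = std::make_unique<Stmt>();
    auto h = std::make_unique<CatchClause>();
    h->exceptionType = "io_error";
    h->body = std::make_unique<Stmt>();
    t.handlers.push_back( std::move( h ) );
    t.finally = std::make_unique<FinallyClause>();
    t.finally->body = std::make_unique<Stmt>();
    return t.handlers.size() == 1 ? 0 : 1;
}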
src/AST/TranslationUnit.hpp
rba897d21 r2e9b59b 10 10 // Created On : Tue Jun 11 15:30:00 2019 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tue Jun 11 15:42:00 201913 // Update Count : 012 // Last Modified On : Tue Mar 11 11:19:00 2022 13 // Update Count : 1 14 14 // 15 15 … … 23 23 namespace ast { 24 24 25 class TranslationGlobal { 26 public: 27 std::map< UniqueId, Decl * > idMap; 28 29 ptr<Type> sizeType; 30 const FunctionDecl * dereference; 31 const StructDecl * dtorStruct; 32 const FunctionDecl * dtorDestroy; 33 }; 34 25 35 class TranslationUnit { 26 36 public: 27 37 std::list< ptr< Decl > > decls; 28 29 struct Global { 30 std::map< UniqueId, Decl * > idMap; 31 32 ptr<Type> sizeType; 33 const FunctionDecl * dereference; 34 const StructDecl * dtorStruct; 35 const FunctionDecl * dtorDestroy; 36 } global; 38 TranslationGlobal global; 37 39 }; 38 40 -
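TranslationUnit.hpp promotes the nested Global struct to a named TranslationGlobal class, which is what lets Fwd.hpp drop the loose extern globals (sizeType, dereferenceOperator, and friends) in favour of a forward declaration. A rough C++ sketch of the resulting shape, with Decl and the field set heavily simplified for illustration:

#include <list>
#include <map>
#include <memory>
#include <string>

struct Decl { std::string name; };

// A named aggregate that can be forward-declared and passed around on its
// own, instead of a nested struct mirrored by free extern pointers.
class TranslationGlobal {
public:
    std::map<unsigned, Decl *> idMap;
    const Decl * dereference = nullptr;
    const Decl * dtorStruct  = nullptr;
    const Decl * dtorDestroy = nullptr;
};

class TranslationUnit {
public:
    std::list<std::shared_ptr<Decl>> decls;
    TranslationGlobal global;     // compiler-wide info lives here now
};

int main() {
    TranslationUnit unit;
    return unit.global.dereference == nullptr ? 0 : 1;
}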
src/AST/Type.cpp
rba897d21 r2e9b59b 147 147 // --- TypeInstType 148 148 149 TypeInstType::TypeInstType( const TypeDecl * b, 150 CV::Qualifiers q, std::vector<ptr<Attribute>> && as ) 151 : BaseInstType( b->name, q, move(as) ), base( b ), kind( b->kind ) {} 152 149 153 void TypeInstType::set_base( const TypeDecl * b ) { 150 154 base = b; -
src/AST/Type.hpp
rba897d21 r2e9b59b 421 421 std::vector<ptr<Attribute>> && as = {} ) 422 422 : BaseInstType( n, q, std::move(as) ), base( b ), kind( b->kind ) {} 423 424 TypeInstType( const TypeDecl * b, 425 CV::Qualifiers q = {}, std::vector<ptr<Attribute>> && as = {} ); 426 423 427 TypeInstType( const std::string& n, TypeDecl::Kind k, CV::Qualifiers q = {}, 424 428 std::vector<ptr<Attribute>> && as = {} ) -
src/AST/TypeSubstitution.hpp
rba897d21 r2e9b59b 37 37 public: 38 38 TypeSubstitution(); 39 template< typename FormalContainer, typename ActualContainer > 40 TypeSubstitution( FormalContainer formals, ActualContainer actuals ); 39 41 template< typename FormalIterator, typename ActualIterator > 40 42 TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ); … … 76 78 bool empty() const; 77 79 80 template< typename FormalContainer, typename ActualContainer > 81 void addAll( FormalContainer formals, ActualContainer actuals ); 78 82 template< typename FormalIterator, typename ActualIterator > 79 void add ( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin );83 void addAll( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ); 80 84 81 85 /// create a new TypeSubstitution using bindings from env containing all of the type variables in expr … … 112 116 }; 113 117 118 template< typename FormalContainer, typename ActualContainer > 119 TypeSubstitution::TypeSubstitution( FormalContainer formals, ActualContainer actuals ) { 120 assert( formals.size() == actuals.size() ); 121 addAll( formals.begin(), formals.end(), actuals.begin() ); 122 } 123 124 template< typename FormalIterator, typename ActualIterator > 125 TypeSubstitution::TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) { 126 addAll( formalBegin, formalEnd, actualBegin ); 127 } 128 129 template< typename FormalContainer, typename ActualContainer > 130 void TypeSubstitution::addAll( FormalContainer formals, ActualContainer actuals ) { 131 assert( formals.size() == actuals.size() ); 132 addAll( formals.begin(), formals.end(), actuals.begin() ); 133 } 134 114 135 // this is the only place where type parameters outside a function formal may be substituted. 115 136 template< typename FormalIterator, typename ActualIterator > 116 void TypeSubstitution::add ( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {137 void TypeSubstitution::addAll( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) { 117 138 // FormalIterator points to a TypeDecl 118 139 // ActualIterator points to a Type … … 129 150 } // if 130 151 } else { 131 152 // Is this an error? 132 153 } // if 133 154 } // for 134 155 } 135 136 137 138 template< typename FormalIterator, typename ActualIterator >139 TypeSubstitution::TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {140 add( formalBegin, formalEnd, actualBegin );141 }142 143 156 144 157 } // namespace ast -
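TypeSubstitution gains whole-container overloads (constructor and addAll) that assert the formal and actual lists have the same length before walking them pairwise, which is what the GenericSubstitution.cpp hunk above now calls. A generic standalone C++ sketch of that container-plus-assert interface; the Substitution class and its string-to-string map are illustrative, not the CFA types.

#include <cassert>
#include <map>
#include <string>
#include <vector>

// Illustrative substitution from type-parameter names to concrete type names.
class Substitution {
    std::map<std::string, std::string> bindings;
public:
    Substitution() = default;

    // Whole-container form: lengths are checked once, up front.
    template< typename Formals, typename Actuals >
    Substitution( const Formals & formals, const Actuals & actuals ) {
        addAll( formals, actuals );
    }

    template< typename Formals, typename Actuals >
    void addAll( const Formals & formals, const Actuals & actuals ) {
        assert( formals.size() == actuals.size() );
        addAll( formals.begin(), formals.end(), actuals.begin() );
    }

    // Iterator form kept for callers that already hold a matched range.
    template< typename FormalIt, typename ActualIt >
    void addAll( FormalIt fb, FormalIt fe, ActualIt ab ) {
        for ( ; fb != fe; ++fb, ++ab ) bindings[*fb] = *ab;
    }

    const std::string & at( const std::string & formal ) const {
        return bindings.at( formal );
    }
};

int main() {
    std::vector<std::string> formals = { "T", "U" };
    std::vector<std::string> actuals = { "int", "double" };
    Substitution sub( formals, actuals );
    return ( sub.at( "T" ) == "int" && sub.at( "U" ) == "double" ) ? 0 : 1;
}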
src/AST/Util.cpp
rba897d21 r2e9b59b 10 10 // Created On : Wed Jan 19 9:46:00 2022 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Fri Feb 18 9:42:00 202213 // Update Count : 012 // Last Modified On : Fri Mar 11 18:07:00 2022 13 // Update Count : 1 14 14 // 15 15 16 16 #include "Util.hpp" 17 17 18 #include "Decl.hpp"19 18 #include "Node.hpp" 19 #include "ParseNode.hpp" 20 20 #include "Pass.hpp" 21 21 #include "TranslationUnit.hpp" 22 #include "Common/ScopedMap.h"23 22 24 23 #include <vector> … … 46 45 }; 47 46 47 /// Check that every note that can has a set CodeLocation. 48 struct SetCodeLocationsCore { 49 void previsit( const ParseNode * node ) { 50 assert( node->location.isSet() ); 51 } 52 }; 53 48 54 struct InvariantCore { 49 55 // To save on the number of visits: this is a kind of composed core. 50 56 // None of the passes should make changes so ordering doesn't matter. 51 57 NoStrongCyclesCore no_strong_cycles; 58 SetCodeLocationsCore set_code_locations; 52 59 53 60 void previsit( const Node * node ) { 54 61 no_strong_cycles.previsit( node ); 62 } 63 64 void previsit( const ParseNode * node ) { 65 no_strong_cycles.previsit( node ); 66 set_code_locations.previsit( node ); 55 67 } 56 68 -
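Util.cpp extends the composed invariant pass with a SetCodeLocationsCore, so the same traversal that checks for strong cycles now also checks that every parse node carries a set source location. A toy C++ sketch of composing small check "cores" into one previsit; Node, ParseNode and the two checks are minimal stand-ins for the real classes.

#include <cassert>

struct CodeLocation { int line = -1; bool isSet() const { return line >= 0; } };

struct Node { virtual ~Node() = default; };
struct ParseNode : Node { CodeLocation location; };

// Two independent checks...
struct NoNullCore {
    void previsit( const Node * n ) { assert( n != nullptr ); }
};
struct SetCodeLocationsCore {
    void previsit( const ParseNode * n ) { assert( n->location.isSet() ); }
};

// ...composed into one core so a single walk runs both; the more specific
// ParseNode overload applies both checks, the Node overload only the first.
struct InvariantCore {
    NoNullCore no_null;
    SetCodeLocationsCore set_locations;

    void previsit( const Node * n ) { no_null.previsit( n ); }
    void previsit( const ParseNode * n ) {
        no_null.previsit( n );
        set_locations.previsit( n );
    }
};

int main() {
    ParseNode p; p.location.line = 3;
    Node plain;
    InvariantCore core;
    core.previsit( &plain );   // only the null check applies
    core.previsit( &p );       // both checks apply
}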
src/AST/Visitor.hpp
rba897d21 r2e9b59b 41 41 virtual const ast::Stmt * visit( const ast::ForStmt * ) = 0; 42 42 virtual const ast::Stmt * visit( const ast::SwitchStmt * ) = 0; 43 virtual const ast:: Stmt * visit( const ast::CaseStmt* ) = 0;43 virtual const ast::CaseClause * visit( const ast::CaseClause * ) = 0; 44 44 virtual const ast::Stmt * visit( const ast::BranchStmt * ) = 0; 45 45 virtual const ast::Stmt * visit( const ast::ReturnStmt * ) = 0; 46 46 virtual const ast::Stmt * visit( const ast::ThrowStmt * ) = 0; 47 47 virtual const ast::Stmt * visit( const ast::TryStmt * ) = 0; 48 virtual const ast:: Stmt * visit( const ast::CatchStmt* ) = 0;49 virtual const ast:: Stmt * visit( const ast::FinallyStmt* ) = 0;48 virtual const ast::CatchClause * visit( const ast::CatchClause * ) = 0; 49 virtual const ast::FinallyClause * visit( const ast::FinallyClause * ) = 0; 50 50 virtual const ast::Stmt * visit( const ast::SuspendStmt * ) = 0; 51 51 virtual const ast::Stmt * visit( const ast::WaitForStmt * ) = 0; … … 76 76 virtual const ast::Expr * visit( const ast::CommaExpr * ) = 0; 77 77 virtual const ast::Expr * visit( const ast::TypeExpr * ) = 0; 78 virtual const ast::Expr * visit( const ast::DimensionExpr * ) = 0; 78 79 virtual const ast::Expr * visit( const ast::AsmExpr * ) = 0; 79 80 virtual const ast::Expr * visit( const ast::ImplicitCopyCtorExpr * ) = 0; -
src/CodeGen/CodeGenerator.cc
rba897d21 r2e9b59b 274 274 void CodeGenerator::postvisit( EnumDecl * enumDecl ) { 275 275 extension( enumDecl ); 276 output << "enum ";277 genAttributes( enumDecl->get_attributes() );278 279 output << enumDecl->get_name();280 281 276 std::list< Declaration* > &memb = enumDecl->get_members(); 282 283 if ( ! memb.empty() ) { 284 output << " {" << endl; 285 286 ++indent; 277 if (enumDecl->base && ! memb.empty()) { 278 unsigned long long last_val = -1; 287 279 for ( std::list< Declaration* >::iterator i = memb.begin(); i != memb.end(); i++) { 288 280 ObjectDecl * obj = dynamic_cast< ObjectDecl* >( *i ); 289 281 assert( obj ); 290 output << indent << mangleName( obj ); 291 if ( obj->get_init() ) { 292 output << " = "; 293 obj->get_init()->accept( *visitor ); 294 } // if 295 output << "," << endl; 282 output << "static const "; 283 output << genType(enumDecl->base, "", options) << " "; 284 output << mangleName( obj ) << " "; 285 output << " = "; 286 output << "(" << genType(enumDecl->base, "", options) << ")"; 287 if ( (BasicType *)(enumDecl->base) && ((BasicType *)(enumDecl->base))->isWholeNumber() ) { 288 if ( obj->get_init() ) { 289 obj->get_init()->accept( *visitor ); 290 last_val = ((ConstantExpr *)(((SingleInit *)(obj->init))->value))->constant.get_ival(); 291 } else { 292 output << ++last_val; 293 } // if 294 } else { 295 if ( obj->get_init() ) { 296 obj->get_init()->accept( *visitor ); 297 } else { 298 // Should not reach here! 299 } 300 } 301 output << ";" << endl; 296 302 } // for 297 303 } else { 304 output << "enum "; 305 genAttributes( enumDecl->get_attributes() ); 306 307 output << enumDecl->get_name(); 308 309 if ( ! memb.empty() ) { 310 output << " {" << endl; 311 312 ++indent; 313 for ( std::list< Declaration* >::iterator i = memb.begin(); i != memb.end(); i++) { 314 ObjectDecl * obj = dynamic_cast< ObjectDecl* >( *i ); 315 assert( obj ); 316 output << indent << mangleName( obj ); 317 if ( obj->get_init() ) { 318 output << " = "; 319 obj->get_init()->accept( *visitor ); 320 } // if 321 output << "," << endl; 322 } // for 298 323 --indent; 299 300 324 output << indent << "}"; 325 } // if 301 326 } // if 302 327 } … … 347 372 des->accept( *visitor ); 348 373 } else { 349 // otherwise, it has to be a ConstantExpr or CastExpr, initializing array ele emnt374 // otherwise, it has to be a ConstantExpr or CastExpr, initializing array element 350 375 output << "["; 351 376 des->accept( *visitor ); … … 661 686 output << opInfo->symbol; 662 687 } else { 688 // if (dynamic_cast<EnumInstType *>(variableExpr->get_var()->get_type()) 689 // && dynamic_cast<EnumInstType *>(variableExpr->get_var()->get_type())->baseEnum->base) { 690 // output << '(' <<genType(dynamic_cast<EnumInstType *>(variableExpr->get_var()->get_type())->baseEnum->base, "", options) << ')'; 691 // } 663 692 output << mangleName( variableExpr->get_var() ); 664 693 } // if -
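With a base type attached, the code-generator path above no longer emits a C enum at all: each enumerator becomes a static const object of the base type, and for whole-number bases a missing initializer is filled in as the previous value plus one. A hedged example of the kind of C output this produces for a char-typed enumeration; the Cforall input spelling and the flat Letter_A/Letter_B names are illustrative, and real output uses mangled names.

/* Cforall-style input (illustrative):
 *     enum( char ) Letter { A = 'a', B = 'b' };
 *
 * Shape of the generated C for a typed enum: one object per enumerator
 * instead of an enum definition.
 */
static const char Letter_A = (char)'a';
static const char Letter_B = (char)'b';

int main() { return ( Letter_A == 'a' && Letter_B == 'b' ) ? 0 : 1; }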
src/CodeGen/FixMain.cc
rba897d21 r2e9b59b 91 91 } 92 92 93 ObjectDecl * charStarObj() {93 ObjectDecl * makeArgvObj() { 94 94 return new ObjectDecl( 95 95 "", Type::StorageClasses(), LinkageSpec::Cforall, 0, … … 117 117 main_type->get_returnVals().push_back( signedIntObj() ); 118 118 main_type->get_parameters().push_back( signedIntObj() ); 119 main_type->get_parameters().push_back( charStarObj() );119 main_type->get_parameters().push_back( makeArgvObj() ); 120 120 return create_mangled_main_function_name( main_type ); 121 121 } -
src/CodeGen/GenType.cc
rba897d21 r2e9b59b 253 253 254 254 void GenType::postvisit( EnumInstType * enumInst ) { 255 typeString = enumInst->name + " " + typeString; 256 if ( options.genC ) typeString = "enum " + typeString; 255 if ( enumInst->baseEnum->base ) { 256 typeString = genType(enumInst->baseEnum->base, "", options) + typeString; 257 } else { 258 typeString = enumInst->name + " " + typeString; 259 if ( options.genC ) { 260 typeString = "enum " + typeString; 261 } 262 } 257 263 handleQualifiers( enumInst ); 258 264 } -
src/Common/CodeLocationTools.cpp
rba897d21 r2e9b59b 9 9 // Author : Andrew Beach 10 10 // Created On : Fri Dec 4 15:42:00 2020 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Tue Feb 1 09:14:39202213 // Update Count : 311 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Mar 14 15:14:00 2022 13 // Update Count : 4 14 14 // 15 15 … … 112 112 macro(ForStmt, Stmt) \ 113 113 macro(SwitchStmt, Stmt) \ 114 macro(Case Stmt, Stmt) \114 macro(CaseClause, CaseClause) \ 115 115 macro(BranchStmt, Stmt) \ 116 116 macro(ReturnStmt, Stmt) \ 117 117 macro(ThrowStmt, Stmt) \ 118 118 macro(TryStmt, Stmt) \ 119 macro(Catch Stmt, Stmt) \120 macro(Finally Stmt, Stmt) \119 macro(CatchClause, CatchClause) \ 120 macro(FinallyClause, FinallyClause) \ 121 121 macro(SuspendStmt, Stmt) \ 122 122 macro(WaitForStmt, Stmt) \ … … 147 147 macro(CommaExpr, Expr) \ 148 148 macro(TypeExpr, Expr) \ 149 macro(DimensionExpr, Expr) \ 149 150 macro(AsmExpr, Expr) \ 150 151 macro(ImplicitCopyCtorExpr, Expr) \ … … 239 240 }; 240 241 242 class LocalFillCore : public ast::WithGuards { 243 CodeLocation const * parent; 244 public: 245 LocalFillCore( CodeLocation const & location ) : parent( &location ) { 246 assert( location.isSet() ); 247 } 248 249 template<typename node_t> 250 auto previsit( node_t const * node ) 251 -> typename std::enable_if<has_code_location<node_t>::value, node_t const *>::type { 252 if ( node->location.isSet() ) { 253 GuardValue( parent ) = &node->location; 254 return node; 255 } else { 256 node_t * mut = ast::mutate( node ); 257 mut->location = *parent; 258 return mut; 259 } 260 } 261 }; 262 241 263 } // namespace 242 264 … … 278 300 ast::Pass<FillCore>::run( unit ); 279 301 } 302 303 ast::Node const * localFillCodeLocations( 304 CodeLocation const & location , ast::Node const * node ) { 305 ast::Pass<LocalFillCore> visitor( location ); 306 return node->accept( visitor ); 307 } -
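LocalFillCore walks a subtree keeping the nearest enclosing location on a guard stack: a node with its own location becomes the new parent for everything beneath it, and a node without one is mutated to inherit the current parent, which is what localFillCodeLocations exposes. A simplified standalone C++ sketch of that fill rule on a toy tree; the Node type and the recursive localFill helper are illustrative, not the pass itself.

#include <cassert>
#include <memory>
#include <vector>

struct CodeLocation {
    int line = -1;
    bool isSet() const { return line >= 0; }
};

struct Node {
    CodeLocation location;
    std::vector<std::unique_ptr<Node>> children;
};

// Fill unset locations from the nearest ancestor that has one.
static void localFill( const CodeLocation & parent, Node & node ) {
    if ( !node.location.isSet() ) node.location = parent;   // inherit
    // Whatever the node now carries becomes the parent for its children.
    for ( auto & child : node.children ) localFill( node.location, *child );
}

int main() {
    Node root;  root.location.line = 10;
    auto mid   = std::make_unique<Node>();    // no location of its own
    auto leaf  = std::make_unique<Node>();
    leaf->location.line = 42;                 // already set: kept as-is
    auto leaf2 = std::make_unique<Node>();    // unset: inherits line 10
    mid->children.push_back( std::move( leaf ) );
    mid->children.push_back( std::move( leaf2 ) );
    root.children.push_back( std::move( mid ) );

    localFill( root.location, root );
    assert( root.children[0]->location.line == 10 );
    assert( root.children[0]->children[0]->location.line == 42 );
    assert( root.children[0]->children[1]->location.line == 10 );
}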
src/Common/CodeLocationTools.hpp
rba897d21 r2e9b59b 10 10 // Created On : Fri Dec 4 15:35:00 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Dec 9 9:53:00 202013 // Update Count : 112 // Last Modified On : Mon Mar 14 15:14:00 2022 13 // Update Count : 2 14 14 // 15 15 16 16 #pragma once 17 17 18 struct CodeLocation; 18 19 namespace ast { 20 class Node; 19 21 class TranslationUnit; 20 22 } … … 28 30 // Assign a nearby code-location to any unset code locations in the forest. 29 31 void forceFillCodeLocations( ast::TranslationUnit & unit ); 32 33 // Fill in code-locations with a parent code location, 34 // using the provided CodeLocation as the base. 35 ast::Node const * 36 localFillCodeLocations( CodeLocation const &, ast::Node const * ); -
src/Common/Eval.cc
rba897d21 r2e9b59b 112 112 } 113 113 114 void postvisit( const ast::VariableExpr * expr ) { 114 void postvisit( const ast::VariableExpr * expr ) { // No hit 115 115 if ( const ast::EnumInstType * inst = dynamic_cast<const ast::EnumInstType *>(expr->result.get()) ) { 116 116 if ( const ast::EnumDecl * decl = inst->base ) { -
src/Common/Examine.cc
rba897d21 r2e9b59b 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // Examine. h --7 // Examine.cc -- Helpers for examining AST code. 8 8 // 9 9 // Author : Andrew Beach 10 10 // Created On : Wed Sept 2 14:02 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Sep 8 12:15 202013 // Update Count : 012 // Last Modified On : Fri Dec 10 10:27 2021 13 // Update Count : 1 14 14 // 15 15 16 16 #include "Common/Examine.h" 17 17 18 #include "AST/Type.hpp" 18 19 #include "CodeGen/OperatorTable.h" 20 #include "InitTweak/InitTweak.h" 19 21 20 22 DeclarationWithType * isMainFor( FunctionDecl * func, AggregateDecl::Aggregate kind ) { … … 36 38 37 39 namespace { 40 41 // getTypeofThis but does some extra checks used in this module. 42 const ast::Type * getTypeofThisSolo( const ast::FunctionDecl * func ) { 43 if ( 1 != func->params.size() ) { 44 return nullptr; 45 } 46 auto ref = func->type->params.front().as<ast::ReferenceType>(); 47 return (ref) ? ref->base : nullptr; 48 } 49 50 } 51 52 const ast::DeclWithType * isMainFor( 53 const ast::FunctionDecl * func, ast::AggregateDecl::Aggregate kind ) { 54 if ( "main" != func->name ) return nullptr; 55 if ( 1 != func->params.size() ) return nullptr; 56 57 auto param = func->params.front(); 58 59 auto type = dynamic_cast<const ast::ReferenceType *>( param->get_type() ); 60 if ( !type ) return nullptr; 61 62 auto obj = type->base.as<ast::StructInstType>(); 63 if ( !obj ) return nullptr; 64 65 if ( kind != obj->base->kind ) return nullptr; 66 67 return param; 68 } 69 70 namespace { 38 71 Type * getDestructorParam( FunctionDecl * func ) { 39 72 if ( !CodeGen::isDestructor( func->name ) ) return nullptr; … … 48 81 return nullptr; 49 82 } 83 84 const ast::Type * getDestructorParam( const ast::FunctionDecl * func ) { 85 if ( !CodeGen::isDestructor( func->name ) ) return nullptr; 86 //return InitTweak::getParamThis( func )->type; 87 return getTypeofThisSolo( func ); 88 } 89 50 90 } 51 91 … … 57 97 return false; 58 98 } 99 100 bool isDestructorFor( 101 const ast::FunctionDecl * func, const ast::StructDecl * type_decl ) { 102 if ( const ast::Type * type = getDestructorParam( func ) ) { 103 auto stype = dynamic_cast<const ast::StructInstType *>( type ); 104 return stype && stype->base.get() == type_decl; 105 } 106 return false; 107 } -
src/Common/Examine.h
rba897d21 r2e9b59b 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // Examine.h -- 7 // Examine.h -- Helpers for examining AST code. 8 8 // 9 9 // Author : Andrew Beach 10 10 // Created On : Wed Sept 2 13:57 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Sep 8 12:08 202013 // Update Count : 012 // Last Modified On : Fri Dec 10 10:28 2021 13 // Update Count : 1 14 14 // 15 15 16 #include "AST/Decl.hpp" 16 17 #include "SynTree/Declaration.h" 17 18 18 19 /// Check if this is a main function for a type of an aggregate kind. 19 20 DeclarationWithType * isMainFor( FunctionDecl * func, AggregateDecl::Aggregate kind ); 21 const ast::DeclWithType * isMainFor( 22 const ast::FunctionDecl * func, ast::AggregateDecl::Aggregate kind ); 20 23 // Returns a pointer to the parameter if true, nullptr otherwise. 21 24 22 25 /// Check if this function is a destructor for the given structure. 23 26 bool isDestructorFor( FunctionDecl * func, StructDecl * type_decl ); 27 bool isDestructorFor( 28 const ast::FunctionDecl * func, const ast::StructDecl * type ); -
src/Common/PassVisitor.impl.h
rba897d21 r2e9b59b 754 754 755 755 // unlike structs, traits, and unions, enums inject their members into the global scope 756 // if ( node->base ) maybeAccept_impl( node->base, *this ); // Need this? Maybe not? 756 757 maybeAccept_impl( node->parameters, *this ); 757 758 maybeAccept_impl( node->members , *this ); -
src/Concurrency/Keywords.cc
rba897d21 r2e9b59b 1204 1204 //new TypeofType( noQualifiers, args.front()->clone() ) 1205 1205 new TypeofType( noQualifiers, new UntypedExpr( 1206 new NameExpr( "__get_ type" ),1206 new NameExpr( "__get_mutexstmt_lock_type" ), 1207 1207 { args.front()->clone() } 1208 1208 ) … … 1216 1216 map_range < std::list<Initializer*> > ( args, [](Expression * var ){ 1217 1217 return new SingleInit( new UntypedExpr( 1218 new NameExpr( "__get_ ptr" ),1218 new NameExpr( "__get_mutexstmt_lock_ptr" ), 1219 1219 { var } 1220 1220 ) ); … … 1227 1227 TypeExpr * lock_type_expr = new TypeExpr( 1228 1228 new TypeofType( noQualifiers, new UntypedExpr( 1229 new NameExpr( "__get_ type" ),1229 new NameExpr( "__get_mutexstmt_lock_type" ), 1230 1230 { args.front()->clone() } 1231 1231 ) -
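The renamed helpers above are only ever used inside typeof: the pass wraps the argument expression in an untyped call to __get_mutexstmt_lock_type and lets resolution recover the concrete lock type from that call. A small standalone C++ analogue of the same trick, using decltype on an unevaluated call; the names lock_handle_of, spinlock and ownerlock are invented for illustration and are not libcfa names:

    #include <cstdio>
    #include <type_traits>

    struct spinlock  { void lock() { std::puts( "spinlock locked" ); } };
    struct ownerlock { void lock() { std::puts( "ownerlock locked" ); } };

    // These overloads play the role of __get_mutexstmt_lock_type: the call is
    // never evaluated, only its type is inspected, so no definition is needed.
    spinlock  & lock_handle_of( spinlock & );
    ownerlock & lock_handle_of( ownerlock & );

    int main() {
        spinlock s;
        // Mirrors CFA's typeof( __get_mutexstmt_lock_type( s ) ).
        using lock_t = std::remove_reference_t<decltype( lock_handle_of( s ) )>;
        lock_t & l = s;     // lock_t is spinlock
        l.lock();
    }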
src/Concurrency/KeywordsNew.cpp
rba897d21 r2e9b59b 10 10 // Created On : Tue Nov 16 9:53:00 2021 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Dec 1 11:24:00 202113 // Update Count : 112 // Last Modified On : Fri Mar 11 10:40:00 2022 13 // Update Count : 2 14 14 // 15 16 #include <iostream> 15 17 16 18 #include "Concurrency/Keywords.h" … … 18 20 #include "AST/Copy.hpp" 19 21 #include "AST/Decl.hpp" 22 #include "AST/Expr.hpp" 20 23 #include "AST/Pass.hpp" 21 24 #include "AST/Stmt.hpp" 25 #include "AST/DeclReplacer.hpp" 22 26 #include "AST/TranslationUnit.hpp" 23 27 #include "CodeGen/OperatorTable.h" 28 #include "Common/Examine.h" 24 29 #include "Common/utility.h" 30 #include "Common/UniqueName.h" 31 #include "ControlStruct/LabelGeneratorNew.hpp" 25 32 #include "InitTweak/InitTweak.h" 33 #include "Virtual/Tables.h" 26 34 27 35 namespace Concurrency { … … 29 37 namespace { 30 38 31 inline static bool isThread( const ast::DeclWithType * decl ) { 39 // -------------------------------------------------------------------------- 40 // Loose Helper Functions: 41 42 /// Detect threads constructed with the keyword thread. 43 bool isThread( const ast::DeclWithType * decl ) { 32 44 auto baseType = decl->get_type()->stripDeclarator(); 33 45 auto instType = dynamic_cast<const ast::StructInstType *>( baseType ); … … 36 48 } 37 49 50 /// Get the virtual type id if given a type name. 51 std::string typeIdType( std::string const & exception_name ) { 52 return exception_name.empty() ? std::string() 53 : Virtual::typeIdType( exception_name ); 54 } 55 56 /// Get the vtable type name if given a type name. 57 std::string vtableTypeName( std::string const & exception_name ) { 58 return exception_name.empty() ? std::string() 59 : Virtual::vtableTypeName( exception_name ); 60 } 61 62 static ast::Type * mutate_under_references( ast::ptr<ast::Type>& type ) { 63 ast::Type * mutType = type.get_and_mutate(); 64 for ( ast::ReferenceType * mutRef 65 ; (mutRef = dynamic_cast<ast::ReferenceType *>( mutType )) 66 ; mutType = mutRef->base.get_and_mutate() ); 67 return mutType; 68 } 69 70 // Describe that it adds the generic parameters and the uses of the generic 71 // parameters on the function and first "this" argument. 72 ast::FunctionDecl * fixupGenerics( 73 const ast::FunctionDecl * func, const ast::StructDecl * decl ) { 74 const CodeLocation & location = decl->location; 75 // We have to update both the declaration 76 auto mutFunc = ast::mutate( func ); 77 auto mutType = mutFunc->type.get_and_mutate(); 78 79 if ( decl->params.empty() ) { 80 return mutFunc; 81 } 82 83 assert( 0 != mutFunc->params.size() ); 84 assert( 0 != mutType->params.size() ); 85 86 // Add the "forall" clause information. 
87 for ( const ast::ptr<ast::TypeDecl> & typeParam : decl->params ) { 88 auto typeDecl = ast::deepCopy( typeParam ); 89 mutFunc->type_params.push_back( typeDecl ); 90 mutType->forall.push_back( new ast::TypeInstType( typeDecl ) ); 91 for ( auto & assertion : typeDecl->assertions ) { 92 mutFunc->assertions.push_back( assertion ); 93 mutType->assertions.emplace_back( 94 new ast::VariableExpr( location, assertion ) ); 95 } 96 typeDecl->assertions.clear(); 97 } 98 99 // Even chain_mutate is not powerful enough for this: 100 ast::ptr<ast::Type>& paramType = strict_dynamic_cast<ast::ObjectDecl *>( 101 mutFunc->params[0].get_and_mutate() )->type; 102 auto paramTypeInst = strict_dynamic_cast<ast::StructInstType *>( 103 mutate_under_references( paramType ) ); 104 auto typeParamInst = strict_dynamic_cast<ast::StructInstType *>( 105 mutate_under_references( mutType->params[0] ) ); 106 107 for ( const ast::ptr<ast::TypeDecl> & typeDecl : mutFunc->type_params ) { 108 paramTypeInst->params.push_back( 109 new ast::TypeExpr( location, new ast::TypeInstType( typeDecl ) ) ); 110 typeParamInst->params.push_back( 111 new ast::TypeExpr( location, new ast::TypeInstType( typeDecl ) ) ); 112 } 113 114 return mutFunc; 115 } 116 38 117 // -------------------------------------------------------------------------- 39 struct MutexKeyword final { 118 struct ConcurrentSueKeyword : public ast::WithDeclsToAdd<> { 119 ConcurrentSueKeyword( 120 std::string&& type_name, std::string&& field_name, 121 std::string&& getter_name, std::string&& context_error, 122 std::string&& exception_name, 123 bool needs_main, ast::AggregateDecl::Aggregate cast_target 124 ) : 125 type_name( type_name ), field_name( field_name ), 126 getter_name( getter_name ), context_error( context_error ), 127 exception_name( exception_name ), 128 typeid_name( typeIdType( exception_name ) ), 129 vtable_name( vtableTypeName( exception_name ) ), 130 needs_main( needs_main ), cast_target( cast_target ) 131 {} 132 133 virtual ~ConcurrentSueKeyword() {} 134 135 const ast::Decl * postvisit( const ast::StructDecl * decl ); 136 const ast::DeclWithType * postvisit( const ast::FunctionDecl * decl ); 137 const ast::Expr * postvisit( const ast::KeywordCastExpr * expr ); 138 139 struct StructAndField { 140 const ast::StructDecl * decl; 141 const ast::ObjectDecl * field; 142 }; 143 144 const ast::StructDecl * handleStruct( const ast::StructDecl * ); 145 void handleMain( const ast::FunctionDecl *, const ast::StructInstType * ); 146 void addTypeId( const ast::StructDecl * ); 147 void addVtableForward( const ast::StructDecl * ); 148 const ast::FunctionDecl * forwardDeclare( const ast::StructDecl * ); 149 StructAndField addField( const ast::StructDecl * ); 150 void addGetRoutines( const ast::ObjectDecl *, const ast::FunctionDecl * ); 151 void addLockUnlockRoutines( const ast::StructDecl * ); 152 153 private: 154 const std::string type_name; 155 const std::string field_name; 156 const std::string getter_name; 157 const std::string context_error; 158 const std::string exception_name; 159 const std::string typeid_name; 160 const std::string vtable_name; 161 const bool needs_main; 162 const ast::AggregateDecl::Aggregate cast_target; 163 164 const ast::StructDecl * type_decl = nullptr; 165 const ast::FunctionDecl * dtor_decl = nullptr; 166 const ast::StructDecl * except_decl = nullptr; 167 const ast::StructDecl * typeid_decl = nullptr; 168 const ast::StructDecl * vtable_decl = nullptr; 169 170 }; 171 172 // Handles thread type declarations: 173 // 174 // thread Mythread { struct 
MyThread { 175 // int data; int data; 176 // a_struct_t more_data; a_struct_t more_data; 177 // => thread$ __thrd_d; 178 // }; }; 179 // static inline thread$ * get_thread( MyThread * this ) { return &this->__thrd_d; } 180 // 181 struct ThreadKeyword final : public ConcurrentSueKeyword { 182 ThreadKeyword() : ConcurrentSueKeyword( 183 "thread$", 184 "__thrd", 185 "get_thread", 186 "thread keyword requires threads to be in scope, add #include <thread.hfa>\n", 187 "ThreadCancelled", 188 true, 189 ast::AggregateDecl::Thread ) 190 {} 191 192 virtual ~ThreadKeyword() {} 193 }; 194 195 // Handles coroutine type declarations: 196 // 197 // coroutine MyCoroutine { struct MyCoroutine { 198 // int data; int data; 199 // a_struct_t more_data; a_struct_t more_data; 200 // => coroutine$ __cor_d; 201 // }; }; 202 // static inline coroutine$ * get_coroutine( MyCoroutine * this ) { return &this->__cor_d; } 203 // 204 struct CoroutineKeyword final : public ConcurrentSueKeyword { 205 CoroutineKeyword() : ConcurrentSueKeyword( 206 "coroutine$", 207 "__cor", 208 "get_coroutine", 209 "coroutine keyword requires coroutines to be in scope, add #include <coroutine.hfa>\n", 210 "CoroutineCancelled", 211 true, 212 ast::AggregateDecl::Coroutine ) 213 {} 214 215 virtual ~CoroutineKeyword() {} 216 }; 217 218 // Handles monitor type declarations: 219 // 220 // monitor MyMonitor { struct MyMonitor { 221 // int data; int data; 222 // a_struct_t more_data; a_struct_t more_data; 223 // => monitor$ __mon_d; 224 // }; }; 225 // static inline monitor$ * get_coroutine( MyMonitor * this ) { 226 // return &this->__cor_d; 227 // } 228 // void lock(MyMonitor & this) { 229 // lock(get_monitor(this)); 230 // } 231 // void unlock(MyMonitor & this) { 232 // unlock(get_monitor(this)); 233 // } 234 // 235 struct MonitorKeyword final : public ConcurrentSueKeyword { 236 MonitorKeyword() : ConcurrentSueKeyword( 237 "monitor$", 238 "__mon", 239 "get_monitor", 240 "monitor keyword requires monitors to be in scope, add #include <monitor.hfa>\n", 241 "", 242 false, 243 ast::AggregateDecl::Monitor ) 244 {} 245 246 virtual ~MonitorKeyword() {} 247 }; 248 249 // Handles generator type declarations: 250 // 251 // generator MyGenerator { struct MyGenerator { 252 // int data; int data; 253 // a_struct_t more_data; a_struct_t more_data; 254 // => int __generator_state; 255 // }; }; 256 // 257 struct GeneratorKeyword final : public ConcurrentSueKeyword { 258 GeneratorKeyword() : ConcurrentSueKeyword( 259 "generator$", 260 "__generator_state", 261 "get_generator", 262 "Unable to find builtin type generator$\n", 263 "", 264 true, 265 ast::AggregateDecl::Generator ) 266 {} 267 268 virtual ~GeneratorKeyword() {} 269 }; 270 271 const ast::Decl * ConcurrentSueKeyword::postvisit( 272 const ast::StructDecl * decl ) { 273 if ( !decl->body ) { 274 return decl; 275 } else if ( cast_target == decl->kind ) { 276 return handleStruct( decl ); 277 } else if ( type_name == decl->name ) { 278 assert( !type_decl ); 279 type_decl = decl; 280 } else if ( exception_name == decl->name ) { 281 assert( !except_decl ); 282 except_decl = decl; 283 } else if ( typeid_name == decl->name ) { 284 assert( !typeid_decl ); 285 typeid_decl = decl; 286 } else if ( vtable_name == decl->name ) { 287 assert( !vtable_decl ); 288 vtable_decl = decl; 289 } 290 return decl; 291 } 292 293 // Try to get the full definition, but raise an error on conflicts. 
294 const ast::FunctionDecl * getDefinition( 295 const ast::FunctionDecl * old_decl, 296 const ast::FunctionDecl * new_decl ) { 297 if ( !new_decl->stmts ) { 298 return old_decl; 299 } else if ( !old_decl->stmts ) { 300 return new_decl; 301 } else { 302 assert( !old_decl->stmts || !new_decl->stmts ); 303 return nullptr; 304 } 305 } 306 307 const ast::DeclWithType * ConcurrentSueKeyword::postvisit( 308 const ast::FunctionDecl * decl ) { 309 if ( type_decl && isDestructorFor( decl, type_decl ) ) { 310 // Check for forward declarations, try to get the full definition. 311 dtor_decl = (dtor_decl) ? getDefinition( dtor_decl, decl ) : decl; 312 } else if ( !vtable_name.empty() && decl->has_body() ) { 313 if (const ast::DeclWithType * param = isMainFor( decl, cast_target )) { 314 if ( !vtable_decl ) { 315 SemanticError( decl, context_error ); 316 } 317 // Should be safe because of isMainFor. 318 const ast::StructInstType * struct_type = 319 static_cast<const ast::StructInstType *>( 320 static_cast<const ast::ReferenceType *>( 321 param->get_type() )->base.get() ); 322 323 handleMain( decl, struct_type ); 324 } 325 } 326 return decl; 327 } 328 329 const ast::Expr * ConcurrentSueKeyword::postvisit( 330 const ast::KeywordCastExpr * expr ) { 331 if ( cast_target == expr->target ) { 332 // Convert `(thread &)ex` to `(thread$ &)*get_thread(ex)`, etc. 333 if ( !type_decl || !dtor_decl ) { 334 SemanticError( expr, context_error ); 335 } 336 assert( nullptr == expr->result ); 337 auto cast = ast::mutate( expr ); 338 cast->result = new ast::ReferenceType( new ast::StructInstType( type_decl ) ); 339 cast->concrete_target.field = field_name; 340 cast->concrete_target.getter = getter_name; 341 return cast; 342 } 343 return expr; 344 } 345 346 const ast::StructDecl * ConcurrentSueKeyword::handleStruct( 347 const ast::StructDecl * decl ) { 348 assert( decl->body ); 349 350 if ( !type_decl || !dtor_decl ) { 351 SemanticError( decl, context_error ); 352 } 353 354 if ( !exception_name.empty() ) { 355 if( !typeid_decl || !vtable_decl ) { 356 SemanticError( decl, context_error ); 357 } 358 addTypeId( decl ); 359 addVtableForward( decl ); 360 } 361 362 const ast::FunctionDecl * func = forwardDeclare( decl ); 363 StructAndField addFieldRet = addField( decl ); 364 decl = addFieldRet.decl; 365 const ast::ObjectDecl * field = addFieldRet.field; 366 367 addGetRoutines( field, func ); 368 // Add routines to monitors for use by mutex stmt. 
369 if ( ast::AggregateDecl::Monitor == cast_target ) { 370 addLockUnlockRoutines( decl ); 371 } 372 373 return decl; 374 } 375 376 void ConcurrentSueKeyword::handleMain( 377 const ast::FunctionDecl * decl, const ast::StructInstType * type ) { 378 assert( vtable_decl ); 379 assert( except_decl ); 380 381 const CodeLocation & location = decl->location; 382 383 std::vector<ast::ptr<ast::Expr>> poly_args = { 384 new ast::TypeExpr( location, type ), 385 }; 386 ast::ObjectDecl * vtable_object = Virtual::makeVtableInstance( 387 location, 388 "_default_vtable_object_declaration", 389 new ast::StructInstType( vtable_decl, copy( poly_args ) ), 390 type, 391 nullptr 392 ); 393 declsToAddAfter.push_back( vtable_object ); 394 declsToAddAfter.push_back( 395 new ast::ObjectDecl( 396 location, 397 Virtual::concurrentDefaultVTableName(), 398 new ast::ReferenceType( vtable_object->type, ast::CV::Const ), 399 new ast::SingleInit( location, 400 new ast::VariableExpr( location, vtable_object ) ), 401 ast::Storage::Classes(), 402 ast::Linkage::Cforall 403 ) 404 ); 405 declsToAddAfter.push_back( Virtual::makeGetExceptionFunction( 406 location, 407 vtable_object, 408 new ast::StructInstType( except_decl, copy( poly_args ) ) 409 ) ); 410 } 411 412 void ConcurrentSueKeyword::addTypeId( const ast::StructDecl * decl ) { 413 assert( typeid_decl ); 414 const CodeLocation & location = decl->location; 415 416 ast::StructInstType * typeid_type = 417 new ast::StructInstType( typeid_decl, ast::CV::Const ); 418 typeid_type->params.push_back( 419 new ast::TypeExpr( location, new ast::StructInstType( decl ) ) ); 420 declsToAddBefore.push_back( 421 Virtual::makeTypeIdInstance( location, typeid_type ) ); 422 // If the typeid_type is going to be kept, the other reference will have 423 // been made by now, but we also get to avoid extra mutates. 424 ast::ptr<ast::StructInstType> typeid_cleanup = typeid_type; 425 } 426 427 void ConcurrentSueKeyword::addVtableForward( const ast::StructDecl * decl ) { 428 assert( vtable_decl ); 429 const CodeLocation& location = decl->location; 430 431 std::vector<ast::ptr<ast::Expr>> poly_args = { 432 new ast::TypeExpr( location, new ast::StructInstType( decl ) ), 433 }; 434 declsToAddBefore.push_back( Virtual::makeGetExceptionForward( 435 location, 436 new ast::StructInstType( vtable_decl, copy( poly_args ) ), 437 new ast::StructInstType( except_decl, copy( poly_args ) ) 438 ) ); 439 ast::ObjectDecl * vtable_object = Virtual::makeVtableForward( 440 location, 441 "_default_vtable_object_declaration", 442 new ast::StructInstType( vtable_decl, std::move( poly_args ) ) 443 ); 444 declsToAddBefore.push_back( vtable_object ); 445 declsToAddBefore.push_back( 446 new ast::ObjectDecl( 447 location, 448 Virtual::concurrentDefaultVTableName(), 449 new ast::ReferenceType( vtable_object->type, ast::CV::Const ), 450 nullptr, 451 ast::Storage::Extern, 452 ast::Linkage::Cforall 453 ) 454 ); 455 } 456 457 const ast::FunctionDecl * ConcurrentSueKeyword::forwardDeclare( 458 const ast::StructDecl * decl ) { 459 const CodeLocation & location = decl->location; 460 461 ast::StructDecl * forward = ast::deepCopy( decl ); 462 { 463 // If removing members makes ref-count go to zero, do not free. 
464 ast::ptr<ast::StructDecl> forward_ptr = forward; 465 forward->body = false; 466 forward->members.clear(); 467 forward_ptr.release(); 468 } 469 470 ast::ObjectDecl * this_decl = new ast::ObjectDecl( 471 location, 472 "this", 473 new ast::ReferenceType( new ast::StructInstType( decl ) ), 474 nullptr, 475 ast::Storage::Classes(), 476 ast::Linkage::Cforall 477 ); 478 479 ast::ObjectDecl * ret_decl = new ast::ObjectDecl( 480 location, 481 "ret", 482 new ast::PointerType( new ast::StructInstType( type_decl ) ), 483 nullptr, 484 ast::Storage::Classes(), 485 ast::Linkage::Cforall 486 ); 487 488 ast::FunctionDecl * get_decl = new ast::FunctionDecl( 489 location, 490 getter_name, 491 {}, // forall 492 { this_decl }, // params 493 { ret_decl }, // returns 494 nullptr, // stmts 495 ast::Storage::Static, 496 ast::Linkage::Cforall, 497 { new ast::Attribute( "const" ) }, 498 ast::Function::Inline 499 ); 500 get_decl = fixupGenerics( get_decl, decl ); 501 502 ast::FunctionDecl * main_decl = nullptr; 503 if ( needs_main ) { 504 // `this_decl` is copied here because the original was used above. 505 main_decl = new ast::FunctionDecl( 506 location, 507 "main", 508 {}, 509 { ast::deepCopy( this_decl ) }, 510 {}, 511 nullptr, 512 ast::Storage::Classes(), 513 ast::Linkage::Cforall 514 ); 515 main_decl = fixupGenerics( main_decl, decl ); 516 } 517 518 declsToAddBefore.push_back( forward ); 519 if ( needs_main ) declsToAddBefore.push_back( main_decl ); 520 declsToAddBefore.push_back( get_decl ); 521 522 return get_decl; 523 } 524 525 ConcurrentSueKeyword::StructAndField ConcurrentSueKeyword::addField( 526 const ast::StructDecl * decl ) { 527 const CodeLocation & location = decl->location; 528 529 ast::ObjectDecl * field = new ast::ObjectDecl( 530 location, 531 field_name, 532 new ast::StructInstType( type_decl ), 533 nullptr, 534 ast::Storage::Classes(), 535 ast::Linkage::Cforall 536 ); 537 538 auto mutDecl = ast::mutate( decl ); 539 mutDecl->members.push_back( field ); 540 541 return {mutDecl, field}; 542 } 543 544 void ConcurrentSueKeyword::addGetRoutines( 545 const ast::ObjectDecl * field, const ast::FunctionDecl * forward ) { 546 // Say it is generated at the "same" places as the forward declaration. 547 const CodeLocation & location = forward->location; 548 549 const ast::DeclWithType * param = forward->params.front(); 550 ast::Stmt * stmt = new ast::ReturnStmt( location, 551 new ast::AddressExpr( location, 552 new ast::MemberExpr( location, 553 field, 554 new ast::CastExpr( location, 555 new ast::VariableExpr( location, param ), 556 ast::deepCopy( param->get_type()->stripReferences() ), 557 ast::ExplicitCast 558 ) 559 ) 560 ) 561 ); 562 563 ast::FunctionDecl * decl = ast::deepCopy( forward ); 564 decl->stmts = new ast::CompoundStmt( location, { stmt } ); 565 declsToAddAfter.push_back( decl ); 566 } 567 568 void ConcurrentSueKeyword::addLockUnlockRoutines( 569 const ast::StructDecl * decl ) { 570 // This should only be used on monitors. 571 assert( ast::AggregateDecl::Monitor == cast_target ); 572 573 const CodeLocation & location = decl->location; 574 575 // The parameter for both routines. 576 ast::ObjectDecl * this_decl = new ast::ObjectDecl( 577 location, 578 "this", 579 new ast::ReferenceType( new ast::StructInstType( decl ) ), 580 nullptr, 581 ast::Storage::Classes(), 582 ast::Linkage::Cforall 583 ); 584 585 ast::FunctionDecl * lock_decl = new ast::FunctionDecl( 586 location, 587 "lock", 588 { /* forall */ }, 589 { 590 // Copy the declaration of this. 
591 ast::deepCopy( this_decl ), 592 }, 593 { /* returns */ }, 594 nullptr, 595 ast::Storage::Static, 596 ast::Linkage::Cforall, 597 { /* attributes */ }, 598 ast::Function::Inline 599 ); 600 lock_decl = fixupGenerics( lock_decl, decl ); 601 602 lock_decl->stmts = new ast::CompoundStmt( location, { 603 new ast::ExprStmt( location, 604 new ast::UntypedExpr( location, 605 new ast::NameExpr( location, "lock" ), 606 { 607 new ast::UntypedExpr( location, 608 new ast::NameExpr( location, "get_monitor" ), 609 { new ast::VariableExpr( location, 610 InitTweak::getParamThis( lock_decl ) ) } 611 ) 612 } 613 ) 614 ) 615 } ); 616 617 ast::FunctionDecl * unlock_decl = new ast::FunctionDecl( 618 location, 619 "unlock", 620 { /* forall */ }, 621 { 622 // Last use, consume the declaration of this. 623 this_decl, 624 }, 625 { /* returns */ }, 626 nullptr, 627 ast::Storage::Static, 628 ast::Linkage::Cforall, 629 { /* attributes */ }, 630 ast::Function::Inline 631 ); 632 unlock_decl = fixupGenerics( unlock_decl, decl ); 633 634 unlock_decl->stmts = new ast::CompoundStmt( location, { 635 new ast::ExprStmt( location, 636 new ast::UntypedExpr( location, 637 new ast::NameExpr( location, "unlock" ), 638 { 639 new ast::UntypedExpr( location, 640 new ast::NameExpr( location, "get_monitor" ), 641 { new ast::VariableExpr( location, 642 InitTweak::getParamThis( unlock_decl ) ) } 643 ) 644 } 645 ) 646 ) 647 } ); 648 649 declsToAddAfter.push_back( lock_decl ); 650 declsToAddAfter.push_back( unlock_decl ); 651 } 652 653 654 // -------------------------------------------------------------------------- 655 struct SuspendKeyword final : 656 public ast::WithStmtsToAdd<>, public ast::WithGuards { 657 SuspendKeyword() = default; 658 virtual ~SuspendKeyword() = default; 659 660 void previsit( const ast::FunctionDecl * ); 661 const ast::DeclWithType * postvisit( const ast::FunctionDecl * ); 662 const ast::Stmt * postvisit( const ast::SuspendStmt * ); 663 664 private: 665 bool is_real_suspend( const ast::FunctionDecl * ); 666 667 const ast::Stmt * make_generator_suspend( const ast::SuspendStmt * ); 668 const ast::Stmt * make_coroutine_suspend( const ast::SuspendStmt * ); 669 670 struct LabelPair { 671 ast::Label obj; 672 int idx; 673 }; 674 675 LabelPair make_label(const ast::Stmt * stmt ) { 676 labels.push_back( ControlStruct::newLabel( "generator", stmt ) ); 677 return { labels.back(), int(labels.size()) }; 678 } 679 680 const ast::DeclWithType * in_generator = nullptr; 681 const ast::FunctionDecl * decl_suspend = nullptr; 682 std::vector<ast::Label> labels; 683 }; 684 685 void SuspendKeyword::previsit( const ast::FunctionDecl * decl ) { 686 GuardValue( in_generator ); in_generator = nullptr; 687 688 // If it is the real suspend, grab it if we don't have one already. 689 if ( is_real_suspend( decl ) ) { 690 decl_suspend = decl_suspend ? decl_suspend : decl; 691 return; 692 } 693 694 // Otherwise check if this is a generator main and, if so, handle it. 695 auto param = isMainFor( decl, ast::AggregateDecl::Generator ); 696 if ( !param ) return; 697 698 if ( 0 != decl->returns.size() ) { 699 SemanticError( decl->location, "Generator main must return void" ); 700 } 701 702 in_generator = param; 703 GuardValue( labels ); labels.clear(); 704 } 705 706 const ast::DeclWithType * SuspendKeyword::postvisit( 707 const ast::FunctionDecl * decl ) { 708 // Only modify a full definition of a generator with states. 
709 if ( !decl->stmts || !in_generator || labels.empty() ) return decl; 710 711 const CodeLocation & location = decl->location; 712 713 // Create a new function body: 714 // static void * __generator_labels[] = {&&s0, &&s1, ...}; 715 // void * __generator_label = __generator_labels[GEN.__generator_state]; 716 // goto * __generator_label; 717 // s0: ; 718 // OLD_BODY 719 720 // This is the null statement inserted right before the body. 721 ast::NullStmt * noop = new ast::NullStmt( location ); 722 noop->labels.push_back( ControlStruct::newLabel( "generator", noop ) ); 723 const ast::Label & first_label = noop->labels.back(); 724 725 // Add each label to the init, starting with the first label. 726 std::vector<ast::ptr<ast::Init>> inits = { 727 new ast::SingleInit( location, 728 new ast::LabelAddressExpr( location, copy( first_label ) ) ) }; 729 // Then go through all the stored labels, and clear the store. 730 for ( auto && label : labels ) { 731 inits.push_back( new ast::SingleInit( label.location, 732 new ast::LabelAddressExpr( label.location, std::move( label ) 733 ) ) ); 734 } 735 labels.clear(); 736 // Then construct the initializer itself. 737 auto init = new ast::ListInit( location, std::move( inits ) ); 738 739 ast::ObjectDecl * generatorLabels = new ast::ObjectDecl( 740 location, 741 "__generator_labels", 742 new ast::ArrayType( 743 new ast::PointerType( new ast::VoidType() ), 744 nullptr, 745 ast::FixedLen, 746 ast::DynamicDim 747 ), 748 init, 749 ast::Storage::Classes(), 750 ast::Linkage::AutoGen 751 ); 752 753 ast::ObjectDecl * generatorLabel = new ast::ObjectDecl( 754 location, 755 "__generator_label", 756 new ast::PointerType( new ast::VoidType() ), 757 new ast::SingleInit( location, 758 new ast::UntypedExpr( location, 759 new ast::NameExpr( location, "?[?]" ), 760 { 761 // TODO: Could be a variable expr. 762 new ast::NameExpr( location, "__generator_labels" ), 763 new ast::UntypedMemberExpr( location, 764 new ast::NameExpr( location, "__generator_state" ), 765 new ast::VariableExpr( location, in_generator ) 766 ) 767 } 768 ) 769 ), 770 ast::Storage::Classes(), 771 ast::Linkage::AutoGen 772 ); 773 774 ast::BranchStmt * theGoTo = new ast::BranchStmt( 775 location, new ast::VariableExpr( location, generatorLabel ) 776 ); 777 778 // The noop goes here in order. 779 780 ast::CompoundStmt * body = new ast::CompoundStmt( location, { 781 { new ast::DeclStmt( location, generatorLabels ) }, 782 { new ast::DeclStmt( location, generatorLabel ) }, 783 { theGoTo }, 784 { noop }, 785 { decl->stmts }, 786 } ); 787 788 auto mutDecl = ast::mutate( decl ); 789 mutDecl->stmts = body; 790 return mutDecl; 791 } 792 793 const ast::Stmt * SuspendKeyword::postvisit( const ast::SuspendStmt * stmt ) { 794 switch ( stmt->type ) { 795 case ast::SuspendStmt::None: 796 // Use the context to determain the implicit target. 797 if ( in_generator ) { 798 return make_generator_suspend( stmt ); 799 } else { 800 return make_coroutine_suspend( stmt ); 801 } 802 case ast::SuspendStmt::Coroutine: 803 return make_coroutine_suspend( stmt ); 804 case ast::SuspendStmt::Generator: 805 // Generator suspends must be directly in a generator. 806 if ( !in_generator ) SemanticError( stmt->location, "'suspend generator' must be used inside main of generator type." ); 807 return make_generator_suspend( stmt ); 808 } 809 assert( false ); 810 return stmt; 811 } 812 813 /// Find the real/official suspend declaration. 
814 bool SuspendKeyword::is_real_suspend( const ast::FunctionDecl * decl ) { 815 return ( !decl->linkage.is_mangled 816 && 0 == decl->params.size() 817 && 0 == decl->returns.size() 818 && "__cfactx_suspend" == decl->name ); 819 } 820 821 const ast::Stmt * SuspendKeyword::make_generator_suspend( 822 const ast::SuspendStmt * stmt ) { 823 assert( in_generator ); 824 // Target code is: 825 // GEN.__generator_state = X; 826 // THEN 827 // return; 828 // __gen_X:; 829 830 const CodeLocation & location = stmt->location; 831 832 LabelPair label = make_label( stmt ); 833 834 // This is the context saving statement. 835 stmtsToAddBefore.push_back( new ast::ExprStmt( location, 836 new ast::UntypedExpr( location, 837 new ast::NameExpr( location, "?=?" ), 838 { 839 new ast::UntypedMemberExpr( location, 840 new ast::NameExpr( location, "__generator_state" ), 841 new ast::VariableExpr( location, in_generator ) 842 ), 843 ast::ConstantExpr::from_int( location, label.idx ), 844 } 845 ) 846 ) ); 847 848 // The THEN component is conditional (return is not). 849 if ( stmt->then ) { 850 stmtsToAddBefore.push_back( stmt->then.get() ); 851 } 852 stmtsToAddBefore.push_back( new ast::ReturnStmt( location, nullptr ) ); 853 854 // The null statement replaces the old suspend statement. 855 return new ast::NullStmt( location, { label.obj } ); 856 } 857 858 const ast::Stmt * SuspendKeyword::make_coroutine_suspend( 859 const ast::SuspendStmt * stmt ) { 860 // The only thing we need from the old statement is the location. 861 const CodeLocation & location = stmt->location; 862 863 if ( !decl_suspend ) { 864 SemanticError( location, "suspend keyword applied to coroutines requires coroutines to be in scope, add #include <coroutine.hfa>\n" ); 865 } 866 if ( stmt->then ) { 867 SemanticError( location, "Compound statement following coroutines is not implemented." 
); 868 } 869 870 return new ast::ExprStmt( location, 871 new ast::UntypedExpr( location, 872 ast::VariableExpr::functionPointer( location, decl_suspend ) ) 873 ); 874 } 875 876 // -------------------------------------------------------------------------- 877 struct MutexKeyword final : public ast::WithDeclsToAdd<> { 40 878 const ast::FunctionDecl * postvisit( const ast::FunctionDecl * decl ); 41 879 void postvisit( const ast::StructDecl * decl ); … … 50 888 ast::CompoundStmt * addStatements( const ast::CompoundStmt * body, const std::vector<ast::ptr<ast::Expr>> & args ); 51 889 ast::CompoundStmt * addThreadDtorStatements( const ast::FunctionDecl* func, const ast::CompoundStmt * body, const std::vector<const ast::DeclWithType *> & args ); 52 890 ast::ExprStmt * genVirtLockUnlockExpr( const std::string & fnName, ast::ptr<ast::Expr> expr, const CodeLocation & location, ast::Expr * param); 891 ast::IfStmt * genTypeDiscrimLockUnlock( const std::string & fnName, const std::vector<ast::ptr<ast::Expr>> & args, const CodeLocation & location, ast::UntypedExpr * thisParam ); 53 892 private: 54 893 const ast::StructDecl * monitor_decl = nullptr; … … 59 898 60 899 static ast::ptr<ast::Type> generic_func; 900 901 UniqueName mutex_func_namer = UniqueName("__lock_unlock_curr"); 61 902 }; 62 903 … … 160 1001 161 1002 const ast::Stmt * MutexKeyword::postvisit( const ast::MutexStmt * stmt ) { 1003 if ( !lock_guard_decl ) { 1004 SemanticError( stmt->location, "mutex stmt requires a header, add #include <mutex_stmt.hfa>\n" ); 1005 } 162 1006 ast::CompoundStmt * body = 163 1007 new ast::CompoundStmt( stmt->location, { stmt->stmt } ); 164 addStatements( body, stmt->mutexObjs );165 return body;1008 1009 return addStatements( body, stmt->mutexObjs );; 166 1010 } 167 1011 … … 251 1095 { 252 1096 new ast::SingleInit( location, 253 new ast::AddressExpr( 1097 new ast::AddressExpr( location, 254 1098 new ast::VariableExpr( location, monitor ) ) ), 255 1099 new ast::SingleInit( location, … … 358 1202 } 359 1203 1204 // generates a cast to the void ptr to the appropriate lock type and dereferences it before calling lock or unlock on it 1205 // used to undo the type erasure done by storing all the lock pointers as void 1206 ast::ExprStmt * MutexKeyword::genVirtLockUnlockExpr( const std::string & fnName, ast::ptr<ast::Expr> expr, const CodeLocation & location, ast::Expr * param ) { 1207 return new ast::ExprStmt( location, 1208 new ast::UntypedExpr( location, 1209 new ast::NameExpr( location, fnName ), { 1210 ast::UntypedExpr::createDeref( 1211 location, 1212 new ast::CastExpr( location, 1213 param, 1214 new ast::PointerType( new ast::TypeofType( new ast::UntypedExpr( 1215 expr->location, 1216 new ast::NameExpr( expr->location, "__get_mutexstmt_lock_type" ), 1217 { expr } 1218 ) ) ), 1219 ast::GeneratedFlag::ExplicitCast 1220 ) 1221 ) 1222 } 1223 ) 1224 ); 1225 } 1226 1227 ast::IfStmt * MutexKeyword::genTypeDiscrimLockUnlock( const std::string & fnName, const std::vector<ast::ptr<ast::Expr>> & args, const CodeLocation & location, ast::UntypedExpr * thisParam ) { 1228 ast::IfStmt * outerLockIf = nullptr; 1229 ast::IfStmt * lastLockIf = nullptr; 1230 1231 //adds an if/elif clause for each lock to assign type from void ptr based on ptr address 1232 for ( long unsigned int i = 0; i < args.size(); i++ ) { 1233 1234 ast::UntypedExpr * ifCond = new ast::UntypedExpr( location, 1235 new ast::NameExpr( location, "?==?" 
), { 1236 ast::deepCopy( thisParam ), 1237 new ast::CastExpr( location, new ast::AddressExpr( location, args.at(i) ), new ast::PointerType( new ast::VoidType() )) 1238 } 1239 ); 1240 1241 ast::IfStmt * currLockIf = new ast::IfStmt( 1242 location, 1243 ifCond, 1244 genVirtLockUnlockExpr( fnName, args.at(i), location, ast::deepCopy( thisParam ) ) 1245 ); 1246 1247 if ( i == 0 ) { 1248 outerLockIf = currLockIf; 1249 } else { 1250 // add ifstmt to else of previous stmt 1251 lastLockIf->else_ = currLockIf; 1252 } 1253 1254 lastLockIf = currLockIf; 1255 } 1256 return outerLockIf; 1257 } 1258 360 1259 ast::CompoundStmt * MutexKeyword::addStatements( 361 1260 const ast::CompoundStmt * body, 362 1261 const std::vector<ast::ptr<ast::Expr>> & args ) { 363 ast::CompoundStmt * mutBody = ast::mutate( body );364 1262 365 1263 // Code is generated near the beginning of the compound statement. 366 const CodeLocation & location = mutBody->location; 1264 const CodeLocation & location = body->location; 1265 1266 // final body to return 1267 ast::CompoundStmt * newBody = new ast::CompoundStmt( location ); 1268 1269 // std::string lockFnName = mutex_func_namer.newName(); 1270 // std::string unlockFnName = mutex_func_namer.newName(); 367 1271 368 1272 // Make pointer to the monitors. … … 372 1276 new ast::ArrayType( 373 1277 new ast::PointerType( 374 new ast::TypeofType( 375 new ast::UntypedExpr( 376 location, 377 new ast::NameExpr( location, "__get_type" ), 378 { args.front() } 379 ) 380 ) 1278 new ast::VoidType() 381 1279 ), 382 1280 ast::ConstantExpr::from_ulong( location, args.size() ), … … 392 1290 new ast::UntypedExpr( 393 1291 expr->location, 394 new ast::NameExpr( expr->location, "__get_ ptr" ),1292 new ast::NameExpr( expr->location, "__get_mutexstmt_lock_ptr" ), 395 1293 { expr } 396 1294 ) … … 405 1303 ast::StructInstType * lock_guard_struct = 406 1304 new ast::StructInstType( lock_guard_decl ); 407 ast::TypeExpr * lock_type_expr = new ast::TypeExpr( 408 location, 409 new ast::TypeofType( 410 new ast::UntypedExpr( 411 location, 412 new ast::NameExpr( location, "__get_type" ), 413 { args.front() } 414 ) 415 ) 416 ); 417 418 lock_guard_struct->params.push_back( lock_type_expr ); 419 420 // In reverse order: 1305 1306 // use try stmts to lock and finally to unlock 1307 ast::TryStmt * outerTry = nullptr; 1308 ast::TryStmt * currentTry; 1309 ast::CompoundStmt * lastBody = nullptr; 1310 1311 // adds a nested try stmt for each lock we are locking 1312 for ( long unsigned int i = 0; i < args.size(); i++ ) { 1313 ast::UntypedExpr * innerAccess = new ast::UntypedExpr( 1314 location, 1315 new ast::NameExpr( location,"?[?]" ), { 1316 new ast::NameExpr( location, "__monitors" ), 1317 ast::ConstantExpr::from_int( location, i ) 1318 } 1319 ); 1320 1321 // make the try body 1322 ast::CompoundStmt * currTryBody = new ast::CompoundStmt( location ); 1323 ast::IfStmt * lockCall = genTypeDiscrimLockUnlock( "lock", args, location, innerAccess ); 1324 currTryBody->push_back( lockCall ); 1325 1326 // make the finally stmt 1327 ast::CompoundStmt * currFinallyBody = new ast::CompoundStmt( location ); 1328 ast::IfStmt * unlockCall = genTypeDiscrimLockUnlock( "unlock", args, location, innerAccess ); 1329 currFinallyBody->push_back( unlockCall ); 1330 1331 // construct the current try 1332 currentTry = new ast::TryStmt( 1333 location, 1334 currTryBody, 1335 {}, 1336 new ast::FinallyClause( location, currFinallyBody ) 1337 ); 1338 if ( i == 0 ) outerTry = currentTry; 1339 else { 1340 // pushback try into the body of the outer try 
1341 lastBody->push_back( currentTry ); 1342 } 1343 lastBody = currTryBody; 1344 } 1345 1346 // push body into innermost try body 1347 if ( lastBody != nullptr ) { 1348 lastBody->push_back( body ); 1349 newBody->push_front( outerTry ); 1350 } 1351 421 1352 // monitor_guard_t __guard = { __monitors, # }; 422 mutBody->push_front(1353 newBody->push_front( 423 1354 new ast::DeclStmt( 424 1355 location, … … 447 1378 448 1379 // monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) }; 449 mutBody->push_front( new ast::DeclStmt( location, monitors ) ); 450 451 return mutBody; 1380 newBody->push_front( new ast::DeclStmt( location, monitors ) ); 1381 1382 // // The parameter for both __lock_curr/__unlock_curr routines. 1383 // ast::ObjectDecl * this_decl = new ast::ObjectDecl( 1384 // location, 1385 // "this", 1386 // new ast::PointerType( new ast::VoidType() ), 1387 // nullptr, 1388 // {}, 1389 // ast::Linkage::Cforall 1390 // ); 1391 1392 // ast::FunctionDecl * lock_decl = new ast::FunctionDecl( 1393 // location, 1394 // lockFnName, 1395 // { /* forall */ }, 1396 // { 1397 // // Copy the declaration of this. 1398 // this_decl, 1399 // }, 1400 // { /* returns */ }, 1401 // nullptr, 1402 // 0, 1403 // ast::Linkage::Cforall, 1404 // { /* attributes */ }, 1405 // ast::Function::Inline 1406 // ); 1407 1408 // ast::FunctionDecl * unlock_decl = new ast::FunctionDecl( 1409 // location, 1410 // unlockFnName, 1411 // { /* forall */ }, 1412 // { 1413 // // Copy the declaration of this. 1414 // ast::deepCopy( this_decl ), 1415 // }, 1416 // { /* returns */ }, 1417 // nullptr, 1418 // 0, 1419 // ast::Linkage::Cforall, 1420 // { /* attributes */ }, 1421 // ast::Function::Inline 1422 // ); 1423 1424 // ast::IfStmt * outerLockIf = nullptr; 1425 // ast::IfStmt * outerUnlockIf = nullptr; 1426 // ast::IfStmt * lastLockIf = nullptr; 1427 // ast::IfStmt * lastUnlockIf = nullptr; 1428 1429 // //adds an if/elif clause for each lock to assign type from void ptr based on ptr address 1430 // for ( long unsigned int i = 0; i < args.size(); i++ ) { 1431 // ast::VariableExpr * thisParam = new ast::VariableExpr( location, InitTweak::getParamThis( lock_decl ) ); 1432 // ast::UntypedExpr * ifCond = new ast::UntypedExpr( location, 1433 // new ast::NameExpr( location, "?==?" 
), { 1434 // thisParam, 1435 // new ast::CastExpr( location, new ast::AddressExpr( location, args.at(i) ), new ast::PointerType( new ast::VoidType() )) 1436 // } 1437 // ); 1438 1439 // ast::IfStmt * currLockIf = new ast::IfStmt( 1440 // location, 1441 // ast::deepCopy( ifCond ), 1442 // genVirtLockUnlockExpr( "lock", args.at(i), location, ast::deepCopy( thisParam ) ) 1443 // ); 1444 1445 // ast::IfStmt * currUnlockIf = new ast::IfStmt( 1446 // location, 1447 // ifCond, 1448 // genVirtLockUnlockExpr( "unlock", args.at(i), location, ast::deepCopy( thisParam ) ) 1449 // ); 1450 1451 // if ( i == 0 ) { 1452 // outerLockIf = currLockIf; 1453 // outerUnlockIf = currUnlockIf; 1454 // } else { 1455 // // add ifstmt to else of previous stmt 1456 // lastLockIf->else_ = currLockIf; 1457 // lastUnlockIf->else_ = currUnlockIf; 1458 // } 1459 1460 // lastLockIf = currLockIf; 1461 // lastUnlockIf = currUnlockIf; 1462 // } 1463 1464 // // add pointer typing if/elifs to body of routines 1465 // lock_decl->stmts = new ast::CompoundStmt( location, { outerLockIf } ); 1466 // unlock_decl->stmts = new ast::CompoundStmt( location, { outerUnlockIf } ); 1467 1468 // // add routines to scope 1469 // declsToAddBefore.push_back( lock_decl ); 1470 // declsToAddBefore.push_back( unlock_decl ); 1471 1472 // newBody->push_front(new ast::DeclStmt( location, lock_decl )); 1473 // newBody->push_front(new ast::DeclStmt( location, unlock_decl )); 1474 1475 return newBody; 452 1476 } 453 1477 … … 564 1588 565 1589 // -------------------------------------------------------------------------- 1590 // Interface Functions: 566 1591 567 1592 void implementKeywords( ast::TranslationUnit & translationUnit ) { 568 (void)translationUnit; 569 assertf(false, "Apply Keywords not implemented." ); 1593 ast::Pass<ThreadKeyword>::run( translationUnit ); 1594 ast::Pass<CoroutineKeyword>::run( translationUnit ); 1595 ast::Pass<MonitorKeyword>::run( translationUnit ); 1596 ast::Pass<GeneratorKeyword>::run( translationUnit ); 1597 ast::Pass<SuspendKeyword>::run( translationUnit ); 570 1598 } 571 1599 -
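genVirtLockUnlockExpr and genTypeDiscrimLockUnlock above undo the type erasure introduced when every mutex argument is stored in the void * __monitors array: each stored pointer is compared against the address of each original argument, and the matching branch casts back to that argument's type before calling lock or unlock on the dereferenced pointer. A self-contained C++ sketch of the generated shape, with invented lock types standing in for the real arguments; the actual lowering wraps each lock in its own nested try statement and releases it in the matching finally clause:

    #include <cstdio>

    struct spinlock  { };
    struct ownerlock { };

    void lock( spinlock  & ) { std::puts( "lock spinlock"  ); }
    void lock( ownerlock & ) { std::puts( "lock ownerlock" ); }
    void unlock( spinlock  & ) { std::puts( "unlock spinlock"  ); }
    void unlock( ownerlock & ) { std::puts( "unlock ownerlock" ); }

    int main() {
        spinlock a; ownerlock b;
        void * monitors[] = { &a, &b };       // the type-erased __monitors array
        for ( void * curr : monitors ) {
            // Shape produced by genTypeDiscrimLockUnlock( "lock", ... ): compare the
            // stored pointer against each argument's address, then cast back and lock.
            if ( curr == static_cast<void *>( &a ) ) {
                lock( *static_cast<spinlock *>( curr ) );
            } else if ( curr == static_cast<void *>( &b ) ) {
                lock( *static_cast<ownerlock *>( curr ) );
            }
        }
        // The real lowering pairs each lock with an unlock in the finally clause of
        // its try, so the locks are released in reverse order even on an exception.
        unlock( b );
        unlock( a );
    }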
src/ControlStruct/ExceptTranslateNew.cpp
rba897d21 r2e9b59b 9 9 // Author : Andrew Beach 10 10 // Created On : Mon Nov 8 11:53:00 2021 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Mon Jan 31 18:49:58202213 // Update Count : 111 // Last Modified By : Andrew Beach 12 // Last Modified On : Fri Mar 11 17:51:00 2022 13 // Update Count : 2 14 14 // 15 15 … … 26 26 namespace { 27 27 28 typedef std::list<ast::CatchStmt*> CatchList; 29 30 void split( CatchList& allHandlers, CatchList& terHandlers, 31 CatchList& resHandlers ) { 32 while ( !allHandlers.empty() ) { 33 ast::CatchStmt * stmt = allHandlers.front(); 34 allHandlers.pop_front(); 35 if (stmt->kind == ast::ExceptionKind::Terminate) { 36 terHandlers.push_back(stmt); 37 } else { 38 resHandlers.push_back(stmt); 39 } 40 } 41 } 28 typedef std::list<ast::CatchClause*> CatchList; 42 29 43 30 void appendDeclStmt( ast::CompoundStmt * block, ast::DeclWithType * item ) { … … 58 45 {} 59 46 60 void previsit( const ast::Catch Stmt* stmt );47 void previsit( const ast::CatchClause * stmt ); 61 48 const ast::Stmt * postvisit( const ast::ThrowStmt * stmt ); 62 49 }; … … 101 88 } 102 89 103 void TranslateThrowsCore::previsit( const ast::Catch Stmt* stmt ) {90 void TranslateThrowsCore::previsit( const ast::CatchClause * stmt ) { 104 91 // Validate the statement's form. 105 92 const ast::ObjectDecl * decl = stmt->decl.as<ast::ObjectDecl>(); … … 160 147 ast::FunctionDecl * create_terminate_catch( CatchList &handlers ); 161 148 ast::CompoundStmt * create_single_matcher( 162 const ast::DeclWithType * except_obj, ast::Catch Stmt* modded_handler );149 const ast::DeclWithType * except_obj, ast::CatchClause * modded_handler ); 163 150 ast::FunctionDecl * create_terminate_match( CatchList &handlers ); 164 151 ast::CompoundStmt * create_terminate_caller( CodeLocation loc, ast::FunctionDecl * try_wrapper, … … 171 158 ast::Stmt * create_resume_rethrow( const ast::ThrowStmt * throwStmt ); 172 159 173 // Types used in translation, make sure to use clone. 160 // Types used in translation, first group are internal. 
161 ast::ObjectDecl * make_index_object( CodeLocation const & ) const; 162 ast::ObjectDecl * make_exception_object( CodeLocation const & ) const; 163 ast::ObjectDecl * make_bool_object( CodeLocation const & ) const; 164 ast::ObjectDecl * make_voidptr_object( CodeLocation const & ) const; 165 ast::ObjectDecl * make_unused_index_object( CodeLocation const & ) const; 174 166 // void (*function)(); 175 ast::FunctionDecl * try_func_t;167 ast::FunctionDecl * make_try_function( CodeLocation const & ) const; 176 168 // void (*function)(int, exception); 177 ast::FunctionDecl * catch_func_t;169 ast::FunctionDecl * make_catch_function( CodeLocation const & ) const; 178 170 // int (*function)(exception); 179 ast::FunctionDecl * ma tch_func_t;171 ast::FunctionDecl * make_match_function( CodeLocation const & ) const; 180 172 // bool (*function)(exception); 181 ast::FunctionDecl * handle_func_t;173 ast::FunctionDecl * make_handle_function( CodeLocation const & ) const; 182 174 // void (*function)(__attribute__((unused)) void *); 183 ast::FunctionDecl * finally_func_t; 184 185 ast::StructInstType * create_except_type() { 186 assert( except_decl ); 187 return new ast::StructInstType( except_decl ); 188 } 189 void init_func_types(); 175 ast::FunctionDecl * make_finally_function( CodeLocation const & ) const; 190 176 191 177 public: … … 199 185 }; 200 186 201 void TryMutatorCore::init_func_types() { 187 ast::ObjectDecl * TryMutatorCore::make_index_object( 188 CodeLocation const & location ) const { 189 return new ast::ObjectDecl( 190 location, 191 "__handler_index", 192 new ast::BasicType(ast::BasicType::SignedInt), 193 nullptr, //init 194 ast::Storage::Classes{}, 195 ast::Linkage::Cforall 196 ); 197 } 198 199 ast::ObjectDecl * TryMutatorCore::make_exception_object( 200 CodeLocation const & location ) const { 202 201 assert( except_decl ); 203 204 ast::ObjectDecl index_obj( 205 {}, 206 "__handler_index", 207 new ast::BasicType(ast::BasicType::SignedInt) 208 ); 209 ast::ObjectDecl exception_obj( 210 {}, 202 return new ast::ObjectDecl( 203 location, 211 204 "__exception_inst", 212 205 new ast::PointerType( 213 206 new ast::StructInstType( except_decl ) 214 207 ), 215 NULL 216 ); 217 ast::ObjectDecl bool_obj( 218 {}, 208 nullptr, //init 209 ast::Storage::Classes{}, 210 ast::Linkage::Cforall 211 ); 212 } 213 214 ast::ObjectDecl * TryMutatorCore::make_bool_object( 215 CodeLocation const & location ) const { 216 return new ast::ObjectDecl( 217 location, 219 218 "__ret_bool", 220 219 new ast::BasicType( ast::BasicType::Bool ), … … 225 224 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 226 225 ); 227 ast::ObjectDecl voidptr_obj( 228 {}, 226 } 227 228 ast::ObjectDecl * TryMutatorCore::make_voidptr_object( 229 CodeLocation const & location ) const { 230 return new ast::ObjectDecl( 231 location, 229 232 "__hook", 230 233 new ast::PointerType( … … 237 240 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 238 241 ); 239 240 ast::ObjectDecl unused_index_obj( 241 {}, 242 } 243 244 ast::ObjectDecl * TryMutatorCore::make_unused_index_object( 245 CodeLocation const & location ) const { 246 return new ast::ObjectDecl( 247 location, 242 248 "__handler_index", 243 249 new ast::BasicType(ast::BasicType::SignedInt), … … 248 254 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 249 255 ); 250 //unused_index_obj->attributes.push_back( new Attribute( "unused" ) ); 251 252 try_func_t = new ast::FunctionDecl( 253 {}, 256 } 257 258 ast::FunctionDecl * 
TryMutatorCore::make_try_function( 259 CodeLocation const & location ) const { 260 return new ast::FunctionDecl( 261 location, 254 262 "try", 255 263 {}, //forall … … 260 268 ast::Linkage::Cforall 261 269 ); 262 263 catch_func_t = new ast::FunctionDecl( 264 {}, 270 } 271 272 ast::FunctionDecl * TryMutatorCore::make_catch_function( 273 CodeLocation const & location ) const { 274 return new ast::FunctionDecl( 275 location, 265 276 "catch", 266 277 {}, //forall 267 { ast::deepCopy(&index_obj), ast::deepCopy(&exception_obj)},//param278 { make_index_object( location ), make_exception_object( location ) }, 268 279 {}, //return void 269 280 nullptr, … … 271 282 ast::Linkage::Cforall 272 283 ); 273 274 match_func_t = new ast::FunctionDecl( 275 {}, 284 } 285 286 ast::FunctionDecl * TryMutatorCore::make_match_function( 287 CodeLocation const & location ) const { 288 return new ast::FunctionDecl( 289 location, 276 290 "match", 277 291 {}, //forall 278 { ast::deepCopy(&exception_obj)},279 { ast::deepCopy(&unused_index_obj)},292 { make_exception_object( location ) }, 293 { make_unused_index_object( location ) }, 280 294 nullptr, 281 295 ast::Storage::Classes{}, 282 296 ast::Linkage::Cforall 283 297 ); 284 285 handle_func_t = new ast::FunctionDecl( 286 {}, 298 } 299 300 ast::FunctionDecl * TryMutatorCore::make_handle_function( 301 CodeLocation const & location ) const { 302 return new ast::FunctionDecl( 303 location, 287 304 "handle", 288 305 {}, //forall 289 { ast::deepCopy(&exception_obj)},290 { ast::deepCopy(&bool_obj)},306 { make_exception_object( location ) }, 307 { make_bool_object( location ) }, 291 308 nullptr, 292 309 ast::Storage::Classes{}, 293 310 ast::Linkage::Cforall 294 311 ); 295 296 finally_func_t = new ast::FunctionDecl( 297 {}, 312 } 313 314 ast::FunctionDecl * TryMutatorCore::make_finally_function( 315 CodeLocation const & location ) const { 316 return new ast::FunctionDecl( 317 location, 298 318 "finally", 299 319 {}, //forall 300 { ast::deepCopy(&voidptr_obj)},320 { make_voidptr_object( location ) }, 301 321 {}, //return void 302 322 nullptr, … … 304 324 ast::Linkage::Cforall 305 325 ); 306 307 //catch_func_t.get_parameters().push_back( index_obj.clone() );308 //catch_func_t.get_parameters().push_back( exception_obj.clone() );309 //match_func_t.get_returnVals().push_back( unused_index_obj );310 //match_func_t.get_parameters().push_back( exception_obj.clone() );311 //handle_func_t.get_returnVals().push_back( bool_obj.clone() );312 //handle_func_t.get_parameters().push_back( exception_obj.clone() );313 //finally_func_t.get_parameters().push_back( voidptr_obj.clone() );314 326 } 315 327 316 328 // TryStmt Mutation Helpers 317 318 /*319 ast::CompoundStmt * TryMutatorCore::take_try_block( ast::TryStmt *tryStmt ) {320 ast::CompoundStmt * block = tryStmt->body;321 tryStmt->body = nullptr;322 return block;323 }324 */325 329 326 330 ast::FunctionDecl * TryMutatorCore::create_try_wrapper( 327 331 const ast::CompoundStmt *body ) { 328 332 329 ast::FunctionDecl * ret = ast::deepCopy(try_func_t);333 ast::FunctionDecl * ret = make_try_function( body->location ); 330 334 ret->stmts = body; 331 335 return ret; … … 334 338 ast::FunctionDecl * TryMutatorCore::create_terminate_catch( 335 339 CatchList &handlers ) { 336 std::vector<ast::ptr<ast:: Stmt>> handler_wrappers;340 std::vector<ast::ptr<ast::CaseClause>> handler_wrappers; 337 341 338 342 assert (!handlers.empty()); 339 343 const CodeLocation loc = handlers.front()->location; 340 344 341 ast::FunctionDecl * func_t = 
ast::deepCopy(catch_func_t);345 ast::FunctionDecl * func_t = make_catch_function( loc ); 342 346 const ast::DeclWithType * index_obj = func_t->params.front(); 343 347 const ast::DeclWithType * except_obj = func_t->params.back(); … … 348 352 for ( ; it != handlers.end() ; ++it ) { 349 353 ++index; 350 ast::Catch Stmt* handler = *it;354 ast::CatchClause * handler = *it; 351 355 const CodeLocation loc = handler->location; 352 356 … … 386 390 // handler->body = nullptr; 387 391 388 handler_wrappers.push_back( new ast::Case Stmt(loc,392 handler_wrappers.push_back( new ast::CaseClause(loc, 389 393 ast::ConstantExpr::from_int(loc, index) , 390 394 { block, new ast::ReturnStmt( loc, nullptr ) } … … 393 397 // TODO: Some sort of meaningful error on default perhaps? 394 398 395 /* 396 std::list<Statement*> stmt_handlers; 397 while ( !handler_wrappers.empty() ) { 398 stmt_handlers.push_back( handler_wrappers.front() ); 399 handler_wrappers.pop_front(); 400 } 401 */ 402 403 ast::SwitchStmt * handler_lookup = new ast::SwitchStmt(loc, 399 ast::SwitchStmt * handler_lookup = new ast::SwitchStmt( loc, 404 400 new ast::VariableExpr( loc, index_obj ), 405 401 std::move(handler_wrappers) 406 402 ); 407 ast::CompoundStmt * body = new ast::CompoundStmt(loc, 408 {handler_lookup}); 403 ast::CompoundStmt * body = new ast::CompoundStmt( loc, {handler_lookup} ); 409 404 410 405 func_t->stmts = body; … … 415 410 // except_obj is referenced, modded_handler will be freed. 416 411 ast::CompoundStmt * TryMutatorCore::create_single_matcher( 417 const ast::DeclWithType * except_obj, ast::Catch Stmt* modded_handler ) {412 const ast::DeclWithType * except_obj, ast::CatchClause * modded_handler ) { 418 413 // { 419 414 // `modded_handler.decl` … … 433 428 434 429 // Check for type match. 435 ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc, 430 ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc, 436 431 new ast::VariableExpr(loc, except_obj ), 437 432 local_except->get_type() … … 445 440 } 446 441 // Construct the match condition. 447 block->push_back( new ast::IfStmt(loc, 442 block->push_back( new ast::IfStmt(loc, 448 443 cond, modded_handler->body, nullptr ) ); 449 444 450 // xxx - how does this work in new ast451 //modded_handler->set_decl( nullptr );452 //modded_handler->set_cond( nullptr );453 //modded_handler->set_body( nullptr );454 //delete modded_handler;455 445 return block; 456 446 } … … 467 457 ast::CompoundStmt * body = new ast::CompoundStmt(loc); 468 458 469 ast::FunctionDecl * func_t = ast::deepCopy(match_func_t);459 ast::FunctionDecl * func_t = make_match_function( loc ); 470 460 const ast::DeclWithType * except_obj = func_t->params.back(); 471 461 … … 475 465 for ( it = handlers.begin() ; it != handlers.end() ; ++it ) { 476 466 ++index; 477 ast::Catch Stmt* handler = *it;467 ast::CatchClause * handler = *it; 478 468 479 469 // Body should have been taken by create_terminate_catch. 
… … 490 480 } 491 481 492 body->push_back( new ast::ReturnStmt(loc, 482 body->push_back( new ast::ReturnStmt(loc, 493 483 ast::ConstantExpr::from_int( loc, 0 ) )); 494 484 … … 525 515 ast::CompoundStmt * body = new ast::CompoundStmt(loc); 526 516 527 ast::FunctionDecl * func_t = ast::deepCopy(handle_func_t);517 ast::FunctionDecl * func_t = make_handle_function( loc ); 528 518 const ast::DeclWithType * except_obj = func_t->params.back(); 529 519 530 520 CatchList::iterator it; 531 521 for ( it = handlers.begin() ; it != handlers.end() ; ++it ) { 532 ast::Catch Stmt* handler = *it;522 ast::CatchClause * handler = *it; 533 523 const CodeLocation loc = handler->location; 534 524 // Modifiy body. 535 525 ast::CompoundStmt * handling_code; 536 526 if (handler->body.as<ast::CompoundStmt>()) { 537 handling_code = 538 strict_dynamic_cast<ast::CompoundStmt*>(handler->body.get_and_mutate() );527 handling_code = strict_dynamic_cast<ast::CompoundStmt*>( 528 handler->body.get_and_mutate() ); 539 529 } else { 540 530 handling_code = new ast::CompoundStmt(loc); … … 597 587 ast::TryStmt * tryStmt ) { 598 588 // void finally() { `finally->block` } 599 const ast::Finally Stmt* finally = tryStmt->finally;589 const ast::FinallyClause * finally = tryStmt->finally; 600 590 const ast::CompoundStmt * body = finally->body; 601 591 602 ast::FunctionDecl * func_t = ast::deepCopy(finally_func_t);592 ast::FunctionDecl * func_t = make_finally_function( tryStmt->location ); 603 593 func_t->stmts = body; 604 594 605 // finally->set_block( nullptr );606 // delete finally;607 595 tryStmt->finally = nullptr; 608 609 596 610 597 return func_t; … … 617 604 618 605 const CodeLocation loc = finally_wrapper->location; 619 // Make Cleanup Attribute.620 /*621 std::list< ast::Attribute * > attributes;622 {623 std::list< > attr_params;624 attr_params.push_back( nameOf( finally_wrapper ) );625 attributes.push_back( new Attribute( "cleanup", attr_params ) );626 }627 */628 629 606 return new ast::ObjectDecl( 630 607 loc, … … 644 621 // return false; 645 622 const CodeLocation loc = throwStmt->location; 646 ast::Stmt * result = new ast::ReturnStmt(loc, 623 ast::Stmt * result = new ast::ReturnStmt(loc, 647 624 ast::ConstantExpr::from_bool( loc, false ) 648 625 ); 649 626 result->labels = throwStmt->labels; 650 // delete throwStmt; done by postvisit651 627 return result; 652 628 } … … 660 636 assert( nullptr == except_decl ); 661 637 except_decl = structDecl; 662 init_func_types();663 638 } else if ( structDecl->name == "__cfaehm_try_resume_node" ) { 664 639 assert( nullptr == node_decl ); … … 706 681 } 707 682 } 708 // split( mutStmt->handlers,709 // termination_handlers, resumption_handlers );710 683 711 684 if ( resumption_handlers.size() ) { -
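The terminate path above splits a try statement into helper functions: create_terminate_match builds a matcher that returns the 1-based index of the first handler whose exception type matches (0 when none do), and create_terminate_catch builds a catcher that switches on that index to run the selected handler body. A rough, self-contained C++ sketch of that index protocol, with an invented exception_t standing in for the real exception representation; how the runtime drives these functions is not shown here:

    #include <cstdio>

    struct exception_t { int type_id; };        // invented stand-in exception

    // Role of create_terminate_match: return the 1-based index of the first
    // handler that matches, or 0 to keep propagating.
    int match_block( exception_t * e ) {
        if ( e->type_id == 1 ) return 1;
        if ( e->type_id == 2 ) return 2;
        return 0;
    }

    // Role of create_terminate_catch: switch on the index chosen by the
    // matcher and run that handler's body.
    void catch_block( int index, exception_t * ) {
        switch ( index ) {
          case 1: std::puts( "handler 1" ); return;
          case 2: std::puts( "handler 2" ); return;
        }
    }

    int main() {
        exception_t e = { 2 };
        int index = match_block( &e );
        if ( index != 0 ) catch_block( index, &e );   // prints "handler 2"
    }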
src/ControlStruct/LabelGeneratorNew.cpp
rba897d21 r2e9b59b 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // LabelGenerator .cc--7 // LabelGeneratorNew.cpp -- 8 8 // 9 9 // Author : Peter A. Buhr 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Wed Feb 2 09:11:17202213 // Update Count : 7 211 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Mar 28 10:03:00 2022 13 // Update Count : 73 14 14 // 15 15 … … 25 25 namespace ControlStruct { 26 26 27 Label newLabel( const string & suffix, const Stmt * stmt ) { 27 enum { size = 128 }; 28 29 static int newLabelPre( char buf[size], const string & suffix ) { 28 30 static int current = 0; 29 31 30 assertf( stmt, "CFA internal error: parameter statement cannot be null pointer" );31 32 enum { size = 128 };33 char buf[size]; // space to build label34 32 int len = snprintf( buf, size, "__L%d__%s", current++, suffix.c_str() ); 35 33 assertf( len < size, "CFA Internal error: buffer overflow creating label" ); 34 return len; 35 } 36 37 static Label newLabelPost( char buf[size], const CodeLocation & location ) { 38 Label ret_label( location, buf ); 39 ret_label.attributes.push_back( new Attribute( "unused" ) ); 40 return ret_label; 41 } 42 43 Label newLabel( const string & suffix, const Stmt * stmt ) { 44 // Buffer for string manipulation. 45 char buf[size]; 46 47 assertf( stmt, "CFA internal error: parameter statement cannot be null pointer" ); 48 int len = newLabelPre( buf, suffix ); 36 49 37 50 // What does this do? … … 41 54 } // if 42 55 43 Label ret_label( stmt->location, buf ); 44 ret_label.attributes.push_back( new Attribute( "unused" ) ); 45 return ret_label; 56 return newLabelPost( buf, stmt->location ); 57 } 58 59 Label newLabel( const string & suffix, const CodeLocation & location ) { 60 // Buffer for string manipulation. 61 char buf[size]; 62 63 newLabelPre( buf, suffix ); 64 return newLabelPost( buf, location ); 46 65 } 47 66 -
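The refactoring above only splits the existing behaviour into newLabelPre and newLabelPost: a process-wide counter plus the caller's suffix are formatted into a fresh label name, which is then tagged with an unused attribute so unreferenced labels do not warn. A tiny standalone C++ illustration of the name scheme ("generator" is the suffix the keyword pass passes in; the output comments are illustrative):

    #include <cstdio>

    int main() {
        static int current = 0;                 // plays the role of newLabelPre's counter
        char buf[128];
        std::snprintf( buf, sizeof(buf), "__L%d__%s", current++, "generator" );
        std::puts( buf );                       // __L0__generator
        std::snprintf( buf, sizeof(buf), "__L%d__%s", current++, "generator" );
        std::puts( buf );                       // __L1__generator
    }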
src/ControlStruct/LabelGeneratorNew.hpp
rba897d21 r2e9b59b 9 9 // Author : Rodolfo G. Esteves 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Mon Jan 31 18:03:09202213 // Update Count : 2 711 // Last Modified By : Andrew Beach 12 // Last Modified On : Fir Mar 25 15:40:00 2022 13 // Update Count : 28 14 14 // 15 15 … … 18 18 #include <string> // for string 19 19 20 class Statement;20 class CodeLocation; 21 21 22 22 namespace ast { 23 class Label; 23 24 class Stmt; 24 class Label;25 25 } // namespace ast 26 26 27 27 namespace ControlStruct { 28 28 ast::Label newLabel( const std::string &, const ast::Stmt * ); 29 ast::Label newLabel( const std::string &, const CodeLocation & ); 29 30 } // namespace ControlStruct 30 31 -
src/ControlStruct/MultiLevelExit.cpp
rba897d21 r2e9b59b 9 9 // Author : Andrew Beach 10 10 // Created On : Mon Nov 1 13:48:00 2021 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Wed Feb 2 23:07:54202213 // Update Count : 3 311 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Mar 28 9:42:00 2022 13 // Update Count : 34 14 14 // 15 15 … … 40 40 41 41 enum Kind { 42 ForStmtK, WhileDoStmtK, CompoundStmtK, IfStmtK, Case StmtK, SwitchStmtK, TryStmtK42 ForStmtK, WhileDoStmtK, CompoundStmtK, IfStmtK, CaseClauseK, SwitchStmtK, TryStmtK 43 43 } kind; 44 44 … … 58 58 Entry( const IfStmt *stmt, Label breakExit ) : 59 59 stmt( stmt ), firstTarget( breakExit ), secondTarget(), kind( IfStmtK ) {} 60 Entry( const Case Stmt *stmt, Label fallExit ) :61 stmt( stmt ), firstTarget( fallExit ), secondTarget(), kind( Case StmtK ) {}60 Entry( const CaseClause *, const CompoundStmt *stmt, Label fallExit ) : 61 stmt( stmt ), firstTarget( fallExit ), secondTarget(), kind( CaseClauseK ) {} 62 62 Entry( const SwitchStmt *stmt, Label breakExit, Label fallDefaultExit ) : 63 63 stmt( stmt ), firstTarget( breakExit ), secondTarget( fallDefaultExit ), kind( SwitchStmtK ) {} … … 66 66 67 67 bool isContTarget() const { return kind <= WhileDoStmtK; } 68 bool isBreakTarget() const { return kind != Case StmtK; }69 bool isFallTarget() const { return kind == Case StmtK; }68 bool isBreakTarget() const { return kind != CaseClauseK; } 69 bool isFallTarget() const { return kind == CaseClauseK; } 70 70 bool isFallDefaultTarget() const { return kind == SwitchStmtK; } 71 71 72 72 // These routines set a target as being "used" by a BranchStmt 73 73 Label useContExit() { assert( kind <= WhileDoStmtK ); return useTarget(secondTarget); } 74 Label useBreakExit() { assert( kind != Case StmtK ); return useTarget(firstTarget); }75 Label useFallExit() { assert( kind == Case StmtK ); return useTarget(firstTarget); }74 Label useBreakExit() { assert( kind != CaseClauseK ); return useTarget(firstTarget); } 75 Label useFallExit() { assert( kind == CaseClauseK ); return useTarget(firstTarget); } 76 76 Label useFallDefaultExit() { assert( kind == SwitchStmtK ); return useTarget(secondTarget); } 77 77 78 78 // These routines check if a specific label for a statement is used by a BranchStmt 79 79 bool isContUsed() const { assert( kind <= WhileDoStmtK ); return secondTarget.used; } 80 bool isBreakUsed() const { assert( kind != Case StmtK ); return firstTarget.used; }81 bool isFallUsed() const { assert( kind == Case StmtK ); return firstTarget.used; }80 bool isBreakUsed() const { assert( kind != CaseClauseK ); return firstTarget.used; } 81 bool isFallUsed() const { assert( kind == CaseClauseK ); return firstTarget.used; } 82 82 bool isFallDefaultUsed() const { assert( kind == SwitchStmtK ); return secondTarget.used; } 83 83 void seenDefault() { fallDefaultValid = false; } … … 115 115 void previsit( const ForStmt * ); 116 116 const ForStmt * postvisit( const ForStmt * ); 117 const Case Stmt * previsit( const CaseStmt* );117 const CaseClause * previsit( const CaseClause * ); 118 118 void previsit( const IfStmt * ); 119 119 const IfStmt * postvisit( const IfStmt * ); … … 123 123 void previsit( const TryStmt * ); 124 124 void postvisit( const TryStmt * ); 125 void previsit( const Finally Stmt* );125 void previsit( const FinallyClause * ); 126 126 127 127 const Stmt * mutateLoop( const Stmt * body, Entry& ); … … 288 288 auto switchStmt = strict_dynamic_cast< const SwitchStmt * >( targetEntry->stmt ); 289 289 bool foundDefault = false; 290 for ( auto subStmt : 
switchStmt->stmts ) { 291 const CaseStmt * caseStmt = subStmt.strict_as<CaseStmt>(); 290 for ( auto caseStmt : switchStmt->cases ) { 292 291 if ( caseStmt->isDefault() ) { 293 292 foundDefault = true; … … 365 364 } 366 365 367 const Case Stmt * MultiLevelExitCore::previsit( const CaseStmt* stmt ) {366 const CaseClause * MultiLevelExitCore::previsit( const CaseClause * stmt ) { 368 367 visit_children = false; 369 368 … … 375 374 376 375 // The cond may not exist, but if it does update it now. 377 visitor->maybe_accept( stmt, &Case Stmt::cond );376 visitor->maybe_accept( stmt, &CaseClause::cond ); 378 377 379 378 // Just save the mutated node for simplicity. 380 Case Stmt* mutStmt = mutate( stmt );381 382 Label fallLabel = newLabel( "fallThrough", stmt );379 CaseClause * mutStmt = mutate( stmt ); 380 381 Label fallLabel = newLabel( "fallThrough", stmt->location ); 383 382 if ( ! mutStmt->stmts.empty() ) { 383 // These should already be in a block. 384 auto first = mutStmt->stmts.front().get_and_mutate(); 385 auto block = strict_dynamic_cast<CompoundStmt *>( first ); 386 384 387 // Ensure that the stack isn't corrupted by exceptions in fixBlock. 385 388 auto guard = makeFuncGuard( 386 [&](){ enclosing_control_structures.emplace_back( mutStmt, fallLabel ); },389 [&](){ enclosing_control_structures.emplace_back( mutStmt, block, fallLabel ); }, 387 390 [this](){ enclosing_control_structures.pop_back(); } 388 391 ); 389 392 390 // These should already be in a block.391 auto block = mutate( mutStmt->stmts.front().strict_as<CompoundStmt>() );392 393 block->kids = fixBlock( block->kids, true ); 393 394 … … 396 397 Entry & entry = enclosing_control_structures.back(); 397 398 if ( entry.isFallUsed() ) { 398 mutStmt->stmts.push_back( labelledNullStmt( mutStmt->location, entry.useFallExit() ) );399 mutStmt->stmts.push_back( labelledNullStmt( block->location, entry.useFallExit() ) ); 399 400 } 400 401 } … … 433 434 } 434 435 435 bool isDefaultCase( const ptr<Stmt> & stmt ) { 436 const CaseStmt * caseStmt = stmt.strict_as<CaseStmt>(); 437 return caseStmt->isDefault(); 436 static bool isDefaultCase( const ptr<CaseClause> & caseClause ) { 437 return caseClause->isDefault(); 438 438 } 439 439 440 440 void MultiLevelExitCore::previsit( const SwitchStmt * stmt ) { 441 441 Label label = newLabel( "switchBreak", stmt ); 442 auto it = find_if( stmt-> stmts.rbegin(), stmt->stmts.rend(), isDefaultCase );443 444 const Case Stmt * defaultCase = it != stmt->stmts.rend() ? (it)->strict_as<CaseStmt>() : nullptr;445 Label defaultLabel = defaultCase ? newLabel( "fallThroughDefault", defaultCase ) : Label( stmt->location, "" );442 auto it = find_if( stmt->cases.rbegin(), stmt->cases.rend(), isDefaultCase ); 443 444 const CaseClause * defaultCase = it != stmt->cases.rend() ? (*it) : nullptr; 445 Label defaultLabel = defaultCase ? newLabel( "fallThroughDefault", defaultCase->location ) : Label( stmt->location, "" ); 446 446 enclosing_control_structures.emplace_back( stmt, label, defaultLabel ); 447 447 GuardAction( [this]() { enclosing_control_structures.pop_back(); } ); … … 449 449 // Collect valid labels for fallthrough. It starts with all labels at this level, then remove as each is seen during 450 450 // traversal. 
451 for ( const Stmt * stmt : stmt->stmts ) { 452 auto * caseStmt = strict_dynamic_cast< const CaseStmt * >( stmt ); 451 for ( const CaseClause * caseStmt : stmt->cases ) { 453 452 if ( caseStmt->stmts.empty() ) continue; 454 453 auto block = caseStmt->stmts.front().strict_as<CompoundStmt>(); … … 471 470 // exit label and break to the last case, create a default case if no cases. 472 471 SwitchStmt * mutStmt = mutate( stmt ); 473 if ( mutStmt-> stmts.empty() ) {474 mutStmt-> stmts.push_back( new CaseStmt( mutStmt->location, nullptr, {} ) );475 } 476 477 auto caseStmt = mutStmt-> stmts.back().strict_as<CaseStmt>();472 if ( mutStmt->cases.empty() ) { 473 mutStmt->cases.push_back( new CaseClause( mutStmt->location, nullptr, {} ) ); 474 } 475 476 auto caseStmt = mutStmt->cases.back().get(); 478 477 auto mutCase = mutate( caseStmt ); 479 mutStmt-> stmts.back() = mutCase;478 mutStmt->cases.back() = mutCase; 480 479 481 480 Label label( mutCase->location, "breakLabel" ); … … 514 513 } 515 514 516 void MultiLevelExitCore::previsit( const Finally Stmt* ) {515 void MultiLevelExitCore::previsit( const FinallyClause * ) { 517 516 GuardAction([this, old = move( enclosing_control_structures)](){ enclosing_control_structures = move(old); }); 518 517 enclosing_control_structures = vector<Entry>(); -
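MultiLevelExit now operates on CaseClause nodes, and for each non-empty clause it appends a generated `fallThrough` label (as a labelled null statement) at the end of the clause, which is where `fallthrough` branches are redirected. The fragment below is only a rough reconstruction of the lowered shape, inferred from the generated label names in this diff rather than taken from actual compiler output.

    /* Rough shape of the lowering; an illustration, not compiler output. */
    void lowered_example( int x, int skip_rest ) {
        switch ( x ) {
          case 0:
            if ( skip_rest ) goto __L0__fallThrough;   /* was: fallthrough; */
            /* ... remainder of case 0 ... */
            __L0__fallThrough: ;                       /* generated fall-through target */
            /* control now drops into case 1 */
          case 1:
            break;
        }
    }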
src/InitTweak/FixGlobalInit.cc
rba897d21 r2e9b59b 113 113 accept_all(translationUnit, fixer); 114 114 115 // Say these magic declarations come at the end of the file. 116 CodeLocation const & location = translationUnit.decls.back()->location; 117 115 118 if ( !fixer.core.initStmts.empty() ) { 116 119 std::vector<ast::ptr<ast::Expr>> ctorParams; 117 if (inLibrary) ctorParams.emplace_back(ast::ConstantExpr::from_int({}, 200)); 118 auto initFunction = new ast::FunctionDecl({}, "__global_init__", {}, {}, {}, new ast::CompoundStmt({}, std::move(fixer.core.initStmts)), 119 ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("constructor", std::move(ctorParams))}); 120 if (inLibrary) ctorParams.emplace_back(ast::ConstantExpr::from_int(location, 200)); 121 auto initFunction = new ast::FunctionDecl(location, 122 "__global_init__", {}, {}, {}, 123 new ast::CompoundStmt(location, std::move(fixer.core.initStmts)), 124 ast::Storage::Static, ast::Linkage::C, 125 {new ast::Attribute("constructor", std::move(ctorParams))}); 120 126 121 127 translationUnit.decls.emplace_back( initFunction ); … … 124 130 if ( !fixer.core.destroyStmts.empty() ) { 125 131 std::vector<ast::ptr<ast::Expr>> dtorParams; 126 if (inLibrary) dtorParams.emplace_back(ast::ConstantExpr::from_int({}, 200)); 127 auto destroyFunction = new ast::FunctionDecl({}, "__global_destroy__", {}, {}, {}, new ast::CompoundStmt({}, std::move(fixer.core.destroyStmts)), 128 ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("destructor", std::move(dtorParams))}); 132 if (inLibrary) dtorParams.emplace_back(ast::ConstantExpr::from_int(location, 200)); 133 auto destroyFunction = new ast::FunctionDecl( location, 134 "__global_destroy__", {}, {}, {}, 135 new ast::CompoundStmt(location, std::move(fixer.core.destroyStmts)), 136 ast::Storage::Static, ast::Linkage::C, 137 {new ast::Attribute("destructor", std::move(dtorParams))}); 129 138 130 139 translationUnit.decls.emplace_back(destroyFunction); -
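FixGlobalInit gathers the collected initialization and destruction statements into `__global_init__` / `__global_destroy__` functions marked with GCC's constructor and destructor attributes, now with proper code locations taken from the last declaration in the unit. The priority argument 200 is only added for library builds; since lower-numbered constructors run earlier (and destructors in the opposite order), this presumably lets library setup precede default-priority user constructors. In plain GNU C the emitted shape roughly corresponds to the sketch below; the function names match the ones the pass generates, the bodies are placeholders.

    /* Stand-alone GNU C illustration of the attributes the pass attaches. */
    __attribute__(( constructor( 200 ) ))
    static void __global_init__( void ) {
        /* ... collected global-initialization statements ... */
    }

    __attribute__(( destructor( 200 ) ))
    static void __global_destroy__( void ) {
        /* ... collected global-destruction statements, run at exit ... */
    }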
src/InitTweak/FixInitNew.cpp
rba897d21 r2e9b59b 16 16 #include "CodeGen/GenType.h" // for genPrettyType 17 17 #include "CodeGen/OperatorTable.h" 18 #include "Common/CodeLocationTools.hpp" 18 19 #include "Common/PassVisitor.h" // for PassVisitor, WithStmtsToAdd 19 20 #include "Common/SemanticError.h" // for SemanticError … … 85 86 /// generate/resolve copy construction expressions for each, and generate/resolve destructors for both 86 87 /// arguments and return value temporaries 87 struct ResolveCopyCtors final : public ast::WithGuards, public ast::WithStmtsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithVisitorRef<ResolveCopyCtors> {88 struct ResolveCopyCtors final : public ast::WithGuards, public ast::WithStmtsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithVisitorRef<ResolveCopyCtors>, public ast::WithConstTranslationUnit { 88 89 const ast::Expr * postvisit( const ast::ImplicitCopyCtorExpr * impCpCtorExpr ); 89 90 const ast::StmtExpr * previsit( const ast::StmtExpr * stmtExpr ); … … 189 190 /// for any member that is missing a corresponding ctor/dtor call. 190 191 /// error if a member is used before constructed 191 struct GenStructMemberCalls final : public ast::WithGuards, public ast::WithShortCircuiting, public ast::WithSymbolTable, public ast::WithVisitorRef<GenStructMemberCalls> {192 struct GenStructMemberCalls final : public ast::WithGuards, public ast::WithShortCircuiting, public ast::WithSymbolTable, public ast::WithVisitorRef<GenStructMemberCalls>, public ast::WithConstTranslationUnit { 192 193 void previsit( const ast::FunctionDecl * funcDecl ); 193 194 const ast::DeclWithType * postvisit( const ast::FunctionDecl * funcDecl ); … … 214 215 215 216 /// expands ConstructorExpr nodes into comma expressions, using a temporary for the first argument 216 struct FixCtorExprs final : public ast::WithDeclsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting {217 struct FixCtorExprs final : public ast::WithDeclsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithConstTranslationUnit { 217 218 const ast::Expr * postvisit( const ast::ConstructorExpr * ctorExpr ); 218 219 }; … … 509 510 // (VariableExpr and already resolved expression) 510 511 CP_CTOR_PRINT( std::cerr << "ResolvingCtorDtor " << untyped << std::endl; ) 511 ast::ptr<ast::Expr> resolved = ResolvExpr::findVoidExpression(untyped, symtab);512 ast::ptr<ast::Expr> resolved = ResolvExpr::findVoidExpression(untyped, { symtab, transUnit().global } ); 512 513 assert( resolved ); 513 514 if ( resolved->env ) { … … 553 554 ast::ptr<ast::Expr> guard = mutArg; 554 555 555 ast::ptr<ast::ObjectDecl> tmp = new ast::ObjectDecl( {}, "__tmp", mutResult, nullptr );556 ast::ptr<ast::ObjectDecl> tmp = new ast::ObjectDecl(loc, "__tmp", mutResult, nullptr ); 556 557 557 558 // create and resolve copy constructor … … 587 588 588 589 ast::Expr * ResolveCopyCtors::destructRet( const ast::ObjectDecl * ret, const ast::Expr * arg ) { 590 auto global = transUnit().global; 589 591 // TODO: refactor code for generating cleanup attribute, since it's common and reused in ~3-4 places 590 592 // check for existing cleanup attribute before adding another(?) 591 593 // need to add __Destructor for _tmp_cp variables as well 592 594 593 assertf( ast::dtorStruct, "Destructor generation requires __Destructor definition." );594 assertf( ast::dtorStruct->members.size() == 2, "__Destructor definition does not have expected fields." 
);595 assertf( ast::dtorStructDestroy, "Destructor generation requires __destroy_Destructor." );595 assertf( global.dtorStruct, "Destructor generation requires __Destructor definition." ); 596 assertf( global.dtorStruct->members.size() == 2, "__Destructor definition does not have expected fields." ); 597 assertf( global.dtorDestroy, "Destructor generation requires __destroy_Destructor." ); 596 598 597 599 const CodeLocation loc = ret->location; … … 610 612 auto dtorFunc = getDtorFunc( ret, new ast::ExprStmt(loc, dtor ), stmtsToAddBefore ); 611 613 612 auto dtorStructType = new ast::StructInstType( ast::dtorStruct);614 auto dtorStructType = new ast::StructInstType( global.dtorStruct ); 613 615 614 616 // what does this do??? … … 622 624 static UniqueName namer( "_ret_dtor" ); 623 625 auto retDtor = new ast::ObjectDecl(loc, namer.newName(), dtorStructType, new ast::ListInit(loc, { new ast::SingleInit(loc, ast::ConstantExpr::null(loc) ), new ast::SingleInit(loc, new ast::CastExpr( new ast::VariableExpr(loc, dtorFunc ), dtorType ) ) } ) ); 624 retDtor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr(loc, ast::dtorStructDestroy ) } ) );626 retDtor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr(loc, global.dtorDestroy ) } ) ); 625 627 stmtsToAddBefore.push_back( new ast::DeclStmt(loc, retDtor ) ); 626 628 627 629 if ( arg ) { 628 auto member = new ast::MemberExpr(loc, ast::dtorStruct->members.front().strict_as<ast::DeclWithType>(), new ast::VariableExpr(loc, retDtor ) );630 auto member = new ast::MemberExpr(loc, global.dtorStruct->members.front().strict_as<ast::DeclWithType>(), new ast::VariableExpr(loc, retDtor ) ); 629 631 auto object = new ast::CastExpr( new ast::AddressExpr( new ast::VariableExpr(loc, ret ) ), new ast::PointerType(new ast::VoidType() ) ); 630 632 ast::Expr * assign = createBitwiseAssignment( member, object ); … … 799 801 // to prevent warnings ('_unq0' may be used uninitialized in this function), 800 802 // insert an appropriate zero initializer for UniqueExpr temporaries. 
801 ast::Init * makeInit( const ast::Type * t ) {803 ast::Init * makeInit( const ast::Type * t, CodeLocation const & loc ) { 802 804 if ( auto inst = dynamic_cast< const ast::StructInstType * >( t ) ) { 803 805 // initizer for empty struct must be empty 804 if ( inst->base->members.empty() ) return new ast::ListInit({}, {}); 806 if ( inst->base->members.empty() ) { 807 return new ast::ListInit( loc, {} ); 808 } 805 809 } else if ( auto inst = dynamic_cast< const ast::UnionInstType * >( t ) ) { 806 810 // initizer for empty union must be empty 807 if ( inst->base->members.empty() ) return new ast::ListInit({}, {}); 808 } 809 810 return new ast::ListInit( {}, { new ast::SingleInit( {}, ast::ConstantExpr::from_int({}, 0) ) } ); 811 if ( inst->base->members.empty() ) { 812 return new ast::ListInit( loc, {} ); 813 } 814 } 815 816 return new ast::ListInit( loc, { 817 new ast::SingleInit( loc, ast::ConstantExpr::from_int( loc, 0 ) ) 818 } ); 811 819 } 812 820 … … 832 840 } else { 833 841 // expr isn't a call expr, so create a new temporary variable to use to hold the value of the unique expression 834 mutExpr->object = new ast::ObjectDecl( mutExpr->location, toString("_unq", mutExpr->id), mutExpr->result, makeInit( mutExpr->result ) );842 mutExpr->object = new ast::ObjectDecl( mutExpr->location, toString("_unq", mutExpr->id), mutExpr->result, makeInit( mutExpr->result, mutExpr->location ) ); 835 843 mutExpr->var = new ast::VariableExpr( mutExpr->location, mutExpr->object ); 836 844 } … … 1172 1180 auto guard = makeFuncGuard( [this]() { symtab.enterScope(); }, [this]() { symtab.leaveScope(); } ); 1173 1181 symtab.addFunction( function ); 1182 auto global = transUnit().global; 1174 1183 1175 1184 // need to iterate through members in reverse in order for … … 1217 1226 1218 1227 static UniqueName memberDtorNamer = { "__memberDtor" }; 1219 assertf( ast::dtorStruct, "builtin __Destructor not found." );1220 assertf( ast::dtorStructDestroy, "builtin __destroy_Destructor not found." );1228 assertf( global.dtorStruct, "builtin __Destructor not found." ); 1229 assertf( global.dtorDestroy, "builtin __destroy_Destructor not found." 
); 1221 1230 1222 1231 ast::Expr * thisExpr = new ast::CastExpr( new ast::AddressExpr( new ast::VariableExpr(loc, thisParam ) ), new ast::PointerType( new ast::VoidType(), ast::CV::Qualifiers() ) ); … … 1228 1237 auto dtorType = new ast::PointerType( dtorFtype ); 1229 1238 1230 auto destructor = new ast::ObjectDecl(loc, memberDtorNamer.newName(), new ast::StructInstType( ast::dtorStruct ), new ast::ListInit(loc, { new ast::SingleInit(loc, thisExpr ), new ast::SingleInit(loc, new ast::CastExpr( dtorExpr, dtorType ) ) } ) );1231 destructor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr( {}, ast::dtorStructDestroy ) } ) );1239 auto destructor = new ast::ObjectDecl(loc, memberDtorNamer.newName(), new ast::StructInstType( global.dtorStruct ), new ast::ListInit(loc, { new ast::SingleInit(loc, thisExpr ), new ast::SingleInit(loc, new ast::CastExpr( dtorExpr, dtorType ) ) } ) ); 1240 destructor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr( loc, global.dtorDestroy ) } ) ); 1232 1241 mutStmts->push_front( new ast::DeclStmt(loc, destructor ) ); 1233 1242 mutStmts->kids.splice( mutStmts->kids.begin(), stmtsToAdd ); … … 1323 1332 1324 1333 const ast::Expr * GenStructMemberCalls::postvisit( const ast::UntypedExpr * untypedExpr ) { 1325 // Expression * newExpr = untypedExpr;1326 1334 // xxx - functions returning ast::ptr seems wrong... 1327 auto res = ResolvExpr::findVoidExpression( untypedExpr, symtab ); 1328 return res.release(); 1329 // return newExpr; 1335 auto res = ResolvExpr::findVoidExpression( untypedExpr, { symtab, transUnit().global } ); 1336 // Fix CodeLocation (at least until resolver is fixed). 1337 auto fix = localFillCodeLocations( untypedExpr->location, res.release() ); 1338 return strict_dynamic_cast<const ast::Expr *>( fix ); 1330 1339 } 1331 1340 … … 1361 1370 1362 1371 // resolve assignment and dispose of new env 1363 auto resolved = ResolvExpr::findVoidExpression( assign, symtab);1372 auto resolved = ResolvExpr::findVoidExpression( assign, { symtab, transUnit().global } ); 1364 1373 auto mut = resolved.get_and_mutate(); 1365 1374 assertf(resolved.get() == mut, "newly resolved expression must be unique"); -
src/InitTweak/GenInit.cc
rba897d21 r2e9b59b 402 402 retVal->location, "?{}", retVal, stmt->expr ); 403 403 assertf( ctorStmt, 404 "ReturnFixer: genCtorDtor returned n llptr: %s / %s",404 "ReturnFixer: genCtorDtor returned nullptr: %s / %s", 405 405 toString( retVal ).c_str(), 406 406 toString( stmt->expr ).c_str() ); 407 407 stmtsToAddBefore.push_back( ctorStmt ); 408 408 409 409 // Return the retVal object. … … 421 421 void genInit( ast::TranslationUnit & transUnit ) { 422 422 ast::Pass<HoistArrayDimension_NoResolve_New>::run( transUnit ); 423 ast::Pass<ReturnFixer_New>::run( transUnit ); 424 } 425 426 void fixReturnStatements( ast::TranslationUnit & transUnit ) { 423 427 ast::Pass<ReturnFixer_New>::run( transUnit ); 424 428 } -
src/InitTweak/GenInit.h
rba897d21 r2e9b59b 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Fri Oct 22 16:08:00 202113 // Update Count : 612 // Last Modified On : Fri Mar 18 14:22:00 2022 13 // Update Count : 7 14 14 // 15 15 … … 31 31 /// Converts return statements into copy constructor calls on the hidden return variable 32 32 void fixReturnStatements( std::list< Declaration * > & translationUnit ); 33 void fixReturnStatements( ast::TranslationUnit & translationUnit ); 33 34 34 35 /// generates a single ctor/dtor statement using objDecl as the 'this' parameter and arg as the optional argument -
src/InitTweak/InitTweak.cc
rba897d21 r2e9b59b 423 423 loc, targetLabel.newName(), { new ast::Attribute{ "unused" } } }; 424 424 425 std::vector< ast::ptr< ast:: Stmt> > branches;425 std::vector< ast::ptr< ast::CaseClause > > branches; 426 426 for ( const ast::Init * init : *listInit ) { 427 427 auto condition = ast::ConstantExpr::from_ulong( loc, cond ); … … 432 432 stmts.emplace_back( 433 433 new ast::BranchStmt{ loc, ast::BranchStmt::Break, switchLabel } ); 434 branches.emplace_back( new ast::Case Stmt{ loc, condition, std::move( stmts ) } );434 branches.emplace_back( new ast::CaseClause{ loc, condition, std::move( stmts ) } ); 435 435 } 436 436 out.emplace_back( new ast::SwitchStmt{ loc, index, std::move( branches ) } ); -
src/Parser/DeclarationNode.cc
rba897d21 r2e9b59b 78 78 delete variable.initializer; 79 79 80 delete type;80 // delete type; 81 81 delete bitfieldWidth; 82 82 … … 253 253 } // DeclarationNode::newAggregate 254 254 255 DeclarationNode * DeclarationNode::newEnum( const string * name, DeclarationNode * constants, bool body 255 DeclarationNode * DeclarationNode::newEnum( const string * name, DeclarationNode * constants, bool body, bool typed) { 256 256 DeclarationNode * newnode = new DeclarationNode; 257 257 newnode->type = new TypeData( TypeData::Enum ); … … 263 263 } // DeclarationNode::newEnum 264 264 265 266 265 267 DeclarationNode * DeclarationNode::newName( const string * name ) { 266 268 DeclarationNode * newnode = new DeclarationNode; … … 270 272 } // DeclarationNode::newName 271 273 272 DeclarationNode * DeclarationNode::newEnumConstant( const string * name, ExpressionNode * constant ) { 274 DeclarationNode * DeclarationNode::newEnumConstant( const string * name, ExpressionNode * constant ) { // Marker 273 275 DeclarationNode * newnode = newName( name ); 274 276 newnode->enumeratorValue.reset( constant ); … … 665 667 } 666 668 669 DeclarationNode * DeclarationNode::addEnumBase( DeclarationNode * o ) { 670 if ( o && o -> type) { 671 type->base= o->type; 672 } 673 delete o; 674 return this; 675 } 676 667 677 DeclarationNode * DeclarationNode::addTypedef() { 668 678 TypeData * newtype = new TypeData( TypeData::Symbolic ); -
src/Parser/ParseNode.h
rba897d21 r2e9b59b 235 235 static DeclarationNode * newFunction( const std::string * name, DeclarationNode * ret, DeclarationNode * param, StatementNode * body ); 236 236 static DeclarationNode * newAggregate( AggregateDecl::Aggregate kind, const std::string * name, ExpressionNode * actuals, DeclarationNode * fields, bool body ); 237 static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body );237 static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body, bool typed ); 238 238 static DeclarationNode * newEnumConstant( const std::string * name, ExpressionNode * constant ); 239 239 static DeclarationNode * newName( const std::string * ); … … 265 265 DeclarationNode * addType( DeclarationNode * ); 266 266 DeclarationNode * addTypedef(); 267 DeclarationNode * addEnumBase( DeclarationNode * ); 267 268 DeclarationNode * addAssertions( DeclarationNode * ); 268 269 DeclarationNode * addName( std::string * ); -
src/Parser/StatementNode.cc
rba897d21 r2e9b59b 366 366 } // maybe_build_compound 367 367 368 // Question 368 369 Statement * build_asm( bool voltile, Expression * instruction, ExpressionNode * output, ExpressionNode * input, ExpressionNode * clobber, LabelNode * gotolabels ) { 369 370 list< Expression * > out, in; -
src/Parser/TypeData.cc
rba897d21 r2e9b59b 918 918 EnumDecl * buildEnum( const TypeData * td, std::list< Attribute * > attributes, LinkageSpec::Spec linkage ) { 919 919 assert( td->kind == TypeData::Enum ); 920 EnumDecl * ret = new EnumDecl( *td->enumeration.name, attributes, linkage ); 920 Type * baseType = td->base ? typebuild(td->base) : nullptr; 921 EnumDecl * ret = new EnumDecl( *td->enumeration.name, attributes, linkage, baseType ); 921 922 buildList( td->enumeration.constants, ret->get_members() ); 922 923 list< Declaration * >::iterator members = ret->get_members().begin(); 923 for ( const DeclarationNode * cur = td->enumeration. 924 for ( const DeclarationNode * cur = td->enumeration.constants; cur != nullptr; cur = dynamic_cast< DeclarationNode * >( cur->get_next() ), ++members ) { 924 925 if ( cur->has_enumeratorValue() ) { 925 926 ObjectDecl * member = dynamic_cast< ObjectDecl * >(* members); 926 927 member->set_init( new SingleInit( maybeMoveBuild< Expression >( cur->consume_enumeratorValue() ) ) ); 928 } else { 929 if ( baseType && (!dynamic_cast<BasicType *>(baseType) || !dynamic_cast<BasicType *>(baseType)->isWholeNumber())) { 930 SemanticError( td->location, "A non whole number enum value decl must be explicitly initialized." ); 931 } 927 932 } // if 928 933 } // for 929 ret->set_body( td->enumeration.body ); 934 ret->set_body( td->enumeration.body ); // Boolean; if it has body 930 935 return ret; 931 936 } // buildEnum -
src/Parser/TypeData.h
rba897d21 r2e9b59b 132 132 Initializer * init = nullptr, std::list< class Attribute * > attributes = std::list< class Attribute * >() ); 133 133 FunctionType * buildFunction( const TypeData * ); 134 Declaration * addEnumBase( Declaration *, const TypeData * ); 134 135 void buildKRFunction( const TypeData::Function_t & function ); 135 136 -
src/Parser/parser.yy
rba897d21 r2e9b59b 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Feb 25 17:54:56202213 // Update Count : 52 6212 // Last Modified On : Mon Mar 14 16:35:29 2022 13 // Update Count : 5276 14 14 // 15 15 … … 652 652 // Historic, transitional: Disallow commas in subscripts. 653 653 // Switching to this behaviour may help check if a C compatibilty case uses comma-exprs in subscripts. 654 // { SemanticError( yylloc, "New array subscript is currently unimplemented." ); $$ = nullptr; }655 654 // Current: Commas in subscripts make tuples. 656 655 { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, new ExpressionNode( build_tuple( (ExpressionNode *)($3->set_last( $5 ) ) )) ) ); } … … 661 660 // equivalent to the old x[i,j]. 662 661 { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, $3 ) ); } 662 | constant '[' assignment_expression ']' // 3[a], 'a'[a], 3.5[a] 663 { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, $3 ) ); } 664 | string_literal '[' assignment_expression ']' // "abc"[3], 3["abc"] 665 { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, new ExpressionNode( $1 ), $3 ) ); } 663 666 | postfix_expression '{' argument_expression_list_opt '}' // CFA, constructor call 664 667 { … … 2300 2303 ; 2301 2304 2302 enum_type: // enum2305 enum_type: // static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body, bool typed ); // enum 2303 2306 ENUM attribute_list_opt '{' enumerator_list comma_opt '}' 2304 { $$ = DeclarationNode::newEnum( nullptr, $4, true )->addQualifiers( $2 ); }2307 { $$ = DeclarationNode::newEnum( nullptr, $4, true, false )->addQualifiers( $2 ); } 2305 2308 | ENUM attribute_list_opt identifier 2306 2309 { typedefTable.makeTypedef( *$3 ); } 2307 2310 '{' enumerator_list comma_opt '}' 2308 { $$ = DeclarationNode::newEnum( $3, $6, true )->addQualifiers( $2 ); }2311 { $$ = DeclarationNode::newEnum( $3, $6, true, false )->addQualifiers( $2 ); } 2309 2312 | ENUM attribute_list_opt typedef_name // unqualified type name 2310 2313 '{' enumerator_list comma_opt '}' 2311 { $$ = DeclarationNode::newEnum( $3->name, $5, true )->addQualifiers( $2 ); }2314 { $$ = DeclarationNode::newEnum( $3->name, $5, true, false )->addQualifiers( $2 ); } 2312 2315 | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt '{' enumerator_list comma_opt '}' 2313 2316 { 2314 if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 ) { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); } 2315 SemanticError( yylloc, "Typed enumeration is currently unimplemented." ); $$ = nullptr; 2316 } 2317 | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt identifier attribute_list_opt 2317 if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 ) 2318 { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); } 2319 // SemanticError( yylloc, "Typed enumeration is currently unimplemented." 
); $$ = nullptr; 2320 2321 $$ = DeclarationNode::newEnum( nullptr, $7, true, true ) ->addQualifiers( $5 ) -> addEnumBase( $3 ); 2322 // $$ = DeclarationNode::newEnum( nullptr, $7, true, true ) ->addQualifiers( $5 ); 2323 } 2324 | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt identifier attribute_list_opt // Question: why attributes/qualifier after identifier 2318 2325 { 2319 2326 if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 ) { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); } … … 2322 2329 '{' enumerator_list comma_opt '}' 2323 2330 { 2324 SemanticError( yylloc, "Typed enumeration is currently unimplemented." ); $$ = nullptr; 2331 $$ = DeclarationNode::newEnum( $6, $10, true, true ) -> addQualifiers( $5 ) -> addQualifiers( $7 ) -> addEnumBase( $3 ); 2332 // $$ = DeclarationNode::newEnum( $6, $10, true, true ) -> addQualifiers( $5 ) -> addQualifiers( $7 ); 2325 2333 } 2326 2334 | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt typedef_name attribute_list_opt '{' enumerator_list comma_opt '}' … … 2328 2336 if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 ) { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); } 2329 2337 typedefTable.makeTypedef( *$6->name ); 2330 SemanticError( yylloc, "Typed enumeration is currently unimplemented." ); $$ = nullptr; 2338 $$ = DeclarationNode::newEnum( $6->name, $9, true, true ) -> addQualifiers( $5 ) -> addQualifiers( $7 ) -> addEnumBase( $3 ); 2339 // $$ = DeclarationNode::newEnum( $6->name, $9, true, true ) -> addQualifiers( $5 ) -> addQualifiers( $7 ); 2331 2340 } 2332 2341 | enum_type_nobody … … 2335 2344 enum_type_nobody: // enum - {...} 2336 2345 ENUM attribute_list_opt identifier 2337 { typedefTable.makeTypedef( *$3 ); $$ = DeclarationNode::newEnum( $3, 0, false )->addQualifiers( $2 ); }2346 { typedefTable.makeTypedef( *$3 ); $$ = DeclarationNode::newEnum( $3, 0, false, false )->addQualifiers( $2 ); } 2338 2347 | ENUM attribute_list_opt type_name // qualified type name 2339 { typedefTable.makeTypedef( *$3->type->symbolic.name ); $$ = DeclarationNode::newEnum( $3->type->symbolic.name, 0, false )->addQualifiers( $2 ); }2348 { typedefTable.makeTypedef( *$3->type->symbolic.name ); $$ = DeclarationNode::newEnum( $3->type->symbolic.name, 0, false, false )->addQualifiers( $2 ); } 2340 2349 ; 2341 2350 -
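These grammar rules replace the old "Typed enumeration is currently unimplemented" error with real productions for enumerations that carry a base type, threaded through DeclarationNode::newEnum's new `typed` flag and addEnumBase. Based only on the grammar above (not on the test suite), the surface syntax now accepted looks like the following Cforall sketch; per the buildEnum change in TypeData.cc, a non-whole-number base requires every enumerator to be explicitly initialized, and per the ConversionCost.cc change an `int` no longer converts implicitly to an enum that has a base type.

    // Sketch of the newly accepted Cforall syntax; declarations invented for
    // illustration, not taken from the test suite.
    enum( int ) Direction { North = 0, East, South, West };  // whole-number base type
    enum( double ) Coeff { Half = 0.5, One = 1.0 };          // non-integral base:
                                                             // every enumerator needs
                                                             // an explicit initializer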
src/ResolvExpr/CandidateFinder.cpp
rba897d21 r2e9b59b 10 10 // Created On : Wed Jun 5 14:30:00 2019 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tue Oct 1 14:55:00 201913 // Update Count : 212 // Last Modified On : Wed Mar 16 11:58:00 2022 13 // Update Count : 3 14 14 // 15 15 … … 595 595 /// Actually visits expressions to find their candidate interpretations 596 596 class Finder final : public ast::WithShortCircuiting { 597 const ResolveContext & context; 597 598 const ast::SymbolTable & symtab; 598 599 public: … … 618 619 619 620 Finder( CandidateFinder & f ) 620 : symtab( f.localSyms ), selfFinder( f ), candidates( f.candidates ), tenv( f.env),621 targetType( f.targetType ) {}621 : context( f.context ), symtab( context.symtab ), selfFinder( f ), 622 candidates( f.candidates ), tenv( f.env ), targetType( f.targetType ) {} 622 623 623 624 void previsit( const ast::Node * ) { visit_children = false; } … … 872 873 Tuples::handleTupleAssignment( selfFinder, untypedExpr, argCandidates ); 873 874 874 CandidateFinder funcFinder { symtab, tenv };875 CandidateFinder funcFinder( context, tenv ); 875 876 if (auto nameExpr = untypedExpr->func.as<ast::NameExpr>()) { 876 877 auto kind = ast::SymbolTable::getSpecialFunctionKind(nameExpr->name); … … 918 919 // find function operators 919 920 ast::ptr< ast::Expr > opExpr = new ast::NameExpr{ untypedExpr->location, "?()" }; 920 CandidateFinder opFinder { symtab, tenv };921 CandidateFinder opFinder( context, tenv ); 921 922 // okay if there aren't any function operations 922 923 opFinder.find( opExpr, ResolvMode::withoutFailFast() ); … … 1059 1060 1060 1061 void postvisit( const ast::AddressExpr * addressExpr ) { 1061 CandidateFinder finder { symtab, tenv };1062 CandidateFinder finder( context, tenv ); 1062 1063 finder.find( addressExpr->arg ); 1063 1064 … … 1079 1080 ast::ptr< ast::Type > toType = castExpr->result; 1080 1081 assert( toType ); 1081 toType = resolveTypeof( toType, symtab);1082 toType = resolveTypeof( toType, context ); 1082 1083 // toType = SymTab::validateType( castExpr->location, toType, symtab ); 1083 1084 toType = adjustExprType( toType, tenv, symtab ); 1084 1085 1085 CandidateFinder finder { symtab, tenv, toType };1086 CandidateFinder finder( context, tenv, toType ); 1086 1087 finder.find( castExpr->arg, ResolvMode::withAdjustment() ); 1087 1088 … … 1136 1137 void postvisit( const ast::VirtualCastExpr * castExpr ) { 1137 1138 assertf( castExpr->result, "Implicit virtual cast targets not yet supported." 
); 1138 CandidateFinder finder { symtab, tenv };1139 CandidateFinder finder( context, tenv ); 1139 1140 // don't prune here, all alternatives guaranteed to have same type 1140 1141 finder.find( castExpr->arg, ResolvMode::withoutPrune() ); … … 1153 1154 auto target = inst->base.get(); 1154 1155 1155 CandidateFinder finder { symtab, tenv };1156 CandidateFinder finder( context, tenv ); 1156 1157 1157 1158 auto pick_alternatives = [target, this](CandidateList & found, bool expect_ref) { … … 1202 1203 1203 1204 void postvisit( const ast::UntypedMemberExpr * memberExpr ) { 1204 CandidateFinder aggFinder { symtab, tenv };1205 CandidateFinder aggFinder( context, tenv ); 1205 1206 aggFinder.find( memberExpr->aggregate, ResolvMode::withAdjustment() ); 1206 1207 for ( CandidateRef & agg : aggFinder.candidates ) { … … 1287 1288 addCandidate( 1288 1289 new ast::SizeofExpr{ 1289 sizeofExpr->location, resolveTypeof( sizeofExpr->type, symtab) },1290 sizeofExpr->location, resolveTypeof( sizeofExpr->type, context ) }, 1290 1291 tenv ); 1291 1292 } else { 1292 1293 // find all candidates for the argument to sizeof 1293 CandidateFinder finder { symtab, tenv };1294 CandidateFinder finder( context, tenv ); 1294 1295 finder.find( sizeofExpr->expr ); 1295 1296 // find the lowest-cost candidate, otherwise ambiguous … … 1311 1312 addCandidate( 1312 1313 new ast::AlignofExpr{ 1313 alignofExpr->location, resolveTypeof( alignofExpr->type, symtab) },1314 alignofExpr->location, resolveTypeof( alignofExpr->type, context ) }, 1314 1315 tenv ); 1315 1316 } else { 1316 1317 // find all candidates for the argument to alignof 1317 CandidateFinder finder { symtab, tenv };1318 CandidateFinder finder( context, tenv ); 1318 1319 finder.find( alignofExpr->expr ); 1319 1320 // find the lowest-cost candidate, otherwise ambiguous … … 1354 1355 1355 1356 void postvisit( const ast::LogicalExpr * logicalExpr ) { 1356 CandidateFinder finder1 { symtab, tenv };1357 CandidateFinder finder1( context, tenv ); 1357 1358 finder1.find( logicalExpr->arg1, ResolvMode::withAdjustment() ); 1358 1359 if ( finder1.candidates.empty() ) return; 1359 1360 1360 CandidateFinder finder2 { symtab, tenv };1361 CandidateFinder finder2( context, tenv ); 1361 1362 finder2.find( logicalExpr->arg2, ResolvMode::withAdjustment() ); 1362 1363 if ( finder2.candidates.empty() ) return; … … 1384 1385 void postvisit( const ast::ConditionalExpr * conditionalExpr ) { 1385 1386 // candidates for condition 1386 CandidateFinder finder1 { symtab, tenv };1387 CandidateFinder finder1( context, tenv ); 1387 1388 finder1.find( conditionalExpr->arg1, ResolvMode::withAdjustment() ); 1388 1389 if ( finder1.candidates.empty() ) return; 1389 1390 1390 1391 // candidates for true result 1391 CandidateFinder finder2 { symtab, tenv };1392 CandidateFinder finder2( context, tenv ); 1392 1393 finder2.find( conditionalExpr->arg2, ResolvMode::withAdjustment() ); 1393 1394 if ( finder2.candidates.empty() ) return; 1394 1395 1395 1396 // candidates for false result 1396 CandidateFinder finder3 { symtab, tenv };1397 CandidateFinder finder3( context, tenv ); 1397 1398 finder3.find( conditionalExpr->arg3, ResolvMode::withAdjustment() ); 1398 1399 if ( finder3.candidates.empty() ) return; … … 1445 1446 void postvisit( const ast::CommaExpr * commaExpr ) { 1446 1447 ast::TypeEnvironment env{ tenv }; 1447 ast::ptr< ast::Expr > arg1 = resolveInVoidContext( commaExpr->arg1, symtab, env );1448 1449 CandidateFinder finder2 { symtab, env };1448 ast::ptr< ast::Expr > arg1 = resolveInVoidContext( 
commaExpr->arg1, context, env ); 1449 1450 CandidateFinder finder2( context, env ); 1450 1451 finder2.find( commaExpr->arg2, ResolvMode::withAdjustment() ); 1451 1452 … … 1460 1461 1461 1462 void postvisit( const ast::ConstructorExpr * ctorExpr ) { 1462 CandidateFinder finder { symtab, tenv };1463 CandidateFinder finder( context, tenv ); 1463 1464 finder.find( ctorExpr->callExpr, ResolvMode::withoutPrune() ); 1464 1465 for ( CandidateRef & r : finder.candidates ) { … … 1469 1470 void postvisit( const ast::RangeExpr * rangeExpr ) { 1470 1471 // resolve low and high, accept candidates where low and high types unify 1471 CandidateFinder finder1 { symtab, tenv };1472 CandidateFinder finder1( context, tenv ); 1472 1473 finder1.find( rangeExpr->low, ResolvMode::withAdjustment() ); 1473 1474 if ( finder1.candidates.empty() ) return; 1474 1475 1475 CandidateFinder finder2 { symtab, tenv };1476 CandidateFinder finder2( context, tenv ); 1476 1477 finder2.find( rangeExpr->high, ResolvMode::withAdjustment() ); 1477 1478 if ( finder2.candidates.empty() ) return; … … 1549 1550 1550 1551 void postvisit( const ast::UniqueExpr * unqExpr ) { 1551 CandidateFinder finder { symtab, tenv };1552 CandidateFinder finder( context, tenv ); 1552 1553 finder.find( unqExpr->expr, ResolvMode::withAdjustment() ); 1553 1554 for ( CandidateRef & r : finder.candidates ) { … … 1558 1559 1559 1560 void postvisit( const ast::StmtExpr * stmtExpr ) { 1560 addCandidate( resolveStmtExpr( stmtExpr, symtab), tenv );1561 addCandidate( resolveStmtExpr( stmtExpr, context ), tenv ); 1561 1562 } 1562 1563 … … 1570 1571 for ( const ast::InitAlternative & initAlt : initExpr->initAlts ) { 1571 1572 // calculate target type 1572 const ast::Type * toType = resolveTypeof( initAlt.type, symtab);1573 const ast::Type * toType = resolveTypeof( initAlt.type, context ); 1573 1574 // toType = SymTab::validateType( initExpr->location, toType, symtab ); 1574 1575 toType = adjustExprType( toType, tenv, symtab ); … … 1576 1577 // types are not bound to the initialization type, since return type variables are 1577 1578 // only open for the duration of resolving the UntypedExpr. 1578 CandidateFinder finder { symtab, tenv, toType };1579 CandidateFinder finder( context, tenv, toType ); 1579 1580 finder.find( initExpr->expr, ResolvMode::withAdjustment() ); 1580 1581 for ( CandidateRef & cand : finder.candidates ) { … … 1693 1694 } 1694 1695 else { 1695 satisfyAssertions(candidate, localSyms, satisfied, errors);1696 satisfyAssertions(candidate, context.symtab, satisfied, errors); 1696 1697 needRecomputeKey = true; 1697 1698 } … … 1855 1856 r->expr = ast::mutate_field( 1856 1857 r->expr.get(), &ast::Expr::result, 1857 adjustExprType( r->expr->result, r->env, localSyms) );1858 adjustExprType( r->expr->result, r->env, context.symtab ) ); 1858 1859 } 1859 1860 } … … 1873 1874 1874 1875 for ( const auto & x : xs ) { 1875 out.emplace_back( localSyms, env );1876 out.emplace_back( context, env ); 1876 1877 out.back().find( x, ResolvMode::withAdjustment() ); 1877 1878 -
src/ResolvExpr/CandidateFinder.hpp
rba897d21 r2e9b59b 10 10 // Created On : Wed Jun 5 14:30:00 2019 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tue Oct 1 9:51:00 201913 // Update Count : 212 // Last Modified On : Wed Mar 16 15:22:00 2022 13 // Update Count : 3 14 14 // 15 15 … … 25 25 namespace ResolvExpr { 26 26 27 struct ResolveContext; 28 27 29 /// Data to perform expression resolution 28 30 struct CandidateFinder { 29 31 CandidateList candidates; ///< List of candidate resolutions 30 const ast::SymbolTable & localSyms; ///< Symbol table to lookup candidates32 const ResolveContext & context; ///< Information about where the canditates are being found. 31 33 const ast::TypeEnvironment & env; ///< Substitutions performed in this resolution 32 34 ast::ptr< ast::Type > targetType; ///< Target type for resolution … … 34 36 35 37 CandidateFinder( 36 const ast::SymbolTable & syms, const ast::TypeEnvironment & env,38 const ResolveContext & context, const ast::TypeEnvironment & env, 37 39 const ast::Type * tt = nullptr ) 38 : candidates(), localSyms( syms), env( env ), targetType( tt ) {}40 : candidates(), context( context ), env( env ), targetType( tt ) {} 39 41 40 42 /// Fill candidates with feasible resolutions for `expr` -
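The recurring change across the resolver files is replacing the bare `const ast::SymbolTable &` that CandidateFinder and its helpers carried with a ResolveContext that bundles the symbol table with the translation unit's global information (the data the WithConstTranslationUnit mixin exposes elsewhere in this changeset), so nested finders are now constructed as `CandidateFinder finder( context, tenv )`. Below is a minimal stand-alone C sketch of the pattern using invented stand-in types, not the compiler's own.

    /* Minimal C sketch of the refactoring pattern with invented stand-in
     * types: bundle the symbol table with per-translation-unit globals and
     * pass the bundle instead of the lone table. */
    struct SymbolTable;
    struct TranslationGlobal;

    struct ResolveContext {
        const struct SymbolTable       * symtab;
        const struct TranslationGlobal * global;
    };

    /* before: void find_candidates( const struct SymbolTable * symtab ); */
    /* after: the bundled context travels through every helper instead.   */
    void find_candidates( const struct ResolveContext * context );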
src/ResolvExpr/CandidatePrinter.cpp
rba897d21 r2e9b59b 10 10 // Created On : Tue Nov 9 9:54:00 2021 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tue Nov 9 15:47:00 202113 // Update Count : 012 // Last Modified On : Wed Mar 16 13:56:00 2022 13 // Update Count : 1 14 14 // 15 15 … … 22 22 #include "AST/TranslationUnit.hpp" 23 23 #include "ResolvExpr/CandidateFinder.hpp" 24 #include "ResolvExpr/Resolver.h" 24 25 25 26 #include <iostream> … … 29 30 namespace { 30 31 31 class CandidatePrintCore : public ast::WithSymbolTable { 32 class CandidatePrintCore : public ast::WithSymbolTable, 33 public ast::WithConstTranslationUnit { 32 34 std::ostream & os; 33 35 public: … … 36 38 void postvisit( const ast::ExprStmt * stmt ) { 37 39 ast::TypeEnvironment env; 38 CandidateFinder finder( symtab, env );40 CandidateFinder finder( { symtab, transUnit().global }, env ); 39 41 finder.find( stmt->expr, ResolvMode::withAdjustment() ); 40 42 int count = 1; -
src/ResolvExpr/ConversionCost.cc
rba897d21 r2e9b59b 333 333 } else if ( dynamic_cast< const EnumInstType * >( dest ) ) { 334 334 // xxx - not positive this is correct, but appears to allow casting int => enum 335 cost = Cost::unsafe; 335 // TODO 336 EnumDecl * decl = dynamic_cast< const EnumInstType * >( dest )->baseEnum; 337 if ( decl->base ) { 338 cost = Cost::infinity; 339 } else { 340 cost = Cost::unsafe; 341 } // if 336 342 } // if 337 343 // no cases for zero_t/one_t because it should not be possible to convert int, etc. to zero_t/one_t. … … 610 616 } else if ( dynamic_cast< const ast::EnumInstType * >( dst ) ) { 611 617 // xxx - not positive this is correct, but appears to allow casting int => enum 612 cost = Cost::unsafe; 618 const ast::EnumDecl * decl = (dynamic_cast< const ast::EnumInstType * >( dst ))->base.get(); 619 if ( decl->base ) { 620 cost = Cost::infinity; 621 } else { 622 cost = Cost::unsafe; 623 } // if 613 624 } 614 625 } -
src/ResolvExpr/RenameVars.h
rba897d21 r2e9b59b 36 36 }; 37 37 const ast::Type * renameTyVars( const ast::Type *, RenameMode mode = GEN_USAGE, bool reset = true ); 38 39 38 40 39 /// resets internal state of renamer to avoid overflow 41 40 void resetTyVarRenaming(); 42 43 44 41 } // namespace ResolvExpr 45 42 -
src/ResolvExpr/ResolveTypeof.cc
rba897d21 r2e9b59b 9 9 // Author : Richard C. Bilson 10 10 // Created On : Sun May 17 12:12:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Tue May 19 16:49:04 201513 // Update Count : 311 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Mar 16 16:09:00 2022 13 // Update Count : 4 14 14 // 15 15 … … 22 22 #include "AST/Node.hpp" 23 23 #include "AST/Pass.hpp" 24 #include "AST/TranslationUnit.hpp" 24 25 #include "AST/Type.hpp" 25 26 #include "AST/TypeEnvironment.hpp" … … 119 120 namespace { 120 121 struct ResolveTypeof_new : public ast::WithShortCircuiting { 121 const ast::SymbolTable & localSymtab; 122 123 ResolveTypeof_new( const ast::SymbolTable & syms ) : localSymtab( syms ) {} 122 const ResolveContext & context; 123 124 ResolveTypeof_new( const ResolveContext & context ) : 125 context( context ) {} 124 126 125 127 void previsit( const ast::TypeofType * ) { visit_children = false; } … … 137 139 ast::TypeEnvironment dummy; 138 140 ast::ptr< ast::Expr > newExpr = 139 resolveInVoidContext( typeofType->expr, localSymtab, dummy );141 resolveInVoidContext( typeofType->expr, context, dummy ); 140 142 assert( newExpr->result && ! newExpr->result->isVoid() ); 141 143 newType = newExpr->result; … … 161 163 } // anonymous namespace 162 164 163 const ast::Type * resolveTypeof( const ast::Type * type , const ast::SymbolTable & symtab) {164 ast::Pass< ResolveTypeof_new > mutator { symtab };165 const ast::Type * resolveTypeof( const ast::Type * type , const ResolveContext & context ) { 166 ast::Pass< ResolveTypeof_new > mutator( context ); 165 167 return type->accept( mutator ); 166 168 } 167 169 168 170 struct FixArrayDimension { 169 // should not require a mutable symbol table - prevent pass template instantiation 170 const ast::SymbolTable & _symtab; 171 FixArrayDimension(const ast::SymbolTable & symtab): _symtab(symtab) {} 171 const ResolveContext & context; 172 FixArrayDimension(const ResolveContext & context) : context( context ) {} 172 173 173 174 const ast::ArrayType * previsit (const ast::ArrayType * arrayType) { 174 175 if (!arrayType->dimension) return arrayType; 175 176 auto mutType = mutate(arrayType); 176 ast::ptr<ast::Type> sizetype = ast::sizeType ? ast::sizeType : new ast::BasicType(ast::BasicType::LongUnsignedInt); 177 mutType->dimension = findSingleExpression(arrayType->dimension, sizetype, _symtab); 177 auto globalSizeType = context.global.sizeType; 178 ast::ptr<ast::Type> sizetype = globalSizeType ? 
globalSizeType : new ast::BasicType(ast::BasicType::LongUnsignedInt); 179 mutType->dimension = findSingleExpression(arrayType->dimension, sizetype, context ); 178 180 179 181 if (InitTweak::isConstExpr(mutType->dimension)) { … … 187 189 }; 188 190 189 const ast::Type * fixArrayType( const ast::Type * type, const ast::SymbolTable & symtab) {190 ast::Pass<FixArrayDimension> visitor {symtab};191 const ast::Type * fixArrayType( const ast::Type * type, const ResolveContext & context ) { 192 ast::Pass<FixArrayDimension> visitor(context); 191 193 return type->accept(visitor); 192 194 } 193 195 194 const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ast::SymbolTable & symtab ) { 195 if (!decl->isTypeFixed) { 196 auto mutDecl = mutate(decl); 197 auto resolvedType = resolveTypeof(decl->type, symtab); 198 resolvedType = fixArrayType(resolvedType, symtab); 196 const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ResolveContext & context ) { 197 if (decl->isTypeFixed) { 198 return decl; 199 } 200 201 auto mutDecl = mutate(decl); 202 { 203 auto resolvedType = resolveTypeof(decl->type, context); 204 resolvedType = fixArrayType(resolvedType, context); 199 205 mutDecl->type = resolvedType; 200 201 // check variable length if object is an array. 202 // xxx - should this be part of fixObjectType? 203 204 /* 205 if (auto arrayType = dynamic_cast<const ast::ArrayType *>(resolvedType)) { 206 auto dimExpr = findSingleExpression(arrayType->dimension, ast::sizeType, symtab); 207 if (auto varexpr = arrayType->dimension.as<ast::VariableExpr>()) {// hoisted previously 208 if (InitTweak::isConstExpr(varexpr->var.strict_as<ast::ObjectDecl>()->init)) { 209 auto mutType = mutate(arrayType); 210 mutType->isVarLen = ast::LengthFlag::VariableLen; 211 mutDecl->type = mutType; 212 } 213 } 214 } 215 */ 216 217 218 if (!mutDecl->name.empty()) 219 mutDecl->mangleName = Mangle::mangle(mutDecl); // do not mangle unnamed variables 220 221 mutDecl->type = renameTyVars(mutDecl->type, RenameMode::GEN_EXPR_ID); 222 mutDecl->isTypeFixed = true; 223 return mutDecl; 224 } 225 return decl; 206 } 207 208 // Do not mangle unnamed variables. 209 if (!mutDecl->name.empty()) { 210 mutDecl->mangleName = Mangle::mangle(mutDecl); 211 } 212 213 mutDecl->type = renameTyVars(mutDecl->type, RenameMode::GEN_EXPR_ID); 214 mutDecl->isTypeFixed = true; 215 return mutDecl; 226 216 } 227 217 -
src/ResolvExpr/ResolveTypeof.h
rba897d21 r2e9b59b 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // ResolveTypeof.h -- 7 // ResolveTypeof.h -- 8 8 // 9 9 // Author : Richard C. Bilson 10 10 // Created On : Sun May 17 12:14:53 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Sat Jul 22 09:38:35 201713 // Update Count : 311 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Mar 16 11:33:00 2022 13 // Update Count : 4 14 14 // 15 15 … … 22 22 namespace ast { 23 23 class Type; 24 class SymbolTable;25 24 class ObjectDecl; 26 25 } 27 26 28 27 namespace ResolvExpr { 28 struct ResolveContext; 29 29 30 Type *resolveTypeof( Type*, const SymTab::Indexer &indexer ); 30 const ast::Type * resolveTypeof( const ast::Type *, const ast::SymbolTable& );31 const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ast::SymbolTable & symtab);31 const ast::Type * resolveTypeof( const ast::Type *, const ResolveContext & ); 32 const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ResolveContext & ); 32 33 } // namespace ResolvExpr 33 34 -
src/ResolvExpr/Resolver.cc
rba897d21 r2e9b59b 9 9 // Author : Aaron B. Moss 10 10 // Created On : Sun May 17 12:17:01 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Tue Feb 1 16:27:14202213 // Update Count : 24 511 // Last Modified By : Andrew Beach 12 // Last Modified On : Fri Mar 18 10:41:00 2022 13 // Update Count : 247 14 14 // 15 15 … … 997 997 /// Calls the CandidateFinder and finds the single best candidate 998 998 CandidateRef findUnfinishedKindExpression( 999 const ast::Expr * untyped, const ast::SymbolTable & symtab, const std::string & kind,999 const ast::Expr * untyped, const ResolveContext & context, const std::string & kind, 1000 1000 std::function<bool(const Candidate &)> pred = anyCandidate, ResolvMode mode = {} 1001 1001 ) { … … 1007 1007 ++recursion_level; 1008 1008 ast::TypeEnvironment env; 1009 CandidateFinder finder { symtab, env };1009 CandidateFinder finder( context, env ); 1010 1010 finder.find( untyped, recursion_level == 1 ? mode.atTopLevel() : mode ); 1011 1011 --recursion_level; … … 1129 1129 1130 1130 ast::ptr< ast::Expr > resolveInVoidContext( 1131 const ast::Expr * expr, const ast::SymbolTable & symtab, ast::TypeEnvironment & env 1131 const ast::Expr * expr, const ResolveContext & context, 1132 ast::TypeEnvironment & env 1132 1133 ) { 1133 1134 assertf( expr, "expected a non-null expression" ); … … 1136 1137 ast::ptr< ast::CastExpr > untyped = new ast::CastExpr{ expr }; 1137 1138 CandidateRef choice = findUnfinishedKindExpression( 1138 untyped, symtab, "", anyCandidate, ResolvMode::withAdjustment() );1139 untyped, context, "", anyCandidate, ResolvMode::withAdjustment() ); 1139 1140 1140 1141 // a cast expression has either 0 or 1 interpretations (by language rules); … … 1149 1150 /// context. 1150 1151 ast::ptr< ast::Expr > findVoidExpression( 1151 const ast::Expr * untyped, const ast::SymbolTable & symtab1152 const ast::Expr * untyped, const ResolveContext & context 1152 1153 ) { 1153 1154 ast::TypeEnvironment env; 1154 ast::ptr< ast::Expr > newExpr = resolveInVoidContext( untyped, symtab, env );1155 ast::ptr< ast::Expr > newExpr = resolveInVoidContext( untyped, context, env ); 1155 1156 finishExpr( newExpr, env, untyped->env ); 1156 1157 return newExpr; … … 1163 1164 /// lowest cost, returning the resolved version 1164 1165 ast::ptr< ast::Expr > findKindExpression( 1165 const ast::Expr * untyped, const ast::SymbolTable & symtab,1166 const ast::Expr * untyped, const ResolveContext & context, 1166 1167 std::function<bool(const Candidate &)> pred = anyCandidate, 1167 1168 const std::string & kind = "", ResolvMode mode = {} … … 1169 1170 if ( ! 
untyped ) return {}; 1170 1171 CandidateRef choice = 1171 findUnfinishedKindExpression( untyped, symtab, kind, pred, mode );1172 findUnfinishedKindExpression( untyped, context, kind, pred, mode ); 1172 1173 ResolvExpr::finishExpr( choice->expr, choice->env, untyped->env ); 1173 1174 return std::move( choice->expr ); … … 1176 1177 /// Resolve `untyped` to the single expression whose candidate is the best match 1177 1178 ast::ptr< ast::Expr > findSingleExpression( 1178 const ast::Expr * untyped, const ast::SymbolTable & symtab1179 const ast::Expr * untyped, const ResolveContext & context 1179 1180 ) { 1180 1181 Stats::ResolveTime::start( untyped ); 1181 auto res = findKindExpression( untyped, symtab);1182 auto res = findKindExpression( untyped, context ); 1182 1183 Stats::ResolveTime::stop(); 1183 1184 return res; … … 1186 1187 1187 1188 ast::ptr< ast::Expr > findSingleExpression( 1188 const ast::Expr * untyped, const ast::Type * type, const ast::SymbolTable & symtab 1189 const ast::Expr * untyped, const ast::Type * type, 1190 const ResolveContext & context 1189 1191 ) { 1190 1192 assert( untyped && type ); 1191 1193 ast::ptr< ast::Expr > castExpr = new ast::CastExpr{ untyped, type }; 1192 ast::ptr< ast::Expr > newExpr = findSingleExpression( castExpr, symtab);1193 removeExtraneousCast( newExpr, symtab );1194 ast::ptr< ast::Expr > newExpr = findSingleExpression( castExpr, context ); 1195 removeExtraneousCast( newExpr, context.symtab ); 1194 1196 return newExpr; 1195 1197 } … … 1217 1219 /// Resolve `untyped` as an integral expression, returning the resolved version 1218 1220 ast::ptr< ast::Expr > findIntegralExpression( 1219 const ast::Expr * untyped, const ast::SymbolTable & symtab1221 const ast::Expr * untyped, const ResolveContext & context 1220 1222 ) { 1221 return findKindExpression( untyped, symtab, hasIntegralType, "condition" );1223 return findKindExpression( untyped, context, hasIntegralType, "condition" ); 1222 1224 } 1223 1225 … … 1249 1251 // for work previously in GenInit 1250 1252 static InitTweak::ManagedTypes_new managedTypes; 1253 ResolveContext context; 1251 1254 1252 1255 bool inEnumDecl = false; … … 1254 1257 public: 1255 1258 static size_t traceId; 1256 Resolver_new() = default; 1257 Resolver_new( const ast::SymbolTable & syms ) { symtab = syms; } 1259 Resolver_new( const ast::TranslationGlobal & global ) : 1260 context{ symtab, global } {} 1261 Resolver_new( const ResolveContext & context ) : 1262 ast::WithSymbolTable{ context.symtab }, 1263 context{ symtab, context.global } {} 1258 1264 1259 1265 const ast::FunctionDecl * previsit( const ast::FunctionDecl * ); … … 1272 1278 const ast::AsmStmt * previsit( const ast::AsmStmt * ); 1273 1279 const ast::IfStmt * previsit( const ast::IfStmt * ); 1274 const ast::WhileDoStmt * 1280 const ast::WhileDoStmt * previsit( const ast::WhileDoStmt * ); 1275 1281 const ast::ForStmt * previsit( const ast::ForStmt * ); 1276 1282 const ast::SwitchStmt * previsit( const ast::SwitchStmt * ); 1277 const ast::Case Stmt * previsit( const ast::CaseStmt* );1283 const ast::CaseClause * previsit( const ast::CaseClause * ); 1278 1284 const ast::BranchStmt * previsit( const ast::BranchStmt * ); 1279 1285 const ast::ReturnStmt * previsit( const ast::ReturnStmt * ); 1280 1286 const ast::ThrowStmt * previsit( const ast::ThrowStmt * ); 1281 const ast::Catch Stmt * previsit( const ast::CatchStmt* );1282 const ast::Catch Stmt * postvisit( const ast::CatchStmt* );1287 const ast::CatchClause * previsit( const ast::CatchClause * ); 1288 const ast::CatchClause 
* postvisit( const ast::CatchClause * ); 1283 1289 const ast::WaitForStmt * previsit( const ast::WaitForStmt * ); 1284 1290 const ast::WithStmt * previsit( const ast::WithStmt * ); … … 1299 1305 1300 1306 void resolve( ast::TranslationUnit& translationUnit ) { 1301 ast::Pass< Resolver_new >::run( translationUnit );1307 ast::Pass< Resolver_new >::run( translationUnit, translationUnit.global ); 1302 1308 } 1303 1309 1304 1310 ast::ptr< ast::Init > resolveCtorInit( 1305 const ast::ConstructorInit * ctorInit, const ast::SymbolTable & symtab1311 const ast::ConstructorInit * ctorInit, const ResolveContext & context 1306 1312 ) { 1307 1313 assert( ctorInit ); 1308 ast::Pass< Resolver_new > resolver { symtab };1314 ast::Pass< Resolver_new > resolver( context ); 1309 1315 return ctorInit->accept( resolver ); 1310 1316 } 1311 1317 1312 1318 const ast::Expr * resolveStmtExpr( 1313 const ast::StmtExpr * stmtExpr, const ast::SymbolTable & symtab1319 const ast::StmtExpr * stmtExpr, const ResolveContext & context 1314 1320 ) { 1315 1321 assert( stmtExpr ); 1316 ast::Pass< Resolver_new > resolver { symtab };1322 ast::Pass< Resolver_new > resolver( context ); 1317 1323 auto ret = mutate(stmtExpr->accept(resolver)); 1318 1324 strict_dynamic_cast< ast::StmtExpr * >( ret )->computeResult(); … … 1321 1327 1322 1328 namespace { 1323 const ast::Attribute * handleAttribute(const CodeLocation & loc, const ast::Attribute * attr, const ast::SymbolTable & symtab) {1329 const ast::Attribute * handleAttribute(const CodeLocation & loc, const ast::Attribute * attr, const ResolveContext & context) { 1324 1330 std::string name = attr->normalizedName(); 1325 1331 if (name == "constructor" || name == "destructor") { 1326 1332 if (attr->params.size() == 1) { 1327 1333 auto arg = attr->params.front(); 1328 auto resolved = ResolvExpr::findSingleExpression( arg, new ast::BasicType( ast::BasicType::LongLongSignedInt ), symtab);1334 auto resolved = ResolvExpr::findSingleExpression( arg, new ast::BasicType( ast::BasicType::LongLongSignedInt ), context ); 1329 1335 auto result = eval(arg); 1330 1336 … … 1369 1375 1370 1376 for (auto & attr: mutDecl->attributes) { 1371 attr = handleAttribute(mutDecl->location, attr, symtab);1377 attr = handleAttribute(mutDecl->location, attr, context ); 1372 1378 } 1373 1379 … … 1379 1385 for (auto & typeParam : mutDecl->type_params) { 1380 1386 symtab.addType(typeParam); 1381 mutType->forall.emplace_back(new ast::TypeInstType(typeParam ->name, typeParam));1387 mutType->forall.emplace_back(new ast::TypeInstType(typeParam)); 1382 1388 } 1383 1389 for (auto & asst : mutDecl->assertions) { 1384 asst = fixObjectType(asst.strict_as<ast::ObjectDecl>(), symtab);1390 asst = fixObjectType(asst.strict_as<ast::ObjectDecl>(), context); 1385 1391 symtab.addId(asst); 1386 1392 mutType->assertions.emplace_back(new ast::VariableExpr(functionDecl->location, asst)); … … 1394 1400 1395 1401 for (auto & param : mutDecl->params) { 1396 param = fixObjectType(param.strict_as<ast::ObjectDecl>(), symtab);1402 param = fixObjectType(param.strict_as<ast::ObjectDecl>(), context); 1397 1403 symtab.addId(param); 1398 1404 paramTypes.emplace_back(param->get_type()); 1399 1405 } 1400 1406 for (auto & ret : mutDecl->returns) { 1401 ret = fixObjectType(ret.strict_as<ast::ObjectDecl>(), symtab);1407 ret = fixObjectType(ret.strict_as<ast::ObjectDecl>(), context); 1402 1408 returnTypes.emplace_back(ret->get_type()); 1403 1409 } … … 1470 1476 // enumerator initializers should not use the enum type to initialize, since the 1471 1477 // 
enum type is still incomplete at this point. Use `int` instead. 1472 objectDecl = fixObjectType(objectDecl, symtab); 1473 currentObject = ast::CurrentObject{ 1474 objectDecl->location, new ast::BasicType{ ast::BasicType::SignedInt } }; 1478 1479 if (dynamic_cast< const ast::EnumInstType * >( objectDecl->get_type() )->base->base) { // const ast::PointerType & 1480 // const ast::Type * enumBase = (dynamic_cast< const ast::EnumInstType * >( objectDecl->get_type() )->base->base.get()); 1481 // const ast::PointerType * enumBaseAsPtr = dynamic_cast<const ast::PointerType *>(enumBase); 1482 1483 // if ( enumBaseAsPtr ) { 1484 // const ast::Type * pointerBase = enumBaseAsPtr->base.get(); 1485 // if ( dynamic_cast<const ast::BasicType *>(pointerBase) ) { 1486 // objectDecl = fixObjectType(objectDecl, context); 1487 // if (dynamic_cast<const ast::BasicType *>(pointerBase)->kind == ast::BasicType::Char) 1488 // currentObject = ast::CurrentObject{ 1489 // objectDecl->location, new ast::PointerType{ 1490 // new ast::BasicType{ ast::BasicType::Char } 1491 // } }; 1492 // } else { 1493 // objectDecl = fixObjectType(objectDecl, context); 1494 // currentObject = ast::CurrentObject{objectDecl->location, new ast::BasicType{ ast::BasicType::SignedInt } }; 1495 // } 1496 // } 1497 objectDecl = fixObjectType( objectDecl, context ); 1498 const ast::Type * enumBase = (dynamic_cast< const ast::EnumInstType * >( objectDecl->get_type() )->base->base.get()); 1499 currentObject = ast::CurrentObject{ 1500 objectDecl->location, 1501 enumBase 1502 }; 1503 } else { 1504 objectDecl = fixObjectType( objectDecl, context ); 1505 currentObject = ast::CurrentObject{ 1506 objectDecl->location, new ast::BasicType{ ast::BasicType::SignedInt } }; 1507 } 1508 1475 1509 } 1476 1510 else { 1477 1511 if (!objectDecl->isTypeFixed) { 1478 auto newDecl = fixObjectType(objectDecl, symtab);1512 auto newDecl = fixObjectType(objectDecl, context); 1479 1513 auto mutDecl = mutate(newDecl); 1480 1514 … … 1507 1541 // nested type decls are hoisted already. 
no need to do anything 1508 1542 if (auto obj = member.as<ast::ObjectDecl>()) { 1509 member = fixObjectType(obj, symtab);1543 member = fixObjectType(obj, context); 1510 1544 } 1511 1545 } … … 1530 1564 return ast::mutate_field( 1531 1565 assertDecl, &ast::StaticAssertDecl::cond, 1532 findIntegralExpression( assertDecl->cond, symtab) );1566 findIntegralExpression( assertDecl->cond, context ) ); 1533 1567 } 1534 1568 1535 1569 template< typename PtrType > 1536 const PtrType * handlePtrType( const PtrType * type, const ast::SymbolTable & symtab) {1570 const PtrType * handlePtrType( const PtrType * type, const ResolveContext & context ) { 1537 1571 if ( type->dimension ) { 1538 ast::ptr< ast::Type > sizeType = ast::sizeType;1572 ast::ptr< ast::Type > sizeType = context.global.sizeType; 1539 1573 ast::mutate_field( 1540 1574 type, &PtrType::dimension, 1541 findSingleExpression( type->dimension, sizeType, symtab) );1575 findSingleExpression( type->dimension, sizeType, context ) ); 1542 1576 } 1543 1577 return type; … … 1545 1579 1546 1580 const ast::ArrayType * Resolver_new::previsit( const ast::ArrayType * at ) { 1547 return handlePtrType( at, symtab);1581 return handlePtrType( at, context ); 1548 1582 } 1549 1583 1550 1584 const ast::PointerType * Resolver_new::previsit( const ast::PointerType * pt ) { 1551 return handlePtrType( pt, symtab);1585 return handlePtrType( pt, context ); 1552 1586 } 1553 1587 … … 1557 1591 1558 1592 return ast::mutate_field( 1559 exprStmt, &ast::ExprStmt::expr, findVoidExpression( exprStmt->expr, symtab) );1593 exprStmt, &ast::ExprStmt::expr, findVoidExpression( exprStmt->expr, context ) ); 1560 1594 } 1561 1595 … … 1564 1598 1565 1599 asmExpr = ast::mutate_field( 1566 asmExpr, &ast::AsmExpr::operand, findVoidExpression( asmExpr->operand, symtab) );1600 asmExpr, &ast::AsmExpr::operand, findVoidExpression( asmExpr->operand, context ) ); 1567 1601 1568 1602 return asmExpr; … … 1578 1612 const ast::IfStmt * Resolver_new::previsit( const ast::IfStmt * ifStmt ) { 1579 1613 return ast::mutate_field( 1580 ifStmt, &ast::IfStmt::cond, findIntegralExpression( ifStmt->cond, symtab) );1614 ifStmt, &ast::IfStmt::cond, findIntegralExpression( ifStmt->cond, context ) ); 1581 1615 } 1582 1616 1583 1617 const ast::WhileDoStmt * Resolver_new::previsit( const ast::WhileDoStmt * whileDoStmt ) { 1584 1618 return ast::mutate_field( 1585 whileDoStmt, &ast::WhileDoStmt::cond, findIntegralExpression( whileDoStmt->cond, symtab) );1619 whileDoStmt, &ast::WhileDoStmt::cond, findIntegralExpression( whileDoStmt->cond, context ) ); 1586 1620 } 1587 1621 … … 1589 1623 if ( forStmt->cond ) { 1590 1624 forStmt = ast::mutate_field( 1591 forStmt, &ast::ForStmt::cond, findIntegralExpression( forStmt->cond, symtab) );1625 forStmt, &ast::ForStmt::cond, findIntegralExpression( forStmt->cond, context ) ); 1592 1626 } 1593 1627 1594 1628 if ( forStmt->inc ) { 1595 1629 forStmt = ast::mutate_field( 1596 forStmt, &ast::ForStmt::inc, findVoidExpression( forStmt->inc, symtab) );1630 forStmt, &ast::ForStmt::inc, findVoidExpression( forStmt->inc, context ) ); 1597 1631 } 1598 1632 … … 1604 1638 switchStmt = ast::mutate_field( 1605 1639 switchStmt, &ast::SwitchStmt::cond, 1606 findIntegralExpression( switchStmt->cond, symtab) );1640 findIntegralExpression( switchStmt->cond, context ) ); 1607 1641 currentObject = ast::CurrentObject{ switchStmt->location, switchStmt->cond->result }; 1608 1642 return switchStmt; 1609 1643 } 1610 1644 1611 const ast::Case Stmt * Resolver_new::previsit( const ast::CaseStmt* caseStmt ) 
{1645 const ast::CaseClause * Resolver_new::previsit( const ast::CaseClause * caseStmt ) { 1612 1646 if ( caseStmt->cond ) { 1613 1647 std::deque< ast::InitAlternative > initAlts = currentObject.getOptions(); … … 1617 1651 ast::ptr< ast::Expr > untyped = 1618 1652 new ast::CastExpr{ caseStmt->location, caseStmt->cond, initAlts.front().type }; 1619 ast::ptr< ast::Expr > newExpr = findSingleExpression( untyped, symtab);1653 ast::ptr< ast::Expr > newExpr = findSingleExpression( untyped, context ); 1620 1654 1621 1655 // case condition cannot have a cast in C, so it must be removed here, regardless of … … 1625 1659 } 1626 1660 1627 caseStmt = ast::mutate_field( caseStmt, &ast::Case Stmt::cond, newExpr );1661 caseStmt = ast::mutate_field( caseStmt, &ast::CaseClause::cond, newExpr ); 1628 1662 } 1629 1663 return caseStmt; … … 1638 1672 branchStmt = ast::mutate_field( 1639 1673 branchStmt, &ast::BranchStmt::computedTarget, 1640 findSingleExpression( branchStmt->computedTarget, target, symtab) );1674 findSingleExpression( branchStmt->computedTarget, target, context ) ); 1641 1675 } 1642 1676 return branchStmt; … … 1648 1682 returnStmt = ast::mutate_field( 1649 1683 returnStmt, &ast::ReturnStmt::expr, 1650 findSingleExpression( returnStmt->expr, functionReturn, symtab) );1684 findSingleExpression( returnStmt->expr, functionReturn, context ) ); 1651 1685 } 1652 1686 return returnStmt; … … 1663 1697 throwStmt = ast::mutate_field( 1664 1698 throwStmt, &ast::ThrowStmt::expr, 1665 findSingleExpression( throwStmt->expr, exceptType, symtab) );1699 findSingleExpression( throwStmt->expr, exceptType, context ) ); 1666 1700 } 1667 1701 return throwStmt; 1668 1702 } 1669 1703 1670 const ast::Catch Stmt * Resolver_new::previsit( const ast::CatchStmt * catchStmt) {1704 const ast::CatchClause * Resolver_new::previsit( const ast::CatchClause * catchClause ) { 1671 1705 // Until we are very sure this invarent (ifs that move between passes have then) 1672 1706 // holds, check it. This allows a check for when to decode the mangling. 1673 if ( auto ifStmt = catch Stmt->body.as<ast::IfStmt>() ) {1707 if ( auto ifStmt = catchClause->body.as<ast::IfStmt>() ) { 1674 1708 assert( ifStmt->then ); 1675 1709 } 1676 1710 // Encode the catchStmt so the condition can see the declaration. 1677 if ( catch Stmt->cond ) {1678 ast::Catch Stmt * stmt = mutate( catchStmt);1679 stmt->body = new ast::IfStmt( stmt->location, stmt->cond, nullptr, stmt->body );1680 stmt->cond = nullptr;1681 return stmt;1682 } 1683 return catch Stmt;1684 } 1685 1686 const ast::Catch Stmt * Resolver_new::postvisit( const ast::CatchStmt * catchStmt) {1711 if ( catchClause->cond ) { 1712 ast::CatchClause * clause = mutate( catchClause ); 1713 clause->body = new ast::IfStmt( clause->location, clause->cond, nullptr, clause->body ); 1714 clause->cond = nullptr; 1715 return clause; 1716 } 1717 return catchClause; 1718 } 1719 1720 const ast::CatchClause * Resolver_new::postvisit( const ast::CatchClause * catchClause ) { 1687 1721 // Decode the catchStmt so everything is stored properly. 
1688 const ast::IfStmt * ifStmt = catch Stmt->body.as<ast::IfStmt>();1722 const ast::IfStmt * ifStmt = catchClause->body.as<ast::IfStmt>(); 1689 1723 if ( nullptr != ifStmt && nullptr == ifStmt->then ) { 1690 1724 assert( ifStmt->cond ); 1691 1725 assert( ifStmt->else_ ); 1692 ast::Catch Stmt * stmt = ast::mutate( catchStmt);1693 stmt->cond = ifStmt->cond;1694 stmt->body = ifStmt->else_;1726 ast::CatchClause * clause = ast::mutate( catchClause ); 1727 clause->cond = ifStmt->cond; 1728 clause->body = ifStmt->else_; 1695 1729 // ifStmt should be implicately deleted here. 1696 return stmt;1697 } 1698 return catch Stmt;1730 return clause; 1731 } 1732 return catchClause; 1699 1733 } 1700 1734 … … 1707 1741 1708 1742 ast::TypeEnvironment env; 1709 CandidateFinder funcFinder { symtab, env };1743 CandidateFinder funcFinder( context, env ); 1710 1744 1711 1745 // Find all candidates for a function in canonical form … … 1921 1955 ); 1922 1956 1923 clause2.target.args.emplace_back( findSingleExpression( init, symtab) );1957 clause2.target.args.emplace_back( findSingleExpression( init, context ) ); 1924 1958 } 1925 1959 1926 1960 // Resolve the conditions as if it were an IfStmt, statements normally 1927 clause2.cond = findSingleExpression( clause.cond, symtab);1961 clause2.cond = findSingleExpression( clause.cond, context ); 1928 1962 clause2.stmt = clause.stmt->accept( *visitor ); 1929 1963 … … 1940 1974 ast::ptr< ast::Type > target = 1941 1975 new ast::BasicType{ ast::BasicType::LongLongUnsignedInt }; 1942 timeout2.time = findSingleExpression( stmt->timeout.time, target, symtab);1943 timeout2.cond = findSingleExpression( stmt->timeout.cond, symtab);1976 timeout2.time = findSingleExpression( stmt->timeout.time, target, context ); 1977 timeout2.cond = findSingleExpression( stmt->timeout.cond, context ); 1944 1978 timeout2.stmt = stmt->timeout.stmt->accept( *visitor ); 1945 1979 … … 1954 1988 ast::WaitForStmt::OrElse orElse2; 1955 1989 1956 orElse2.cond = findSingleExpression( stmt->orElse.cond, symtab);1990 orElse2.cond = findSingleExpression( stmt->orElse.cond, context ); 1957 1991 orElse2.stmt = stmt->orElse.stmt->accept( *visitor ); 1958 1992 … … 1975 2009 for (auto & expr : exprs) { 1976 2010 // only struct- and union-typed expressions are viable candidates 1977 expr = findKindExpression( expr, symtab, structOrUnion, "with expression" );2011 expr = findKindExpression( expr, context, structOrUnion, "with expression" ); 1978 2012 1979 2013 // if with expression might be impure, create a temporary so that it is evaluated once … … 2001 2035 ast::ptr< ast::Expr > untyped = new ast::UntypedInitExpr{ 2002 2036 singleInit->location, singleInit->value, currentObject.getOptions() }; 2003 ast::ptr<ast::Expr> newExpr = findSingleExpression( untyped, symtab);2037 ast::ptr<ast::Expr> newExpr = findSingleExpression( untyped, context ); 2004 2038 const ast::InitExpr * initExpr = newExpr.strict_as< ast::InitExpr >(); 2005 2039 -
src/ResolvExpr/Resolver.h
rba897d21 r2e9b59b 9 9 // Author : Richard C. Bilson 10 10 // Created On : Sun May 17 12:18:34 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Mon Feb 18 20:40:38 201913 // Update Count : 411 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Mar 16 11:32:00 2022 13 // Update Count : 5 14 14 // 15 15 … … 23 23 class Declaration; 24 24 class Expression; 25 class DeletedExpr; 25 26 class StmtExpr; 27 class Type; 26 28 namespace SymTab { 27 29 class Indexer; … … 35 37 class StmtExpr; 36 38 class SymbolTable; 39 class TranslationGlobal; 37 40 class TranslationUnit; 38 41 class Type; … … 55 58 void resolveWithExprs( std::list< Declaration * > & translationUnit ); 56 59 60 /// Helper Type: Passes around information between various sub-calls. 61 struct ResolveContext { 62 const ast::SymbolTable & symtab; 63 const ast::TranslationGlobal & global; 64 }; 65 57 66 /// Checks types and binds syntactic constructs to typed representations 58 67 void resolve( ast::TranslationUnit& translationUnit ); … … 62 71 /// context. 63 72 ast::ptr< ast::Expr > resolveInVoidContext( 64 const ast::Expr * expr, const ast::SymbolTable & symtab, ast::TypeEnvironment & env );73 const ast::Expr * expr, const ResolveContext &, ast::TypeEnvironment & env ); 65 74 /// Resolve `untyped` to the single expression whose candidate is the best match for the 66 75 /// given type. 67 76 ast::ptr< ast::Expr > findSingleExpression( 68 const ast::Expr * untyped, const ast::Type * type, const ast::SymbolTable & symtab);77 const ast::Expr * untyped, const ast::Type * type, const ResolveContext & ); 69 78 ast::ptr< ast::Expr > findVoidExpression( 70 const ast::Expr * untyped, const ast::SymbolTable & symtab);79 const ast::Expr * untyped, const ResolveContext & ); 71 80 /// Resolves a constructor init expression 72 81 ast::ptr< ast::Init > resolveCtorInit( 73 const ast::ConstructorInit * ctorInit, const ast::SymbolTable & symtab);82 const ast::ConstructorInit * ctorInit, const ResolveContext & context ); 74 83 /// Resolves a statement expression 75 84 const ast::Expr * resolveStmtExpr( 76 const ast::StmtExpr * stmtExpr, const ast::SymbolTable & symtab);85 const ast::StmtExpr * stmtExpr, const ResolveContext & context ); 77 86 } // namespace ResolvExpr 78 87 -
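The ResolveContext introduced above bundles the symbol table with the per-translation-unit globals (sizeType, the dereference operator, and so on), so the resolver entry points no longer read mutable global variables. A minimal sketch of how a caller is expected to build one and hand it to the resolution helpers; the wrapper function and its names are illustrative, not part of the changeset:

    // Sketch only: thread a symbol table and the unit's globals through the
    // resolver via ResolveContext, as the new entry points above expect.
    ast::ptr< ast::Expr > resolveArrayDimension(
            const ast::Expr * dim, ast::SymbolTable & symtab,
            ast::TranslationUnit & unit ) {
        ResolvExpr::ResolveContext context { symtab, unit.global };
        // resolve the dimension against the dialect's size type, mirroring
        // what handlePtrType() does for ArrayType/PointerType dimensions
        return ResolvExpr::findSingleExpression(
            dim, context.global.sizeType, context );
    }

Passes that already derive from ast::WithSymbolTable can instead construct Resolver_new from a ResolveContext, as the second constructor in the Resolver.cc hunk above does.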
src/ResolvExpr/Unify.cc
rba897d21 r2e9b59b 943 943 // check that the other type is compatible and named the same 944 944 auto otherInst = dynamic_cast< const XInstType * >( other ); 945 this->result = otherInst && inst->name == otherInst->name;945 if (otherInst && inst->name == otherInst->name) this->result = otherInst; 946 946 return otherInst; 947 947 } -
src/SymTab/Validate.cc
rba897d21 r2e9b59b 395 395 TranslateDimensionGenericParameters::translateDimensions( translationUnit ); 396 396 }); 397 if (!useNewAST) { 397 398 Stats::Time::TimeBlock("Resolve Enum Initializers", [&]() { 398 399 acceptAll( translationUnit, rei ); // must happen after translateDimensions because rei needs identifier lookup, which needs name mangling 399 400 }); 401 } 400 402 Stats::Time::TimeBlock("Check Function Returns", [&]() { 401 403 ReturnChecker::checkFunctionReturns( translationUnit ); … … 405 407 }); 406 408 } 409 } 410 411 static void decayForallPointers( std::list< Declaration * > & translationUnit ) { 412 PassVisitor<TraitExpander_old> te; 413 acceptAll( translationUnit, te ); 414 PassVisitor<AssertionFixer_old> af; 415 acceptAll( translationUnit, af ); 416 PassVisitor<CheckOperatorTypes_old> cot; 417 acceptAll( translationUnit, cot ); 418 PassVisitor<FixUniqueIds_old> fui; 419 acceptAll( translationUnit, fui ); 407 420 } 408 421 … … 474 487 } 475 488 476 void decayForallPointers( std::list< Declaration * > & translationUnit ) {477 PassVisitor<TraitExpander_old> te;478 acceptAll( translationUnit, te );479 PassVisitor<AssertionFixer_old> af;480 acceptAll( translationUnit, af );481 PassVisitor<CheckOperatorTypes_old> cot;482 acceptAll( translationUnit, cot );483 PassVisitor<FixUniqueIds_old> fui;484 acceptAll( translationUnit, fui );485 }486 487 void decayForallPointersA( std::list< Declaration * > & translationUnit ) {488 PassVisitor<TraitExpander_old> te;489 acceptAll( translationUnit, te );490 }491 void decayForallPointersB( std::list< Declaration * > & translationUnit ) {492 PassVisitor<AssertionFixer_old> af;493 acceptAll( translationUnit, af );494 }495 void decayForallPointersC( std::list< Declaration * > & translationUnit ) {496 PassVisitor<CheckOperatorTypes_old> cot;497 acceptAll( translationUnit, cot );498 }499 void decayForallPointersD( std::list< Declaration * > & translationUnit ) {500 PassVisitor<FixUniqueIds_old> fui;501 acceptAll( translationUnit, fui );502 }503 504 489 void validate( std::list< Declaration * > &translationUnit, __attribute__((unused)) bool doDebug ) { 505 490 validate_A( translationUnit ); … … 989 974 // need to resolve enumerator initializers early so that other passes that determine if an expression is constexpr have the appropriate information. 
990 975 SingleInit * init = strict_dynamic_cast<SingleInit *>( field->init ); 991 ResolvExpr::findSingleExpression( init->value, new BasicType( Type::Qualifiers(), BasicType::SignedInt ), indexer ); 976 if ( !enumDecl->base || dynamic_cast<BasicType *>(enumDecl->base)) 977 ResolvExpr::findSingleExpression( init->value, new BasicType( Type::Qualifiers(), BasicType::SignedInt ), indexer ); 978 else { 979 if (dynamic_cast<PointerType *>(enumDecl->base)) { 980 auto typePtr = dynamic_cast<PointerType *>(enumDecl->base); 981 ResolvExpr::findSingleExpression( init->value, 982 new PointerType( Type::Qualifiers(), typePtr->base ), indexer ); 983 } else { 984 ResolvExpr::findSingleExpression( init->value, new BasicType( Type::Qualifiers(), BasicType::SignedInt ), indexer ); 985 } 986 } 987 992 988 } 993 989 } 990 994 991 } // if 995 992 } … … 1255 1252 declsToAddBefore.push_back( new UnionDecl( aggDecl->name, noAttributes, tyDecl->linkage ) ); 1256 1253 } else if ( EnumInstType * enumDecl = dynamic_cast< EnumInstType * >( designatorType ) ) { 1257 declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage ) ); 1254 // declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage, enumDecl->baseEnum->base ) ); 1255 if (enumDecl->baseEnum) { 1256 declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage, enumDecl->baseEnum->base ) ); 1257 } else { 1258 declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage ) ); 1259 } 1258 1260 } // if 1259 1261 return tyDecl->clone(); -
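The new branches above make enumerator initializers resolve against the enumeration's declared base type when it has one, instead of always against int; the pointer case is what allows, for example, a hypothetical string-valued enumeration whose enumerators are initialized with char * constants. The selection logic in isolation, using the same old-AST types as the hunk (the helper name is invented for illustration):

    // Illustration only: choose the type an enumerator initializer is resolved
    // against, following the cases handled in the hunk above.
    Type * enumeratorTargetType( EnumDecl * enumDecl ) {
        if ( ! enumDecl->base || dynamic_cast< BasicType * >( enumDecl->base ) ) {
            // plain enum, or an integral base: keep the historical int target
            return new BasicType( Type::Qualifiers(), BasicType::SignedInt );
        }
        if ( PointerType * ptrType = dynamic_cast< PointerType * >( enumDecl->base ) ) {
            // pointer base (e.g. char *): resolve against that pointer type
            return new PointerType( Type::Qualifiers(), ptrType->base );
        }
        // anything else currently falls back to int, as in the code above
        return new BasicType( Type::Qualifiers(), BasicType::SignedInt );
    }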
src/SymTab/Validate.h
rba897d21 r2e9b59b 42 42 void validate_E( std::list< Declaration * > &translationUnit ); 43 43 void validate_F( std::list< Declaration * > &translationUnit ); 44 void decayForallPointers( std::list< Declaration * > & translationUnit );45 void decayForallPointersA( std::list< Declaration * > & translationUnit );46 void decayForallPointersB( std::list< Declaration * > & translationUnit );47 void decayForallPointersC( std::list< Declaration * > & translationUnit );48 void decayForallPointersD( std::list< Declaration * > & translationUnit );49 44 50 45 const ast::Type * validateType( -
src/SynTree/AggregateDecl.cc
rba897d21 r2e9b59b 59 59 } // if 60 60 os << " with body " << has_body(); 61 62 61 if ( ! parameters.empty() ) { 63 62 os << endl << indent << "... with parameters" << endl; … … 106 105 const char * EnumDecl::typeString() const { return aggrString( Enum ); } 107 106 107 void EnumDecl::print( std::ostream & os, Indenter indent ) const { 108 AggregateDecl::print(os, indent); 109 os << " with base? " << (base? "True" : "False") << std::endl; 110 if ( base ) { 111 os << "Base Type of Enum:" << std::endl; 112 base->print(os, indent); 113 } 114 os << std::endl << "End of EnumDecl::print" << std::endl; 115 } 116 108 117 const char * TraitDecl::typeString() const { return aggrString( Trait ); } 109 118 -
src/SynTree/BasicType.cc
rba897d21 r2e9b59b 29 29 } 30 30 31 bool BasicType::isWholeNumber() const { 32 return kind == Bool || 33 kind ==Char || 34 kind == SignedChar || 35 kind == UnsignedChar || 36 kind == ShortSignedInt || 37 kind == ShortUnsignedInt || 38 kind == SignedInt || 39 kind == UnsignedInt || 40 kind == LongSignedInt || 41 kind == LongUnsignedInt || 42 kind == LongLongSignedInt || 43 kind ==LongLongUnsignedInt || 44 kind == SignedInt128 || 45 kind == UnsignedInt128; 46 } 47 31 48 bool BasicType::isInteger() const { 32 49 return kind <= UnsignedInt128; -
src/SynTree/Declaration.h
rba897d21 r2e9b59b 144 144 virtual void print( std::ostream & os, Indenter indent = {} ) const override; 145 145 virtual void printShort( std::ostream & os, Indenter indent = {} ) const override; 146 147 // TODO: Move to the right place 148 void checkAssignedValue() const; 146 149 }; 147 150 … … 287 290 AggregateDecl * set_body( bool body ) { AggregateDecl::body = body; return this; } 288 291 289 virtual void print( std::ostream & os, Indenter indent = {} ) const override final;292 virtual void print( std::ostream & os, Indenter indent = {} ) const override; 290 293 virtual void printShort( std::ostream & os, Indenter indent = {} ) const override; 291 294 protected: … … 335 338 typedef AggregateDecl Parent; 336 339 public: 337 EnumDecl( const std::string & name, const std::list< Attribute * > & attributes = std::list< class Attribute * >(), LinkageSpec::Spec linkage = LinkageSpec::Cforall ) : Parent( name, attributes, linkage ) {} 338 EnumDecl( const EnumDecl & other ) : Parent( other ) {} 340 EnumDecl( const std::string & name, 341 const std::list< Attribute * > & attributes = std::list< class Attribute * >(), 342 LinkageSpec::Spec linkage = LinkageSpec::Cforall, 343 Type * baseType = nullptr ) : Parent( name, attributes, linkage ) , base( baseType ){} 344 EnumDecl( const EnumDecl & other ) : Parent( other ), base( other.base ) {} 339 345 340 346 bool valueOf( Declaration * enumerator, long long int & value ); … … 344 350 virtual void accept( Visitor & v ) const override { v.visit( this ); } 345 351 virtual Declaration * acceptMutator( Mutator & m ) override { return m.mutate( this ); } 346 private: 352 Type * base; 347 353 std::unordered_map< std::string, long long int > enumValues; 354 virtual void print( std::ostream & os, Indenter indent = {} ) const override final; 355 private: 356 // std::unordered_map< std::string, long long int > enumValues; 348 357 virtual const char * typeString() const override; 349 358 }; -
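With the base member and the widened constructor above, the old-AST EnumDecl can carry the declared base type alongside its enumerators. A small, purely illustrative construction; the concrete enumeration and base chosen here are hypothetical:

    // Illustration only: an EnumDecl whose enumerators are char * values.
    Type * base = new PointerType( Type::Qualifiers(),
        new BasicType( Type::Qualifiers(), BasicType::Char ) );
    EnumDecl * colour = new EnumDecl( "Colour",
        std::list< Attribute * >(), LinkageSpec::Cforall, base );
    assert( colour->base == base );   // EnumDecl::print above now reports and prints this base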
src/SynTree/Type.h
rba897d21 r2e9b59b 268 268 virtual Type *acceptMutator( Mutator & m ) override { return m.mutate( this ); } 269 269 virtual void print( std::ostream & os, Indenter indent = {} ) const override; 270 270 bool isWholeNumber() const; 271 271 bool isInteger() const; 272 272 }; -
src/SynTree/Visitor.h
rba897d21 r2e9b59b 35 35 virtual void visit( UnionDecl * node ) { visit( const_cast<const UnionDecl *>(node) ); } 36 36 virtual void visit( const UnionDecl * aggregateDecl ) = 0; 37 virtual void visit( EnumDecl * node ) { visit( const_cast<const EnumDecl *>(node) ); } 37 virtual void visit( EnumDecl * node ) { visit( const_cast<const EnumDecl *>(node) ); } // Marker 1 38 38 virtual void visit( const EnumDecl * aggregateDecl ) = 0; 39 39 virtual void visit( TraitDecl * node ) { visit( const_cast<const TraitDecl *>(node) ); } … … 190 190 virtual void visit( UnionInstType * node ) { visit( const_cast<const UnionInstType *>(node) ); } 191 191 virtual void visit( const UnionInstType * aggregateUseType ) = 0; 192 virtual void visit( EnumInstType * node ) { visit( const_cast<const EnumInstType *>(node) ); } 192 virtual void visit( EnumInstType * node ) { visit( const_cast<const EnumInstType *>(node) ); } // Marker 2 193 193 virtual void visit( const EnumInstType * aggregateUseType ) = 0; 194 194 virtual void visit( TraitInstType * node ) { visit( const_cast<const TraitInstType *>(node) ); } -
src/Tuples/TupleAssignment.cc
rba897d21 r2e9b59b 9 9 // Author : Rodolfo G. Esteves 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Fri Dec 13 23:45:33 201913 // Update Count : 911 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Mar 16 14:06:00 2022 13 // Update Count : 10 14 14 // 15 15 … … 465 465 // resolve ctor/dtor for the new object 466 466 ast::ptr< ast::Init > ctorInit = ResolvExpr::resolveCtorInit( 467 InitTweak::genCtorInit( location, ret ), spotter.crntFinder. localSyms);467 InitTweak::genCtorInit( location, ret ), spotter.crntFinder.context ); 468 468 // remove environments from subexpressions of stmtExpr 469 469 ast::Pass< EnvRemover > rm{ env }; … … 560 560 // resolve the cast expression so that rhsCand return type is bound by the cast 561 561 // type as needed, and transfer the resulting environment 562 ResolvExpr::CandidateFinder finder { spotter.crntFinder.localSyms, env };562 ResolvExpr::CandidateFinder finder( spotter.crntFinder.context, env ); 563 563 finder.find( rhsCand->expr, ResolvExpr::ResolvMode::withAdjustment() ); 564 564 assert( finder.candidates.size() == 1 ); … … 609 609 // explode the LHS so that each field of a tuple-valued expr is assigned 610 610 ResolvExpr::CandidateList lhs; 611 explode( *lhsCand, crntFinder. localSyms, back_inserter(lhs), true );611 explode( *lhsCand, crntFinder.context.symtab, back_inserter(lhs), true ); 612 612 for ( ResolvExpr::CandidateRef & cand : lhs ) { 613 613 // each LHS value must be a reference - some come in with a cast, if not … … 629 629 if ( isTuple( rhsCand->expr ) ) { 630 630 // multiple assignment 631 explode( *rhsCand, crntFinder. localSyms, back_inserter(rhs), true );631 explode( *rhsCand, crntFinder.context.symtab, back_inserter(rhs), true ); 632 632 matcher.reset( 633 633 new MultipleAssignMatcher{ *this, expr->location, lhs, rhs } ); … … 648 648 // multiple assignment 649 649 ResolvExpr::CandidateList rhs; 650 explode( rhsCand, crntFinder. localSyms, back_inserter(rhs), true );650 explode( rhsCand, crntFinder.context.symtab, back_inserter(rhs), true ); 651 651 matcher.reset( 652 652 new MultipleAssignMatcher{ *this, expr->location, lhs, rhs } ); … … 678 678 ) 679 679 680 ResolvExpr::CandidateFinder finder { crntFinder.localSyms, matcher->env };680 ResolvExpr::CandidateFinder finder( crntFinder.context, matcher->env ); 681 681 682 682 try { -
src/Validate/Autogen.cpp
rba897d21 r2e9b59b 248 248 structInst.params.push_back( new ast::TypeExpr( 249 249 typeDecl->location, 250 new ast::TypeInstType( typeDecl ->name, typeDecl)250 new ast::TypeInstType( typeDecl ) 251 251 ) ); 252 252 } … … 264 264 unionInst.params.push_back( new ast::TypeExpr( 265 265 unionDecl->location, 266 new ast::TypeInstType( typeDecl ->name, typeDecl)266 new ast::TypeInstType( typeDecl ) 267 267 ) ); 268 268 } -
src/Validate/FindSpecialDeclsNew.cpp
rba897d21 r2e9b59b 30 30 31 31 struct FindDeclsCore : public ast::WithShortCircuiting { 32 ast::Translation Unit::Global & global;33 FindDeclsCore( ast::Translation Unit::Global & g ) : global( g ) {}32 ast::TranslationGlobal & global; 33 FindDeclsCore( ast::TranslationGlobal & g ) : global( g ) {} 34 34 35 35 void previsit( const ast::Decl * decl ); … … 74 74 ast::Pass<FindDeclsCore>::run( translationUnit, translationUnit.global ); 75 75 76 // TODO: When everything gets the globals from the translation unit,77 // remove these.78 ast::dereferenceOperator = translationUnit.global.dereference;79 ast::dtorStruct = translationUnit.global.dtorStruct;80 ast::dtorStructDestroy = translationUnit.global.dtorDestroy;81 82 76 // TODO: conditionally generate 'fake' declarations for missing features, 83 77 // so that translation can proceed in the event that builtins, prelude, -
src/Validate/ForallPointerDecay.cpp
rba897d21 r2e9b59b 41 41 for ( auto & type_param : decl->type_params ) { 42 42 type->forall.emplace_back( 43 new ast::TypeInstType( type_param ->name, type_param) );43 new ast::TypeInstType( type_param ) ); 44 44 } 45 45 for ( auto & assertion : decl->assertions ) { … … 70 70 AssertionList assertions; 71 71 // Substitute trait decl parameters for instance parameters. 72 ast::TypeSubstitution sub( 73 inst->base->params.begin(), 74 inst->base->params.end(), 75 inst->params.begin() 76 ); 72 ast::TypeSubstitution sub( inst->base->params, inst->params ); 77 73 for ( const ast::ptr<ast::Decl> & decl : inst->base->members ) { 78 74 ast::ptr<ast::DeclWithType> copy = -
src/Validate/module.mk
rba897d21 r2e9b59b 22 22 Validate/ForallPointerDecay.cpp \ 23 23 Validate/ForallPointerDecay.hpp \ 24 Validate/GenericParameter.cpp \ 25 Validate/GenericParameter.hpp \ 24 26 Validate/HandleAttributes.cc \ 25 27 Validate/HandleAttributes.h \ … … 28 30 Validate/LabelAddressFixer.cpp \ 29 31 Validate/LabelAddressFixer.hpp \ 32 Validate/ReturnCheck.cpp \ 33 Validate/ReturnCheck.hpp \ 30 34 Validate/FindSpecialDeclsNew.cpp \ 31 35 Validate/FindSpecialDecls.cc \ -
src/Virtual/Tables.cc
rba897d21 r2e9b59b 10 10 // Created On : Mon Aug 31 11:11:00 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Apr 21 15:36:00 2021 13 // Update Count : 2 14 // 15 12 // Last Modified On : Fri Mar 11 10:40:00 2022 13 // Update Count : 3 14 // 15 16 #include "AST/Attribute.hpp" 17 #include "AST/Copy.hpp" 18 #include "AST/Decl.hpp" 19 #include "AST/Expr.hpp" 20 #include "AST/Init.hpp" 21 #include "AST/Stmt.hpp" 22 #include "AST/Type.hpp" 16 23 #include <SynTree/Attribute.h> 17 24 #include <SynTree/Declaration.h> … … 77 84 } 78 85 86 static ast::ObjectDecl * makeVtableDeclaration( 87 CodeLocation const & location, std::string const & name, 88 ast::StructInstType const * type, ast::Init const * init ) { 89 ast::Storage::Classes storage; 90 if ( nullptr == init ) { 91 storage.is_extern = true; 92 } 93 return new ast::ObjectDecl( 94 location, 95 name, 96 type, 97 init, 98 storage, 99 ast::Linkage::Cforall 100 ); 101 } 102 79 103 ObjectDecl * makeVtableForward( std::string const & name, StructInstType * type ) { 80 104 assert( type ); 81 105 return makeVtableDeclaration( name, type, nullptr ); 106 } 107 108 ast::ObjectDecl * makeVtableForward( 109 CodeLocation const & location, std::string const & name, 110 ast::StructInstType const * vtableType ) { 111 assert( vtableType ); 112 return makeVtableDeclaration( location, name, vtableType, nullptr ); 82 113 } 83 114 … … 123 154 } 124 155 156 static std::vector<ast::ptr<ast::Init>> buildInits( 157 CodeLocation const & location, 158 //std::string const & name, 159 ast::StructInstType const * vtableType, 160 ast::Type const * objectType ) { 161 ast::StructDecl const * vtableStruct = vtableType->base; 162 163 std::vector<ast::ptr<ast::Init>> inits; 164 inits.reserve( vtableStruct->members.size() ); 165 166 // This is designed to run before the resolver. 167 for ( auto field : vtableStruct->members ) { 168 if ( std::string( "parent" ) == field->name ) { 169 // This will not work with polymorphic state. 
170 auto oField = field.strict_as<ast::ObjectDecl>(); 171 auto fieldType = oField->type.strict_as<ast::PointerType>(); 172 auto parentType = fieldType->base.strict_as<ast::StructInstType>(); 173 std::string const & parentInstance = instanceName( parentType->name ); 174 inits.push_back( 175 new ast::SingleInit( location, new ast::AddressExpr( new ast::NameExpr( location, parentInstance ) ) ) ); 176 } else if ( std::string( "__cfavir_typeid" ) == field->name ) { 177 std::string const & baseType = baseTypeName( vtableType->name ); 178 std::string const & typeId = typeIdName( baseType ); 179 inits.push_back( new ast::SingleInit( location, new ast::AddressExpr( new ast::NameExpr( location, typeId ) ) ) ); 180 } else if ( std::string( "size" ) == field->name ) { 181 inits.push_back( new ast::SingleInit( location, new ast::SizeofExpr( location, objectType ) 182 ) ); 183 } else if ( std::string( "align" ) == field->name ) { 184 inits.push_back( new ast::SingleInit( location, 185 new ast::AlignofExpr( location, objectType ) 186 ) ); 187 } else { 188 inits.push_back( new ast::SingleInit( location, 189 new ast::NameExpr( location, field->name ) 190 ) ); 191 } 192 //ast::Expr * expr = buildInitExpr(...); 193 //inits.push_back( new ast::SingleInit( location, expr ) ) 194 } 195 196 return inits; 197 } 198 199 ast::ObjectDecl * makeVtableInstance( 200 CodeLocation const & location, 201 std::string const & name, 202 ast::StructInstType const * vtableType, 203 ast::Type const * objectType, 204 ast::Init const * init ) { 205 assert( vtableType ); 206 assert( objectType ); 207 208 // Build the initialization. 209 if ( nullptr == init ) { 210 init = new ast::ListInit( location, 211 buildInits( location, vtableType, objectType ) ); 212 213 // The provided init should initialize everything except the parent 214 // pointer, the size-of and align-of fields. These should be inserted. 215 } else { 216 // Except this is not yet supported. 
217 assert(false); 218 } 219 return makeVtableDeclaration( location, name, vtableType, init ); 220 } 221 125 222 namespace { 126 223 std::string const functionName = "get_exception_vtable"; … … 140 237 new ReferenceType( noQualifiers, vtableType ), 141 238 nullptr, 142 239 { new Attribute("unused") } 143 240 ) ); 144 241 type->parameters.push_back( new ObjectDecl( … … 157 254 type, 158 255 nullptr 256 ); 257 } 258 259 ast::FunctionDecl * makeGetExceptionForward( 260 CodeLocation const & location, 261 ast::Type const * vtableType, 262 ast::Type const * exceptType ) { 263 assert( vtableType ); 264 assert( exceptType ); 265 return new ast::FunctionDecl( 266 location, 267 functionName, 268 { /* forall */ }, 269 { new ast::ObjectDecl( 270 location, 271 "__unused", 272 new ast::PointerType( exceptType ) 273 ) }, 274 { new ast::ObjectDecl( 275 location, 276 "_retvalue", 277 new ast::ReferenceType( vtableType ) 278 ) }, 279 nullptr, 280 ast::Storage::Classes(), 281 ast::Linkage::Cforall, 282 { new ast::Attribute( "unused" ) } 159 283 ); 160 284 } … … 172 296 } 173 297 298 ast::FunctionDecl * makeGetExceptionFunction( 299 CodeLocation const & location, 300 ast::ObjectDecl const * vtableInstance, ast::Type const * exceptType ) { 301 assert( vtableInstance ); 302 assert( exceptType ); 303 ast::FunctionDecl * func = makeGetExceptionForward( 304 location, ast::deepCopy( vtableInstance->type ), exceptType ); 305 func->stmts = new ast::CompoundStmt( location, { 306 new ast::ReturnStmt( location, new ast::VariableExpr( location, vtableInstance ) ) 307 } ); 308 return func; 309 } 310 174 311 ObjectDecl * makeTypeIdInstance( StructInstType const * typeIdType ) { 175 312 assert( typeIdType ); … … 191 328 } 192 329 193 } 330 ast::ObjectDecl * makeTypeIdInstance( 331 CodeLocation const & location, 332 ast::StructInstType const * typeIdType ) { 333 assert( typeIdType ); 334 ast::StructInstType * type = ast::mutate( typeIdType ); 335 type->set_const( true ); 336 std::string const & typeid_name = typeIdTypeToInstance( typeIdType->name ); 337 return new ast::ObjectDecl( 338 location, 339 typeid_name, 340 type, 341 new ast::ListInit( location, { 342 new ast::SingleInit( location, 343 new ast::AddressExpr( location, 344 new ast::NameExpr( location, "__cfatid_exception_t" ) ) ) 345 } ), 346 ast::Storage::Classes(), 347 ast::Linkage::Cforall, 348 nullptr, 349 { new ast::Attribute( "cfa_linkonce" ) } 350 ); 351 } 352 353 } -
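buildInits above walks the vtable struct's members in order: parent, __cfavir_typeid, size and align get special-cased initializers, and every remaining field is initialized with a NameExpr of its own name so ordinary overload resolution later binds it to a like-named function or object. Roughly, the generated instance has the following shape; this is a hand-written illustration, and the trailing members (copy, msg) are hypothetical stand-ins, not names taken from the changeset:

    // Illustration only: approximate layout of a generated exception vtable
    // instance.  Only the first four fields are special-cased by buildInits().
    #include <cstddef>
    struct hypothetical_exception_vtable {
        const void * parent;            // &<instance of the parent's vtable>
        const void * __cfavir_typeid;   // &<type-id object of the base exception type>
        size_t size;                    // sizeof( the exception type )
        size_t align;                   // alignof( the exception type )
        void (* copy)();                // filled by name lookup on "copy"
        const char * (* msg)();         // filled by name lookup on "msg"
    };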
src/Virtual/Tables.h
rba897d21 r2e9b59b 10 10 // Created On : Mon Aug 31 11:07:00 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : We d Apr 21 10:30:00 202113 // Update Count : 212 // Last Modified On : Wec Dec 8 16:58:00 2021 13 // Update Count : 3 14 14 // 15 15 16 16 #include <list> // for list 17 17 18 #include <string> 19 #include "AST/Fwd.hpp" 18 20 class Declaration; 19 21 class StructDecl; … … 35 37 * vtableType node is consumed. 36 38 */ 39 ast::ObjectDecl * makeVtableForward( 40 CodeLocation const & location, std::string const & name, 41 ast::StructInstType const * vtableType ); 37 42 38 43 ObjectDecl * makeVtableInstance( … … 43 48 * vtableType and init (if provided) nodes are consumed. 44 49 */ 50 ast::ObjectDecl * makeVtableInstance( 51 CodeLocation const & location, 52 std::string const & name, 53 ast::StructInstType const * vtableType, 54 ast::Type const * objectType, 55 ast::Init const * init = nullptr ); 45 56 46 57 // Some special code for how exceptions interact with virtual tables. … … 49 60 * linking the vtableType to the exceptType. Both nodes are consumed. 50 61 */ 62 ast::FunctionDecl * makeGetExceptionForward( 63 CodeLocation const & location, 64 ast::Type const * vtableType, 65 ast::Type const * exceptType ); 51 66 52 67 FunctionDecl * makeGetExceptionFunction( … … 55 70 * exceptType node is consumed. 56 71 */ 72 ast::FunctionDecl * makeGetExceptionFunction( 73 CodeLocation const & location, 74 ast::ObjectDecl const * vtableInstance, ast::Type const * exceptType ); 57 75 58 76 ObjectDecl * makeTypeIdInstance( StructInstType const * typeIdType ); … … 60 78 * TODO: Should take the parent type. Currently locked to the exception_t. 61 79 */ 80 ast::ObjectDecl * makeTypeIdInstance( 81 const CodeLocation & location, ast::StructInstType const * typeIdType ); 62 82 63 83 } -
src/main.cc
rba897d21 r2e9b59b 10 10 // Created On : Fri May 15 23:12:02 2015 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Jan 26 14:09:00 202213 // Update Count : 67 012 // Last Modified On : Wed Apr 13 11:11:00 2022 13 // Update Count : 672 14 14 // 15 15 … … 32 32 33 33 #include "AST/Convert.hpp" 34 #include "AST/Print.hpp"35 34 #include "CompilationState.h" 36 35 #include "../config.h" // for CFA_LIBDIR … … 76 75 #include "Tuples/Tuples.h" // for expandMemberTuples, expan... 77 76 #include "Validate/Autogen.hpp" // for autogenerateRoutines 77 #include "Validate/GenericParameter.hpp" // for fillGenericParameters, tr... 78 78 #include "Validate/FindSpecialDecls.h" // for findGlobalDecls 79 79 #include "Validate/ForallPointerDecay.hpp" // for decayForallPointers … … 81 81 #include "Validate/InitializerLength.hpp" // for setLengthFromInitializer 82 82 #include "Validate/LabelAddressFixer.hpp" // for fixLabelAddresses 83 #include "Validate/ReturnCheck.hpp" // for checkReturnStatements 83 84 #include "Virtual/ExpandCasts.h" // for expandCasts 84 85 … … 328 329 PASS( "Validate-A", SymTab::validate_A( translationUnit ) ); 329 330 PASS( "Validate-B", SymTab::validate_B( translationUnit ) ); 330 PASS( "Validate-C", SymTab::validate_C( translationUnit ) );331 331 332 332 CodeTools::fillLocations( translationUnit ); 333 333 334 334 if( useNewAST ) { 335 PASS( "Implement Concurrent Keywords", Concurrency::applyKeywords( translationUnit ) );336 //PASS( "Forall Pointer Decay - A", SymTab::decayForallPointersA( translationUnit ) );337 //PASS( "Forall Pointer Decay - B", SymTab::decayForallPointersB( translationUnit ) );338 //PASS( "Forall Pointer Decay - C", SymTab::decayForallPointersC( translationUnit ) );339 //PASS( "Forall Pointer Decay - D", SymTab::decayForallPointersD( translationUnit ) );340 335 CodeTools::fillLocations( translationUnit ); 341 336 … … 347 342 348 343 forceFillCodeLocations( transUnit ); 344 345 // Check as early as possible. Can't happen before 346 // LinkReferenceToType, observed failing when attempted 347 // before eliminateTypedef 348 PASS( "Validate Generic Parameters", Validate::fillGenericParameters( transUnit ) ); 349 350 PASS( "Translate Dimensions", Validate::translateDimensionParameters( transUnit ) ); 351 PASS( "Check Function Returns", Validate::checkReturnStatements( transUnit ) ); 352 353 // Must happen before Autogen. 354 PASS( "Fix Return Statements", InitTweak::fixReturnStatements( transUnit ) ); 355 356 PASS( "Implement Concurrent Keywords", Concurrency::implementKeywords( transUnit ) ); 349 357 350 358 // Must be after implement concurrent keywords; because uniqueIds … … 430 438 translationUnit = convert( move( transUnit ) ); 431 439 } else { 440 PASS( "Validate-C", SymTab::validate_C( translationUnit ) ); 432 441 PASS( "Validate-D", SymTab::validate_D( translationUnit ) ); 433 442 PASS( "Validate-E", SymTab::validate_E( translationUnit ) ); … … 497 506 PASS( "Translate Tries" , ControlStruct::translateTries( translationUnit ) ); 498 507 } 499 500 501 508 502 509 PASS( "Gen Waitfor" , Concurrency::generateWaitFor( translationUnit ) ); -
tests/Makefile.am
rba897d21 r2e9b59b 28 28 DEBUG_FLAGS=-debug -g -O0 29 29 30 quick_test=avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes 30 quick_test=avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes meta/dumpable 31 31 32 32 concurrent= … … 66 66 PRETTY_PATH=mkdir -p $(dir $(abspath ${@})) && cd ${srcdir} && 67 67 68 .PHONY: list .validate 69 .INTERMEDIATE: .validate .validate.cfa 68 .PHONY: list .validate .test_makeflags 69 .INTERMEDIATE: .validate .validate.cfa .test_makeflags 70 70 EXTRA_PROGRAMS = avl_test linkonce .dummy_hack # build but do not install 71 71 EXTRA_DIST = test.py \ … … 123 123 @+${TEST_PY} --list ${concurrent} 124 124 125 .test_makeflags: 126 @echo "${MAKEFLAGS}" 127 125 128 .validate: .validate.cfa 126 129 $(CFACOMPILE) .validate.cfa -fsyntax-only -Wall -Wextra -Werror -
tests/PRNG.cfa
rba897d21 r2e9b59b 8 8 // Created On : Wed Dec 29 09:38:12 2021 9 9 // Last Modified By : Peter A. Buhr 10 // Last Modified On : Sat Feb 12 12:23:57202211 // Update Count : 34 210 // Last Modified On : Sat Apr 9 15:21:14 2022 11 // Update Count : 344 12 12 // 13 13 … … 22 22 #include <mutex_stmt.hfa> 23 23 24 // FIX ME: spurious characters appear in output25 Duration default_preemption() { return 0; }26 27 24 #ifdef TIME // use -O2 -nodebug 28 25 #define STARTTIME start = timeHiRes() -
tests/collections/.expect/vector-err-pass-perm-it-byval.txt
rba897d21 r2e9b59b 1 error: Unique best alternative includes deleted identifier in Generated Cast of:1 collections/vector-demo.cfa:95:1 error: Unique best alternative includes deleted identifier in Generated Cast of: 2 2 Application of 3 3 Deleted Expression -
tests/concurrent/mutexstmt/.expect/locks.txt
rba897d21 r2e9b59b 3 3 Start Test: multi lock deadlock/mutual exclusion 4 4 End Test: multi lock deadlock/mutual exclusion 5 Start Test: single scoped lock mutual exclusion 6 End Test: single scoped lock mutual exclusion 7 Start Test: multi scoped lock deadlock/mutual exclusion 8 End Test: multi scoped lock deadlock/mutual exclusion 5 Start Test: multi polymorphic lock deadlock/mutual exclusion 6 End Test: multi polymorphic lock deadlock/mutual exclusion -
tests/concurrent/mutexstmt/locks.cfa
rba897d21 r2e9b59b 3 3 4 4 const unsigned int num_times = 10000; 5 6 Duration default_preemption() { return 0; } 5 7 6 8 single_acquisition_lock m1, m2, m3, m4, m5; … … 22 24 } 23 25 26 void refTest( single_acquisition_lock & m ) { 27 mutex ( m ) { 28 assert(!insideFlag); 29 insideFlag = true; 30 assert(insideFlag); 31 insideFlag = false; 32 } 33 } 34 24 35 thread T_Multi {}; 25 36 26 37 void main( T_Multi & this ) { 27 38 for (unsigned int i = 0; i < num_times; i++) { 39 refTest( m1 ); 28 40 mutex ( m1 ) { 29 41 assert(!insideFlag); … … 59 71 } 60 72 61 thread T_Mutex_Scoped {}; 73 single_acquisition_lock l1; 74 linear_backoff_then_block_lock l2; 75 owner_lock l3; 62 76 63 void main( T_Mutex_Scoped & this ) { 77 monitor monitor_t {}; 78 79 monitor_t l4; 80 81 thread T_Multi_Poly {}; 82 83 void main( T_Multi_Poly & this ) { 64 84 for (unsigned int i = 0; i < num_times; i++) { 65 { 66 scoped_lock(single_acquisition_lock) s{m1}; 67 count++; 68 } 69 { 70 scoped_lock(single_acquisition_lock) s{m1}; 85 refTest( l1 ); 86 mutex ( l1, l4 ) { 71 87 assert(!insideFlag); 72 88 insideFlag = true; … … 74 90 insideFlag = false; 75 91 } 76 } 77 } 78 79 thread T_Multi_Scoped {}; 80 81 void main( T_Multi_Scoped & this ) { 82 for (unsigned int i = 0; i < num_times; i++) { 83 { 84 scoped_lock(single_acquisition_lock) s{m1}; 92 mutex ( l1, l2, l3 ) { 85 93 assert(!insideFlag); 86 94 insideFlag = true; … … 88 96 insideFlag = false; 89 97 } 90 { 91 scoped_lock(single_acquisition_lock) s1{m1}; 92 scoped_lock(single_acquisition_lock) s2{m2}; 93 scoped_lock(single_acquisition_lock) s3{m3}; 94 scoped_lock(single_acquisition_lock) s4{m4}; 95 scoped_lock(single_acquisition_lock) s5{m5}; 98 mutex ( l3, l1, l4 ) { 96 99 assert(!insideFlag); 97 100 insideFlag = true; … … 99 102 insideFlag = false; 100 103 } 101 { 102 scoped_lock(single_acquisition_lock) s1{m1}; 103 scoped_lock(single_acquisition_lock) s3{m3}; 104 assert(!insideFlag); 105 insideFlag = true; 106 assert(insideFlag); 107 insideFlag = false; 108 } 109 { 110 scoped_lock(single_acquisition_lock) s1{m1}; 111 scoped_lock(single_acquisition_lock) s2{m2}; 112 scoped_lock(single_acquisition_lock) s4{m4}; 113 assert(!insideFlag); 114 insideFlag = true; 115 assert(insideFlag); 116 insideFlag = false; 117 } 118 { 119 scoped_lock(single_acquisition_lock) s1{m1}; 120 scoped_lock(single_acquisition_lock) s3{m3}; 121 scoped_lock(single_acquisition_lock) s4{m4}; 122 scoped_lock(single_acquisition_lock) s5{m5}; 104 mutex ( l1, l2, l4 ) { 123 105 assert(!insideFlag); 124 106 insideFlag = true; … … 131 113 int num_tasks = 10; 132 114 int main() { 133 processor p[ 10];115 processor p[num_tasks - 1]; 134 116 135 117 printf("Start Test: single lock mutual exclusion\n"); 136 118 { 137 T_Mutex t[ 10];119 T_Mutex t[num_tasks]; 138 120 } 139 121 assert(count == num_tasks * num_times); … … 141 123 printf("Start Test: multi lock deadlock/mutual exclusion\n"); 142 124 { 143 T_Multi t[ 10];125 T_Multi t[num_tasks]; 144 126 } 145 127 printf("End Test: multi lock deadlock/mutual exclusion\n"); 146 147 count = 0; 148 printf("Start Test: single scoped lock mutual exclusion\n"); 128 printf("Start Test: multi polymorphic lock deadlock/mutual exclusion\n"); 149 129 { 150 T_Mu tex_Scoped t[10];130 T_Multi_Poly t[num_tasks]; 151 131 } 152 assert(count == num_tasks * num_times); 153 printf("End Test: single scoped lock mutual exclusion\n"); 154 printf("Start Test: multi scoped lock deadlock/mutual exclusion\n"); 155 { 156 T_Multi_Scoped t[10]; 157 } 158 printf("End Test: multi scoped lock 
deadlock/mutual exclusion\n"); 132 printf("End Test: multi polymorphic lock deadlock/mutual exclusion\n"); 159 133 } -
tests/designations.cfa
rba897d21 r2e9b59b 10 10 // Created On : Thu Jun 29 15:26:36 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Jul 27 11:46:35 201713 // Update Count : 312 // Last Modified On : Mon Mar 28 22:41:55 2022 13 // Update Count : 15 14 14 // 15 15 … … 18 18 // is used for the designation syntax 19 19 #ifdef __cforall 20 #define DES : 20 #define _ : 21 #define AT @ 21 22 #else 22 int printf(const char *, ...); 23 #define DES = 23 int printf( const char *, ...); 24 #define _ = 25 #define AT 24 26 #endif 25 27 26 28 const int indentAmt = 2; 27 void indent( int level) {28 for ( int i = 0; i < level; ++i) {29 printf( " ");29 void indent( int level ) { 30 for ( int i = 0; i < level; ++i ) { 31 printf( " " ); 30 32 } 31 33 } … … 36 38 int * ptr; 37 39 }; 38 void printA( struct A a, int level) {39 indent( level);40 printf( "(A){ %d %d %p }\n", a.x, a.y, a.ptr);40 void printA( struct A a, int level ) { 41 indent( level ); 42 printf( "(A){ %d %d %p }\n", a.x, a.y, a.ptr ); 41 43 } 42 44 … … 45 47 struct A a0, a1; 46 48 }; 47 void printB( struct B b, int level) {48 indent( level);49 printf( "(B){\n");50 printA( b.a0, level+indentAmt);51 printA( b.a1, level+indentAmt);52 indent( level);53 printf( "}\n");49 void printB( struct B b, int level ) { 50 indent( level ); 51 printf( "(B){\n" ); 52 printA( b.a0, level+indentAmt ); 53 printA( b.a1, level+indentAmt ); 54 indent( level ); 55 printf( "}\n" ); 54 56 } 55 57 … … 59 61 struct B b; 60 62 }; 61 void printC( struct C c, int level) {62 indent( level);63 printf( "(C){\n");64 indent( level+indentAmt);65 printf( "(int[]{ %d %d %d }\n", c.arr[0], c.arr[1], c.arr[2]);66 printB( c.b, level+indentAmt);67 indent( level);68 printf( "}\n");63 void printC( struct C c, int level ) { 64 indent( level ); 65 printf( "(C){\n" ); 66 indent( level+indentAmt ); 67 printf( "(int[]{ %d %d %d }\n", c.arr[0], c.arr[1], c.arr[2]); 68 printB( c.b, level+indentAmt ); 69 indent( level ); 70 printf( "}\n" ); 69 71 } 70 72 … … 75 77 }; 76 78 }; 77 void printD( struct D d, int level) {78 indent( level);79 printf( "(D){ %d }\n", d.x);79 void printD( struct D d, int level ) { 80 indent( level); 81 printf( "(D ){ %d }\n", d.x ); 80 82 } 81 83 … … 99 101 } m; 100 102 }; 101 struct Fred s1 @= { .m.j :3 };102 struct Fred s2 @= { .i : { [2] :2 } };103 struct Fred s1 AT= { .m.j _ 3 }; 104 struct Fred s2 AT= { .i _ { [2] _ 2 } }; 103 105 104 106 int main() { 105 107 // simple designation case - starting from beginning of structure, leaves ptr default-initialized (zero) 106 108 struct A y0 = { 107 .x DES2,108 .y DES3109 .x _ 2, 110 .y _ 3 109 111 }; 110 112 … … 117 119 // use designation to move to member y, leaving x default-initialized (zero) 118 120 struct A y2 = { 119 .y DES3,121 .y _ 3, 120 122 0 121 123 }; … … 127 129 #endif 128 130 129 printf( "=====A=====\n");130 printA( y0, 0);131 printA( y1, 0);132 printA( y2, 0);133 printf( "=====A=====\n\n");131 printf( "=====A=====\n" ); 132 printA( y0, 0 ); 133 printA( y1, 0 ); 134 printA( y2, 0 ); 135 printf( "=====A=====\n\n" ); 134 136 135 137 // initialize only first element (z0.a.x), leaving everything else default-initialized (zero), no nested curly-braces … … 140 142 { 3 }, // z1.a0 141 143 { 4 }, // z1.a1 142 .a0 DES{ 5 }, // z1.a0144 .a0 _ { 5 }, // z1.a0 143 145 { 6 }, // z1.a1 144 .a0.y DES2, // z1.a0.y146 .a0.y _ 2, // z1.a0.y 145 147 0, // z1.a0.ptr 146 148 }; … … 170 172 }; 171 173 172 printf( "=====B=====\n");173 printB( z0, 0);174 printB( z1, 0);175 printB( z2, 0);176 printB( z3, 0);177 printB( z5, 0);178 
printB( z6, 0);179 printf( "=====B=====\n\n");174 printf( "=====B=====\n" ); 175 printB( z0, 0 ); 176 printB( z1, 0 ); 177 printB( z2, 0 ); 178 printB( z3, 0 ); 179 printB( z5, 0 ); 180 printB( z6, 0 ); 181 printf( "=====B=====\n\n" ); 180 182 181 183 // TODO: what about extra things in a nested init? are empty structs skipped?? … … 188 190 }; 189 191 190 printf( "=====C=====\n");191 printC( c1, 0);192 printf( "=====C=====\n\n");192 printf( "=====C=====\n" ); 193 printC( c1, 0 ); 194 printf( "=====C=====\n\n" ); 193 195 194 196 #if ERROR … … 213 215 #endif 214 216 // array designation 215 int i[2] = { [1] :3 };217 int i[2] = { [1] _ 3 }; 216 218 // allowed to have 'too many' initialized lists - essentially they are ignored. 217 219 int i1 = { 3 }; … … 219 221 // doesn't work yet. 220 222 // designate unnamed object's members 221 // struct D d = { .x DES3 };223 // struct D d = { .x _ 3 }; 222 224 #if ERROR 223 struct D d1 = { .y DES3 };225 struct D d1 = { .y _ 3 }; 224 226 #endif 225 227 … … 241 243 // move cursor to e4.b.a0.x and initialize until e3.b.a1.ptr inclusive 242 244 union E e3 = { 243 .b.a0.x DES2, 3, 0, 5, 6, 0244 }; 245 246 printf( "=====E=====\n");247 printA( e0.a, 0);248 printA( e1.a, 0);249 printA( e2.a, 0);250 printB( e3.b, 0);251 printf( "=====E=====\n\n");245 .b.a0.x _ 2, 3, 0, 5, 6, 0 246 }; 247 248 printf( "=====E=====\n" ); 249 printA( e0.a, 0 ); 250 printA( e1.a, 0 ); 251 printA( e2.a, 0 ); 252 printB( e3.b, 0 ); 253 printf( "=====E=====\n\n" ); 252 254 253 255 // special case of initialization: char[] can be initialized with a string literal 254 256 const char * str0 = "hello"; 255 257 char str1[] = "hello"; 256 const char c1[] = "abc"; 257 const char c2[] = { 'a', 'b', 'c' }; 258 const char c3[][2] = { { 'a', 'b' }, { 'c', 'd'}, { 'c', 'd'} }; 258 const char c2[] = "abc"; 259 const char c3[] = { 'a', 'b', 'c' }; 260 const char c4[][2] = { { 'a', 'b' }, { 'c', 'd'}, { 'c', 'd'} }; 261 262 // more cases 263 264 // int widths[] = { [3 ... 9] _ 1, [10 ... 99] _ 2, [100] _ 3 }; 265 // int widths[] = { [3 ~ 9] _ 1, [10 ~ 99] _ 2, [100] _ 3 }; 266 struct point { int x, y; }; 267 struct point p = { .y _ 5, .x _ 7 }; 268 union foo { int i; double d; }; 269 union foo f = { .d _ 4 }; 270 int v1, v2, v4; 271 int w[6] = { [1] _ v1, v2, [4] _ v4 }; 272 int whitespace[256] = { [' '] _ 1, ['\t'] _ 1, ['\v'] _ 1, ['\f'] _ 1, ['\n'] _ 1, ['\r'] _ 1 }; 273 struct point ptarray[10] = { [2].y _ 34, [2].x _ 35, [0].x _ 36 }; 259 274 } 260 275 -
tests/io/away_fair.cfa
rba897d21 r2e9b59b 20 20 #include <thread.hfa> 21 21 #include <iofwd.hfa> 22 #include <io/types.hfa>23 22 24 23 Duration default_preemption() { … … 51 50 } 52 51 53 // ----- S pinner -----52 // ----- Submitter ----- 54 53 // try to submit io but yield so that it's likely we are moved to the slow path 55 54 thread Submitter {}; -
tests/io/io-acquire.cfa
rba897d21 r2e9b59b 10 10 // Created On : Mon Mar 1 18:40:09 2021 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jan 14 09:13:18202213 // Update Count : 7 412 // Last Modified On : Sat Apr 9 15:22:03 2022 13 // Update Count : 76 14 14 // 15 15 … … 17 17 #include <thread.hfa> 18 18 #include <mutex_stmt.hfa> 19 20 Duration default_preemption() { return 0; }21 19 22 20 thread T {}; -
tests/meta/dumpable.cfa
rba897d21 r2e9b59b 14 14 // 15 15 16 #include <errno.h> 16 17 #include <limits.h> 17 #include < errno.h>18 #include <string.h> 18 19 19 20 #include <fstream.hfa> 20 21 21 22 extern "C" { 23 #include <fcntl.h> 24 #include <unistd.h> 22 25 #include <sys/prctl.h> 23 26 #include <sys/resource.h> 24 27 #include <sys/statvfs.h> 25 #include <unistd.h> 28 #include <sys/stat.h> 29 #include <sys/types.h> 26 30 } 27 31 … … 102 106 } 103 107 108 void check_core_pattern() { 109 int ret; 110 int cp = open("/proc/sys/kernel/core_pattern", 0, O_RDONLY); 111 if(cp < 0) { 112 perror("open(/proc/sys/kernel/core_pattern, O_RDONLY) error"); 113 return; 114 } 115 116 try { 117 const char * expected = "core\n"; 118 const int sz = sizeof("core\n"); 119 char buf[512]; 120 ret = read(cp, buf, 512); 121 if(ret < 0) { 122 perror("first core pattern read error"); 123 return; 124 } 125 ret = strncmp(expected, buf, sz - 1); 126 if(ret != 0) { 127 serr | "/proc/sys/kernel/core_pattern does not contain 'core', was:" | nl | nl | buf | nl 128 | "Test script expect cores files to be dumped with name 'core' in current working directory." | nl 129 | "Apport is not supported, it should be deactivated in /etc/default/apport for the test suite to work with core dumps."; 130 131 return; 132 } 133 } 134 finally { 135 ret = close(cp); 136 if(ret < 0) perror("close(/proc/sys/kernel/core_pattern) error"); 137 } 138 139 } 140 104 141 int main() { 105 142 check_ulimit(); … … 113 150 check_dumpflag(); 114 151 152 check_core_pattern(); 153 115 154 sout | "Done"; 116 155 } -
tests/pybin/settings.py
rba897d21 r2e9b59b 155 155 global generating 156 156 global make 157 global make_jobfds 157 158 global output_width 158 159 global timeout … … 168 169 generating = options.regenerate_expected 169 170 make = ['make'] 171 make_jobfds = [] 170 172 output_width = 24 171 173 timeout = Timeouts(options.timeout, options.global_timeout) … … 177 179 os.putenv('DISTCC_LOG', os.path.join(BUILDDIR, 'distcc_error.log')) 178 180 179 def update_make_cmd(force, jobs): 181 def update_make_cmd(flags): 180 182 global make 181 182 make = ['make'] if not force else ['make', "-j%i" % jobs] 183 make = ['make', *flags] 184 185 def update_make_fds(r, w): 186 global make_jobfds 187 make_jobfds = (r, w) 183 188 184 189 def validate(): … … 187 192 global distcc 188 193 distcc = "DISTCC_CFA_PATH=~/.cfadistcc/%s/cfa" % tools.config_hash() 189 errf = os.path.join(BUILDDIR, ".validate.err") 190 make_ret, out = tools.make( ".validate", error_file = errf, output_file=subprocess.DEVNULL, error=subprocess.DEVNULL ) 194 make_ret, out, err = tools.make( ".validate", output_file=subprocess.PIPE, error=subprocess.PIPE ) 191 195 if make_ret != 0: 192 with open (errf, "r") as myfile: 193 error=myfile.read() 194 196 print("ERROR: Invalid configuration %s:%s" % (arch.string, debug.string), file=sys.stderr) 195 print(" verify returned : \n%s" % error, file=sys.stderr) 196 tools.rm(errf) 197 print(" verify returned : \n%s" % err, file=sys.stderr) 197 198 sys.exit(1) 198 199 tools.rm(errf) 200 199 201 def prep_output(tests): -
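The make_jobfds bookkeeping added above matters because GNU make's jobserver hands out tokens over a pipe that child processes must inherit, while Python's subprocess module closes extra descriptors by default. A hedged sketch of how such descriptors can be kept open for a child make (the helper name is an assumption, not the project's API):

# Illustrative sketch (not from this changeset): keep jobserver pipe ends open for a child make.
import subprocess

def run_make(target, jobfds=()):
    # pass_fds stops Python from closing the jobserver descriptors,
    # so the child make can still borrow and return job tokens.
    return subprocess.run(["make", target], pass_fds=list(jobfds)).returncode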
tests/pybin/tools.py
rba897d21 r2e9b59b 23 23 24 24 # helper functions to run terminal commands 25 def sh(*cmd, timeout = False, output_file = None, input_file = None, input_text = None, error = subprocess.STDOUT, ignore_dry_run = False ):25 def sh(*cmd, timeout = False, output_file = None, input_file = None, input_text = None, error = subprocess.STDOUT, ignore_dry_run = False, pass_fds = []): 26 26 try: 27 27 cmd = list(cmd) … … 65 65 **({'input' : bytes(input_text, encoding='utf-8')} if input_text else {'stdin' : input_file}), 66 66 stdout = output_file, 67 stderr = error 67 stderr = error, 68 pass_fds = pass_fds 68 69 ) as proc: 69 70 70 71 try: 71 out, _= proc.communicate(72 out, errout = proc.communicate( 72 73 timeout = settings.timeout.single if timeout else None 73 74 ) 74 75 75 return proc.returncode, out.decode("latin-1") if out else None 76 return proc.returncode, out.decode("latin-1") if out else None, errout.decode("latin-1") if errout else None 76 77 except subprocess.TimeoutExpired: 77 78 if settings.timeout2gdb: 78 79 print("Process {} timeout".format(proc.pid)) 79 80 proc.communicate() 80 return 124, str(None) 81 return 124, str(None), "Subprocess Timeout 2 gdb" 81 82 else: 82 83 proc.send_signal(signal.SIGABRT) 83 84 proc.communicate() 84 return 124, str(None) 85 return 124, str(None), "Subprocess Timeout 2 gdb" 85 86 86 87 except Exception as ex: … … 105 106 return (False, "No file") 106 107 107 code, out = sh("file", fname, output_file=subprocess.PIPE)108 code, out, err = sh("file", fname, output_file=subprocess.PIPE) 108 109 if code != 0: 109 return (False, "'file EXPECT' failed with code {} ".format(code))110 return (False, "'file EXPECT' failed with code {} '{}'".format(code, err)) 110 111 111 112 match = re.search(".*: (.*)", out) … … 190 191 ] 191 192 cmd = [s for s in cmd if s] 192 return sh(*cmd, output_file=output_file, error=error )193 return sh(*cmd, output_file=output_file, error=error, pass_fds=settings.make_jobfds) 193 194 194 195 def make_recon(target): … … 241 242 # move a file 242 243 def mv(source, dest): 243 ret, _ = sh("mv", source, dest)244 ret, _, _ = sh("mv", source, dest) 244 245 return ret 245 246 246 247 # cat one file into the other 247 248 def cat(source, dest): 248 ret, _ = sh("cat", source, output_file=dest)249 ret, _, _ = sh("cat", source, output_file=dest) 249 250 return ret 250 251 … … 289 290 # system 290 291 ################################################################################ 292 def jobserver_version(): 293 make_ret, out, err = sh('make', '.test_makeflags', '-j2', output_file=subprocess.PIPE, error=subprocess.PIPE) 294 if make_ret != 0: 295 print("ERROR: cannot find Makefile jobserver version", file=sys.stderr) 296 print(" test returned : {} '{}'".format(make_ret, err), file=sys.stderr) 297 sys.exit(1) 298 299 re_jobs = re.search("--jobserver-(auth|fds)", out) 300 if not re_jobs: 301 print("ERROR: cannot find Makefile jobserver version", file=sys.stderr) 302 print(" MAKEFLAGS are : '{}'".format(out), file=sys.stderr) 303 sys.exit(1) 304 305 return "--jobserver-{}".format(re_jobs.group(1)) 306 307 def prep_recursive_make(N): 308 if N < 2: 309 return [] 310 311 # create the pipe 312 (r, w) = os.pipe() 313 314 # feel it with N-1 tokens, (Why N-1 and not N, I don't know it's in the manpage for make) 315 os.write(w, b'+' * (N - 1)); 316 317 # prep the flags for make 318 make_flags = ["-j{}".format(N), "--jobserver-auth={},{}".format(r, w)] 319 320 # tell make about the pipes 321 os.environ["MAKEFLAGS"] = os.environ["MFLAGS"] = " ".join(make_flags) 
322 323 # make sure pass the pipes to our children 324 settings.update_make_fds(r, w) 325 326 return make_flags 327 328 def prep_unlimited_recursive_make(): 329 # prep the flags for make 330 make_flags = ["-j"] 331 332 # tell make about the pipes 333 os.environ["MAKEFLAGS"] = os.environ["MFLAGS"] = "-j" 334 335 return make_flags 336 337 338 def eval_hardware(): 339 # we can create as many things as we want 340 # how much hardware do we have? 341 if settings.distribute: 342 # remote hardware is allowed 343 # how much do we have? 344 ret, jstr, _ = sh("distcc", "-j", output_file=subprocess.PIPE, ignore_dry_run=True) 345 return int(jstr.strip()) if ret == 0 else multiprocessing.cpu_count() 346 else: 347 # remote isn't allowed, use local cpus 348 return multiprocessing.cpu_count() 349 291 350 # count number of jobs to create 292 def job_count( options , tests):351 def job_count( options ): 293 352 # check if the user already passed in a number of jobs for multi-threading 294 if not options.jobs: 295 make_flags = os.environ.get('MAKEFLAGS') 296 force = bool(make_flags) 297 make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None 298 if make_jobs_fds : 299 tokens = os.read(int(make_jobs_fds.group(2)), 1024) 300 options.jobs = len(tokens) 301 os.write(int(make_jobs_fds.group(3)), tokens) 302 else : 303 if settings.distribute: 304 ret, jstr = sh("distcc", "-j", output_file=subprocess.PIPE, ignore_dry_run=True) 305 if ret == 0: 306 options.jobs = int(jstr.strip()) 307 else : 308 options.jobs = multiprocessing.cpu_count() 309 else: 310 options.jobs = multiprocessing.cpu_count() 353 make_env = os.environ.get('MAKEFLAGS') 354 make_flags = make_env.split() if make_env else None 355 jobstr = jobserver_version() 356 357 if options.jobs and make_flags: 358 print('WARNING: -j options should not be specified when called form Make', file=sys.stderr) 359 360 # Top level make is calling the shots, just follow 361 if make_flags: 362 # do we have -j and --jobserver-... 363 jobopt = None 364 exists_fds = None 365 for f in make_flags: 366 jobopt = f if f.startswith("-j") else jobopt 367 exists_fds = f if f.startswith(jobstr) else exists_fds 368 369 # do we have limited parallelism? 370 if exists_fds : 371 try: 372 rfd, wfd = tuple(exists_fds.split('=')[1].split(',')) 373 except: 374 print("ERROR: jobserver has unrecoginzable format, was '{}'".format(exists_fds), file=sys.stderr) 375 sys.exit(1) 376 377 # read the token pipe to count number of available tokens and restore the pipe 378 # this assumes the test suite script isn't invoked in parellel with something else 379 tokens = os.read(int(rfd), 65536) 380 os.write(int(wfd), tokens) 381 382 # the number of tokens is off by one for obscure but well documented reason 383 # see man make for more details 384 options.jobs = len(tokens) + 1 385 386 # do we have unlimited parallelism? 
387 elif jobopt and jobopt != "-j1": 388 # check that this actually make sense 389 if jobopt != "-j": 390 print("ERROR: -j option passed by make but no {}, was '{}'".format(jobstr, jobopt), file=sys.stderr) 391 sys.exit(1) 392 393 options.jobs = eval_hardware() 394 flags = prep_unlimited_recursive_make() 395 396 397 # then no parallelism 398 else: 399 options.jobs = 1 400 401 # keep all flags make passed along, except the weird 'w' which is about subdirectories 402 flags = [f for f in make_flags if f != 'w'] 403 404 # Arguments are calling the shots, fake the top level make 405 elif options.jobs : 406 407 # make sure we have a valid number of jobs that corresponds to user input 408 if options.jobs < 0 : 409 print('ERROR: Invalid number of jobs', file=sys.stderr) 410 sys.exit(1) 411 412 flags = prep_recursive_make(options.jobs) 413 414 # Arguments are calling the shots, fake the top level make, but 0 is a special case 415 elif options.jobs == 0: 416 options.jobs = eval_hardware() 417 flags = prep_unlimited_recursive_make() 418 419 # No one says to run in parallel, then don't 311 420 else : 312 force = True313 314 # make sure we have a valid number of jobs that corresponds to user input 315 if options.jobs <= 0 :316 print('ERROR: Invalid number of jobs', file=sys.stderr)317 sys.exit(1) 318 319 return min( options.jobs, len(tests) ), force421 options.jobs = 1 422 flags = [] 423 424 # Make sure we call make as expected 425 settings.update_make_cmd( flags ) 426 427 # return the job count 428 return options.jobs 320 429 321 430 # enable core dumps for all the test children … … 334 443 distcc_hash = os.path.join(settings.SRCDIR, '../tools/build/distcc_hash') 335 444 config = "%s-%s" % (settings.arch.target, settings.debug.path) 336 _, out = sh(distcc_hash, config, output_file=subprocess.PIPE, ignore_dry_run=True)445 _, out, _ = sh(distcc_hash, config, output_file=subprocess.PIPE, ignore_dry_run=True) 337 446 return out.strip() 338 447 … … 374 483 375 484 if not os.path.isfile(core): 376 return 1, "ERR No core dump (limit soft: {} hard: {})".format(*resource.getrlimit(resource.RLIMIT_CORE))485 return 1, "ERR No core dump, expected '{}' (limit soft: {} hard: {})".format(core, *resource.getrlimit(resource.RLIMIT_CORE)) 377 486 378 487 try: 379 return sh('gdb', '-n', path, core, '-batch', '-x', cmd, output_file=subprocess.PIPE) 488 ret, out, err = sh('gdb', '-n', path, core, '-batch', '-x', cmd, output_file=subprocess.PIPE) 489 if ret == 0: 490 return 0, out 491 else: 492 return 1, err 380 493 except: 381 494 return 1, "ERR Could not read core with gdb" -
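The job_count()/prep_recursive_make() logic above follows the GNU make jobserver protocol: a top-level make creates a pipe pre-loaded with N-1 tokens and advertises it through MAKEFLAGS as --jobserver-auth=R,W (older makes say --jobserver-fds), and a cooperating child counts the queued tokens, adds one for the implicit job slot it already holds, and puts the tokens back. A self-contained sketch of that accounting, assuming no sibling job is draining the pipe at the same time (illustration only, not part of the changeset):

# Illustrative sketch (not from this changeset) of make's jobserver token pipe.
import os, re

def announce_jobserver(n_jobs):
    # Top-level side: a pipe holding n_jobs - 1 tokens; the running job is the implicit extra slot.
    r, w = os.pipe()
    os.write(w, b'+' * (n_jobs - 1))
    os.environ["MAKEFLAGS"] = "-j{} --jobserver-auth={},{}".format(n_jobs, r, w)
    return r, w   # these fds must also be handed to children (e.g. via pass_fds, as above)

def available_jobs(makeflags):
    # Child side: count the tokens without consuming them, then add the implicit slot.
    m = re.search(r"--jobserver-(?:auth|fds)=(\d+),(\d+)", makeflags or "")
    if not m:
        return 1
    rfd, wfd = int(m.group(1)), int(m.group(2))
    tokens = os.read(rfd, 65536)   # assumes the tokens are currently sitting idle in the pipe
    os.write(wfd, tokens)          # put them back untouched
    return len(tokens) + 1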
tests/test.py
rba897d21 r2e9b59b 140 140 parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tets, can be used with --all option', action='store_true') 141 141 parser.add_argument('--archive-errors', help='If called with a valid path, on test crashes the test script will copy the core dump and the executable to the specified path.', type=str, default='') 142 parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously ', type=int)142 parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously, 0 (default) for unlimited', nargs='?', const=0, type=int) 143 143 parser.add_argument('--list-comp', help='List all valide arguments', action='store_true') 144 144 parser.add_argument('--list-dist', help='List all tests for distribution', action='store_true') … … 195 195 # build, skipping to next test on error 196 196 with Timed() as comp_dur: 197 make_ret, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )197 make_ret, _, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file ) 198 198 199 199 # ---------- … … 208 208 if settings.dry_run or is_exe(exe_file): 209 209 # run test 210 retcode, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)210 retcode, _, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True) 211 211 else : 212 212 # simply cat the result into the output … … 226 226 else : 227 227 # fetch return code and error from the diff command 228 retcode, error = diff(cmp_file, out_file)228 retcode, error, _ = diff(cmp_file, out_file) 229 229 230 230 else: … … 366 366 print(os.path.relpath(t.expect(), settings.SRCDIR), end=' ') 367 367 print(os.path.relpath(t.input() , settings.SRCDIR), end=' ') 368 code, out = make_recon(t.target())368 code, out, err = make_recon(t.target()) 369 369 370 370 if code != 0: 371 print('ERROR: recond failed for test {} '.format(t.target()), file=sys.stderr)371 print('ERROR: recond failed for test {}: {} \'{}\''.format(t.target(), code, err), file=sys.stderr) 372 372 sys.exit(1) 373 373 … … 417 417 if is_empty(t.expect()): 418 418 print('WARNING: test "{}" has empty .expect file'.format(t.target()), file=sys.stderr) 419 420 options.jobs = job_count( options ) 419 421 420 422 # for each build configurations, run the test … … 430 432 local_tests = settings.ast.filter( tests ) 431 433 local_tests = settings.arch.filter( local_tests ) 432 options.jobs, forceJobs = job_count( options, local_tests )433 settings.update_make_cmd(forceJobs, options.jobs)434 434 435 435 # check the build configuration works 436 436 settings.validate() 437 jobs = min(options.jobs, len(local_tests)) 437 438 438 439 # print configuration … … 440 441 'Regenerating' if settings.generating else 'Running', 441 442 len(local_tests), 442 options.jobs,443 jobs, 443 444 settings.ast.string, 444 445 settings.arch.string, … … 450 451 451 452 # otherwise run all tests and make sure to return the correct error code 452 failed = run_tests(local_tests, options.jobs)453 failed = run_tests(local_tests, jobs) 453 454 if failed: 454 455 if not settings.continue_:
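The reworked -j option above uses argparse's nargs='?' with const=0, so a bare -j now means "size the job count to the hardware (or to make's jobserver)", while -j N still pins it exactly. A small sketch of that argparse behaviour, assuming the same flag names (illustration only, not part of the changeset):

# Illustrative sketch (not from this changeset) of the '-j' option's three cases.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-j', '--jobs', nargs='?', const=0, type=int,
                    help='number of tests to run simultaneously, 0 (default) for unlimited')

print(parser.parse_args([]).jobs)           # None -> defer to make's flags, otherwise run serially
print(parser.parse_args(['-j']).jobs)       # 0    -> size the job count to the available hardware
print(parser.parse_args(['-j', '4']).jobs)  # 4    -> run exactly four tests at a time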