Changes in / [2e9b59b:ba897d21]


Files:
6 added
76 deleted
135 edited

  • Jenkinsfile

    r2e9b59b rba897d21  
    108108
    109109                        // Configure libcfa
    110                         sh 'make -j $(nproc) --no-print-directory configure-libcfa'
     110                        sh 'make -j 8 --no-print-directory configure-libcfa'
    111111                }
    112112        }
     
    116116                dir (BuildDir) {
    117117                        // Build driver
    118                         sh 'make -j $(nproc) --no-print-directory -C driver'
     118                        sh 'make -j 8 --no-print-directory -C driver'
    119119
    120120                        // Build translator
    121                         sh 'make -j $(nproc) --no-print-directory -C src'
     121                        sh 'make -j 8 --no-print-directory -C src'
    122122                }
    123123        }
     
    126126                // Build outside of the src tree to ease cleaning
    127127                dir (BuildDir) {
    128                         sh "make -j \$(nproc) --no-print-directory -C libcfa/${Settings.Architecture.name}-debug"
     128                        sh "make -j 8 --no-print-directory -C libcfa/${Settings.Architecture.name}-debug"
    129129                }
    130130        }
     
    133133                // Build outside of the src tree to ease cleaning
    134134                dir (BuildDir) {
    135                         sh "make -j \$(nproc) --no-print-directory -C libcfa/${Settings.Architecture.name}-nodebug"
     135                        sh "make -j 8 --no-print-directory -C libcfa/${Settings.Architecture.name}-nodebug"
    136136                }
    137137        }
     
    140140                // Build outside of the src tree to ease cleaning
    141141                dir (BuildDir) {
    142                         sh 'make -j $(nproc) --no-print-directory install'
     142                        sh "make -j 8 --no-print-directory install"
    143143                }
    144144        }
     
    161161                Tools.BuildStage('Test: full', Settings.RunAllTests) {
    162162                        dir (BuildDir) {
    163                                         jopt = '-j $(nproc)'
     163                                        jopt = ""
    164164                                        if( Settings.Architecture.node == 'x86' ) {
    165                                                 jopt = '-j2'
     165                                                jopt = "-j2"
    166166                                        }
    167167                                        //Run the tests from the tests directory
  • benchmark/io/http/main.cfa

    r2e9b59b rba897d21  
    3333//============================================================================================='
    3434
    35 thread StatsPrinter {
    36         Worker * workers;
    37         int worker_cnt;
    38 };
     35thread StatsPrinter {};
    3936
    4037void ?{}( StatsPrinter & this, cluster & cl ) {
    4138        ((thread&)this){ "Stats Printer Thread", cl };
    42         this.worker_cnt = 0;
    4339}
    4440
    4541void ^?{}( StatsPrinter & mutex this ) {}
    46 
    47 #define eng3(X) (ws(3, 3, unit(eng( X ))))
    4842
    4943void main(StatsPrinter & this) {
     
    5751
    5852                print_stats_now( *active_cluster(), CFA_STATS_READY_Q | CFA_STATS_IO );
    59                 if(this.worker_cnt != 0) {
    60                         uint64_t tries = 0;
    61                         uint64_t calls = 0;
    62                         uint64_t header = 0;
    63                         uint64_t splcin = 0;
    64                         uint64_t splcot = 0;
    65                         struct {
    66                                 volatile uint64_t calls;
    67                                 volatile uint64_t bytes;
    68                         } avgrd[zipf_cnts];
    69                         memset(avgrd, 0, sizeof(avgrd));
    70 
    71                         for(i; this.worker_cnt) {
    72                                 tries += this.workers[i].stats.sendfile.tries;
    73                                 calls += this.workers[i].stats.sendfile.calls;
    74                                 header += this.workers[i].stats.sendfile.header;
    75                                 splcin += this.workers[i].stats.sendfile.splcin;
    76                                 splcot += this.workers[i].stats.sendfile.splcot;
    77                                 for(j; zipf_cnts) {
    78                                         avgrd[j].calls += this.workers[i].stats.sendfile.avgrd[j].calls;
    79                                         avgrd[j].bytes += this.workers[i].stats.sendfile.avgrd[j].bytes;
    80                                 }
    81                         }
    82 
    83                         double ratio = ((double)tries) / calls;
    84 
    85                         sout | "----- Worker Stats -----";
    86                         sout | "sendfile  : " | calls | "calls," | tries | "tries (" | ratio | " try/call)";
    87                         sout | "            " | header | "header," | splcin | "splice in," | splcot | "splice out";
    88                         sout | " - zipf sizes:";
    89                         for(i; zipf_cnts) {
    90                                 double written = avgrd[i].calls > 0 ? ((double)avgrd[i].bytes) / avgrd[i].calls : 0;
    91                                 sout | "        " | zipf_sizes[i] | "bytes," | avgrd[i].calls | "shorts," | written | "written";
    92                         }
    93                 }
    94                 else {
    95                         sout | "No Workers!";
    96                 }
    9753        }
    9854}
     
    262218                        {
    263219                                Worker * workers = anew(options.clopts.nworkers);
    264                                 cl[0].prnt->workers = workers;
    265                                 cl[0].prnt->worker_cnt = options.clopts.nworkers;
    266220                                for(i; options.clopts.nworkers) {
    267221                                        // if( options.file_cache.fixed_fds ) {
     
    357311        }
    358312}
    359 
    360 const size_t zipf_sizes[] = { 102, 204, 307, 409, 512, 614, 716, 819, 921, 1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192, 9216, 10240, 20480, 30720, 40960, 51200, 61440, 71680, 81920, 92160, 102400, 204800, 307200, 409600, 512000, 614400, 716800, 819200, 921600 };
    361 static_assert(zipf_cnts == sizeof(zipf_sizes) / sizeof(zipf_sizes[0]));
  • benchmark/io/http/parhttperf

    r2e9b59b rba897d21  
    66
    77mkdir -p out
    8 rm out/*
    9 echo "httperf --client [0-$(($NTHREADS - 1))]/$NTHREADS $@ > out/result.[0-$(($NTHREADS - 1))].out"
     8rm -v out/*
    109for ((i=0; i<$NTHREADS; i++))
    1110do
     11        # echo "httperf --client $i/$NTHREADS $@ > out/result.$i.out"
    1212        httperf --client $i/$NTHREADS $@ > out/result.$i.out &
    1313done
  • benchmark/io/http/protocol.cfa

    r2e9b59b rba897d21  
    2424
    2525#include "options.hfa"
    26 #include "worker.hfa"
    2726
    2827#define PLAINTEXT_1WRITE
     
    157156
    158157                count -= ret;
     158                offset += ret;
    159159                size_t in_pipe = ret;
    160160                SPLICE2: while(in_pipe > 0) {
     
    249249}
    250250
    251 static inline int wait_and_process(header_g & this, sendfile_stats_t & stats) {
     251static inline int wait_and_process(header_g & this) {
    252252        wait(this.f);
    253253
     
    278278        }
    279279
    280         stats.header++;
    281 
    282280        // It must be a Short read
    283281        this.len  -= this.f.result;
     
    291289        io_future_t f;
    292290        int fd; int pipe; size_t len; off_t off;
    293         short zipf_idx;
    294291        FSM_Result res;
    295292};
     
    300297        this.len = len;
    301298        this.off = 0;
    302         this.zipf_idx = -1;
    303         STATS: for(i; zipf_cnts) {
    304                 if(len <= zipf_sizes[i]) {
    305                         this.zipf_idx = i;
    306                         break STATS;
    307                 }
    308         }
    309         if(this.zipf_idx < 0) mutex(serr) serr | "SPLICE IN" | len | " greater than biggest zipf file";
    310299}
    311300
     
    323312}
    324313
    325 static inline int wait_and_process(splice_in_t & this, sendfile_stats_t & stats ) {
     314static inline int wait_and_process(splice_in_t & this) {
    326315        wait(this.f);
    327316
     
    339328                        return error(this.res, -ECONNRESET);
    340329                }
    341                 mutex(serr) serr | "SPLICE IN got" | error | ", WTF!";
    342                 return error(this.res, -ECONNRESET);
    343330        }
    344331
     
    353340                return done(this.res);
    354341        }
    355 
    356         stats.splcin++;
    357         stats.avgrd[this.zipf_idx].calls++;
    358         stats.avgrd[this.zipf_idx].bytes += this.f.result;
    359342
    360343        // It must be a Short read
     
    398381}
    399382
    400 static inline void wait_and_process(splice_out_g & this, sendfile_stats_t & stats ) {
     383static inline void wait_and_process(splice_out_g & this) {
    401384        wait(this.f);
    402385
     
    414397                        return error(this, -ECONNRESET);
    415398                }
    416                 mutex(serr) serr | "SPLICE OUT got" | error | ", WTF!";
    417                 return error(this, -ECONNRESET);
    418399        }
    419400
     
    430411
    431412SHORT_WRITE:
    432         stats.splcot++;
    433 
    434413        // It must be a Short Write
    435414        this.len -= this.f.result;
     
    438417}
    439418
    440 int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t fsize, sendfile_stats_t & stats ) {
    441         stats.calls++;
     419int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t fsize ) {
    442420        #if defined(LINKED_IO)
    443421                char buffer[512];
     
    448426
    449427                RETRY_LOOP: for() {
    450                         stats.tries++;
    451428                        int have = need(header.res) + need(splice_in.res) + 1;
    452429                        int idx = 0;
     
    467444                        // we may need to kill the connection if it fails
    468445                        // If it already completed, this is a no-op
    469                         wait_and_process(splice_in, stats);
     446                        wait_and_process(splice_in);
    470447
    471448                        if(is_error(splice_in.res)) {
     
    475452
    476453                        // Process the other 2
    477                         wait_and_process(header, stats);
    478                         wait_and_process(splice_out, stats);
     454                        wait_and_process(header);
     455                        wait_and_process(splice_out);
    479456
    480457                        if(is_done(splice_out.res)) {
     
    496473                return len + fsize;
    497474        #else
    498                 stats.tries++;
    499475                int ret = answer_header(fd, fsize);
    500476                if( ret < 0 ) { close(fd); return ret; }
  • benchmark/io/http/protocol.hfa

    r2e9b59b rba897d21  
    11#pragma once
    2 
    3 struct sendfile_stats_t;
    42
    53enum HttpCode {
     
    2018int answer_plaintext( int fd );
    2119int answer_empty( int fd );
    22 int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t count, struct sendfile_stats_t & );
     20int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t count );
    2321
    2422[HttpCode code, bool closed, * const char file, size_t len] http_read(int fd, []char buffer, size_t len);
  • benchmark/io/http/worker.cfa

    r2e9b59b rba897d21  
    2323        this.pipe[1] = -1;
    2424        this.done = false;
    25 
    26         this.stats.sendfile.calls = 0;
    27         this.stats.sendfile.tries = 0;
    28         this.stats.sendfile.header = 0;
    29         this.stats.sendfile.splcin = 0;
    30         this.stats.sendfile.splcot = 0;
    31         for(i; zipf_cnts) {
    32                 this.stats.sendfile.avgrd[i].calls = 0;
    33                 this.stats.sendfile.avgrd[i].bytes = 0;
    34         }
    3525}
    3626
     
    133123
    134124                        // Send the desired file
    135                         int ret = answer_sendfile( this.pipe, fd, ans_fd, count, this.stats.sendfile );
     125                        int ret = answer_sendfile( this.pipe, fd, ans_fd, count);
    136126                        if( ret == -ECONNRESET ) break REQUEST;
    137127
  • benchmark/io/http/worker.hfa

    r2e9b59b rba897d21  
    1111//=============================================================================================
    1212
    13 extern const size_t zipf_sizes[];
    14 enum { zipf_cnts = 36, };
    15 
    16 struct sendfile_stats_t {
    17         volatile uint64_t calls;
    18         volatile uint64_t tries;
    19         volatile uint64_t header;
    20         volatile uint64_t splcin;
    21         volatile uint64_t splcot;
    22         struct {
    23                 volatile uint64_t calls;
    24                 volatile uint64_t bytes;
    25         } avgrd[zipf_cnts];
    26 };
    27 
    2813thread Worker {
    2914        int pipe[2];
     
    3318        int flags;
    3419        volatile bool done;
    35         struct {
    36                 sendfile_stats_t sendfile;
    37         } stats;
    3820};
    3921void ?{}( Worker & this);
  • benchmark/plot.py

    r2e9b59b rba897d21  
    4040}
    4141
    42 def plot(data, x, y, out):
     42def plot(data, x, y):
    4343        fig, ax = plt.subplots()
    4444        colors = itertools.cycle(['#0095e3','#006cb4','#69df00','#0aa000','#fb0300','#e30002','#fd8f00','#ff7f00','#8f00d6','#4b009a','#ffff00','#b13f00'])
     
    6767        ax.yaxis.set_major_formatter( EngFormatter(unit=field_names[y].unit) )
    6868        plt.legend(loc='upper left')
    69         if out:
    70                 plt.savefig(out)
    71         else:
    72                 plt.show()
     69        plt.show()
    7370
    7471
     
    7875        parser = argparse.ArgumentParser(description='Python Script to draw R.M.I.T. results')
    7976        parser.add_argument('-f', '--file', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
    80         parser.add_argument('-o', '--out', nargs='?', type=str, default=None)
    81         parser.add_argument('-y', nargs='?', type=str, default="")
    8277
    8378        try:
     
    108103                        fields.add(label)
    109104
    110         if not options.out :
    111                 print(series)
    112                 print("fields")
    113                 for f in fields:
    114                         print("{}".format(f))
     105        print(series)
     106        print("fields")
     107        for f in fields:
     108                print("{}".format(f))
    115109
    116         if options.y and options.y in field_names.keys():
    117                 plot(data, "Number of processors", options.y, options.out)
    118         else:
    119                 if options.y:
    120                         print("Could not find key '{}', defaulting to 'ns per ops'".format(options.y))
    121                 plot(data, "Number of processors", "ns per ops", options.out)
     110        plot(data, "Number of processors", "ns per ops")
  • doc/LaTeXmacros/common.sty

    r2e9b59b rba897d21  
    1111%% Created On       : Sat Apr  9 10:06:17 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Sat Apr  2 17:35:23 2022
    14 %% Update Count     : 570
     13%% Last Modified On : Mon Feb  7 23:00:46 2022
     14%% Update Count     : 569
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
     
    301301  {=>}{$\Rightarrow$}2
    302302  {->}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex\textgreater}2,
     303defaultdialect={CFA},
    303304}% lstset
    304305}% CFAStyle
  • doc/LaTeXmacros/common.tex

    r2e9b59b rba897d21  
    1111%% Created On       : Sat Apr  9 10:06:17 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Sat Apr  2 16:42:31 2022
    14 %% Update Count     : 553
     13%% Last Modified On : Mon Feb  7 23:00:08 2022
     14%% Update Count     : 552
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
     
    306306  {=>}{$\Rightarrow$}2
    307307  {->}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex\textgreater}2,
     308defaultdialect={CFA},
    308309}% lstset
    309310}% CFAStyle
  • doc/theses/mubeen_zulfiqar_MMath/Makefile

    r2e9b59b rba897d21  
    1 # Configuration variables
    2 
     1# directory for latex clutter files
    32Build = build
    43Figures = figures
    54Pictures = pictures
    6 
    7 LaTMac = ../../LaTeXmacros
    8 BibRep = ../../bibliography
    9 
    105TeXSRC = ${wildcard *.tex}
    116FigSRC = ${notdir ${wildcard ${Figures}/*.fig}}
    127PicSRC = ${notdir ${wildcard ${Pictures}/*.fig}}
    13 BibSRC = ${wildcard *.bib}
    14 
    15 TeXLIB = .:${LaTMac}:${Build}:
    16 BibLIB = .:${BibRep}:
     8BIBSRC = ${wildcard *.bib}
     9TeXLIB = .:../../LaTeXmacros:${Build}: # common latex macros
     10BibLIB = .:../../bibliography # common citation repository
    1711
    1812MAKEFLAGS = --no-print-directory # --silent
    1913VPATH = ${Build} ${Figures} ${Pictures} # extra search path for file names used in document
    2014
    21 DOCUMENT = uw-ethesis.pdf
    22 BASE = ${basename ${DOCUMENT}}                  # remove suffix
     15### Special Rules:
    2316
    24 # Commands
     17.PHONY: all clean
     18.PRECIOUS: %.dvi %.ps # do not delete intermediate files
     19
     20### Commands:
    2521
    2622LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
    27 BibTeX = BIBINPUTS=${BibLIB} && export BIBINPUTS && bibtex
     23BibTeX = BIBINPUTS=${BibLIB} bibtex
    2824#Glossary = INDEXSTYLE=${Build} makeglossaries-lite
    2925
    30 # Rules and Recipes
     26### Rules and Recipes:
    3127
    32 .PHONY : all clean                              # not file names
    33 .PRECIOUS: %.dvi %.ps # do not delete intermediate files
    34 .ONESHELL :
     28DOC = uw-ethesis.pdf
     29BASE = ${DOC:%.pdf=%} # remove suffix
    3530
    36 all : ${DOCUMENT}
     31all: ${DOC}
    3732
    38 clean :
    39         @rm -frv ${DOCUMENT} ${Build}
     33clean:
     34        @rm -frv ${DOC} ${Build}
    4035
    41 # File Dependencies
     36# File Dependencies #
    4237
    43 %.dvi : ${TeXSRC} ${FigSRC:%.fig=%.tex} ${PicSRC:%.fig=%.pstex} ${BibSRC} ${BibRep}/pl.bib ${LaTMac}/common.tex Makefile | ${Build}
     38${Build}/%.dvi : ${TeXSRC} ${FigSRC:%.fig=%.tex} ${PicSRC:%.fig=%.pstex} ${BIBSRC} Makefile | ${Build}
    4439        ${LaTeX} ${BASE}
    4540        ${BibTeX} ${Build}/${BASE}
    4641        ${LaTeX} ${BASE}
    47         # if needed, run latex again to get citations
     42        # if needed, run latex again to get citations
    4843        if fgrep -s "LaTeX Warning: Citation" ${basename $@}.log ; then ${LaTeX} ${BASE} ; fi
    4944#       ${Glossary} ${Build}/${BASE}
     
    5146
    5247${Build}:
    53         mkdir -p $@
     48        mkdir $@
    5449
    5550%.pdf : ${Build}/%.ps | ${Build}
  • doc/theses/mubeen_zulfiqar_MMath/allocator.tex

    r2e9b59b rba897d21  
    11\chapter{Allocator}
    22
    3 This chapter presents a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code), called llheap (low-latency heap), for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
    4 The new allocator fulfills the GNU C Library allocator API~\cite{GNUallocAPI}.
    5 
    6 
    7 \section{llheap}
    8 
    9 The primary design objective for llheap is low-latency across all allocator calls independent of application access-patterns and/or number of threads, \ie very seldom does the allocator have a delay during an allocator call.
    10 (Large allocations requiring initialization, \eg zero fill, and/or copying are not covered by the low-latency objective.)
    11 A direct consequence of this objective is very simple or no storage coalescing;
    12 hence, llheap's design is willing to use more storage to lower latency.
    13 This objective is apropos because systems research and industrial applications are striving for low latency and computers have huge amounts of RAM.
    14 Finally, llheap's performance should be comparable with the current best allocators (see performance comparison in \VRef[Chapter]{c:Performance}).
    15 
    16 % The objective of llheap's new design was to fulfill following requirements:
    17 % \begin{itemize}
    18 % \item It should be concurrent and thread-safe for multi-threaded programs.
    19 % \item It should avoid global locks, on resources shared across all threads, as much as possible.
    20 % \item It's performance (FIX ME: cite performance benchmarks) should be comparable to the commonly used allocators (FIX ME: cite common allocators).
    21 % \item It should be a lightweight memory allocator.
    22 % \end{itemize}
     3\section{uHeap}
     4uHeap is a lightweight memory allocator. The objective behind uHeap is to design a minimal concurrent memory allocator that has new features and also fulfills GNU C Library requirements (FIX ME: cite requirements).
     5
     6The objective of uHeap's new design was to fulfill the following requirements:
     7\begin{itemize}
     8\item It should be concurrent and thread-safe for multi-threaded programs.
     9\item It should avoid global locks, on resources shared across all threads, as much as possible.
     10\item Its performance (FIX ME: cite performance benchmarks) should be comparable to the commonly used allocators (FIX ME: cite common allocators).
     11\item It should be a lightweight memory allocator.
     12\end{itemize}
    2313
    2414%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    2515
    26 <<<<<<< HEAD
    2716\section{Design choices for uHeap}\label{sec:allocatorSec}
    2817uHeap's design was reviewed and changed to fulfill new requirements (FIX ME: cite allocator philosophy). For this purpose, the following designs of uHeapLmm were proposed:
    29 =======
    30 \section{Design Choices}
    31 >>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961
    32 
    33 llheap's design was reviewed and changed multiple times throughout the thesis.
    34 Some of the rejected designs are discussed because they show the path to the final design (see discussion in \VRef{s:MultipleHeaps}).
    35 Note, a few simple tests of a design choice were compared with the current best allocators to determine its viability.
    36 
    37 
    38 \subsection{Allocation Fastpath}
    39 \label{s:AllocationFastpath}
    40 
    41 These designs look at the allocation/free \newterm{fastpath}, \ie when an allocation can immediately return free storage or returned storage is not coalesced.
    42 \paragraph{T:1 model}
    43 \VRef[Figure]{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are N-shared across KTs.
    44 This design leverages the fact that 95\% of allocation requests are less than 1024 bytes and there are only 3--5 different request sizes.
    45 When KTs $\le$ N, the common bucket sizes are uncontended;
    46 when KTs $>$ N, the free buckets are contended and latency increases significantly.
    47 In all cases, a KT must acquire/release a lock, contended or uncontended, along the fast allocation path because a bucket is shared.
    48 Therefore, while threads are contending for a small number of bucket sizes, the buckets are distributed among them to reduce contention, which lowers latency;
    49 however, picking N is workload specific.
     18
     19\paragraph{Design 1: Centralized}
     20One heap, but lower bucket sizes are N-shared across KTs.
     21This design leverages the fact that 95\% of allocation requests are less than 512 bytes and there are only 3--5 different request sizes.
     22When KTs $\le$ N, the important bucket sizes are uncontended.
     23When KTs $>$ N, the free buckets are contended.
     24Therefore, threads are only contending for a small number of buckets, which are distributed among them to reduce contention.
     25\begin{cquote}
     26\centering
     27\input{AllocDS2}
     28\end{cquote}
     29Problems: need to know when a kernel thread (KT) is created and destroyed to know when to assign a shared bucket-number.
     30When no thread is assigned a bucket number, its free storage is unavailable. All KTs will contend for one lock on sbrk for their initial allocations (before free-lists get populated).
     31
     32\paragraph{Design 2: Decentralized N Heaps}
     33Fixed number of heaps: shard the heap into N heaps each with a bump-area allocated from the @sbrk@ area.
     34Kernel threads (KT) are assigned to the N heaps.
     35When KTs $\le$ N, the heaps are uncontended.
     36When KTs $>$ N, the heaps are contended.
     37By adjusting N, this approach reduces storage at the cost of speed due to contention.
     38In all cases, a thread acquires/releases a lock, contended or uncontended.
     39\begin{cquote}
     40\centering
     41\input{AllocDS1}
     42\end{cquote}
     43Problems: need to know when a KT is created and destroyed to know when to assign/un-assign a heap to the KT.
     44
     45\paragraph{Design 3: Decentralized Per-thread Heaps}
     46Design 3 is similar to Design 2 but instead of having an M:N model, it uses a 1:1 model. So, instead of having N heaps and sharing them among M KTs, Design 3 has one heap for each KT.
     47Dynamic number of heaps: create a thread-local heap for each kernel thread (KT) with a bump-area allocated from the @sbrk@ area.
     48Each KT will have its own exclusive thread-local heap. Heaps will be uncontended between KTs regardless of how many KTs have been created.
     49Operations on @sbrk@ area will still be protected by locks.
     50%\begin{cquote}
     51%\centering
     52%\input{AllocDS3} FIXME add figs
     53%\end{cquote}
     54Problems: The heap cannot be destroyed when a KT exits because dynamic objects have ownership and are returned to the heap that created them when the program frees them. All dynamic objects point back to their owner heap. If a thread A creates an object O, passes it to another thread B, and then exits, O must return to A's heap when B frees it, so A's heap must be preserved for the lifetime of the whole program as there might be in-use objects of other threads that were allocated by A. Also, we need to know when a KT is created and destroyed to know when to create/destroy a heap for the KT.
     55
     56\paragraph{Design 4: Decentralized Per-CPU Heaps}
     57Design 4 is similar to Design 3 but instead of having a heap for each thread, it creates a heap for each CPU.
     58Fixed number of heaps for a machine: create a heap for each CPU with a bump-area allocated from the @sbrk@ area.
     59Each CPU will have its own CPU-local heap. When the program does a dynamic memory operation, it will be serviced by the heap of the CPU on which the thread is currently running.
     60Each CPU will have its own exclusive heap. Just like Design 3 (FIXME cite), heaps will be uncontended between KTs regardless of how many KTs have been created.
     61Operations on @sbrk@ area will still be protected by locks.
     62To deal with preemption during a dynamic memory operation, librseq (FIXME cite) will be used to make sure that the whole dynamic memory operation completes on one CPU. librseq's restartable sequences make it possible to re-run a critical section and undo its writes if a preemption happened during the critical section's execution.
     63%\begin{cquote}
     64%\centering
     65%\input{AllocDS4} FIXME add figs
     66%\end{cquote}
     67
     68Problems: This approach was slower than the per-thread model. Also, librseq does not provide restartable sequences that detect preemptions in a user-level threading system, which is important to us as CFA (FIXME cite) has its own threading system that we want to support.
     69
     70Out of the four designs, Design 3 was chosen for the following reasons.
     71\begin{itemize}
     72\item
     73Decentralized designs are better in general than a centralized design because their concurrency is better across all bucket sizes: Design 1 shards a few buckets of selected sizes while the other designs shard all the buckets. Decentralized designs shard the whole heap, which has all the buckets, with the addition of sharding the sbrk area. So Design 1 was eliminated.
     74\item
     75Design 2 was eliminated because it has a possibility of contention in case KTs > N, while Designs 3 and 4 have no contention in any scenario.
     76\item
     77Design 4 was eliminated because it was slower than Design 3 and it provided no way to achieve user-threading safety using librseq. We had to use CFA interruption handling to achieve user-threading safety, which has some cost. Design 4 was already slower than Design 3; adding the cost of interruption handling on top of that would have made it even slower.
     78\end{itemize}
     79
     80
     81\subsection{Advantages of distributed design}
     82
     83The distributed design of uHeap is concurrent, so it works well in multi-threaded applications.
     84
     85Some key benefits of the distributed design of uHeap are as follows:
     86
     87\begin{itemize}
     88\item
     89The bump allocation is concurrent as memory taken from sbrk is sharded across all heaps as bump allocation reserve. The call to sbrk will be protected using locks but bump allocation (on memory taken from sbrk) will not be contended once the sbrk call has returned.
     90\item
     91Low or almost no contention on heap resources.
     92\item
     93It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty.
     94\item
     95Distributed design avoids unnecessary locks on resources shared across all KTs.
     96\end{itemize}
     97
     98%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
     99
     100\section{uHeap Structure}
     101
     102As described in (FIXME cite 2.4), uHeap uses the following features of multi-threaded memory allocators.
     103\begin{itemize}
     104\item
     105uHeap has multiple heaps without a global heap and uses 1:1 model. (FIXME cite 2.5 1:1 model)
     106\item
     107uHeap uses object ownership. (FIXME cite 2.5.2)
     108\item
     109uHeap does not use object containers (FIXME cite 2.6) or any coalescing technique. Instead, each dynamic object allocated by uHeap has a header that contains bookkeeping information.
     110\item
     111Each thread-local heap in uHeap has its own allocation buffer that is taken from the system using the sbrk() call. (FIXME cite 2.7)
     112\item
     113Unless a heap is freeing an object that is owned by another thread's heap, or the heap is using the sbrk() system call, uHeap is mostly lock-free, which eliminates most of the contention on shared resources. (FIXME cite 2.8)
     114\end{itemize}
     115
     116As uHeap uses a heap per-thread model to reduce contention on heap resources, we manage a list of heaps (heap-list) that can be used by threads. The list is empty at the start of the program. When a kernel thread (KT) is created, we check if the heap-list is empty. If not, a heap is removed from the heap-list and given to the new KT to use exclusively; otherwise, a new heap object is created in dynamic memory and given to the new KT to use exclusively. When a KT exits, its heap is not destroyed; instead, it is put on the heap-list, ready to be reused by new KTs.
     117
     118This reduces the memory footprint as the objects on the free-lists of a KT that has exited can be reused by a new KT. Also, we preserve all the heaps that were created during the lifetime of the program till the end of the program. uHeap uses object ownership, where an object is freed to the free-buckets of the heap that allocated it. Even after a KT A has exited, its heap has to be preserved as there might be in-use objects of other threads that were initially allocated by A and then passed to other threads.
    50119
    51120\begin{figure}
    52121\centering
    53 \input{AllocDS1}
    54 \caption{T:1 with Shared Buckets}
    55 \label{f:T1SharedBuckets}
    56 \end{figure}
    57 
    58 Problems:
    59 \begin{itemize}
    60 \item
    61 Need to know when a KT is created/destroyed to assign/unassign a shared bucket-number from the memory allocator.
    62 \item
    63 When no thread is assigned a bucket number, its free storage is unavailable.
    64 \item
    65 All KTs contend for the global-pool lock for initial allocations, before free-lists get populated.
    66 \end{itemize}
    67 Tests showed having locks along the allocation fast-path produced a significant increase in allocation costs and any contention among KTs produces a significant spike in latency.
    68 
    69 \paragraph{T:H model}
    70 \VRef[Figure]{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded across the KTs.
    71 A KT can point directly to its assigned heap or indirectly through the corresponding heap bucket.
    72 When KTs $\le$ N, the heaps are uncontended;
    73 when KTs $>$ N, the heaps are contended.
    74 In all cases, a KT must acquire/release a lock, contended or uncontended, along the fast allocation path because a heap is shared.
    75 By adjusting N upwards, this approach reduces contention but increases storage (time versus space);
    76 however, picking N is workload specific.
    77 
    78 \begin{figure}
    79 \centering
    80 \input{AllocDS2}
    81 \caption{T:H with Shared Heaps}
    82 \label{f:THSharedHeaps}
    83 \end{figure}
    84 
    85 Problems:
    86 \begin{itemize}
    87 \item
    88 Need to know when a KT is created/destroyed to assign/unassign a heap from the memory allocator.
    89 \item
    90 When no thread is assigned to a heap, its free storage is unavailable.
    91 \item
    92 Ownership issues arise (see \VRef{s:Ownership}).
    93 \item
    94 All KTs contend for the local/global-pool lock for initial allocations, before free-lists get populated.
    95 \end{itemize}
    96 Tests showed having locks along the allocation fast-path produced a significant increase in allocation costs and any contention among KTs produces a significant spike in latency.
    97 
    98 \paragraph{T:H model, H = number of CPUs}
    99 This design is the T:H model but H is set to the number of CPUs on the computer or the number restricted to an application, \eg via @taskset@.
    100 (See \VRef[Figure]{f:THSharedHeaps} but with a heap bucket per CPU.)
    101 Hence, each CPU logically has its own private heap and local pool.
    102 A memory operation is serviced from the heap associated with the CPU executing the operation.
    103 This approach removes fastpath locking and contention, regardless of the number of KTs mapped across the CPUs, because only one KT is running on each CPU at a time (modulo operations on the global pool and ownership).
    104 This approach is essentially an M:N approach where M is the number of KTs and N is the number of CPUs.
    105 
    106 Problems:
    107 \begin{itemize}
    108 \item
    109 Need to know when a CPU is added/removed from the @taskset@.
    110 \item
    111 Need a fast way to determine the CPU a KT is executing on to access the appropriate heap.
    112 \item
    113 Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}.
    114 \begin{quote}
    115 A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}
    116 \end{quote}
    117 If a KT is preempted during an allocation operation, the operating system can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.
    118 Note, the serially-reusable problem can occur in sequential programs with preemption, if the signal handler calls the preempted function, unless the function is serially reusable.
    119 Essentially, the serially-reusable problem is a race condition on an unprotected critical section, where the operating system is providing the second thread via the signal handler.
    120 
    121 Library @librseq@~\cite{librseq} was used to perform a fast determination of the CPU and to ensure all memory operations complete on one CPU using @librseq@'s restartable sequences, which restart the critical section after undoing its writes, if the critical section is preempted.
    122 \end{itemize}
    123 Tests showed that @librseq@ can determine the particular CPU quickly but setting up the restartable critical-section along the allocation fast-path produced a significant increase in allocation costs.
    124 Also, the number of undoable writes in @librseq@ is limited and restartable sequences cannot deal with user-level thread (UT) migration across KTs.
    125 For example, UT$_1$ is executing a memory operation by KT$_1$ on CPU$_1$ and a time-slice preemption occurs.
    126 The signal handler context switches UT$_1$ onto the user-level ready-queue and starts running UT$_2$ on KT$_1$, which immediately calls a memory operation.
    127 Since KT$_1$ is still executing on CPU$_1$, @librseq@ takes no action because it assumes KT$_1$ is still executing the same critical section.
    128 Then UT$_1$ is scheduled onto KT$_2$ by the user-level scheduler, and its memory operation continues in parallel with UT$_2$ using references into the heap associated with CPU$_1$, which corrupts CPU$_1$'s heap.
    129 If @librseq@ had an @rseq_abort@ which:
    130 \begin{enumerate}
    131 \item
    132 Marked the current restartable critical-section as cancelled so it restarts when attempting to commit.
    133 \item
    134 Did nothing if there is no current restartable critical-section in progress.
    135 \end{enumerate}
    136 Then @rseq_abort@ could be called on the backside of a user-level context switch.
    137 A feature similar to this idea might exist for hardware transactional-memory.
    138 A significant effort was made to make this approach work but its complexity, lack of robustness, and performance costs resulted in its rejection.
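
The CPU-id lookup itself is cheap. The following is a minimal sketch of the T:H=CPU heap selection, assuming a hypothetical @heaps@ array indexed by CPU id; it deliberately ignores the serially-reusable problem discussed above, which real code must solve.
\begin{lstlisting}
#define _GNU_SOURCE
#include <sched.h>                                      // sched_getcpu

typedef struct heap heap;
extern heap * heaps[];                                  // one heap per CPU, created at startup

static inline heap * my_heap( void ) {
        int cpu = sched_getcpu();                       // fast CPU-id lookup (vDSO on Linux)
        return heaps[cpu];                              // stale if this KT is preempted/migrated
}
\end{lstlisting}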
    139 
    140 \paragraph{1:1 model}
    141 This design is the T:H model with T = H, where there is one thread-local heap for each KT.
    142 (See \VRef[Figure]{f:THSharedHeaps} but with a heap bucket per KT and no bucket or local-pool lock.)
    143 Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted.
    144 Heaps are uncontended for a KT's memory operations to its heap (modulo operations on the global pool and ownership).
    145 
    146 Problems:
    147 \begin{itemize}
    148 \item
    149 Need to know when a KT starts/terminates to create/delete its heap.
    150 
    151 \noindent
    152 It is possible to leverage constructors/destructors for thread-local objects to get a general handle on when a KT starts/terminates.
    153 \item
    154 There is a classic \newterm{memory-reclamation} problem for ownership because storage passed to another thread can be returned to a terminated heap.
    155 
    156 \noindent
    157 The classic solution only deletes a heap after all referents are returned, which is complex.
    158 The cheap alternative is for heaps to persist for program duration to handle outstanding referent frees.
    159 If old referents return storage to a terminated heap, it is handled in the same way as an active heap.
    160 To prevent heap blowup, terminated heaps can be reused by new KTs, where a reused heap may be populated with free storage from a prior KT (external fragmentation).
    161 In most cases, heap blowup is not a problem because programs have a small allocation set-size, so the free storage from a prior KT is apropos for a new KT.
    162 \item
    163 There can be significant external fragmentation as the number of KTs increases.
    164 
    165 \noindent
    166 In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs.
    167 Since the number of CPUs is relatively small ($<$~1024), and a heap is relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
    168 \item
    169 There is the same serially-reusable problem with UTs migrating across KTs.
    170 \end{itemize}
    171 Tests showed this design produced the closest performance match with the best current allocators, and code inspection showed most of these allocators use different variations of this approach.
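
As a sketch of the 1:1 fastpath under these constraints, the following uses a thread-local pointer for lock-free heap access and a pthread-key destructor to recycle the heap at KT termination; @heap_acquire@/@heap_release@ are hypothetical helpers for the heap free-list described below.
\begin{lstlisting}
#include <pthread.h>

typedef struct heap heap;
extern heap * heap_acquire( void );                     // reuse a free heap or create one
extern void heap_release( heap * );                     // chain heap for reuse by a new KT

static pthread_key_t heap_key;
static pthread_once_t heap_once = PTHREAD_ONCE_INIT;
static __thread heap * my_heap;                         // uncontended fastpath access

static void heap_exit( void * h ) { heap_release( (heap *)h ); }
static void heap_key_init( void ) { pthread_key_create( &heap_key, heap_exit ); }

static heap * get_heap( void ) {
        if ( my_heap == NULL ) {                        // first allocation by this KT
                pthread_once( &heap_once, heap_key_init );
                my_heap = heap_acquire();
                pthread_setspecific( heap_key, my_heap ); // destructor runs at KT termination
        }
        return my_heap;
}
\end{lstlisting}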
    172 
    173 
    174 \vspace{5pt}
    175 \noindent
    176 The conclusion from this design exercise is: any atomic fence, atomic instruction (lock free), or lock along the allocation fastpath produces significant slowdown.
    177 For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps may be shared by multiple threads, even when KTs $\le$ N.
    178 For the T:H=CPU and 1:1 models, locking is eliminated along the allocation fastpath.
    179 However, T:H=CPU has poor operating-system support to determine the CPU id (heap id) and prevent the serially-reusable problem for KTs.
    180 More operating system support is required to make this model viable, but there is still the serially-reusable problem with user-level threading.
    181 This leaves the 1:1 model, with no atomic actions along the fastpath and no special operating-system support required.
    182 The 1:1 model still has the serially-reusable problem with user-level threading, which is addressed in \VRef{s:UserlevelThreadingSupport}, and the greatest potential for heap blowup for certain allocation patterns.
    183 
    184 
    185 % \begin{itemize}
    186 % \item
    187 % A decentralized design is better to centralized design because their concurrency is better across all bucket-sizes as design 1 shards a few buckets of selected sizes while other designs shards all the buckets. Decentralized designs shard the whole heap which has all the buckets with the addition of sharding @sbrk@ area. So Design 1 was eliminated.
    188 % \item
    189 % Design 2 was eliminated because it has a possibility of contention in-case of KT > N while Design 3 and 4 have no contention in any scenario.
    190 % \item
    191 % Design 3 was eliminated because it was slower than Design 4 and it provided no way to achieve user-threading safety using librseq. We had to use CFA interruption handling to achieve user-threading safety which has some cost to it.
    192 % that  because of 4 was already slower than Design 3, adding cost of interruption handling on top of that would have made it even slower.
    193 % \end{itemize}
    194 % Of the four designs for a low-latency memory allocator, the 1:1 model was chosen for the following reasons:
    195 
    196 % \subsection{Advantages of distributed design}
    197 %
    198 % The distributed design of llheap is concurrent to work in multi-threaded applications.
    199 % Some key benefits of the distributed design of llheap are as follows:
    200 % \begin{itemize}
    201 % \item
    202 % The bump allocation is concurrent as memory taken from @sbrk@ is sharded across all heaps as bump allocation reserve. The call to @sbrk@ will be protected using locks but bump allocation (on memory taken from @sbrk@) will not be contended once the @sbrk@ call has returned.
    203 % \item
    204 % Low or almost no contention on heap resources.
    205 % \item
    206 % It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty.
    207 % \item
    208 % Distributed design avoids unnecessary locks on resources shared across all KTs.
    209 % \end{itemize}
    210 
    211 \subsection{Allocation Latency}
    212 
    213 A primary goal of llheap is low latency.
    214 Two forms of latency are internal and external.
    215 Internal latency is the time to perform an allocation, while external latency is time to obtain/return storage from/to the operating system.
    216 Ideally latency is $O(1)$ with a small constant.
    217 
    218 To obtain $O(1)$ internal latency means no searching on the allocation fastpath, which largely prohibits coalescing and leads to external fragmentation.
    219 The mitigating factor is that most programs have well behaved allocation patterns, where the majority of allocation operations can be $O(1)$, and heap blowup does not occur without coalescing (although the allocation footprint may be slightly larger).
    220 
    221 To obtain $O(1)$ external latency means obtaining one large storage area from the operating system and subdividing it across all program allocations, which requires a good guess at the program storage high-watermark and risks large external fragmentation.
    222 Excluding real-time operating-systems, operating-system operations are unbounded, and hence some external latency is unavoidable.
    223 The mitigating factor is that operating-system calls can often be reduced if a programmer has a sense of the storage high-watermark and the allocator is capable of using this information (see @malloc_expansion@ \VPageref{p:malloc_expansion}).
    224 Furthermore, while operating-system calls are unbounded, many are now reasonably fast, so their latency is tolerable and infrequent.
    225 
    226 
    227 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    228 
    229 \section{llheap Structure}
    230 
    231 \VRef[Figure]{f:llheapStructure} shows the design of llheap, which uses the following features:
    232 \begin{itemize}
    233 \item
    234 1:1 multiple-heap model to minimize the fastpath,
    235 \item
    236 can be built with or without heap ownership,
    237 \item
    238 headers per allocation versus containers,
    239 \item
    240 no coalescing to minimize latency,
    241 \item
    242 global heap memory (pool) obtained from the operating system using @mmap@ to create and reuse heaps needed by threads,
    243 \item
    244 local reserved memory (pool) per heap obtained from global pool,
    245 \item
    246 global reserved memory (pool) obtained from the operating system using @sbrk@ call,
    247 \item
    248 optional fast-lookup table for converting allocation requests into bucket sizes,
    249 \item
    250 optional statistic-counters table for accumulating counts of allocation operations.
    251 \end{itemize}
    252 
    253 \begin{figure}
    254 \centering
    255 <<<<<<< HEAD
    256122\includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps}
    257123\caption{uHeap Structure}
    258124\label{fig:heapStructureFig}
    259 =======
    260 % \includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps}
    261 \input{llheap}
    262 \caption{llheap Structure}
    263 \label{f:llheapStructure}
    264 >>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961
    265125\end{figure}
    266126
    267 llheap starts by creating an array of $N$ global heaps from storage obtained using @mmap@, where $N$ is the number of computer cores; this array persists for the program duration.
    268 There is a global bump-pointer to the next free heap in the array.
    269 When this array is exhausted, another array is allocated.
    270 There is a global top pointer for a heap intrusive link to chain free heaps from terminated threads.
    271 When statistics are turned on, there is a global top pointer for a heap intrusive link to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.
    272 
    273 When a KT starts, a heap is allocated from the current array for exclusive use by the KT.
    274 When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of heaps.
    275 The free heaps form a stack so hot storage is reused first.
    276 Preserving all heaps created during the program lifetime solves the storage-lifetime problem when ownership is used.
    277 This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially.
    278 llheap can be configured with object ownership, where an object is freed to the heap from which it is allocated, or object no-ownership, where an object is freed to the KT's current heap.
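
A sketch of this global heap management, using hypothetical names and eliding the locking and the statistics chain:
\begin{lstlisting}
typedef struct heap {
        struct heap * next;                             // intrusive link for the free chain
        // buckets, local pool, ...
} heap;

static heap * heap_array;                               // current mmap-ed array of heaps
static unsigned next_heap;                              // bump pointer into heap_array
static heap * free_heaps;                               // stack of heaps from terminated KTs

static heap * heap_acquire( void ) {
        if ( free_heaps != NULL ) {                     // reuse hot heap first
                heap * h = free_heaps;
                free_heaps = h->next;
                return h;
        }
        // bump allocate; mmap a new array when this one is exhausted
        return &heap_array[next_heap++];
}

static void heap_release( heap * h ) {                  // at KT termination
        h->next = free_heaps;                           // heap persists for old referents
        free_heaps = h;
}
\end{lstlisting}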
    279 
    280 Each heap uses segregated free-buckets that have free objects distributed across 91 different sizes from 16 to 4M.
    281 The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the operating system.
    282 Each free bucket of a specific size has the following two lists (sketched below):
    283 \begin{itemize}
    284 \item
    285 A free stack used solely by the KT heap-owner, so push/pop operations do not require locking.
    286 The free objects are a stack so hot storage is reused first.
    287 \item
    288 For ownership, a shared away-stack for KTs to return storage allocated by other KTs, so push/pop operations require locking.
    289 When the free stack is empty, the entire ownership stack is removed and becomes the head of the corresponding free stack.
    290 \end{itemize}
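
A sketch of one bucket under ownership, assuming an intrusive link stored in each free object; only the away stack is locked, and an empty free stack is refilled by taking the entire away stack.
\begin{lstlisting}
#include <pthread.h>

typedef struct free_obj { struct free_obj * next; } free_obj;

typedef struct bucket {
        free_obj * free_stack;                          // owner KT only: no locking
        free_obj * away_stack;                          // remote KTs: lock protected
        pthread_mutex_t away_lock;
} bucket;

static void * bucket_pop( bucket * b ) {                // owner-KT fastpath
        if ( b->free_stack == NULL ) {                  // refill from away stack
                pthread_mutex_lock( &b->away_lock );
                b->free_stack = b->away_stack;
                b->away_stack = NULL;
                pthread_mutex_unlock( &b->away_lock );
        }
        free_obj * o = b->free_stack;
        if ( o != NULL ) b->free_stack = o->next;
        return o;
}
\end{lstlisting}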
    291 
    292 Algorithm~\ref{alg:heapObjectAlloc} shows the allocation outline for an object of size $S$.
    293 First, the allocation is divided into small (@sbrk@) or large (@mmap@).
    294 For large allocations, the storage is mapped directly from the operating system.
    295 For small allocations, $S$ is quantized into a bucket size.
    296 Quantizing is performed using a binary search over the ordered bucket array.
    297 An optional optimization is fast lookup $O(1)$ for sizes < 64K from a 64K array of type @char@, where each element has an index to the corresponding bucket.
    298 (Type @char@ restricts the number of bucket sizes to 256.)
    299 For $S$ > 64K, a binary search is used.
    300 Then, the allocation storage is obtained from the following locations (in order), with increasing latency.
    301 \begin{enumerate}[topsep=0pt,itemsep=0pt,parsep=0pt]
    302 \item
    303 bucket's free stack,
    304 \item
    305 bucket's away stack,
    306 \item
    307 heap's local pool
    308 \item
    309 global pool
    310 \item
    311 operating system (@sbrk@)
    312 \end{enumerate}
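
The quantization step above can be sketched as follows, where @NBUCKETS@, @bucket_sizes@, and @size2bucket@ are illustrative placeholders:
\begin{lstlisting}
#include <stddef.h>

enum { NBUCKETS = 91, TABLE_MAX = 64 * 1024 };
extern const size_t bucket_sizes[NBUCKETS];             // ordered bucket sizes, 16 ... 4M
extern const unsigned char size2bucket[TABLE_MAX];      // optional O(1) lookup for S < 64K

static unsigned bucket_index( size_t s ) {              // assumes s <= bucket_sizes[NBUCKETS-1]
        if ( s < TABLE_MAX ) return size2bucket[s];     // fast lookup
        unsigned lo = 0, hi = NBUCKETS;                 // binary search: first bucket >= s
        while ( lo < hi ) {
                unsigned mid = (lo + hi) / 2;
                if ( bucket_sizes[mid] < s ) lo = mid + 1; else hi = mid;
        }
        return lo;
}
\end{lstlisting}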
    313 
    314 \begin{figure}
    315 \vspace*{-10pt}
    316 \begin{algorithm}[H]
    317 \small
    318 \caption{Dynamic object allocation of size $S$}\label{alg:heapObjectAlloc}
     127Each heap uses segregated free-buckets that have free objects of a specific size. Each free-bucket of a specific size has the following two lists in it:
     128\begin{itemize}
     129\item
     130The free list is used when a thread frees an object owned by its own heap; the free list does not use any locks/atomic operations as it is only used by the owner KT.
     131\item
     132The away list is used when a thread A frees an object owned by another KT B's heap. The object should be freed to the owner heap (B's heap), so A places the object on B's away list. The away list is lock protected as it is shared by all other threads.
     133\end{itemize}
     134
     135When a dynamic object of size S is requested, the thread-local heap checks if S is greater than or equal to the mmap threshold. Any such request is fulfilled by allocating an mmap area of that size; such requests are not allocated in the sbrk area. The value of this threshold can be changed using the mallopt routine, but the new value should not be larger than the biggest free-bucket size.
     136
     137Algorithm~\ref{alg:heapObjectAlloc} briefly shows how an allocation request is fulfilled.
     138
     139\begin{algorithm}
     140\caption{Dynamic object allocation of size S}\label{alg:heapObjectAlloc}
    319141\begin{algorithmic}[1]
    320142\State $\textit{O} \gets \text{NULL}$
    321 \If {$S >= \textit{mmap-threshold}$}
    322         \State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$
    323 \Else
    324         \State $\textit{B} \gets \text{smallest free-bucket} \geq S$
     143\If {$S < \textit{mmap-threshold}$}
     144        \State $\textit{B} \gets (\text{smallest free-bucket} \geq S)$
    325145        \If {$\textit{B's free-list is empty}$}
    326146                \If {$\textit{B's away-list is empty}$}
    327147                        \If {$\textit{heap's allocation buffer} < S$}
    328                                 \State $\text{get allocation from global pool (which might call \lstinline{sbrk})}$
     148                                \State $\text{get allocation buffer using system call sbrk()}$
    329149                        \EndIf
    330150                        \State $\textit{O} \gets \text{bump allocate an object of size S from allocation buffer}$
     
    337157        \EndIf
    338158        \State $\textit{O's owner} \gets \text{B}$
     159\Else
     160        \State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$
    339161\EndIf
    340162\State $\Return \textit{ O}$
     
    342164\end{algorithm}
    343165
    344 <<<<<<< HEAD
    345166Algorithm~\ref{alg:heapObjectFreeOwn} shows how a free request is fulfilled if object ownership is turned on. Algorithm~\ref{alg:heapObjectFreeNoOwn} shows how the same free request is fulfilled without object ownership.
    346167
     
    350171\If {$\textit{A was mmap-ed}$}
    351172        \State $\text{return A's dynamic memory to system using system call munmap}$
    352 =======
    353 \vspace*{-15pt}
    354 \begin{algorithm}[H]
    355 \small
    356 \caption{Dynamic object free at address $A$ with object ownership}\label{alg:heapObjectFreeOwn}
    357 \begin{algorithmic}[1]
    358 \If {$\textit{A mapped allocation}$}
    359         \State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
    360 >>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961
    361173\Else
    362174        \State $\text{B} \gets \textit{O's owner}$
     
    369181\end{algorithmic}
    370182\end{algorithm}
    371 <<<<<<< HEAD
    372183
    373184\begin{algorithm}
     
    388199\end{algorithm}
    389200
    390 =======
    391 >>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961
    392 
    393 \vspace*{-15pt}
    394 \begin{algorithm}[H]
    395 \small
    396 \caption{Dynamic object free at address $A$ without object ownership}\label{alg:heapObjectFreeNoOwn}
    397 \begin{algorithmic}[1]
    398 \If {$\textit{A mapped allocation}$}
    399         \State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
    400 \Else
    401         \State $\text{B} \gets \textit{O's owner}$
    402         \If {$\textit{B is thread-local heap's bucket}$}
    403                 \State $\text{push A to B's free-list}$
    404         \Else
    405                 \State $\text{C} \gets \textit{thread local heap's bucket with same size as B}$
    406                 \State $\text{push A to C's free-list}$
    407         \EndIf
    408 \EndIf
    409 \end{algorithmic}
    410 \end{algorithm}
    411 \end{figure}
    412 
    413 Algorithm~\ref{alg:heapObjectFreeOwn} shows the de-allocation (free) outline for an object at address $A$ with ownership.
First, the allocation is categorized as small (@sbrk@) or large (@mmap@).
    415 For large allocations, the storage is unmapped back to the operating system.
    416 For small allocations, the bucket associated with the request size is retrieved.
    417 If the bucket is local to the thread, the allocation is pushed onto the thread's associated bucket.
    418 If the bucket is not local to the thread, the allocation is pushed onto the owning thread's associated away stack.
    419 
    420 Algorithm~\ref{alg:heapObjectFreeNoOwn} shows the de-allocation (free) outline for an object at address $A$ without ownership.
    421 The algorithm is the same as for ownership except if the bucket is not local to the thread.
    422 Then the corresponding bucket of the owner thread is computed for the deallocating thread, and the allocation is pushed onto the deallocating thread's bucket.
    423 
    424 Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through routines @malloc@/@free@, which are the only routines to directly access and manage the internal data structures of the heap.
    425 Other allocation operations, \eg @calloc@, @memalign@, and @realloc@, are composed of calls to @malloc@ and possibly @free@, and may manipulate header information after storage is allocated.
    426 This design simplifies heap-management code during development and maintenance.
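
As an illustration of this funnelling, a @calloc@-style operation can be written as a thin wrapper over @malloc@; this is a sketch of the composition idea, not the actual llheap code:
\begin{lstlisting}
#include <stdlib.h>
#include <string.h>

// sketch: calloc composed from malloc, per the funnel design
void * calloc_sketch( size_t dim, size_t elemSize ) {
	size_t size = dim * elemSize;	// overflow check elided
	void * p = malloc( size );	// only malloc touches heap internals
	if ( p ) memset( p, 0, size );	// header bookkeeping could mark zero-fill here
	return p;
}
\end{lstlisting}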
    427 
    428 
    429 \subsection{Alignment}
    430 
    431 All dynamic memory allocations must have a minimum storage alignment for the contained object(s).
Often the minimum memory alignment, M, is the bus width (32 or 64-bit), the largest register (double, long double), the largest atomic instruction (DCAS), or vector data (MMX).
In general, the minimum storage alignment is an 8/16-byte boundary on 32/64-bit computers.
    434 For consistency, the object header is normally aligned at this same boundary.
Larger alignments must be a power of 2, such as page alignment (4/8K).
    436 Any alignment request, N, $\le$ the minimum alignment is handled as a normal allocation with minimal alignment.
    437 
    438 For alignments greater than the minimum, the obvious approach for aligning to address @A@ is: compute the next address that is a multiple of @N@ after the current end of the heap, @E@, plus room for the header before @A@ and the size of the allocation after @A@, moving the end of the heap to @E'@.
    439 \begin{center}
    440 \input{Alignment1}
    441 \end{center}
    442 The storage between @E@ and @H@ is chained onto the appropriate free list for future allocations.
    443 This approach is also valid within any sufficiently large free block, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage.
    444 In this approach, the aligned address @A@ is the same as the allocated storage address @P@, \ie @P@ $=$ @A@ for all allocation routines, which simplifies deallocation.
    445 However, if there are a large number of aligned requests, this approach leads to memory fragmentation from the small free areas around the aligned object.
    446 As well, it does not work for large allocations, where many memory allocators switch from program @sbrk@ to operating-system @mmap@.
    447 The reason is that @mmap@ only starts on a page boundary, and it is difficult to reuse the storage before the alignment boundary for other requests.
    448 Finally, this approach is incompatible with allocator designs that funnel allocation requests through @malloc@ as it directly manipulates management information within the allocator to optimize the space/time of a request.
    449 
    450 Instead, llheap alignment is accomplished by making a \emph{pessimistically} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg:
    451 \begin{center}
    452 \input{Alignment2}
    453 \end{center}
    454 The amount of storage necessary is @alignment - M + size@, which ensures there is an address, @A@, after the storage returned from @malloc@, @P@, that is a multiple of @alignment@ followed by sufficient storage for the data object.
    455 The approach is pessimistic because if @P@ already has the correct alignment @N@, the initial allocation has already requested sufficient space to move to the next multiple of @N@.
    456 For this special case, there is @alignment - M@ bytes of unused storage after the data object, which subsequently can be used by @realloc@.
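
The address computation is a simple round-up; the following sketch assumes @M@ is the allocator's minimum alignment and the requested alignment is a power of 2:
\begin{lstlisting}
#include <stdint.h>
#include <stdlib.h>

enum { M = 16 };			// assumed minimum alignment

void * memalign_sketch( size_t alignment, size_t size ) {
	char * p = malloc( alignment - M + size );	// pessimistic request
	// round P up to the next multiple of alignment (A == P if already aligned)
	uintptr_t a = ( (uintptr_t)p + alignment - 1 ) & ~(uintptr_t)(alignment - 1);
	return (void *)a;		// A - P <= alignment - M always holds
}
\end{lstlisting}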
    457 
    458 Note, the address returned is @A@, which is subsequently returned to @free@.
    459 However, to correctly free the allocated object, the value @P@ must be computable, since that is the value generated by @malloc@ and returned within @memalign@.
    460 Hence, there must be a mechanism to detect when @P@ $\neq$ @A@ and how to compute @P@ from @A@.
    461 
    462 The llheap approach uses two headers:
    463 the \emph{original} header associated with a memory allocation from @malloc@, and a \emph{fake} header within this storage before the alignment boundary @A@, which is returned from @memalign@, e.g.:
    464 \begin{center}
    465 \input{Alignment2Impl}
    466 \end{center}
Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments greater than @M@.
    468 When @P@ $\neq$ @A@, the minimum distance between @P@ and @A@ is @M@ bytes, due to the pessimistic storage-allocation.
    469 Therefore, there is always room for an @M@-byte fake header before @A@.
    470 
    471 The fake header must supply an indicator to distinguish it from a normal header and the location of address @P@ generated by @malloc@.
This information is encoded as an offset from A to P and the initial alignment (discussed in \VRef{s:ReallocStickyProperties}).
    473 To distinguish a fake header from a normal header, the least-significant bit of the alignment is used because the offset participates in multiple calculations, while the alignment is just remembered data.
    474 \begin{center}
    475 \input{FakeHeader}
    476 \end{center}
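
A sketch of the decoding on the deallocation path follows, assuming a hypothetical two-word fake-header layout; the actual llheap field layout may differ:
\begin{lstlisting}
#include <stdint.h>

struct FakeHeader {
	uint32_t alignment;		// least-significant bit set => fake header
	uint32_t offset;		// distance in bytes from A back to P
};

// given address A returned from memalign, recover address P from malloc
void * start_of_allocation( void * a ) {
	struct FakeHeader * fh = (struct FakeHeader *)a - 1;
	if ( fh->alignment & 1 ) return (char *)a - fh->offset;	// fake header
	return a;			// normal header, P == A
}
\end{lstlisting}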
    477 
    478 
    479 \subsection{\lstinline{realloc} and Sticky Properties}
    480 \label{s:ReallocStickyProperties}
    481 
    482 Allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually.
    483 \begin{flushleft}
    484 \begin{tabular}{ll}
    485 \multicolumn{1}{c}{\textbf{realloc pattern}} & \multicolumn{1}{c}{\textbf{manually}} \\
    486 \begin{lstlisting}
    487 T * naddr = realloc( oaddr, newSize );
    488 
    489 
    490 
    491 \end{lstlisting}
    492 &
    493 \begin{lstlisting}
    494 T * naddr = (T *)malloc( newSize ); $\C[2.4in]{// new storage}$
    495 memcpy( naddr, addr, oldSize );  $\C{// copy old bytes}$
    496 free( addr );                           $\C{// free old storage}$
    497 addr = naddr;                           $\C{// change pointer}\CRT$
    498 \end{lstlisting}
    499 \end{tabular}
    500 \end{flushleft}
    501 The realloc pattern leverages available storage at the end of an allocation due to bucket sizes, possibly eliminating a new allocation and copying.
However, this pattern is not used enough, so programs miss opportunities to reduce storage-management costs.
    503 In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the pattern.
    504 
    505 The hidden problem for this pattern is the effect of zero fill and alignment with respect to reallocation.
    506 Are these properties transient or persistent (``sticky'')?
For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size?
That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are the initial allocation properties preserved?
    509 Currently, allocation properties are not preserved, so subsequent use of @realloc@ storage may cause inefficient execution or errors due to lack of zero fill or alignment.
    510 This silent problem is unintuitive to programmers and difficult to locate because it is transient.
    511 To prevent these problems, llheap preserves initial allocation properties for the lifetime of an allocation and the semantics of @realloc@ are augmented to preserve these properties, with additional query routines.
    512 This change makes the realloc pattern efficient and safe.
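
For example, under llheap's augmented semantics, the zero-fill property from @calloc@ remains sticky across @realloc@; a short usage sketch:
\begin{lstlisting}
#include <stdlib.h>

int main() {
	int * a = calloc( 5, sizeof(int) );	// sticky zero-fill property set
	a = realloc( a, 100 * sizeof(int) );	// llheap: the extension is also zeroed
	// with a non-sticky allocator, elements 5..99 would be uninitialized
	free( a );
}
\end{lstlisting}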
    513 
    514 
    515 \subsection{Header}
    516 
To preserve allocation properties requires storing additional information with an allocation.
The only available location is the header, where \VRef[Figure]{f:llheapNormalHeader} shows the llheap storage layout.
The header has two data fields sized appropriately for 32/64-bit alignment requirements.
    520 The first field is a union of three values:
    521 \begin{description}
    522 \item[bucket pointer]
is for allocated storage and points back to the bucket associated with this storage request (see \VRef[Figure]{f:llheapStructure} for the fields accessible in a bucket).
    524 \item[mapped size]
    525 is for mapped storage and is the storage size for use in unmapping.
    526 \item[next free block]
    527 is for free storage and is an intrusive pointer chaining same-size free blocks onto a bucket's free stack.
    528 \end{description}
The second field remembers the request size versus the allocation (bucket) size, \eg a request for 42 bytes is rounded up to 64 bytes.
    530 Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors.
    531 
    532 \begin{figure}
    533 \centering
    534 \input{Header}
    535 \caption{llheap Normal Header}
    536 \label{f:llheapNormalHeader}
    537 \end{figure}
    538 
    539 The low-order 3-bits of the first field are \emph{unused} for any stored values, whereas the second field may use all of its bits.
    540 The 3 unused bits are used to represent mapped allocation, zero filled, and alignment, respectively.
    541 Note, the alignment bit is not used in the normal header and the zero-filled/mapped bits are not used in the fake header.
    542 This implementation allows a fast test if any of the lower 3-bits are on (@&@ and compare).
    543 If no bits are on, it implies a basic allocation, which is handled quickly;
    544 otherwise, the bits are analysed and appropriate actions are taken for the complex cases.
    545 Since most allocations are basic, this implementation results in a significant performance gain along the allocation and free fastpath.
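
A sketch of the fastpath test, assuming hypothetical bit assignments for the three flags:
\begin{lstlisting}
#include <stdint.h>

enum { MAPPED = 1, ZERO_FILL = 2, ALIGNED = 4 };	// assumed bit order

static inline int is_basic_allocation( uintptr_t first_field ) {
	// a single AND and compare covers all three complex cases at once
	return ( first_field & (MAPPED | ZERO_FILL | ALIGNED) ) == 0;
}
\end{lstlisting}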
    546 
    547 
    548 \section{Statistics and Debugging}
    549 
    550 llheap can be built to accumulate fast and largely contention-free allocation statistics to help understand allocation behaviour.
    551 Incrementing statistic counters must appear on the allocation fastpath.
    552 As noted, any atomic operation along the fastpath produces a significant increase in allocation costs.
    553 To make statistics performant enough for use on running systems, each heap has its own set of statistic counters, so heap operations do not require atomic operations.
    554 
    555 To locate all statistic counters, heaps are linked together in statistics mode, and this list is locked and traversed to sum all counters across heaps.
    556 Note, the list is locked to prevent errors traversing an active list;
    557 the statistics counters are not locked and can flicker during accumulation, which is not an issue with atomic read/write.
\VRef[Figure]{f:StatisticsOutput} shows an example of statistics output, which covers all allocation operations and information about deallocating storage not owned by a thread.
    559 No other memory allocator studied provides as comprehensive statistical information.
    560 Finally, these statistics were invaluable during the development of this thesis for debugging and verifying correctness, and hence, should be equally valuable to application developers.
    561 
    562 \begin{figure}
    563 \begin{lstlisting}
    564 Heap statistics: (storage request / allocation)
    565   malloc >0 calls 2,766; 0 calls 2,064; storage 12,715 / 13,367 bytes
    566   aalloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    567   calloc >0 calls 6; 0 calls 0; storage 1,008 / 1,104 bytes
    568   memalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    569   amemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    570   cmemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    571   resize >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    572   realloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
    573   free !null calls 2,766; null calls 4,064; storage 12,715 / 13,367 bytes
    574   away pulls 0; pushes 0; storage 0 / 0 bytes
    575   sbrk calls 1; storage 10,485,760 bytes
    576   mmap calls 10,000; storage 10,000 / 10,035 bytes
    577   munmap calls 10,000; storage 10,000 / 10,035 bytes
    578   threads started 4; exited 3
    579   heaps new 4; reused 0
    580 \end{lstlisting}
    581 \caption{Statistics Output}
\label{f:StatisticsOutput}
    583 \end{figure}
    584 
    585 llheap can also be built with debug checking, which inserts many asserts along all allocation paths.
    586 These assertions detect incorrect allocation usage, like double frees, unfreed storage, or memory corruptions because internal values (like header fields) are overwritten.
    587 These checks are best effort as opposed to complete allocation checking as in @valgrind@.
    588 Nevertheless, the checks detect many allocation problems.
There is an unfortunate problem in detecting unfreed storage because some library routines assume their allocations persist for the lifetime of the program, and hence, do not free their storage.
For example, @printf@ allocates a 1024-byte buffer on the first call and never deletes this buffer.
    591 To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ \VPageref{p:malloc_unfreed}), and it is subtracted from the total allocate/free difference.
    592 Determining the amount of never-freed storage is annoying, but once done, any warnings of unfreed storage are application related.
    593 
Tests indicate only a 30\% performance decrease when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistics is mitigated by limited calls, often only one at the end of the program.
    595 
    596 
    597 \section{User-level Threading Support}
    598 \label{s:UserlevelThreadingSupport}
    599 
The serially-reusable problem (see \VRef{s:AllocationFastpath}) occurs for kernel threads in the ``T:H, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
    601 The solution is to prevent interrupts that can result in CPU or KT change during operations that are logically critical sections.
    602 Locking these critical sections negates any attempt for a quick fastpath and results in high contention.
    603 For user-level threading, the serially-reusable problem appears with time slicing for preemptable scheduling, as the signal handler context switches to another user-level thread.
    604 Without time slicing, a user thread performing a long computation can prevent execution (starve) other threads.
To prevent starvation for an allocation-active thread, \ie the time slice always triggers in an allocation critical-section for one thread, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
The rollforward flag is tested at the end of each allocation funnel routine (see \VPageref{p:FunnelRoutine}), and if set, it is reset and a voluntary yield (context switch) is performed to allow other threads to execute.
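
A sketch of this handshake between the signal handler and a funnel routine follows; the names @rollforward@, @heap_malloc@, and @yield@ are hypothetical:
\begin{lstlisting}
#include <stddef.h>

extern void * heap_malloc( size_t );	// hypothetical funnel body
extern void yield( void );		// hypothetical voluntary context switch
static __thread volatile int rollforward; // set by the signal handler on an aborted time slice

void * malloc( size_t size ) {
	void * p = heap_malloc( size );
	if ( rollforward ) {		// time slice deferred during this operation?
		rollforward = 0;
		yield();		// allow other threads to execute
	}
	return p;
}
\end{lstlisting}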
    607 
llheap uses two techniques to detect when execution is in an allocation operation or a routine called from an allocation operation, to abort any time slice during this period.
    609 On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting thread-local flags so the signal handler aborts immediately.
    610 On the fastpath, disabling/enabling interrupts is too expensive as accessing thread-local storage can be expensive and not thread-safe.
    611 For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing.
    612 Hence, there is a window between loading the thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.
    613 
    614 The fast technique defines a special code section and places all non-interruptible routines in this section.
    615 The linker places all code in this section into a contiguous block of memory, but the order of routines within the block is unspecified.
Then, the signal handler compares the program counter at the point of interrupt with the start and end address of the non-interruptible section, and if executing within this section, aborts the time slice and sets the rollforward flag.
    617 This technique is fragile because any calls in the non-interruptible code outside of the non-interruptible section (like @sbrk@) must be bracketed with disable/enable interrupts and these calls must be along the slowpath.
    618 Hence, for correctness, this approach requires inspection of generated assembler code for routines placed in the non-interruptible section.
    619 This issue is mitigated by the llheap funnel design so only funnel routines and a few statistics routines are placed in the non-interruptible section and their assembler code examined.
    620 These techniques are used in both the \uC and \CFA versions of llheap, where both of these systems have user-level threading.
    621 
    622 
    623 \section{Bootstrapping}
    624 
    625 There are problems bootstrapping a memory allocator.
    626 \begin{enumerate}
    627 \item
    628 Programs can be statically or dynamically linked.
    629 \item
    630 The order the linker schedules startup code is poorly supported.
    631 \item
    632 Knowing a KT's start and end independently from the KT code is difficult.
    633 \end{enumerate}
    634 
    635 For static linking, the allocator is loaded with the program.
    636 Hence, allocation calls immediately invoke the allocator operation defined by the loaded allocation library and there is only one memory allocator used in the program.
    637 This approach allows allocator substitution by placing an allocation library before any other in the linked/load path.
    638 
    639 Allocator substitution is similar for dynamic linking, but the problem is that the dynamic loader starts first and needs to perform dynamic allocations \emph{before} the substitution allocator is loaded.
    640 As a result, the dynamic loader uses a default allocator until the substitution allocator is loaded, after which all allocation operations are handled by the substitution allocator, including from the dynamic loader.
    641 Hence, some part of the @sbrk@ area may be used by the default allocator and statistics about allocation operations cannot be correct.
    642 Furthermore, dynamic linking goes through trampolines, so there is an additional cost along the allocator fastpath for all allocation operations.
Testing showed up to a 5\% performance decrease for dynamic linking compared to static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.
    644 
    645 All allocator libraries need to perform startup code to initialize data structures, such as the heap array for llheap.
The problem is getting initialization done before the first allocator call.
However, there does not seem to be a mechanism to tell either the static or dynamic loader to first perform initialization code before any calls to a loaded library.
    648 As a result, calls to allocation routines occur without initialization.
    649 To deal with this problem, it is necessary to put a conditional initialization check along the allocation fastpath to trigger initialization (singleton pattern).
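
A sketch of this check, with hypothetical names:
\begin{lstlisting}
#include <stddef.h>

extern void heap_startup( void );	// hypothetical one-time heap initialization
extern void * heap_malloc( size_t );	// hypothetical funnel body
static __thread int heap_initialized;	// singleton flag

void * malloc( size_t size ) {
	if ( ! heap_initialized ) {	// conditional check on every fastpath entry
		heap_initialized = 1;
		heap_startup();
	}
	return heap_malloc( size );
}
\end{lstlisting}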
    650 
Two other important execution points are program startup and termination, which include prologue and epilogue code to bootstrap a program, of which programmers are unaware.
    652 For example, dynamic-memory allocations before/after the application starts should not be considered in statistics because the application does not make these calls.
    653 llheap establishes these two points using routines:
    654 \begin{lstlisting}
    655 __attribute__(( constructor( 100 ) )) static void startup( void ) {
    656         // clear statistic counters
    657         // reset allocUnfreed counter
    658 }
    659 __attribute__(( destructor( 100 ) )) static void shutdown( void ) {
    660         // sum allocUnfreed for all heaps
    661         // subtract global unfreed storage
    662         // if allocUnfreed > 0 then print warning message
    663 }
    664 \end{lstlisting}
    665 which use global constructor/destructor priority 100, where the linker calls these routines at program prologue/epilogue in increasing/decreasing order of priority.
    666 Application programs may only use global constructor/destructor priorities greater than 100.
    667 Hence, @startup@ is called after the program prologue but before the application starts, and @shutdown@ is called after the program terminates but before the program epilogue.
    668 By resetting counters in @startup@, prologue allocations are ignored, and checking unfreed storage in @shutdown@ checks only application memory management, ignoring the program epilogue.
    669 
    670 While @startup@/@shutdown@ apply to the program KT, a concurrent program creates additional KTs that do not trigger these routines.
    671 However, it is essential for the allocator to know when each KT is started/terminated.
    672 One approach is to create a thread-local object with a construct/destructor, which is triggered after a new KT starts and before it terminates, respectively.
    673 \begin{lstlisting}
    674 struct ThreadManager {
    675         volatile bool pgm_thread;
    676         ThreadManager() {} // unusable
    677         ~ThreadManager() { if ( pgm_thread ) heapManagerDtor(); }
    678 };
    679 static thread_local ThreadManager threadManager;
    680 \end{lstlisting}
    681 Unfortunately, thread-local variables are created lazily, \ie on the first dereference of @threadManager@, which then triggers its constructor.
    682 Therefore, the constructor is useless for knowing when a KT starts because the KT must reference it, and the allocator does not control the application KT.
    683 Fortunately, the singleton pattern needed for initializing the program KT also triggers KT allocator initialization, which can then reference @pgm_thread@ to call @threadManager@'s constructor, otherwise its destructor is not called.
Now when a KT terminates, @~ThreadManager@ is called to chain its heap onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
    685 The conditional destructor call prevents closing down the program heap, which must remain available because epilogue code may free more storage.
    686 
    687 Finally, there is a recursive problem when the singleton pattern dereferences @pgm_thread@ to initialize the thread-local object, because its initialization calls @atExit@, which immediately calls @malloc@ to obtain storage.
    688 This recursion is handled with another thread-local flag to prevent double initialization.
    689 A similar problem exists when the KT terminates and calls member @~ThreadManager@, because immediately afterwards, the terminating KT calls @free@ to deallocate the storage obtained from the @atExit@.
In the meantime, the terminated heap has been put on the global-heap free-stack, and may be in use by a new KT, so the @atExit@ free is handled as a free to another heap and put onto the away list using locking.
    691 
    692 For user threading systems, the KTs are controlled by the runtime, and hence, start/end pointers are known and interact directly with the llheap allocator for \uC and \CFA, which eliminates or simplifies several of these problems.
    693 The following API was created to provide interaction between the language runtime and the allocator.
    694 \begin{lstlisting}
    695 void startTask();                       $\C{// KT starts}$
    696 void finishTask();                      $\C{// KT ends}$
    697 void startup();                         $\C{// when application code starts}$
    698 void shutdown();                        $\C{// when application code ends}$
    699 bool traceHeap();                       $\C{// enable allocation/free printing for debugging}$
    700 bool traceHeapOn();                     $\C{// start printing allocation/free calls}$
    701 bool traceHeapOff();                    $\C{// stop printing allocation/free calls}$
    702 \end{lstlisting}
This kind of API is necessary to allow concurrent runtime systems to interact with different memory allocators in a consistent way.
    704201
    705202%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    706203
    707204\section{Added Features and Methods}
    708 
    709 The C dynamic-allocation API (see \VRef[Figure]{f:CDynamicAllocationAPI}) is neither orthogonal nor complete.
    710 For example,
    711 \begin{itemize}
    712 \item
    713 It is possible to zero fill or align an allocation but not both.
    714 \item
    715 It is \emph{only} possible to zero fill an array allocation.
    716 \item
    717 It is not possible to resize a memory allocation without data copying.
    718 \item
    719 @realloc@ does not preserve initial allocation properties.
    720 \end{itemize}
    721 As a result, programmers must provide these options, which is error prone, resulting in blaming the entire programming language for a poor dynamic-allocation API.
    722 Furthermore, newer programming languages have better type systems that can provide safer and more powerful APIs for memory allocation.
    723 
    724 \begin{figure}
    725 \begin{lstlisting}
    726 void * malloc( size_t size );
    727 void * calloc( size_t nmemb, size_t size );
    728 void * realloc( void * ptr, size_t size );
    729 void * reallocarray( void * ptr, size_t nmemb, size_t size );
    730 void free( void * ptr );
    731 void * memalign( size_t alignment, size_t size );
    732 void * aligned_alloc( size_t alignment, size_t size );
    733 int posix_memalign( void ** memptr, size_t alignment, size_t size );
    734 void * valloc( size_t size );
    735 void * pvalloc( size_t size );
    736 
    737 struct mallinfo mallinfo( void );
    738 int mallopt( int param, int val );
    739 int malloc_trim( size_t pad );
    740 size_t malloc_usable_size( void * ptr );
    741 void malloc_stats( void );
    742 int malloc_info( int options, FILE * fp );
    743 \end{lstlisting}
    744 \caption{C Dynamic-Allocation API}
    745 \label{f:CDynamicAllocationAPI}
    746 \end{figure}
    747 
    748 The following presents design and API changes for C, \CC (\uC), and \CFA, all of which are implemented in llheap.
    749 
To improve the uHeap allocator (FIX ME: cite uHeap) interface and make it more user friendly, we added a few more routines to the C allocator. We also built a \CFA (FIX ME: cite cforall) interface on top of the C interface to increase the usability of the allocator.
     206
\subsection{C Interface}
We added a few more features and routines to the allocator's C interface to make the allocator more usable for programmers. These features give the programmer more control over dynamic memory allocation.
    750209
    751210\subsection{Out of Memory}
     
    753212Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory;
    754213hence the need to return an alternate value for a zero-sized allocation.
    755 A different approach allowed by the C API is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.
    756 In theory, notifying the programmer of memory failure allows recovery;
    757 in practice, it is almost impossible to gracefully recover when out of memory.
    758 Hence, the cheaper approach of returning @nullptr@ for a zero-sized allocation is chosen because no pseudo allocation is necessary.
    759 
    760 
    761 \subsection{C Interface}
    762 
    763 For C, it is possible to increase functionality and orthogonality of the dynamic-memory API to make allocation better for programmers.
    764 
    765 For existing C allocation routines:
    766 \begin{itemize}
    767 \item
    768 @calloc@ sets the sticky zero-fill property.
    769 \item
    770 @memalign@, @aligned_alloc@, @posix_memalign@, @valloc@ and @pvalloc@ set the sticky alignment property.
    771 \item
    772 @realloc@ and @reallocarray@ preserve sticky properties.
    773 \end{itemize}
    774 
    775 The C dynamic-memory API is extended with the following routines:
    776 
    777 \paragraph{\lstinline{void * aalloc( size_t dim, size_t elemSize )}}
    778 extends @calloc@ for allocating a dynamic array of objects without calculating the total size of array explicitly but \emph{without} zero-filling the memory.
    779 @aalloc@ is significantly faster than @calloc@, which is the only alternative.
    780 
    781 \noindent\textbf{Usage}
The alternative is to abort a program when out of memory.
In theory, notifying the programmer allows recovery;
in practice, it is almost impossible to recover gracefully when out of memory, so the cheaper approach of returning @nullptr@ for a zero-sized allocation is chosen.
     217
     218
     219\subsection{\lstinline{void * aalloc( size_t dim, size_t elemSize )}}
@aalloc@ is an extension of @malloc@. It allows the programmer to allocate a dynamic array of objects without explicitly calculating the total size of the array. The only alternative in other allocators is @calloc@, but @calloc@ also fills the dynamic memory with 0, which makes it slower for a programmer who only wants to allocate an array of objects without zero-filling it.
     221\paragraph{Usage}
    782222@aalloc@ takes two parameters.
    783 \begin{itemize}
    784 \item
    785 @dim@: number of array objects
    786 \item
    787 @elemSize@: size of array object
    788 \end{itemize}
    789 It returns the address of the dynamic array or @NULL@ if either @dim@ or @elemSize@ are zero.
    790 
    791 \paragraph{\lstinline{void * resize( void * oaddr, size_t size )}}
    792 extends @realloc@ for resizing an existing allocation \emph{without} copying previous data into the new allocation or preserving sticky properties.
    793 @resize@ is significantly faster than @realloc@, which is the only alternative.
    794 
    795 \noindent\textbf{Usage}
     223
\begin{itemize}
\item
@dim@: number of objects in the array.
\item
@elemSize@: size of each object in the array.
\end{itemize}
It returns the address of a dynamic object allocated on the heap that can contain @dim@ objects of size @elemSize@. On failure, it returns a @NULL@ pointer.
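
For example, a short usage sketch, assuming the routine is declared by the allocator's header:
\begin{lstlisting}
// 100-element int array, no implicit zero fill
int * a = aalloc( 100, sizeof(int) );
if ( a == NULL ) exit( EXIT_FAILURE );
for ( int i = 0; i < 100; i += 1 ) a[i] = i;	// caller initializes
free( a );
\end{lstlisting}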
     231
     232\subsection{\lstinline{void * resize( void * oaddr, size_t size )}}
@resize@ is an extension of @realloc@. It allows the programmer to reuse a currently allocated dynamic object with a new size requirement. Its alternative in other allocators is @realloc@, but @realloc@ also copies the data from the old object to the new object, which makes it slower for a programmer who only wants to reuse an old dynamic object at a new size but does not want to preserve the old data.
     234\paragraph{Usage}
    796235@resize@ takes two parameters.
    797 \begin{itemize}
    798 \item
    799 @oaddr@: address to be resized
    800 \item
    801 @size@: new allocation size (smaller or larger than previous)
    802 \end{itemize}
    803 It returns the address of the old or new storage with the specified new size or @NULL@ if @size@ is zero.
    804 
    805 \paragraph{\lstinline{void * amemalign( size_t alignment, size_t dim, size_t elemSize )}}
    806 extends @aalloc@ and @memalign@ for allocating an aligned dynamic array of objects.
    807 Sets sticky alignment property.
    808 
    809 \noindent\textbf{Usage}
    810 @amemalign@ takes three parameters.
    811 \begin{itemize}
    812 \item
    813 @alignment@: alignment requirement
    814 \item
    815 @dim@: number of array objects
    816 \item
    817 @elemSize@: size of array object
    818 \end{itemize}
    819 It returns the address of the aligned dynamic-array or @NULL@ if either @dim@ or @elemSize@ are zero.
    820 
    821 \paragraph{\lstinline{void * cmemalign( size_t alignment, size_t dim, size_t elemSize )}}
    822 extends @amemalign@ with zero fill and has the same usage as @amemalign@.
    823 Sets sticky zero-fill and alignment property.
    824 It returns the address of the aligned, zero-filled dynamic-array or @NULL@ if either @dim@ or @elemSize@ are zero.
    825 
    826 \paragraph{\lstinline{size_t malloc_alignment( void * addr )}}
    827 returns the alignment of the dynamic object for use in aligning similar allocations.
    828 
    829 \noindent\textbf{Usage}
    830 @malloc_alignment@ takes one parameter.
    831 \begin{itemize}
    832 \item
    833 @addr@: address of an allocated object.
    834 \end{itemize}
    835 It returns the alignment of the given object, where objects not allocated with alignment return the minimal allocation alignment.
    836 
    837 \paragraph{\lstinline{bool malloc_zero_fill( void * addr )}}
    838 returns true if the object has the zero-fill sticky property for use in zero filling similar allocations.
    839 
    840 \noindent\textbf{Usage}
     236
\begin{itemize}
\item
@oaddr@: the address of the old object to be resized.
\item
@size@: the new size requirement for the object.
\end{itemize}
It returns an object of the given size but does not preserve the data in the old object. On failure, it returns a @NULL@ pointer.
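
A usage sketch, assuming the routine is declared by the allocator's header:
\begin{lstlisting}
char * buf = malloc( 64 );
// grow to 4096 bytes; the old contents are NOT copied or preserved
buf = resize( buf, 4096 );
if ( buf == NULL ) exit( EXIT_FAILURE );
free( buf );
\end{lstlisting}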
     244
     245\subsection{\lstinline{void * resize( void * oaddr, size_t nalign, size_t size )}}
This @resize@ is an extension of the above @resize@ (FIX ME: cite above resize). In addition to resizing an old object, it can also realign the old object to a new alignment requirement.
\paragraph{Usage}
This @resize@ takes three parameters: it takes an additional @nalign@ parameter compared to the above @resize@ (FIX ME: cite above resize).
     249
\begin{itemize}
\item
@oaddr@: the address of the old object to be resized.
\item
@nalign@: the new alignment to which the old object needs to be realigned.
\item
@size@: the new size requirement for the object.
\end{itemize}
It returns an object with the size and alignment given in the parameters. On failure, it returns a @NULL@ pointer.
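
A usage sketch, assuming the routine is declared by the allocator's header:
\begin{lstlisting}
void * p = malloc( 100 );
// 256 bytes, realigned to a 4096-byte boundary, contents not preserved
p = resize( p, 4096, 256 );
if ( p == NULL ) exit( EXIT_FAILURE );
free( p );
\end{lstlisting}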
     259
     260\subsection{\lstinline{void * amemalign( size_t alignment, size_t dim, size_t elemSize )}}
@amemalign@ is a hybrid of @memalign@ and @aalloc@. It allows the programmer to allocate an aligned dynamic array of objects without explicitly calculating the total size of the array.
\paragraph{Usage}
@amemalign@ takes three parameters.
     264
\begin{itemize}
\item
@alignment@: the alignment to which the dynamic array needs to be aligned.
\item
@dim@: number of objects in the array.
\item
@elemSize@: size of each object in the array.
\end{itemize}
It returns a dynamic array with the capacity to contain @dim@ objects of size @elemSize@. The returned dynamic array is aligned to the given alignment. On failure, it returns a @NULL@ pointer.
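
A usage sketch, assuming the routine is declared by the allocator's header:
\begin{lstlisting}
// 64-element double array on a 64-byte (cache-line) boundary, not zero filled
double * v = amemalign( 64, 64, sizeof(double) );
if ( v == NULL ) exit( EXIT_FAILURE );
free( v );
\end{lstlisting}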
     274
     275\subsection{\lstinline{void * cmemalign( size_t alignment, size_t dim, size_t elemSize )}}
@cmemalign@ is a hybrid of @amemalign@ and @calloc@. It allows the programmer to allocate an aligned dynamic array of objects that is zero filled. The current way to do this in other allocators is to allocate an aligned object with @memalign@ and then explicitly fill it with 0. This routine provides both aligning and zero filling, implicitly.
\paragraph{Usage}
@cmemalign@ takes three parameters.
     279
\begin{itemize}
\item
@alignment@: the alignment to which the dynamic array needs to be aligned.
\item
@dim@: number of objects in the array.
\item
@elemSize@: size of each object in the array.
\end{itemize}
It returns a dynamic array with the capacity to contain @dim@ objects of size @elemSize@. The returned dynamic array is aligned to the given alignment and zero filled. On failure, it returns a @NULL@ pointer.
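
A usage sketch, assuming the routine is declared by the allocator's header:
\begin{lstlisting}
// page-aligned, zero-filled array of 1024 ints in one call,
// instead of memalign followed by an explicit memset
int * table = cmemalign( 4096, 1024, sizeof(int) );
if ( table == NULL ) exit( EXIT_FAILURE );
free( table );
\end{lstlisting}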
     289
     290\subsection{\lstinline{size_t malloc_alignment( void * addr )}}
@malloc_alignment@ returns the alignment of a currently allocated dynamic object. It helps the programmer with memory management and personal bookkeeping, \eg verifying the alignment of a dynamic object in a producer-consumer scenario, where a producer allocates a dynamic object and the consumer needs to ensure the object was allocated with the required alignment.
\paragraph{Usage}
@malloc_alignment@ takes one parameter.
     294
     295\begin{itemize}
     296\item
     297@addr@: the address of the currently allocated dynamic object.
     298\end{itemize}
@malloc_alignment@ returns the alignment of the given dynamic object. On failure, it returns the default alignment of the uHeap allocator.
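
For example, a consumer-side check, assuming a 64-byte alignment contract between producer and consumer and that the routine is declared by the allocator's header:
\begin{lstlisting}
#include <assert.h>

void consume( void * obj ) {
	// verify the producer allocated obj with the required alignment
	assert( malloc_alignment( obj ) >= 64 );
	// ... use obj ...
}
\end{lstlisting}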
     300
     301\subsection{\lstinline{bool malloc_zero_fill( void * addr )}}
@malloc_zero_fill@ returns whether a currently allocated dynamic object was zero filled at the time of allocation. It helps the programmer with memory management and personal bookkeeping, \eg verifying the zero-filled property of a dynamic object in a producer-consumer scenario, where a producer allocates a dynamic object and the consumer needs to ensure the object was zero filled at allocation.
\paragraph{Usage}
@malloc_zero_fill@ takes one parameter.
    842305
    843306\begin{itemize}
    844307\item
    845 @addr@: address of an allocated object.
    846 \end{itemize}
    847 It returns true if the zero-fill sticky property is set and false otherwise.
    848 
    849 \paragraph{\lstinline{size_t malloc_size( void * addr )}}
    850 returns the request size of the dynamic object (updated when an object is resized) for use in similar allocations.
    851 See also @malloc_usable_size@.
    852 
    853 \noindent\textbf{Usage}
     308@addr@: the address of the currently allocated dynamic object.
     309\end{itemize}
@malloc_zero_fill@ returns true if the dynamic object was initially zero filled and false otherwise. On failure, it returns false.
     311
     312\subsection{\lstinline{size_t malloc_size( void * addr )}}
@malloc_size@ returns the request size of a currently allocated dynamic object. It helps the programmer with memory management and personal bookkeeping, \eg verifying the size of a dynamic object in a producer-consumer scenario, where a producer allocates a dynamic object and the consumer needs to ensure the object was allocated with the required size. Its current alternative in other allocators is @malloc_usable_size@, but the two differ: @malloc_usable_size@ returns the total data capacity of the dynamic object, including the extra space at the end of the object, whereas @malloc_size@ returns the size that was given to the allocator when the object was allocated. This size is updated when an object is realloced, resized, or passed through a similar allocator routine.
\paragraph{Usage}
@malloc_size@ takes one parameter.
    855 \begin{itemize}
    856 \item
    857 @addr@: address of an allocated object.
    858 \end{itemize}
    859 It returns the request size or zero if @addr@ is @NULL@.
    860 
    861 \paragraph{\lstinline{int malloc_stats_fd( int fd )}}
    862 changes the file descriptor where @malloc_stats@ writes statistics (default @stdout@).
    863 
    864 \noindent\textbf{Usage}
    865 @malloc_stats_fd@ takes one parameters.
    866 \begin{itemize}
    867 \item
@fd@: file descriptor.
    869 \end{itemize}
    870 It returns the previous file descriptor.
    871 
    872 \paragraph{\lstinline{size_t malloc_expansion()}}
    873 \label{p:malloc_expansion}
sets the amount (in bytes) to extend the heap when there is insufficient free storage to service an allocation request.
    875 It returns the heap extension size used throughout a program, \ie called once at heap initialization.
    876 
    877 \paragraph{\lstinline{size_t malloc_mmap_start()}}
sets the crossover between allocations occurring in the @sbrk@ area or separately mapped.
    879 It returns the crossover point used throughout a program, \ie called once at heap initialization.
    880 
    881 \paragraph{\lstinline{size_t malloc_unfreed()}}
    882 \label{p:malloc_unfreed}
sets the amount subtracted to adjust for unfreed program storage (debug only).
It returns the new subtraction amount and is called by @malloc_stats@.
    885 
    886 
    887 \subsection{\CC Interface}
    888 
    889 The following extensions take advantage of overload polymorphism in the \CC type-system.
    890 
    891 \paragraph{\lstinline{void * resize( void * oaddr, size_t nalign, size_t size )}}
    892 extends @resize@ with an alignment re\-quirement.
    893 
    894 \noindent\textbf{Usage}
    895 takes three parameters.
    896 \begin{itemize}
    897 \item
    898 @oaddr@: address to be resized
    899 \item
    900 @nalign@: alignment requirement
    901 \item
    902 @size@: new allocation size (smaller or larger than previous)
    903 \end{itemize}
    904 It returns the address of the old or new storage with the specified new size and alignment, or @NULL@ if @size@ is zero.
    905 
    906 \paragraph{\lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}}
    907 extends @realloc@ with an alignment re\-quirement and has the same usage as aligned @resize@.
    908 
    909 
    910 \subsection{\CFA Interface}
    911 
    912 The following extensions take advantage of overload polymorphism in the \CFA type-system.
    913 The key safety advantage of the \CFA type system is using the return type to select overloads;
    914 hence, a polymorphic routine knows the returned type and its size.
    915 This capability is used to remove the object size parameter and correctly cast the return storage to match the result type.
    916 For example, the following is the \CFA wrapper for C @malloc@:
    917 \begin{cfa}
    918 forall( T & | sized(T) ) {
    919         T * malloc( void ) {
    920                 if ( _Alignof(T) <= libAlign() ) return @(T *)@malloc( @sizeof(T)@ ); // C allocation
    921                 else return @(T *)@memalign( @_Alignof(T)@, @sizeof(T)@ ); // C allocation
    922         } // malloc
    923 \end{cfa}
    924 and is used as follows:
    925 \begin{lstlisting}
    926 int * i = malloc();
    927 double * d = malloc();
    928 struct Spinlock { ... } __attribute__(( aligned(128) ));
    929 Spinlock * sl = malloc();
    930 \end{lstlisting}
    931 where each @malloc@ call provides the return type as @T@, which is used with @sizeof@, @_Alignof@, and casting the storage to the correct type.
    932 This interface removes many of the common allocation errors in C programs.
\VRef[Figure]{f:CFADynamicAllocationAPI} shows the \CFA wrappers for the equivalent C/\CC allocation routines with the same semantic behaviour.
    934 
    935 \begin{figure}
    936 \begin{lstlisting}
    937 T * malloc( void );
    938 T * aalloc( size_t dim );
    939 T * calloc( size_t dim );
    940 T * resize( T * ptr, size_t size );
    941 T * realloc( T * ptr, size_t size );
    942 T * memalign( size_t align );
    943 T * amemalign( size_t align, size_t dim );
    944 T * cmemalign( size_t align, size_t dim  );
    945 T * aligned_alloc( size_t align );
    946 int posix_memalign( T ** ptr, size_t align );
    947 T * valloc( void );
    948 T * pvalloc( void );
    949 \end{lstlisting}
    950 \caption{\CFA C-Style Dynamic-Allocation API}
    951 \label{f:CFADynamicAllocationAPI}
    952 \end{figure}
    953 
    954 In addition to the \CFA C-style allocator interface, a new allocator interface is provided to further increase orthogonality and usability of dynamic-memory allocation.
    955 This interface helps programmers in three ways.
    956 \begin{itemize}
    957 \item
    958 naming: \CFA regular and @ttype@ polymorphism is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
    959 \item
named arguments: individual allocation properties are specified using a postfix function call, so programmers do not have to remember parameter positions in allocation calls.
    961 \item
    962 object size: like the \CFA C-style interface, programmers do not have to specify object size or cast allocation results.
    963 \end{itemize}
    964 Note, postfix function call is an alternative call syntax, using backtick @`@, where the argument appears before the function name, \eg
    965 \begin{cfa}
    966 duration ?@`@h( int h );                // ? denote the position of the function operand
    967 duration ?@`@m( int m );
    968 duration ?@`@s( int s );
    969 duration dur = 3@`@h + 42@`@m + 17@`@s;
    970 \end{cfa}
    971 @ttype@ polymorphism is similar to \CC variadic templates.
    972 
    973 \paragraph{\lstinline{T * alloc( ... )} or \lstinline{T * alloc( size_t dim, ... )}}
is overloaded with a variable number of specific allocation routines, or an integer dimension parameter followed by a variable number of specific allocation routines.
    975 A call without parameters returns a dynamically allocated object of type @T@ (@malloc@).
    976 A call with only the dimension (dim) parameter returns a dynamically allocated array of objects of type @T@ (@aalloc@).
    977 The variable number of arguments consist of allocation properties, which can be combined to produce different kinds of allocations.
    978 The only restriction is for properties @realloc@ and @resize@, which cannot be combined.
    979 
    980 The allocation property functions are:
    981 \subparagraph{\lstinline{T_align ?`align( size_t alignment )}}
    982 to align the allocation.
    983 The alignment parameter must be $\ge$ the default alignment (@libAlign()@ in \CFA) and a power of two, \eg:
    984 \begin{cfa}
    985 int * i0 = alloc( @4096`align@ );  sout | i0 | nl;
    986 int * i1 = alloc( 3, @4096`align@ );  sout | i1; for (i; 3 ) sout | &i1[i]; sout | nl;
    987 
    988 0x555555572000
    989 0x555555574000 0x555555574000 0x555555574004 0x555555574008
    990 \end{cfa}
    991 returns a dynamic object and object array aligned on a 4096-byte boundary.
    992 
    993 \subparagraph{\lstinline{S_fill(T) ?`fill ( /* various types */ )}}
    994 to initialize storage.
    995 There are three ways to fill storage:
    996 \begin{enumerate}
    997 \item
    998 A char fills each byte of each object.
    999 \item
    1000 An object of the returned type fills each object.
    1001 \item
    1002 An object array pointer fills some or all of the corresponding object array.
    1003 \end{enumerate}
    1004 For example:
    1005 \begin{cfa}[numbers=left]
    1006 int * i0 = alloc( @0n`fill@ );  sout | *i0 | nl;  // disambiguate 0
    1007 int * i1 = alloc( @5`fill@ );  sout | *i1 | nl;
    1008 int * i2 = alloc( @'\xfe'`fill@ ); sout | hex( *i2 ) | nl;
    1009 int * i3 = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | i3[i]; sout | nl;
    1010 int * i4 = alloc( 5, @0xdeadbeefN`fill@ );  for ( i; 5 ) sout | hex( i4[i] ); sout | nl;
    1011 int * i5 = alloc( 5, @i3`fill@ );  for ( i; 5 ) sout | i5[i]; sout | nl;
    1012 int * i6 = alloc( 5, @[i3, 3]`fill@ );  for ( i; 5 ) sout | i6[i]; sout | nl;
    1013 \end{cfa}
    1014 \begin{lstlisting}[numbers=left]
    1015 0
    1016 5
    1017 0xfefefefe
    1018 5 5 5 5 5
    1019 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef
    1020 5 5 5 5 5
    1021 5 5 5 -555819298 -555819298  // two undefined values
    1022 \end{lstlisting}
    1023 Examples 1 to 3, fill an object with a value or characters.
    1024 Examples 4 to 7, fill an array of objects with values, another array, or part of an array.
    1025 
    1026 \subparagraph{\lstinline{S_resize(T) ?`resize( void * oaddr )}}
    1027 used to resize, realign, and fill, where the old object data is not copied to the new object.
    1028 The old object type may be different from the new object type, since the values are not used.
    1029 For example:
    1030 \begin{cfa}[numbers=left]
    1031 int * i = alloc( @5`fill@ );  sout | i | *i;
    1032 i = alloc( @i`resize@, @256`align@, @7`fill@ );  sout | i | *i;
    1033 double * d = alloc( @i`resize@, @4096`align@, @13.5`fill@ );  sout | d | *d;
    1034 \end{cfa}
    1035 \begin{lstlisting}[numbers=left]
    1036 0x55555556d5c0 5
    1037 0x555555570000 7
    1038 0x555555571000 13.5
    1039 \end{lstlisting}
    1040 Examples 2 to 3 change the alignment, fill, and size for the initial storage of @i@.
    1041 
    1042 \begin{cfa}[numbers=left]
    1043 int * ia = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | ia[i]; sout | nl;
    1044 ia = alloc( 10, @ia`resize@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl;
    1045 sout | ia; ia = alloc( 5, @ia`resize@, @512`align@, @13`fill@ ); sout | ia; for ( i; 5 ) sout | ia[i]; sout | nl;;
    1046 ia = alloc( 3, @ia`resize@, @4096`align@, @2`fill@ );  sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl;
    1047 \end{cfa}
    1048 \begin{lstlisting}[numbers=left]
    1049 5 5 5 5 5
    1050 7 7 7 7 7 7 7 7 7 7
    1051 0x55555556d560 0x555555571a00 13 13 13 13 13
    1052 0x555555572000 0x555555572000 2 0x555555572004 2 0x555555572008 2
    1053 \end{lstlisting}
    1054 Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
    1055 
    1056 \subparagraph{\lstinline{S_realloc(T) ?`realloc( T * a ))}}
    1057 used to resize, realign, and fill, where the old object data is copied to the new object.
The old object type must be the same as the new object type, since the values are used.
    1059 Note, for @fill@, only the extra space after copying the data from the old object is filled with the given parameter.
    1060 For example:
    1061 \begin{cfa}[numbers=left]
    1062 int * i = alloc( @5`fill@ );  sout | i | *i;
    1063 i = alloc( @i`realloc@, @256`align@ );  sout | i | *i;
    1064 i = alloc( @i`realloc@, @4096`align@, @13`fill@ );  sout | i | *i;
    1065 \end{cfa}
    1066 \begin{lstlisting}[numbers=left]
    1067 0x55555556d5c0 5
    1068 0x555555570000 5
    1069 0x555555571000 5
    1070 \end{lstlisting}
    1071 Examples 2 to 3 change the alignment for the initial storage of @i@.
    1072 The @13`fill@ for example 3 does nothing because no extra space is added.
    1073 
    1074 \begin{cfa}[numbers=left]
    1075 int * ia = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | ia[i]; sout | nl;
    1076 ia = alloc( 10, @ia`realloc@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl;
    1077 sout | ia; ia = alloc( 1, @ia`realloc@, @512`align@, @13`fill@ ); sout | ia; for ( i; 1 ) sout | ia[i]; sout | nl;;
    1078 ia = alloc( 3, @ia`realloc@, @4096`align@, @2`fill@ );  sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl;
    1079 \end{cfa}
    1080 \begin{lstlisting}[numbers=left]
    1081 5 5 5 5 5
    1082 5 5 5 5 5 7 7 7 7 7
    1083 0x55555556c560 0x555555570a00 5
    1084 0x555555571000 0x555555571000 5 0x555555571004 2 0x555555571008 2
    1085 \end{lstlisting}
    1086 Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
    1087 The @13`fill@ for example 3 does nothing because no extra space is added.
    1088 
    1089 These \CFA allocation features are used extensively in the development of the \CFA runtime.
     316
     317\begin{itemize}
     318\item
     319@addr@: the address of the currently allocated dynamic object.
     320\end{itemize}
@malloc_size@ returns the request size of the given dynamic object. On failure, it returns zero.
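
The distinction from @malloc_usable_size@ can be seen in a short sketch; the bucket capacity shown is illustrative, and @malloc_size@ is assumed to be declared by the allocator's header:
\begin{lstlisting}
#include <malloc.h>	// malloc_usable_size
#include <stdlib.h>

extern size_t malloc_size( void * );	// declared by the allocator

int main() {
	void * p = malloc( 42 );
	size_t req = malloc_size( p );		// 42: size requested from the allocator
	size_t cap = malloc_usable_size( p );	// illustrative: 64, the bucket capacity
	// req <= cap always holds
	free( p );
}
\end{lstlisting}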
     322
     323\subsection{\lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}}
This @realloc@ is an extension of the default @realloc@ (FIX ME: cite default @realloc@). In addition to reallocating an old object and preserving its data, it can also realign the old object to a new alignment requirement.
\paragraph{Usage}
This @realloc@ takes three parameters: it takes an additional @nalign@ parameter compared to the default @realloc@.
     327
\begin{itemize}
\item
@oaddr@: the address of the old object to be reallocated.
\item
@nalign@: the new alignment to which the old object needs to be realigned.
\item
@size@: the new size requirement for the object.
\end{itemize}
It returns an object with the size and alignment given in the parameters that preserves the data in the old object. On failure, it returns a @NULL@ pointer.
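
A usage sketch, assuming the routine is declared by the allocator's header:
\begin{lstlisting}
int * a = malloc( 100 * sizeof(int) );
// grow to 200 elements AND realign to a page boundary; the data is preserved
a = realloc( a, 4096, 200 * sizeof(int) );
if ( a == NULL ) exit( EXIT_FAILURE );
free( a );
\end{lstlisting}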
     337
     338\subsection{\CFA Malloc Interface}
We added some routines to the malloc interface of \CFA. These routines can only be used in \CFA and not in our standalone uHeap allocator, as they use features that are only provided by \CFA and not by C. They make the allocator even more usable for programmers.
\CFA provides the ability to know the return type of a call to the allocator. So, in these added routines, we mainly removed the object-size parameter, as the allocator can calculate the size of the object from the return type.
     341
     342\subsection{\lstinline{T * malloc( void )}}
This @malloc@ is a simplified polymorphic form of the default @malloc@ (FIX ME: cite malloc). It takes no parameters, compared to the default @malloc@ that takes one.
\paragraph{Usage}
This @malloc@ takes no parameters.
It returns a dynamic object of the size of type @T@. On failure, it returns a @NULL@ pointer.
     347
     348\subsection{\lstinline{T * aalloc( size_t dim )}}
This @aalloc@ is a simplified polymorphic form of the above @aalloc@ (FIX ME: cite aalloc). It takes one parameter, compared to the above @aalloc@ that takes two.
\paragraph{Usage}
@aalloc@ takes one parameter.
     352
     353\begin{itemize}
     354\item
     355@dim@: required number of objects in the array.
     356\end{itemize}
      357It returns a dynamic object with the capacity to contain @dim@ objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer.
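A sketch of a typical call:

Example: @int * a = aalloc( 10 )@
This call returns a dynamic array with capacity for ten @int@s.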
     358
     359\subsection{\lstinline{T * calloc( size_t dim )}}
      360This calloc is a simplified polymorphic form of the default calloc (FIX ME: cite calloc). It takes one parameter, whereas the default calloc takes two.
     361\paragraph{Usage}
     362This calloc takes one parameter.
     363
     364\begin{itemize}
     365\item
     366@dim@: required number of objects in the array.
     367\end{itemize}
      368It returns a zero-filled dynamic object with the capacity to contain @dim@ objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer.
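A sketch of a typical call:

Example: @int * a = calloc( 10 )@
This call returns a zero-filled dynamic array with capacity for ten @int@s.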
     369
     370\subsection{\lstinline{T * resize( T * ptr, size_t size )}}
      371This resize is a simplified polymorphic form of the resize above (FIX ME: cite resize with alignment). It takes two parameters, whereas the resize above takes three. It frees the programmer from explicitly specifying the alignment, as \CFA allows the allocator to obtain the alignment from the return type.
     372\paragraph{Usage}
     373This resize takes two parameters.
     374
     375\begin{itemize}
     376\item
     377@ptr@: address of the old object.
     378\item
     379@size@: the required size of the new object.
     380\end{itemize}
      381It returns a dynamic object of the given size. The returned object is aligned to the alignment of type @T@. On failure, it returns a @NULL@ pointer.
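A sketch of a typical call (assuming @p@ is an existing @int@ pointer):

Example: @p = resize( p, 256 )@
This call resizes the allocation to 256 bytes, aligned for @int@; the old data is not preserved.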
     382
     383\subsection{\lstinline{T * realloc( T * ptr, size_t size )}}
      384This @realloc@ is a simplified polymorphic form of the @realloc@ above (FIX ME: cite @realloc@ with align). It takes two parameters, whereas the @realloc@ above takes three. It frees the programmer from explicitly specifying the alignment, as \CFA allows the allocator to obtain the alignment from the return type.
     385\paragraph{Usage}
     386This @realloc@ takes two parameters.
     387
     388\begin{itemize}
     389\item
     390@ptr@: address of the old object.
     391\item
     392@size@: the required size of the new object.
     393\end{itemize}
      394It returns a dynamic object of the given size that preserves the data from the old object. The returned object is aligned to the alignment of type @T@. On failure, it returns a @NULL@ pointer.
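A sketch of a typical call (assuming @p@ is an existing @int@ pointer):

Example: @p = realloc( p, 256 )@
This call resizes the allocation to 256 bytes, aligned for @int@, and preserves the old data.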
     395
     396\subsection{\lstinline{T * memalign( size_t align )}}
      397This memalign is a simplified polymorphic form of the default memalign (FIX ME: cite memalign). It takes one parameter, whereas the default memalign takes two.
     398\paragraph{Usage}
      399memalign takes one parameter.
     400
     401\begin{itemize}
     402\item
     403@align@: the required alignment of the dynamic object.
     404\end{itemize}
      405It returns a dynamic object of the size of type @T@ that is aligned to the given parameter @align@. On failure, it returns a @NULL@ pointer.
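A sketch of a typical call:

Example: @int * p = memalign( 64 )@
This call returns an @int@-sized dynamic object aligned to a 64-byte boundary.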
     406
     407\subsection{\lstinline{T * amemalign( size_t align, size_t dim )}}
      408This amemalign is a simplified polymorphic form of the amemalign above (FIX ME: cite amemalign). It takes two parameters, whereas the amemalign above takes three.
     409\paragraph{Usage}
     410amemalign takes two parameters.
     411
     412\begin{itemize}
     413\item
     414@align@: required alignment of the dynamic array.
     415\item
     416@dim@: required number of objects in the array.
     417\end{itemize}
      418It returns a dynamic object with the capacity to contain @dim@ objects, each of the size of type @T@. The returned object is aligned to the given parameter @align@. On failure, it returns a @NULL@ pointer.
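A sketch of a typical call:

Example: @int * a = amemalign( 64, 10 )@
This call returns a dynamic array of ten @int@s whose start is aligned to a 64-byte boundary.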
     419
     420\subsection{\lstinline{T * cmemalign( size_t align, size_t dim  )}}
      421This cmemalign is a simplified polymorphic form of the cmemalign above (FIX ME: cite cmemalign). It takes two parameters, whereas the cmemalign above takes three.
     422\paragraph{Usage}
     423cmemalign takes two parameters.
     424
     425\begin{itemize}
     426\item
     427@align@: required alignment of the dynamic array.
     428\item
     429@dim@: required number of objects in the array.
     430\end{itemize}
      431It returns a dynamic object with the capacity to contain @dim@ objects, each of the size of type @T@. The returned object is aligned to the given parameter @align@ and is zero-filled. On failure, it returns a @NULL@ pointer.
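A sketch of a typical call:

Example: @int * a = cmemalign( 64, 10 )@
This call returns a zero-filled dynamic array of ten @int@s whose start is aligned to a 64-byte boundary.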
     432
     433\subsection{\lstinline{T * aligned_alloc( size_t align )}}
      434This @aligned_alloc@ is a simplified polymorphic form of the default @aligned_alloc@ (FIX ME: cite @aligned_alloc@). It takes one parameter, whereas the default @aligned_alloc@ takes two.
     435\paragraph{Usage}
     436This @aligned_alloc@ takes one parameter.
     437
     438\begin{itemize}
     439\item
     440@align@: required alignment of the dynamic object.
     441\end{itemize}
     442It returns a dynamic object of the size of type @T@ that is aligned to the given parameter. On failure, it returns a @NULL@ pointer.
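A sketch of a typical call, analogous to the memalign example above:

Example: @int * p = aligned_alloc( 64 )@
This call returns an @int@-sized dynamic object aligned to a 64-byte boundary.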
     443
     444\subsection{\lstinline{int posix_memalign( T ** ptr, size_t align )}}
      445This @posix_memalign@ is a simplified polymorphic form of the default @posix_memalign@ (FIX ME: cite @posix_memalign@). It takes two parameters, whereas the default @posix_memalign@ takes three.
     446\paragraph{Usage}
      447This @posix_memalign@ takes two parameters.
     448
     449\begin{itemize}
     450\item
      451@ptr@: the address of a pointer variable in which to store the address of the allocated object.
     452\item
     453@align@: required alignment of the dynamic object.
     454\end{itemize}
     455
      456It stores the address of a dynamic object of the size of type @T@ in the given parameter @ptr@. This object is aligned to the given parameter @align@. On failure, it returns a nonzero error code.
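A sketch of a typical call (the error-code check mirrors the default @posix_memalign@):

Example: @int * p; int rc = posix_memalign( &p, 64 )@
If @rc@ is zero, @p@ holds an @int@-sized dynamic object aligned to a 64-byte boundary.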
     457
     458\subsection{\lstinline{T * valloc( void )}}
      459This @valloc@ is a simplified polymorphic form of the default @valloc@ (FIX ME: cite @valloc@). It takes no parameters, whereas the default @valloc@ takes one.
     460\paragraph{Usage}
     461@valloc@ takes no parameters.
     462It returns a dynamic object of the size of type @T@ that is aligned to the page size. On failure, it returns a @NULL@ pointer.
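A sketch of a typical call:

Example: @int * p = valloc()@
This call returns an @int@-sized dynamic object aligned to the page size.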
     463
     464\subsection{\lstinline{T * pvalloc( void )}}
     465\paragraph{Usage}
     466@pvalloc@ takes no parameters.
      467It returns a dynamic object whose size is the size of type @T@ rounded up to a multiple of the page size. The returned object is also aligned to the page size. On failure, it returns a @NULL@ pointer.
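A sketch of a typical call:

Example: @int * p = pvalloc()@
This call returns a page-aligned dynamic object whose size is the size of an @int@ rounded up to a whole page.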
     468
     469\subsection{Alloc Interface}
      470In addition to improving the allocator interface both for \CFA and for our standalone uHeap allocator in C, we added a new alloc interface in \CFA that increases the usability of dynamic memory allocation.
     471This interface helps programmers in three major ways.
     472
     473\begin{itemize}
     474\item
      475Routine Name: the alloc interface frees programmers from remembering different routine names for different kinds of dynamic allocations.
     476\item
      477Parameter Positions: the alloc interface frees programmers from remembering parameter positions in routine calls.
     478\item
      479Object Size: the alloc interface does not require the programmer to specify the object size, as \CFA allows the allocator to determine the object size from the return type of the alloc call.
     480\end{itemize}
     481
      482The alloc interface uses polymorphism, backtick routines (FIX ME: cite backtick), and ttype parameters of \CFA (FIX ME: cite ttype) to provide a very simple dynamic memory-allocation interface to programmers. The new interface has just one routine name, alloc, that can perform a wide range of dynamic allocations. The parameters use backtick functions to provide a named-parameter-like feature for the alloc interface, so programmers do not have to remember parameter positions in an alloc call, except for the position of the dimension (dim) parameter.
     483
     484\subsection{Routine: \lstinline{T * alloc( ... )}}
      485A call to alloc without any parameters returns one dynamically allocated object of the size of type @T@.
      486Only the dimension (dim) parameter for array allocation has a fixed position in the alloc routine. To allocate an array of objects, the required number of members in the array must be given as the first parameter to the alloc routine.
      487The alloc routine accepts six kinds of arguments. Using different combinations of these parameters, different kinds of allocations can be performed. Any combination of parameters can be used together, except @`realloc@ and @`resize@, which should not be used simultaneously in one call because doing so creates ambiguity about whether to reallocate or resize the currently allocated dynamic object. If both @`resize@ and @`realloc@ are used in one call to alloc, the latter takes effect or unexpected results may be produced.
     488
     489\paragraph{Dim}
      490This is the only parameter in the alloc routine with a fixed position, and it is also the only parameter that does not use a backtick function. For an array allocation of objects of type @T@, it must be passed in the first position of the alloc call.
     491It represents the required number of members in the array allocation as in \CFA's aalloc (FIX ME: cite aalloc).
     492This parameter should be of type @size_t@.
     493
      494Example: @int * a = alloc( 5 )@
      495This call will return a dynamic array of five integers.
     496
     497\paragraph{Align}
      498This parameter is position-free and uses the backtick routine align (@`align@). The parameter passed with @`align@ should be of type @size_t@. If the alignment parameter is not a power of two or is less than the default alignment of the allocator (which can be obtained using the routine @libAlign@ in \CFA), the passed alignment is rejected and the default alignment is used.
     499
      500Example: @int * b = alloc( 5, 64`align )@
      501This call will return a dynamic array of five integers, aligned to a 64-byte boundary.
     502
     503\paragraph{Fill}
      504This parameter is position-free and uses the backtick routine fill (@`fill@). In the case of @realloc@, only the extra space after copying the data from the old object is filled with the given parameter.
      505Three types of parameters can be passed using @`fill@.
     506
     507\begin{itemize}
     508\item
      509@char@: a char can be passed with @`fill@ to fill the whole dynamic allocation with the given character, repeated until the end of the requested allocation.
     510\item
      511Object of return type: an object of the return type can be passed with @`fill@ to fill the whole dynamic allocation with copies of the given object, repeated until the end of the requested allocation.
     512\item
      513Dynamic object of return type: a dynamic object of the return type can be passed with @`fill@ to fill the new allocation with the given dynamic object. In this case, the allocated memory is not filled repeatedly to the end of the allocation; filling stops at the end of the object passed to @`fill@ or at the end of the requested allocation, whichever comes first.
     514\end{itemize}
     515
      516Example: @int * b = alloc( 5, 'a'`fill )@
      517This call will return a dynamic array of five integers, filled with the character @'a'@ repeated to the end of the requested allocation.
     518
      519Example: @int * b = alloc( 5, 4`fill )@
      520This call will return a dynamic array of five integers, filled with the integer 4 repeated to the end of the requested allocation.
     521
      522Example: @int * b = alloc( 5, a`fill )@, where @a@ is an @int@ pointer
      523This call will return a dynamic array of five integers, copying the data from @a@ into the returned object until the end of @a@ or the end of the new object is reached.
     524
     525\paragraph{Resize}
      526This parameter is position-free and uses the backtick routine resize (@`resize@). It represents the old dynamic object (oaddr) that the programmer wants to
     527\begin{itemize}
     528\item
     529resize to a new size.
     530\item
      531realign to a new alignment.
     532\item
     533fill with something.
     534\end{itemize}
      535The data in the old dynamic object is not preserved in the new object. The type of the object passed to @`resize@ and the return type of the alloc call may differ.
     536
      537Example: @int * b = alloc( 5, a`resize )@
      538This call will resize object @a@ into a dynamic array that can contain five integers.
      539
      540Example: @int * b = alloc( 5, a`resize, 32`align )@
      541This call will resize object @a@ into a dynamic array that can contain five integers. The returned object will also be aligned to a 32-byte boundary.
      542
      543Example: @int * b = alloc( 5, a`resize, 32`align, 2`fill )@
      544This call will resize object @a@ into a dynamic array that can contain five integers. The returned object will also be aligned to a 32-byte boundary and filled with the integer 2.
     545
     546\paragraph{Realloc}
      547This parameter is position-free and uses the backtick routine @realloc@ (@`realloc@). It represents the old dynamic object (oaddr) that the programmer wants to
     548\begin{itemize}
     549\item
     550realloc to a new size.
     551\item
      552realign to a new alignment.
     553\item
     554fill with something.
     555\end{itemize}
      556The data in the old dynamic object is preserved in the new object. The type of the object passed to @`realloc@ and the return type of the alloc call must be the same.
     557
      558Example: @int * b = alloc( 5, a`realloc )@
      559This call will realloc object @a@ into a dynamic array that can contain five integers.
      560
      561Example: @int * b = alloc( 5, a`realloc, 32`align )@
      562This call will realloc object @a@ into a dynamic array that can contain five integers. The returned object will also be aligned to a 32-byte boundary.
      563
      564Example: @int * b = alloc( 5, a`realloc, 32`align, 2`fill )@
      565This call will realloc object @a@ into a dynamic array that can contain five integers. The returned object will also be aligned to a 32-byte boundary. The extra space after copying the data from @a@ to the returned object will be filled with 2.
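Combining these position-free parameters, a sketch of one composite call (assuming @p@ is an @int@ array from a prior alloc):

Example: @p = alloc( 20, p`realloc, 64`align, 0`fill )@
This call grows @p@ into a dynamic array of twenty integers aligned to a 64-byte boundary, preserving the existing values and zero-filling the extra space.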
  • doc/theses/mubeen_zulfiqar_MMath/background.tex

    r2e9b59b rba897d21  
    3434\VRef[Figure]{f:AllocatorComponents} shows the two important data components for a memory allocator, management and storage, collectively called the \newterm{heap}.
    3535The \newterm{management data} is a data structure located at a known memory address and contains all information necessary to manage the storage data.
    36 The management data starts with fixed-sized information in the static-data memory that references components in the dynamic-allocation memory.
     36The management data starts with fixed-sized information in the static-data memory that flows into the dynamic-allocation memory.
    3737The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}.
    38 Allocated objects (light grey) are variable sized, and allocated and maintained by the program;
     38Allocated objects (white) are variable sized, and allocated and maintained by the program;
    3939\ie only the program knows the location of allocated storage, not the memory allocator.
    4040\begin{figure}[h]
     
    4444\label{f:AllocatorComponents}
    4545\end{figure}
    46 Freed objects (white) represent memory deallocated by the program, which are linked into one or more lists facilitating easy location of new allocations.
     46Freed objects (light grey) are memory deallocated by the program, which are linked into one or more lists facilitating easy location for new allocations.
    4747Often the free list is chained internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks.
    4848Reserved memory (dark grey) is one or more blocks of memory obtained from the operating system but not yet allocated to the program;
     
    5454The trailer may be used to simplify an allocation implementation, \eg coalescing, and/or for security purposes to mark the end of an object.
    5555An object may be preceded by padding to ensure proper alignment.
    56 Some algorithms quantize allocation requests into distinct sizes, called \newterm{buckets}, resulting in additional spacing after objects less than the quantized value.
    57 (Note, the buckets are often organized as an array of ascending bucket sizes for fast searching, \eg binary search, and the array is stored in the heap management-area, where each bucket is a top point to the freed objects of that size.)
     56Some algorithms quantize allocation requests into distinct sizes resulting in additional spacing after objects less than the quantized value.
    5857When padding and spacing are necessary, neither can be used to satisfy a future allocation request while the current allocation exists.
    5958A free object also contains management data, \eg size, chaining, etc.
     
    8281Fragmentation is memory requested from the operating system but not used by the program;
    8382hence, allocated objects are not fragmentation.
    84 \VRef[Figure]{f:InternalExternalFragmentation} shows fragmentation is divided into two forms: internal or external.
      83\VRef[Figure]{f:InternalExternalFragmentation} shows fragmentation is divided into two forms: internal or external.
    8584
    8685\begin{figure}
     
    9796An allocator should strive to keep internal management information to a minimum.
    9897
    99 \newterm{External fragmentation} is all memory space reserved from the operating system but not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes all external management data, freed objects, and reserved memory.
     98\newterm{External fragmentation} is all memory space reserved from the operating system but not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes freed objects, all external management data, and reserved memory.
    10099This memory is problematic in two ways: heap blowup and highly fragmented memory.
    101100\newterm{Heap blowup} occurs when memory freed by the program is not reused for future allocations leading to potentially unbounded external fragmentation growth~\cite{Berger00}.
     
    126125\end{figure}
    127126
    128 For a single-threaded memory allocator, three basic approaches for controlling fragmentation are identified~\cite{Johnstone99}.
     127For a single-threaded memory allocator, three basic approaches for controlling fragmentation have been identified~\cite{Johnstone99}.
    129128The first approach is a \newterm{sequential-fit algorithm} with one list of free objects that is searched for a block large enough to fit a requested object size.
    130129Different search policies determine the free object selected, \eg the first free object large enough or closest to the requested size.
     
    133132
    134133The second approach is a \newterm{segregated} or \newterm{binning algorithm} with a set of lists for different sized freed objects.
    135 When an object is allocated, the requested size is rounded up to the nearest bin-size, often leading to spacing after the object.
     134When an object is allocated, the requested size is rounded up to the nearest bin-size, possibly with spacing after the object.
    136135A binning algorithm is fast at finding free memory of the appropriate size and allocating it, since the first free object on the free list is used.
    137136The fewer bin-sizes, the fewer lists need to be searched and maintained;
     
    159158Temporal locality commonly occurs during an iterative computation with a fix set of disjoint variables, while spatial locality commonly occurs when traversing an array.
    160159
    161 Hardware takes advantage of temporal and spatial locality through multiple levels of caching, \ie memory hierarchy.
     160Hardware takes advantage of temporal and spatial locality through multiple levels of caching (\ie memory hierarchy).
    162161When an object is accessed, the memory physically located around the object is also cached with the expectation that the current and nearby objects will be referenced within a short period of time.
    163162For example, entire cache lines are transferred between memory and cache and entire virtual-memory pages are transferred between disk and memory.
     
    172171
    173172There are a number of ways a memory allocator can degrade locality by increasing the working set.
    174 For example, a memory allocator may access multiple free objects before finding one to satisfy an allocation request, \eg sequential-fit algorithm.
     173For example, a memory allocator may access multiple free objects before finding one to satisfy an allocation request (\eg sequential-fit algorithm).
    175174If there are a (large) number of objects accessed in very different areas of memory, the allocator may perturb the program's memory hierarchy causing multiple cache or page misses~\cite{Grunwald93}.
    176175Another way locality can be degraded is by spatially separating related data.
     
    182181
    183182A multi-threaded memory-allocator does not run any threads itself, but is used by a multi-threaded program.
    184 In addition to single-threaded design issues of fragmentation and locality, a multi-threaded allocator is simultaneously accessed by multiple threads, and hence, must deal with concurrency issues such as mutual exclusion, false sharing, and additional forms of heap blowup.
     183In addition to single-threaded design issues of locality and fragmentation, a multi-threaded allocator may be simultaneously accessed by multiple threads, and hence, must deal with concurrency issues such as mutual exclusion, false sharing, and additional forms of heap blowup.
    185184
    186185
     
    193192Second is when multiple threads contend for a shared resource simultaneously, and hence, some threads must wait until the resource is released.
    194193Contention can be reduced in a number of ways:
    195 \begin{itemize}[itemsep=0pt]
    196 \item
    197194using multiple fine-grained locks versus a single lock, spreading the contention across a number of locks;
    198 \item
    199195using trylock and generating new storage if the lock is busy, yielding a classic space versus time tradeoff;
    200 \item
    201196using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Oyama99}.
    202 \end{itemize}
    203 However, all of these approaches have degenerate cases where program contention is high, which occurs outside of the allocator.
     197However, all of these approaches have degenerate cases where contention occurs.
    204198
    205199
     
    281275\label{s:MultipleHeaps}
    282276
    283 A multi-threaded allocator has potentially multiple threads and heaps.
     277A single-threaded allocator has at most one thread and heap, while a multi-threaded allocator has potentially multiple threads and heaps.
    284278The multiple threads cause complexity, and multiple heaps are a mechanism for dealing with the complexity.
    285279The spectrum ranges from multiple threads using a single heap, denoted as T:1 (see \VRef[Figure]{f:SingleHeap}), to multiple threads sharing multiple heaps, denoted as T:H (see \VRef[Figure]{f:SharedHeaps}), to one thread per heap, denoted as 1:1 (see \VRef[Figure]{f:PerThreadHeap}), which is almost back to a single-threaded allocator.
     
    345339An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory.
    346340Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur.
    347 Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area, pushing part of the storage management complexity back to the operating system.
     341Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area.
    348342
    349343\begin{figure}
     
    374368
    375369
    376 \paragraph{1:1 model (thread heaps)} where each thread has its own heap eliminating most contention and locking because threads seldom access another thread's heap (see ownership in \VRef{s:Ownership}).
      370\paragraph{1:1 model (thread heaps)} where each thread has its own heap, which eliminates most contention and locking because threads seldom access another thread's heap (see ownership in \VRef{s:Ownership}).
    377371An additional benefit of thread heaps is improved locality due to better memory layout.
    378372As each thread only allocates from its heap, all objects for a thread are consolidated in the storage area for that heap, better utilizing each CPUs cache and accessing fewer pages.
     
    386380Second is to place the thread heap on a list of available heaps and reuse it for a new thread in the future.
    387381Destroying the thread heap immediately may reduce external fragmentation sooner, since all free objects are freed to the global heap and may be reused by other threads.
    388 Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible..
     382Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap.
    389383
    390384
     
    394388However, an important goal of user-level threading is for fast operations (creation/termination/context-switching) by not interacting with the operating system, which allows the ability to create large numbers of high-performance interacting threads ($>$ 10,000).
    395389It is difficult to retain this goal, if the user-threading model is directly involved with the heap model.
    396 \VRef[Figure]{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model is provided by the language runtime.
      390\VRef[Figure]{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model is provided by the language runtime.
    397391Hence, a user thread allocates/deallocates from/to the heap of the kernel thread on which it is currently executing.
    398392
     
    406400Adopting this model results in a subtle problem with shared heaps.
    407401With kernel threading, an operation that is started by a kernel thread is always completed by that thread.
    408 For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap even if preempted, \ie any locking correctness associated with the shared heap is preserved across preemption.
     402For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap even if preempted.
      403Any locking correctness associated with the shared heap is preserved across preemption.
    409404
    410405However, this correctness property is not preserved for user-level threading.
     
    414409However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is rare (10--100 milliseconds).
    415410Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically.
    416 Occasionally ignoring a preemption should be benign, but a persistent lack of preemption can result in both short and long term starvation.
     411Occasionally ignoring a preemption should be benign.
    417412
    418413
     
    435430
    436431\newterm{Ownership} defines which heap an object is returned-to on deallocation.
    437 If a thread returns an object to the heap it was originally allocated from, a heap has ownership of its objects.
    438 Alternatively, a thread can return an object to the heap it is currently associated with, which can be any heap accessible during a thread's lifetime.
     432If a thread returns an object to the heap it was originally allocated from, the heap has ownership of its objects.
     433Alternatively, a thread can return an object to the heap it is currently allocating from, which can be any heap accessible during a thread's lifetime.
    439434\VRef[Figure]{f:HeapsOwnership} shows an example of multiple heaps (minus the global heap) with and without ownership.
    440435Again, the arrows indicate the direction memory conceptually moves for each kind of operation.
     
    544539Only with the 1:1 model and ownership is active and passive false-sharing avoided (see \VRef{s:Ownership}).
    545540Passive false-sharing may still occur, if delayed ownership is used.
    546 Finally, a completely free container can become reserved storage and be reset to allocate objects of a new size or freed to the global heap.
    547541
    548542\begin{figure}
     
    559553\caption{Free-list Structure with Container Ownership}
    560554\end{figure}
     555
     556A fragmented heap has multiple containers that may be partially or completely free.
     557A completely free container can become reserved storage and be reset to allocate objects of a new size.
     558When a heap reaches a threshold of free objects, it moves some free storage to the global heap for reuse to prevent heap blowup.
     559Without ownership, when a heap frees objects to the global heap, individual objects must be passed, and placed on the global-heap's free-list.
      560Containers cannot be freed to the global heap unless completely free, because objects still allocated within a container remain in use by the program.
    561561
    562562When a container changes ownership, the ownership of all objects within it changes as well.
     
    569569Note, once the object is freed by Task$_1$, no more false sharing can occur until the container changes ownership again.
    570570To prevent this form of false sharing, container movement may be restricted to when all objects in the container are free.
    571 One implementation approach that increases the freedom to return a free container to the operating system involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area, again pushing storage management complexity back to the operating system.
     571One implementation approach that increases the freedom to return a free container to the operating system involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area.
    572572
    573573\begin{figure}
     
    700700\end{figure}
    701701
    702 As mentioned, an implementation may have only one heap interact with the global heap, so the other heap can be simplified.
     702As mentioned, an implementation may have only one heap deal with the global heap, so the other heap can be simplified.
    703703For example, if only the private heap interacts with the global heap, the public heap can be reduced to a lock-protected free-list of objects deallocated by other threads due to ownership, called a \newterm{remote free-list}.
    704704To avoid heap blowup, the private heap allocates from the remote free-list when it reaches some threshold or it has no free storage.
     
    721721An allocation buffer is reserved memory (see~\VRef{s:AllocatorComponents}) not yet allocated to the program, and is used for allocating objects when the free list is empty.
    722722That is, rather than requesting new storage for a single object, an entire buffer is requested from which multiple objects are allocated later.
    723 Any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or operating system, respectively.
      723Any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or operating system, respectively.
    724724The allocation buffer reduces contention and the number of global/operating-system calls.
    725725For coalescing, a buffer is split into smaller objects by allocations, and recomposed into larger buffer areas during deallocations.
    726726
    727 Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts (simple bump allocation).
     727Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts.
    728728Furthermore, to prevent heap blowup, objects should be reused before allocating a new allocation buffer.
    729 Thus, allocation buffers are often allocated more frequently at program/thread start, and then allocations often diminish.
     729Thus, allocation buffers are often allocated more frequently at program/thread start, and then their use often diminishes.
    730730
    731731Using an allocation buffer with a thread heap avoids active false-sharing, since all objects in the allocation buffer are allocated to the same thread.
     
    746746\label{s:LockFreeOperations}
    747747
    748 A \newterm{lock-free algorithm} guarantees safe concurrent-access to a data structure, so that at least one thread makes progress, but an individual task has no execution bound and may starve~\cite[pp.~745--746]{Herlihy93}.
    749 (A \newterm{wait-free algorithm} puts a bound on the number of steps any thread takes to complete an operation to prevent starvation.)
      748A lock-free algorithm guarantees safe concurrent-access to a data structure, so that at least one thread can make progress in the system, but an individual task has no bound on its execution, and hence, may starve~\cite[pp.~745--746]{Herlihy93}.
     749% A wait-free algorithm puts a finite bound on the number of steps any thread takes to complete an operation, so an individual task cannot starve
    750750Lock-free operations can be used in an allocator to reduce or eliminate the use of locks.
    751 While locks and lock-free data-structures often have equal performance, lock-free has the advantage of not holding a lock across preemption so other threads can continue to make progress.
    752 With respect to the heap, these situations are unlikely unless all threads make extremely high use of dynamic-memory allocation, which can be an indication of poor design.
      751Locks are a problem under high contention or when the thread holding the lock is preempted and other threads attempt to acquire that lock.
      752With respect to the heap, these situations are unlikely unless all threads make extremely high use of dynamic-memory allocation, which can be an indication of poor design.
    753753Nevertheless, lock-free algorithms can reduce the number of context switches, since a thread does not yield/block while waiting for a lock;
    754 on the other hand, a thread may busy-wait for an unbounded period holding a processor.
     754on the other hand, a thread may busy-wait for an unbounded period.
    755755Finally, lock-free implementations have greater complexity and hardware dependency.
    756756Lock-free algorithms can be applied most easily to simple free-lists, \eg remote free-list, to allow lock-free insertion and removal from the head of a stack.
    757 Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is correspondingly more complex.
     757Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is more complex.
    758758Michael~\cite{Michael04} and Gidenstam \etal \cite{Gidenstam05} have created lock-free variations of the Hoard allocator.
    759759
  • doc/theses/mubeen_zulfiqar_MMath/figures/AllocDS1.fig

    r2e9b59b rba897d21  
    [xfig drawing data elided: the figure is redrawn between the two revisions. Recoverable text labels before: lock, size, free, free list, kernel threads, heap, N$\times$S$_1$, N$\times$S$_2$, N$\times$S$_t$, global pool (sbrk). After: lock, size, free, free list, heap$_1$, heap$_2$, N kernel-thread buckets, local pool, global pool (sbrk), free pool, heap.]
  • doc/theses/mubeen_zulfiqar_MMath/figures/AllocDS2.fig

    r2e9b59b rba897d21  
    [xfig drawing data elided: the figure is redrawn between the two revisions. Recoverable text labels before: lock, size, free, free list, heap$_1$, heap$_2$, H heap buckets, local pool, global pool (sbrk). After: lock, size, free, free list, kernel threads, heap, heaps, local pools, N$\times$S$_1$, N$\times$S$_2$, N$\times$S$_t$, global pool (sbrk).]
  • doc/theses/mubeen_zulfiqar_MMath/intro.tex

    r2e9b59b rba897d21  
    4848Attempts have been made to perform quasi garbage collection in C/\CC~\cite{Boehm88}, but it is a compromise.
    4949This thesis only examines dynamic memory-management with \emph{explicit} deallocation.
    50 While garbage collection and compaction are not part of this work, many of the work's results are applicable to the allocation phase in any memory-management approach.
     50While garbage collection and compaction are not part of this work, many of the results are applicable to the allocation phase in any memory-management approach.
    5151
    5252Most programs use a general-purpose allocator, often the one provided implicitly by the programming-language's runtime.
     
    6565\begin{enumerate}[leftmargin=*]
    6666\item
    67 Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
    68 
    69 \item
    70 Adopt @nullptr@ return for a zero-sized allocation, rather than an actual memory address, which can be passed to @free@.
    71 
    72 \item
    73 Extend the standard C heap functionality by preserving with each allocation:
    74 \begin{itemize}[itemsep=0pt]
    75 \item
    76 its request size plus the amount allocated,
    77 \item
    78 whether an allocation is zero fill,
    79 \item
    80 and allocation alignment.
    81 \end{itemize}
    82 
    83 \item
    84 Use the preserved zero fill and alignment as \emph{sticky} properties for @realloc@ to zero-fill and align when storage is extended or copied.
     67Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
     68
     69\item
     70Adopt returning @nullptr@ for a zero-sized allocation, rather than an actual memory address; both can be passed to @free@.
     71
     72\item
     73Extend the standard C heap functionality by preserving with each allocation its original request size versus the amount allocated, whether the allocation is zero filled, and the allocation alignment.
     74
     75\item
     76Use the zero fill and alignment as \emph{sticky} properties for @realloc@, to realign existing storage, or preserve existing zero-fill and alignment when storage is copied.
    8577Without this extension, it is unsafe to @realloc@ storage initially allocated with zero-fill/alignment as these properties are not preserved when copying.
    8678This silent generation of a problem is unintuitive to programmers and difficult to locate because it is transient.
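For illustration, a minimal sketch in standard C (names and sizes are arbitrary) of how this hazard arises without the sticky-property extension:

    #include <stdlib.h>

    int main( void ) {
        // 64-byte-aligned storage, e.g., for cache-line or SIMD use.
        double * data = aligned_alloc( 64, 16 * sizeof(double) );
        // Growing the storage may move it; standard C only guarantees
        // max_align_t alignment for the new address, so the 64-byte
        // alignment is silently lost. Zero fill from calloc would
        // likewise not extend to the newly added bytes.
        data = realloc( data, 256 * sizeof(double) );
        free( data );
        return 0;
    }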
     
    9486@resize( oaddr, alignment, size )@ re-purposes an old allocation with new alignment but \emph{without} preserving fill.
    9587\item
    96 @realloc( oaddr, alignment, size )@ same as @realloc@ but adding or changing alignment.
     88@realloc( oaddr, alignment, size )@ same as previous @realloc@ but adding or changing alignment.
    9789\item
    9890@aalloc( dim, elemSize )@ same as @calloc@ except memory is \emph{not} zero filled.
     
    10496
    10597\item
    106 Provide additional heap wrapper functions in \CFA creating an orthogonal set of allocation operations and properties.
     98Provide additional heap wrapper functions in \CFA to create a complete orthogonal set of allocation operations and properties.
    10799
    108100\item
     
    117109@malloc_size( addr )@ returns the size of the memory allocation pointed-to by @addr@.
    118110\item
    119 @malloc_usable_size( addr )@ returns the usable (total) size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.
     111@malloc_usable_size( addr )@ returns the usable size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.
    120112\end{itemize}
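As a usage illustration, a small sketch assuming the extended prototypes described above (@malloc_size@ is this allocator's extension, not standard C; @malloc_usable_size@ matches the glibc function of the same name):

    #include <stdio.h>
    #include <stdlib.h>

    // Assumed prototypes from the extended heap API described above.
    extern size_t malloc_size( void * addr );
    extern size_t malloc_usable_size( void * addr );

    int main( void ) {
        void * p = malloc( 100 );                  // request 100 bytes
        size_t request = malloc_size( p );         // original request size
        size_t usable  = malloc_usable_size( p );  // bin size, >= request
        printf( "requested %zu, usable %zu\n", request, usable );
        free( p );
        return 0;
    }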
    121113
     
    124116
    125117\item
    126 Provide complete, fast, and contention-free allocation statistics to help understand allocation behaviour:
     118Provide complete, fast, and contention-free allocation statistics to help understand program behaviour:
    127119\begin{itemize}
    128120\item
  • doc/theses/mubeen_zulfiqar_MMath/performance.tex

    r2e9b59b rba897d21  
    11\chapter{Performance}
    2 \label{c:Performance}
    32
    43\section{Machine Specification}
  • doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.bib

    r2e9b59b rba897d21  
    124124}
    125125
    126 @misc{ptmalloc2,
    127     author      = {Wolfram Gloger},
    128     title       = {ptmalloc version 2},
    129     month       = jun,
    130     year        = 2006,
    131     note        = {\href{http://www.malloc.de/malloc/ptmalloc2-current.tar.gz}{http://www.malloc.de/\-malloc/\-ptmalloc2-current.tar.gz}},
    132 }
    133 
    134 @misc{GNUallocAPI,
    135     author      = {GNU},
    136     title       = {Summary of malloc-Related Functions},
    137     year        = 2020,
    138     note        = {\href{https://www.gnu.org/software/libc/manual/html\_node/Summary-of-Malloc.html}{https://www.gnu.org/\-software/\-libc/\-manual/\-html\_node/\-Summary-of-Malloc.html}},
    139 }
    140 
    141 @misc{SeriallyReusable,
    142     author      = {IBM},
    143     title       = {Serially reusable programs},
    144     month       = mar,
    145     year        = 2021,
    146     note        = {\href{https://www.ibm.com/docs/en/ztpf/1.1.0.15?topic=structures-serially-reusable-programs}{https://www.ibm.com/\-docs/\-en/\-ztpf/\-1.1.0.15?\-topic=structures-serially-reusable-programs}},
    147 }
    148 
    149 @misc{librseq,
    150     author      = {Mathieu Desnoyers},
    151     title       = {Library for Restartable Sequences},
    152     month       = mar,
    153     year        = 2022,
    154     note        = {\href{https://github.com/compudj/librseq}{https://github.com/compudj/librseq}},
     126@misc{nedmalloc,
     127    author      = {Niall Douglas},
     128    title       = {nedmalloc version 1.06 Beta},
     129    month       = jan,
     130    year        = 2010,
     131    note        = {\textsf{http://\-prdownloads.\-sourceforge.\-net/\-nedmalloc/\-nedmalloc\_v1.06beta1\_svn1151.zip}},
    155132}
    156133
  • doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex

    r2e9b59b rba897d21  
    6060% For hyperlinked PDF, suitable for viewing on a computer, use this:
    6161\documentclass[letterpaper,12pt,titlepage,oneside,final]{book}
    62 \usepackage[T1]{fontenc}        % Latin-1 => 256-bit characters, => | not dash, <> not Spanish question marks
    6362
    6463% For PDF, suitable for double-sided printing, change the PrintVersion variable below to "true" and use this \documentclass line instead of the one above:
     
    9594% Use the "hyperref" package
    9695% N.B. HYPERREF MUST BE THE LAST PACKAGE LOADED; ADD ADDITIONAL PKGS ABOVE
    97 \usepackage{url}
    98 \usepackage[dvips,pagebackref=true]{hyperref} % with basic options
     96\usepackage[pagebackref=true]{hyperref} % with basic options
    9997%\usepackage[pdftex,pagebackref=true]{hyperref}
    10098% N.B. pagebackref=true provides links back from the References to the body text. This can cause trouble for printing.
     
    115113    citecolor=blue,        % color of links to bibliography
    116114    filecolor=magenta,      % color of file links
    117     urlcolor=blue,           % color of external links
    118     breaklinks=true
     115    urlcolor=blue           % color of external links
    119116}
    120117\ifthenelse{\boolean{PrintVersion}}{   % for improved print quality, change some hyperref options
     
    125122    urlcolor=black
    126123}}{} % end of ifthenelse (no else)
    127 %\usepackage[dvips,plainpages=false,pdfpagelabels,pdfpagemode=UseNone,pagebackref=true,breaklinks=true,colorlinks=true,linkcolor=blue,citecolor=blue,urlcolor=blue]{hyperref}
    128 \usepackage{breakurl}
    129 \urlstyle{sf}
    130124
    131125%\usepackage[automake,toc,abbreviations]{glossaries-extra} % Exception to the rule of hyperref being the last add-on package
     
    177171\input{common}
    178172%\usepackageinput{common}
    179 \CFAStyle                                               % CFA code-style
    180 \lstset{language=CFA}                                   % default language
     173\CFAStyle                                               % CFA code-style for all languages
    181174\lstset{basicstyle=\linespread{0.9}\sf}                 % CFA typewriter font
    182175\newcommand{\uC}{$\mu$\CC}
  • doc/theses/thierry_delisle_PhD/thesis/Makefile

    r2e9b59b rba897d21  
    2929PICTURES = ${addsuffix .pstex, \
    3030        base \
    31         base_avg \
    32         cache-share \
    33         cache-noshare \
    3431        empty \
    3532        emptybit \
     
    4138        system \
    4239        cycle \
    43         result.cycle.jax.ops \
    4440}
    4541
     
    116112        python3 $< $@
    117113
    118 build/result.%.ns.svg : data/% | ${Build}
    119         ../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops"
    120 
    121 build/result.%.ops.svg : data/% | ${Build}
    122         ../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second"
    123 
    124114## pstex with inverted colors
    125115%.dark.pstex : fig/%.fig Makefile | ${Build}
  • doc/theses/thierry_delisle_PhD/thesis/fig/base.fig

    r2e9b59b rba897d21  
    [xfig coordinate diff omitted: three dashed vertical separator lines deleted from the figure; the ``Ready'' and ``Threads'' labels are unchanged.]
  • doc/theses/thierry_delisle_PhD/thesis/glossary.tex

    r2e9b59b rba897d21  
    101101
    102102\longnewglossaryentry{at}
    103 {name={task}}
     103{name={fred}}
    104104{
    105105Abstract object representing a unit of work. Systems will offer one or more concrete implementations of this concept (\eg \gls{kthrd}, \gls{job}); however, most scheduling concepts are independent of the particular implementation of the work representation. For this reason, this document uses the term \Gls{at} to mean any representation and not one in particular.
  • doc/theses/thierry_delisle_PhD/thesis/local.bib

    r2e9b59b rba897d21  
    685685  note = "[Online; accessed 9-February-2021]"
    686686}
    687 
    688 @misc{wiki:rcu,
    689   author = "{Wikipedia contributors}",
    690   title = "Read-copy-update --- {W}ikipedia{,} The Free Encyclopedia",
    691   year = "2022",
    692   url = "https://en.wikipedia.org/wiki/Read-copy-update",
    693   note = "[Online; accessed 12-April-2022]"
    694 }
    695 
    696 @misc{wiki:rwlock,
    697   author = "{Wikipedia contributors}",
    698   title = "Readers-writer lock --- {W}ikipedia{,} The Free Encyclopedia",
    699   year = "2021",
    700   url = "https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock",
    701   note = "[Online; accessed 12-April-2022]"
    702 }
  • doc/theses/thierry_delisle_PhD/thesis/text/core.tex

    r2e9b59b rba897d21  
    33Before discussing scheduling in general, where it is important to address systems that are changing states, this document discusses scheduling in a somewhat ideal scenario, where the system has reached a steady state. For this purpose, a steady state is loosely defined as a state where there are always \glspl{thrd} ready to run and the system has the resources necessary to accomplish the work, \eg, enough workers. In short, the system is neither overloaded nor underloaded.
    44
    5 It is important to discuss the steady state first because it is the easiest case to handle and, relatedly, the case in which the best performance is to be expected. As such, when the system is either overloaded or underloaded, a common approach is to try to adapt the system to this new load and return to the steady state, \eg, by adding or removing workers. Therefore, flaws in scheduling the steady state tend to be pervasive in all states.
     5I believe it is important to discuss the steady state first because it is the easiest case to handle and, relatedly, the case in which the best performance is to be expected. As such, when the system is either overloaded or underloaded, a common approach is to try to adapt the system to this new load and return to the steady state, \eg, by adding or removing workers. Therefore, flaws in scheduling the steady state can be pervasive in all states.
    66
    77\section{Design Goals}
     
    2525It is important to note that these guarantees are expected only up to a point. \Glspl{thrd} that are ready to run should not be prevented from doing so, but they still share the limited hardware resources. Therefore, the guarantee is considered respected if a \gls{thrd} gets access to a \emph{fair share} of the hardware resources, even if that share is very small.
    2626
    27 Similarly, the performance guarantee, the lack of interference among threads, is only relevant up to a point. Ideally, the cost of running and blocking should be constant regardless of contention, but the guarantee is considered satisfied if the cost is not \emph{too high} with or without contention. How much is an acceptable cost is obviously highly variable. For this document, the performance experimentation attempts to show the cost of scheduling is at worst equivalent to existing algorithms used in popular languages. This demonstration can be made by comparing applications built in \CFA to applications built with other languages or other models. Recall the programmer expectation is that the impact of the scheduler can be ignored. Therefore, if the cost of scheduling is competitive with other popular languages, the guarantee will be considered achieved.
     27Similarly, the performance guarantee, the lack of interference among threads, is only relevant up to a point. Ideally, the cost of running and blocking should be constant regardless of contention, but the guarantee is considered satisfied if the cost is not \emph{too high} with or without contention. How much is an acceptable cost is obviously highly variable. For this document, the performance experimentation attempts to show the cost of scheduling is at worst equivalent to existing algorithms used in popular languages. This demonstration can be made by comparing applications built in \CFA to applications built with other languages or other models. Recall the programmer expectation is that the impact of the scheduler can be ignored. Therefore, if the cost of scheduling is equivalent to or lower than that of other popular languages, I consider the guarantee achieved.
    2828
    2929More precisely the scheduler should be:
     
    3333\end{itemize}
    3434
    35 \subsection{Fairness Goals}
    36 For this work fairness will be considered as having two strongly related requirements: true starvation freedom and ``fast'' load balancing.
    37 
    38 \paragraph{True starvation freedom} is more easily defined: As long as at least one \proc continues to dequeue \ats, all ready \ats should be able to run eventually.
    39 In any running system, \procs can stop dequeuing \ats if they start running a \at that simply never parks.
    40 Traditional workstealing schedulers do not have starvation freedom in these cases.
    41 Now this requirement begs the question, what about preemption?
    42 Generally speaking preemption happens on the timescale of several milliseconds, which brings us to the next requirement: ``fast'' load balancing.
    43 
    44 \paragraph{Fast load balancing} means that load balancing should happen faster than preemption would normally allow.
    45 For interactive applications that need to run at 60, 90, 120 frames per second, \ats having to wait for several milliseconds to run are effectively starved.
    46 Therefore load-balancing should be done at a faster pace, one that can detect starvation at the microsecond scale.
    47 With that said, this is a much fuzzier requirement since it depends on the number of \procs, the number of \ats and the general load of the system.
    48 
    49 \subsection{Fairness vs Scheduler Locality} \label{fairnessvlocal}
     35\subsection{Fairness vs Scheduler Locality}
    5036An important performance factor in modern architectures is cache locality. Waiting for data from lower cache levels, or data not present in the cache, can have a major impact on performance. Having multiple \glspl{hthrd} writing to the same cache lines also leads to cache lines that must be waited on. It is therefore preferable to divide data among each \gls{hthrd}\footnote{This partitioning can be an explicit division up front or using data structures where different \glspl{hthrd} are naturally routed to different cache lines.}.
    5137
    52 For a scheduler, having good locality\footnote{This section discusses \emph{internal locality}, \ie, the locality of the data used by the scheduler versus \emph{external locality}, \ie, how the data used by the application is affected by scheduling. External locality is a much more complicated subject and is discussed in the next section.}, \ie, having the data local to each \gls{hthrd}, generally conflicts with fairness. Indeed, good locality often requires avoiding the movement of cache lines, while fairness requires dynamically moving a \gls{thrd}, and as consequence cache lines, to a \gls{hthrd} that is currently available.
     38For a scheduler, having good locality\footnote{This section discusses \emph{internal locality}, \ie, the locality of the data used by the scheduler versus \emph{external locality}, \ie, how the data used by the application is affected by scheduling. External locality is a much more complicated subject and is discussed in part~\ref{Evaluation} on evaluation.}, \ie, having the data local to each \gls{hthrd}, generally conflicts with fairness. Indeed, good locality often requires avoiding the movement of cache lines, while fairness requires dynamically moving a \gls{thrd}, and as consequence cache lines, to a \gls{hthrd} that is currently available.
    5339
    5440However, I claim that in practice it is possible to strike a balance between fairness and performance because these goals do not necessarily overlap temporally, where Figure~\ref{fig:fair} shows a visual representation of this behaviour. As mentioned, some unfairness is acceptable; therefore it is desirable to have an algorithm that prioritizes cache locality as long as thread delay does not exceed the execution mental-model.
     
    6248\end{figure}
    6349
    64 \subsection{Performance Challenges}\label{pref:challenge}
    65 While there exist a multitude of potential scheduling algorithms, they generally have to contend with the same performance challenges. Since these challenges are recurring themes in the design of a scheduler, it is relevant to describe the central ones here before looking at the design.
    66 
    67 \subsubsection{Scalability}
    68 The most basic performance challenge of a scheduler is scalability.
    69 Given a large number of \procs and an even larger number of \ats, scalability measures how fast \procs can enqueue and dequeue \ats.
    70 One could expect that doubling the number of \procs would double the rate at which \ats are dequeued, but contention on the internal data structure of the scheduler can lead to worse improvements.
    71 While the ready-queue itself can be sharded to alleviate the main source of contention, auxiliary scheduling features, \eg counting ready \ats, can also be sources of contention.
    72 
    73 \subsubsection{Migration Cost}
    74 Another important source of latency in scheduling is migration.
    75 An \at is said to have migrated if it is executed by two different \procs consecutively, which is the process discussed in \ref{fairnessvlocal}.
    76 Migrations can have many different causes, but in certain programs it can be all but impossible to limit migrations.
    77 Chapter~\ref{microbench}, for example, has a benchmark where any \at can potentially unblock any other \at, which can lead to \ats migrating more often than not.
    78 Because of this it is important to design the internal data structures of the scheduler to limit the latency penalty from migrations.
    79 
    80 
    81 \section{Inspirations}
     50\section{Design}
    8251In general, a na\"{i}ve \glsxtrshort{fifo} ready-queue does not scale with increased parallelism from \glspl{hthrd}, resulting in decreased performance. The problem is adding/removing \glspl{thrd} is a single point of contention. As shown in the evaluation sections, most production schedulers do scale when adding \glspl{hthrd}. The solution to this problem is to shard the ready-queue: create multiple sub-ready-queues that multiple \glspl{hthrd} can access and modify without interfering.
    8352
    84 Before going into the design of \CFA's scheduler proper, it is relevant to discuss two sharding solutions which served as the inspiration for the scheduler in this thesis.
     53Before going into the design of \CFA's scheduler proper, I want to discuss two sharding solutions which served as the inspiration for the scheduler in this thesis.
    8554
    8655\subsection{Work-Stealing}
    8756
    88 As mentioned in \ref{existing:workstealing}, a popular pattern for sharding the ready-queue is work-stealing.
    89 In this pattern each \gls{proc} has its own local ready-queue and \glspl{proc} only access each other's ready-queue if they run out of work on their local ready-queue.
    90 The interesting aspect of workstealing happens in easier scheduling cases, \ie enough work for everyone but no more and no load balancing needed.
    91 In these cases, work-stealing is close to optimal scheduling: it can achieve perfect locality and have no contention.
     57As I mentioned in \ref{existing:workstealing}, a popular pattern for sharding the ready-queue is work-stealing. As mentioned, in this pattern each \gls{proc} has its own ready-queue and \glspl{proc} only access each other's ready-queue if they run out of work.
     58The interesting aspect of workstealing happens in easier scheduling cases, \ie enough work for everyone but no more and no load balancing needed. In these cases, work-stealing is close to optimal scheduling: it can achieve perfect locality and have no contention.
    9259On the other hand, work-stealing schedulers only attempt to do load-balancing when a \gls{proc} runs out of work.
    93 This means that the scheduler never balances unfair loads unless they result in a \gls{proc} running out of work.
     60This means that the scheduler may never balance unfairness that does not result in a \gls{proc} running out of work.
    9461Chapter~\ref{microbench} shows that in pathological cases this problem can lead to indefinite starvation.
    9562
    9663
    97 Based on these observations, the conclusion is that a \emph{perfect} scheduler should behave very similarly to work-stealing in the easy cases, but should have more proactive load-balancing if the need arises.
     64Based on these observations, I conclude that a \emph{perfect} scheduler should behave very similarly to work-stealing in the easy cases, but should have more proactive load-balancing if the need arises.
    9865
    9966\subsection{Relaxed-Fifo}
    10067An entirely different scheme is to create a ``relaxed-FIFO'' queue as in \todo{cite Trevor's paper}. This approach forgoes any ownership between \gls{proc} and ready-queue, and simply creates a pool of ready-queues from which the \glspl{proc} can pick.
    10168\Glspl{proc} choose ready-queues at random, but timestamps are added to all elements of the queue and dequeues are done by picking two queues and dequeuing the oldest element.
    102 All subqueues are protected by TryLocks and \procs simply pick a different subqueue if they fail to acquire the TryLock.
    10369The result is a queue that has both decent scalability and sufficient fairness.
    10470The lack of ownership means that as long as one \gls{proc} is still able to repeatedly dequeue elements, it is unlikely that any element will stay on the queue for much longer than any other element.
     
    10975
    11076While the fairness of this scheme is good, it does suffer in terms of performance.
    111 It requires very wide sharding, \eg at least 4 queues per \gls{hthrd}, and finding non-empty queues can be difficult if there are too few ready \ats.
     77It requires very wide sharding, \eg at least 4 queues per \gls{hthrd}, and the randomness means locality can suffer significantly and finding non-empty queues can be difficult.
    11278
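For concreteness, a minimal C sketch of the relaxed-FIFO dequeue just described; the trylock discipline and names are illustrative, not taken from the cited implementation:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    // Each subqueue is a strictly FIFO list protected by a trylock; every
    // element carries the timestamp assigned when it was enqueued.
    struct node { uint64_t ts; struct node * next; };
    struct subqueue { pthread_mutex_t lock; struct node * head; };

    // Loops until an element is found; a real scheduler would also
    // handle the all-empty case.
    struct node * pop( struct subqueue * qs, unsigned n ) {
        for ( ;; ) {
            // pick two subqueues at random, prefer the older head (racy peek)
            struct subqueue * a = &qs[ rand() % n ], * b = &qs[ rand() % n ];
            if ( b->head && ( !a->head || b->head->ts < a->head->ts ) ) a = b;
            if ( !a->head ) continue;                          // both look empty: retry
            if ( pthread_mutex_trylock( &a->lock ) ) continue; // locked: pick again
            struct node * e = a->head;                         // re-check under the lock
            if ( e ) a->head = e->next;
            pthread_mutex_unlock( &a->lock );
            if ( e ) return e;
        }
    }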
    113 \section{Relaxed-FIFO++}
    114 Since it has inherent fairness qualities and decent performance in the presence of many \ats, the relaxed-FIFO queue appears as a good candidate to form the basis of a scheduler.
    115 The most obvious problem is for workloads where the number of \ats is barely greater than the number of \procs.
    116 In these situations, the wide sharding means most of the sub-queues from which the relaxed queue is formed will be empty.
    117 The consequence is that when a dequeue operation attempts to pick a sub-queue at random, it is likely that it picks an empty sub-queue and will have to pick again.
    118 This problem can repeat an unbounded number of times.
    119 
    120 As this is the most obvious challenge, it is worth addressing first.
    121 The obvious solution is to supplement each subqueue with some sharded data structure that keeps track of which subqueues are empty.
    122 This data structure can take many forms, for example a simple bitmask or a binary tree that tracks which branches are empty.
    123 Following a binary tree on each pick has fairly good Big O complexity and many modern architectures have powerful bitmask manipulation instructions.
    124 However, precisely tracking which sub-queues are empty is actually fundamentally problematic.
    125 The reason is that each subqueue is already a form of sharding and the sharding width has presumably already been chosen to avoid contention.
    126 However, tracking which ready queue is empty is only useful if the tracking mechanism uses denser sharding than the subqueues, but then it invariably creates a new source of contention.
    127 But if the tracking mechanism is not denser than the sub-queues, then it will generally not prove useful, because reading this new data structure risks being as costly as simply picking a sub-queue at random.
    128 Early experiments with this approach have shown that even with low success rates, randomly picking a sub-queue can be faster than a simple tree walk.
    129 
    130 The exception to this rule is using local tracking.
    131 If each \proc keeps track locally of which sub-queue is empty, then this can be done with a very dense data structure without introducing a new source of contention.
    132 The consequence of local tracking, however, is that the information is not complete.
    133 Each \proc is only aware of the last state it saw for each subqueue but does not have any information about freshness.
    134 Even on systems with low \gls{hthrd} count, \eg 4 or 8, this can quickly lead to the local information being no better than the random pick.
    135 This is due in part to the cost of maintaining this information and its poor quality.
    136 
    137 However, using a very low cost approach to local tracking may actually be beneficial.
    138 If the local tracking is no more costly than the random pick, then \emph{any} improvement to the success rate, however low it is, would lead to a performance benefit.
    139 This leads to the following approach:
    140 
    141 \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/}
    142 The Relaxed-FIFO approach can be made to handle the case of mostly empty sub-queues by tweaking the \glsxtrlong{prng}.
    143 The \glsxtrshort{prng} state can be seen as containing a list of all the future sub-queues that will be accessed.
    144 While this is not particularly useful on its own, the consequence is that if the \glsxtrshort{prng} algorithm can be run \emph{backwards}, then the state also contains a list of all the subqueues that were accessed.
    145 Luckily, bidirectional \glsxtrshort{prng} algorithms do exist, for example some Linear Congruential Generators\cit{https://en.wikipedia.org/wiki/Linear\_congruential\_generator} support running the algorithm backwards while offering good quality and performance.
    146 This particular \glsxtrshort{prng} can be used as follows:
    147 
    148 Each \proc maintains two \glsxtrshort{prng} states, which will be referred to as \texttt{F} and \texttt{B}.
    149 
    150 When a \proc attempts to dequeue a \at, it picks the subqueue by running \texttt{B} backwards.
    151 When a \proc attempts to enqueue a \at, it runs \texttt{F} forward to pick the subqueue to enqueue to.
    152 If the enqueue is successful, the state \texttt{B} is overwritten with the content of \texttt{F}.
    153 
    154 The result is that each \proc will tend to dequeue \ats that it has itself enqueued.
    155 When most sub-queues are empty, this technique increases the odds of finding \ats at very low cost, while also offering an improvement on locality in many cases.
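A minimal sketch of such a bidirectional generator in C, assuming Knuth's 64-bit LCG constants (the actual \CFA generator may differ):

    #include <stdint.h>

    static const uint64_t A = 6364136223846793005ULL;  // multiplier (odd)
    static const uint64_t C = 1442695040888963407ULL;  // increment

    // Multiplicative inverse of A modulo 2^64 via Newton iteration;
    // each step doubles the number of correct low-order bits.
    static uint64_t inv( uint64_t a ) {
        uint64_t x = a;                        // correct to 3 bits since a is odd
        for ( int i = 0; i < 5; i += 1 ) x *= 2 - a * x;
        return x;
    }

    static uint64_t forward ( uint64_t * s ) { return *s = A * *s + C; }          // steps F
    static uint64_t backward( uint64_t * s ) { return *s = inv( A ) * (*s - C); } // steps B

Enqueue picks a subqueue with forward on \texttt{F} and, on success, copies \texttt{F} into \texttt{B}; dequeue picks subqueues with backward on \texttt{B}, so a \proc first revisits the subqueues it most recently enqueued to.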
    156 
    157 However, while this approach does notably improve performance in many cases, this algorithm is still not competitive with work-stealing algorithms.
    158 The fundamental problem is that the constant randomness limits how much locality the scheduler offers.
    159 This becomes problematic both because the scheduler is likely to get cache misses on internal data structures and because migrations become very frequent.
    160 Therefore, since the approach of modifying the relaxed-FIFO algorithm to behave more like work stealing does not seem to pan out, the alternative is to do it the other way around.
    161 
    162 \section{Work Stealing++}
    163 To add stronger fairness guarantees to workstealing, a few changes are needed.
    164 First, the relaxed-FIFO algorithm has fundamentally better fairness because each \proc always monitors all subqueues.
    165 Therefore the workstealing algorithm must be prepended with some monitoring.
    166 Before attempting to dequeue from a \proc's local queue, the \proc must make some effort to make sure remote queues are not being neglected.
    167 To make this possible, \procs must be able to determine which \at has been on the ready-queue the longest.
    168 This is the second aspect that must be added.
    169 The relaxed-FIFO approach uses timestamps for each \at and this is also what is done here.
    170 
     79\section{\CFA}
     80The \CFA scheduler effectively attempts to merge these two approaches, keeping the best of both.
     81It is based on the sharded design shown in Figure~\ref{fig:base}.
    17182\begin{figure}
    17283        \centering
    17384        \input{base.pstex_t}
    174         \caption[Base \CFA design]{Base \CFA design \smallskip\newline A Pool of sub-ready queues offers the sharding, two per \glspl{proc}. Each \gls{proc} have local subqueues, however \glspl{proc} can access any of the sub-queues. Each \at is timestamped when enqueued.}
     85        \caption[Base \CFA design]{Base \CFA design \smallskip\newline A list of sub-ready queues offers the sharding, two per \glspl{proc}. However, \glspl{proc} can access any of the sub-queues.}
    17586        \label{fig:base}
    17687\end{figure}
    177 The algorithm is structured as shown in Figure~\ref{fig:base}.
    178 This is very similar to classic workstealing, except the local queues are placed in an array so \procs can access each other's queue in constant time.
    179 Sharding width can be adjusted based on need.
    180 When a \proc attempts to dequeue a \at, it first picks a random remote queue and compares its timestamp to the timestamps of the local queue(s), dequeuing from the remote queue if needed.
    181 
    182 Implemented as naively stated above, this approach has some obvious performance problems.
    183 First, it is necessary to have some damping effect on helping.
    184 Random effects like cache misses and preemption can add spurious but short bursts of latency for which helping is not helpful, pun intended.
    185 The effect of these bursts would be to cause more migrations than needed and make this workstealing approach slow down to match the relaxed-FIFO approach.
    186 
    187 \begin{figure}
    188         \centering
    189         \input{base_avg.pstex_t}
    190         \caption[\CFA design with Moving Average]{\CFA design with Moving Average \smallskip\newline A moving average is added to each subqueue.}
    191         \label{fig:base-ma}
    192 \end{figure}
    193 
    194 A simple solution to this problem is to compare an exponential moving average\cit{https://en.wikipedia.org/wiki/Moving\_average\#Exponential\_moving\_average} instead of the raw timestamps, shown in Figure~\ref{fig:base-ma}.
    195 Note that this is slightly more complex than it sounds because, since the \at at the head of a subqueue is still waiting, its wait time has not ended.
    196 Therefore the exponential moving average is actually an exponential moving average of how long each already dequeued \at has waited.
    197 To compare subqueues, the timestamp at the head must be compared to the current time, yielding the best-case wait time for the \at at the head of the queue.
    198 This new wait time is averaged with the stored average.
    199 To limit the amount of unnecessary migration even more, a bias can be added to the local queue, where a remote queue is helped only if its moving average is more than \emph{X} times the local queue's average.
    200 None of the experimentation that I have run with this scheduler seems to indicate that the choice of the weight for the moving average or the choice of bias is particularly important.
    201 Weights and biases of similar \emph{magnitudes} have similar effects.
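A sketch of this helping heuristic in C; the field names, the 1/16 weight, and the even blend of best-case wait and stored average are illustrative assumptions:

    #include <stdbool.h>
    #include <stdint.h>

    struct subqueue {
        uint64_t head_ts;  // timestamp of the \at currently at the head
        uint64_t avg;      // exponential moving average of completed waits
    };

    // On each dequeue, fold the just-ended wait into the moving average.
    static void update_avg( struct subqueue * q, uint64_t waited ) {
        q->avg = ( 15 * q->avg + waited ) / 16;
    }

    // Projected wait: the best-case wait of the current head averaged
    // with the stored moving average, as described above.
    static uint64_t projected( struct subqueue * q, uint64_t now ) {
        return ( ( now - q->head_ts ) + q->avg ) / 2;
    }

    // Help a remote subqueue only if it looks X times worse than the local.
    static bool should_help( struct subqueue * local, struct subqueue * remote,
                             uint64_t now, uint64_t X ) {
        return projected( remote, now ) > X * projected( local, now );
    }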
    202 
    203 With these additions to workstealing, scheduling can be made as fair as the relaxed-FIFO approach, while avoiding the majority of unnecessary migrations.
    204 Unfortunately, the performance of this approach does suffer in cases with no risk of starvation.
    205 The problem is that the constant polling of remote subqueues generally entails a cache miss.
    206 To make things worse, the more active a remote subqueue is, \ie the more frequently \ats are enqueued and dequeued from it, the higher the chance that polling incurs a cache miss.
    207 Conversely, the active subqueues do not benefit much from helping since starvation is already a non-issue.
    208 This puts this algorithm in an awkward situation where it is paying a cost, but the cost itself suggests the operation was unnecessary.
    209 The good news is that this problem can be mitigated.
    210 
    211 \subsection{Redundant Timestamps}
    212 The problem with polling remote queues comes from a tension in the consistency requirements on the subqueues.
    213 For the subqueues, correctness is critical. There must be a consensus among \procs on which subqueues hold which \ats.
    214 Since the timestamps are used for fairness, it is also important to have consensus on which \at is the oldest.
    215 However, when deciding if a remote subqueue is worth polling, correctness is much less of a problem.
    216 Since the only need is that a subqueue will eventually be polled, some data staleness can be acceptable.
    217 This leads to a tension where stale timestamps are only problematic in some cases.
    218 Furthermore, stale timestamps can be somewhat desirable since lower freshness requirements mean less tension on the cache coherence protocol.
    21988
    22089
    221 \begin{figure}
    222         \centering
    223         % \input{base_ts2.pstex_t}
    224         \caption[\CFA design with Redundant Timestamps]{\CFA design with Redundant Timestamps \smallskip\newline An array is added containing a copy of the timestamps. These timestamps are written to with relaxed atomics, without fencing, leading to fewer cache invalidations.}
    225         \label{fig:base-ts2}
    226 \end{figure}
    227 A solution to this is to create a second array containing a copy of the timestamps and average.
    228 This copy is updated \emph{after} the subqueue's critical sections using relaxed atomics.
    229 \Glspl{proc} now check if polling is needed by comparing the copy of the remote timestamp instead of the actual timestamp.
    230 The result is that since there is no fencing, the writes can be buffered and cause fewer cache invalidations.
    23190
    232 The correctness argument here is somewhat subtle.
    233 The data used for deciding whether or not to poll a queue can be stale as long as it does not cause starvation.
    234 Therefore, it is acceptable if stale data makes queues appear older than they really are, but not fresher.
    235 For the timestamps, this means that missing writes to the timestamp are acceptable, since they make the head \at look older.
    236 For the moving average, as long as the operations are RW-safe, the average is guaranteed to yield a value that is between the oldest and newest values written.
    237 Therefore this unprotected read of the timestamp and average satisfies the limited correctness that is required.
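In C11 atomics, the redundant-timestamp copy might look as follows (NQUEUES and the surrounding locking are assumed context):

    #include <stdatomic.h>
    #include <stdint.h>

    #define NQUEUES 64                         // assumed sharding width

    struct subqueue { uint64_t ts; /* lock, list, moving average ... */ };
    static _Atomic uint64_t ts_copy[NQUEUES];  // redundant copies, separate array

    // Called after the subqueue's critical section: a relaxed store lets the
    // write be buffered, causing fewer cache invalidations than a fenced store.
    static void publish_ts( unsigned i, struct subqueue * q ) {
        atomic_store_explicit( &ts_copy[i], q->ts, memory_order_relaxed );
    }

    // Used only to decide whether polling queue i is worthwhile; staleness is
    // safe because a missed update only makes the queue look older, not fresher.
    static uint64_t peek_ts( unsigned i ) {
        return atomic_load_explicit( &ts_copy[i], memory_order_relaxed );
    }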
     91% The common solution to the single point of contention is to shard the ready-queue so each \gls{hthrd} can access the ready-queue without contention, increasing performance.
    23892
    239 \begin{figure}
    240         \centering
    241         \input{cache-share.pstex_t}
    242         \caption[CPU design with wide L3 sharing]{CPU design with wide L3 sharing \smallskip\newline A very simple CPU with 4 \glspl{hthrd}. L1 and L2 are private to each \gls{hthrd} but the L3 is shared across the entire core.}
    243         \label{fig:cache-share}
    244 \end{figure}
     93% \subsection{Sharding} \label{sec:sharding}
     94% An interesting approach to sharding a queue is presented in \cit{Trevors paper}. This algorithm presents a queue with a relaxed \glsxtrshort{fifo} guarantee using an array of strictly \glsxtrshort{fifo} sublists as shown in Figure~\ref{fig:base}. Each \emph{cell} of the array has a timestamp for the last operation and a pointer to a linked-list with a lock. Each node in the list is marked with a timestamp indicating when it is added to the list. A push operation is done by picking a random cell, acquiring the list lock, and pushing to the list. If the cell is locked, the operation is simply retried on another random cell until a lock is acquired. A pop operation is done in a similar fashion except two random cells are picked. If both cells are unlocked with non-empty lists, the operation pops the node with the oldest timestamp. If one of the cells is unlocked and non-empty, the operation pops from that cell. If both cells are either locked or empty, the operation picks two new random cells and tries again.
    24595
    246 \begin{figure}
    247         \centering
    248         \input{cache-noshare.pstex_t}
    249         \caption[CPU design with a narrower L3 sharing]{CPU design with a narrower L3 sharing \smallskip\newline A different CPU design, still with 4 \glspl{hthrd}. L1 and L2 are still private to each \gls{hthrd}, but the L3 is shared by only some of the CPU, so there are two distinct L3 instances.}
    250         \label{fig:cache-noshare}
    251 \end{figure}
     96% \begin{figure}
     97%       \centering
     98%       \input{base.pstex_t}
     99%       \caption[Relaxed FIFO list]{Relaxed FIFO list \smallskip\newline List at the base of the scheduler: an array of strictly FIFO lists. The timestamp is in all nodes and cell arrays.}
     100%       \label{fig:base}
     101% \end{figure}
    252102
    253 With redundant timestamps, this scheduling algorithm achieves both the fairness and performance requirements on some machines.
    254 The problem is that the cost of polling and helping is not necessarily consistent across each \gls{hthrd}.
    255 For example, on machines where the motherboard holds multiple CPUs, cache misses can be satisfied from a cache that belongs to the CPU that missed, the \emph{local} CPU, or by a different CPU, a \emph{remote} one.
    256 Cache misses that are satisfied by a remote CPU have higher latency than those satisfied by the local CPU.
    257 However, this is not specific to systems with multiple CPUs.
    258 Depending on the cache structure, cache misses can have different latency on the same CPU.
    259 The AMD EPYC 7662 CPU that is described in Chapter~\ref{microbench} is an example of this.
    260 Figure~\ref{fig:cache-share} and Figure~\ref{fig:cache-noshare} show two different cache topologies which highlight this difference.
    261 In Figure~\ref{fig:cache-share}, all cache instances are either private to a \gls{hthrd} or shared by the entire system, which means the latency due to cache misses is likely fairly consistent.
    262 By comparison, in Figure~\ref{fig:cache-noshare} misses in the L2 cache can be satisfied by a hit in either instance of the L3.
    263 However, the memory access latency to the remote L3 instance is notably higher than the memory access latency to the local L3.
    264 The impact of these different designs on this algorithm is that scheduling scales very well on architectures similar to Figure~\ref{fig:cache-share}, but has notably worse scaling with many narrower L3 instances.
    265 This is simply because as the number of L3 instances grows, so too does the chance that random helping will cause significant latency.
    266 The solution is to have the scheduler be aware of the cache topology.
     103% \subsection{Finding threads}
     104% Once threads have been distributed onto multiple queues, identifying empty queues becomes a problem. Indeed, if the number of \glspl{thrd} does not far exceed the number of queues, it is probable that several of the cell queues are empty. Figure~\ref{fig:empty} shows an example with 2 \glspl{thrd} running on 8 queues, where the chances of getting an empty queue is 75\% per pick, meaning two random picks yield a \gls{thrd} only half the time. This scenario leads to performance problems since picks that do not yield a \gls{thrd} are not useful and do not necessarily help make more informed guesses.
    267105
    268 \subsection{Per CPU Sharding}
    269 Building a scheduler that is aware of cache topology poses two main challenges: discovering the cache topology and matching \procs to cache instances.
    270 Sadly, there is no standard portable way to discover cache topology in C.
    271 Therefore, while this is a significant portability challenge, it is outside the scope of this thesis to design a cross-platform cache discovery mechanism.
    272 The rest of this work assumes discovering the cache topology based on Linux's \texttt{/sys/devices/system/cpu} directory.
    273 This leaves the challenge of matching \procs to cache instances, or more precisely, identifying which subqueues of the ready queue are local to which cache instance.
    274 Once this matching is available, the helping algorithm can be changed to add bias so that \procs more often help subqueues local to the same cache instance
    275 \footnote{Note that like other biases mentioned in this section, the actual bias value does not appear to need precise tuning.}.
     106% \begin{figure}
     107%       \centering
     108%       \input{empty.pstex_t}
     109%       \caption[``More empty'' Relaxed FIFO list]{``More empty'' Relaxed FIFO list \smallskip\newline Emptier state of the queue: the array contains many empty cells, that is strictly FIFO lists containing no elements.}
     110%       \label{fig:empty}
     111% \end{figure}
    276112
    277 The obvious approach to mapping cache instances to subqueues is to statically tie subqueues to CPUs.
    278 Instead of having each subqueue local to a specific \proc, the system is initialized with subqueues for each \glspl{hthrd} up front.
    279 Then \procs dequeue and enqueue by first asking which CPU id they are local to, in order to identify which subqueues are the local ones.
    280 \Glspl{proc} can get the CPU id from \texttt{sched\_getcpu} or \texttt{librseq}.
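For example, with one subqueue per \gls{hthrd} (glibc on Linux; error handling elided):

    #define _GNU_SOURCE
    #include <sched.h>

    // With subqueues statically allocated one per hardware thread, the local
    // subqueue is indexed by the CPU id the \proc is currently running on.
    // The answer can be stale by the time it is used (the \proc may migrate),
    // so only performance, not correctness, may depend on it.
    static inline unsigned local_subqueue( void ) {
        return (unsigned)sched_getcpu();
    }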
     113% There are several solutions to this problem, but they ultimately all have to encode if a cell has an empty list. My results show the density and locality of this encoding is generally the dominating factor in these scheme. Classic solutions to this problem use one of three techniques to encode the information:
    281114
    282 This approach solves the performance problems on systems with topologies similar to Figure~\ref{fig:cache-noshare}.
    283 However, it actually causes some subtle fairness problems in some systems, specifically systems with few \procs and many \glspl{hthrd}.
    284 In these cases, the large number of subqueues and the bias against subqueues tied to different cache instances make it very unlikely that any single subqueue is picked.
    285 To make things worse, the small number of \procs means that few helping attempts are made.
    286 This combination of few attempts and low chances makes it so a \at stranded on a subqueue that is not actively dequeued from may wait a very long time before it gets randomly helped.
    287 On a system with 2 \procs, 256 \glspl{hthrd} with narrow cache sharing, and a 100:1 bias, it can actually take multiple seconds for a \at to get dequeued from a remote queue.
    288 Therefore, a more dynamic matching of subqueues to cache instance is needed.
     115% \paragraph{Dense Information} Figure~\ref{fig:emptybit} shows a dense bitmask to identify the cell queues currently in use. This approach means processors can often find \glspl{thrd} in constant time, regardless of how many underlying queues are empty. Furthermore, modern x86 CPUs have extended bit manipulation instructions (BMI2) that allow searching the bitmask with very little overhead compared to the randomized selection approach for a filled ready queue, offering good performance even in cases with many empty inner queues. However, this technique has its limits: with a single word\footnote{Word refers here to however many bits can be written atomically.} bitmask, the total amount of ready-queue sharding is limited to the number of bits in the word. With a multi-word bitmask, this maximum limit can be increased arbitrarily, but the look-up time increases. Finally, a dense bitmap, either single or multi-word, causes additional contention problems that reduces performance because of cache misses after updates. This central update bottleneck also means the information in the bitmask is more often stale before a processor can use it to find an item, \ie mask read says there are available \glspl{thrd} but none on queue when the subsequent atomic check is done.
    289116
    290 \subsection{Topological Work Stealing}
    291 The approach that is used in the \CFA scheduler is to have per-\proc subqueues, but have an explicit data structure track which cache instance each subqueue is tied to.
    292 This requires some finesse, because reading this data structure must lead to fewer cache misses than not having the data structure in the first place.
    293 A key element however is that, like the timestamps for helping, reading the cache instance mapping only needs to give the correct result \emph{often enough}.
    294 Therefore the algorithm can be built as follows: before enqueuing or dequeuing a \at, each \proc queries the CPU id and the corresponding cache instance.
    295 Since subqueues are tied to \procs, each \proc can then update the cache instance mapped to the local subqueue(s).
    296 To avoid unnecessary cache line invalidation, the map is only written to if the mapping changes.
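A sketch of both steps under these assumptions; the sysfs \texttt{id} attribute and NQUEUES are assumptions, and error handling is elided:

    #include <stdatomic.h>
    #include <stdio.h>

    #define NQUEUES 64                     // assumed sharding width

    // Which last-level-cache instance does this CPU belong to? On recent
    // Linux kernels, index3 is typically the L3 and exposes an "id" file.
    static int cache_instance( int cpu ) {
        char path[96]; int id = -1;
        snprintf( path, sizeof(path),
                  "/sys/devices/system/cpu/cpu%d/cache/index3/id", cpu );
        FILE * f = fopen( path, "r" );
        if ( f ) { fscanf( f, "%d", &id ); fclose( f ); }
        return id;
    }

    static _Atomic int cache_map[NQUEUES]; // cache instance of each subqueue

    // Called by the owning \proc before enqueue/dequeue: write the mapping
    // only when it changes, so the cache line normally stays shared.
    static void update_map( unsigned q, int cpu ) {
        int inst = cache_instance( cpu );
        if ( atomic_load_explicit( &cache_map[q], memory_order_relaxed ) != inst )
            atomic_store_explicit( &cache_map[q], inst, memory_order_relaxed );
    }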
     117% \begin{figure}
     118%       \centering
     119%       \vspace*{-5pt}
     120%       {\resizebox{0.75\textwidth}{!}{\input{emptybit.pstex_t}}}
     121%       \vspace*{-5pt}
     122%       \caption[Underloaded queue with bitmask]{Underloaded queue with bitmask indicating array cells with items.}
     123%       \label{fig:emptybit}
    297124
     125%       \vspace*{10pt}
     126%       {\resizebox{0.75\textwidth}{!}{\input{emptytree.pstex_t}}}
     127%       \vspace*{-5pt}
     128%       \caption[Underloaded queue with binary search-tree]{Underloaded queue with binary search-tree indicating array cells with items.}
     129%       \label{fig:emptytree}
     130
     131%       \vspace*{10pt}
     132%       {\resizebox{0.95\textwidth}{!}{\input{emptytls.pstex_t}}}
     133%       \vspace*{-5pt}
     134%       \caption[Underloaded queue with per processor bitmask]{Underloaded queue with per processor bitmask indicating array cells with items.}
     135%       \label{fig:emptytls}
     136% \end{figure}
     137
     138% \paragraph{Sparse Information} Figure~\ref{fig:emptytree} shows an approach using a hierarchical tree data-structure to reduce contention and has been shown to work in similar cases~\cite{ellen2007snzi}. However, this approach may lead to poorer performance due to the inherent pointer chasing cost while still allowing significant contention on the nodes of the tree if the tree is shallow.
     139
     140% \paragraph{Local Information} Figure~\ref{fig:emptytls} shows an approach using dense information, similar to the bitmap, but each \gls{hthrd} keeps its own independent copy. While this approach can offer good scalability \emph{and} low latency, the liveliness and discovery of the information can become a problem. This case is made worst in systems with few processors where even blind random picks can find \glspl{thrd} in a few tries.
     141
     142% I built a prototype of these approaches and none of these techniques offer satisfying performance when few threads are present. All of these approach hit the same 2 problems. First, randomly picking sub-queues is very fast. That speed means any improvement to the hit rate can easily be countered by a slow-down in look-up speed, whether or not there are empty lists. Second, the array is already sharded to avoid contention bottlenecks, so any denser data structure tends to become a bottleneck. In all cases, these factors meant the best cases scenario, \ie many threads, would get worst throughput, and the worst-case scenario, few threads, would get a better hit rate, but an equivalent poor throughput. As a result I tried an entirely different approach.
     143
     144% \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/}
     145% In the worst-case scenario there are only few \glspl{thrd} ready to run, or more precisely given $P$ \glspl{proc}\footnote{For simplicity, this assumes there is a one-to-one match between \glspl{proc} and \glspl{hthrd}.}, $T$ \glspl{thrd} and $\epsilon$ a very small number, than the worst case scenario can be represented by $T = P + \epsilon$, with $\epsilon \ll P$. It is important to note in this case that fairness is effectively irrelevant. Indeed, this case is close to \emph{actually matching} the model of the ``Ideal multi-tasking CPU'' on page \pageref{q:LinuxCFS}. In this context, it is possible to use a purely internal-locality based approach and still meet the fairness requirements. This approach simply has each \gls{proc} running a single \gls{thrd} repeatedly. Or from the shared ready-queue viewpoint, each \gls{proc} pushes to a given sub-queue and then pops from the \emph{same} subqueue. The challenge is for the the scheduler to achieve good performance in both the $T = P + \epsilon$ case and the $T \gg P$ case, without affecting the fairness guarantees in the later.
     146
     147% To handle this case, I use a \glsxtrshort{prng}\todo{Fix missing long form} in a novel way. There exist \glsxtrshort{prng}s that are fast, compact and can be run forward \emph{and} backwards.  Linear congruential generators~\cite{wiki:lcg} are an example of \glsxtrshort{prng}s of such \glsxtrshort{prng}s. The novel approach is to use the ability to run backwards to ``replay'' the \glsxtrshort{prng}. The scheduler uses an exclusive \glsxtrshort{prng} instance per \gls{proc}, the random-number seed effectively starts an encoding that produces a list of all accessed subqueues, from latest to oldest. Replaying the \glsxtrshort{prng} to identify cells accessed recently and which probably have data still cached.
     148
     149% The algorithm works as follows:
     150% \begin{itemize}
     151%       \item Each \gls{proc} has two \glsxtrshort{prng} instances, $F$ and $B$.
     152%       \item Push and Pop operations occur as discussed in Section~\ref{sec:sharding} with the following exceptions:
     153%       \begin{itemize}
     154%               \item Push operations use $F$ going forward on each try and on success $F$ is copied into $B$.
     155%               \item Pop operations use $B$ going backwards on each try.
     156%       \end{itemize}
     157% \end{itemize}
     158
     159% The main benefit of this technique is that it basically respects the desired properties of Figure~\ref{fig:fair}. When looking for work, a \gls{proc} first looks at the last cell it pushed to, if any, and then moves backwards through its accessed cells. As the \gls{proc} continues looking for work, $F$ moves backwards and $B$ stays in place. As a result, the relation between the two becomes weaker, which means that the probabilistic fairness of the algorithm reverts to normal. Chapter~\ref{proofs} discusses more formally the fairness guarantees of this algorithm.
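% A minimal sketch of such a reversible \glsxtrshort{prng}, assuming a 64-bit LCG; the constants are illustrative (any odd multiplier works) and are not taken from the actual implementation:
% \begin{lstlisting}
% typedef unsigned long long u64;
% static const u64 A = 6364136223846793005ULL;  // odd multiplier
% static const u64 C = 1442695040888963407ULL;  // increment
% // Modular inverse of the odd multiplier mod 2^64: starting from A itself
% // (A * A == 1 mod 8), each Newton step doubles the number of correct low bits.
% static u64 inv( u64 a ) {
%         u64 x = a;
%         for( int i = 0; i < 5; i++ ) x *= 2 - a * x;
%         return x;
% }
% static u64 fwd( u64 * s ) { return *s = A * (*s) + C; }        // push: next subqueue
% static u64 bck( u64 * s ) { return *s = inv( A ) * (*s - C); } // pop: previous subqueue
% \end{lstlisting}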
     160
     161% \section{Details}
  • doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex

    r2e9b59b rba897d21  
    33The first step of evaluation is always to test out small, controlled cases, to ensure that the basics are working properly.
    44This section presents five different experimental setups, evaluating some of the basic features of \CFA's scheduler.
    5 
    6 \section{Benchmark Environment}
    7 All of these benchmarks are run on two distinct hardware environments, an AMD and an Intel machine.
    8 
    9 \paragraph{AMD} The AMD machine is a server with two AMD EPYC 7662 CPUs and 256GB of DDR4 RAM.
    10 The server runs Ubuntu 20.04.2 LTS on top of Linux Kernel 5.8.0-55.
    11 These EPYCs have 64 cores per CPU and 2 \glspl{hthrd} per core, for a total of 256 \glspl{hthrd}.
    12 The CPUs each have 4 MB, 64 MB and 512 MB of L1, L2 and L3 caches respectively.
    13 Each L1 and L2 instance is shared only by \glspl{hthrd} on a given core, but each L3 instance is shared by 4 cores, therefore 8 \glspl{hthrd}.
    14 
    15 \paragraph{Intel} The Intel machine is a server with four Intel Xeon Platinum 8160 CPUs and 384GB of DDR4 RAM.
    16 The server runs Ubuntu 20.04.2 LTS on top of Linux Kernel 5.8.0-55.
    17 These Xeon Platinums have 24 cores per CPU and 2 \glspl{hthrd} per core, for a total of 192 \glspl{hthrd}.
    18 The CPUs each have 3 MB, 96 MB and 132 MB of L1, L2 and L3 caches respectively.
    19 Each L1 and L2 instance is shared only by \glspl{hthrd} on a given core, but each L3 instance is shared across the entire CPU, therefore 48 \glspl{hthrd}.
    20 
    21 This limited sharing of the last-level cache on the AMD machine is markedly different from the Intel machine. Indeed, while on both architectures L2 cache misses that are served by the L3 cache on a different CPU incur significant latency, on AMD cache misses served by a different L3 instance on the same CPU also incur high latency.
    22 
    235
    246\section{Cycling latency}
     
    4931\end{figure}
    5032
     33\todo{check term ``idle sleep handling''}
    5134To prevent this benchmark from being dominated by the idle sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available.
    5235Beyond this point, adding more rings serves to further mitigate the idle sleep handling.
    53 This is to avoid the case where one of the \glspl{proc} runs out of work because of the variation in the number of ready \glspl{at} mentioned above.
     36This is to avoid the case where one of the worker \glspl{at} runs out of work because of the variation in the number of ready \glspl{at} mentioned above.
    5437
    5538The actual benchmark is more complicated in order to handle termination, but that simply requires using a binary semaphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition.
    5639
     40\todo{code, setup, results}
    5741\begin{lstlisting}
    5842        Thread.main() {
     
    6852\end{lstlisting}
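The following is a hedged sketch of that termination handling, using a POSIX binary semaphore in place of raw \texttt{park}/\texttt{unpark}; the names and ring structure are illustrative, not the benchmark's actual code:
\begin{lstlisting}
#include <semaphore.h>
#include <stdbool.h>

sem_t next;                  // initialized to 1: a single circulating token
volatile bool done = false;  // set by the main thread to stop the ring

void * ring_member( void * arg ) {
	while( true ) {
		sem_wait( &next );   // P: wait for the token
		if( done ) break;    // check the exit condition only after the P
		sem_post( &next );   // V: pass the token to the next thread
	}
	sem_post( &next );       // repost so every remaining waiter drains out
	return 0;
}
\end{lstlisting}
Checking @done@ only after the \texttt{P}, and reposting on exit, ensures no thread is left blocked at shutdown.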
    6953
    70 \begin{figure}
    71         \centering
    72         \input{result.cycle.jax.ops.pstex_t}
    73         \vspace*{-10pt}
    74         \label{fig:cycle:ns:jax}
    75 \end{figure}
    7654
    7755\section{Yield}
  • doc/theses/thierry_delisle_PhD/thesis/text/existing.tex

    r2e9b59b rba897d21  
    22Scheduling is the process of assigning resources to incoming requests.
    33A very common form of this is assigning available workers to work-requests.
    4 The need for scheduling is very common in Computer Science, \eg Operating Systems and Hypervisors schedule available CPUs, NICs schedule available bandwidth, but scheduling is also common in other fields.
    5 For example, in assembly lines assigning parts in need of assembly to line workers is a form of scheduling.
     4The need for scheduling is very common in Computer Science, \eg Operating Systems and Hypervisors schedule available CPUs, NICs schedule available bandwidth, but it is also common in other fields.
     5For example, assembly lines are an example of scheduling where parts needing assembly are assigned to line workers.
    66
    77In all these cases, the choice of a scheduling algorithm generally depends first and foremost on how much information is available to the scheduler.
     
    1515
    1616\section{Naming Convention}
    17 Scheduling has been studied by various communities concentrating on different incarnations of the same problem. As a result, there is no real naming convention for scheduling that is respected across these communities. For this document, I will use the term \newterm{\Gls{at}} to refer to the abstract objects being scheduled and the term \newterm{\Gls{proc}} to refer to the objects which will execute these \glspl{at}.
     17Scheduling has been studied by various communities concentrating on different incarnations of the same problem. As a result, there is no real naming convention for scheduling that is respected across these communities. For this document, I will use the term \newterm{task} to refer to the abstract objects being scheduled and the term \newterm{worker} to refer to the objects which will execute these tasks.
    1818
    1919\section{Static Scheduling}
    20 Static schedulers require that \glspl{at} have their dependencies and costs explicitly and exhaustively specified prior to scheduling.
     20Static schedulers require that tasks have their dependencies and costs explicitly and exhaustively specified prior to scheduling.
    2121The scheduler then processes this input ahead of time and produces a \newterm{schedule} to which the system can later adhere.
    2222This approach is generally popular in real-time systems since the need for strong guarantees justifies the cost of supplying this information.
     
    2626
    2727\section{Dynamic Scheduling}
    28 It may be difficult to fulfill the requirements of a static scheduler if dependencies are conditional. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of adding one or more new \gls{at}(s) to the system as their dependencies are resolved, as well as potentially halting or suspending a \gls{at} that dynamically detects unfulfilled dependencies. Each \gls{at} has the responsibility of adding the dependent \glspl{at} back into the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only \glspl{at} with no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}.
     28It may be difficult to fulfill the requirements of a static scheduler if dependencies are conditional. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of halting or suspending a task with unfulfilled dependencies and adding one or more new task(s) to the system. The new task(s) have the responsibility of adding the dependent task back into the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only tasks with no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}.
    2929
    3030\subsection{Explicitly Informed Dynamic Schedulers}
    31 While dynamic schedulers do not have access to an exhaustive list of dependencies for a \gls{at}, they may require more or less information to be provided about each \gls{at}, including for example: expected duration, required resources, relative importance, etc. The scheduler can then use this information to direct the scheduling decisions. \cit{Examples of schedulers with more information} Precisely providing this information can be difficult for programmers, especially \emph{predicted} behaviour, and the scheduler may need to support some amount of imprecision in the provided information. For example, specifying that a \gls{at} takes approximately 5 seconds to complete, rather than exactly 5 seconds. User-provided information can also become a significant burden depending on how the effort to provide the information scales with the number of \glspl{at} and their complexity. For example, providing an exhaustive list of files read by 5 \glspl{at} is an easier requirement than providing an exhaustive list of memory addresses accessed by 10'000 distinct \glspl{at}.
     31While dynamic schedulers do not have access to an exhaustive list of dependencies for a task, they may require more or less information to be provided about each task, including for example: expected duration, required resources, relative importance, etc. The scheduler can then use this information to direct the scheduling decisions. \cit{Examples of schedulers with more information} Precisely providing this information can be difficult for programmers, especially \emph{predicted} behaviour, and the scheduler may need to support some amount of imprecision in the provided information. For example, specifying that a task takes approximately 5 seconds to complete, rather than exactly 5 seconds. User-provided information can also become a significant burden depending on how the effort to provide the information scales with the number of tasks and their complexity. For example, providing an exhaustive list of files read by 5 tasks is an easier requirement than providing an exhaustive list of memory addresses accessed by 10'000 distinct tasks.
    3232
    3333Since the goal of this thesis is to provide a scheduler as a replacement for \CFA's existing \emph{uninformed} scheduler, Explicitly Informed schedulers are less relevant to this project. Nevertheless, some strategies are worth mentioning.
    3434
    3535\subsubsection{Priority Scheduling}
    36 A commonly used piece of information that schedulers use to direct the algorithm is priorities. Each \gls{at} is given a priority and higher-priority \glspl{at} are preferred to lower-priority ones. The simplest priority scheduling algorithm is to require that every \gls{at} have a distinct pre-established priority and always run the available \gls{at} with the highest priority. Asking programmers to provide an exhaustive set of unique priorities can be prohibitive when the system has a large number of \glspl{at}. It can therefore be desirable for schedulers to support \glspl{at} with identical priorities and/or automatically setting and adjusting priorities for \glspl{at}. The most common operating systems use some variation on priorities with overlaps and dynamic priority adjustments. For example, Microsoft Windows uses a pair of priorities
    37 \cit{https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities,https://docs.microsoft.com/en-us/windows/win32/taskschd/taskschedulerschema-priority-settingstype-element}, one specified by users out of ten possible options and one adjusted by the system.
     36A commonly used piece of information that schedulers use to direct the algorithm is priorities. Each task is given a priority and higher-priority tasks are preferred to lower-priority ones. The simplest priority scheduling algorithm is to require that every task have a distinct pre-established priority and always run the available task with the highest priority. Asking programmers to provide an exhaustive set of unique priorities can be prohibitive when the system has a large number of tasks. It can therefore be desirable for schedulers to support tasks with identical priorities and/or automatically setting and adjusting priorities for tasks.
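As a minimal illustration of the basic algorithm (the types and helpers below are assumptions, not any production scheduler), picking the next task reduces to popping from the highest non-empty priority queue:
\begin{lstlisting}
#define NPRIO 32
queue_t runqueues[NPRIO];              // one FIFO per priority, 0 = highest

task_t * pick_next( void ) {
	for( int p = 0; p < NPRIO; p++ ) { // scan from highest priority down
		task_t * t = queue_pop( &runqueues[p] );
		if( t ) return t;              // run the highest-priority ready task
	}
	return 0;                          // nothing ready
}
\end{lstlisting}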
    3837
    3938\subsection{Uninformed and Self-Informed Dynamic Schedulers}
    40 Several scheduling algorithms do not require programmers to provide additional information on each \gls{at}, and instead make scheduling decisions based solely on internal state and/or information implicitly gathered by the scheduler.
     39Several scheduling algorithms do not require programmers to provide additional information on each task, and instead make scheduling decisions based solely on internal state and/or information implicitly gathered by the scheduler.
    4140
    4241
    4342\subsubsection{Feedback Scheduling}
    44 As mentioned, schedulers may also gather information about each \gls{at} to direct their decisions. This design effectively moves the scheduler, to some extent, into the realm of \newterm{Control Theory}\cite{wiki:controltheory}. This gathering does not generally involve programmers and as such does not increase programmer burden the same way explicitly provided information may. However, some feedback schedulers do offer programmers the option to provide additional information on certain \glspl{at}, in order to direct scheduling decisions. The important distinction is whether or not the scheduler can function without this additional information.
     43As mentioned, schedulers may also gather information about each task to direct their decisions. This design effectively moves the scheduler, to some extent, into the realm of \newterm{Control Theory}\cite{wiki:controltheory}. This gathering does not generally involve programmers and as such does not increase programmer burden the same way explicitly provided information may. However, some feedback schedulers do offer programmers the option to provide additional information on certain tasks, in order to direct scheduling decisions. The important distinction is whether or not the scheduler can function without this additional information.
    4544
    4645
    4746\section{Work Stealing}\label{existing:workstealing}
    48 One of the most popular scheduling algorithms in practice (see~\ref{existing:prod}) is work-stealing. This idea, introduced by \cite{DBLP:conf/fpca/BurtonS81}, effectively has each worker work on its local \glspl{at} first, but allows the possibility for other workers to steal local \glspl{at} if they run out of \glspl{at}. \cite{DBLP:conf/focs/Blumofe94} introduced the more familiar incarnation of this, where each worker has a queue of \glspl{at} to accomplish and workers without \glspl{at} steal \glspl{at} from random workers. (The Burton and Sleep algorithm had trees of \glspl{at} and stole only among neighbours.) Blumofe and Leiserson also prove worst-case space and time requirements for well-structured computations.
     47One of the most popular scheduling algorithms in practice (see~\ref{existing:prod}) is work-stealing. This idea, introduced by \cite{DBLP:conf/fpca/BurtonS81}, effectively has each worker work on its local tasks first, but allows the possibility for other workers to steal local tasks if they run out of tasks. \cite{DBLP:conf/focs/Blumofe94} introduced the more familiar incarnation of this, where each worker has a queue of tasks to accomplish and workers without tasks steal tasks from random workers. (The Burton and Sleep algorithm had trees of tasks and stole only among neighbours.) Blumofe and Leiserson also prove worst-case space and time requirements for well-structured computations.
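A hedged sketch of this familiar incarnation (the deque type and its operations are assumed helpers, not a specific library): each worker pops from its own queue first and falls back to stealing from a uniformly random victim:
\begin{lstlisting}
typedef struct { deque_t q; int id; } worker_t;    // deque_t is assumed

task_t * next_task( worker_t * self, worker_t workers[], int n ) {
	task_t * t = deque_pop_bottom( &self->q );     // local tasks first
	if( t ) return t;
	int victim = rand() % n;                       // pick a random worker
	if( victim == self->id ) return 0;             // caller simply retries
	return deque_steal_top( &workers[victim].q );  // steal from the far end
}
\end{lstlisting}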
    4948
    5049Many variations of this algorithm have been proposed over the years\cite{DBLP:journals/ijpp/YangH18}, both optimizations of existing implementations and approaches that account for new metrics.
     
    5251\paragraph{Granularity} A significant portion of early Work Stealing research concentrated on \newterm{Implicit Parallelism}\cite{wiki:implicitpar}. Since the system was responsible for splitting the work, granularity is a challenge that cannot be left to the programmers (as opposed to \newterm{Explicit Parallelism}\cite{wiki:explicitpar} where the burden can be left to programmers). In general, fine granularity is better for load balancing and coarse granularity reduces communication overhead. The best performance generally means finding a middle ground between the two. Several methods can be employed, but I believe these are less relevant for threads, which are generally explicit and more coarse-grained.
    5352
    54 \paragraph{Task Placement} Since modern computers rely heavily on cache hierarchies\cit{Do I need a citation for this}, migrating \glspl{at} from one core to another can be expensive.  \cite{DBLP:journals/tpds/SquillanteL93}
     53\paragraph{Task Placement} Since modern computers rely heavily on cache hierarchies\cit{Do I need a citation for this}, migrating tasks from one core to another can be expensive.  \cite{DBLP:journals/tpds/SquillanteL93}
    5554
    5655\todo{The survey is not great on this subject}
     
    5958
    6059\subsection{Theoretical Results}
    61 There is also a large body of research on the theoretical aspects of work stealing. These evaluate, for example, the cost of migration\cite{DBLP:conf/sigmetrics/SquillanteN91,DBLP:journals/pe/EagerLZ86}, how affinity affects performance\cite{DBLP:journals/tpds/SquillanteL93,DBLP:journals/mst/AcarBB02,DBLP:journals/ipl/SuksompongLS16} and theoretical models for heterogeneous systems\cite{DBLP:journals/jpdc/MirchandaneyTS90,DBLP:journals/mst/BenderR02,DBLP:conf/sigmetrics/GastG10}. \cite{DBLP:journals/jacm/BlellochGM99} examine the space bounds of Work Stealing and \cite{DBLP:journals/siamcomp/BerenbrinkFG03} show that for underloaded systems, the scheduler will complete computations in finite time, \ie is \newterm{stable}. Others show that Work-Stealing is applicable to various scheduling contexts\cite{DBLP:journals/mst/AroraBP01,DBLP:journals/anor/TchiboukdjianGT13,DBLP:conf/isaac/TchiboukdjianGTRB10,DBLP:conf/ppopp/AgrawalLS10,DBLP:conf/spaa/AgrawalFLSSU14}. \cite{DBLP:conf/ipps/ColeR13} also studied how Randomized Work Stealing affects false sharing among \glspl{at}.
     60There is also a large body of research on the theoretical aspects of work stealing. These evaluate, for example, the cost of migration\cite{DBLP:conf/sigmetrics/SquillanteN91,DBLP:journals/pe/EagerLZ86}, how affinity affects performance\cite{DBLP:journals/tpds/SquillanteL93,DBLP:journals/mst/AcarBB02,DBLP:journals/ipl/SuksompongLS16} and theoretical models for heterogeneous systems\cite{DBLP:journals/jpdc/MirchandaneyTS90,DBLP:journals/mst/BenderR02,DBLP:conf/sigmetrics/GastG10}. \cite{DBLP:journals/jacm/BlellochGM99} examine the space bounds of Work Stealing and \cite{DBLP:journals/siamcomp/BerenbrinkFG03} show that for underloaded systems, the scheduler will complete computations in finite time, \ie is \newterm{stable}. Others show that Work-Stealing is applicable to various scheduling contexts\cite{DBLP:journals/mst/AroraBP01,DBLP:journals/anor/TchiboukdjianGT13,DBLP:conf/isaac/TchiboukdjianGTRB10,DBLP:conf/ppopp/AgrawalLS10,DBLP:conf/spaa/AgrawalFLSSU14}. \cite{DBLP:conf/ipps/ColeR13} also studied how Randomized Work Stealing affects false sharing among tasks.
    6261
    6362However, as \cite{DBLP:journals/ijpp/YangH18} highlights, it is worth mentioning that this theoretical research has mainly focused on ``fully-strict'' computations, \ie workloads that can be fully represented with a Directed Acyclic Graph. It is unclear how well these distributions represent workloads in real-world scenarios.
    6463
    6564\section{Preemption}
    66 One last aspect of scheduling worth mentioning is preemption, since many schedulers rely on it for some of their guarantees. Preemption is the idea of interrupting \glspl{at} that have been running for too long, effectively injecting suspend points into the applications. There are multiple techniques to achieve this, but they all aim to guarantee that suspend points in a \gls{at} are never further apart than some fixed duration. While this helps schedulers guarantee that no \glspl{at} will unfairly monopolize a worker, preemption can effectively be added to any scheduler. Therefore, the only interesting aspect of preemption for the design of scheduling is whether or not to require it.
     65One last aspect of scheduling worth mentioning is preemption, since many schedulers rely on it for some of their guarantees. Preemption is the idea of interrupting tasks that have been running for too long, effectively injecting suspend points into the applications. There are multiple techniques to achieve this, but they all aim to guarantee that suspend points in a task are never further apart than some fixed duration. While this helps schedulers guarantee that no tasks will unfairly monopolize a worker, preemption can effectively be added to any scheduler. Therefore, the only interesting aspect of preemption for the design of scheduling is whether or not to require it.
    6766
    6867\section{Schedulers in Production}\label{existing:prod}
     
    7069
    7170\subsection{Operating System Schedulers}
    72 Operating System Schedulers tend to be fairly complex: they generally support some amount of real-time scheduling, aim to balance interactive and non-interactive \glspl{at}, and support multiple users sharing hardware without requiring these users to cooperate. Here are more details on a few schedulers used in common operating systems: Linux, FreeBSD, Microsoft Windows and Apple's OS X. The information is less complete for closed-source operating systems.
     71Operating System Schedulers tend to be fairly complex: they generally support some amount of real-time scheduling, aim to balance interactive and non-interactive tasks, and support multiple users sharing hardware without requiring these users to cooperate. Here are more details on a few schedulers used in common operating systems: Linux, FreeBSD, Microsoft Windows and Apple's OS X. The information is less complete for closed-source operating systems.
    7372
    7473\paragraph{Linux's CFS}
    75 The default scheduler used by Linux (the Completely Fair Scheduler)\cite{MAN:linux/cfs,MAN:linux/cfs2} is a feedback scheduler based on CPU time. For each processor, it constructs a Red-Black tree of \glspl{at} waiting to run, ordering them by amount of CPU time spent. The scheduler schedules the \gls{at} that has spent the least CPU time. It also supports the concept of \newterm{Nice values}, which are effectively multiplicative factors on the CPU time spent. The ordering of \glspl{at} is also impacted by a group-based notion of fairness, where \glspl{at} belonging to groups having spent less CPU time are preferred to \glspl{at} belonging to groups having spent more CPU time. Linux achieves load-balancing by regularly monitoring the system state\cite{MAN:linux/cfs/balancing} and using some heuristic on the load (currently, CPU time spent in the last millisecond plus a decayed version of the previous time slots\cite{MAN:linux/cfs/pelt}).
     74The default scheduler used by Linux (the Completely Fair Scheduler)\cite{MAN:linux/cfs,MAN:linux/cfs2} is a feedback scheduler based on CPU time. For each processor, it constructs a Red-Black tree of tasks waiting to run, ordering them by amount of CPU time spent. The scheduler schedules the task that has spent the least CPU time. It also supports the concept of \newterm{Nice values}, which are effectively multiplicative factors on the CPU time spent. The ordering of tasks is also impacted by a group-based notion of fairness, where tasks belonging to groups having spent less CPU time are preferred to tasks belonging to groups having spent more CPU time. Linux achieves load-balancing by regularly monitoring the system state\cite{MAN:linux/cfs/balancing} and using some heuristic on the load (currently, CPU time spent in the last millisecond plus a decayed version of the previous time slots\cite{MAN:linux/cfs/pelt}).
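The ordering rule can be sketched as follows (a simplification; the weight table and field names are illustrative, not the kernel's actual code):
\begin{lstlisting}
// Key used to order the red-black tree: weighted CPU time consumed,
// so the nice value acts as the multiplicative factor described above.
unsigned long long key( task_t * t ) {
	return t->cpu_time_used * NICE_0_WEIGHT / weight_of( t->nice );
}
// The scheduler always runs the task with the smallest key, i.e. the
// leftmost node of the tree.
\end{lstlisting}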
    7675
    77 \cite{DBLP:conf/eurosys/LoziLFGQF16} shows that Linux's CFS also does work-stealing to balance the workload of each processor, but the paper argues this aspect can be improved significantly. The issues highlighted seem to stem from Linux's need to support fairness across \glspl{at} \emph{and} across users\footnote{Enforcing fairness across users means, for example, that given two users: one with a single \gls{at} and the other with one thousand \glspl{at}, the user with a single \gls{at} does not receive one one-thousandth of the CPU time.}, increasing the complexity.
     76\cite{DBLP:conf/eurosys/LoziLFGQF16} shows that Linux's CFS also does work-stealing to balance the workload of each processor, but the paper argues this aspect can be improved significantly. The issues highlighted seem to stem from Linux's need to support fairness across tasks \emph{and} across users\footnote{Enforcing fairness across users means, for example, that given two users: one with a single task and the other with one thousand tasks, the user with a single task does not receive one one-thousandth of the CPU time.}, increasing the complexity.
    7877
    79 Linux also offers a FIFO scheduler, a real-time scheduler which runs the highest-priority \gls{at}, and a round-robin scheduler, which is an extension of the FIFO scheduler that adds fixed time slices. \cite{MAN:linux/sched}
     78Linux also offers a FIFO scheduler, a real-time scheduler which runs the highest-priority task, and a round-robin scheduler, which is an extension of the FIFO scheduler that adds fixed time slices. \cite{MAN:linux/sched}
    8079
    8180\paragraph{FreeBSD}
     
    8382
    8483\paragraph{Windows(OS)}
    85 Microsoft's Operating System's Scheduler\cite{MAN:windows/scheduler} is a feedback scheduler with priorities. It supports 32 levels of priorities, some of which are reserved for real-time and privileged applications. It schedules \glspl{at} based on the highest priority (lowest number) and how much CPU time each \gls{at} has used. The scheduler may also temporarily adjust priorities after certain events, like the completion of I/O requests.
     84Microsoft's Operating System's Scheduler\cite{MAN:windows/scheduler} is a feedback scheduler with priorities. It supports 32 levels of priorities, some of which are reserved for real-time and privileged applications. It schedules tasks based on the highest priority (lowest number) and how much CPU time each task has used. The scheduler may also temporarily adjust priorities after certain events, like the completion of I/O requests.
    8685
    8786\todo{load balancing}
     
    10099
    101100\subsection{User-Level Schedulers}
    102 By comparison, user-level schedulers tend to be simpler, gathering fewer metrics and avoiding complex notions of fairness. Part of the simplicity is due to the fact that all \glspl{at} have the same user, and therefore cooperation is both feasible and probable.
     101By comparison, user-level schedulers tend to be simpler, gathering fewer metrics and avoiding complex notions of fairness. Part of the simplicity is due to the fact that all tasks have the same user, and therefore cooperation is both feasible and probable.
    103102\paragraph{Go}
    104103Go's scheduler uses a Randomized Work Stealing algorithm that has a global runqueue (\emph{GRQ}) and each processor (\emph{P}) has both a fixed-size runqueue (\emph{LRQ}) and a high-priority next ``chair'' holding a single element.\cite{GITHUB:go,YTUBE:go} Preemption is present, but only at function call boundaries.
     
    117116
    118117\paragraph{Intel\textregistered ~Threading Building Blocks}
    119 \newterm{Thread Building Blocks} (TBB) is Intel's task parallelism\cite{wiki:taskparallel} framework. It runs \newterm{jobs}, uninterruptible \glspl{at}, schedulable objects that must always run to completion, on a pool of worker threads. TBB's scheduler is a variation of Randomized Work Stealing that also supports higher-priority graph-like dependencies\cite{MAN:tbb/scheduler}. It schedules \glspl{at} as follows (where \textit{t} is the last \gls{at} completed):
     118\newterm{Thread Building Blocks} (TBB) is Intel's task parallelism\cite{wiki:taskparallel} framework. It runs tasks or \newterm{jobs}, schedulable objects that must always run to completion, on a pool of worker threads. TBB's scheduler is a variation of Randomized Work Stealing that also supports higher-priority graph-like dependencies\cite{MAN:tbb/scheduler}. It schedules tasks as follows (where \textit{t} is the last task completed):
    120119\begin{displayquote}
    121120        \begin{enumerate}
     
    137136
    138137\paragraph{Grand Central Dispatch}
    139 This is an API produced by Apple\cit{Official GCD source} that offers task parallelism\cite{wiki:taskparallel}. Its distinctive aspect is that it uses multiple ``Dispatch Queues'', some of which are created by programmers. These queues each have their own local ordering guarantees, \eg \glspl{at} on queue $A$ are executed in \emph{FIFO} order.
     138This is an API produced by Apple\cit{Official GCD source} that offers task parallelism\cite{wiki:taskparallel}. Its distinctive aspect is that it uses multiple ``Dispatch Queues'', some of which are created by programmers. These queues each have their own local ordering guarantees, \eg tasks on queue $A$ are executed in \emph{FIFO} order.
    140139
    141140\todo{load balancing and scheduling}
  • doc/theses/thierry_delisle_PhD/thesis/text/io.tex

    r2e9b59b rba897d21  
    173173The consequence is that the amount of parallelism used to prepare submissions for the next system call is limited.
    174174Beyond this limit, the length of the system call is the throughput limiting factor.
    175 I concluded from early experiments that preparing submissions seems to take at most as long as the system call itself, which means that with a single @io_uring@ instance, there is no benefit in terms of \io throughput to having more than two \glspl{hthrd}.
     175I concluded from early experiments that preparing submissions seems to take about as long as the system call itself, which means that with a single @io_uring@ instance, there is no benefit in terms of \io throughput to having more than two \glspl{hthrd}.
    176176Therefore the design of the submission engine must manage multiple instances of @io_uring@ running in parallel, effectively sharding @io_uring@ instances.
    177177Similarly to scheduling, this sharding can be done privately, \ie, one instance per \gls{proc}, in decoupled pools, \ie, a pool of \glspl{proc} uses a pool of @io_uring@ instances without one-to-one coupling between any given instance and any given \gls{proc}, or some mix of the two.
     
    200200The only added complexity is that the number of SQEs is fixed, which means allocation can fail.
    201201
    202 Allocation failures need to be pushed up to a routing algorithm: \glspl{thrd} attempting \io operations must not be directed to @io_uring@ instances without sufficient SQEs available.
     202Allocation failures need to be pushed up to the routing algorithm: \glspl{thrd} attempting \io operations must not be directed to @io_uring@ instances without sufficient SQEs available.
    203203Furthermore, the routing algorithm should block operations up-front if none of the instances have available SQEs.
    204204
     
    214214
    215215In the case of designating a \gls{thrd}, ideally, when multiple \glspl{thrd} attempt to submit operations to the same @io_uring@ instance, all requests would be batched together and one of the \glspl{thrd} would do the system call on behalf of the others, referred to as the \newterm{submitter}.
    216 In practice however, it is important that the \io requests are not left pending indefinitely and as such, it may be required to have a ``next submitter'' that guarantees everything that is missed by the current submitter is seen by the next one.
     216In practice however, it is important that the \io requests are not left pending indefinitely and as such, it may be required to have a current submitter and a next submitter.
    217217Indeed, as long as there is a ``next'' submitter, \glspl{thrd} submitting new \io requests can move on, knowing that some future system call will include their request.
    218218Once the system call is done, the submitter must also free SQEs so that the allocator can reuse them.
     
    223223If the submission side does not designate submitters, polling can also submit all SQEs as it is polling events.
    224224A simple approach to polling is to allocate a \gls{thrd} per @io_uring@ instance and simply let the poller \glspl{thrd} poll their respective instances when scheduled.
     225This design is especially convenient for reasons explained in Chapter~\ref{practice}.
    225226
    226227With this pool of instances approach, the big advantage is that it is fairly flexible.
    227228It does not impose restrictions on what \glspl{thrd} submitting \io operations can and cannot do between allocations and submissions.
    228 It also can gracefully handle running out of resources, SQEs or the kernel returning @EBUSY@.
     229It can also gracefully handle running out of resources, SQEs or the kernel returning @EBUSY@.
    229230The downside to this is that many of the steps used for submitting need complex synchronization to work properly.
    230231The routing and allocation algorithm needs to keep track of which ring instances have available SQEs, block incoming requests if no instance is available, prevent barging if \glspl{thrd} are already queued up waiting for SQEs and handle SQEs being freed.
    231232The submission side needs to safely append SQEs to the ring buffer, correctly handle chains, make sure no SQE is dropped or left pending forever, notify the allocation side when SQEs can be reused and handle the kernel returning @EBUSY@.
    232 All this synchronization may have a significant cost and, compared to the next approach presented, this synchronization is entirely overhead.
     233All this synchronization may have a significant cost and, compared to the next approach presented, this synchronization is entirely overhead.
    233234
    234235\subsubsection{Private Instances}
    235236Another approach is to simply create one ring instance per \gls{proc}.
    236 This alleviates the need for synchronization on the submissions, requiring only that \glspl{thrd} are not interrupted in between two submission steps.
      237This alleviates the need for synchronization on the submissions, requiring only that \glspl{thrd} are not interrupted in between two submission steps.
    237238This is effectively the same requirement as using @thread_local@ variables.
    238239Since SQEs that are allocated must be submitted to the same ring, on the same \gls{proc}, this effectively forces the application to submit SQEs in allocation order
     
    330331\paragraph{Pending Allocations} can be more complicated to handle.
    331332If the arbiter has available instances, the arbiter can attempt to directly hand over the instance and satisfy the request.
    332 Otherwise it must hold onto the list of threads until SQEs are made available again.
    333 This handling becomes that much more complex if pending allocations require more than one SQE, since the arbiter must make a decision between satisfying requests in FIFO order or satisfying requests for fewer SQEs first.
    334 
    335 While this arbiter has the potential to solve many of the problems mentioned above, it also introduces a significant amount of complexity.
    336 Tracking which processors are borrowing which instances and which instances have SQEs available ends up adding a significant synchronization prelude to any I/O operation.
    337 Any submission must start with a handshake that pins the currently borrowed instance, if available.
    338 An attempt to allocate is then made, but the arbiter can concurrently be attempting to allocate from the same instance on a different \gls{hthrd}.
    339 Once the allocation is completed, the submission must still check that the instance is still borrowed before attempting to flush.
    340 These extra synchronization steps end up having a similar cost to the multiple shared instances approach.
    341 Furthermore, if the number of instances does not match the number of processors actively submitting I/O, the system can fall into a state where instances are constantly being revoked, which ends up cycling the processors and leads to significant cache deterioration.
    342 For these reasons, this approach, which sounds promising on paper, does not improve on the private instance approach in practice.
    343 
    344 \subsubsection{Private Instances V2}
    345 
     333Otherwise
    346334
    347335
     
    406394Finally, the last important part of the \io subsystem is its interface. There are multiple approaches that can be offered to programmers, each with advantages and disadvantages. The new \io subsystem can replace the C runtime's API or extend it, and in the latter case the interface can range from very similar to vastly different. The following sections discuss some useful options using @read@ as an example. The standard Linux interface for C is:
    407395
    408 @ssize_t read(int fd, void *buf, size_t count);@
     396@ssize_t read(int fd, void *buf, size_t count);@.
    409397
    410398\subsection{Replacement}
    411 Replacing the C \glsxtrshort{api} is the more intrusive and draconian approach.
    412 The goal is to convince the compiler and linker to replace any calls to @read@ to direct them to the \CFA implementation instead of glibc's.
    413 This has the advantage of potentially working transparently and supporting existing binaries without needing recompilation.
    414 It also offers a, presumably, well known and familiar API that C programmers can simply continue to work with.
    415 However, this approach also entails a plethora of subtle technical challenges which generally boils down to making a perfect replacement.
    416 If the \CFA interface replaces only \emph{some} of the calls to glibc, then this can easily lead to esoteric concurrency bugs.
    417 Since the gcc ecosystems does not offer a scheme for such perfect replacement, this approach was rejected as being laudable but infeasible.
     399Replacing the C \glsxtrshort{api}
    418400
    419401\subsection{Synchronous Extension}
    420 Another interface option is to simply offer an interface that is different in name only. For example:
    421 
    422 @ssize_t cfa_read(int fd, void *buf, size_t count);@
    423 
    424 \noindent This is much more feasible but still familiar to C programmers.
    425 It comes with the caveat that any code attempting to use it must be recompiled, which can be a big problem considering the amount of existing legacy C binaries.
    426 However, it has the advantage of implementation simplicity.
    427402
    428403\subsection{Asynchronous Extension}
    429 It is important to mention that there is a certain irony to using only synchronous, therefore blocking, interfaces for a feature often referred to as ``non-blocking'' \io.
    430 A fairly traditional way of doing this is using futures\cit{wikipedia futures}.
    431 A simple way of doing so is as follows:
    432 
    433 @future(ssize_t) read(int fd, void *buf, size_t count);@
    434 
    435 \noindent Note that this approach is not necessarily the most idiomatic usage of futures.
    436 The definition of read above ``returns'' the read content through an output parameter which cannot be synchronized on.
    437 A more classical asynchronous API could look more like:
    438 
    439 @future([ssize_t, void *]) read(int fd, size_t count);@
    440 
    441 \noindent However, this interface immediately introduces memory lifetime challenges since the call must effectively allocate a buffer to be returned.
    442 Because of the performance implications of this, the first approach is considered preferable as it is more familiar to C programmers.
    443404
    444405\subsection{Interface directly to \lstinline{io_uring}}
    445 Finally, another interface that can be relevant is to simply expose the underlying \texttt{io\_uring} interface directly. For example:
    446 
    447 @array(SQE, want) cfa_io_allocate(int want);@
    448 
    449 @void cfa_io_submit( const array(SQE, have) & );@
    450 
    451 \noindent This offers more flexibility to users wanting to fully use all of the \texttt{io\_uring} features.
    452 However, it is not the most user-friendly option.
    453 It obviously imposes a strong dependency between user code and \texttt{io\_uring}, while at the same time restricting users to usages that are compatible with how \CFA internally uses \texttt{io\_uring}.
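A hedged usage sketch of this interface (the SQE field names follow the Linux \texttt{io\_uring} ABI; everything else here is an assumption):
\begin{lstlisting}
char buf[4096];
array(SQE, 1) sqes = cfa_io_allocate( 1 );   // reserve one SQE
SQE & sqe = sqes[0];
sqe.opcode = IORING_OP_READ;                 // fill the raw SQE directly
sqe.fd     = fd;
sqe.addr   = (unsigned long long)buf;
sqe.len    = sizeof(buf);
cfa_io_submit( sqes );                       // flushed by the runtime
\end{lstlisting}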
    454 
    455 
  • doc/theses/thierry_delisle_PhD/thesis/text/practice.tex

    r2e9b59b rba897d21  
    22The scheduling algorithm described in Chapter~\ref{core} addresses scheduling in a stable state.
    33However, it does not address problems that occur when the system changes state.
    4 Indeed, the \CFA runtime supports expanding and shrinking the number of \procs, both manually and, to some extent, automatically.
     4Indeed, the \CFA runtime supports expanding and shrinking the number of KTHREAD\_place \todo{add kthrd to glossary}, both manually and, to some extent, automatically.
    55This entails that the scheduling algorithm must support these transitions.
    66
    7 More precisely, \CFA supports adding \procs using the RAII object @processor@.
    8 These objects can be created at any time and can be destroyed at any time.
    9 They are normally created as automatic stack variables, but this is not a requirement.
    10 
    11 The consequence is that the scheduler and \io subsystems must support \procs coming in and out of existence.
    12 
    13 \section{Manual Resizing}
    14 The consequence of dynamically changing the number of \procs is that all internal arrays that are sized based on the number of \procs need to be \texttt{realloc}ed.
    15 This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing because there is no guarantee the \proc causing the shrink had the highest index. Therefore indexes need to be reassigned to preserve contiguous indexes.}.
    16 
    17 There are no performance requirements, within reason, for resizing since this is usually considered as part of setup and teardown.
    18 However, this operation has strict correctness requirements since shrinking and idle sleep can easily lead to deadlocks.
    19 It should also avoid as much as possible any effect on performance when the number of \procs remains constant.
    20 This latter requirement prohibits simple solutions, like simply adding a global lock to these arrays.
    21 
    22 \subsection{Read-Copy-Update}
    23 One solution is to use the Read-Copy-Update\cite{wiki:rcu} pattern.
    24 In this pattern, resizing is done by creating a copy of the internal data structures, updating the copy with the desired changes, and then attempting an Indiana Jones switch to replace the original with the copy.
    25 This approach potentially has the advantage that it may not need any synchronization to do the switch.
    26 The switch definitely implies a race where \procs could still use the previous, original, data structure after the copy was switched in.
    27 The important question then becomes whether or not this race can be recovered from.
    28 If the changes that arrived late can be transferred from the original to the copy then this solution works.
    29 
    30 For linked-lists, dequeuing is somewhat of a problem.
    31 Dequeuing from the original will not necessarily update the copy, which could lead to multiple \procs dequeuing the same \at.
    32 Fixing this requires making the array contain pointers to subqueues rather than the subqueues themselves.
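A minimal sketch of this copy-and-switch over an array of subqueue pointers (all names are illustrative; safe reclamation of the original is deliberately elided, as discussed next):
\begin{lstlisting}
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
	size_t n;
	subqueue_t * qs[];   // pointers, so dequeues are seen through either table
} table_t;
_Atomic(table_t *) table;                    // loaded by every operation

void grow( size_t extra ) {
	table_t * old = atomic_load( &table );
	table_t * new = malloc( sizeof(table_t) + (old->n + extra) * sizeof(subqueue_t *) );
	new->n = old->n + extra;
	memcpy( new->qs, old->qs, old->n * sizeof(subqueue_t *) );
	for( size_t i = old->n; i < new->n; i++ ) new->qs[i] = new_subqueue();
	atomic_store( &table, new );             // the "Indiana Jones" switch
	// `old` must survive until every proc has witnessed the change
}
\end{lstlisting}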
    33 
    34 Another challenge is that the original must be kept until all \procs have witnessed the change.
    35 This is a straightforward memory reclamation challenge but it does mean that every operation will need \emph{some} form of synchronization.
    36 If each of these operations needs synchronization then it is possible a simpler solution achieves the same performance.
    37 Because in addition to the classic challenge of memory reclamation, transferring the original data to the copy before reclaiming it poses additional challenges,
    38 especially merging subqueues while having a minimal impact on fairness and locality.
    39 
    40 \subsection{Read-Writer Lock}
    41 A simpler approach would be to use a \newterm{Readers-Writer Lock}\cite{wiki:rwlock} where the resizing requires acquiring the lock as a writer while simply enqueuing/dequeuing \ats requires acquiring the lock as a reader.
    42 Using a Readers-Writer lock solves the problem of dynamically resizing and leaves the challenge of finding or building a lock with sufficiently good read-side performance.
    43 Since this is not a very complex challenge and an ad-hoc solution is perfectly acceptable, building a Readers-Writer lock was the path taken.
    44 
    45 To maximize reader scalability, the readers should not contend with each other when attempting to acquire and release the critical sections.
    46 This effectively requires that each reader have its own piece of memory to mark as locked and unlocked.
    47 Readers then acquire the lock by waiting for writers to finish the critical section and then acquiring their local spinlocks.
    48 Writers acquire the global lock, so writers have mutual exclusion among themselves, and then acquire each of the local reader locks.
    49 Acquiring all the local locks guarantees mutual exclusion between the readers and the writer, while the wait on the read side prevents readers from continuously starving the writer.
    50 \todo{reference listings}
    51 
    52 \begin{lstlisting}
    53 void read_lock() {
    54         // Step 1 : make sure no writers in
    55         while( write_lock ) { Pause(); }
    56 
    57         // May need fence here
    58 
    59         // Step 2 : acquire our local lock
    60         while( atomic_xchg( tls.lock ) ) {
    61                 Pause();
    62         }
    63 }
    64 
    65 void read_unlock() {
    66         tls.lock = false;
    67 }
    68 \end{lstlisting}
    69 
    70 \begin{lstlisting}
    71 void write_lock() {
    72         // Step 1 : lock global lock
    73         while( atomic_xchg( write_lock ) ) {
    74                 Pause();
    75         }
    76 
    77         // Step 2 : lock per-proc locks
    78         for t in all_tls {
    79                 while atomic_xchg( t.lock ) {
    80                         Pause();
    81                 }
    82         }
    83 }
    84 
    85 void write_unlock() {
    86         // Step 1 : release local locks
    87         for t in all_tls {
    88                 t.lock = false;
    89         }
    90 
    91         // Step 2 : release global lock
    92         write_lock = false;
    93 }
    94 \end{lstlisting}
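Usage then follows the pattern below (a sketch; the bodies are elided): the rare resize path pays the full write-side cost, while the hot enqueue/dequeue path pays only the read side.
\begin{lstlisting}
void resize( unsigned nprocs ) {
	write_lock();
	// realloc the per-proc arrays and reassign indexes, as described above
	write_unlock();
}

void enqueue( thread$ * thrd ) {
	read_lock();
	// push onto a subqueue; only the uncontended read side is paid here
	read_unlock();
}
\end{lstlisting}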
     7\section{Resizing}
    958
    969\section{Idle-Sleep}
    97 In addition to users manually changing the number of \procs, it is desirable to support ``removing'' \procs when there are not enough \ats for all the \procs to be useful.
    98 While manual resizing is expected to be rare, the number of \ats is expected to vary much more which means \procs may need to be ``removed'' for only short periods of time.
    99 Furthermore, race conditions that spuriously lead to the impression no \ats are ready are actually common in practice.
    100 Therefore \procs should not be actually \emph{removed} but simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready.
    101 This state is referred to as \newterm{Idle-Sleep}.
    102 
    103 Idle sleep effectively encompasses several challenges.
    104 First, some data structure needs to keep track of all \procs that are in idle sleep.
    105 Because idle sleep can be spurious, this data structure has strict performance requirements in addition to the strict correctness requirements.
    106 Next, some tool must be used to block \glspl{kthrd}, \eg \texttt{pthread\_cond\_wait} or pthread semaphores.
    107 The challenge here is to support \at parking and unparking, timers, \io operations and all other \CFA features with minimal complexity.
    108 Finally, idle sleep also includes a heuristic to determine the appropriate number of \procs to be in idle sleep at any given time.
    109 This third challenge is however outside the scope of this thesis because developing a general heuristic is involved enough to justify its own work.
    110 The \CFA scheduler simply follows the ``Race-to-Idle''\cit{https://doi.org/10.1137/1.9781611973099.100} approach where a sleeping \proc is woken any time a \at becomes ready and \procs go to idle sleep anytime they run out of work.
    111 
    112 
    113 \section{Tracking Sleepers}
    114 Tracking which \procs are in idle sleep requires a data structure holding all the sleeping \procs, but more importantly it requires a concurrent \emph{handshake} so that no \at is stranded on a ready-queue with no active \proc.
    115 The classic challenge occurs when a \at is made ready while a \proc is going to sleep: there is a race where the new \at may not see the sleeping \proc and the sleeping \proc may not see the ready \at.
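A hedged sketch of a handshake that closes this race (all helper names are assumptions): the \proc publishes its intent to sleep \emph{before} re-checking the ready queue, and the waker pushes the ready \at \emph{before} scanning for sleepers, so at least one side must observe the other.
\begin{lstlisting}
typedef struct {
	_Atomic _Bool idle;  // published intent to sleep
	int idle_fd;         // assumed eventfd-like handle to block on
} proc_t;

void go_to_sleep( proc_t * this ) {
	atomic_store( &this->idle, true );      // 1. publish first
	if( !ready_queue_empty() ) {            // 2. then re-check for a late thread
		atomic_store( &this->idle, false ); // lost the race: keep running
		return;
	}
	block_on( this->idle_fd );              // 3. actually block the kthrd
}

void notify_one( proc_t procs[], unsigned n ) {
	// the caller already pushed the ready thread before calling this
	for( unsigned i = 0; i < n; i++ )
		if( atomic_load( &procs[i].idle ) ) { wake( procs[i].idle_fd ); return; }
}
\end{lstlisting}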
    116 
    117 Furthermore, the ``Race-to-Idle'' approach means that there is some
    118 
    119 \section{Sleeping}
    120 
    121 \subsection{Event FDs}
    122 
    123 \subsection{Epoll}
    124 
    125 \subsection{\texttt{io\_uring}}
    126 
    127 \section{Reducing Latency}
  • doc/theses/thierry_delisle_PhD/thesis/thesis.tex

    r2e9b59b rba897d21  
    202202
    203203\newcommand\io{\glsxtrshort{io}\xspace}%
    204 \newcommand\at{\gls{at}\xspace}%
    205 \newcommand\ats{\glspl{at}\xspace}%
    206 \newcommand\proc{\gls{proc}\xspace}%
    207 \newcommand\procs{\glspl{proc}\xspace}%
    208204
    209205%======================================================================
  • libcfa/src/Makefile.am

    r2e9b59b rba897d21  
    5858        bits/queue.hfa \
    5959        bits/sequence.hfa \
     60        containers/array.hfa \
    6061        concurrency/iofwd.hfa \
    61         concurrency/barrier.hfa \
    62         containers/array.hfa \
    6362        containers/list.hfa \
    6463        containers/queueLockFree.hfa \
     
    120119        concurrency/exception.hfa \
    121120        concurrency/kernel.hfa \
    122         concurrency/kernel/cluster.hfa \
    123121        concurrency/locks.hfa \
    124122        concurrency/monitor.hfa \
     
    136134        concurrency/io/call.cfa \
    137135        concurrency/iofwd.hfa \
    138         concurrency/kernel/private.hfa \
     136        concurrency/kernel_private.hfa \
    139137        concurrency/kernel/startup.cfa \
    140138        concurrency/preemption.cfa \
  • libcfa/src/concurrency/coroutine.cfa

    r2e9b59b rba897d21  
    2727#include <unwind.h>
    2828
    29 #include "kernel/private.hfa"
     29#include "kernel_private.hfa"
    3030#include "exception.hfa"
    3131#include "math.hfa"
  • libcfa/src/concurrency/io.cfa

    r2e9b59b rba897d21  
    4141        #include "kernel.hfa"
    4242        #include "kernel/fwd.hfa"
    43         #include "kernel/private.hfa"
    44         #include "kernel/cluster.hfa"
     43        #include "kernel_private.hfa"
    4544        #include "io/types.hfa"
    4645
     
    9493        extern void __kernel_unpark( thread$ * thrd, unpark_hint );
    9594
    96         static void ioring_syscsll( struct $io_context & ctx, unsigned int min_comp, unsigned int flags ) {
    97                 __STATS__( true, io.calls.flush++; )
    98                 int ret;
    99                 for() {
    100                         ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, flags, (sigset_t *)0p, _NSIG / 8);
     95        bool __cfa_io_drain( processor * proc ) {
     96                /* paranoid */ verify( ! __preemption_enabled() );
     97                /* paranoid */ verify( ready_schedule_islocked() );
     98                /* paranoid */ verify( proc );
     99                /* paranoid */ verify( proc->io.ctx );
     100
     101                // Drain the queue
     102                $io_context * ctx = proc->io.ctx;
     103                unsigned head = *ctx->cq.head;
     104                unsigned tail = *ctx->cq.tail;
     105                const __u32 mask = *ctx->cq.mask;
     106
     107                __u32 count = tail - head;
     108                __STATS__( false, io.calls.drain++; io.calls.completed += count; )
     109
     110                if(count == 0) return false;
     111
     112                for(i; count) {
     113                        unsigned idx = (head + i) & mask;
     114                        volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];
     115
     116                        /* paranoid */ verify(&cqe);
     117
     118                        struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
     119                        __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );
     120
     121                        __kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
     122                }
     123
     124                __cfadbg_print_safe(io, "Kernel I/O : %u completed\n", count);
     125
     126                // Mark to the kernel that the cqe has been seen
     127                // Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
     128                __atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );
     129
     130                /* paranoid */ verify( ready_schedule_islocked() );
     131                /* paranoid */ verify( ! __preemption_enabled() );
     132
     133                return true;
     134        }
     135
     136        bool __cfa_io_flush( processor * proc, int min_comp ) {
     137                /* paranoid */ verify( ! __preemption_enabled() );
     138                /* paranoid */ verify( proc );
     139                /* paranoid */ verify( proc->io.ctx );
     140
     141                __attribute__((unused)) cluster * cltr = proc->cltr;
     142                $io_context & ctx = *proc->io.ctx;
     143
     144                __ioarbiter_flush( ctx );
     145
     146                if(ctx.sq.to_submit != 0 || min_comp > 0) {
     147
     148                        __STATS__( true, io.calls.flush++; )
     149                        int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, min_comp > 0 ? IORING_ENTER_GETEVENTS : 0, (sigset_t *)0p, _NSIG / 8);
    101150                        if( ret < 0 ) {
    102151                                switch((int)errno) {
     152                                case EAGAIN:
    103153                                case EINTR:
    104                                         continue;
    105                                 case EAGAIN:
    106154                                case EBUSY:
    107155                                        // Update statistics
     
    112160                                }
    113161                        }
    114                         break;
    115                 }
    116 
    117                 __cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
    118                 __STATS__( true, io.calls.submitted += ret; )
    119                 /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
    120                 /* paranoid */ verify( ctx.sq.to_submit >= ret );
    121 
    122                 ctx.sq.to_submit -= ret;
    123 
    124                 /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
    125 
    126                 // Release the consumed SQEs
    127                 __release_sqes( ctx );
    128 
    129                 /* paranoid */ verify( ! __preemption_enabled() );
    130 
    131                 __atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED);
    132         }
    133 
    134         static bool try_acquire( $io_context * ctx ) __attribute__((nonnull(1))) {
    135                 /* paranoid */ verify( ! __preemption_enabled() );
    136                 /* paranoid */ verify( ready_schedule_islocked() );
    137 
    138 
    139                 {
    140                         const __u32 head = *ctx->cq.head;
    141                         const __u32 tail = *ctx->cq.tail;
    142 
    143                         if(head == tail) return false;
    144                 }
    145 
    146                 // Drain the queue
    147                 if(!__atomic_try_acquire(&ctx->cq.lock)) {
    148                         __STATS__( false, io.calls.locked++; )
    149                         return false;
    150                 }
    151 
    152                 return true;
    153         }
    154 
    155         static bool __cfa_do_drain( $io_context * ctx, cluster * cltr ) __attribute__((nonnull(1, 2))) {
    156                 /* paranoid */ verify( ! __preemption_enabled() );
    157                 /* paranoid */ verify( ready_schedule_islocked() );
    158                 /* paranoid */ verify( ctx->cq.lock == true );
    159 
    160                 const __u32 mask = *ctx->cq.mask;
    161                 unsigned long long ts_prev = ctx->cq.ts;
    162 
     163                 // re-read the head and tail in case they already changed.
    164                 const __u32 head = *ctx->cq.head;
    165                 const __u32 tail = *ctx->cq.tail;
    166                 const __u32 count = tail - head;
    167                 __STATS__( false, io.calls.drain++; io.calls.completed += count; )
    168 
    169                 for(i; count) {
    170                         unsigned idx = (head + i) & mask;
    171                         volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];
    172 
    173                         /* paranoid */ verify(&cqe);
    174 
    175                         struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
    176                         // __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );
    177 
    178                         __kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
    179                 }
    180 
    181                 unsigned long long ts_next = ctx->cq.ts = rdtscl();
    182 
     183                 // Signal to the kernel that the cqes have been seen
    184                 // Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
    185                 __atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );
    186                 ctx->proc->idle_wctx.drain_time = ts_next;
    187 
    188                 __cfadbg_print_safe(io, "Kernel I/O : %u completed age %llu\n", count, ts_next);
    189                 /* paranoid */ verify( ready_schedule_islocked() );
    190                 /* paranoid */ verify( ! __preemption_enabled() );
    191 
    192                 __atomic_unlock(&ctx->cq.lock);
    193 
    194                 touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next );
    195 
    196                 return true;
    197         }
    198 
    199         bool __cfa_io_drain( processor * proc ) {
    200                 bool local = false;
    201                 bool remote = false;
     162
     163                        __cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
     164                        __STATS__( true, io.calls.submitted += ret; )
     165                        /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
     166                        /* paranoid */ verify( ctx.sq.to_submit >= ret );
     167
     168                        ctx.sq.to_submit -= ret;
     169
     170                        /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
     171
     172                        // Release the consumed SQEs
     173                        __release_sqes( ctx );
     174
     175                        /* paranoid */ verify( ! __preemption_enabled() );
     176
     177                        ctx.proc->io.pending = false;
     178                }
    202179
    203180                ready_schedule_lock();
    204 
    205                 cluster * const cltr = proc->cltr;
    206                 $io_context * const ctx = proc->io.ctx;
    207                 /* paranoid */ verify( cltr );
    208                 /* paranoid */ verify( ctx );
    209 
    210                 with(cltr->sched) {
    211                         const size_t ctxs_count = io.count;
    212 
    213                         /* paranoid */ verify( ready_schedule_islocked() );
    214                         /* paranoid */ verify( ! __preemption_enabled() );
    215                         /* paranoid */ verify( active_processor() == proc );
    216                         /* paranoid */ verify( __shard_factor.io > 0 );
    217                         /* paranoid */ verify( ctxs_count > 0 );
    218                         /* paranoid */ verify( ctx->cq.id < ctxs_count );
    219 
    220                         const unsigned this_cache = cache_id(cltr, ctx->cq.id / __shard_factor.io);
    221                         const unsigned long long ctsc = rdtscl();
    222 
    223                         if(proc->io.target == MAX) {
    224                                 uint64_t chaos = __tls_rand();
    225                                 unsigned ext = chaos & 0xff;
    226                                 unsigned other  = (chaos >> 8) % (ctxs_count);
    227 
    228                                 if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
    229                                         proc->io.target = other;
    230                                 }
    231                         }
    232                         else {
    233                                 const unsigned target = proc->io.target;
    234                                 /* paranoid */ verify( io.tscs[target].tv != MAX );
    235                                 HELP: if(target < ctxs_count) {
    236                                         const unsigned long long cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io);
    237                                         const unsigned long long age = moving_average(ctsc, io.tscs[target].tv, io.tscs[target].ma);
    238                                         __cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no");
    239                                         if(age <= cutoff) break HELP;
    240 
    241                                         if(!try_acquire(io.data[target])) break HELP;
    242 
    243                                         if(!__cfa_do_drain( io.data[target], cltr )) break HELP;
    244 
    245                                         remote = true;
    246                                         __STATS__( false, io.calls.helped++; )
    247                                 }
    248                                 proc->io.target = MAX;
    249                         }
    250                 }
    251 
    252 
    253                 // Drain the local queue
    254                 if(try_acquire( proc->io.ctx )) {
    255                         local = __cfa_do_drain( proc->io.ctx, cltr );
    256                 }
    257 
    258                 /* paranoid */ verify( ready_schedule_islocked() );
    259                 /* paranoid */ verify( ! __preemption_enabled() );
    260                 /* paranoid */ verify( active_processor() == proc );
    261 
     181                bool ret = __cfa_io_drain( proc );
    262182                ready_schedule_unlock();
    263                 return local || remote;
    264         }
    265 
    266         bool __cfa_io_flush( processor * proc ) {
    267                 /* paranoid */ verify( ! __preemption_enabled() );
    268                 /* paranoid */ verify( proc );
    269                 /* paranoid */ verify( proc->io.ctx );
    270 
    271                 $io_context & ctx = *proc->io.ctx;
    272 
    273                 __ioarbiter_flush( ctx );
    274 
    275                 if(ctx.sq.to_submit != 0) {
    276                         ioring_syscsll(ctx, 0, 0);
    277 
    278                 }
    279 
    280                 return __cfa_io_drain( proc );
     183                return ret;
    281184        }
    282185
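
Note on the HELP path above: a processor only steals completions from another ring when that ring's completions have sat unprocessed longer than a cutoff derived from drain-interval moving averages. A minimal C sketch of that age test; the smoothing weights and cutoff margin here are illustrative assumptions, not libcfa's exact constants:

    /* Hedged sketch of the aging test behind the HELP path.
     * The 3:1 smoothing and the 2x cutoff margin are assumptions
     * for illustration, not the constants used by libcfa. */
    static unsigned long long moving_average(unsigned long long now,
                                             unsigned long long last_ts,
                                             unsigned long long avg) {
        unsigned long long sample = now - last_ts; /* time since last drain */
        return (3 * avg + sample) / 4;             /* assumed 3:1 smoothing */
    }

    /* A ring is only worth helping if its age exceeds a cutoff derived
     * from our own ring's average drain interval. */
    static int should_help(unsigned long long now,
                           unsigned long long target_ts,
                           unsigned long long target_ma,
                           unsigned long long own_ma) {
        unsigned long long age    = moving_average(now, target_ts, target_ma);
        unsigned long long cutoff = 2 * own_ma;    /* assumed margin */
        return age > cutoff;
    }
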
     
    306209                struct io_uring_sqe * sqes = ctx->sq.sqes;
    307210                for(i; want) {
    308                         // __cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
     211                        __cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
    309212                        out_sqes[i] = &sqes[idxs[i]];
    310213                }
     
    324227                // copy all the indexes we want from the available list
    325228                for(i; want) {
    326                         // __cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
     229                        __cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
    327230                        idxs[i] = sq.free_ring.array[(fhead + i) & mask];
    328231                }
     
    341244        // sqe == &sqes[idx]
    342245        struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
    343                 // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
     246                __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
    344247
    345248                disable_interrupts();
     
    349252                /* paranoid */ verify( ctx );
    350253
     351                 // __cfadbg_print_safe(io, "Kernel I/O : attempting fast allocation\n");
      254                __cfadbg_print_safe(io, "Kernel I/O : attempting fast allocation\n");
    352255
    353256                // We can proceed to the fast path
     
    357260                        enable_interrupts();
    358261
    359                         // __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);
     262                        __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);
    360263
    361264                        __fill( sqes, want, idxs, ctx );
     
    372275                /* paranoid */ verify( ioarb );
    373276
    374                 // __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");
     277                __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");
    375278
    376279                struct $io_context * ret = __ioarbiter_allocate(*ioarb, idxs, want);
    377280
    378                 // __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);
     281                __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);
    379282
    380283                __fill( sqes, want, idxs,ret );
     
    393296                // Add the sqes to the array
    394297                for( i; have ) {
    395                         // __cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
     298                        __cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
    396299                        sq.kring.array[ (tail + i) & mask ] = idxs[i];
    397300                }
     
    401304                sq.to_submit += have;
    402305
    403                 __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED);
    404                 __atomic_store_n(&ctx->proc->io.dirty  , true, __ATOMIC_RELAXED);
     306                ctx->proc->io.pending = true;
     307                ctx->proc->io.dirty   = true;
    405308        }
    406309
     
    411314                if(sq.to_submit > 30) {
    412315                        __tls_stats()->io.flush.full++;
    413                         __cfa_io_flush( ctx->proc );
     316                        __cfa_io_flush( ctx->proc, 0 );
    414317                }
    415318                if(!lazy) {
    416319                        __tls_stats()->io.flush.eager++;
    417                         __cfa_io_flush( ctx->proc );
     320                        __cfa_io_flush( ctx->proc, 0 );
    418321                }
    419322        }
    420323
    421324        void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
    422                 // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
     325                __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
    423326
    424327                disable_interrupts();
     
    437340                        enable_interrupts();
    438341
    439                         // __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
     342                        __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
    440343                        return;
    441344                }
     
    445348                enable_interrupts();
    446349
    447                 // __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");
     350                __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");
    448351
    449352                __ioarbiter_submit(inctx, idxs, have, lazy);
     
    489392                // go through the range and release the sqes
    490393                for( i; count ) {
    491                         // __cfadbg_print_safe(io, "Kernel I/O : release loop\n");
     394                        __cfadbg_print_safe(io, "Kernel I/O : release loop\n");
    492395                        __u32 idx = ctx.sq.kring.array[ (phead + i) & mask ];
    493396                        ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx;
     
    529432
    530433        static $io_context * __ioarbiter_allocate( $io_arbiter & this, __u32 idxs[], __u32 want ) {
    531                 // __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");
     434                __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");
    532435
    533436                __STATS__( false, io.alloc.block += 1; )
     
    596499                bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei);
    597500
    598                 __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST);
     501                ctx->proc->io.pending = true;
    599502
    600503                if( we ) {
     
    641544
    642545                        // We can proceed to the fast path
    643                         if( !__alloc(ctx, &idx, 1) ) {
    644                                 /* paranoid */ verify( false ); // for now check if this happens, next time just abort the sleep.
    645                                 return false;
    646                         }
     546                        if( !__alloc(ctx, &idx, 1) ) return false;
    647547
    648548                        // Allocation was successful
     
    674574
    675575                        /* paranoid */ verify( sqe->user_data == (uintptr_t)&future );
    676                         __submit_only( ctx, &idx, 1 );
     576                        __submit( ctx, &idx, 1, true );
    677577
    678578                        /* paranoid */ verify( proc == __cfaabi_tls.this_processor );
     
    681581                        return true;
    682582                }
    683 
    684                 void __cfa_io_idle( processor * proc ) {
    685                         iovec iov;
    686                         __atomic_acquire( &proc->io.ctx->cq.lock );
    687 
    688                         __attribute__((used)) volatile bool was_reset = false;
    689 
    690                         with( proc->idle_wctx) {
    691 
    692                                 // Do we already have a pending read
    693                                 if(available(*ftr)) {
    694                                         // There is no pending read, we need to add one
    695                                         reset(*ftr);
    696 
    697                                         iov.iov_base = rdbuf;
    698                                         iov.iov_len  = sizeof(eventfd_t);
    699                                         __kernel_read(proc, *ftr, iov, evfd );
    700                                         ftr->result = 0xDEADDEAD;
    701                                         *((eventfd_t *)rdbuf) = 0xDEADDEADDEADDEAD;
    702                                         was_reset = true;
    703                                 }
    704                         }
    705 
    706                         if( !__atomic_load_n( &proc->do_terminate, __ATOMIC_SEQ_CST ) ) {
    707                                 __ioarbiter_flush( *proc->io.ctx );
    708                                 proc->idle_wctx.sleep_time = rdtscl();
    709                                 ioring_syscsll( *proc->io.ctx, 1, IORING_ENTER_GETEVENTS);
    710                         }
    711 
    712                         ready_schedule_lock();
    713                         __cfa_do_drain( proc->io.ctx, proc->cltr );
    714                         ready_schedule_unlock();
    715 
    716                         asm volatile ("" :: "m" (was_reset));
    717                 }
    718583        #endif
    719584#endif
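
Note on the drain functions in this file: they follow the standard io_uring completion-ring protocol, i.e. read the kernel-written tail, consume cqes[head & mask] up to it, then publish the new head so the kernel can reuse the slots. A minimal sketch in plain C, with liburing-style field names assumed:

    #include <stdatomic.h>
    #include <linux/io_uring.h>

    struct cq_view {
        _Atomic unsigned *head;          /* consumer index, written here   */
        _Atomic unsigned *tail;          /* producer index, kernel-written */
        unsigned mask;                   /* ring entries - 1               */
        struct io_uring_cqe *cqes;
    };

    static unsigned drain(struct cq_view *cq,
                          void (*complete)(struct io_uring_cqe *)) {
        unsigned head  = atomic_load_explicit(cq->head, memory_order_relaxed);
        unsigned tail  = atomic_load_explicit(cq->tail, memory_order_acquire);
        unsigned count = tail - head;
        for (unsigned i = 0; i < count; i++)
            complete(&cq->cqes[(head + i) & cq->mask]);
        /* Publish only after every cqe has been read; the kernel may
         * overwrite the slots as soon as it sees the new head. */
        atomic_store_explicit(cq->head, head + count, memory_order_release);
        return count;
    }
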
  • libcfa/src/concurrency/io/setup.cfa

    r2e9b59b rba897d21  
    3232
    3333        void __cfa_io_start( processor * proc ) {}
    34         bool __cfa_io_flush( processor * proc ) { return false; }
    35         bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1))) { return false; }
    36         void __cfa_io_idle ( processor * ) __attribute__((nonnull (1))) {}
     34        bool __cfa_io_flush( processor * proc, int ) { return false; }
    3735        void __cfa_io_stop ( processor * proc ) {}
    3836
     
    4139
    4240#else
    43 #pragma GCC diagnostic push
    44 #pragma GCC diagnostic ignored "-Waddress-of-packed-member"
    4541        #include <errno.h>
    4642        #include <stdint.h>
     
    6157        #include "bitmanip.hfa"
    6258        #include "fstream.hfa"
    63         #include "kernel/private.hfa"
    64         #include "limits.hfa"
     59        #include "kernel_private.hfa"
    6560        #include "thread.hfa"
    66 #pragma GCC diagnostic pop
    6761
    6862        void ?{}(io_context_params & this) {
     
    118112                this.ext_sq.empty = true;
    119113                (this.ext_sq.queue){};
    120                 __io_uring_setup( this, cl.io.params, proc->idle_wctx.evfd );
     114                __io_uring_setup( this, cl.io.params, proc->idle_fd );
    121115                __cfadbg_print_safe(io_core, "Kernel I/O : Created ring for io_context %u (%p)\n", this.fd, &this);
    122116        }
     
    128122                __cfadbg_print_safe(io_core, "Kernel I/O : Destroyed ring for io_context %u\n", this.fd);
    129123        }
     124
     125        extern void __disable_interrupts_hard();
     126        extern void __enable_interrupts_hard();
    130127
    131128        static void __io_uring_setup( $io_context & this, const io_context_params & params_in, int procfd ) {
     
    217214
    218215                // completion queue
    219                 cq.lock      = false;
    220                 cq.id        = MAX;
    221                 cq.ts        = rdtscl();
    222216                cq.head      = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
    223217                cq.tail      = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
     
    233227                        __cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd);
    234228
     229                        __disable_interrupts_hard();
     230
    235231                        int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1);
    236232                        if (ret < 0) {
    237233                                abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno));
    238234                        }
     235
     236                        __enable_interrupts_hard();
    239237
    240238                        __cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd);
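
Note on the registration wrapped by __disable/__enable_interrupts_hard above: it is a plain IORING_REGISTER_EVENTFD call, which makes every completion on the ring readable as an eventfd event, so an idle processor can park in a read on that fd. A hedged standalone sketch using the same raw syscall:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/eventfd.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/io_uring.h>

    /* Attach an eventfd to a ring so every completion makes the fd
     * readable; an idle processor can then block in read(evfd). */
    static int register_completion_eventfd(int ring_fd) {
        int evfd = eventfd(0, 0);
        if (evfd < 0) { perror("eventfd"); exit(EXIT_FAILURE); }
        if (syscall(__NR_io_uring_register, ring_fd,
                    IORING_REGISTER_EVENTFD, &evfd, 1) < 0) {
            fprintf(stderr, "IORING_REGISTER_EVENTFD: %s\n", strerror(errno));
            exit(EXIT_FAILURE);
        }
        return evfd;
    }
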
  • libcfa/src/concurrency/io/types.hfa

    r2e9b59b rba897d21  
    2323#include "bits/locks.hfa"
    2424#include "bits/queue.hfa"
    25 #include "iofwd.hfa"
    2625#include "kernel/fwd.hfa"
    27 #include "limits.hfa"
    2826
    2927#if defined(CFA_HAVE_LINUX_IO_URING_H)
     
    7977
    8078        struct __cmp_ring_t {
    81                 volatile bool lock;
    82 
    83                 unsigned id;
    84 
    85                 unsigned long long ts;
    86 
    8779                // Head and tail of the ring
    8880                volatile __u32 * head;
     
    136128        };
    137129
    138         static inline unsigned long long ts($io_context *& this) {
    139                 const __u32 head = *this->cq.head;
    140                 const __u32 tail = *this->cq.tail;
    141 
    142                 if(head == tail) return MAX;
    143 
    144                 return this->cq.ts;
    145         }
    146 
    147130        struct __pending_alloc {
    148131                inline __outstanding_io;
     
    187170        // void __ioctx_prepare_block($io_context & ctx);
    188171#endif
     172
     173//-----------------------------------------------------------------------
     174// IO user data
     175struct io_future_t {
     176        future_t self;
     177        __s32 result;
     178};
     179
     180static inline {
     181        thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {
     182                this.result = result;
     183                return fulfil(this.self, do_unpark);
     184        }
     185
     186        // Wait for the future to be fulfilled
     187        bool wait     ( io_future_t & this ) { return wait     (this.self); }
     188        void reset    ( io_future_t & this ) { return reset    (this.self); }
     189        bool available( io_future_t & this ) { return available(this.self); }
     190}
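
Note on io_future_t, which this change moves here from iofwd.hfa: it is a one-shot future carrying the cqe result next to the generic future_t. A plain C11 analogue of its contract (names are illustrative, not libcfa's):

    #include <stdatomic.h>
    #include <stdint.h>

    struct io_future {
        _Atomic int fulfilled;   /* 0 = pending, 1 = fulfilled     */
        int32_t     result;      /* cqe.res, valid once fulfilled  */
    };

    /* Completion side: store the result, then publish fulfilment. */
    static void fulfil(struct io_future *f, int32_t result) {
        f->result = result;
        atomic_store_explicit(&f->fulfilled, 1, memory_order_release);
    }

    /* Waiter side: the acquire load pairs with the release store,
     * so result is visible once fulfilled reads as 1. */
    static int available(struct io_future *f) {
        return atomic_load_explicit(&f->fulfilled, memory_order_acquire);
    }
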
  • libcfa/src/concurrency/iofwd.hfa

    r2e9b59b rba897d21  
    2525}
    2626#include "bits/defs.hfa"
    27 #include "kernel/fwd.hfa"
    2827#include "time.hfa"
    2928
     
    4948
    5049struct cluster;
     50struct io_future_t;
    5151struct $io_context;
    5252
     
    5858
    5959struct io_uring_sqe;
    60 
    61 //-----------------------------------------------------------------------
    62 // IO user data
    63 struct io_future_t {
    64         future_t self;
    65         __s32 result;
    66 };
    67 
    68 static inline {
    69         thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {
    70                 this.result = result;
    71                 return fulfil(this.self, do_unpark);
    72         }
    73 
    74         // Wait for the future to be fulfilled
    75         bool wait     ( io_future_t & this ) { return wait     (this.self); }
    76         void reset    ( io_future_t & this ) { return reset    (this.self); }
    77         bool available( io_future_t & this ) { return available(this.self); }
    78 }
    7960
    8061//----------
  • libcfa/src/concurrency/kernel.cfa

    r2e9b59b rba897d21  
    1919// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__
    2020
    21 #pragma GCC diagnostic push
    22 #pragma GCC diagnostic ignored "-Waddress-of-packed-member"
    23 
    2421//C Includes
    2522#include <errno.h>
     
    2825#include <signal.h>
    2926#include <unistd.h>
    30 
    3127extern "C" {
    3228        #include <sys/eventfd.h>
     
    3531
    3632//CFA Includes
    37 #include "kernel/private.hfa"
     33#include "kernel_private.hfa"
    3834#include "preemption.hfa"
    3935#include "strstream.hfa"
     
    4440#define __CFA_INVOKE_PRIVATE__
    4541#include "invoke.h"
    46 #pragma GCC diagnostic pop
    4742
    4843#if !defined(__CFA_NO_STATISTICS__)
     
    132127static void __wake_one(cluster * cltr);
    133128
    134 static void idle_sleep(processor * proc);
     129static void idle_sleep(processor * proc, io_future_t & future, iovec & iov);
    135130static bool mark_idle (__cluster_proc_list & idles, processor & proc);
    136131static void mark_awake(__cluster_proc_list & idles, processor & proc);
    137132
    138 extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
    139 extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));
    140 extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1)));
     133extern void __cfa_io_start( processor * );
     134extern bool __cfa_io_drain( processor * );
     135extern bool __cfa_io_flush( processor *, int min_comp );
     136extern void __cfa_io_stop ( processor * );
     137static inline bool __maybe_io_drain( processor * );
    141138
    142139#if defined(CFA_WITH_IO_URING_IDLE)
     
    162159        verify(this);
    163160
    164         /* paranoid */ verify( this->idle_wctx.ftr   != 0p );
    165         /* paranoid */ verify( this->idle_wctx.rdbuf != 0p );
    166 
    167         // used for idle sleep when io_uring is present
    168         // mark it as already fulfilled so we know if there is a pending request or not
    169         this->idle_wctx.ftr->self.ptr = 1p;
     161        io_future_t future; // used for idle sleep when io_uring is present
     162        future.self.ptr = 1p;  // mark it as already fulfilled so we know if there is a pending request or not
     163        eventfd_t idle_val;
     164        iovec idle_iovec = { &idle_val, sizeof(idle_val) };
     165
     166        __cfa_io_start( this );
    170167
    171168        __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
     
    192189                for() {
    193190                        // Check if there is pending io
    194                         __cfa_io_drain( this );
     191                        __maybe_io_drain( this );
    195192
    196193                        // Try to get the next thread
     
    198195
    199196                        if( !readyThread ) {
    200                                 // there is no point in holding submissions if we are idle
    201197                                __IO_STATS__(true, io.flush.idle++; )
    202                                 __cfa_io_flush( this );
    203 
    204                                 // drain again in case something showed up
    205                                 __cfa_io_drain( this );
     198                                __cfa_io_flush( this, 0 );
    206199
    207200                                readyThread = __next_thread( this->cltr );
     
    209202
    210203                        if( !readyThread ) for(5) {
     204                                __IO_STATS__(true, io.flush.idle++; )
     205
    211206                                readyThread = __next_thread_slow( this->cltr );
    212207
    213208                                if( readyThread ) break;
    214209
     215                 // It's unlikely we still have I/O to submit, but the arbiter could
    216                                 __IO_STATS__(true, io.flush.idle++; )
    217                                 __cfa_io_flush( this );
    218 
    219                                 // drain again in case something showed up
    220                                 __cfa_io_drain( this );
     210                                __cfa_io_flush( this, 0 );
    221211                        }
    222212
     
    241231                                }
    242232
    243                                 idle_sleep( this );
     233                                idle_sleep( this, future, idle_iovec );
    244234
    245235                                // We were woken up, remove self from idle
     
    261251                        if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
    262252
    263                         if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
     253                        if(this->io.pending && !this->io.dirty) {
    264254                                __IO_STATS__(true, io.flush.dirty++; )
    265                                 __cfa_io_flush( this );
     255                                __cfa_io_flush( this, 0 );
    266256                        }
    267257                }
     
    269259                __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
    270260        }
     261
     262        for(int i = 0; !available(future); i++) {
      263                if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has been spinning on a flush after exit loop.\n", 60);
     264                __cfa_io_flush( this, 1 );
     265        }
     266
     267        __cfa_io_stop( this );
    271268
    272269        post( this->terminated );
     
    637634
    638635        int fd = 1;
    639         if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
    640                 fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
     636        if( __atomic_load_n(&fdp->fd, __ATOMIC_SEQ_CST) != 1 ) {
     637                fd = __atomic_exchange_n(&fdp->fd, 1, __ATOMIC_RELAXED);
    641638        }
    642639
    643640        switch(fd) {
    644                 __attribute__((unused)) int ret;
    645641        case 0:
    646642                // If the processor isn't ready to sleep then the exchange will already wake it up
     
    660656                // If the processor was ready to sleep, we need to wake it up with an actual write
    661657                val = 1;
    662                 ret = eventfd_write( fd, val );
    663                 /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
     658                eventfd_write( fd, val );
    664659
    665660                #if !defined(__CFA_NO_STATISTICS__)
     
    682677        __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
    683678
    684         this->idle_wctx.sem = 1;
    685 
    686         this->idle_wctx.wake__time = rdtscl();
     679        this->idle_wctx.fd = 1;
    687680
    688681        eventfd_t val;
    689682        val = 1;
    690         __attribute__((unused)) int ret = eventfd_write( this->idle_wctx.evfd, val );
    691 
    692         /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
    693         /* paranoid */ verify( ! __preemption_enabled() );
    694 }
    695 
    696 static void idle_sleep(processor * this) {
    697         /* paranoid */ verify( this->idle_wctx.evfd != 1 );
    698         /* paranoid */ verify( this->idle_wctx.evfd != 2 );
    699 
     683        eventfd_write( this->idle_fd, val );
     684
     685        /* paranoid */ verify( ! __preemption_enabled() );
     686}
     687
     688static void idle_sleep(processor * this, io_future_t & future, iovec & iov) {
    700689        // Tell everyone we are ready to go do sleep
    701690        for() {
    702                 int expected = this->idle_wctx.sem;
     691                int expected = this->idle_wctx.fd;
    703692
    704693                // Someone already told us to wake-up! No time for a nap.
     
    706695
    707696                // Try to mark that we are going to sleep
    708                 if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
     697                if(__atomic_compare_exchange_n(&this->idle_wctx.fd, &expected, this->idle_fd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
    709698                        // Every one agreed, taking a nap
    710699                        break;
     
    724713                {
    725714                        eventfd_t val;
    726                         ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
     715                        ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
    727716                        if(ret < 0) {
    728717                                switch((int)errno) {
     
    746735                #endif
    747736        #else
    748                 __cfa_io_idle( this );
     737                // Do we already have a pending read
     738                if(available(future)) {
     739                        // There is no pending read, we need to add one
     740                        reset(future);
     741
     742                        __kernel_read(this, future, iov, this->idle_fd );
     743                }
     744
     745                __cfa_io_flush( this, 1 );
    749746        #endif
    750747}
     
    753750        __STATS__(true, ready.sleep.halts++; )
    754751
    755         proc.idle_wctx.sem = 0;
     752        proc.idle_wctx.fd = 0;
    756753
    757754        /* paranoid */ verify( ! __preemption_enabled() );
     
    834831#endif
    835832
    836 
     833static inline bool __maybe_io_drain( processor * proc ) {
     834        bool ret = false;
     835        #if defined(CFA_HAVE_LINUX_IO_URING_H)
     836                __cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);
     837
     838                // Check if we should drain the queue
     839                $io_context * ctx = proc->io.ctx;
     840                unsigned head = *ctx->cq.head;
     841                unsigned tail = *ctx->cq.tail;
     842                if(head == tail) return false;
     843                ready_schedule_lock();
     844                ret = __cfa_io_drain( proc );
     845                ready_schedule_unlock();
     846        #endif
     847        return ret;
     848}
    837849
    838850//-----------------------------------------------------------------------------
     
    891903        void print_stats_now( cluster & this, int flags ) {
    892904                crawl_cluster_stats( this );
    893                 __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
     905                __print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
    894906        }
    895907#endif
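
Note on the idle_wctx.fd handshake used by idle_sleep and __wake_one above: the field is tri-state, with 0 meaning running, 1 meaning a wake-up is already pending, and any larger value being the eventfd the parked processor blocks on. A hedged C sketch of that handshake:

    #include <stdatomic.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    /* Sleeper: advertise the eventfd, then block on it. */
    static void idle_sleep_sketch(_Atomic int *wctx, int evfd) {
        int expected = 0;
        /* Someone already told us to wake up? No time for a nap. */
        if (!atomic_compare_exchange_strong(wctx, &expected, evfd))
            return;                      /* expected was 1: skip the sleep */
        eventfd_t val;
        if (read(evfd, &val, sizeof val) < 0) {
            /* spurious wake or interrupted read; treat as woken */
        }
    }

    /* Waker: publish the wake-up; if the sleeper already parked,
     * the exchange returns its eventfd and we write to it. */
    static void wake_one_sketch(_Atomic int *wctx) {
        int fd = atomic_exchange(wctx, 1);
        if (fd > 1)
            eventfd_write(fd, 1);
    }
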
  • libcfa/src/concurrency/kernel.hfa

    r2e9b59b rba897d21  
    4848extern struct cluster * mainCluster;
    4949
     50 // Coroutine used by processors for the 2-step context switch
     50// Processor id, required for scheduling threads
     51
     52
    5153coroutine processorCtx_t {
    5254        struct processor * proc;
    5355};
    5456
    55 struct io_future_t;
    56 
    57 // Information needed for idle sleep
     57
    5858struct __fd_waitctx {
    59         // semaphore/future like object
    60         // values can be 0, 1 or some file descriptor.
    61         // 0 - is the default state
    62         // 1 - means the proc should wake-up immediately
    63         // FD - means the proc is going asleep and should be woken by writing to the FD.
    64         volatile int sem;
    65 
    66         // The event FD that corresponds to this processor
    67         int evfd;
    68 
    69         // buffer into which the proc will read from evfd
    70         // unused if not using io_uring for idle sleep
    71         void * rdbuf;
    72 
    73         // future use to track the read of the eventfd
    74         // unused if not using io_uring for idle sleep
    75         io_future_t * ftr;
    76 
    77         volatile unsigned long long wake__time;
    78         volatile unsigned long long sleep_time;
    79         volatile unsigned long long drain_time;
     59        volatile int fd;
    8060};
    8161
     
    11292        struct {
    11393                $io_context * ctx;
    114                 unsigned target;
    115                 volatile bool pending;
    116                 volatile bool dirty;
     94                bool pending;
     95                bool dirty;
    11796        } io;
    11897
     
    124103        bool pending_preemption;
    125104
    126         // context for idle sleep
     105        // Idle lock (kernel semaphore)
     106        int idle_fd;
     107
     108        // Idle waitctx
    127109        struct __fd_waitctx idle_wctx;
    128110
     
    173155void ^?{}(__intrusive_lane_t & this);
    174156
    175 // Aligned timestamps which are used by the ready queue and io subsystem
     157// Aligned timestamps which are used by the relaxed ready queue
    176158struct __attribute__((aligned(128))) __timestamp_t {
    177159        volatile unsigned long long tv;
     
    179161};
    180162
     163struct __attribute__((aligned(16))) __cache_id_t {
     164        volatile unsigned id;
     165};
     166
      167// Aligned help counters which are used by the relaxed ready queue
     168struct __attribute__((aligned(128))) __help_cnts_t {
     169        volatile unsigned long long src;
     170        volatile unsigned long long dst;
     171        volatile unsigned long long tri;
     172};
     173
    181174static inline void  ?{}(__timestamp_t & this) { this.tv = 0; this.ma = 0; }
    182175static inline void ^?{}(__timestamp_t &) {}
    183176
    184 
    185 struct __attribute__((aligned(16))) __cache_id_t {
    186         volatile unsigned id;
    187 };
     177struct __attribute__((aligned(128))) __ready_queue_caches_t;
     178void  ?{}(__ready_queue_caches_t & this);
     179void ^?{}(__ready_queue_caches_t & this);
     180
     181//TODO adjust cache size to ARCHITECTURE
     182// Structure holding the ready queue
     183struct __ready_queue_t {
     184        // Data tracking the actual lanes
     185        // On a seperate cacheline from the used struct since
      186        // On a separate cacheline from the used struct since
     187        // only changes on shrink/grow
     188        struct {
      189                // Array of lanes
     190                __intrusive_lane_t * volatile data;
     191
     192                // Array of times
     193                __timestamp_t * volatile tscs;
     194
     195                __cache_id_t * volatile caches;
     196
     197                // Array of stats
     198                __help_cnts_t * volatile help;
     199
     200                // Number of lanes (empty or not)
     201                volatile size_t count;
     202        } lanes;
     203};
     204
     205void  ?{}(__ready_queue_t & this);
     206void ^?{}(__ready_queue_t & this);
     207#if !defined(__CFA_NO_STATISTICS__)
     208        unsigned cnt(const __ready_queue_t & this, unsigned idx);
     209#endif
    188210
    189211// Idle Sleep
     
    211233// Cluster
    212234struct __attribute__((aligned(128))) cluster {
    213         struct {
    214                 struct {
    215                         // Arary of subqueues
    216                         __intrusive_lane_t * data;
    217 
    218                         // Time since subqueues were processed
    219                         __timestamp_t * tscs;
    220 
    221                         // Number of subqueue / timestamps
    222                         size_t count;
    223                 } readyQ;
    224 
    225                 struct {
    226                         // Array of $io_
    227                         $io_context ** data;
    228 
    229                         // Time since subqueues were processed
    230                         __timestamp_t * tscs;
    231 
    232                         // Number of I/O subqueues
    233                         size_t count;
    234                 } io;
    235 
    236                 // Cache each kernel thread belongs to
    237                 __cache_id_t * caches;
    238         } sched;
    239 
    240         // // Ready queue for threads
    241         // __ready_queue_t ready_queue;
     235        // Ready queue for threads
     236        __ready_queue_t ready_queue;
    242237
    243238        // Name of the cluster
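
Note on the aligned(128) attributes in this header: each lane timestamp and help counter is written by a different processor, so each instance is padded to its own 128-byte block (two 64-byte cache lines, covering adjacent-line prefetchers) to avoid false sharing between writers. The same idiom standalone:

    /* Sketch: one per-processor stamp per 128-byte block, so stores
     * from different processors never contend on the same cache line. */
    struct __attribute__((aligned(128))) per_proc_stamp {
        volatile unsigned long long tv;   /* last-touched timestamp */
        volatile unsigned long long ma;   /* moving average         */
    };

    _Static_assert(sizeof(struct per_proc_stamp) == 128,
                   "padding keeps neighbouring stamps on separate lines");
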
  • libcfa/src/concurrency/kernel/fwd.hfa

    r2e9b59b rba897d21  
    248248                        // check if the future is available
    249249                        bool available( future_t & this ) {
    250                                 while( this.ptr == 2p ) Pause();
    251250                                return this.ptr == 1p;
    252251                        }
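
Note on the Pause() loop removed above: future_t encodes its state in the pointer itself, and 2p appears to be a transient fulfilment-in-progress value, alongside 0p for empty and 1p for fulfilled, with any other value being the parked waiter. A hedged sketch of reading such a pointer-encoded state (the meaning of 2p is inferred from this loop, not documented here):

    #include <stdatomic.h>

    /* Pointer-encoded one-shot future state (illustrative values). */
    #define FTR_EMPTY     ((void *)0)  /* 0p: no result, no waiter     */
    #define FTR_FULFILLED ((void *)1)  /* 1p: result is ready          */
    #define FTR_BUSY      ((void *)2)  /* 2p: fulfilment in progress   */
    /* any other value: pointer to the parked waiter */

    static int ftr_available(_Atomic(void *) *ptr) {
        return atomic_load_explicit(ptr, memory_order_acquire) == FTR_FULFILLED;
    }
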
  • libcfa/src/concurrency/kernel/startup.cfa

    r2e9b59b rba897d21  
    3232
    3333// CFA Includes
    34 #include "kernel/private.hfa"
    35 #include "iofwd.hfa"
     34#include "kernel_private.hfa"
    3635#include "startup.hfa"                                  // STARTUP_PRIORITY_XXX
    3736#include "limits.hfa"
     
    9897extern void __kernel_alarm_startup(void);
    9998extern void __kernel_alarm_shutdown(void);
    100 extern void __cfa_io_start( processor * );
    101 extern void __cfa_io_stop ( processor * );
    10299
    103100//-----------------------------------------------------------------------------
     
    114111KERNEL_STORAGE(__stack_t,            mainThreadCtx);
    115112KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
    116 KERNEL_STORAGE(eventfd_t,            mainIdleEventFd);
    117 KERNEL_STORAGE(io_future_t,          mainIdleFuture);
    118113#if !defined(__CFA_NO_STATISTICS__)
    119114KERNEL_STORAGE(__stats_t, mainProcStats);
     
    229224        (*mainProcessor){};
    230225
    231         mainProcessor->idle_wctx.rdbuf = &storage_mainIdleEventFd;
    232         mainProcessor->idle_wctx.ftr   = (io_future_t*)&storage_mainIdleFuture;
    233         /* paranoid */ verify( sizeof(storage_mainIdleEventFd) == sizeof(eventfd_t) );
    234 
    235         __cfa_io_start( mainProcessor );
    236226        register_tls( mainProcessor );
    237227
     
    315305
    316306        unregister_tls( mainProcessor );
    317         __cfa_io_stop( mainProcessor );
    318307
    319308        // Destroy the main processor and its context in reverse order of construction
     
    364353        proc->local_data = &__cfaabi_tls;
    365354
    366         __cfa_io_start( proc );
    367355        register_tls( proc );
    368 
    369         // used for idle sleep when io_uring is present
    370         io_future_t future;
    371         eventfd_t idle_buf;
    372         proc->idle_wctx.ftr = &future;
    373         proc->idle_wctx.rdbuf = &idle_buf;
    374 
    375356
    376357        // SKULLDUGGERY: We want to create a context for the processor coroutine
     
    414395
    415396        unregister_tls( proc );
    416         __cfa_io_stop( proc );
    417397
    418398        return 0p;
     
    535515        this.rdq.its = 0;
    536516        this.rdq.itr = 0;
    537         this.rdq.id  = 0;
     517        this.rdq.id  = MAX;
    538518        this.rdq.target = MAX;
    539519        this.rdq.last = MAX;
     
    552532        this.local_data = 0p;
    553533
    554         idle_wctx.evfd = eventfd(0, 0);
    555         if (idle_wctx.evfd < 0) {
     534        this.idle_fd = eventfd(0, 0);
     535        if (idle_fd < 0) {
    556536                abort("KERNEL ERROR: PROCESSOR EVENTFD - %s\n", strerror(errno));
    557537        }
    558538
    559         idle_wctx.sem = 0;
    560         idle_wctx.wake__time = 0;
     539        this.idle_wctx.fd = 0;
    561540
    562541        // I'm assuming these two are reserved for standard input and output
    563542        // so I'm using them as sentinels with idle_wctx.
    564         /* paranoid */ verify( idle_wctx.evfd != 0 );
    565         /* paranoid */ verify( idle_wctx.evfd != 1 );
     543        /* paranoid */ verify( this.idle_fd != 0 );
     544        /* paranoid */ verify( this.idle_fd != 1 );
    566545
    567546        #if !defined(__CFA_NO_STATISTICS__)
     
    575554// Not a ctor, it just preps the destruction but should not destroy members
    576555static void deinit(processor & this) {
    577         close(this.idle_wctx.evfd);
     556        close(this.idle_fd);
    578557}
    579558
     
    626605        this.name = name;
    627606        this.preemption_rate = preemption_rate;
    628         this.sched.readyQ.data = 0p;
    629         this.sched.readyQ.tscs = 0p;
    630         this.sched.readyQ.count = 0;
    631         this.sched.io.tscs = 0p;
    632         this.sched.io.data = 0p;
    633         this.sched.caches = 0p;
     607        ready_queue{};
    634608
    635609        #if !defined(__CFA_NO_STATISTICS__)
     
    670644        // Unlock the RWlock
    671645        ready_mutate_unlock( last_size );
    672 
    673         ready_queue_close( &this );
    674         /* paranoid */ verify( this.sched.readyQ.data == 0p );
    675         /* paranoid */ verify( this.sched.readyQ.tscs == 0p );
    676         /* paranoid */ verify( this.sched.readyQ.count == 0 );
    677         /* paranoid */ verify( this.sched.io.tscs == 0p );
    678         /* paranoid */ verify( this.sched.caches == 0p );
    679 
    680646        enable_interrupts( false ); // Don't poll, could be in main cluster
    681 
    682647
    683648        #if !defined(__CFA_NO_STATISTICS__)
  • libcfa/src/concurrency/locks.cfa

    r2e9b59b rba897d21  
    1919
    2020#include "locks.hfa"
    21 #include "kernel/private.hfa"
     21#include "kernel_private.hfa"
    2222
    2323#include <kernel.hfa>
  • libcfa/src/concurrency/locks.hfa

    r2e9b59b rba897d21  
    164164}
    165165
    166 static inline void lock(linear_backoff_then_block_lock & this) with(this) {
     166static inline bool lock(linear_backoff_then_block_lock & this) with(this) {
    167167        // if owner just return
    168         if (active_thread() == owner) return;
     168        if (active_thread() == owner) return true;
    169169        size_t compare_val = 0;
    170170        int spin = spin_start;
     
    172172        for( ;; ) {
    173173                compare_val = 0;
    174                 if (internal_try_lock(this, compare_val)) return;
     174                if (internal_try_lock(this, compare_val)) return true;
    175175                if (2 == compare_val) break;
    176176                for (int i = 0; i < spin; i++) Pause();
     
    179179        }
    180180
    181         if(2 != compare_val && try_lock_contention(this)) return;
     181        if(2 != compare_val && try_lock_contention(this)) return true;
    182182        // block until signalled
    183         while (block(this)) if(try_lock_contention(this)) return;
     183        while (block(this)) if(try_lock_contention(this)) return true;
     184
     185        // this should never be reached as block(this) always returns true
     186        return false;
    184187}
    185188
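
Note on linear_backoff_then_block_lock, whose lock() now returns bool: the acquire path spins with a growing pause between attempts before falling back to blocking. A self-contained C11 sketch (the spin parameters and the park mechanism are stand-ins, not libcfa's):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define SPIN_START 4
    #define SPIN_END   1024

    static inline void cpu_pause(void) {
        __asm__ __volatile__("" ::: "memory");  /* stand-in for Pause() */
    }

    /* Always returns true once the lock is held, mirroring the new
     * bool return; a real lock parks the thread instead of running
     * the final spin loop. */
    static bool lock(atomic_flag *l) {
        for (int spin = SPIN_START; spin <= SPIN_END; spin += SPIN_START) {
            if (!atomic_flag_test_and_set_explicit(l, memory_order_acquire))
                return true;
            for (int i = 0; i < spin; i++) cpu_pause();
        }
        while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
            cpu_pause();                        /* block(this) in libcfa */
        return true;
    }
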
  • libcfa/src/concurrency/monitor.cfa

    r2e9b59b rba897d21  
    2222#include <inttypes.h>
    2323
    24 #include "kernel/private.hfa"
     24#include "kernel_private.hfa"
    2525
    2626#include "bits/algorithm.hfa"
  • libcfa/src/concurrency/mutex.cfa

    r2e9b59b rba897d21  
    2121#include "mutex.hfa"
    2222
    23 #include "kernel/private.hfa"
     23#include "kernel_private.hfa"
    2424
    2525//-----------------------------------------------------------------------------
  • libcfa/src/concurrency/mutex_stmt.hfa

    r2e9b59b rba897d21  
    1212};
    1313
     14forall(L & | is_lock(L)) {
    1415
    15 struct __mutex_stmt_lock_guard {
    16     void ** lockarr;
    17     __lock_size_t count;
    18 };
     16    struct __mutex_stmt_lock_guard {
     17        L ** lockarr;
     18        __lock_size_t count;
     19    };
     20   
     21    static inline void ?{}( __mutex_stmt_lock_guard(L) & this, L * lockarr [], __lock_size_t count  ) {
     22        this.lockarr = lockarr;
     23        this.count = count;
    1924
    20 static inline void ?{}( __mutex_stmt_lock_guard & this, void * lockarr [], __lock_size_t count  ) {
    21     this.lockarr = lockarr;
    22     this.count = count;
     25        // Sort locks based on address
     26        __libcfa_small_sort(this.lockarr, count);
    2327
    24     // Sort locks based on address
    25     __libcfa_small_sort(this.lockarr, count);
    26 
    27     // acquire locks in order
    28     // for ( size_t i = 0; i < count; i++ ) {
    29     //     lock(*this.lockarr[i]);
    30     // }
    31 }
    32 
    33 static inline void ^?{}( __mutex_stmt_lock_guard & this ) with(this) {
    34     // for ( size_t i = count; i > 0; i-- ) {
    35     //     unlock(*lockarr[i - 1]);
    36     // }
    37 }
    38 
    39 forall(L & | is_lock(L)) {
     28        // acquire locks in order
     29        for ( size_t i = 0; i < count; i++ ) {
     30            lock(*this.lockarr[i]);
     31        }
     32    }
     33   
     34    static inline void ^?{}( __mutex_stmt_lock_guard(L) & this ) with(this) {
     35        for ( size_t i = count; i > 0; i-- ) {
     36            unlock(*lockarr[i - 1]);
     37        }
     38    }
    4039
    4140    struct scoped_lock {
     
    5251    }
    5352
    54     static inline void * __get_mutexstmt_lock_ptr( L & this ) {
     53    static inline L * __get_ptr( L & this ) {
    5554        return &this;
    5655    }
    5756
    58     static inline L __get_mutexstmt_lock_type( L & this );
     57    static inline L __get_type( L & this );
    5958
    60     static inline L __get_mutexstmt_lock_type( L * this );
     59    static inline L __get_type( L * this );
    6160}
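
Note on the guard rewritten above: it acquires every lock of a mutex statement in one global order, sorted by address, which rules out the circular hold-and-wait of opposite-order acquisition. The same discipline sketched with pthread mutexes:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    static int by_addr(const void *a, const void *b) {
        uintptr_t x = (uintptr_t)*(pthread_mutex_t *const *)a;
        uintptr_t y = (uintptr_t)*(pthread_mutex_t *const *)b;
        return (x > y) - (x < y);
    }

    /* Acquire in ascending address order... */
    static void lock_all(pthread_mutex_t *locks[], size_t n) {
        qsort(locks, n, sizeof locks[0], by_addr);
        for (size_t i = 0; i < n; i++)
            pthread_mutex_lock(locks[i]);
    }

    /* ...and release in the reverse order, as the guard's dtor does. */
    static void unlock_all(pthread_mutex_t *locks[], size_t n) {
        for (size_t i = n; i > 0; i--)
            pthread_mutex_unlock(locks[i - 1]);
    }
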
  • libcfa/src/concurrency/preemption.cfa

    r2e9b59b rba897d21  
    3131#include "bits/debug.hfa"
    3232#include "bits/signal.hfa"
    33 #include "kernel/private.hfa"
     33#include "kernel_private.hfa"
    3434
    3535
  • libcfa/src/concurrency/ready_queue.cfa

    r2e9b59b rba897d21  
    2020
    2121
     22// #define USE_RELAXED_FIFO
     23// #define USE_WORK_STEALING
     24// #define USE_CPU_WORK_STEALING
    2225#define USE_AWARE_STEALING
    2326
    2427#include "bits/defs.hfa"
    2528#include "device/cpu.hfa"
    26 #include "kernel/cluster.hfa"
    27 #include "kernel/private.hfa"
    28 
    29 // #include <errno.h>
    30 // #include <unistd.h>
     29#include "kernel_private.hfa"
     30
     31#include "stdlib.hfa"
     32#include "limits.hfa"
     33#include "math.hfa"
     34
     35#include <errno.h>
     36#include <unistd.h>
     37
     38extern "C" {
     39        #include <sys/syscall.h>  // __NR_xxx
     40}
    3141
    3242#include "ready_subqueue.hfa"
     
    4050#endif
    4151
      52// No overridden function, no environment variable, no define
     53// fall back to a magic number
     54#ifndef __CFA_MAX_PROCESSORS__
     55        #define __CFA_MAX_PROCESSORS__ 1024
     56#endif
     57
     58#if   defined(USE_AWARE_STEALING)
     59        #define READYQ_SHARD_FACTOR 2
     60        #define SEQUENTIAL_SHARD 2
     61#elif defined(USE_CPU_WORK_STEALING)
     62        #define READYQ_SHARD_FACTOR 2
     63#elif defined(USE_RELAXED_FIFO)
     64        #define BIAS 4
     65        #define READYQ_SHARD_FACTOR 4
     66        #define SEQUENTIAL_SHARD 1
     67#elif defined(USE_WORK_STEALING)
     68        #define READYQ_SHARD_FACTOR 2
     69        #define SEQUENTIAL_SHARD 2
     70#else
     71        #error no scheduling strategy selected
     72#endif
     73
    4274static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
    4375static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
    4476static inline struct thread$ * search(struct cluster * cltr);
     77static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);
     78
     79
      80// returns the maximum number of processors the RWLock supports
     81__attribute__((weak)) unsigned __max_processors() {
     82        const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
     83        if(!max_cores_s) {
     84                __cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
     85                return __CFA_MAX_PROCESSORS__;
     86        }
     87
     88        char * endptr = 0p;
     89        long int max_cores_l = strtol(max_cores_s, &endptr, 10);
     90        if(max_cores_l < 1 || max_cores_l > 65535) {
     91                __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
     92                return __CFA_MAX_PROCESSORS__;
     93        }
     94        if('\0' != *endptr) {
     95                __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
     96                return __CFA_MAX_PROCESSORS__;
     97        }
     98
     99        return max_cores_l;
     100}
     101
     102#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
     103        // No forward declaration needed
     104        #define __kernel_rseq_register rseq_register_current_thread
     105        #define __kernel_rseq_unregister rseq_unregister_current_thread
     106#elif defined(CFA_HAVE_LINUX_RSEQ_H)
     107        static void __kernel_raw_rseq_register  (void);
     108        static void __kernel_raw_rseq_unregister(void);
     109
     110        #define __kernel_rseq_register __kernel_raw_rseq_register
     111        #define __kernel_rseq_unregister __kernel_raw_rseq_unregister
     112#else
     113        // No forward declaration needed
     114        // No initialization needed
     115        static inline void noop(void) {}
     116
     117        #define __kernel_rseq_register noop
     118        #define __kernel_rseq_unregister noop
     119#endif
     120
     121//=======================================================================
     122// Cluster wide reader-writer lock
     123//=======================================================================
     124void  ?{}(__scheduler_RWLock_t & this) {
     125        this.max   = __max_processors();
     126        this.alloc = 0;
     127        this.ready = 0;
     128        this.data  = alloc(this.max);
     129        this.write_lock  = false;
     130
     131        /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
     132        /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
     133
     134}
     135void ^?{}(__scheduler_RWLock_t & this) {
     136        free(this.data);
     137}
     138
     139
     140//=======================================================================
     141// Lock-Free registering/unregistering of threads
     142unsigned register_proc_id( void ) with(*__scheduler_lock) {
     143        __kernel_rseq_register();
     144
     145        bool * handle = (bool *)&kernelTLS().sched_lock;
     146
     147        // Step - 1 : check if there is already space in the data
     148        uint_fast32_t s = ready;
     149
     150        // Check among all the ready
     151        for(uint_fast32_t i = 0; i < s; i++) {
      152                bool * volatile * cell = (bool * volatile *)&data[i]; // Cforall is bugged and the double volatiles cause problems
     153                /* paranoid */ verify( handle != *cell );
     154
     155                bool * null = 0p; // Re-write every loop since compare thrashes it
     156                if( __atomic_load_n(cell, (int)__ATOMIC_RELAXED) == null
     157                        && __atomic_compare_exchange_n( cell, &null, handle, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
     158                        /* paranoid */ verify(i < ready);
     159                        /* paranoid */ verify( (kernelTLS().sched_id = i, true) );
     160                        return i;
     161                }
     162        }
     163
      164        if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);
     165
     166        // Step - 2 : F&A to get a new spot in the array.
     167        uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
      168        if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);
     169
     170        // Step - 3 : Mark space as used and then publish it.
     171        data[n] = handle;
     172        while() {
     173                unsigned copy = n;
     174                if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
     175                        && __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
     176                        break;
     177                Pause();
     178        }
     179
     180        // Return new spot.
     181        /* paranoid */ verify(n < ready);
     182        /* paranoid */ verify( (kernelTLS().sched_id = n, true) );
     183        return n;
     184}
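
Registration above is a three-step, lock-free protocol: scan the already-published prefix for a recycled slot (CAS from null), otherwise fetch-and-add a fresh index, then publish by advancing `ready` only once every lower index has been published, so readers never see a gap. A minimal stand-alone sketch of the same pattern in plain C11 atomics; `NSLOTS`, `slots`, `nalloc`, `nready` and `slot_register` are illustrative names, not the runtime's identifiers:

        #include <stdatomic.h>
        #include <stdlib.h>
        #include <sched.h>

        #define NSLOTS 128
        static _Atomic(void *) slots[NSLOTS];
        static atomic_uint     nalloc;
        static atomic_uint     nready;

        // Register `handle`, returning the slot index it now owns.
        unsigned slot_register(void * handle) {
                // Step 1 : scan the published prefix for a slot freed by an earlier unregister.
                unsigned s = atomic_load_explicit(&nready, memory_order_relaxed);
                for (unsigned i = 0; i < s; i++) {
                        void * expected = NULL; // reset every iteration, the CAS overwrites it
                        if (atomic_compare_exchange_strong(&slots[i], &expected, handle))
                                return i;
                }

                // Step 2 : no free slot, claim a brand-new index with fetch-and-add.
                unsigned n = atomic_fetch_add(&nalloc, 1);
                if (n >= NSLOTS) abort();

                // Step 3 : fill the slot, then publish indices strictly in order so a
                // reader iterating [0, nready) never observes an unfilled slot.
                atomic_store_explicit(&slots[n], handle, memory_order_relaxed);
                for (;;) {
                        unsigned expected = n;
                        if (atomic_compare_exchange_weak(&nready, &expected, n + 1))
                                break;
                        sched_yield(); // some slot below n is not published yet
                }
                return n;
        }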
     185
     186void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {
     187        /* paranoid */ verify(id < ready);
     188        /* paranoid */ verify(id == kernelTLS().sched_id);
     189        /* paranoid */ verify(data[id] == &kernelTLS().sched_lock);
     190
      191        bool * volatile * cell = (bool * volatile *)&data[id]; // Cforall is bugged and the double volatiles cause problems
     192
     193        __atomic_store_n(cell, 0p, __ATOMIC_RELEASE);
     194
     195        __kernel_rseq_unregister();
     196}
     197
     198//-----------------------------------------------------------------------
     199// Writer side : acquire when changing the ready queue, e.g. adding more
     200//  queues or removing them.
     201uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
     202        /* paranoid */ verify( ! __preemption_enabled() );
     203
     204        // Step 1 : lock global lock
      205        // It is needed to prevent processors that register mid critical-section
      206        //   from simply locking their own lock and entering.
     207        __atomic_acquire( &write_lock );
     208
      209        // Make sure we won't deadlock ourselves
     210        // Checking before acquiring the writer lock isn't safe
     211        // because someone else could have locked us.
     212        /* paranoid */ verify( ! kernelTLS().sched_lock );
     213
     214        // Step 2 : lock per-proc lock
     215        // Processors that are currently being registered aren't counted
     216        //   but can't be in read_lock or in the critical section.
     217        // All other processors are counted
     218        uint_fast32_t s = ready;
     219        for(uint_fast32_t i = 0; i < s; i++) {
     220                volatile bool * llock = data[i];
     221                if(llock) __atomic_acquire( llock );
     222        }
     223
     224        /* paranoid */ verify( ! __preemption_enabled() );
     225        return s;
     226}
     227
     228void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
     229        /* paranoid */ verify( ! __preemption_enabled() );
     230
     231        // Step 1 : release local locks
      232        // This must be done while the global lock is held to prevent
      233        //   threads that were created mid critical-section
      234        //   from racing to lock their local locks and having the writer
      235        //   immediately unlock them
     236        // Alternative solution : return s in write_lock and pass it to write_unlock
     237        for(uint_fast32_t i = 0; i < last_s; i++) {
     238                volatile bool * llock = data[i];
     239                if(llock) __atomic_store_n(llock, (bool)false, __ATOMIC_RELEASE);
     240        }
     241
     242        // Step 2 : release global lock
     243        /*paranoid*/ assert(true == write_lock);
     244        __atomic_store_n(&write_lock, (bool)false, __ATOMIC_RELEASE);
     245
     246        /* paranoid */ verify( ! __preemption_enabled() );
     247}
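
Together the pair brackets any structural change to the ready queue: the snapshot count returned by ready_mutate_lock must be handed back to ready_mutate_unlock so only the per-processor locks actually taken are released, even if more processors registered in the meantime. A hedged usage sketch, where grow_lanes is a made-up placeholder for the actual mutation:

        // caller runs with preemption disabled, per the verifies above
        uint_fast32_t last = ready_mutate_lock();  // writer lock + every registered proc lock
        grow_lanes();                              // hypothetical structural change
        ready_mutate_unlock(last);                 // release exactly the locks taken above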
     248
     249//=======================================================================
     250// caches handling
     251
     252struct __attribute__((aligned(128))) __ready_queue_caches_t {
     253        // Count States:
     254        // - 0  : No one is looking after this cache
     255        // - 1  : No one is looking after this cache, BUT it's not empty
     256        // - 2+ : At least one processor is looking after this cache
     257        volatile unsigned count;
     258};
     259
     260void  ?{}(__ready_queue_caches_t & this) { this.count = 0; }
     261void ^?{}(__ready_queue_caches_t & this) {}
     262
     263static inline void depart(__ready_queue_caches_t & cache) {
     264        /* paranoid */ verify( cache.count > 1);
     265        __atomic_fetch_add(&cache.count, -1, __ATOMIC_SEQ_CST);
     266        /* paranoid */ verify( cache.count != 0);
      267        /* paranoid */ verify( cache.count < 65536 ); // This verify assumes no cluster will have more than 65000 kernel threads mapped to a single cache, which could happen but would be extremely unusual.
     268}
     269
     270static inline void arrive(__ready_queue_caches_t & cache) {
     271        // for() {
     272        //      unsigned expected = cache.count;
     273        //      unsigned desired  = 0 == expected ? 2 : expected + 1;
     274        // }
     275}
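
arrive is left as a stub above; under the count states documented on __ready_queue_caches_t (0: unattended, 1: unattended but non-empty, 2+: attended), one plausible completion, and only a sketch of that reading rather than the author's implementation, is a CAS loop that jumps from 0 straight to 2 and otherwise increments, mirroring the decrement in depart:

        static inline void arrive(__ready_queue_caches_t & cache) {
                for() {
                        unsigned expected = __atomic_load_n(&cache.count, __ATOMIC_RELAXED);
                        unsigned desired  = (0 == expected) ? 2 : expected + 1; // skip state 1, "unattended but non-empty"
                        if(__atomic_compare_exchange_n(&cache.count, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) break;
                        Pause();
                }
        }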
    45276
    46277//=======================================================================
    47278// Cforall Ready Queue used for scheduling
    48279//=======================================================================
    49 // void ?{}(__ready_queue_t & this) with (this) {
    50 //      lanes.data   = 0p;
    51 //      lanes.tscs   = 0p;
    52 //      lanes.caches = 0p;
    53 //      lanes.count  = 0;
    54 // }
    55 
    56 // void ^?{}(__ready_queue_t & this) with (this) {
    57 //      free(lanes.data);
    58 //      free(lanes.tscs);
    59 //      free(lanes.caches);
    60 // }
     280unsigned long long moving_average(unsigned long long currtsc, unsigned long long instsc, unsigned long long old_avg) {
     281        /* paranoid */ verifyf( currtsc < 45000000000000000, "Suspiciously large current time: %'llu (%llx)\n", currtsc, currtsc );
     282        /* paranoid */ verifyf( instsc  < 45000000000000000, "Suspiciously large insert time: %'llu (%llx)\n", instsc, instsc );
     283        /* paranoid */ verifyf( old_avg < 15000000000000, "Suspiciously large previous average: %'llu (%llx)\n", old_avg, old_avg );
     284
     285        const unsigned long long new_val = currtsc > instsc ? currtsc - instsc : 0;
     286        const unsigned long long total_weight = 16;
     287        const unsigned long long new_weight   = 4;
     288        const unsigned long long old_weight = total_weight - new_weight;
     289        const unsigned long long ret = ((new_weight * new_val) + (old_weight * old_avg)) / total_weight;
     290        return ret;
     291}
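
The result is an exponentially weighted average with a fixed 4/16 blend: each call keeps 12/16 of the previous average and mixes in 4/16 of the newest sample. For example, with currtsc = 3000 and instsc = 1000 (a 2000-cycle sample) and an old average of 1000:

        moving_average(3000, 1000, 1000) == (4 * 2000 + 12 * 1000) / 16 == 1250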
     292
     293void ?{}(__ready_queue_t & this) with (this) {
     294        #if defined(USE_CPU_WORK_STEALING)
     295                lanes.count = cpu_info.hthrd_count * READYQ_SHARD_FACTOR;
     296                lanes.data = alloc( lanes.count );
     297                lanes.tscs = alloc( lanes.count );
     298                lanes.help = alloc( cpu_info.hthrd_count );
     299
     300                for( idx; (size_t)lanes.count ) {
     301                        (lanes.data[idx]){};
     302                        lanes.tscs[idx].tv = rdtscl();
     303                        lanes.tscs[idx].ma = rdtscl();
     304                }
     305                for( idx; (size_t)cpu_info.hthrd_count ) {
     306                        lanes.help[idx].src = 0;
     307                        lanes.help[idx].dst = 0;
     308                        lanes.help[idx].tri = 0;
     309                }
     310        #else
     311                lanes.data   = 0p;
     312                lanes.tscs   = 0p;
     313                lanes.caches = 0p;
     314                lanes.help   = 0p;
     315                lanes.count  = 0;
     316        #endif
     317}
     318
     319void ^?{}(__ready_queue_t & this) with (this) {
     320        #if !defined(USE_CPU_WORK_STEALING)
     321                verify( SEQUENTIAL_SHARD == lanes.count );
     322        #endif
     323
     324        free(lanes.data);
     325        free(lanes.tscs);
     326        free(lanes.caches);
     327        free(lanes.help);
     328}
    61329
    62330//-----------------------------------------------------------------------
    63 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->sched) {
    64         processor * const proc = kernelTLS().this_processor;
    65         const bool external = (!proc) || (cltr != proc->cltr);
    66         const bool remote   = hint == UNPARK_REMOTE;
    67         const size_t lanes_count = readyQ.count;
    68 
    69         /* paranoid */ verify( __shard_factor.readyq > 0 );
    70         /* paranoid */ verify( lanes_count > 0 );
    71 
    72         unsigned i;
    73         if( external || remote ) {
     74                 // Figure out where the thread was last time and make sure it's valid
    75                 /* paranoid */ verify(thrd->preferred >= 0);
    76                 unsigned start = thrd->preferred * __shard_factor.readyq;
    77                 if(start < lanes_count) {
    78                         do {
    79                                 unsigned r = __tls_rand();
    80                                 i = start + (r % __shard_factor.readyq);
    81                                 /* paranoid */ verify( i < lanes_count );
    82                                 // If we can't lock it retry
    83                         } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
     331#if defined(USE_AWARE_STEALING)
     332        __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
     333                processor * const proc = kernelTLS().this_processor;
     334                const bool external = (!proc) || (cltr != proc->cltr);
     335                const bool remote   = hint == UNPARK_REMOTE;
     336
     337                unsigned i;
     338                if( external || remote ) {
      339                        // Figure out where the thread was last time and make sure it's valid
     340                        /* paranoid */ verify(thrd->preferred >= 0);
     341                        if(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count) {
     342                                /* paranoid */ verify(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count);
     343                                unsigned start = thrd->preferred * READYQ_SHARD_FACTOR;
     344                                do {
     345                                        unsigned r = __tls_rand();
     346                                        i = start + (r % READYQ_SHARD_FACTOR);
     347                                        /* paranoid */ verify( i < lanes.count );
     348                                        // If we can't lock it retry
     349                                } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
     350                        } else {
     351                                do {
     352                                        i = __tls_rand() % lanes.count;
     353                                } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
     354                        }
    84355                } else {
    85356                        do {
    86                                 i = __tls_rand() % lanes_count;
    87                         } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
    88                 }
    89         } else {
     357                                unsigned r = proc->rdq.its++;
     358                                i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
     359                                /* paranoid */ verify( i < lanes.count );
     360                                // If we can't lock it retry
     361                        } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
     362                }
     363
     364                // Actually push it
     365                push(lanes.data[i], thrd);
     366
     367                // Unlock and return
     368                __atomic_unlock( &lanes.data[i].lock );
     369
     370                #if !defined(__CFA_NO_STATISTICS__)
     371                        if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
     372                        else __tls_stats()->ready.push.local.success++;
     373                #endif
     374        }
     375
     376        static inline unsigned long long calc_cutoff(const unsigned long long ctsc, const processor * proc, __ready_queue_t & rdq) {
     377                unsigned start = proc->rdq.id;
     378                unsigned long long max = 0;
     379                for(i; READYQ_SHARD_FACTOR) {
     380                        unsigned long long ptsc = ts(rdq.lanes.data[start + i]);
     381                        if(ptsc != -1ull) {
     382                                /* paranoid */ verify( start + i < rdq.lanes.count );
     383                                unsigned long long tsc = moving_average(ctsc, ptsc, rdq.lanes.tscs[start + i].ma);
     384                                if(tsc > max) max = tsc;
     385                        }
     386                }
     387                return (max + 2 * max) / 2;
     388        }
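
In integer arithmetic (max + 2 * max) / 2 is simply 150% of max, so a lane is only worth helping if its smoothed age exceeds one-and-a-half times the worst moving average among the processor's own shards:

        cutoff = (max + 2 * max) / 2 = 3 * max / 2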
     389
     390        __attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
     391                /* paranoid */ verify( lanes.count > 0 );
     392                /* paranoid */ verify( kernelTLS().this_processor );
     393                /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
     394
     395                processor * const proc = kernelTLS().this_processor;
     396                unsigned this = proc->rdq.id;
     397                /* paranoid */ verify( this < lanes.count );
     398                __cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);
     399
     400                // Figure out the current cpu and make sure it is valid
     401                const int cpu = __kernel_getcpu();
     402                /* paranoid */ verify(cpu >= 0);
     403                /* paranoid */ verify(cpu < cpu_info.hthrd_count);
     404                unsigned this_cache = cpu_info.llc_map[cpu].cache;
     405
     406                // Super important: don't write the same value over and over again
      407                // We want to maximise our chances that this particular value stays in cache
     408                if(lanes.caches[this / READYQ_SHARD_FACTOR].id != this_cache)
     409                        __atomic_store_n(&lanes.caches[this / READYQ_SHARD_FACTOR].id, this_cache, __ATOMIC_RELAXED);
     410
     411                const unsigned long long ctsc = rdtscl();
     412
     413                if(proc->rdq.target == MAX) {
     414                        uint64_t chaos = __tls_rand();
     415                        unsigned ext = chaos & 0xff;
     416                        unsigned other  = (chaos >> 8) % (lanes.count);
     417
     418                        if(ext < 3 || __atomic_load_n(&lanes.caches[other / READYQ_SHARD_FACTOR].id, __ATOMIC_RELAXED) == this_cache) {
     419                                proc->rdq.target = other;
     420                        }
     421                }
     422                else {
     423                        const unsigned target = proc->rdq.target;
      424                        __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tsc %llu\n", this, target, lanes.tscs[target].tv);
     425                        /* paranoid */ verify( lanes.tscs[target].tv != MAX );
     426                        if(target < lanes.count) {
     427                                const unsigned long long cutoff = calc_cutoff(ctsc, proc, cltr->ready_queue);
     428                                const unsigned long long age = moving_average(ctsc, lanes.tscs[target].tv, lanes.tscs[target].ma);
     429                                __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
     430                                if(age > cutoff) {
     431                                        thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
     432                                        if(t) return t;
     433                                }
     434                        }
     435                        proc->rdq.target = MAX;
     436                }
     437
     438                for(READYQ_SHARD_FACTOR) {
     439                        unsigned i = this + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
     440                        if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
     441                }
     442
      443                // All lanes were empty, return 0p
     444                return 0p;
     445
     446        }
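
Since ext is the low 8 bits of __tls_rand(), the ext < 3 test above makes the helper adopt a completely random lane with probability 3/256, about 1.2%; the rest of the time the candidate is only adopted if it maps to the same last-level cache, keeping helping traffic cache-local:

        P(accept any lane)          = 3/256   ≈ 1.2%
        P(require same LLC first)   = 253/256 ≈ 98.8%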
     447        __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
     448                unsigned i = __tls_rand() % lanes.count;
     449                return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
     450        }
     451        __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
     452                return search(cltr);
     453        }
     454#endif
     455#if defined(USE_CPU_WORK_STEALING)
     456        __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
     457                __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
     458
     459                processor * const proc = kernelTLS().this_processor;
     460                const bool external = (!proc) || (cltr != proc->cltr);
     461
     462                // Figure out the current cpu and make sure it is valid
     463                const int cpu = __kernel_getcpu();
     464                /* paranoid */ verify(cpu >= 0);
     465                /* paranoid */ verify(cpu < cpu_info.hthrd_count);
     466                /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);
     467
      468                // Figure out where the thread was last time and make sure it's valid
     469                /* paranoid */ verify(thrd->preferred >= 0);
     470                /* paranoid */ verify(thrd->preferred < cpu_info.hthrd_count);
     471                /* paranoid */ verify(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count);
     472                const int prf = thrd->preferred * READYQ_SHARD_FACTOR;
     473
     474                const cpu_map_entry_t & map;
     475                choose(hint) {
     476                        case UNPARK_LOCAL : &map = &cpu_info.llc_map[cpu];
     477                        case UNPARK_REMOTE: &map = &cpu_info.llc_map[prf];
     478                }
     479                /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
     480                /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
     481                /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);
     482
     483                const int start = map.self * READYQ_SHARD_FACTOR;
     484                unsigned i;
    90485                do {
    91                         unsigned r = proc->rdq.its++;
    92                         i = proc->rdq.id + (r % __shard_factor.readyq);
    93                         /* paranoid */ verify( i < lanes_count );
     486                        unsigned r;
     487                        if(unlikely(external)) { r = __tls_rand(); }
     488                        else { r = proc->rdq.its++; }
     489                        choose(hint) {
     490                                case UNPARK_LOCAL : i = start + (r % READYQ_SHARD_FACTOR);
     491                                case UNPARK_REMOTE: i = prf   + (r % READYQ_SHARD_FACTOR);
     492                        }
    94493                        // If we can't lock it retry
    95                 } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
    96         }
    97 
    98         // Actually push it
    99         push(readyQ.data[i], thrd);
    100 
    101         // Unlock and return
    102         __atomic_unlock( &readyQ.data[i].lock );
    103 
    104         #if !defined(__CFA_NO_STATISTICS__)
    105                 if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
    106                 else __tls_stats()->ready.push.local.success++;
    107         #endif
    108 }
    109 
    110 __attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->sched) {
    111         const size_t lanes_count = readyQ.count;
    112 
    113         /* paranoid */ verify( __shard_factor.readyq > 0 );
    114         /* paranoid */ verify( lanes_count > 0 );
    115         /* paranoid */ verify( kernelTLS().this_processor );
    116         /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes_count );
    117 
    118         processor * const proc = kernelTLS().this_processor;
    119         unsigned this = proc->rdq.id;
    120         /* paranoid */ verify( this < lanes_count );
    121         __cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);
    122 
     123         // Figure out what the current cache is
    124         const unsigned this_cache = cache_id(cltr, this / __shard_factor.readyq);
    125         const unsigned long long ctsc = rdtscl();
    126 
    127         if(proc->rdq.target == MAX) {
    128                 uint64_t chaos = __tls_rand();
    129                 unsigned ext = chaos & 0xff;
    130                 unsigned other  = (chaos >> 8) % (lanes_count);
    131 
    132                 if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.readyq].id, __ATOMIC_RELAXED) == this_cache) {
    133                         proc->rdq.target = other;
    134                 }
    135         }
    136         else {
    137                 const unsigned target = proc->rdq.target;
     138                 __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tsc %llu\n", this, target, readyQ.tscs[target].tv);
    139                 /* paranoid */ verify( readyQ.tscs[target].tv != MAX );
    140                 if(target < lanes_count) {
    141                         const unsigned long long cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq);
    142                         const unsigned long long age = moving_average(ctsc, readyQ.tscs[target].tv, readyQ.tscs[target].ma);
    143                         __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
    144                         if(age > cutoff) {
     494                } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
     495
     496                // Actually push it
     497                push(lanes.data[i], thrd);
     498
     499                // Unlock and return
     500                __atomic_unlock( &lanes.data[i].lock );
     501
     502                #if !defined(__CFA_NO_STATISTICS__)
     503                        if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
     504                        else __tls_stats()->ready.push.local.success++;
     505                #endif
     506
     507                __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
     508
     509        }
     510
     511        // Pop from the ready queue from a given cluster
     512        __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
     513                /* paranoid */ verify( lanes.count > 0 );
     514                /* paranoid */ verify( kernelTLS().this_processor );
     515
     516                processor * const proc = kernelTLS().this_processor;
     517                const int cpu = __kernel_getcpu();
     518                /* paranoid */ verify(cpu >= 0);
     519                /* paranoid */ verify(cpu < cpu_info.hthrd_count);
     520                /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);
     521
     522                const cpu_map_entry_t & map = cpu_info.llc_map[cpu];
     523                /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
     524                /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
     525                /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);
     526
     527                const int start = map.self * READYQ_SHARD_FACTOR;
     528                const unsigned long long ctsc = rdtscl();
     529
      530                // Do we already have a help target?
     531                if(proc->rdq.target == MAX) {
     532                        unsigned long long max = 0;
     533                        for(i; READYQ_SHARD_FACTOR) {
     534                                unsigned long long tsc = moving_average(ctsc, ts(lanes.data[start + i]), lanes.tscs[start + i].ma);
     535                                if(tsc > max) max = tsc;
     536                        }
     537                        //  proc->rdq.cutoff = (max + 2 * max) / 2;
     538                        /* paranoid */ verify(lanes.count < 65536); // The following code assumes max 65536 cores.
     539                        /* paranoid */ verify(map.count < 65536); // The following code assumes max 65536 cores.
     540
     541                        if(0 == (__tls_rand() % 100)) {
     542                                proc->rdq.target = __tls_rand() % lanes.count;
     543                        } else {
     544                                unsigned cpu_chaos = map.start + (__tls_rand() % map.count);
     545                                proc->rdq.target = (cpu_chaos * READYQ_SHARD_FACTOR) + (__tls_rand() % READYQ_SHARD_FACTOR);
     546                                /* paranoid */ verify(proc->rdq.target >= (map.start * READYQ_SHARD_FACTOR));
     547                                /* paranoid */ verify(proc->rdq.target <  ((map.start + map.count) * READYQ_SHARD_FACTOR));
     548                        }
     549
     550                        /* paranoid */ verify(proc->rdq.target != MAX);
     551                }
     552                else {
     553                        unsigned long long max = 0;
     554                        for(i; READYQ_SHARD_FACTOR) {
     555                                unsigned long long tsc = moving_average(ctsc, ts(lanes.data[start + i]), lanes.tscs[start + i].ma);
     556                                if(tsc > max) max = tsc;
     557                        }
     558                        const unsigned long long cutoff = (max + 2 * max) / 2;
     559                        {
     560                                unsigned target = proc->rdq.target;
     561                                proc->rdq.target = MAX;
     562                                lanes.help[target / READYQ_SHARD_FACTOR].tri++;
     563                                if(moving_average(ctsc, lanes.tscs[target].tv, lanes.tscs[target].ma) > cutoff) {
     564                                        thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
     565                                        proc->rdq.last = target;
     566                                        if(t) return t;
     567                                }
     568                                proc->rdq.target = MAX;
     569                        }
     570
     571                        unsigned last = proc->rdq.last;
     572                        if(last != MAX && moving_average(ctsc, lanes.tscs[last].tv, lanes.tscs[last].ma) > cutoff) {
     573                                thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));
     574                                if(t) return t;
     575                        }
     576                        else {
     577                                proc->rdq.last = MAX;
     578                        }
     579                }
     580
     581                for(READYQ_SHARD_FACTOR) {
     582                        unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
     583                        if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
     584                }
     585
      586                // All lanes were empty, return 0p
     587                return 0p;
     588        }
     589
     590        __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
     591                processor * const proc = kernelTLS().this_processor;
     592                unsigned last = proc->rdq.last;
     593                if(last != MAX) {
     594                        struct thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal));
     595                        if(t) return t;
     596                        proc->rdq.last = MAX;
     597                }
     598
     599                unsigned i = __tls_rand() % lanes.count;
     600                return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
     601        }
     602        __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
     603                return search(cltr);
     604        }
     605#endif
     606#if defined(USE_RELAXED_FIFO)
     607        //-----------------------------------------------------------------------
     608        // get index from random number with or without bias towards queues
      609        static inline [unsigned, bool] idx_from_r(unsigned r, int preferred) {
     610                unsigned i;
     611                bool local;
     612                unsigned rlow  = r % BIAS;
     613                unsigned rhigh = r / BIAS;
     614                if((0 != rlow) && preferred >= 0) {
     615                        // (BIAS - 1) out of BIAS chances
      616                        // Use preferred queues
     617                        i = preferred + (rhigh % READYQ_SHARD_FACTOR);
     618                        local = true;
     619                }
     620                else {
     621                        // 1 out of BIAS chances
     622                        // Use all queues
     623                        i = rhigh;
     624                        local = false;
     625                }
     626                return [i, local];
     627        }
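
As a worked example, assuming BIAS were 8 (illustrative only; the real value is a compile-time constant defined elsewhere), a processor stays on its preferred shard 7 times out of 8 and falls back to the whole array 1 time out of 8:

        BIAS = 8 (hypothetical)
        r = 21 : rlow = 21 % 8 = 5, rhigh = 21 / 8 = 2  ->  i = preferred + (2 % READYQ_SHARD_FACTOR), local = true
        r = 16 : rlow = 16 % 8 = 0, rhigh = 16 / 8 = 2  ->  i = 2, local = false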
     628
     629        __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
     630                __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
     631
     632                const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
     633                /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
     634
     635                bool local;
     636                int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;
     637
     638                // Try to pick a lane and lock it
     639                unsigned i;
     640                do {
     641                        // Pick the index of a lane
     642                        unsigned r = __tls_rand_fwd();
     643                        [i, local] = idx_from_r(r, preferred);
     644
     645                        i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
     646
     647                        #if !defined(__CFA_NO_STATISTICS__)
     648                                if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
     649                                else if(local) __tls_stats()->ready.push.local.attempt++;
     650                                else __tls_stats()->ready.push.share.attempt++;
     651                        #endif
     652
     653                        // If we can't lock it retry
     654                } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
     655
     656                // Actually push it
     657                push(lanes.data[i], thrd);
     658
     659                // Unlock and return
     660                __atomic_unlock( &lanes.data[i].lock );
     661
     662                // Mark the current index in the tls rng instance as having an item
     663                __tls_rand_advance_bck();
     664
     665                __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
     666
     667                // Update statistics
     668                #if !defined(__CFA_NO_STATISTICS__)
     669                        if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
     670                        else if(local) __tls_stats()->ready.push.local.success++;
     671                        else __tls_stats()->ready.push.share.success++;
     672                #endif
     673        }
     674
     675        // Pop from the ready queue from a given cluster
     676        __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
     677                /* paranoid */ verify( lanes.count > 0 );
     678                /* paranoid */ verify( kernelTLS().this_processor );
     679                /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
     680
     681                unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
     682                int preferred = kernelTLS().this_processor->rdq.id;
     683
     684
     685                // As long as the list is not empty, try finding a lane that isn't empty and pop from it
     686                for(25) {
     687                        // Pick two lists at random
     688                        unsigned ri = __tls_rand_bck();
     689                        unsigned rj = __tls_rand_bck();
     690
     691                        unsigned i, j;
     692                        __attribute__((unused)) bool locali, localj;
     693                        [i, locali] = idx_from_r(ri, preferred);
     694                        [j, localj] = idx_from_r(rj, preferred);
     695
     696                        i %= count;
     697                        j %= count;
     698
     699                        // try popping from the 2 picked lists
     700                        struct thread$ * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
     701                        if(thrd) {
     702                                return thrd;
     703                        }
     704                }
     705
      706                // All lanes were empty, return 0p
     707                return 0p;
     708        }
     709
     710        __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
     711        __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
     712                return search(cltr);
     713        }
     714#endif
     715#if defined(USE_WORK_STEALING)
     716        __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
     717                __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
     718
     719                // #define USE_PREFERRED
     720                #if !defined(USE_PREFERRED)
      721                        const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
      722                        /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
     723                #else
     724                        unsigned preferred = thrd->preferred;
     725                        const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || preferred == MAX || thrd->curr_cluster != cltr;
     726                        /* paranoid */ verifyf(external || preferred < lanes.count, "Invalid preferred queue %u for %u lanes", preferred, lanes.count );
     727
     728                        unsigned r = preferred % READYQ_SHARD_FACTOR;
     729                        const unsigned start = preferred - r;
     730                #endif
     731
     732                // Try to pick a lane and lock it
     733                unsigned i;
     734                do {
     735                        #if !defined(__CFA_NO_STATISTICS__)
     736                                if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
     737                                else __tls_stats()->ready.push.local.attempt++;
     738                        #endif
     739
     740                        if(unlikely(external)) {
     741                                i = __tls_rand() % lanes.count;
     742                        }
     743                        else {
     744                                #if !defined(USE_PREFERRED)
     745                                        processor * proc = kernelTLS().this_processor;
     746                                        unsigned r = proc->rdq.its++;
     747                                        i =  proc->rdq.id + (r % READYQ_SHARD_FACTOR);
     748                                #else
     749                                        i = start + (r++ % READYQ_SHARD_FACTOR);
     750                                #endif
     751                        }
     752                        // If we can't lock it retry
     753                } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
     754
     755                // Actually push it
     756                push(lanes.data[i], thrd);
     757
     758                // Unlock and return
     759                __atomic_unlock( &lanes.data[i].lock );
     760
     761                #if !defined(__CFA_NO_STATISTICS__)
     762                        if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
     763                        else __tls_stats()->ready.push.local.success++;
     764                #endif
     765
     766                __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
     767        }
     768
     769        // Pop from the ready queue from a given cluster
     770        __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
     771                /* paranoid */ verify( lanes.count > 0 );
     772                /* paranoid */ verify( kernelTLS().this_processor );
     773                /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
     774
     775                processor * proc = kernelTLS().this_processor;
     776
     777                if(proc->rdq.target == MAX) {
     778                        unsigned long long min = ts(lanes.data[proc->rdq.id]);
     779                        for(int i = 0; i < READYQ_SHARD_FACTOR; i++) {
     780                                unsigned long long tsc = ts(lanes.data[proc->rdq.id + i]);
     781                                if(tsc < min) min = tsc;
     782                        }
     783                        proc->rdq.cutoff = min;
     784                        proc->rdq.target = __tls_rand() % lanes.count;
     785                }
     786                else {
     787                        unsigned target = proc->rdq.target;
     788                        proc->rdq.target = MAX;
     789                        const unsigned long long bias = 0; //2_500_000_000;
     790                        const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff;
     791                        if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
    145792                                thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
    146793                                if(t) return t;
    147794                        }
    148795                }
    149                 proc->rdq.target = MAX;
    150         }
    151 
    152         for(__shard_factor.readyq) {
    153                 unsigned i = this + (proc->rdq.itr++ % __shard_factor.readyq);
    154                 if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
    155         }
    156 
     157         // All lanes were empty, return 0p
    158         return 0p;
    159 
    160 }
    161 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) {
    162         unsigned i = __tls_rand() % (cltr->sched.readyQ.count);
    163         return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
    164 }
    165 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
    166         return search(cltr);
    167 }
     796
     797                for(READYQ_SHARD_FACTOR) {
     798                        unsigned i = proc->rdq.id + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
     799                        if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
     800                }
     801                return 0p;
     802        }
     803
     804        __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
     805                unsigned i = __tls_rand() % lanes.count;
     806                return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
     807        }
     808
     809        __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
     810                return search(cltr);
     811        }
     812#endif
    168813
    169814//=======================================================================
     
    175820//-----------------------------------------------------------------------
    176821// try to pop from a lane given by index w
    177 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
    178         /* paranoid */ verify( w < readyQ.count );
     822static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
     823        /* paranoid */ verify( w < lanes.count );
    179824        __STATS( stats.attempt++; )
    180825
    181826        // Get relevant elements locally
    182         __intrusive_lane_t & lane = readyQ.data[w];
     827        __intrusive_lane_t & lane = lanes.data[w];
    183828
     184829        // If the list looks empty, retry
     
    200845        // Actually pop the list
    201846        struct thread$ * thrd;
    202         unsigned long long ts_prev = ts(lane);
    203         unsigned long long ts_next;
    204         [thrd, ts_next] = pop(lane);
     847        #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING)
     848                unsigned long long tsc_before = ts(lane);
     849        #endif
     850        unsigned long long tsv;
     851        [thrd, tsv] = pop(lane);
    205852
    206853        /* paranoid */ verify(thrd);
    207         /* paranoid */ verify(ts_next);
     854        /* paranoid */ verify(tsv);
    208855        /* paranoid */ verify(lane.lock);
    209856
     
    214861        __STATS( stats.success++; )
    215862
    216         touch_tsc(readyQ.tscs, w, ts_prev, ts_next);
    217 
    218         thrd->preferred = w / __shard_factor.readyq;
     863        #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING)
     864                if (tsv != MAX) {
     865                        unsigned long long now = rdtscl();
     866                        unsigned long long pma = __atomic_load_n(&lanes.tscs[w].ma, __ATOMIC_RELAXED);
     867                        __atomic_store_n(&lanes.tscs[w].tv, tsv, __ATOMIC_RELAXED);
     868                        __atomic_store_n(&lanes.tscs[w].ma, moving_average(now, tsc_before, pma), __ATOMIC_RELAXED);
     869                }
     870        #endif
     871
     872        #if defined(USE_AWARE_STEALING) || defined(USE_CPU_WORK_STEALING)
     873                thrd->preferred = w / READYQ_SHARD_FACTOR;
     874        #else
     875                thrd->preferred = w;
     876        #endif
    219877
    220878        // return the popped thread
     
     225883// try to pop from any lanes making sure you don't miss any threads pushed
    226884// before the start of the function
    227 static inline struct thread$ * search(struct cluster * cltr) {
    228         const size_t lanes_count = cltr->sched.readyQ.count;
    229         /* paranoid */ verify( lanes_count > 0 );
    230         unsigned count = __atomic_load_n( &lanes_count, __ATOMIC_RELAXED );
     885static inline struct thread$ * search(struct cluster * cltr) with (cltr->ready_queue) {
     886        /* paranoid */ verify( lanes.count > 0 );
     887        unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
    231888        unsigned offset = __tls_rand();
    232889        for(i; count) {
     
    245902// get preferred ready for new thread
    246903unsigned ready_queue_new_preferred() {
    247         unsigned pref = MAX;
     904        unsigned pref = 0;
    248905        if(struct thread$ * thrd = publicTLS_get( this_thread )) {
    249906                pref = thrd->preferred;
    250907        }
     908        else {
     909                #if defined(USE_CPU_WORK_STEALING)
     910                        pref = __kernel_getcpu();
     911                #endif
     912        }
     913
     914        #if defined(USE_CPU_WORK_STEALING)
     915                /* paranoid */ verify(pref >= 0);
     916                /* paranoid */ verify(pref < cpu_info.hthrd_count);
     917        #endif
    251918
    252919        return pref;
     920}
     921
     922//-----------------------------------------------------------------------
     923// Check that all the intrusive queues in the data structure are still consistent
     924static void check( __ready_queue_t & q ) with (q) {
     925        #if defined(__CFA_WITH_VERIFY__)
     926                {
     927                        for( idx ; lanes.count ) {
     928                                __intrusive_lane_t & sl = lanes.data[idx];
     929                                assert(!lanes.data[idx].lock);
     930
      931                                if(is_empty(sl)) {
      932                                        assert( sl.anchor.next == 0p );
      933                                        assert( sl.anchor.ts   == -1llu );
      934                                        assert( mock_head(sl)  == sl.prev );
      935                                } else {
      936                                        assert( sl.anchor.next != 0p );
      937                                        assert( sl.anchor.ts   != -1llu );
      938                                        assert( mock_head(sl)  != sl.prev );
      939                                }
     940                        }
     941                }
     942        #endif
    253943}
    254944
    255945//-----------------------------------------------------------------------
    256946// Given 2 indexes, pick the list with the oldest push an try to pop from it
    257 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
     947static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
     258948        // Pick the best list
    259949        int w = i;
    260         if( __builtin_expect(!is_empty(readyQ.data[j]), true) ) {
    261                 w = (ts(readyQ.data[i]) < ts(readyQ.data[j])) ? i : j;
     950        if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
     951                w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
    262952        }
    263953
    264954        return try_pop(cltr, w __STATS(, stats));
    265955}
     956
      957// Call this function if the intrusive list was moved using memcpy;
      958// it fixes the list so that the pointers back to the anchors aren't left dangling
     959static inline void fix(__intrusive_lane_t & ll) {
      960        if(is_empty(ll)) {
      961                verify(ll.anchor.next == 0p);
      962                ll.prev = mock_head(ll);
      963        }
     964}
     965
     966static void assign_list(unsigned & value, dlist(processor) & list, unsigned count) {
     967        processor * it = &list`first;
     968        for(unsigned i = 0; i < count; i++) {
     969                /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
     970                it->rdq.id = value;
     971                it->rdq.target = MAX;
     972                value += READYQ_SHARD_FACTOR;
     973                it = &(*it)`next;
     974        }
     975}
     976
     977static void reassign_cltr_id(struct cluster * cltr) {
     978        unsigned preferred = 0;
     979        assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
     980        assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
     981}
     982
     983static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
     984        #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING)
     985                lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
     986                for(i; lanes.count) {
     987                        lanes.tscs[i].tv = rdtscl();
     988                        lanes.tscs[i].ma = 0;
     989                }
     990        #endif
     991}
     992
     993#if defined(USE_CPU_WORK_STEALING)
     994        // ready_queue size is fixed in this case
     995        void ready_queue_grow(struct cluster * cltr) {}
     996        void ready_queue_shrink(struct cluster * cltr) {}
     997#else
     998        // Grow the ready queue
     999        void ready_queue_grow(struct cluster * cltr) {
     1000                size_t ncount;
     1001                int target = cltr->procs.total;
     1002
     1003                /* paranoid */ verify( ready_mutate_islocked() );
     1004                __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");
     1005
     1006                // Make sure that everything is consistent
     1007                /* paranoid */ check( cltr->ready_queue );
     1008
     1009                // grow the ready queue
     1010                with( cltr->ready_queue ) {
     1011                        // Find new count
      1012                        // Make sure we always have at least 1 list
     1013                        if(target >= 2) {
     1014                                ncount = target * READYQ_SHARD_FACTOR;
     1015                        } else {
     1016                                ncount = SEQUENTIAL_SHARD;
     1017                        }
     1018
     1019                        // Allocate new array (uses realloc and memcpies the data)
     1020                        lanes.data = alloc( ncount, lanes.data`realloc );
     1021
     1022                        // Fix the moved data
     1023                        for( idx; (size_t)lanes.count ) {
     1024                                fix(lanes.data[idx]);
     1025                        }
     1026
     1027                        // Construct new data
     1028                        for( idx; (size_t)lanes.count ~ ncount) {
     1029                                (lanes.data[idx]){};
     1030                        }
     1031
     1032                        // Update original
     1033                        lanes.count = ncount;
     1034
     1035                        lanes.caches = alloc( target, lanes.caches`realloc );
     1036                }
     1037
     1038                fix_times(cltr);
     1039
     1040                reassign_cltr_id(cltr);
     1041
     1042                // Make sure that everything is consistent
     1043                /* paranoid */ check( cltr->ready_queue );
     1044
     1045                __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");
     1046
     1047                /* paranoid */ verify( ready_mutate_islocked() );
     1048        }
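
Grow and shrink share the same sizing rule: a cluster with target >= 2 processors gets target * READYQ_SHARD_FACTOR lanes, anything smaller collapses to SEQUENTIAL_SHARD. Assuming, purely for illustration, a shard factor of 2:

        target = 4  ->  ncount = 4 * 2 = 8 lanes
        target = 1  ->  ncount = SEQUENTIAL_SHARD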
     1049
     1050        // Shrink the ready queue
     1051        void ready_queue_shrink(struct cluster * cltr) {
     1052                /* paranoid */ verify( ready_mutate_islocked() );
     1053                __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
     1054
     1055                // Make sure that everything is consistent
     1056                /* paranoid */ check( cltr->ready_queue );
     1057
     1058                int target = cltr->procs.total;
     1059
     1060                with( cltr->ready_queue ) {
     1061                        // Remember old count
     1062                        size_t ocount = lanes.count;
     1063
     1064                        // Find new count
      1065                        // Make sure we always have at least 1 list
     1066                        lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD;
     1067                        /* paranoid */ verify( ocount >= lanes.count );
     1068                        /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );
     1069
      1070                        // for printing, count the number of displaced threads
     1071                        #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
     1072                                __attribute__((unused)) size_t displaced = 0;
     1073                        #endif
     1074
     1075                        // redistribute old data
     1076                        for( idx; (size_t)lanes.count ~ ocount) {
     1077                                // Lock is not strictly needed but makes checking invariants much easier
     1078                                __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
     1079                                verify(locked);
     1080
      1081                                // As long as we can pop from this lane, push the threads somewhere else in the queue
     1082                                while(!is_empty(lanes.data[idx])) {
     1083                                        struct thread$ * thrd;
     1084                                        unsigned long long _;
     1085                                        [thrd, _] = pop(lanes.data[idx]);
     1086
     1087                                        push(cltr, thrd, true);
     1088
      1089                                        // for printing, count the number of displaced threads
     1090                                        #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
     1091                                                displaced++;
     1092                                        #endif
     1093                                }
     1094
     1095                                // Unlock the lane
     1096                                __atomic_unlock(&lanes.data[idx].lock);
     1097
     1098                                // TODO print the queue statistics here
     1099
     1100                                ^(lanes.data[idx]){};
     1101                        }
     1102
     1103                        __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);
     1104
     1105                        // Allocate new array (uses realloc and memcpies the data)
     1106                        lanes.data = alloc( lanes.count, lanes.data`realloc );
     1107
     1108                        // Fix the moved data
     1109                        for( idx; (size_t)lanes.count ) {
     1110                                fix(lanes.data[idx]);
     1111                        }
     1112
     1113                        lanes.caches = alloc( target, lanes.caches`realloc );
     1114                }
     1115
     1116                fix_times(cltr);
     1117
     1118
     1119                reassign_cltr_id(cltr);
     1120
     1121                // Make sure that everything is consistent
     1122                /* paranoid */ check( cltr->ready_queue );
     1123
     1124                __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
     1125                /* paranoid */ verify( ready_mutate_islocked() );
     1126        }
     1127#endif
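The shrink path is the inverse: every lane past the new count is drained by popping each thread and re-pushing it through the regular push path, so the work redistributes itself over the surviving lanes before the doomed lanes are destructed and the array is reallocated. A rough C analogue over simple linked shards (shard_t, pop_one, push_any are illustrative names, not libcfa's API):

    #include <assert.h>
    #include <stddef.h>

    typedef struct node { struct node * next; } node_t;
    typedef struct { node_t * head; } shard_t;

    static node_t * pop_one( shard_t * s ) {
            node_t * n = s->head;
            if ( n ) s->head = n->next;
            return n;
    }

    // The normal insertion path: here a trivial round-robin over the
    // first `count` shards.
    static void push_any( shard_t * shards, size_t count, node_t * n ) {
            static size_t next = 0;
            shard_t * s = &shards[ next++ % count ];
            n->next = s->head;
            s->head = n;
    }

    static void shrink( shard_t * shards, size_t ocount, size_t ncount ) {
            for ( size_t i = ncount; i < ocount; i++ ) {     // doomed shards
                    node_t * n;
                    while ( (n = pop_one( &shards[i] )) ) {  // drain ...
                            push_any( shards, ncount, n );   // ... and redistribute
                    }
                    assert( shards[i].head == NULL );        // now safe to destroy
            }
    }

    int main(void) {
            shard_t shards[4] = { {NULL}, {NULL}, {NULL}, {NULL} };
            node_t nodes[8];
            for ( int i = 0; i < 8; i++ ) push_any( shards, 4, &nodes[i] );
            shrink( shards, 4, 2 );
    }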
     1128
     1129#if !defined(__CFA_NO_STATISTICS__)
     1130        unsigned cnt(const __ready_queue_t & this, unsigned idx) {
     1131                /* paranoid */ verify(this.lanes.count > idx);
     1132                return this.lanes.data[idx].cnt;
     1133        }
     1134#endif
     1135
     1136
     1137#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
     1138        // No definition needed
     1139#elif defined(CFA_HAVE_LINUX_RSEQ_H)
     1140
     1141        #if defined( __x86_64 ) || defined( __i386 )
     1142                #define RSEQ_SIG        0x53053053
     1143        #elif defined( __ARM_ARCH )
     1144                #ifdef __ARMEB__
     1145                #define RSEQ_SIG    0xf3def5e7      /* udf    #24035    ; 0x5de3 (ARMv6+) */
     1146                #else
     1147                #define RSEQ_SIG    0xe7f5def3      /* udf    #24035    ; 0x5de3 */
     1148                #endif
     1149        #endif
     1150
     1151        extern void __disable_interrupts_hard();
     1152        extern void __enable_interrupts_hard();
     1153
     1154        static void __kernel_raw_rseq_register  (void) {
     1155                /* paranoid */ verify( __cfaabi_rseq.cpu_id == RSEQ_CPU_ID_UNINITIALIZED );
     1156
     1157                // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, (sigset_t *)0p, _NSIG / 8);
     1158                int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, RSEQ_SIG);
     1159                if(ret != 0) {
     1160                        int e = errno;
     1161                        switch(e) {
     1162                        case EINVAL: abort("KERNEL ERROR: rseq register invalid argument");
      1163                        case ENOSYS: abort("KERNEL ERROR: rseq register not supported");
      1164                        case EFAULT: abort("KERNEL ERROR: rseq register with invalid address");
      1165                        case EBUSY : abort("KERNEL ERROR: rseq register already registered");
      1166                        case EPERM : abort("KERNEL ERROR: rseq register sig argument does not match the signature received on registration");
     1167                        default: abort("KERNEL ERROR: rseq register unexpected return %d", e);
     1168                        }
     1169                }
     1170        }
     1171
     1172        static void __kernel_raw_rseq_unregister(void) {
     1173                /* paranoid */ verify( __cfaabi_rseq.cpu_id >= 0 );
     1174
     1175                // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, (sigset_t *)0p, _NSIG / 8);
     1176                int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, RSEQ_SIG);
     1177                if(ret != 0) {
     1178                        int e = errno;
     1179                        switch(e) {
     1180                        case EINVAL: abort("KERNEL ERROR: rseq unregister invalid argument");
      1181                        case ENOSYS: abort("KERNEL ERROR: rseq unregister not supported");
      1182                        case EFAULT: abort("KERNEL ERROR: rseq unregister with invalid address");
      1183                        case EBUSY : abort("KERNEL ERROR: rseq unregister already registered");
      1184                        case EPERM : abort("KERNEL ERROR: rseq unregister sig argument does not match the signature received on registration");
      1185                        default: abort("KERNEL ERROR: rseq unregister unexpected return %d", e);
     1186                        }
     1187                }
     1188        }
     1189#else
     1190        // No definition needed
     1191#endif
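For context on the raw interface above: rseq(2) is registered once per thread with a single syscall, after which the kernel keeps the cpu_id field of the registered area up to date. A hedged, standalone C sketch, assuming Linux >= 4.18 with kernel headers that provide linux/rseq.h (MY_RSEQ_SIG is an arbitrary signature, reusing the x86 value above); note that glibc >= 2.35 registers rseq itself at thread start, so a second direct registration typically fails with EBUSY:

    #define _GNU_SOURCE
    #include <linux/rseq.h>      // struct rseq, RSEQ_FLAG_UNREGISTER
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define MY_RSEQ_SIG 0x53053053   // must match on register and unregister

    // The area must stay alive, 32-byte aligned, for the thread's lifetime.
    static __thread struct rseq rseq_area __attribute__((aligned(32)));

    int main(void) {
            if ( syscall( __NR_rseq, &rseq_area, sizeof(rseq_area), 0, MY_RSEQ_SIG ) != 0 ) {
                    perror( "rseq register" );   // EBUSY: libc already registered one
                    return 1;
            }
            printf( "running on cpu %u\n", rseq_area.cpu_id );   // kernel-maintained
            syscall( __NR_rseq, &rseq_area, sizeof(rseq_area), RSEQ_FLAG_UNREGISTER, MY_RSEQ_SIG );
            return 0;
    }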
  • libcfa/src/concurrency/ready_subqueue.hfa

    r2e9b59b rba897d21  
    33#define __CFA_NO_SCHED_STATS__
    44
    5 #include "limits.hfa"
     5#include "containers/queueLockFree.hfa"
    66
     77// Intrusive lanes used by the relaxed ready queue
     
    2727}
    2828
     29// Ctor
     30void ?{}( __intrusive_lane_t & this ) {
     31        this.lock = false;
     32        this.prev = mock_head(this);
     33        this.anchor.next = 0p;
     34        this.anchor.ts   = -1llu;
     35        #if !defined(__CFA_NO_STATISTICS__)
     36                this.cnt  = 0;
     37        #endif
     38
     39        // We add a boat-load of assertions here because the anchor code is very fragile
     40        /* paranoid */ _Static_assert( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
     41        /* paranoid */ verify( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
     42        /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.anchor) );
     43        /* paranoid */ verify( &mock_head(this)->link.next == &this.anchor.next );
     44        /* paranoid */ verify( &mock_head(this)->link.ts   == &this.anchor.ts   );
     45        /* paranoid */ verify( mock_head(this)->link.next == 0p );
     46        /* paranoid */ verify( mock_head(this)->link.ts   == -1llu  );
     47        /* paranoid */ verify( mock_head(this) == this.prev );
     48        /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 );
     49        /* paranoid */ verify( __alignof__(this) == 128 );
     50        /* paranoid */ verifyf( ((intptr_t)(&this) % 128) == 0, "Expected address to be aligned %p %% 128 == %zd", &this, ((intptr_t)(&this) % 128) );
     51}
     52
     53// Dtor is trivial
     54void ^?{}( __intrusive_lane_t & this ) {
     55        // Make sure the list is empty
     56        /* paranoid */ verify( this.anchor.next == 0p );
     57        /* paranoid */ verify( this.anchor.ts   == -1llu );
     58        /* paranoid */ verify( mock_head(this)  == this.prev );
     59}
     60
    2961// Push a thread onto this lane
     3062// returns true if the lane was empty before the push, false otherwise
     
    3264        /* paranoid */ verify( this.lock );
    3365        /* paranoid */ verify( node->link.next == 0p );
    34         /* paranoid */ verify( node->link.ts   == MAX  );
     66        /* paranoid */ verify( node->link.ts   == -1llu  );
    3567        /* paranoid */ verify( this.prev->link.next == 0p );
    36         /* paranoid */ verify( this.prev->link.ts   == MAX  );
     68        /* paranoid */ verify( this.prev->link.ts   == -1llu  );
    3769        if( this.anchor.next == 0p ) {
    3870                /* paranoid */ verify( this.anchor.next == 0p );
    39                 /* paranoid */ verify( this.anchor.ts   == MAX );
     71                /* paranoid */ verify( this.anchor.ts   == -1llu );
    4072                /* paranoid */ verify( this.anchor.ts   != 0  );
    4173                /* paranoid */ verify( this.prev == mock_head( this ) );
    4274        } else {
    4375                /* paranoid */ verify( this.anchor.next != 0p );
    44                 /* paranoid */ verify( this.anchor.ts   != MAX );
     76                /* paranoid */ verify( this.anchor.ts   != -1llu );
    4577                /* paranoid */ verify( this.anchor.ts   != 0  );
    4678                /* paranoid */ verify( this.prev != mock_head( this ) );
     
    6294        /* paranoid */ verify( this.lock );
    6395        /* paranoid */ verify( this.anchor.next != 0p );
    64         /* paranoid */ verify( this.anchor.ts   != MAX );
     96        /* paranoid */ verify( this.anchor.ts   != -1llu );
    6597        /* paranoid */ verify( this.anchor.ts   != 0  );
    6698
     
    71103        bool is_empty = this.anchor.next == 0p;
    72104        node->link.next = 0p;
    73         node->link.ts   = MAX;
     105        node->link.ts   = -1llu;
    74106        #if !defined(__CFA_NO_STATISTICS__)
    75107                this.cnt--;
     
    80112
    81113        /* paranoid */ verify( node->link.next == 0p );
    82         /* paranoid */ verify( node->link.ts   == MAX  );
     114        /* paranoid */ verify( node->link.ts   == -1llu  );
    83115        /* paranoid */ verify( node->link.ts   != 0  );
    84116        /* paranoid */ verify( this.anchor.ts  != 0  );
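The boat-load of assertions in the constructor above all defend one trick: thread$'s link field and the lane's anchor field are laid out so that mock_head can fabricate a fake thread$ pointer whose link aliases the lane's own anchor, letting push and pop treat empty and non-empty lanes uniformly. A small C sketch of that sentinel-by-offset technique (thread_t, link_t, lane_t and mock_head are illustrative re-creations, not the actual headers):

    #include <assert.h>
    #include <stddef.h>

    typedef struct thread thread_t;
    typedef struct { thread_t * next; unsigned long long ts; } link_t;

    struct thread {
            link_t link;          // intrusive hook, located via offsetof below
            /* ... payload ... */
    };

    typedef struct {
            thread_t * prev;      // last node in the lane
            link_t anchor;        // stands in for the head node's link field
    } lane_t;

    // Fabricate a thread_t * whose link field aliases the anchor, the same
    // container_of-style arithmetic the assertions above pin down.
    static thread_t * mock_head( lane_t * lane ) {
            return (thread_t *)( (char *)&lane->anchor - offsetof( thread_t, link ) );
    }

    int main(void) {
            lane_t lane;
            lane.anchor.next = NULL;
            lane.anchor.ts   = -1ull;
            lane.prev = mock_head( &lane );

            // The fake head's link really is the anchor.
            assert( &mock_head( &lane )->link.next == &lane.anchor.next );
            assert( mock_head( &lane )->link.next == NULL );
            assert( mock_head( &lane ) == lane.prev );
    }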
  • libcfa/src/concurrency/stats.cfa

    r2e9b59b rba897d21  
    5555                        stats->io.calls.drain       = 0;
    5656                        stats->io.calls.completed   = 0;
    57                         stats->io.calls.locked      = 0;
    58                         stats->io.calls.helped      = 0;
    5957                        stats->io.calls.errors.busy = 0;
    6058                        stats->io.ops.sockread      = 0;
     
    125123                        tally_one( &cltr->io.calls.drain      , &proc->io.calls.drain       );
    126124                        tally_one( &cltr->io.calls.completed  , &proc->io.calls.completed   );
    127                         tally_one( &cltr->io.calls.locked     , &proc->io.calls.locked      );
    128                         tally_one( &cltr->io.calls.helped     , &proc->io.calls.helped      );
    129125                        tally_one( &cltr->io.calls.errors.busy, &proc->io.calls.errors.busy );
    130126                        tally_one( &cltr->io.ops.sockread     , &proc->io.ops.sockread      );
     
    209205                                     |   " sub " | eng3(io.calls.submitted) | "/" | eng3(io.calls.flush) | "(" | ws(3, 3, avgsubs) | "/flush)"
    210206                                     | " - cmp " | eng3(io.calls.completed) | "/" | eng3(io.calls.drain) | "(" | ws(3, 3, avgcomp) | "/drain)"
    211                                      | " - cmp " | eng3(io.calls.locked) | "locked, " | eng3(io.calls.helped) | "helped"
    212207                                     | " - " | eng3(io.calls.errors.busy) | " EBUSY";
    213208                                sstr | " - sub: " | eng3(io.flush.full) | "full, " | eng3(io.flush.dirty) | "drty, " | eng3(io.flush.idle) | "idle, " | eng3(io.flush.eager) | "eagr, " | eng3(io.flush.external) | "ext";
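The tally lines above fold per-processor counters into cluster-wide totals (minus the now-deleted locked/helped counters). A generic C sketch of that per-processor-to-cluster tally pattern, assuming a reset-on-tally scheme and GCC/Clang __atomic builtins; the real tally_one may well differ:

    #include <stdio.h>

    typedef struct { volatile unsigned long long drain, completed; } io_calls_t;

    // Move one processor-local counter into the cluster-wide total, zeroing
    // the local counter so the next tally only sees new events.
    static void tally_one( volatile unsigned long long * cltr,
                           volatile unsigned long long * proc ) {
            unsigned long long v = __atomic_exchange_n( proc, 0, __ATOMIC_RELAXED );
            __atomic_fetch_add( cltr, v, __ATOMIC_RELAXED );
    }

    int main(void) {
            io_calls_t cltr = { 0, 0 }, proc = { 5, 3 };
            tally_one( &cltr.drain,     &proc.drain     );
            tally_one( &cltr.completed, &proc.completed );
            printf( "drain %llu cmp %llu\n", cltr.drain, cltr.completed );
    }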
  • libcfa/src/concurrency/stats.hfa

    r2e9b59b rba897d21  
    103103                                volatile uint64_t drain;
    104104                                volatile uint64_t completed;
    105                                 volatile uint64_t locked;
    106                                 volatile uint64_t helped;
    107105                                volatile uint64_t flush;
    108106                                volatile uint64_t submitted;
  • libcfa/src/concurrency/thread.cfa

    r2e9b59b rba897d21  
    1919#include "thread.hfa"
    2020
    21 #include "kernel/private.hfa"
     21#include "kernel_private.hfa"
    2222#include "exception.hfa"
    2323
  • libcfa/src/containers/array.hfa

    r2e9b59b rba897d21  
    1 #include <assert.h>
    21
    32
     
    3534
    3635    static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, int i ) {
    37         assert( i < N );
    3836        return (Timmed &) a.strides[i];
    3937    }
    4038
    4139    static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, unsigned int i ) {
    42         assert( i < N );
    4340        return (Timmed &) a.strides[i];
    4441    }
    4542
    4643    static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, long int i ) {
    47         assert( i < N );
    4844        return (Timmed &) a.strides[i];
    4945    }
    5046
    5147    static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, unsigned long int i ) {
    52         assert( i < N );
    5348        return (Timmed &) a.strides[i];
    5449    }
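The hunk above removes the assert( i < N ) bounds checks from the subscript operators. For reference, the idiom those lines implemented is the standard C assert-guarded accessor: the check traps on out-of-range indices in debug builds and compiles away entirely under -DNDEBUG (a generic sketch, not the array.hfa code):

    #include <assert.h>

    #define N 10
    static int data[N];

    // Debug builds trap out-of-range indices; with -DNDEBUG the check
    // disappears and this reduces to a plain array access.
    static inline int * at( int * a, unsigned i ) {
            assert( i < N );
            return &a[i];
    }

    int main(void) {
            *at( data, 3 ) = 42;
            return *at( data, 3 ) == 42 ? 0 : 1;
    }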
  • libcfa/src/device/cpu.hfa

    r2e9b59b rba897d21  
    1313// Update Count     :
    1414//
    15 
    16 #pragma once
    1715
    1816#include <stddef.h>
  • libcfa/src/fstream.cfa

    r2e9b59b rba897d21  
    1010// Created On       : Wed May 27 17:56:53 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Apr  9 14:55:54 2022
    13 // Update Count     : 515
     12// Last Modified On : Mon Jan 10 08:45:05 2022
     13// Update Count     : 513
    1414//
    1515
     
    161161    for ( cnt; 10 ) {
    162162                errno = 0;
    163                 disable_interrupts();
    164163                len = vfprintf( (FILE *)(os.file$), format, args );
    165                 enable_interrupts();
    166164          if ( len != EOF || errno != EINTR ) break;            // timer interrupt ?
    167165          if ( cnt == 9 ) abort( "ofstream fmt EINTR spinning exceeded" );
     
    295293    for () {                                                                                    // no check for EINTR limit waiting for keyboard input
    296294                errno = 0;
    297                 disable_interrupts();
    298295                len = vfscanf( (FILE *)(is.file$), format, args );
    299                 enable_interrupts();
    300296          if ( len != EOF || errno != EINTR ) break;            // timer interrupt ?
    301297    } // for
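Both fstream hunks drop the disable_interrupts/enable_interrupts bracketing but keep the EINTR handling: a stdio call interrupted by a signal (e.g. the runtime's timer) fails with errno == EINTR and is simply retried, with a bound on the output side so a signal storm cannot spin forever. A generic C sketch of that retry idiom (fprintf_retry is a made-up helper):

    #include <errno.h>
    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    // Retry fprintf when a signal interrupts it mid-call; bounded retries
    // guard against livelock if signals arrive continuously.
    static int fprintf_retry( FILE * f, const char * fmt, ... ) {
            int len;
            for ( int cnt = 0;; cnt++ ) {
                    va_list args;
                    va_start( args, fmt );
                    errno = 0;
                    len = vfprintf( f, fmt, args );
                    va_end( args );
                    if ( len != EOF || errno != EINTR ) break;   // done, or a real error
                    if ( cnt == 9 ) abort();                     // EINTR spinning exceeded
            }
            return len;
    }

    int main(void) {
            fprintf_retry( stdout, "hello %d\n", 42 );
            return 0;
    }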
  • src/AST/Convert.cpp

    r2e9b59b rba897d21  
    99// Author           : Thierry Delisle
    1010// Created On       : Thu May 09 15::37::05 2019
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Mar 16 15:01:00 2022
    13 // Update Count     : 42
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Wed Feb  2 13:19:22 2022
     13// Update Count     : 41
    1414//
    1515
     
    4949//================================================================================================
    5050namespace ast {
    51 // These are the shared local information used by ConverterNewToOld and
    52 // ConverterOldToNew to update the global information in the two versions.
    53 
    54 static ast::ptr<ast::Type> sizeType = nullptr;
    55 static const ast::FunctionDecl * dereferenceOperator = nullptr;
    56 static const ast::StructDecl   * dtorStruct = nullptr;
    57 static const ast::FunctionDecl * dtorStructDestroy = nullptr;
     51
     52// This is to preserve the FindSpecialDecls hack. It does not (and perhaps should not)
      53// allow us to use the same strategy in the new AST.
      54// xxx - since the convert-back pass works, this concern seems to be unnecessary.
     55
      56// These need to be accessed in the new FixInit now.
     57ast::ptr<ast::Type> sizeType = nullptr;
     58const ast::FunctionDecl * dereferenceOperator = nullptr;
     59const ast::StructDecl   * dtorStruct = nullptr;
     60const ast::FunctionDecl * dtorStructDestroy = nullptr;
    5861
    5962}
     
    273276                decl->parent = get<AggregateDecl>().accept1( node->parent );
    274277                declPostamble( decl, node );
    275                 return nullptr; // ??
     278                return nullptr;
    276279        }
    277280
     
    307310                        node->name,
    308311                        get<Attribute>().acceptL( node->attributes ),
    309                         LinkageSpec::Spec( node->linkage.val ),
    310                         get<Type>().accept1(node->base)
    311                 );
    312                 return aggregatePostamble( decl, node ); // Node info, including members, processed in aggregatePostamble
     312                        LinkageSpec::Spec( node->linkage.val )
     313                );
     314                return aggregatePostamble( decl, node );
    313315        }
    314316
     
    352354                this->node = stmt;
    353355                return nullptr;
    354         }
    355 
    356         void clausePostamble( Statement * stmt, const ast::StmtClause * node ) {
    357                 stmt->location = node->location;
    358                 this->node = stmt;
    359356        }
    360357
     
    407404                auto stmt = new SwitchStmt(
    408405                        get<Expression>().accept1( node->cond ),
    409                         get<Statement>().acceptL( node->cases )
     406                        get<Statement>().acceptL( node->stmts )
    410407                );
    411408                return stmtPostamble( stmt, node );
    412409        }
    413410
    414         const ast::CaseClause * visit( const ast::CaseClause * node ) override final {
     411        const ast::Stmt * visit( const ast::CaseStmt * node ) override final {
    415412                if ( inCache( node ) ) return nullptr;
    416413                auto stmt = new CaseStmt(
     
    419416                        node->isDefault()
    420417                );
    421                 clausePostamble( stmt, node );
    422                 return nullptr;
     418                return stmtPostamble( stmt, node );
    423419        }
    424420
     
    516512        }
    517513
    518         const ast::CatchClause * visit( const ast::CatchClause * node ) override final {
     514        const ast::Stmt * visit( const ast::CatchStmt * node ) override final {
    519515                if ( inCache( node ) ) return nullptr;
    520516                CatchStmt::Kind kind;
     
    527523                        break;
    528524                default:
    529                         assertf(false, "Invalid ast::ExceptionKind: %d\n", node->kind);
     525                        assertf(false, "Invalid ast::CatchStmt::Kind: %d\n", node->kind);
    530526                }
    531527                auto stmt = new CatchStmt(
     
    535531                        get<Statement>().accept1( node->body )
    536532                );
    537                 return clausePostamble( stmt, node ), nullptr;
    538         }
    539 
    540         const ast::FinallyClause * visit( const ast::FinallyClause * node ) override final {
     533                return stmtPostamble( stmt, node );
     534        }
     535
     536        const ast::Stmt * visit( const ast::FinallyStmt * node ) override final {
    541537                if ( inCache( node ) ) return nullptr;
    542538                auto stmt = new FinallyStmt( get<CompoundStmt>().accept1( node->body ) );
    543                 return clausePostamble( stmt, node ), nullptr;
     539                return stmtPostamble( stmt, node );
    544540        }
    545541
     
    951947        }
    952948
    953         const ast::Expr * visit( const ast::DimensionExpr * node ) override final {
    954                 auto expr = visitBaseExpr( node, new DimensionExpr( node->name ) );
    955                 this->node = expr;
    956                 return nullptr;
    957         }
    958 
    959949        const ast::Expr * visit( const ast::AsmExpr * node ) override final {
    960950                auto expr = visitBaseExpr( node,
     
    14771467                return strict_dynamic_cast< ast::Decl * >( node );
    14781468        }
    1479        
     1469
    14801470        ConverterOldToNew() = default;
    14811471        ConverterOldToNew(const ConverterOldToNew &) = delete;
     
    15051495                getAccept1< ast::type, decltype( old->child ) >( old->child )
    15061496
    1507 
    15081497        template<typename NewT, typename OldC>
    15091498        std::vector< ast::ptr<NewT> > getAcceptV( const OldC& old ) {
     
    15201509#       define GET_ACCEPT_V(child, type) \
    15211510                getAcceptV< ast::type, decltype( old->child ) >( old->child )
    1522 
    1523 #       define GET_ACCEPT_E(child, type) \
    1524                 getAccept1< ast::type, decltype( old->base ) >( old->base )
    15251511
    15261512        template<typename NewT, typename OldC>
     
    17241710        }
    17251711
    1726         // Convert SynTree::EnumDecl to AST::EnumDecl
    17271712        virtual void visit( const EnumDecl * old ) override final {
    17281713                if ( inCache( old ) ) return;
     
    17311716                        old->name,
    17321717                        GET_ACCEPT_V(attributes, Attribute),
    1733                         { old->linkage.val },
    1734                         GET_ACCEPT_1(base, Type),
    1735                         old->enumValues
     1718                        { old->linkage.val }
    17361719                );
    17371720                cache.emplace( old, decl );
     
    17431726                decl->uniqueId   = old->uniqueId;
    17441727                decl->storage    = { old->storageClasses.val };
     1728
    17451729                this->node = decl;
    17461730        }
     
    19031887                        old->location,
    19041888                        GET_ACCEPT_1(condition, Expr),
    1905                         GET_ACCEPT_V(statements, CaseClause),
     1889                        GET_ACCEPT_V(statements, Stmt),
    19061890                        GET_LABELS_V(old->labels)
    19071891                );
     
    19111895        virtual void visit( const CaseStmt * old ) override final {
    19121896                if ( inCache( old ) ) return;
    1913                 this->node = new ast::CaseClause(
     1897                this->node = new ast::CaseStmt(
    19141898                        old->location,
    19151899                        GET_ACCEPT_1(condition, Expr),
    1916                         GET_ACCEPT_V(stmts, Stmt)
    1917                 );
    1918                 auto labels = GET_LABELS_V(old->labels);
    1919                 assertf(labels.empty(), "Labels found on CaseStmt.");
     1900                        GET_ACCEPT_V(stmts, Stmt),
     1901                        GET_LABELS_V(old->labels)
     1902                );
    19201903                cache.emplace( old, this->node );
    19211904        }
     
    20252008                        old->location,
    20262009                        GET_ACCEPT_1(block, CompoundStmt),
    2027                         GET_ACCEPT_V(handlers, CatchClause),
    2028                         GET_ACCEPT_1(finallyBlock, FinallyClause),
     2010                        GET_ACCEPT_V(handlers, CatchStmt),
     2011                        GET_ACCEPT_1(finallyBlock, FinallyStmt),
    20292012                        GET_LABELS_V(old->labels)
    20302013                );
     
    20462029                }
    20472030
    2048                 this->node = new ast::CatchClause(
     2031                this->node = new ast::CatchStmt(
    20492032                        old->location,
    20502033                        kind,
    20512034                        GET_ACCEPT_1(decl, Decl),
    20522035                        GET_ACCEPT_1(cond, Expr),
    2053                         GET_ACCEPT_1(body, Stmt)
    2054                 );
    2055                 auto labels = GET_LABELS_V(old->labels);
    2056                 assertf(labels.empty(), "Labels found on CatchStmt.");
     2036                        GET_ACCEPT_1(body, Stmt),
     2037                        GET_LABELS_V(old->labels)
     2038                );
    20572039                cache.emplace( old, this->node );
    20582040        }
     
    20602042        virtual void visit( const FinallyStmt * old ) override final {
    20612043                if ( inCache( old ) ) return;
    2062                 this->node = new ast::FinallyClause(
    2063                         old->location,
    2064                         GET_ACCEPT_1(block, CompoundStmt)
    2065                 );
    2066                 auto labels = GET_LABELS_V(old->labels);
    2067                 assertf(labels.empty(), "Labels found on FinallyStmt.");
     2044                this->node = new ast::FinallyStmt(
     2045                        old->location,
     2046                        GET_ACCEPT_1(block, CompoundStmt),
     2047                        GET_LABELS_V(old->labels)
     2048                );
    20682049                cache.emplace( old, this->node );
    20692050        }
     
    24692450
    24702451        virtual void visit( const DimensionExpr * old ) override final {
    2471                 this->node = visitBaseExpr( old,
    2472                         new ast::DimensionExpr( old->location, old->name )
    2473                 );
     2452                // DimensionExpr gets desugared away in Validate.
     2453                // As long as new-AST passes don't use it, this cheap-cheerful error
     2454                // detection helps ensure that these occurrences have been compiled
     2455                // away, as expected.  To move the DimensionExpr boundary downstream
     2456                // or move the new-AST translation boundary upstream, implement
     2457                // DimensionExpr in the new AST and implement a conversion.
     2458                (void) old;
     2459                assert(false && "DimensionExpr should not be present at new-AST boundary");
    24742460        }
    24752461
     
    27252711
    27262712                for (auto & param : foralls) {
    2727                         ty->forall.emplace_back(new ast::TypeInstType(param));
     2713                        ty->forall.emplace_back(new ast::TypeInstType(param->name, param));
    27282714                        for (auto asst : param->assertions) {
    27292715                                ty->assertions.emplace_back(new ast::VariableExpr({}, asst));
     
    27752761        }
    27762762
    2777         virtual void visit( const EnumInstType * old ) override final { // Here is visiting the EnumInst Decl not the usage.
    2778                 ast::EnumInstType * ty; 
     2763        virtual void visit( const EnumInstType * old ) override final {
     2764                ast::EnumInstType * ty;
    27792765                if ( old->baseEnum ) {
    2780                         ty = new ast::EnumInstType{ // Probably here: missing the specification of the base
     2766                        ty = new ast::EnumInstType{
    27812767                                GET_ACCEPT_1( baseEnum, EnumDecl ),
    27822768                                cv( old ),
  • src/AST/Decl.cpp

    r2e9b59b rba897d21  
    6868        }
    6969        for (auto & tp : this->type_params) {
    70                 ftype->forall.emplace_back(new TypeInstType(tp));
     70                ftype->forall.emplace_back(new TypeInstType(tp->name, tp));
    7171                for (auto & ap: tp->assertions) {
    7272                        ftype->assertions.emplace_back(new VariableExpr(loc, ap));
     
    136136
    137137        auto it = enumValues.find( enumerator->name );
    138        
    139138        if ( it != enumValues.end() ) {
    140                        
    141                 // Handle typed enum by casting the value in (C++) compiler
    142                 // if ( base ) { // A typed enum
    143                 //      if ( const BasicType * bt = dynamic_cast<const BasicType *>(base) ) {
    144                 //              switch( bt->kind ) {
    145                 //                      case BasicType::Kind::Bool:     value = (bool) it->second; break;
    146                 //                      case BasicType::Kind::Char: value = (char) it->second; break;
    147                 //                      case BasicType::Kind::SignedChar: value = (signed char) it->second; break;
    148                 //                      case BasicType::Kind::UnsignedChar: value = (unsigned char) it->second; break;
    149                 //                      case BasicType::Kind::ShortSignedInt: value = (short signed int) it->second; break;
    150                 //                      case BasicType::Kind::SignedInt: value = (signed int) it->second; break;
    151                 //                      case BasicType::Kind::UnsignedInt: value = (unsigned int) it->second; break;
    152                 //                      case BasicType::Kind::LongSignedInt: value = (long signed int) it->second; break;
    153                 //                      case BasicType::Kind::LongUnsignedInt: value = (long unsigned int) it->second; break;
    154                 //                      case BasicType::Kind::LongLongSignedInt: value = (long long signed int) it->second; break;
    155                 //                      case BasicType::Kind::LongLongUnsignedInt: value = (long long unsigned int) it->second; break;
    156                 //                      // TODO: value should be able to handle long long unsigned int
    157 
    158                 //                      default:
    159                 //                      value = it->second;
    160                 //              }
    161                 //      }
    162                 // } else {
    163                         value = it->second;
    164                 //}
    165 
     139                value = it->second;
    166140                return true;
    167141        }
  • src/AST/Decl.hpp

    r2e9b59b rba897d21  
    302302class EnumDecl final : public AggregateDecl {
    303303public:
    304         ptr<Type> base;
    305 
    306304        EnumDecl( const CodeLocation& loc, const std::string& name,
    307                 std::vector<ptr<Attribute>>&& attrs = {}, Linkage::Spec linkage = Linkage::Cforall, Type * base = nullptr,
    308                  std::unordered_map< std::string, long long > enumValues = std::unordered_map< std::string, long long >() )
    309         : AggregateDecl( loc, name, std::move(attrs), linkage ), base(base), enumValues(enumValues) {}
     305                std::vector<ptr<Attribute>>&& attrs = {}, Linkage::Spec linkage = Linkage::Cforall )
     306        : AggregateDecl( loc, name, std::move(attrs), linkage ), enumValues() {}
    310307
    311308        /// gets the integer value for this enumerator, returning true iff value found
    312         // Maybe it is not used in producing the enum value
    313309        bool valueOf( const Decl * enumerator, long long& value ) const;
    314310
     
    316312
    317313        const char * typeString() const override { return aggrString( Enum ); }
    318 
    319         bool isTyped() {return base && base.get();}
    320314
    321315private:
  • src/AST/Expr.hpp

    r2e9b59b rba897d21  
    604604};
    605605
    606 class DimensionExpr final : public Expr {
    607 public:
    608         std::string name;
    609 
    610         DimensionExpr( const CodeLocation & loc, std::string name )
    611         : Expr( loc ), name( name ) {}
    612 
    613         const Expr * accept( Visitor & v ) const override { return v.visit( this ); }
    614 private:
    615         DimensionExpr * clone() const override { return new DimensionExpr{ *this }; }
    616         MUTATE_FRIEND
    617 };
    618 
    619606/// A GCC "asm constraint operand" used in an asm statement, e.g. `[output] "=f" (result)`.
    620607/// https://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/Machine-Constraints.html#Machine-Constraints
  • src/AST/Fwd.hpp

    r2e9b59b rba897d21  
    4747class ForStmt;
    4848class SwitchStmt;
    49 class CaseClause;
     49class CaseStmt;
    5050class BranchStmt;
    5151class ReturnStmt;
    5252class ThrowStmt;
    5353class TryStmt;
    54 class CatchClause;
    55 class FinallyClause;
     54class CatchStmt;
     55class FinallyStmt;
    5656class SuspendStmt;
    5757class WaitForStmt;
     
    8484class CommaExpr;
    8585class TypeExpr;
    86 class DimensionExpr;
    8786class AsmExpr;
    8887class ImplicitCopyCtorExpr;
     
    142141
    143142class TranslationUnit;
    144 class TranslationGlobal;
     143// TODO: Get from the TranslationUnit:
     144extern ptr<Type> sizeType;
     145extern const FunctionDecl * dereferenceOperator;
     146extern const StructDecl   * dtorStruct;
     147extern const FunctionDecl * dtorStructDestroy;
    145148
    146149}
  • src/AST/GenericSubstitution.cpp

    r2e9b59b rba897d21  
    4545                        visit_children = false;
    4646                        const AggregateDecl * aggr = ty->aggr();
    47                         sub = TypeSubstitution( aggr->params, ty->params );
     47                        sub = TypeSubstitution{ aggr->params.begin(), aggr->params.end(), ty->params.begin() };
    4848                }
    4949
  • src/AST/Node.cpp

    r2e9b59b rba897d21  
    99// Author           : Thierry Delisle
    1010// Created On       : Thu May 16 14:16:00 2019
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Mar 25 10:30:00 2022
    13 // Update Count     : 4
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Tue Feb  1 09:09:39 2022
     13// Update Count     : 3
    1414//
    1515
     
    1919#include <csignal>  // MEMORY DEBUG -- for raise
    2020#include <iostream>
    21 #include <utility>
    2221
    2322#include "Attribute.hpp"
     
    7776void ast::ptr_base<node_t, ref_t>::_check() const {
    7877        // if(node) assert(node->was_ever_strong == false || node->strong_count > 0);
    79 }
    80 
    81 template< typename node_t, enum ast::Node::ref_type ref_t >
    82 void ast::ptr_base<node_t, ref_t>::swap( ptr_base & other ) noexcept {
    83         std::swap( this->node, other.node );
    84         _trap( this->node );
    85         _trap( other.node );
    8678}
    8779
     
    160152template class ast::ptr_base< ast::SwitchStmt, ast::Node::ref_type::weak >;
    161153template class ast::ptr_base< ast::SwitchStmt, ast::Node::ref_type::strong >;
    162 template class ast::ptr_base< ast::CaseClause, ast::Node::ref_type::weak >;
    163 template class ast::ptr_base< ast::CaseClause, ast::Node::ref_type::strong >;
     154template class ast::ptr_base< ast::CaseStmt, ast::Node::ref_type::weak >;
     155template class ast::ptr_base< ast::CaseStmt, ast::Node::ref_type::strong >;
    164156template class ast::ptr_base< ast::BranchStmt, ast::Node::ref_type::weak >;
    165157template class ast::ptr_base< ast::BranchStmt, ast::Node::ref_type::strong >;
     
    170162template class ast::ptr_base< ast::TryStmt, ast::Node::ref_type::weak >;
    171163template class ast::ptr_base< ast::TryStmt, ast::Node::ref_type::strong >;
    172 template class ast::ptr_base< ast::CatchClause, ast::Node::ref_type::weak >;
    173 template class ast::ptr_base< ast::CatchClause, ast::Node::ref_type::strong >;
    174 template class ast::ptr_base< ast::FinallyClause, ast::Node::ref_type::weak >;
    175 template class ast::ptr_base< ast::FinallyClause, ast::Node::ref_type::strong >;
     164template class ast::ptr_base< ast::CatchStmt, ast::Node::ref_type::weak >;
     165template class ast::ptr_base< ast::CatchStmt, ast::Node::ref_type::strong >;
     166template class ast::ptr_base< ast::FinallyStmt, ast::Node::ref_type::weak >;
     167template class ast::ptr_base< ast::FinallyStmt, ast::Node::ref_type::strong >;
    176168template class ast::ptr_base< ast::WaitForStmt, ast::Node::ref_type::weak >;
    177169template class ast::ptr_base< ast::WaitForStmt, ast::Node::ref_type::strong >;
  • src/AST/Node.hpp

    r2e9b59b rba897d21  
    1010// Created On       : Wed May 8 10:27:04 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Mar 25 10:33:00 2022
    13 // Update Count     : 7
     12// Last Modified On : Fri Jun 5 9:47:00 2020
     13// Update Count     : 6
    1414//
    1515
     
    103103
    104104/// Mutate a node field (only clones if not equal to existing value)
    105 template<typename node_t, typename super_t, typename field_t, typename assn_t>
    106 const node_t * mutate_field( const node_t * node, field_t super_t::* field, assn_t && val ) {
     105template<typename node_t, typename parent_t, typename field_t, typename assn_t>
     106const node_t * mutate_field( const node_t * node, field_t parent_t::* field, assn_t && val ) {
    107107        // skip mutate if equivalent
    108108        if ( node->*field == val ) return node;
     
    115115
    116116/// Mutate a single index of a node field (only clones if not equal to existing value)
    117 template<typename node_t, typename super_t, typename coll_t, typename ind_t, typename field_t>
     117template<typename node_t, typename parent_t, typename coll_t, typename ind_t, typename field_t>
    118118const node_t * mutate_field_index(
    119         const node_t * node, coll_t super_t::* field, ind_t i, field_t && val
     119        const node_t * node, coll_t parent_t::* field, ind_t i, field_t && val
    120120) {
    121121        // skip mutate if equivalent
     
    129129
    130130/// Mutate an entire indexed collection by cloning to accepted value
    131 template<typename node_t, typename super_t, typename coll_t>
    132 const node_t * mutate_each( const node_t * node, coll_t super_t::* field, Visitor & v ) {
     131template<typename node_t, typename parent_t, typename coll_t>
     132const node_t * mutate_each( const node_t * node, coll_t parent_t::* field, Visitor & v ) {
    133133        for ( unsigned i = 0; i < (node->*field).size(); ++i ) {
    134134                node = mutate_field_index( node, field, i, (node->*field)[i]->accept( v ) );
     
    230230        }
    231231
    232         /// Swaps the nodes contained within two pointers.
    233         void swap( ptr_base & other ) noexcept;
    234 
    235232        const node_t * get() const { _check(); return  node; }
    236233        const node_t * operator->() const { _check(); return  node; }
     
    295292template< typename node_t >
    296293using readonly = ptr_base< node_t, Node::ref_type::weak >;
    297 
    298 /// Non-member swap that an participate in overload resolution.
    299 template< typename node_t, enum Node::ref_type ref_t >
    300 void swap( ptr_base< node_t, ref_t > & l, ptr_base< node_t, ref_t > & r ) {
    301         l.swap( r );
    302 }
    303 
    304294}
    305295
  • src/AST/Pass.hpp

    r2e9b59b rba897d21  
    149149        const ast::Stmt *             visit( const ast::ForStmt              * ) override final;
    150150        const ast::Stmt *             visit( const ast::SwitchStmt           * ) override final;
    151         const ast::CaseClause *       visit( const ast::CaseClause           * ) override final;
     151        const ast::Stmt *             visit( const ast::CaseStmt             * ) override final;
    152152        const ast::Stmt *             visit( const ast::BranchStmt           * ) override final;
    153153        const ast::Stmt *             visit( const ast::ReturnStmt           * ) override final;
    154154        const ast::Stmt *             visit( const ast::ThrowStmt            * ) override final;
    155155        const ast::Stmt *             visit( const ast::TryStmt              * ) override final;
    156         const ast::CatchClause *      visit( const ast::CatchClause          * ) override final;
    157         const ast::FinallyClause *    visit( const ast::FinallyClause        * ) override final;
     156        const ast::Stmt *             visit( const ast::CatchStmt            * ) override final;
     157        const ast::Stmt *             visit( const ast::FinallyStmt          * ) override final;
    158158        const ast::Stmt *             visit( const ast::SuspendStmt          * ) override final;
    159159        const ast::Stmt *             visit( const ast::WaitForStmt          * ) override final;
     
    184184        const ast::Expr *             visit( const ast::CommaExpr            * ) override final;
    185185        const ast::Expr *             visit( const ast::TypeExpr             * ) override final;
    186         const ast::Expr *             visit( const ast::DimensionExpr        * ) override final;
    187186        const ast::Expr *             visit( const ast::AsmExpr              * ) override final;
    188187        const ast::Expr *             visit( const ast::ImplicitCopyCtorExpr * ) override final;
  • src/AST/Pass.impl.hpp

    r2e9b59b rba897d21  
    354354                        // Take all the elements that are different in 'values'
    355355                        // and swap them into 'container'
    356                         if( values[i] != nullptr ) swap(container[i], values[i]);
     356                        if( values[i] != nullptr ) std::swap(container[i], values[i]);
    357357                }
    358358
     
    399399
    400400        template< typename core_t >
    401         template<typename node_t, typename super_t, typename field_t>
     401        template<typename node_t, typename parent_t, typename child_t>
    402402        void ast::Pass< core_t >::maybe_accept(
    403403                const node_t * & parent,
    404                 field_t super_t::*field
     404                child_t parent_t::*child
    405405        ) {
    406                 static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" );
    407 
    408                 if(__pass::skip(parent->*field)) return;
    409                 const auto & old_val = __pass::get(parent->*field, 0);
     406                static_assert( std::is_base_of<parent_t, node_t>::value, "Error deducing member object" );
     407
     408                if(__pass::skip(parent->*child)) return;
     409                const auto & old_val = __pass::get(parent->*child, 0);
    410410
    411411                static_assert( !std::is_same<const ast::Node * &, decltype(old_val)>::value, "ERROR");
     
    417417                if( new_val.differs ) {
    418418                        auto new_parent = __pass::mutate<core_t>(parent);
    419                         new_val.apply(new_parent, field);
     419                        new_val.apply(new_parent, child);
    420420                        parent = new_parent;
    421421                }
     
    423423
    424424        template< typename core_t >
    425         template<typename node_t, typename super_t, typename field_t>
     425        template<typename node_t, typename parent_t, typename child_t>
    426426        void ast::Pass< core_t >::maybe_accept_as_compound(
    427427                const node_t * & parent,
    428                 field_t super_t::*child
     428                child_t parent_t::*child
    429429        ) {
    430                 static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" );
     430                static_assert( std::is_base_of<parent_t, node_t>::value, "Error deducing member object" );
    431431
    432432                if(__pass::skip(parent->*child)) return;
     
    575575                        __pass::symtab::addId( core, 0, func );
    576576                        if ( __visit_children() ) {
     577                                // parameter declarations
     578                                maybe_accept( node, &FunctionDecl::params );
     579                                maybe_accept( node, &FunctionDecl::returns );
     580                                // type params and assertions
    577581                                maybe_accept( node, &FunctionDecl::type_params );
    578582                                maybe_accept( node, &FunctionDecl::assertions );
    579                                 maybe_accept( node, &FunctionDecl::params );
    580                                 maybe_accept( node, &FunctionDecl::returns );
    581                                 maybe_accept( node, &FunctionDecl::type );
    582583                                // First remember that we are now within a function.
    583584                                ValueGuard< bool > oldInFunction( inFunction );
     
    892893        if ( __visit_children() ) {
    893894                maybe_accept( node, &SwitchStmt::cond  );
    894                 maybe_accept( node, &SwitchStmt::cases );
     895                maybe_accept( node, &SwitchStmt::stmts );
    895896        }
    896897
     
    899900
    900901//--------------------------------------------------------------------------
    901 // CaseClause
    902 template< typename core_t >
    903 const ast::CaseClause * ast::Pass< core_t >::visit( const ast::CaseClause * node ) {
    904         VISIT_START( node );
    905 
    906         if ( __visit_children() ) {
    907                 maybe_accept( node, &CaseClause::cond  );
    908                 maybe_accept( node, &CaseClause::stmts );
    909         }
    910 
    911         VISIT_END( CaseClause, node );
     902// CaseStmt
     903template< typename core_t >
     904const ast::Stmt * ast::Pass< core_t >::visit( const ast::CaseStmt * node ) {
     905        VISIT_START( node );
     906
     907        if ( __visit_children() ) {
     908                maybe_accept( node, &CaseStmt::cond  );
     909                maybe_accept( node, &CaseStmt::stmts );
     910        }
     911
     912        VISIT_END( Stmt, node );
    912913}
    913914
     
    963964
    964965//--------------------------------------------------------------------------
    965 // CatchClause
    966 template< typename core_t >
    967 const ast::CatchClause * ast::Pass< core_t >::visit( const ast::CatchClause * node ) {
     966// CatchStmt
     967template< typename core_t >
     968const ast::Stmt * ast::Pass< core_t >::visit( const ast::CatchStmt * node ) {
    968969        VISIT_START( node );
    969970
     
    971972                // catch statements introduce a level of scope (for the caught exception)
    972973                guard_symtab guard { *this };
    973                 maybe_accept( node, &CatchClause::decl );
    974                 maybe_accept( node, &CatchClause::cond );
    975                 maybe_accept_as_compound( node, &CatchClause::body );
    976         }
    977 
    978         VISIT_END( CatchClause, node );
    979 }
    980 
    981 //--------------------------------------------------------------------------
    982 // FinallyClause
    983 template< typename core_t >
    984 const ast::FinallyClause * ast::Pass< core_t >::visit( const ast::FinallyClause * node ) {
    985         VISIT_START( node );
    986 
    987         if ( __visit_children() ) {
    988                 maybe_accept( node, &FinallyClause::body );
    989         }
    990 
    991         VISIT_END( FinallyClause, node );
     974                maybe_accept( node, &CatchStmt::decl );
     975                maybe_accept( node, &CatchStmt::cond );
     976                maybe_accept_as_compound( node, &CatchStmt::body );
     977        }
     978
     979        VISIT_END( Stmt, node );
     980}
     981
     982//--------------------------------------------------------------------------
     983// FinallyStmt
     984template< typename core_t >
     985const ast::Stmt * ast::Pass< core_t >::visit( const ast::FinallyStmt * node ) {
     986        VISIT_START( node );
     987
     988        if ( __visit_children() ) {
     989                maybe_accept( node, &FinallyStmt::body );
     990        }
     991
     992        VISIT_END( Stmt, node );
    992993}
    993994
     
    10531054                        auto n = __pass::mutate<core_t>(node);
    10541055                        for(size_t i = 0; i < new_clauses.size(); i++) {
    1055                                 if(new_clauses.at(i).target.func != nullptr) swap(n->clauses.at(i).target.func, new_clauses.at(i).target.func);
     1056                                if(new_clauses.at(i).target.func != nullptr) std::swap(n->clauses.at(i).target.func, new_clauses.at(i).target.func);
    10561057
    10571058                                for(size_t j = 0; j < new_clauses.at(i).target.args.size(); j++) {
    1058                                         if(new_clauses.at(i).target.args.at(j) != nullptr) swap(n->clauses.at(i).target.args.at(j), new_clauses.at(i).target.args.at(j));
     1059                                        if(new_clauses.at(i).target.args.at(j) != nullptr) std::swap(n->clauses.at(i).target.args.at(j), new_clauses.at(i).target.args.at(j));
    10591060                                }
    10601061
    1061                                 if(new_clauses.at(i).stmt != nullptr) swap(n->clauses.at(i).stmt, new_clauses.at(i).stmt);
    1062                                 if(new_clauses.at(i).cond != nullptr) swap(n->clauses.at(i).cond, new_clauses.at(i).cond);
     1062                                if(new_clauses.at(i).stmt != nullptr) std::swap(n->clauses.at(i).stmt, new_clauses.at(i).stmt);
     1063                                if(new_clauses.at(i).cond != nullptr) std::swap(n->clauses.at(i).cond, new_clauses.at(i).cond);
    10631064                        }
    10641065                        node = n;
     
    15151516                }
    15161517                maybe_accept( node, &TypeExpr::type );
    1517         }
    1518 
    1519         VISIT_END( Expr, node );
    1520 }
    1521 
    1522 //--------------------------------------------------------------------------
    1523 // DimensionExpr
    1524 template< typename core_t >
    1525 const ast::Expr * ast::Pass< core_t >::visit( const ast::DimensionExpr * node ) {
    1526         VISIT_START( node );
    1527 
    1528         if ( __visit_children() ) {
    1529                 guard_symtab guard { *this };
    1530                 maybe_accept( node, &DimensionExpr::result );
    15311518        }
    15321519
     
    18721859
    18731860        if ( __visit_children() ) {
    1874                 maybe_accept( node, &PointerType::dimension );
     1861                // xxx - should PointerType visit/mutate dimension?
    18751862                maybe_accept( node, &PointerType::base );
    18761863        }
     
    21642151
    21652152        if ( __visit_children() ) {
    2166                 bool mutated = false;
    2167                 std::unordered_map< ast::TypeInstType::TypeEnvKey, ast::ptr< ast::Type > > new_map;
    2168                 for ( const auto & p : node->typeEnv ) {
    2169                         guard_symtab guard { *this };
    2170                         auto new_node = p.second->accept( *this );
    2171                         if (new_node != p.second) mutated = true;
    2172                         new_map.insert({ p.first, new_node });
    2173                 }
    2174                 if (mutated) {
    2175                         auto new_node = __pass::mutate<core_t>( node );
    2176                         new_node->typeEnv.swap( new_map );
    2177                         node = new_node;
     2153                {
     2154                        bool mutated = false;
     2155                        std::unordered_map< ast::TypeInstType::TypeEnvKey, ast::ptr< ast::Type > > new_map;
     2156                        for ( const auto & p : node->typeEnv ) {
     2157                                guard_symtab guard { *this };
     2158                                auto new_node = p.second->accept( *this );
     2159                                if (new_node != p.second) mutated = true;
     2160                                new_map.insert({ p.first, new_node });
     2161                        }
     2162                        if (mutated) {
     2163                                auto new_node = __pass::mutate<core_t>( node );
     2164                                new_node->typeEnv.swap( new_map );
     2165                                node = new_node;
     2166                        }
    21782167                }
    21792168        }
  • src/AST/Pass.proto.hpp

    r2e9b59b rba897d21  
    2626
    2727struct PureVisitor;
    28 
    29 template<typename node_t>
    30 node_t * deepCopy( const node_t * localRoot );
    3128
    3229namespace __pass {
     
    399396                static inline auto addStructFwd( core_t & core, int, const ast::StructDecl * decl ) -> decltype( core.symtab.addStruct( decl ), void() ) {
    400397                        ast::StructDecl * fwd = new ast::StructDecl( decl->location, decl->name );
    401                         for ( const auto & param : decl->params ) {
    402                                 fwd->params.push_back( deepCopy( param.get() ) );
    403                         }
     398                        fwd->params = decl->params;
    404399                        core.symtab.addStruct( fwd );
    405400                }
     
    410405                template<typename core_t>
    411406                static inline auto addUnionFwd( core_t & core, int, const ast::UnionDecl * decl ) -> decltype( core.symtab.addUnion( decl ), void() ) {
    412                         ast::UnionDecl * fwd = new ast::UnionDecl( decl->location, decl->name );
    413                         for ( const auto & param : decl->params ) {
    414                                 fwd->params.push_back( deepCopy( param.get() ) );
    415                         }
     407                        UnionDecl * fwd = new UnionDecl( decl->location, decl->name );
     408                        fwd->params = decl->params;
    416409                        core.symtab.addUnion( fwd );
    417410                }
  • src/AST/Print.cpp

    r2e9b59b rba897d21  
    210210                }
    211211
    212                 auto ptrToEnum = dynamic_cast<const ast::EnumDecl *>(node);
    213                 if ( ! short_mode && ptrToEnum && ptrToEnum->base ) {
    214                         os << endl << indent << ".. with (enum) base" << endl;
    215                         ++indent;
    216                         ptrToEnum->base->accept( *this );
    217                         --indent; 
    218                 }
    219 
    220212                os << endl;
    221213        }
     
    597589
    598590                ++indent;
    599                 for ( const ast::CaseClause * stmt : node->cases ) {
     591                for ( const ast::Stmt * stmt : node->stmts ) {
    600592                        stmt->accept( *this );
    601593                }
     
    605597        }
    606598
    607         virtual const ast::CaseClause * visit( const ast::CaseClause * node ) override final {
     599        virtual const ast::Stmt * visit( const ast::CaseStmt * node ) override final {
    608600                if ( node->isDefault() ) {
    609601                        os << indent << "Default ";
     
    687679
    688680                os << indent-1 << "... and handlers:" << endl;
    689                 for ( const ast::CatchClause * stmt : node->handlers ) {
     681                for ( const ast::CatchStmt * stmt : node->handlers ) {
    690682                        os << indent;
    691683                        stmt->accept( *this );
     
    701693        }
    702694
    703         virtual const ast::CatchClause * visit( const ast::CatchClause * node ) override final {
     695        virtual const ast::Stmt * visit( const ast::CatchStmt * node ) override final {
    704696                os << "Catch ";
    705697                switch ( node->kind ) {
     
    726718        }
    727719
    728         virtual const ast::FinallyClause * visit( const ast::FinallyClause * node ) override final {
     720        virtual const ast::Stmt * visit( const ast::FinallyStmt * node ) override final {
    729721                os << "Finally Statement" << endl;
    730722                os << indent << "... with block:" << endl;
     
    10961088        virtual const ast::Expr * visit( const ast::TypeExpr * node ) override final {
    10971089                safe_print( node->type );
    1098                 postprint( node );
    1099 
    1100                 return node;
    1101         }
    1102 
    1103         virtual const ast::Expr * visit( const ast::DimensionExpr * node ) override final {
    1104                 os << "Type-Sys Value: " << node->name;
    11051090                postprint( node );
    11061091
  • src/AST/Stmt.hpp

    r2e9b59b rba897d21  
    99// Author           : Aaron B. Moss
    1010// Created On       : Wed May  8 13:00:00 2019
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Mon Mar 28  9:50:00 2022
    13 // Update Count     : 35
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Wed Feb  2 20:06:41 2022
     13// Update Count     : 34
    1414//
    1515
     
    4747  private:
    4848        Stmt * clone() const override = 0;
    49         MUTATE_FRIEND
    50 };
    51 
    52 // Base statement component node (only serves to group them).
    53 class StmtClause : public ParseNode {
    54   public:
    55         // This is for constructs that belong with the statements but are
    56         // not statements themselves, usually some sort of clause. Often these
    57         // can (and should) be folded into the appropriate parent node, but if
    58         // they cannot be, they are sub-types of this type, for organization.
    59 
    60     StmtClause( const CodeLocation & loc )
    61                 : ParseNode(loc) {}
    62 
    63   private:
    64         StmtClause * clone() const override = 0;
    6549        MUTATE_FRIEND
    6650};
     
    174158  public:
    175159        ptr<Expr> cond;
    176         std::vector<ptr<CaseClause>> cases;
    177 
    178         SwitchStmt( const CodeLocation & loc, const Expr * cond,
    179                                 const std::vector<ptr<CaseClause>> && cases,
     160        std::vector<ptr<Stmt>> stmts;
     161
     162        SwitchStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts,
    180163                                const std::vector<Label> && labels = {} )
    181                 : Stmt(loc, std::move(labels)), cond(cond), cases(std::move(cases)) {}
     164                : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}
    182165
    183166        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     
    188171
    189172// Case label: case ...: or default:
    190 class CaseClause final : public StmtClause {
     173class CaseStmt final : public Stmt {
    191174  public:
    192175        // Null for the default label.
     
    194177        std::vector<ptr<Stmt>> stmts;
    195178
    196         CaseClause( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts )
    197                 : StmtClause(loc), cond(cond), stmts(std::move(stmts)) {}
     179        CaseStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts,
     180                          const std::vector<Label> && labels = {} )
     181                : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}
    198182
    199183        bool isDefault() const { return !cond; }
    200184
    201         const CaseClause * accept( Visitor & v ) const override { return v.visit( this ); }
    202   private:
    203         CaseClause * clone() const override { return new CaseClause{ *this }; }
     185        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     186  private:
     187        CaseStmt * clone() const override { return new CaseStmt{ *this }; }
    204188        MUTATE_FRIEND
    205189};
     
    314298  public:
    315299        ptr<CompoundStmt> body;
    316         std::vector<ptr<CatchClause>> handlers;
    317         ptr<FinallyClause> finally;
     300        std::vector<ptr<CatchStmt>> handlers;
     301        ptr<FinallyStmt> finally;
    318302
    319303        TryStmt( const CodeLocation & loc, const CompoundStmt * body,
    320                          const std::vector<ptr<CatchClause>> && handlers, const FinallyClause * finally,
     304                         const std::vector<ptr<CatchStmt>> && handlers, const FinallyStmt * finally,
    321305                         const std::vector<Label> && labels = {} )
    322306                : Stmt(loc, std::move(labels)), body(body), handlers(std::move(handlers)), finally(finally) {}
     
    329313
    330314// Catch clause of try statement
    331 class CatchClause final : public StmtClause {
     315class CatchStmt final : public Stmt {
    332316  public:
    333317        ptr<Decl> decl;
     
    336320        ExceptionKind kind;
    337321
    338         CatchClause( const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond,
    339                            const Stmt * body )
    340                 : StmtClause(loc), decl(decl), cond(cond), body(body), kind(kind) {}
    341 
    342         const CatchClause * accept( Visitor & v ) const override { return v.visit( this ); }
    343   private:
    344         CatchClause * clone() const override { return new CatchClause{ *this }; }
     322        CatchStmt( const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond,
     323                           const Stmt * body, const std::vector<Label> && labels = {} )
     324                : Stmt(loc, std::move(labels)), decl(decl), cond(cond), body(body), kind(kind) {}
     325
     326        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     327  private:
     328        CatchStmt * clone() const override { return new CatchStmt{ *this }; }
    345329        MUTATE_FRIEND
    346330};
    347331
    348332// Finally clause of try statement
    349 class FinallyClause final : public StmtClause {
     333class FinallyStmt final : public Stmt {
    350334  public:
    351335        ptr<CompoundStmt> body;
    352336
    353         FinallyClause( const CodeLocation & loc, const CompoundStmt * body )
    354                 : StmtClause(loc), body(body) {}
    355 
    356         const FinallyClause * accept( Visitor & v ) const override { return v.visit( this ); }
    357   private:
    358         FinallyClause * clone() const override { return new FinallyClause{ *this }; }
     337        FinallyStmt( const CodeLocation & loc, const CompoundStmt * body,
     338                                 std::vector<Label> && labels = {} )
     339                : Stmt(loc, std::move(labels)), body(body) {}
     340
     341        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     342  private:
     343        FinallyStmt * clone() const override { return new FinallyStmt{ *this }; }
    359344        MUTATE_FRIEND
    360345};
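
    The structural change above is the introduction (left column) or removal (right
    column) of StmtClause: with it, case, catch, and finally entries are clauses
    rather than full statements, so they cannot carry the label list every Stmt
    drags along. A toy model of the two shapes (illustrative names, not the real
    CFA classes):

        #include <memory>
        #include <string>
        #include <vector>

        // As statements: a case label gets a label vector it can never use.
        struct Stmt {
            std::vector<std::string> labels;
            virtual ~Stmt() = default;
        };
        struct CaseStmt : Stmt {};

        // As clauses: a label-free base class, and the switch owns its cases
        // as clauses, so labels on a case are ruled out by the type system.
        struct Node { virtual ~Node() = default; };
        struct StmtClause : Node {};
        struct CaseClause : StmtClause {};
        struct SwitchStmt : Node {
            std::vector<std::string> labels;                 // fine on the switch
            std::vector<std::unique_ptr<CaseClause>> cases;  // no labels inside
        };

        int main() {
            SwitchStmt sw;
            sw.cases.push_back( std::make_unique<CaseClause>() );
            return sw.cases.size() == 1 ? 0 : 1;
        }
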
  • src/AST/TranslationUnit.hpp

    r2e9b59b rba897d21  
    1010// Created On       : Tue Jun 11 15:30:00 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Tue Mar 11 11:19:00 2022
    13 // Update Count     : 1
     12// Last Modified On : Tue Jun 11 15:42:00 2019
     13// Update Count     : 0
    1414//
    1515
     
    2323namespace ast {
    2424
    25 class TranslationGlobal {
    26 public:
    27         std::map< UniqueId, Decl * > idMap;
    28 
    29         ptr<Type> sizeType;
    30         const FunctionDecl * dereference;
    31         const StructDecl * dtorStruct;
    32         const FunctionDecl * dtorDestroy;
    33 };
    34 
    3525class TranslationUnit {
    3626public:
    3727        std::list< ptr< Decl > > decls;
    38         TranslationGlobal global;
     28
     29        struct Global {
     30                std::map< UniqueId, Decl * > idMap;
     31
     32                ptr<Type> sizeType;
     33                const FunctionDecl * dereference;
     34                const StructDecl * dtorStruct;
     35                const FunctionDecl * dtorDestroy;
     36        } global;
    3937};
    4038
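
    The two layouts hold the same data; the visible difference is that a
    namespace-scope TranslationGlobal can be named on its own, while the nested
    struct must be written as TranslationUnit::Global by any helper that takes
    just the globals. A small sketch of that naming difference (opaque stand-in
    types, not the real declarations):

        #include <map>

        using UniqueId = unsigned long long;
        struct Decl;  // opaque stand-in for ast::Decl

        // Namespace-scope type: helpers can accept the globals directly.
        struct TranslationGlobal {
            std::map<UniqueId, Decl *> idMap;
        };
        void relink( TranslationGlobal & ) {}

        // Nested type: the helper has to spell the enclosing class.
        struct TranslationUnit {
            struct Global {
                std::map<UniqueId, Decl *> idMap;
            } global;
        };
        void relink( TranslationUnit::Global & ) {}

        int main() {
            TranslationUnit unit;
            relink( unit.global );
        }
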
  • src/AST/Type.cpp

    r2e9b59b rba897d21  
    147147// --- TypeInstType
    148148
    149 TypeInstType::TypeInstType( const TypeDecl * b,
    150         CV::Qualifiers q, std::vector<ptr<Attribute>> && as )
    151 : BaseInstType( b->name, q, move(as) ), base( b ), kind( b->kind ) {}
    152 
    153149void TypeInstType::set_base( const TypeDecl * b ) {
    154150        base = b;
  • src/AST/Type.hpp

    r2e9b59b rba897d21  
    421421                std::vector<ptr<Attribute>> && as = {} )
    422422        : BaseInstType( n, q, std::move(as) ), base( b ), kind( b->kind ) {}
    423 
    424         TypeInstType( const TypeDecl * b,
    425                 CV::Qualifiers q = {}, std::vector<ptr<Attribute>> && as = {} );
    426 
    427423        TypeInstType( const std::string& n, TypeDecl::Kind k, CV::Qualifiers q = {},
    428424                std::vector<ptr<Attribute>> && as = {} )
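
    The constructor removed here is a convenience overload whose body is in the
    Type.cpp hunk above: given only the TypeDecl, it derives the instance name and
    kind from the declaration, where the surviving constructors require the name
    to be spelled out. A toy version of the delegation (illustrative names):

        #include <string>

        struct TypeDecl {
            enum Kind { Dtype, Ftype, Ttype } kind;
            std::string name;
        };

        struct TypeInstType {
            std::string name;
            const TypeDecl * base;
            TypeDecl::Kind kind;

            // The explicit form, present in both revisions.
            TypeInstType( const std::string & n, const TypeDecl * b )
                : name( n ), base( b ), kind( b->kind ) {}

            // The convenience form: derive the name (and kind) from the decl.
            explicit TypeInstType( const TypeDecl * b )
                : TypeInstType( b->name, b ) {}
        };

        int main() {
            TypeDecl td{ TypeDecl::Dtype, "T" };
            TypeInstType a( &td );           // convenience
            TypeInstType b( td.name, &td );  // spelled out
            return a.name == b.name ? 0 : 1;
        }
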
  • src/AST/TypeSubstitution.hpp

    r2e9b59b rba897d21  
    3737  public:
    3838        TypeSubstitution();
    39         template< typename FormalContainer, typename ActualContainer >
    40         TypeSubstitution( FormalContainer formals, ActualContainer actuals );
    4139        template< typename FormalIterator, typename ActualIterator >
    4240        TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin );
     
    7876        bool empty() const;
    7977
    80         template< typename FormalContainer, typename ActualContainer >
    81         void addAll( FormalContainer formals, ActualContainer actuals );
    8278        template< typename FormalIterator, typename ActualIterator >
    83         void addAll( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin );
     79        void add( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin );
    8480
    8581        /// create a new TypeSubstitution using bindings from env containing all of the type variables in expr
     
    116112};
    117113
    118 template< typename FormalContainer, typename ActualContainer >
    119 TypeSubstitution::TypeSubstitution( FormalContainer formals, ActualContainer actuals ) {
    120         assert( formals.size() == actuals.size() );
    121         addAll( formals.begin(), formals.end(), actuals.begin() );
    122 }
    123 
    124 template< typename FormalIterator, typename ActualIterator >
    125 TypeSubstitution::TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
    126         addAll( formalBegin, formalEnd, actualBegin );
    127 }
    128 
    129 template< typename FormalContainer, typename ActualContainer >
    130 void TypeSubstitution::addAll( FormalContainer formals, ActualContainer actuals ) {
    131         assert( formals.size() == actuals.size() );
    132         addAll( formals.begin(), formals.end(), actuals.begin() );
    133 }
    134 
    135114// this is the only place where type parameters outside a function formal may be substituted.
    136115template< typename FormalIterator, typename ActualIterator >
    137 void TypeSubstitution::addAll( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
     116void TypeSubstitution::add( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
    138117        // FormalIterator points to a TypeDecl
    139118        // ActualIterator points to a Type
     
    150129                        } // if
    151130                } else {
    152                         // Is this an error?
     131                       
    153132                } // if
    154133        } // for
    155134}
     135
     136
     137
     138template< typename FormalIterator, typename ActualIterator >
     139TypeSubstitution::TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
     140        add( formalBegin, formalEnd, actualBegin );
     141}
     142
    156143
    157144} // namespace ast
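
    The container overloads that appear on the left wrap the iterator form so a
    length mismatch between formals and actuals trips an assert instead of the
    loop silently walking past the shorter range; the right column keeps only the
    iterator form under the name add. A reduced sketch of that layering, using a
    toy substitution table keyed by name (not the real TypeSubstitution
    interface):

        #include <cassert>
        #include <map>
        #include <string>
        #include <vector>

        struct Subst {
            std::map<std::string, std::string> bind;

            // Iterator form: both revisions keep some version of this.
            template<typename FIt, typename AIt>
            void addAll( FIt f, FIt fEnd, AIt a ) {
                for ( ; f != fEnd; ++f, ++a ) bind[*f] = *a;
            }

            // Container form: checks the lengths line up, then delegates.
            template<typename FC, typename AC>
            void addAll( const FC & formals, const AC & actuals ) {
                assert( formals.size() == actuals.size() );
                addAll( formals.begin(), formals.end(), actuals.begin() );
            }
        };

        int main() {
            std::vector<std::string> formals = { "T" }, actuals = { "int" };
            Subst s;
            s.addAll( formals, actuals );
            assert( s.bind["T"] == "int" );
        }
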
  • src/AST/Util.cpp

    r2e9b59b rba897d21  
    1010// Created On       : Wed Jan 19  9:46:00 2022
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Mar 11 18:07:00 2022
    13 // Update Count     : 1
     12// Last Modified On : Fri Feb 18  9:42:00 2022
     13// Update Count     : 0
    1414//
    1515
    1616#include "Util.hpp"
    1717
     18#include "Decl.hpp"
    1819#include "Node.hpp"
    19 #include "ParseNode.hpp"
    2020#include "Pass.hpp"
    2121#include "TranslationUnit.hpp"
     22#include "Common/ScopedMap.h"
    2223
    2324#include <vector>
     
    4546};
    4647
    47 /// Check that every node that can have a CodeLocation has it set.
    48 struct SetCodeLocationsCore {
    49         void previsit( const ParseNode * node ) {
    50                 assert( node->location.isSet() );
    51         }
    52 };
    53 
    5448struct InvariantCore {
    5549        // To save on the number of visits: this is a kind of composed core.
    5650        // None of the passes should make changes so ordering doesn't matter.
    5751        NoStrongCyclesCore no_strong_cycles;
    58         SetCodeLocationsCore set_code_locations;
    5952
    6053        void previsit( const Node * node ) {
    6154                no_strong_cycles.previsit( node );
    62         }
    63 
    64         void previsit( const ParseNode * node ) {
    65                 no_strong_cycles.previsit( node );
    66                 set_code_locations.previsit( node );
    6755        }
    6856
  • src/AST/Visitor.hpp

    r2e9b59b rba897d21  
    4141    virtual const ast::Stmt *             visit( const ast::ForStmt              * ) = 0;
    4242    virtual const ast::Stmt *             visit( const ast::SwitchStmt           * ) = 0;
    43     virtual const ast::CaseClause *       visit( const ast::CaseClause           * ) = 0;
     43    virtual const ast::Stmt *             visit( const ast::CaseStmt             * ) = 0;
    4444    virtual const ast::Stmt *             visit( const ast::BranchStmt           * ) = 0;
    4545    virtual const ast::Stmt *             visit( const ast::ReturnStmt           * ) = 0;
    4646    virtual const ast::Stmt *             visit( const ast::ThrowStmt            * ) = 0;
    4747    virtual const ast::Stmt *             visit( const ast::TryStmt              * ) = 0;
    48     virtual const ast::CatchClause *      visit( const ast::CatchClause          * ) = 0;
    49     virtual const ast::FinallyClause *    visit( const ast::FinallyClause        * ) = 0;
     48    virtual const ast::Stmt *             visit( const ast::CatchStmt            * ) = 0;
     49    virtual const ast::Stmt *             visit( const ast::FinallyStmt          * ) = 0;
    5050    virtual const ast::Stmt *             visit( const ast::SuspendStmt          * ) = 0;
    5151    virtual const ast::Stmt *             visit( const ast::WaitForStmt          * ) = 0;
     
    7676    virtual const ast::Expr *             visit( const ast::CommaExpr            * ) = 0;
    7777    virtual const ast::Expr *             visit( const ast::TypeExpr             * ) = 0;
    78     virtual const ast::Expr *             visit( const ast::DimensionExpr        * ) = 0;
    7978    virtual const ast::Expr *             visit( const ast::AsmExpr              * ) = 0;
    8079    virtual const ast::Expr *             visit( const ast::ImplicitCopyCtorExpr * ) = 0;
  • src/CodeGen/CodeGenerator.cc

    r2e9b59b rba897d21  
    274274        void CodeGenerator::postvisit( EnumDecl * enumDecl ) {
    275275                extension( enumDecl );
     276                output << "enum ";
     277                genAttributes( enumDecl->get_attributes() );
     278
     279                output << enumDecl->get_name();
     280
    276281                std::list< Declaration* > &memb = enumDecl->get_members();
    277                 if (enumDecl->base && ! memb.empty()) {
    278                         unsigned long long last_val = -1;
     282
     283                if ( ! memb.empty() ) {
     284                        output << " {" << endl;
     285
     286                        ++indent;
    279287                        for ( std::list< Declaration* >::iterator i = memb.begin(); i != memb.end();  i++) {
    280288                                ObjectDecl * obj = dynamic_cast< ObjectDecl* >( *i );
    281289                                assert( obj );
    282                                 output << "static const ";
    283                                 output << genType(enumDecl->base, "", options) << " ";
    284                                 output << mangleName( obj ) << " ";
    285                                 output << " = ";
    286                                 output << "(" << genType(enumDecl->base, "", options) << ")";
    287                                 if ( (BasicType *)(enumDecl->base) && ((BasicType *)(enumDecl->base))->isWholeNumber() ) {
    288                                         if ( obj->get_init() ) {
    289                                                 obj->get_init()->accept( *visitor );
    290                                                 last_val = ((ConstantExpr *)(((SingleInit *)(obj->init))->value))->constant.get_ival();
    291                                         } else {
    292                                                 output << ++last_val;
    293                                         } // if
    294                                 } else {
    295                                         if ( obj->get_init() ) {
    296                                                 obj->get_init()->accept( *visitor );
    297                                         } else {
    298                                                 // Should not reach here!
    299                                         }
    300                                 }
    301                                 output << ";" << endl;
     290                                output << indent << mangleName( obj );
     291                                if ( obj->get_init() ) {
     292                                        output << " = ";
     293                                        obj->get_init()->accept( *visitor );
     294                                } // if
     295                                output << "," << endl;
    302296                        } // for
    303                 } else {
    304                         output << "enum ";
    305                         genAttributes( enumDecl->get_attributes() );
    306 
    307                         output << enumDecl->get_name();
    308 
    309                         if ( ! memb.empty() ) {
    310                                 output << " {" << endl;
    311 
    312                                 ++indent;
    313                                 for ( std::list< Declaration* >::iterator i = memb.begin(); i != memb.end();  i++) {
    314                                         ObjectDecl * obj = dynamic_cast< ObjectDecl* >( *i );
    315                                         assert( obj );
    316                                         output << indent << mangleName( obj );
    317                                         if ( obj->get_init() ) {
    318                                                 output << " = ";
    319                                                 obj->get_init()->accept( *visitor );
    320                                         } // if
    321                                         output << "," << endl;
    322                                 } // for
     297
    323298                        --indent;
     299
    324300                        output << indent << "}";
    325                         } // if
    326301                } // if
    327302        }
     
    372347                                des->accept( *visitor );
    373348                        } else {
    374                                 // otherwise, it has to be a ConstantExpr or CastExpr, initializing array element
     349                                // otherwise, it has to be a ConstantExpr or CastExpr, initializing array element
    375350                                output << "[";
    376351                                des->accept( *visitor );
     
    686661                        output << opInfo->symbol;
    687662                } else {
    688                         // if (dynamic_cast<EnumInstType *>(variableExpr->get_var()->get_type())
    689                         // && dynamic_cast<EnumInstType *>(variableExpr->get_var()->get_type())->baseEnum->base) {
    690                         //      output << '(' <<genType(dynamic_cast<EnumInstType *>(variableExpr->get_var()->get_type())->baseEnum->base, "", options) << ')';
    691                         // }
    692663                        output << mangleName( variableExpr->get_var() );
    693664                } // if
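
    The enumeration hunk is the substantive change in this file: one side lowers
    a CFA enumeration that has a base type to a sequence of base-typed constants
    (a C enumerator is always an int), keeping the plain enum form only as the
    fallback, while the other side always emits a plain enum. Roughly, for a
    hypothetical input enum(char) Letter { A = 'A', B }, the two outputs look
    like this, written out by hand with placeholder names standing in for the
    real mangling:

        // Base-typed constants: B is the previous value plus one.
        static const char _X6LetterA = (char)'A';
        static const char _X6LetterB = (char)( 'A' + 1 );

        // Plain C enum: both enumerators are ints, whatever the base type.
        enum Letter { A = 'A', B };

        int main() {
            return ( _X6LetterB == B ) ? 0 : 1;  // both are 66
        }
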
  • src/CodeGen/FixMain.cc

    r2e9b59b rba897d21  
    9191}
    9292
    93 ObjectDecl * makeArgvObj() {
     93ObjectDecl * charStarObj() {
    9494        return new ObjectDecl(
    9595                "", Type::StorageClasses(), LinkageSpec::Cforall, 0,
     
    117117        main_type->get_returnVals().push_back( signedIntObj() );
    118118        main_type->get_parameters().push_back( signedIntObj() );
    119         main_type->get_parameters().push_back( makeArgvObj() );
     119        main_type->get_parameters().push_back( charStarObj() );
    120120        return create_mangled_main_function_name( main_type );
    121121}
  • src/CodeGen/GenType.cc

    r2e9b59b rba897d21  
    253253
    254254        void GenType::postvisit( EnumInstType * enumInst ) {
    255                 if ( enumInst->baseEnum->base ) {
    256                         typeString = genType(enumInst->baseEnum->base, "", options) + typeString;
    257                 } else {
    258                         typeString = enumInst->name + " " + typeString;
    259                         if ( options.genC ) {
    260                                 typeString = "enum " + typeString;
    261                         }
    262                 }
     255                typeString = enumInst->name + " " + typeString;
     256                if ( options.genC ) typeString = "enum " + typeString;
    263257                handleQualifiers( enumInst );
    264258        }
  • src/Common/CodeLocationTools.cpp

    r2e9b59b rba897d21  
    99// Author           : Andrew Beach
    1010// Created On       : Fri Dec  4 15:42:00 2020
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Mon Mar 14 15:14:00 2022
    13 // Update Count     : 4
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Tue Feb  1 09:14:39 2022
     13// Update Count     : 3
    1414//
    1515
     
    112112    macro(ForStmt, Stmt) \
    113113    macro(SwitchStmt, Stmt) \
    114     macro(CaseClause, CaseClause) \
     114    macro(CaseStmt, Stmt) \
    115115    macro(BranchStmt, Stmt) \
    116116    macro(ReturnStmt, Stmt) \
    117117    macro(ThrowStmt, Stmt) \
    118118    macro(TryStmt, Stmt) \
    119     macro(CatchClause, CatchClause) \
    120     macro(FinallyClause, FinallyClause) \
     119    macro(CatchStmt, Stmt) \
     120    macro(FinallyStmt, Stmt) \
    121121    macro(SuspendStmt, Stmt) \
    122122    macro(WaitForStmt, Stmt) \
     
    147147    macro(CommaExpr, Expr) \
    148148    macro(TypeExpr, Expr) \
    149     macro(DimensionExpr, Expr) \
    150149    macro(AsmExpr, Expr) \
    151150    macro(ImplicitCopyCtorExpr, Expr) \
     
    240239};
    241240
    242 class LocalFillCore : public ast::WithGuards {
    243         CodeLocation const * parent;
    244 public:
    245         LocalFillCore( CodeLocation const & location ) : parent( &location ) {
    246                 assert( location.isSet() );
    247         }
    248 
    249         template<typename node_t>
    250         auto previsit( node_t const * node )
    251                         -> typename std::enable_if<has_code_location<node_t>::value, node_t const *>::type {
    252                 if ( node->location.isSet() ) {
    253                         GuardValue( parent ) = &node->location;
    254                         return node;
    255                 } else {
    256                         node_t * mut = ast::mutate( node );
    257                         mut->location = *parent;
    258                         return mut;
    259                 }
    260         }
    261 };
    262 
    263241} // namespace
    264242
     
    300278        ast::Pass<FillCore>::run( unit );
    301279}
    302 
    303 ast::Node const * localFillCodeLocations(
    304                 CodeLocation const & location , ast::Node const * node ) {
    305         ast::Pass<LocalFillCore> visitor( location );
    306         return node->accept( visitor );
    307 }
  • src/Common/CodeLocationTools.hpp

    r2e9b59b rba897d21  
    1010// Created On       : Fri Dec  4 15:35:00 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Mon Mar 14 15:14:00 2022
    13 // Update Count     : 2
     12// Last Modified On : Wed Dec  9  9:53:00 2020
     13// Update Count     : 1
    1414//
    1515
    1616#pragma once
    1717
    18 struct CodeLocation;
    1918namespace ast {
    20         class Node;
    2119        class TranslationUnit;
    2220}
     
    3028// Assign a nearby code-location to any unset code locations in the forest.
    3129void forceFillCodeLocations( ast::TranslationUnit & unit );
    32 
    33 // Fill in code-locations with a parent code location,
    34 // using the provided CodeLocation as the base.
    35 ast::Node const *
    36         localFillCodeLocations( CodeLocation const &, ast::Node const * );
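
    localFillCodeLocations (left column) applies the same fill rule as the
    whole-unit pass, but to a single subtree and seeded with a caller-supplied
    parent location: a node whose location is set becomes the parent for the
    subtree below it, and a node without one inherits from above. A
    self-contained sketch of that rule on a toy tree (0 meaning "unset"):

        #include <cassert>
        #include <memory>
        #include <vector>

        struct Node {
            int location = 0;
            std::vector<std::unique_ptr<Node>> kids;
        };

        // Set locations are kept and become the parent below; unset inherit.
        void localFill( int parent, Node & n ) {
            if ( n.location == 0 ) n.location = parent;
            for ( auto & k : n.kids ) localFill( n.location, *k );
        }

        int main() {
            Node root;                                   // unset
            root.kids.push_back( std::make_unique<Node>() );
            root.kids[0]->location = 7;                  // set: new parent below
            root.kids[0]->kids.push_back( std::make_unique<Node>() );

            localFill( 3, root );
            assert( root.location == 3 );                // inherited the seed
            assert( root.kids[0]->kids[0]->location == 7 );  // inherited locally
        }
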
  • src/Common/Eval.cc

    r2e9b59b rba897d21  
    112112        }
    113113
    114         void postvisit( const ast::VariableExpr * expr ) { // No hit
     114        void postvisit( const ast::VariableExpr * expr ) {
    115115                if ( const ast::EnumInstType * inst = dynamic_cast<const ast::EnumInstType *>(expr->result.get()) ) {
    116116                        if ( const ast::EnumDecl * decl = inst->base ) {
  • src/Common/Examine.cc

    r2e9b59b rba897d21  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // Examine.cc -- Helpers for examining AST code.
     7// Examine.cc --
    88//
    99// Author           : Andrew Beach
    1010// Created On       : Wed Sept 2 14:02 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Dec 10 10:27 2021
    13 // Update Count     : 1
     12// Last Modified On : Wed Sep  8 12:15 2020
     13// Update Count     : 0
    1414//
    1515
    1616#include "Common/Examine.h"
    1717
    18 #include "AST/Type.hpp"
    1918#include "CodeGen/OperatorTable.h"
    20 #include "InitTweak/InitTweak.h"
    2119
    2220DeclarationWithType * isMainFor( FunctionDecl * func, AggregateDecl::Aggregate kind ) {
     
    3836
    3937namespace {
    40 
    41 // getTypeofThis but does some extra checks used in this module.
    42 const ast::Type * getTypeofThisSolo( const ast::FunctionDecl * func ) {
    43         if ( 1 != func->params.size() ) {
    44                 return nullptr;
    45         }
    46         auto ref = func->type->params.front().as<ast::ReferenceType>();
    47         return (ref) ? ref->base : nullptr;
    48 }
    49 
    50 }
    51 
    52 const ast::DeclWithType * isMainFor(
    53                 const ast::FunctionDecl * func, ast::AggregateDecl::Aggregate kind ) {
    54         if ( "main" != func->name ) return nullptr;
    55         if ( 1 != func->params.size() ) return nullptr;
    56 
    57         auto param = func->params.front();
    58 
    59         auto type = dynamic_cast<const ast::ReferenceType *>( param->get_type() );
    60         if ( !type ) return nullptr;
    61 
    62         auto obj = type->base.as<ast::StructInstType>();
    63         if ( !obj ) return nullptr;
    64 
    65         if ( kind != obj->base->kind ) return nullptr;
    66 
    67         return param;
    68 }
    69 
    70 namespace {
    7138        Type * getDestructorParam( FunctionDecl * func ) {
    7239                if ( !CodeGen::isDestructor( func->name ) ) return nullptr;
     
    8148                return nullptr;
    8249        }
    83 
    84 const ast::Type * getDestructorParam( const ast::FunctionDecl * func ) {
    85         if ( !CodeGen::isDestructor( func->name ) ) return nullptr;
    86         //return InitTweak::getParamThis( func )->type;
    87         return getTypeofThisSolo( func );
    88 }
    89 
    9050}
    9151
     
    9757        return false;
    9858}
    99 
    100 bool isDestructorFor(
    101                 const ast::FunctionDecl * func, const ast::StructDecl * type_decl ) {
    102         if ( const ast::Type * type = getDestructorParam( func ) ) {
    103                 auto stype = dynamic_cast<const ast::StructInstType *>( type );
    104                 return stype && stype->base.get() == type_decl;
    105         }
    106         return false;
    107 }
  • src/Common/Examine.h

    r2e9b59b rba897d21  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // Examine.h -- Helpers for examining AST code.
     7// Examine.h --
    88//
    99// Author           : Andrew Beach
    1010// Created On       : Wed Sept 2 13:57 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Dec 10 10:28 2021
    13 // Update Count     : 1
     12// Last Modified On : Wed Sep  8 12:08 2020
     13// Update Count     : 0
    1414//
    1515
    16 #include "AST/Decl.hpp"
    1716#include "SynTree/Declaration.h"
    1817
    1918/// Check if this is a main function for a type of an aggregate kind.
    2019DeclarationWithType * isMainFor( FunctionDecl * func, AggregateDecl::Aggregate kind );
    21 const ast::DeclWithType * isMainFor(
    22         const ast::FunctionDecl * func, ast::AggregateDecl::Aggregate kind );
    2320// Returns a pointer to the parameter if true, nullptr otherwise.
    2421
    2522/// Check if this function is a destructor for the given structure.
    2623bool isDestructorFor( FunctionDecl * func, StructDecl * type_decl );
    27 bool isDestructorFor(
    28         const ast::FunctionDecl * func, const ast::StructDecl * type );
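
    The new-AST isMainFor overload (body in the Examine.cc hunk above) returns
    the parameter exactly when the function is named main, takes a single
    parameter, and that parameter is a reference to an aggregate of the
    requested kind. A reduced stand-alone version of the same shape check
    (stand-in types, not the real ast:: classes):

        #include <cassert>
        #include <string>
        #include <vector>

        enum class Aggregate { Struct, Monitor, Coroutine, Thread };
        struct StructDecl { Aggregate kind; };
        struct Param { std::string name; const StructDecl * refBase; };
        struct FunctionDecl { std::string name; std::vector<Param> params; };

        // refBase models "parameter type is a reference to this aggregate".
        const Param * isMainFor( const FunctionDecl & f, Aggregate kind ) {
            if ( f.name != "main" || f.params.size() != 1 ) return nullptr;
            const Param & p = f.params.front();
            if ( !p.refBase || p.refBase->kind != kind ) return nullptr;
            return &p;
        }

        int main() {
            StructDecl thrd{ Aggregate::Thread };
            FunctionDecl f{ "main", { { "this", &thrd } } };
            assert( isMainFor( f, Aggregate::Thread ) );
            assert( !isMainFor( f, Aggregate::Monitor ) );
        }
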
  • src/Common/PassVisitor.impl.h

    r2e9b59b rba897d21  
    754754
    755755        // unlike structs, traits, and unions, enums inject their members into the global scope
    756         // if ( node->base ) maybeAccept_impl( node->base, *this ); // Need this? Maybe not?
    757756        maybeAccept_impl( node->parameters, *this );
    758757        maybeAccept_impl( node->members   , *this );
  • src/Concurrency/Keywords.cc

    r2e9b59b rba897d21  
    12041204                                        //new TypeofType( noQualifiers, args.front()->clone() )
    12051205                                        new TypeofType( noQualifiers, new UntypedExpr(
    1206                                                         new NameExpr( "__get_mutexstmt_lock_type" ),
     1206                                                        new NameExpr( "__get_type" ),
    12071207                                                        { args.front()->clone() }
    12081208                                                )
     
    12161216                                map_range < std::list<Initializer*> > ( args, [](Expression * var ){
    12171217                                        return new SingleInit( new UntypedExpr(
    1218                                                         new NameExpr( "__get_mutexstmt_lock_ptr" ),
     1218                                                        new NameExpr( "__get_ptr" ),
    12191219                                                        { var }
    12201220                                        ) );
     
    12271227                TypeExpr * lock_type_expr = new TypeExpr(
    12281228                        new TypeofType( noQualifiers, new UntypedExpr(
    1229                                 new NameExpr( "__get_mutexstmt_lock_type" ),
     1229                                new NameExpr( "__get_type" ),
    12301230                                { args.front()->clone() }
    12311231                                )
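
    Both name pairs are library helpers used when lowering a mutex statement;
    the longer __get_mutexstmt_lock_* spellings on the left only make the
    helpers less likely to collide with user code. In either spelling the
    generated shape is a typeof over the first lock argument plus one pointer
    per argument. A loose C++ analogue, with decltype playing the role of
    typeof and a hypothetical get_lock_ptr standing in for the helper:

        #include <mutex>

        // Hypothetical stand-in for the __get_mutexstmt_lock_ptr helper.
        template<typename L> L * get_lock_ptr( L & l ) { return &l; }

        int main() {
            std::mutex a, b;
            // decltype over the first argument fixes the element type, the
            // way the generated typeof(...) does in the hunk above.
            decltype( get_lock_ptr( a ) ) locks[] = {
                get_lock_ptr( a ), get_lock_ptr( b ) };
            for ( auto * l : locks ) l->lock();
            for ( int i = 1; i >= 0; --i ) locks[i]->unlock();
        }
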
  • src/Concurrency/KeywordsNew.cpp

    r2e9b59b rba897d21  
    1010// Created On       : Tue Nov 16  9:53:00 2021
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Mar 11 10:40:00 2022
    13 // Update Count     : 2
     12// Last Modified On : Wed Dec  1 11:24:00 2021
     13// Update Count     : 1
    1414//
    15 
    16 #include <iostream>
    1715
    1816#include "Concurrency/Keywords.h"
     
    2018#include "AST/Copy.hpp"
    2119#include "AST/Decl.hpp"
    22 #include "AST/Expr.hpp"
    2320#include "AST/Pass.hpp"
    2421#include "AST/Stmt.hpp"
    25 #include "AST/DeclReplacer.hpp"
    2622#include "AST/TranslationUnit.hpp"
    2723#include "CodeGen/OperatorTable.h"
    28 #include "Common/Examine.h"
    2924#include "Common/utility.h"
    30 #include "Common/UniqueName.h"
    31 #include "ControlStruct/LabelGeneratorNew.hpp"
    3225#include "InitTweak/InitTweak.h"
    33 #include "Virtual/Tables.h"
    3426
    3527namespace Concurrency {
     
    3729namespace {
    3830
    39 // --------------------------------------------------------------------------
    40 // Loose Helper Functions:
    41 
    42 /// Detect threads constructed with the keyword thread.
    43 bool isThread( const ast::DeclWithType * decl ) {
     31inline static bool isThread( const ast::DeclWithType * decl ) {
    4432        auto baseType = decl->get_type()->stripDeclarator();
    4533        auto instType = dynamic_cast<const ast::StructInstType *>( baseType );
     
    4836}
    4937
    50 /// Get the virtual type id if given a type name.
    51 std::string typeIdType( std::string const & exception_name ) {
    52         return exception_name.empty() ? std::string()
    53                 : Virtual::typeIdType( exception_name );
    54 }
    55 
    56 /// Get the vtable type name if given a type name.
    57 std::string vtableTypeName( std::string const & exception_name ) {
    58         return exception_name.empty() ? std::string()
    59                 : Virtual::vtableTypeName( exception_name );
    60 }
    61 
    62 static ast::Type * mutate_under_references( ast::ptr<ast::Type>& type ) {
    63         ast::Type * mutType = type.get_and_mutate();
    64         for ( ast::ReferenceType * mutRef
    65                 ; (mutRef = dynamic_cast<ast::ReferenceType *>( mutType ))
    66                 ; mutType = mutRef->base.get_and_mutate() );
    67         return mutType;
    68 }
    69 
    70 // Add the generic parameters, and the uses of those parameters, to the
    71 // function and its first "this" argument.
    72 ast::FunctionDecl * fixupGenerics(
    73                 const ast::FunctionDecl * func, const ast::StructDecl * decl ) {
    74         const CodeLocation & location = decl->location;
    75         // We have to update both the declaration
    76         auto mutFunc = ast::mutate( func );
    77         auto mutType = mutFunc->type.get_and_mutate();
    78 
    79         if ( decl->params.empty() ) {
    80                 return mutFunc;
    81         }
    82 
    83         assert( 0 != mutFunc->params.size() );
    84         assert( 0 != mutType->params.size() );
    85 
    86         // Add the "forall" clause information.
    87         for ( const ast::ptr<ast::TypeDecl> & typeParam : decl->params ) {
    88                 auto typeDecl = ast::deepCopy( typeParam );
    89                 mutFunc->type_params.push_back( typeDecl );
    90                 mutType->forall.push_back( new ast::TypeInstType( typeDecl ) );
    91                 for ( auto & assertion : typeDecl->assertions ) {
    92                         mutFunc->assertions.push_back( assertion );
    93                         mutType->assertions.emplace_back(
    94                                 new ast::VariableExpr( location, assertion ) );
    95                 }
    96                 typeDecl->assertions.clear();
    97         }
    98 
    99         // Even chain_mutate is not powerful enough for this:
    100         ast::ptr<ast::Type>& paramType = strict_dynamic_cast<ast::ObjectDecl *>(
    101                 mutFunc->params[0].get_and_mutate() )->type;
    102         auto paramTypeInst = strict_dynamic_cast<ast::StructInstType *>(
    103                 mutate_under_references( paramType ) );
    104         auto typeParamInst = strict_dynamic_cast<ast::StructInstType *>(
    105                 mutate_under_references( mutType->params[0] ) );
    106 
    107         for ( const ast::ptr<ast::TypeDecl> & typeDecl : mutFunc->type_params ) {
    108                 paramTypeInst->params.push_back(
    109                         new ast::TypeExpr( location, new ast::TypeInstType( typeDecl ) ) );
    110                 typeParamInst->params.push_back(
    111                         new ast::TypeExpr( location, new ast::TypeInstType( typeDecl ) ) );
    112         }
    113 
    114         return mutFunc;
    115 }
    116 
    11738// --------------------------------------------------------------------------
    118 struct ConcurrentSueKeyword : public ast::WithDeclsToAdd<> {
    119         ConcurrentSueKeyword(
    120                 std::string&& type_name, std::string&& field_name,
    121                 std::string&& getter_name, std::string&& context_error,
    122                 std::string&& exception_name,
    123                 bool needs_main, ast::AggregateDecl::Aggregate cast_target
    124         ) :
    125                 type_name( type_name ), field_name( field_name ),
    126                 getter_name( getter_name ), context_error( context_error ),
    127                 exception_name( exception_name ),
    128                 typeid_name( typeIdType( exception_name ) ),
    129                 vtable_name( vtableTypeName( exception_name ) ),
    130                 needs_main( needs_main ), cast_target( cast_target )
    131         {}
    132 
    133         virtual ~ConcurrentSueKeyword() {}
    134 
    135         const ast::Decl * postvisit( const ast::StructDecl * decl );
    136         const ast::DeclWithType * postvisit( const ast::FunctionDecl * decl );
    137         const ast::Expr * postvisit( const ast::KeywordCastExpr * expr );
    138 
    139         struct StructAndField {
    140                 const ast::StructDecl * decl;
    141                 const ast::ObjectDecl * field;
    142         };
    143 
    144         const ast::StructDecl * handleStruct( const ast::StructDecl * );
    145         void handleMain( const ast::FunctionDecl *, const ast::StructInstType * );
    146         void addTypeId( const ast::StructDecl * );
    147         void addVtableForward( const ast::StructDecl * );
    148         const ast::FunctionDecl * forwardDeclare( const ast::StructDecl * );
    149         StructAndField addField( const ast::StructDecl * );
    150         void addGetRoutines( const ast::ObjectDecl *, const ast::FunctionDecl * );
    151         void addLockUnlockRoutines( const ast::StructDecl * );
    152 
    153 private:
    154         const std::string type_name;
    155         const std::string field_name;
    156         const std::string getter_name;
    157         const std::string context_error;
    158         const std::string exception_name;
    159         const std::string typeid_name;
    160         const std::string vtable_name;
    161         const bool needs_main;
    162         const ast::AggregateDecl::Aggregate cast_target;
    163 
    164         const ast::StructDecl   * type_decl = nullptr;
    165         const ast::FunctionDecl * dtor_decl = nullptr;
    166         const ast::StructDecl * except_decl = nullptr;
    167         const ast::StructDecl * typeid_decl = nullptr;
    168         const ast::StructDecl * vtable_decl = nullptr;
    169 
    170 };
    171 
    172 // Handles thread type declarations:
    173 //
    174 // thread MyThread {                         struct MyThread {
    175 //  int data;                                  int data;
    176 //  a_struct_t more_data;                      a_struct_t more_data;
    177 //                                =>             thread$ __thrd_d;
    178 // };                                        };
    179 //                                           static inline thread$ * get_thread( MyThread * this ) { return &this->__thrd_d; }
    180 //
    181 struct ThreadKeyword final : public ConcurrentSueKeyword {
    182         ThreadKeyword() : ConcurrentSueKeyword(
    183                 "thread$",
    184                 "__thrd",
    185                 "get_thread",
    186                 "thread keyword requires threads to be in scope, add #include <thread.hfa>\n",
    187                 "ThreadCancelled",
    188                 true,
    189                 ast::AggregateDecl::Thread )
    190         {}
    191 
    192         virtual ~ThreadKeyword() {}
    193 };
    194 
    195 // Handles coroutine type declarations:
    196 //
    197 // coroutine MyCoroutine {                   struct MyCoroutine {
    198 //  int data;                                  int data;
    199 //  a_struct_t more_data;                      a_struct_t more_data;
    200 //                                =>             coroutine$ __cor_d;
    201 // };                                        };
    202 //                                           static inline coroutine$ * get_coroutine( MyCoroutine * this ) { return &this->__cor_d; }
    203 //
    204 struct CoroutineKeyword final : public ConcurrentSueKeyword {
    205         CoroutineKeyword() : ConcurrentSueKeyword(
    206                 "coroutine$",
    207                 "__cor",
    208                 "get_coroutine",
    209                 "coroutine keyword requires coroutines to be in scope, add #include <coroutine.hfa>\n",
    210                 "CoroutineCancelled",
    211                 true,
    212                 ast::AggregateDecl::Coroutine )
    213         {}
    214 
    215         virtual ~CoroutineKeyword() {}
    216 };
    217 
    218 // Handles monitor type declarations:
    219 //
    220 // monitor MyMonitor {                       struct MyMonitor {
    221 //  int data;                                  int data;
    222 //  a_struct_t more_data;                      a_struct_t more_data;
    223 //                                =>             monitor$ __mon_d;
    224 // };                                        };
    225 //                                           static inline monitor$ * get_monitor( MyMonitor * this ) {
    226 //                                               return &this->__mon_d;
    227 //                                           }
    228 //                                           void lock(MyMonitor & this) {
    229 //                                               lock(get_monitor(this));
    230 //                                           }
    231 //                                           void unlock(MyMonitor & this) {
    232 //                                               unlock(get_monitor(this));
    233 //                                           }
    234 //
    235 struct MonitorKeyword final : public ConcurrentSueKeyword {
    236         MonitorKeyword() : ConcurrentSueKeyword(
    237                 "monitor$",
    238                 "__mon",
    239                 "get_monitor",
    240                 "monitor keyword requires monitors to be in scope, add #include <monitor.hfa>\n",
    241                 "",
    242                 false,
    243                 ast::AggregateDecl::Monitor )
    244         {}
    245 
    246         virtual ~MonitorKeyword() {}
    247 };
    248 
    249 // Handles generator type declarations:
    250 //
    251 // generator MyGenerator {                   struct MyGenerator {
    252 //  int data;                                  int data;
    253 //  a_struct_t more_data;                      a_struct_t more_data;
    254 //                                =>             int __generator_state;
    255 // };                                        };
    256 //
    257 struct GeneratorKeyword final : public ConcurrentSueKeyword {
    258         GeneratorKeyword() : ConcurrentSueKeyword(
    259                 "generator$",
    260                 "__generator_state",
    261                 "get_generator",
    262                 "Unable to find builtin type generator$\n",
    263                 "",
    264                 true,
    265                 ast::AggregateDecl::Generator )
    266         {}
    267 
    268         virtual ~GeneratorKeyword() {}
    269 };
    270 
    271 const ast::Decl * ConcurrentSueKeyword::postvisit(
    272                 const ast::StructDecl * decl ) {
    273         if ( !decl->body ) {
    274                 return decl;
    275         } else if ( cast_target == decl->kind ) {
    276                 return handleStruct( decl );
    277         } else if ( type_name == decl->name ) {
    278                 assert( !type_decl );
    279                 type_decl = decl;
    280         } else if ( exception_name == decl->name ) {
    281                 assert( !except_decl );
    282                 except_decl = decl;
    283         } else if ( typeid_name == decl->name ) {
    284                 assert( !typeid_decl );
    285                 typeid_decl = decl;
    286         } else if ( vtable_name == decl->name ) {
    287                 assert( !vtable_decl );
    288                 vtable_decl = decl;
    289         }
    290         return decl;
    291 }
    292 
    293 // Try to get the full definition, but raise an error on conflicts.
    294 const ast::FunctionDecl * getDefinition(
    295                 const ast::FunctionDecl * old_decl,
    296                 const ast::FunctionDecl * new_decl ) {
    297         if ( !new_decl->stmts ) {
    298                 return old_decl;
    299         } else if ( !old_decl->stmts ) {
    300                 return new_decl;
    301         } else {
    302                 assert( !old_decl->stmts || !new_decl->stmts );
    303                 return nullptr;
    304         }
    305 }
    306 
    307 const ast::DeclWithType * ConcurrentSueKeyword::postvisit(
    308                 const ast::FunctionDecl * decl ) {
    309         if ( type_decl && isDestructorFor( decl, type_decl ) ) {
    310                 // Check for forward declarations, try to get the full definition.
    311                 dtor_decl = (dtor_decl) ? getDefinition( dtor_decl, decl ) : decl;
    312         } else if ( !vtable_name.empty() && decl->has_body() ) {
    313                 if (const ast::DeclWithType * param = isMainFor( decl, cast_target )) {
    314                         if ( !vtable_decl ) {
    315                                 SemanticError( decl, context_error );
    316                         }
    317                         // Should be safe because of isMainFor.
    318                         const ast::StructInstType * struct_type =
    319                                 static_cast<const ast::StructInstType *>(
    320                                         static_cast<const ast::ReferenceType *>(
    321                                                 param->get_type() )->base.get() );
    322 
    323                         handleMain( decl, struct_type );
    324                 }
    325         }
    326         return decl;
    327 }
    328 
    329 const ast::Expr * ConcurrentSueKeyword::postvisit(
    330                 const ast::KeywordCastExpr * expr ) {
    331         if ( cast_target == expr->target ) {
    332                 // Convert `(thread &)ex` to `(thread$ &)*get_thread(ex)`, etc.
    333                 if ( !type_decl || !dtor_decl ) {
    334                         SemanticError( expr, context_error );
    335                 }
    336                 assert( nullptr == expr->result );
    337                 auto cast = ast::mutate( expr );
    338                 cast->result = new ast::ReferenceType( new ast::StructInstType( type_decl ) );
    339                 cast->concrete_target.field  = field_name;
    340                 cast->concrete_target.getter = getter_name;
    341                 return cast;
    342         }
    343         return expr;
    344 }
    345 
    346 const ast::StructDecl * ConcurrentSueKeyword::handleStruct(
    347                 const ast::StructDecl * decl ) {
    348         assert( decl->body );
    349 
    350         if ( !type_decl || !dtor_decl ) {
    351                 SemanticError( decl, context_error );
    352         }
    353 
    354         if ( !exception_name.empty() ) {
    355                 if( !typeid_decl || !vtable_decl ) {
    356                         SemanticError( decl, context_error );
    357                 }
    358                 addTypeId( decl );
    359                 addVtableForward( decl );
    360         }
    361 
    362         const ast::FunctionDecl * func = forwardDeclare( decl );
    363         StructAndField addFieldRet = addField( decl );
    364         decl = addFieldRet.decl;
    365         const ast::ObjectDecl * field = addFieldRet.field;
    366 
    367         addGetRoutines( field, func );
    368         // Add routines to monitors for use by mutex stmt.
    369         if ( ast::AggregateDecl::Monitor == cast_target ) {
    370                 addLockUnlockRoutines( decl );
    371         }
    372 
    373         return decl;
    374 }
    375 
    376 void ConcurrentSueKeyword::handleMain(
    377                 const ast::FunctionDecl * decl, const ast::StructInstType * type ) {
    378         assert( vtable_decl );
    379         assert( except_decl );
    380 
    381         const CodeLocation & location = decl->location;
    382 
    383         std::vector<ast::ptr<ast::Expr>> poly_args = {
    384                 new ast::TypeExpr( location, type ),
    385         };
    386         ast::ObjectDecl * vtable_object = Virtual::makeVtableInstance(
    387                 location,
    388                 "_default_vtable_object_declaration",
    389                 new ast::StructInstType( vtable_decl, copy( poly_args ) ),
    390                 type,
    391                 nullptr
    392         );
    393         declsToAddAfter.push_back( vtable_object );
    394         declsToAddAfter.push_back(
    395                 new ast::ObjectDecl(
    396                         location,
    397                         Virtual::concurrentDefaultVTableName(),
    398                         new ast::ReferenceType( vtable_object->type, ast::CV::Const ),
    399                         new ast::SingleInit( location,
    400                                 new ast::VariableExpr( location, vtable_object ) ),
    401                         ast::Storage::Classes(),
    402                         ast::Linkage::Cforall
    403                 )
    404         );
    405         declsToAddAfter.push_back( Virtual::makeGetExceptionFunction(
    406                 location,
    407                 vtable_object,
    408                 new ast::StructInstType( except_decl, copy( poly_args ) )
    409         ) );
    410 }
    411 
    412 void ConcurrentSueKeyword::addTypeId( const ast::StructDecl * decl ) {
    413         assert( typeid_decl );
    414         const CodeLocation & location = decl->location;
    415 
    416         ast::StructInstType * typeid_type =
    417                 new ast::StructInstType( typeid_decl, ast::CV::Const );
    418         typeid_type->params.push_back(
    419                 new ast::TypeExpr( location, new ast::StructInstType( decl ) ) );
    420         declsToAddBefore.push_back(
    421                 Virtual::makeTypeIdInstance( location, typeid_type ) );
    422         // If the typeid_type is going to be kept, the other reference will have
    423         // been made by now, but we also get to avoid extra mutates.
    424         ast::ptr<ast::StructInstType> typeid_cleanup = typeid_type;
    425 }
    426 
    427 void ConcurrentSueKeyword::addVtableForward( const ast::StructDecl * decl ) {
    428         assert( vtable_decl );
    429         const CodeLocation& location = decl->location;
    430 
    431         std::vector<ast::ptr<ast::Expr>> poly_args = {
    432                 new ast::TypeExpr( location, new ast::StructInstType( decl ) ),
    433         };
    434         declsToAddBefore.push_back( Virtual::makeGetExceptionForward(
    435                 location,
    436                 new ast::StructInstType( vtable_decl, copy( poly_args ) ),
    437                 new ast::StructInstType( except_decl, copy( poly_args ) )
    438         ) );
    439         ast::ObjectDecl * vtable_object = Virtual::makeVtableForward(
    440                 location,
    441                 "_default_vtable_object_declaration",
    442                 new ast::StructInstType( vtable_decl, std::move( poly_args ) )
    443         );
    444         declsToAddBefore.push_back( vtable_object );
    445         declsToAddBefore.push_back(
    446                 new ast::ObjectDecl(
    447                         location,
    448                         Virtual::concurrentDefaultVTableName(),
    449                         new ast::ReferenceType( vtable_object->type, ast::CV::Const ),
    450                         nullptr,
    451                         ast::Storage::Extern,
    452                         ast::Linkage::Cforall
    453                 )
    454         );
    455 }
    456 
    457 const ast::FunctionDecl * ConcurrentSueKeyword::forwardDeclare(
    458                 const ast::StructDecl * decl ) {
    459         const CodeLocation & location = decl->location;
    460 
    461         ast::StructDecl * forward = ast::deepCopy( decl );
    462         {
    463                 // If removing members makes ref-count go to zero, do not free.
    464                 ast::ptr<ast::StructDecl> forward_ptr = forward;
    465                 forward->body = false;
    466                 forward->members.clear();
    467                 forward_ptr.release();
    468         }
    469 
    470         ast::ObjectDecl * this_decl = new ast::ObjectDecl(
    471                 location,
    472                 "this",
    473                 new ast::ReferenceType( new ast::StructInstType( decl ) ),
    474                 nullptr,
    475                 ast::Storage::Classes(),
    476                 ast::Linkage::Cforall
    477         );
    478 
    479         ast::ObjectDecl * ret_decl = new ast::ObjectDecl(
    480                 location,
    481                 "ret",
    482                 new ast::PointerType( new ast::StructInstType( type_decl ) ),
    483                 nullptr,
    484                 ast::Storage::Classes(),
    485                 ast::Linkage::Cforall
    486         );
    487 
    488         ast::FunctionDecl * get_decl = new ast::FunctionDecl(
    489                 location,
    490                 getter_name,
    491                 {}, // forall
    492                 { this_decl }, // params
    493                 { ret_decl }, // returns
    494                 nullptr, // stmts
    495                 ast::Storage::Static,
    496                 ast::Linkage::Cforall,
    497                 { new ast::Attribute( "const" ) },
    498                 ast::Function::Inline
    499         );
    500         get_decl = fixupGenerics( get_decl, decl );
    501 
    502         ast::FunctionDecl * main_decl = nullptr;
    503         if ( needs_main ) {
    504                 // `this_decl` is copied here because the original was used above.
    505                 main_decl = new ast::FunctionDecl(
    506                         location,
    507                         "main",
    508                         {},
    509                         { ast::deepCopy( this_decl ) },
    510                         {},
    511                         nullptr,
    512                         ast::Storage::Classes(),
    513                         ast::Linkage::Cforall
    514                 );
    515                 main_decl = fixupGenerics( main_decl, decl );
    516         }
    517 
    518         declsToAddBefore.push_back( forward );
    519         if ( needs_main ) declsToAddBefore.push_back( main_decl );
    520         declsToAddBefore.push_back( get_decl );
    521 
    522         return get_decl;
    523 }
    524 
    525 ConcurrentSueKeyword::StructAndField ConcurrentSueKeyword::addField(
    526                 const ast::StructDecl * decl ) {
    527         const CodeLocation & location = decl->location;
    528 
    529         ast::ObjectDecl * field = new ast::ObjectDecl(
    530                 location,
    531                 field_name,
    532                 new ast::StructInstType( type_decl ),
    533                 nullptr,
    534                 ast::Storage::Classes(),
    535                 ast::Linkage::Cforall
    536         );
    537 
    538         auto mutDecl = ast::mutate( decl );
    539         mutDecl->members.push_back( field );
    540 
    541         return {mutDecl, field};
    542 }
    543 
    544 void ConcurrentSueKeyword::addGetRoutines(
    545                 const ast::ObjectDecl * field, const ast::FunctionDecl * forward ) {
    546         // Say it is generated at the "same" place as the forward declaration.
    547         const CodeLocation & location = forward->location;
    548 
    549         const ast::DeclWithType * param = forward->params.front();
    550         ast::Stmt * stmt = new ast::ReturnStmt( location,
    551                 new ast::AddressExpr( location,
    552                         new ast::MemberExpr( location,
    553                                 field,
    554                                 new ast::CastExpr( location,
    555                                         new ast::VariableExpr( location, param ),
    556                                         ast::deepCopy( param->get_type()->stripReferences() ),
    557                                         ast::ExplicitCast
    558                                 )
    559                         )
    560                 )
    561         );
    562 
    563         ast::FunctionDecl * decl = ast::deepCopy( forward );
    564         decl->stmts = new ast::CompoundStmt( location, { stmt } );
    565         declsToAddAfter.push_back( decl );
    566 }
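
Taken together, forwardDeclare, addField, and addGetRoutines produce an accessor that returns the address of the embedded runtime object. A minimal plain-C sketch, assuming a thread-like keyword: thread_t stands in for type_decl, and the names get_thread/__thrd are illustrative, since the real getter_name/field_name depend on the keyword being processed:

    typedef struct { int state; } thread_t;             /* stand-in for type_decl */
    typedef struct Worker { thread_t __thrd; } Worker;  /* field added by addField */

    /* the generated getter: cast `this` back to the concrete struct (the
       CastExpr above strips the reference), then return the field's address */
    static inline thread_t * get_thread( Worker * this_ ) {
            return &this_->__thrd;
    }
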
    567 
    568 void ConcurrentSueKeyword::addLockUnlockRoutines(
    569                 const ast::StructDecl * decl ) {
    570         // This should only be used on monitors.
    571         assert( ast::AggregateDecl::Monitor == cast_target );
    572 
    573         const CodeLocation & location = decl->location;
    574 
    575         // The parameter for both routines.
    576         ast::ObjectDecl * this_decl = new ast::ObjectDecl(
    577                 location,
    578                 "this",
    579                 new ast::ReferenceType( new ast::StructInstType( decl ) ),
    580                 nullptr,
    581                 ast::Storage::Classes(),
    582                 ast::Linkage::Cforall
    583         );
    584 
    585         ast::FunctionDecl * lock_decl = new ast::FunctionDecl(
    586                 location,
    587                 "lock",
    588                 { /* forall */ },
    589                 {
    590                         // Copy the declaration of this.
    591                         ast::deepCopy( this_decl ),
    592                 },
    593                 { /* returns */ },
    594                 nullptr,
    595                 ast::Storage::Static,
    596                 ast::Linkage::Cforall,
    597                 { /* attributes */ },
    598                 ast::Function::Inline
    599         );
    600         lock_decl = fixupGenerics( lock_decl, decl );
    601 
    602         lock_decl->stmts = new ast::CompoundStmt( location, {
    603                 new ast::ExprStmt( location,
    604                         new ast::UntypedExpr( location,
    605                                 new ast::NameExpr( location, "lock" ),
    606                                 {
    607                                         new ast::UntypedExpr( location,
    608                                                 new ast::NameExpr( location, "get_monitor" ),
    609                                                 { new ast::VariableExpr( location,
    610                                                         InitTweak::getParamThis( lock_decl ) ) }
    611                                         )
    612                                 }
    613                         )
    614                 )
    615         } );
    616 
    617         ast::FunctionDecl * unlock_decl = new ast::FunctionDecl(
    618                 location,
    619                 "unlock",
    620                 { /* forall */ },
    621                 {
    622                         // Last use, consume the declaration of this.
    623                         this_decl,
    624                 },
    625                 { /* returns */ },
    626                 nullptr,
    627                 ast::Storage::Static,
    628                 ast::Linkage::Cforall,
    629                 { /* attributes */ },
    630                 ast::Function::Inline
    631         );
    632         unlock_decl = fixupGenerics( unlock_decl, decl );
    633 
    634         unlock_decl->stmts = new ast::CompoundStmt( location, {
    635                 new ast::ExprStmt( location,
    636                         new ast::UntypedExpr( location,
    637                                 new ast::NameExpr( location, "unlock" ),
    638                                 {
    639                                         new ast::UntypedExpr( location,
    640                                                 new ast::NameExpr( location, "get_monitor" ),
    641                                                 { new ast::VariableExpr( location,
    642                                                         InitTweak::getParamThis( unlock_decl ) ) }
    643                                         )
    644                                 }
    645                         )
    646                 )
    647         } );
    648 
    649         declsToAddAfter.push_back( lock_decl );
    650         declsToAddAfter.push_back( unlock_decl );
    651 }
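
A plain-C sketch of the two monitor routines generated above; monitor_t, runtime_lock, and runtime_unlock stand in for the runtime's actual types and routines, which are not shown in this hunk (in Cforall both wrappers are overloads simply named lock and unlock):

    typedef struct { int held; } monitor_t;
    typedef struct M { monitor_t m; } M;

    static monitor_t * get_monitor( M * this_ )  { return &this_->m; }
    static void runtime_lock( monitor_t * m )    { (void)m; /* acquire */ }
    static void runtime_unlock( monitor_t * m )  { (void)m; /* release */ }

    /* the generated wrappers forward through get_monitor, exactly as the
       two CompoundStmt bodies above encode */
    static inline void lock_M( M * this_ )   { runtime_lock( get_monitor( this_ ) ); }
    static inline void unlock_M( M * this_ ) { runtime_unlock( get_monitor( this_ ) ); }
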
    652 
    653 
    654 // --------------------------------------------------------------------------
    655 struct SuspendKeyword final :
    656                 public ast::WithStmtsToAdd<>, public ast::WithGuards {
    657         SuspendKeyword() = default;
    658         virtual ~SuspendKeyword() = default;
    659 
    660         void previsit( const ast::FunctionDecl * );
    661         const ast::DeclWithType * postvisit( const ast::FunctionDecl * );
    662         const ast::Stmt * postvisit( const ast::SuspendStmt * );
    663 
    664 private:
    665         bool is_real_suspend( const ast::FunctionDecl * );
    666 
    667         const ast::Stmt * make_generator_suspend( const ast::SuspendStmt * );
    668         const ast::Stmt * make_coroutine_suspend( const ast::SuspendStmt * );
    669 
    670         struct LabelPair {
    671                 ast::Label obj;
    672                 int idx;
    673         };
    674 
    675         LabelPair make_label(const ast::Stmt * stmt ) {
    676                 labels.push_back( ControlStruct::newLabel( "generator", stmt ) );
    677                 return { labels.back(), int(labels.size()) };
    678         }
    679 
    680         const ast::DeclWithType * in_generator = nullptr;
    681         const ast::FunctionDecl * decl_suspend = nullptr;
    682         std::vector<ast::Label> labels;
    683 };
    684 
    685 void SuspendKeyword::previsit( const ast::FunctionDecl * decl ) {
    686         GuardValue( in_generator ); in_generator = nullptr;
    687 
    688         // If it is the real suspend, grab it if we don't have one already.
    689         if ( is_real_suspend( decl ) ) {
    690                 decl_suspend = decl_suspend ? decl_suspend : decl;
    691                 return;
    692         }
    693 
    694         // Otherwise check if this is a generator main and, if so, handle it.
    695         auto param = isMainFor( decl, ast::AggregateDecl::Generator );
    696         if ( !param ) return;
    697 
    698         if ( 0 != decl->returns.size() ) {
    699                 SemanticError( decl->location, "Generator main must return void" );
    700         }
    701 
    702         in_generator = param;
    703         GuardValue( labels ); labels.clear();
    704 }
    705 
    706 const ast::DeclWithType * SuspendKeyword::postvisit(
    707                 const ast::FunctionDecl * decl ) {
    708         // Only modify a full definition of a generator with states.
    709         if ( !decl->stmts || !in_generator || labels.empty() ) return decl;
    710 
    711         const CodeLocation & location = decl->location;
    712 
    713         // Create a new function body:
    714         // static void * __generator_labels[] = {&&s0, &&s1, ...};
    715         // void * __generator_label = __generator_labels[GEN.__generator_state];
    716         // goto * __generator_label;
    717         // s0: ;
    718         // OLD_BODY
    719 
    720         // This is the null statement inserted right before the body.
    721         ast::NullStmt * noop = new ast::NullStmt( location );
    722         noop->labels.push_back( ControlStruct::newLabel( "generator", noop ) );
    723         const ast::Label & first_label = noop->labels.back();
    724 
    725         // Add each label to the init, starting with the first label.
    726         std::vector<ast::ptr<ast::Init>> inits = {
    727                 new ast::SingleInit( location,
    728                         new ast::LabelAddressExpr( location, copy( first_label ) ) ) };
    729         // Then go through all the stored labels, and clear the store.
    730         for ( auto && label : labels ) {
    731                 inits.push_back( new ast::SingleInit( label.location,
    732                         new ast::LabelAddressExpr( label.location, std::move( label )
    733                         ) ) );
    734         }
    735         labels.clear();
    736         // Then construct the initializer itself.
    737         auto init = new ast::ListInit( location, std::move( inits ) );
    738 
    739         ast::ObjectDecl * generatorLabels = new ast::ObjectDecl(
    740                 location,
    741                 "__generator_labels",
    742                 new ast::ArrayType(
    743                         new ast::PointerType( new ast::VoidType() ),
    744                         nullptr,
    745                         ast::FixedLen,
    746                         ast::DynamicDim
    747                 ),
    748                 init,
    749                 ast::Storage::Classes(),
    750                 ast::Linkage::AutoGen
    751         );
    752 
    753         ast::ObjectDecl * generatorLabel = new ast::ObjectDecl(
    754                 location,
    755                 "__generator_label",
    756                 new ast::PointerType( new ast::VoidType() ),
    757                 new ast::SingleInit( location,
    758                         new ast::UntypedExpr( location,
    759                                 new ast::NameExpr( location, "?[?]" ),
    760                                 {
    761                                         // TODO: Could be a variable expr.
    762                                         new ast::NameExpr( location, "__generator_labels" ),
    763                                         new ast::UntypedMemberExpr( location,
    764                                                 new ast::NameExpr( location, "__generator_state" ),
    765                                                 new ast::VariableExpr( location, in_generator )
    766                                         )
    767                                 }
    768                         )
    769                 ),
    770                 ast::Storage::Classes(),
    771                 ast::Linkage::AutoGen
    772         );
    773 
    774         ast::BranchStmt * theGoTo = new ast::BranchStmt(
    775                 location, new ast::VariableExpr( location, generatorLabel )
    776         );
    777 
    778         // The labelled noop is placed here, in sequence.
    779 
    780         ast::CompoundStmt * body = new ast::CompoundStmt( location, {
    781                 { new ast::DeclStmt( location, generatorLabels ) },
    782                 { new ast::DeclStmt( location, generatorLabel ) },
    783                 { theGoTo },
    784                 { noop },
    785                 { decl->stmts },
    786         } );
    787 
    788         auto mutDecl = ast::mutate( decl );
    789         mutDecl->stmts = body;
    790         return mutDecl;
    791 }
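
A self-contained GNU C (computed goto) sketch of the dispatch this pass builds; Gen and gen_main are illustrative stand-ins with a single suspend point, so the label array holds the entry label s0 plus one stored label s1:

    #include <stdio.h>

    typedef struct { int __generator_state; } Gen;

    static void gen_main( Gen * gen ) {
            static void * __generator_labels[] = { &&s0, &&s1 };
            void * __generator_label = __generator_labels[gen->__generator_state];
            goto *__generator_label;
        s0:;
            printf( "first resume\n" );
            gen->__generator_state = 1;   /* suspend: save the resume point... */
            return;                       /* ...and return to the caller */
        s1:;
            printf( "second resume\n" );
    }

    int main( void ) {
            Gen g = { 0 };
            gen_main( &g );   /* prints "first resume" */
            gen_main( &g );   /* prints "second resume" */
            return 0;
    }
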
    792 
    793 const ast::Stmt * SuspendKeyword::postvisit( const ast::SuspendStmt * stmt ) {
    794         switch ( stmt->type ) {
    795         case ast::SuspendStmt::None:
    796                 // Use the context to determine the implicit target.
    797                 if ( in_generator ) {
    798                         return make_generator_suspend( stmt );
    799                 } else {
    800                         return make_coroutine_suspend( stmt );
    801                 }
    802         case ast::SuspendStmt::Coroutine:
    803                 return make_coroutine_suspend( stmt );
    804         case ast::SuspendStmt::Generator:
    805                 // Generator suspends must be directly in a generator.
    806                 if ( !in_generator ) SemanticError( stmt->location, "'suspend generator' must be used inside the main of a generator type." );
    807                 return make_generator_suspend( stmt );
    808         }
    809         assert( false );
    810         return stmt;
    811 }
    812 
    813 /// Find the real/official suspend declaration.
    814 bool SuspendKeyword::is_real_suspend( const ast::FunctionDecl * decl ) {
    815         return ( !decl->linkage.is_mangled
    816                 && 0 == decl->params.size()
    817                 && 0 == decl->returns.size()
    818                 && "__cfactx_suspend" == decl->name );
    819 }
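
For reference, the declaration this predicate matches has the following shape: unmangled linkage, no parameters, no returns, and the exact name compared above:

    extern void __cfactx_suspend( void );   /* the runtime's "real" suspend */
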
    820 
    821 const ast::Stmt * SuspendKeyword::make_generator_suspend(
    822                 const ast::SuspendStmt * stmt ) {
    823         assert( in_generator );
    824         // Target code is:
    825         //   GEN.__generator_state = X;
    826         //   THEN
    827         //   return;
    828         //   __gen_X:;
    829 
    830         const CodeLocation & location = stmt->location;
    831 
    832         LabelPair label = make_label( stmt );
    833 
    834         // This is the context saving statement.
    835         stmtsToAddBefore.push_back( new ast::ExprStmt( location,
    836                 new ast::UntypedExpr( location,
    837                         new ast::NameExpr( location, "?=?" ),
    838                         {
    839                                 new ast::UntypedMemberExpr( location,
    840                                         new ast::NameExpr( location, "__generator_state" ),
    841                                         new ast::VariableExpr( location, in_generator )
    842                                 ),
    843                                 ast::ConstantExpr::from_int( location, label.idx ),
    844                         }
    845                 )
    846         ) );
    847 
    848         // The THEN component is conditional (return is not).
    849         if ( stmt->then ) {
    850                 stmtsToAddBefore.push_back( stmt->then.get() );
    851         }
    852         stmtsToAddBefore.push_back( new ast::ReturnStmt( location, nullptr ) );
    853 
    854         // The null statement replaces the old suspend statement.
    855         return new ast::NullStmt( location, { label.obj } );
    856 }
    857 
    858 const ast::Stmt * SuspendKeyword::make_coroutine_suspend(
    859                 const ast::SuspendStmt * stmt ) {
    860         // The only thing we need from the old statement is the location.
    861         const CodeLocation & location = stmt->location;
    862 
    863         if ( !decl_suspend ) {
    864                 SemanticError( location, "the suspend keyword applied to a coroutine requires the coroutine interface to be in scope; add #include <coroutine.hfa>\n" );
    865         }
    866         if ( stmt->then ) {
    867                 SemanticError( location, "A compound statement following a coroutine suspend is not implemented." );
    868         }
    869 
    870         return new ast::ExprStmt( location,
    871                 new ast::UntypedExpr( location,
    872                         ast::VariableExpr::functionPointer( location, decl_suspend ) )
    873         );
    874 }
    875 
    876 // --------------------------------------------------------------------------
    877 struct MutexKeyword final : public ast::WithDeclsToAdd<> {
     39struct MutexKeyword final {
    87840        const ast::FunctionDecl * postvisit( const ast::FunctionDecl * decl );
    87941        void postvisit( const ast::StructDecl * decl );
     
    88850        ast::CompoundStmt * addStatements( const ast::CompoundStmt * body, const std::vector<ast::ptr<ast::Expr>> & args );
    88951        ast::CompoundStmt * addThreadDtorStatements( const ast::FunctionDecl* func, const ast::CompoundStmt * body, const std::vector<const ast::DeclWithType *> & args );
    890         ast::ExprStmt * genVirtLockUnlockExpr( const std::string & fnName, ast::ptr<ast::Expr> expr, const CodeLocation & location, ast::Expr * param);
    891         ast::IfStmt * genTypeDiscrimLockUnlock( const std::string & fnName, const std::vector<ast::ptr<ast::Expr>> & args, const CodeLocation & location, ast::UntypedExpr * thisParam );
     52
    89253private:
    89354        const ast::StructDecl * monitor_decl = nullptr;
     
    89859
    89960        static ast::ptr<ast::Type> generic_func;
    900 
    901         UniqueName mutex_func_namer = UniqueName("__lock_unlock_curr");
    90261};
    90362
     
    1001160
    1002161const ast::Stmt * MutexKeyword::postvisit( const ast::MutexStmt * stmt ) {
    1003         if ( !lock_guard_decl ) {
    1004                 SemanticError( stmt->location, "mutex stmt requires a header, add #include <mutex_stmt.hfa>\n" );
    1005         }
    1006162        ast::CompoundStmt * body =
    1007163                        new ast::CompoundStmt( stmt->location, { stmt->stmt } );
    1008        
    1009         return addStatements( body, stmt->mutexObjs );
     164        addStatements( body, stmt->mutexObjs );
     165        return body;
    1010166}
    1011167
     
    1095251                                {
    1096252                                        new ast::SingleInit( location,
    1097                                                 new ast::AddressExpr( location,
     253                                                new ast::AddressExpr(
    1098254                                                        new ast::VariableExpr( location, monitor ) ) ),
    1099255                                        new ast::SingleInit( location,
     
    1202358}
    1203359
    1204 // Generates a cast from the stored void pointer to the appropriate lock type and dereferences it before calling lock or unlock on it.
    1205 // Used to undo the type erasure introduced by storing all the lock pointers as void pointers.
    1206 ast::ExprStmt * MutexKeyword::genVirtLockUnlockExpr( const std::string & fnName, ast::ptr<ast::Expr> expr, const CodeLocation & location, ast::Expr * param ) {
    1207         return new ast::ExprStmt( location,
    1208                 new ast::UntypedExpr( location,
    1209                         new ast::NameExpr( location, fnName ), {
    1210                                 ast::UntypedExpr::createDeref(
    1211                                         location,
    1212                                         new ast::CastExpr( location,
    1213                                                 param,
    1214                                                 new ast::PointerType( new ast::TypeofType( new ast::UntypedExpr(
    1215                                                         expr->location,
    1216                                                         new ast::NameExpr( expr->location, "__get_mutexstmt_lock_type" ),
    1217                                                         { expr }
    1218                                                 ) ) ),
    1219                                                 ast::GeneratedFlag::ExplicitCast
    1220                                         )
    1221                                 )
    1222                         }
    1223                 )
    1224         );
    1225 }
    1226 
    1227 ast::IfStmt * MutexKeyword::genTypeDiscrimLockUnlock( const std::string & fnName, const std::vector<ast::ptr<ast::Expr>> & args, const CodeLocation & location, ast::UntypedExpr * thisParam ) {
    1228         ast::IfStmt * outerLockIf = nullptr;
    1229         ast::IfStmt * lastLockIf = nullptr;
    1230 
    1231         // Adds an if/else-if clause for each lock, recovering its type from the void pointer by comparing pointer addresses.
    1232         for ( long unsigned int i = 0; i < args.size(); i++ ) {
    1233                
    1234                 ast::UntypedExpr * ifCond = new ast::UntypedExpr( location,
    1235                         new ast::NameExpr( location, "?==?" ), {
    1236                                 ast::deepCopy( thisParam ),
    1237                                 new ast::CastExpr( location, new ast::AddressExpr( location, args.at(i) ), new ast::PointerType( new ast::VoidType() ))
    1238                         }
    1239                 );
    1240 
    1241                 ast::IfStmt * currLockIf = new ast::IfStmt(
    1242                         location,
    1243                         ifCond,
    1244                         genVirtLockUnlockExpr( fnName, args.at(i), location, ast::deepCopy( thisParam ) )
    1245                 );
    1246                
    1247                 if ( i == 0 ) {
    1248                         outerLockIf = currLockIf;
    1249                 } else {
    1250                         // chain this ifstmt onto the else of the previous one
    1251                         lastLockIf->else_ = currLockIf;
    1252                 }
    1253 
    1254                 lastLockIf = currLockIf;
    1255         }
    1256         return outerLockIf;
    1257 }
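
A plain-C sketch of the chain genTypeDiscrimLockUnlock emits for two locks a and b: each branch re-types the erased pointer by comparing addresses and then calls the typed routine (lock_A/lock_B stand in for the overloads that __get_mutexstmt_lock_type would select):

    typedef struct { int x; } A;
    typedef struct { int y; } B;
    static void lock_A( A * a ) { (void)a; /* typed lock call */ }
    static void lock_B( B * b ) { (void)b; /* typed lock call */ }

    static void lock_curr( void * this_, A * a, B * b ) {
            if ( this_ == (void *)a )      lock_A( (A *)this_ );
            else if ( this_ == (void *)b ) lock_B( (B *)this_ );
    }
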
    1258 
    1259360ast::CompoundStmt * MutexKeyword::addStatements(
    1260361                const ast::CompoundStmt * body,
    1261362                const std::vector<ast::ptr<ast::Expr>> & args ) {
     363        ast::CompoundStmt * mutBody = ast::mutate( body );
    1262364
    1263365        // Code is generated near the beginning of the compound statement.
    1264         const CodeLocation & location = body->location;
    1265 
    1266         // final body to return
    1267         ast::CompoundStmt * newBody = new ast::CompoundStmt( location );
    1268 
    1269         // std::string lockFnName = mutex_func_namer.newName();
    1270         // std::string unlockFnName = mutex_func_namer.newName();
     366        const CodeLocation & location = mutBody->location;
    1271367
    1272368        // Make pointer to the monitors.
     
    1276372                new ast::ArrayType(
    1277373                        new ast::PointerType(
    1278                                 new ast::VoidType()
     374                                new ast::TypeofType(
     375                                        new ast::UntypedExpr(
     376                                                location,
     377                                                new ast::NameExpr( location, "__get_type" ),
     378                                                { args.front() }
     379                                        )
     380                                )
    1279381                        ),
    1280382                        ast::ConstantExpr::from_ulong( location, args.size() ),
     
    1290392                                                new ast::UntypedExpr(
    1291393                                                        expr->location,
    1292                                                         new ast::NameExpr( expr->location, "__get_mutexstmt_lock_ptr" ),
     394                                                        new ast::NameExpr( expr->location, "__get_ptr" ),
    1293395                                                        { expr }
    1294396                                                )
     
    1303405        ast::StructInstType * lock_guard_struct =
    1304406                        new ast::StructInstType( lock_guard_decl );
    1305 
    1306         // use try stmts to lock and finally to unlock
    1307         ast::TryStmt * outerTry = nullptr;
    1308         ast::TryStmt * currentTry;
    1309         ast::CompoundStmt * lastBody = nullptr;
    1310 
    1311         // adds a nested try stmt for each lock we are locking
    1312         for ( long unsigned int i = 0; i < args.size(); i++ ) {
    1313                 ast::UntypedExpr * innerAccess = new ast::UntypedExpr(
    1314                         location,
    1315                         new ast::NameExpr( location,"?[?]" ), {
    1316                                 new ast::NameExpr( location, "__monitors" ),
    1317                                 ast::ConstantExpr::from_int( location, i )
    1318                         }
    1319                 );
    1320 
    1321                 // make the try body
    1322                 ast::CompoundStmt * currTryBody = new ast::CompoundStmt( location );
    1323                 ast::IfStmt * lockCall = genTypeDiscrimLockUnlock( "lock", args, location, innerAccess );
    1324                 currTryBody->push_back( lockCall );
    1325 
    1326                 // make the finally stmt
    1327                 ast::CompoundStmt * currFinallyBody = new ast::CompoundStmt( location );
    1328                 ast::IfStmt * unlockCall = genTypeDiscrimLockUnlock( "unlock", args, location, innerAccess );
    1329                 currFinallyBody->push_back( unlockCall );
    1330 
    1331                 // construct the current try
    1332                 currentTry = new ast::TryStmt(
    1333                         location,
    1334                         currTryBody,
    1335                         {},
    1336                         new ast::FinallyClause( location, currFinallyBody )
    1337                 );
    1338                 if ( i == 0 ) outerTry = currentTry;
    1339                 else {
    1340                         // push this try into the body of the enclosing try
    1341                         lastBody->push_back( currentTry );
    1342                 }
    1343                 lastBody = currTryBody;
    1344         }
    1345 
    1346         // push body into innermost try body
    1347         if ( lastBody != nullptr ) {
    1348                 lastBody->push_back( body );
    1349                 newBody->push_front( outerTry );
    1350         }
    1351 
     407        ast::TypeExpr * lock_type_expr = new ast::TypeExpr(
     408                location,
     409                new ast::TypeofType(
     410                        new ast::UntypedExpr(
     411                                location,
     412                                new ast::NameExpr( location, "__get_type" ),
     413                                { args.front() }
     414                        )
     415                )
     416        );
     417
     418        lock_guard_struct->params.push_back( lock_type_expr );
     419
     420        // In reverse order:
    1352421        // monitor_guard_t __guard = { __monitors, # };
    1353         newBody->push_front(
     422        mutBody->push_front(
    1354423                new ast::DeclStmt(
    1355424                        location,
     
    1378447
    1379448        // monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
    1380         newBody->push_front( new ast::DeclStmt( location, monitors ) );
    1381 
    1382         // // The parameter for both __lock_curr/__unlock_curr routines.
    1383         // ast::ObjectDecl * this_decl = new ast::ObjectDecl(
    1384         //      location,
    1385         //      "this",
    1386         //      new ast::PointerType( new ast::VoidType() ),
    1387         //      nullptr,
    1388         //      {},
    1389         //      ast::Linkage::Cforall
    1390         // );
    1391 
    1392         // ast::FunctionDecl * lock_decl = new ast::FunctionDecl(
    1393         //      location,
    1394         //      lockFnName,
    1395         //      { /* forall */ },
    1396         //      {
    1397         //              // Copy the declaration of this.
    1398         //              this_decl,
    1399         //      },
    1400         //      { /* returns */ },
    1401         //      nullptr,
    1402         //      0,
    1403         //      ast::Linkage::Cforall,
    1404         //      { /* attributes */ },
    1405         //      ast::Function::Inline
    1406         // );
    1407 
    1408         // ast::FunctionDecl * unlock_decl = new ast::FunctionDecl(
    1409         //      location,
    1410         //      unlockFnName,
    1411         //      { /* forall */ },
    1412         //      {
    1413         //              // Copy the declaration of this.
    1414         //              ast::deepCopy( this_decl ),
    1415         //      },
    1416         //      { /* returns */ },
    1417         //      nullptr,
    1418         //      0,
    1419         //      ast::Linkage::Cforall,
    1420         //      { /* attributes */ },
    1421         //      ast::Function::Inline
    1422         // );
    1423 
    1424         // ast::IfStmt * outerLockIf = nullptr;
    1425         // ast::IfStmt * outerUnlockIf = nullptr;
    1426         // ast::IfStmt * lastLockIf = nullptr;
    1427         // ast::IfStmt * lastUnlockIf = nullptr;
    1428 
    1429         // //adds an if/elif clause for each lock to assign type from void ptr based on ptr address
    1430         // for ( long unsigned int i = 0; i < args.size(); i++ ) {
    1431         //      ast::VariableExpr * thisParam = new ast::VariableExpr( location, InitTweak::getParamThis( lock_decl ) );
    1432         //      ast::UntypedExpr * ifCond = new ast::UntypedExpr( location,
    1433         //              new ast::NameExpr( location, "?==?" ), {
    1434         //                      thisParam,
    1435         //                      new ast::CastExpr( location, new ast::AddressExpr( location, args.at(i) ), new ast::PointerType( new ast::VoidType() ))
    1436         //              }
    1437         //      );
    1438 
    1439         //      ast::IfStmt * currLockIf = new ast::IfStmt(
    1440         //              location,
    1441         //              ast::deepCopy( ifCond ),
    1442         //              genVirtLockUnlockExpr( "lock", args.at(i), location, ast::deepCopy( thisParam ) )
    1443         //      );
    1444 
    1445         //      ast::IfStmt * currUnlockIf = new ast::IfStmt(
    1446         //              location,
    1447         //              ifCond,
    1448         //              genVirtLockUnlockExpr( "unlock", args.at(i), location, ast::deepCopy( thisParam ) )
    1449         //      );
    1450                
    1451         //      if ( i == 0 ) {
    1452         //              outerLockIf = currLockIf;
    1453         //              outerUnlockIf = currUnlockIf;
    1454         //      } else {
    1455         //              // add ifstmt to else of previous stmt
    1456         //              lastLockIf->else_ = currLockIf;
    1457         //              lastUnlockIf->else_ = currUnlockIf;
    1458         //      }
    1459 
    1460         //      lastLockIf = currLockIf;
    1461         //      lastUnlockIf = currUnlockIf;
    1462         // }
    1463        
    1464         // // add pointer typing if/elifs to body of routines
    1465         // lock_decl->stmts = new ast::CompoundStmt( location, { outerLockIf } );
    1466         // unlock_decl->stmts = new ast::CompoundStmt( location, { outerUnlockIf } );
    1467 
    1468         // // add routines to scope
    1469         // declsToAddBefore.push_back( lock_decl );
    1470         // declsToAddBefore.push_back( unlock_decl );
    1471 
    1472         // newBody->push_front(new ast::DeclStmt( location, lock_decl ));
    1473         // newBody->push_front(new ast::DeclStmt( location, unlock_decl ));
    1474 
    1475         return newBody;
     449        mutBody->push_front( new ast::DeclStmt( location, monitors ) );
     450
     451        return mutBody;
    1476452}
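
A plain-C approximation of the new expansion for `mutex(a, b) stmt`: the real pass keeps the pointers typed via typeof(__get_type(a)) and relies on monitor_guard_t from <mutex_stmt.hfa>, whose constructor locks and destructor unlocks; guard_t and the explicit lock/unlock loops below only stand in for those ctor/dtor semantics:

    typedef struct { int v; } lock_t;
    typedef struct { lock_t ** locks; unsigned long count; } guard_t;
    static void do_lock( lock_t * l )   { (void)l; }
    static void do_unlock( lock_t * l ) { (void)l; }

    static void example( lock_t * a, lock_t * b, void (*stmt)(void) ) {
            lock_t * __monitors[] = { a, b };     /* __get_ptr(a), __get_ptr(b) */
            guard_t __guard = { __monitors, 2 };  /* monitor_guard_t __guard = { __monitors, # }; */
            for ( unsigned long i = 0; i < __guard.count; ++i ) do_lock( __guard.locks[i] );
            stmt();                               /* the original statement */
            for ( unsigned long i = 0; i < __guard.count; ++i ) do_unlock( __guard.locks[i] );
    }
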
    1477453
     
    1588564
    1589565// --------------------------------------------------------------------------
    1590 // Interface Functions:
    1591566
    1592567void implementKeywords( ast::TranslationUnit & translationUnit ) {
    1593         ast::Pass<ThreadKeyword>::run( translationUnit );
    1594         ast::Pass<CoroutineKeyword>::run( translationUnit );
    1595         ast::Pass<MonitorKeyword>::run( translationUnit );
    1596         ast::Pass<GeneratorKeyword>::run( translationUnit );
    1597         ast::Pass<SuspendKeyword>::run( translationUnit );
     568        (void)translationUnit;
     569        assertf(false, "Apply Keywords not implemented." );
    1598570}
    1599571
  • src/ControlStruct/ExceptTranslateNew.cpp

    r2e9b59b rba897d21  
    99// Author           : Andrew Beach
    1010// Created On       : Mon Nov  8 11:53:00 2021
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Mar 11 17:51:00 2022
    13 // Update Count     : 2
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Mon Jan 31 18:49:58 2022
     13// Update Count     : 1
    1414//
    1515
     
    2626namespace {
    2727
    28         typedef std::list<ast::CatchClause*> CatchList;
     28        typedef std::list<ast::CatchStmt*> CatchList;
     29
     30        void split( CatchList& allHandlers, CatchList& terHandlers,
     31                                CatchList& resHandlers ) {
     32                while ( !allHandlers.empty() ) {
     33                        ast::CatchStmt * stmt = allHandlers.front();
     34                        allHandlers.pop_front();
     35                        if (stmt->kind == ast::ExceptionKind::Terminate) {
     36                                terHandlers.push_back(stmt);
     37                        } else {
     38                                resHandlers.push_back(stmt);
     39                        }
     40                }
     41        }
    2942
    3043        void appendDeclStmt( ast::CompoundStmt * block, ast::DeclWithType * item ) {
     
    4558        {}
    4659
    47         void previsit( const ast::CatchClause * stmt );
     60        void previsit( const ast::CatchStmt * stmt );
    4861        const ast::Stmt * postvisit( const ast::ThrowStmt * stmt );
    4962};
     
    88101}
    89102
    90 void TranslateThrowsCore::previsit( const ast::CatchClause * stmt ) {
     103void TranslateThrowsCore::previsit( const ast::CatchStmt * stmt ) {
    91104        // Validate the statement's form.
    92105        const ast::ObjectDecl * decl = stmt->decl.as<ast::ObjectDecl>();
     
    147160        ast::FunctionDecl * create_terminate_catch( CatchList &handlers );
    148161        ast::CompoundStmt * create_single_matcher(
    149                 const ast::DeclWithType * except_obj, ast::CatchClause * modded_handler );
     162                const ast::DeclWithType * except_obj, ast::CatchStmt * modded_handler );
    150163        ast::FunctionDecl * create_terminate_match( CatchList &handlers );
    151164        ast::CompoundStmt * create_terminate_caller( CodeLocation loc, ast::FunctionDecl * try_wrapper,
     
    158171        ast::Stmt * create_resume_rethrow( const ast::ThrowStmt * throwStmt );
    159172
    160         // Types used in translation; the first group is internal.
    161         ast::ObjectDecl * make_index_object( CodeLocation const & ) const;
    162         ast::ObjectDecl * make_exception_object( CodeLocation const & ) const;
    163         ast::ObjectDecl * make_bool_object( CodeLocation const & ) const;
    164         ast::ObjectDecl * make_voidptr_object( CodeLocation const & ) const;
    165         ast::ObjectDecl * make_unused_index_object( CodeLocation const & ) const;
     173        // Types used in translation, make sure to use clone.
    166174        // void (*function)();
    167         ast::FunctionDecl * make_try_function( CodeLocation const & ) const;
     175        ast::FunctionDecl * try_func_t;
    168176        // void (*function)(int, exception);
    169         ast::FunctionDecl * make_catch_function( CodeLocation const & ) const;
     177        ast::FunctionDecl * catch_func_t;
    170178        // int (*function)(exception);
    171         ast::FunctionDecl * make_match_function( CodeLocation const & ) const;
     179        ast::FunctionDecl * match_func_t;
    172180        // bool (*function)(exception);
    173         ast::FunctionDecl * make_handle_function( CodeLocation const & ) const;
     181        ast::FunctionDecl * handle_func_t;
    174182        // void (*function)(__attribute__((unused)) void *);
    175         ast::FunctionDecl * make_finally_function( CodeLocation const & ) const;
     183        ast::FunctionDecl * finally_func_t;
     184
     185        ast::StructInstType * create_except_type() {
     186                assert( except_decl );
     187                return new ast::StructInstType( except_decl );
     188        }
     189        void init_func_types();
    176190
    177191public:
     
    185199};
    186200
    187 ast::ObjectDecl * TryMutatorCore::make_index_object(
    188                 CodeLocation const & location ) const {
    189         return new ast::ObjectDecl(
    190                 location,
     201void TryMutatorCore::init_func_types() {
     202        assert( except_decl );
     203
     204        ast::ObjectDecl index_obj(
     205                {},
    191206                "__handler_index",
    192                 new ast::BasicType(ast::BasicType::SignedInt),
    193                 nullptr, //init
    194                 ast::Storage::Classes{},
    195                 ast::Linkage::Cforall
    196                 );
    197 }
    198 
    199 ast::ObjectDecl * TryMutatorCore::make_exception_object(
    200                 CodeLocation const & location ) const {
    201         assert( except_decl );
    202         return new ast::ObjectDecl(
    203                 location,
     207                new ast::BasicType(ast::BasicType::SignedInt)
     208                );
     209        ast::ObjectDecl exception_obj(
     210                {},
    204211                "__exception_inst",
    205212                new ast::PointerType(
    206213                        new ast::StructInstType( except_decl )
    207214                        ),
    208                 nullptr, //init
    209                 ast::Storage::Classes{},
    210                 ast::Linkage::Cforall
    211                 );
    212 }
    213 
    214 ast::ObjectDecl * TryMutatorCore::make_bool_object(
    215                 CodeLocation const & location ) const {
    216         return new ast::ObjectDecl(
    217                 location,
     215                NULL
     216                );
     217        ast::ObjectDecl bool_obj(
     218                {},
    218219                "__ret_bool",
    219220                new ast::BasicType( ast::BasicType::Bool ),
     
    224225                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
    225226                );
    226 }
    227 
    228 ast::ObjectDecl * TryMutatorCore::make_voidptr_object(
    229                 CodeLocation const & location ) const {
    230         return new ast::ObjectDecl(
    231                 location,
     227        ast::ObjectDecl voidptr_obj(
     228                {},
    232229                "__hook",
    233230                new ast::PointerType(
     
    240237                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
    241238                );
    242 }
    243 
    244 ast::ObjectDecl * TryMutatorCore::make_unused_index_object(
    245                 CodeLocation const & location ) const {
    246         return new ast::ObjectDecl(
    247                 location,
     239
     240        ast::ObjectDecl unused_index_obj(
     241                {},
    248242                "__handler_index",
    249243                new ast::BasicType(ast::BasicType::SignedInt),
     
    254248                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
    255249        );
    256 }
    257 
    258 ast::FunctionDecl * TryMutatorCore::make_try_function(
    259                 CodeLocation const & location ) const {
    260         return new ast::FunctionDecl(
    261                 location,
     250        //unused_index_obj->attributes.push_back( new Attribute( "unused" ) );
     251
     252        try_func_t = new ast::FunctionDecl(
     253                {},
    262254                "try",
    263255                {}, //forall
     
    268260                ast::Linkage::Cforall
    269261        );
    270 }
    271 
    272 ast::FunctionDecl * TryMutatorCore::make_catch_function(
    273                 CodeLocation const & location ) const {
    274         return new ast::FunctionDecl(
    275                 location,
     262
     263        catch_func_t = new ast::FunctionDecl(
     264                {},
    276265                "catch",
    277266                {}, //forall
    278                 { make_index_object( location ), make_exception_object( location ) },
     267                {ast::deepCopy(&index_obj), ast::deepCopy(&exception_obj)},//param
    279268                {}, //return void
    280269                nullptr,
     
    282271                ast::Linkage::Cforall
    283272        );
    284 }
    285 
    286 ast::FunctionDecl * TryMutatorCore::make_match_function(
    287                 CodeLocation const & location ) const {
    288         return new ast::FunctionDecl(
    289                 location,
     273
     274        match_func_t = new ast::FunctionDecl(
     275                {},
    290276                "match",
    291277                {}, //forall
    292                 { make_exception_object( location ) },
    293                 { make_unused_index_object( location ) },
     278                {ast::deepCopy(&exception_obj)},
     279                {ast::deepCopy(&unused_index_obj)},
    294280                nullptr,
    295281                ast::Storage::Classes{},
    296282                ast::Linkage::Cforall
    297283        );
    298 }
    299 
    300 ast::FunctionDecl * TryMutatorCore::make_handle_function(
    301                 CodeLocation const & location ) const {
    302         return new ast::FunctionDecl(
    303                 location,
     284
     285        handle_func_t = new ast::FunctionDecl(
     286                {},
    304287                "handle",
    305288                {}, //forall
    306                 { make_exception_object( location ) },
    307                 { make_bool_object( location ) },
     289                {ast::deepCopy(&exception_obj)},
     290                {ast::deepCopy(&bool_obj)},
    308291                nullptr,
    309292                ast::Storage::Classes{},
    310293                ast::Linkage::Cforall
    311294        );
    312 }
    313 
    314 ast::FunctionDecl * TryMutatorCore::make_finally_function(
    315                 CodeLocation const & location ) const {
    316         return new ast::FunctionDecl(
    317                 location,
     295
     296        finally_func_t = new ast::FunctionDecl(
     297                {},
    318298                "finally",
    319299                {}, //forall
    320                 { make_voidptr_object( location ) },
     300                {ast::deepCopy(&voidptr_obj)},
    321301                {}, //return void
    322302                nullptr,
     
    324304                ast::Linkage::Cforall
    325305        );
     306
     307        //catch_func_t.get_parameters().push_back( index_obj.clone() );
     308        //catch_func_t.get_parameters().push_back( exception_obj.clone() );
     309        //match_func_t.get_returnVals().push_back( unused_index_obj );
     310        //match_func_t.get_parameters().push_back( exception_obj.clone() );
     311        //handle_func_t.get_returnVals().push_back( bool_obj.clone() );
     312        //handle_func_t.get_parameters().push_back( exception_obj.clone() );
     313        //finally_func_t.get_parameters().push_back( voidptr_obj.clone() );
    326314}
    327315
    328316// TryStmt Mutation Helpers
     317
     318/*
     319ast::CompoundStmt * TryMutatorCore::take_try_block( ast::TryStmt *tryStmt ) {
     320        ast::CompoundStmt * block = tryStmt->body;
     321        tryStmt->body = nullptr;
     322        return block;
     323}
     324*/
    329325
    330326ast::FunctionDecl * TryMutatorCore::create_try_wrapper(
    331327                const ast::CompoundStmt *body ) {
    332328
    333         ast::FunctionDecl * ret = make_try_function( body->location );
     329        ast::FunctionDecl * ret = ast::deepCopy(try_func_t);
    334330        ret->stmts = body;
    335331        return ret;
     
    338334ast::FunctionDecl * TryMutatorCore::create_terminate_catch(
    339335                CatchList &handlers ) {
    340         std::vector<ast::ptr<ast::CaseClause>> handler_wrappers;
     336        std::vector<ast::ptr<ast::Stmt>> handler_wrappers;
    341337
    342338        assert (!handlers.empty());
    343339        const CodeLocation loc = handlers.front()->location;
    344340
    345         ast::FunctionDecl * func_t = make_catch_function( loc );
     341        ast::FunctionDecl * func_t = ast::deepCopy(catch_func_t);
    346342        const ast::DeclWithType * index_obj = func_t->params.front();
    347343        const ast::DeclWithType * except_obj = func_t->params.back();
     
    352348        for ( ; it != handlers.end() ; ++it ) {
    353349                ++index;
    354                 ast::CatchClause * handler = *it;
     350                ast::CatchStmt * handler = *it;
    355351                const CodeLocation loc = handler->location;
    356352
     
    390386                // handler->body = nullptr;
    391387
    392                 handler_wrappers.push_back( new ast::CaseClause(loc,
     388                handler_wrappers.push_back( new ast::CaseStmt(loc,
    393389                        ast::ConstantExpr::from_int(loc, index) ,
    394390                        { block, new ast::ReturnStmt( loc, nullptr ) }
     
    397393        // TODO: Some sort of meaningful error on default perhaps?
    398394
    399         ast::SwitchStmt * handler_lookup = new ast::SwitchStmt( loc,
     395        /*
     396        std::list<Statement*> stmt_handlers;
     397        while ( !handler_wrappers.empty() ) {
     398                stmt_handlers.push_back( handler_wrappers.front() );
     399                handler_wrappers.pop_front();
     400        }
     401        */
     402
     403        ast::SwitchStmt * handler_lookup = new ast::SwitchStmt(loc,
    400404                new ast::VariableExpr( loc, index_obj ),
    401405                std::move(handler_wrappers)
    402406                );
    403         ast::CompoundStmt * body = new ast::CompoundStmt( loc, {handler_lookup} );
     407        ast::CompoundStmt * body = new ast::CompoundStmt(loc,
     408                {handler_lookup});
    404409
    405410        func_t->stmts = body;
     
    410415// except_obj is referenced, modded_handler will be freed.
    411416ast::CompoundStmt * TryMutatorCore::create_single_matcher(
    412                 const ast::DeclWithType * except_obj, ast::CatchClause * modded_handler ) {
     417                const ast::DeclWithType * except_obj, ast::CatchStmt * modded_handler ) {
    413418        // {
    414419        //     `modded_handler.decl`
     
    428433
    429434        // Check for type match.
    430         ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc,
     435        ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc, 
    431436                new ast::VariableExpr(loc, except_obj ),
    432437                local_except->get_type()
     
    440445        }
    441446        // Construct the match condition.
    442         block->push_back( new ast::IfStmt(loc,
     447        block->push_back( new ast::IfStmt(loc, 
    443448                cond, modded_handler->body, nullptr ) );
    444449
     450        // xxx - how does this work in new ast
     451        //modded_handler->set_decl( nullptr );
     452        //modded_handler->set_cond( nullptr );
     453        //modded_handler->set_body( nullptr );
     454        //delete modded_handler;
    445455        return block;
    446456}
     
    457467        ast::CompoundStmt * body = new ast::CompoundStmt(loc);
    458468
    459         ast::FunctionDecl * func_t = make_match_function( loc );
     469        ast::FunctionDecl * func_t = ast::deepCopy(match_func_t);
    460470        const ast::DeclWithType * except_obj = func_t->params.back();
    461471
     
    465475        for ( it = handlers.begin() ; it != handlers.end() ; ++it ) {
    466476                ++index;
    467                 ast::CatchClause * handler = *it;
     477                ast::CatchStmt * handler = *it;
    468478
    469479                // Body should have been taken by create_terminate_catch.
     
    480490        }
    481491
    482         body->push_back( new ast::ReturnStmt(loc,
     492        body->push_back( new ast::ReturnStmt(loc, 
    483493                ast::ConstantExpr::from_int( loc, 0 ) ));
    484494
     
    515525        ast::CompoundStmt * body = new ast::CompoundStmt(loc);
    516526
    517         ast::FunctionDecl * func_t = make_handle_function( loc );
     527        ast::FunctionDecl * func_t = ast::deepCopy(handle_func_t);
    518528        const ast::DeclWithType * except_obj = func_t->params.back();
    519529
    520530        CatchList::iterator it;
    521531        for ( it = handlers.begin() ; it != handlers.end() ; ++it ) {
    522                 ast::CatchClause * handler = *it;
     532                ast::CatchStmt * handler = *it;
    523533                const CodeLocation loc = handler->location;
    524534                // Modify body.
    525535                ast::CompoundStmt * handling_code;
    526536                if (handler->body.as<ast::CompoundStmt>()) {
    527                         handling_code = strict_dynamic_cast<ast::CompoundStmt*>(
    528                                 handler->body.get_and_mutate() );
     537                        handling_code =
     538                        strict_dynamic_cast<ast::CompoundStmt*>( handler->body.get_and_mutate() );
    529539                } else {
    530540                        handling_code = new ast::CompoundStmt(loc);
     
    587597                ast::TryStmt * tryStmt ) {
    588598        // void finally() { `finally->block` }
    589         const ast::FinallyClause * finally = tryStmt->finally;
     599        const ast::FinallyStmt * finally = tryStmt->finally;
    590600        const ast::CompoundStmt * body = finally->body;
    591601
    592         ast::FunctionDecl * func_t = make_finally_function( tryStmt->location );
     602        ast::FunctionDecl * func_t = ast::deepCopy(finally_func_t);
    593603        func_t->stmts = body;
    594604
     605        // finally->set_block( nullptr );
     606        // delete finally;
    595607        tryStmt->finally = nullptr;
     608
    596609
    597610        return func_t;
     
    604617
    605618        const CodeLocation loc = finally_wrapper->location;
     619        // Make Cleanup Attribute.
     620        /*
     621        std::list< ast::Attribute * > attributes;
     622        {
     623                std::list<  > attr_params;
     624                attr_params.push_back( nameOf( finally_wrapper ) );
     625                attributes.push_back( new Attribute( "cleanup", attr_params ) );
     626        }
     627        */
     628
    606629        return new ast::ObjectDecl(
    607630                loc,
     
    621644        // return false;
    622645        const CodeLocation loc = throwStmt->location;
    623         ast::Stmt * result = new ast::ReturnStmt(loc,
     646        ast::Stmt * result = new ast::ReturnStmt(loc, 
    624647                ast::ConstantExpr::from_bool( loc, false )
    625648                );
    626649        result->labels = throwStmt->labels;
     650        // delete throwStmt; done by postvisit
    627651        return result;
    628652}
     
    636660                assert( nullptr == except_decl );
    637661                except_decl = structDecl;
     662                init_func_types();
    638663        } else if ( structDecl->name == "__cfaehm_try_resume_node" ) {
    639664                assert( nullptr == node_decl );
     
    681706                }
    682707        }
     708        // split( mutStmt->handlers,
     709        //              termination_handlers, resumption_handlers );
    683710
    684711        if ( resumption_handlers.size() ) {
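
Pulling the helpers in this file together, a plain-C sketch of how one terminating handler is lowered: the try body, the match chain, and the catch switch each become a small function handed to the EH runtime (the runtime entry point and is_E, the virtual-cast test, are stand-ins not shown in this hunk):

    typedef struct exception_t exception_t;

    static int is_E( exception_t * e ) { (void)e; return 1; }  /* virtual-cast stand-in */

    static void try_fn( void ) { /* original try block */ }

    static int match_fn( exception_t * __exception_inst ) {
            if ( is_E( __exception_inst ) ) return 1;  /* 1-based handler index */
            return 0;                                  /* no handler matched */
    }

    static void catch_fn( int __handler_index, exception_t * __exception_inst ) {
            (void)__exception_inst;
            switch ( __handler_index ) {
              case 1: { /* handler body */ } return;
            }
    }
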
  • src/ControlStruct/LabelGeneratorNew.cpp

    r2e9b59b rba897d21  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // LabelGeneratorNew.cpp --
     7// LabelGenerator.cc --
    88//
    99// Author           : Peter A. Buhr
    1010// Created On       : Mon May 18 07:44:20 2015
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Mon Mar 28 10:03:00 2022
    13 // Update Count     : 73
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Wed Feb  2 09:11:17 2022
     13// Update Count     : 72
    1414//
    1515
     
    2525namespace ControlStruct {
    2626
    27 enum { size = 128 };
    28 
    29 static int newLabelPre( char buf[size], const string & suffix ) {
     27Label newLabel( const string & suffix, const Stmt * stmt ) {
    3028        static int current = 0;
    3129
     30        assertf( stmt, "CFA internal error: parameter statement cannot be null pointer" );
     31
     32        enum { size = 128 };
     33        char buf[size];                                                                         // space to build label
    3234        int len = snprintf( buf, size, "__L%d__%s", current++, suffix.c_str() );
    3335        assertf( len < size, "CFA Internal error: buffer overflow creating label" );
    34         return len;
    35 }
    36 
    37 static Label newLabelPost( char buf[size], const CodeLocation & location ) {
    38         Label ret_label( location, buf );
    39         ret_label.attributes.push_back( new Attribute( "unused" ) );
    40         return ret_label;
    41 }
    42 
    43 Label newLabel( const string & suffix, const Stmt * stmt ) {
    44         // Buffer for string manipulation.
    45         char buf[size];
    46 
    47         assertf( stmt, "CFA internal error: parameter statement cannot be null pointer" );
    48         int len = newLabelPre( buf, suffix );
    4936
    5037        // What does this do?
     
    5441        } // if
    5542
    56         return newLabelPost( buf, stmt->location );
    57 }
    58 
    59 Label newLabel( const string & suffix, const CodeLocation & location ) {
    60         // Buffer for string manipulation.
    61         char buf[size];
    62 
    63         newLabelPre( buf, suffix );
    64         return newLabelPost( buf, location );
     43        Label ret_label( stmt->location, buf );
     44        ret_label.attributes.push_back( new Attribute( "unused" ) );
     45        return ret_label;
    6546}
    6647
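
Both versions of newLabel agree on the naming scheme: a translation-wide counter plus a suffix through the format "__L%d__%s", a 128-byte build buffer, and an "unused" attribute on the resulting label so unreferenced generated labels do not warn; the r2e9b59b side merely splits the work so a label can also be minted from a bare CodeLocation. The naming part in isolation:

    #include <cstdio>
    #include <string>

    // Mirrors newLabel's "__L%d__%s" scheme; the real code also records a
    // CodeLocation and pushes an "unused" Attribute onto the ast::Label.
    static std::string newLabelName( const std::string & suffix ) {
            static int current = 0;
            char buf[128];                                 // same fixed-size buffer
            snprintf( buf, sizeof buf, "__L%d__%s", current++, suffix.c_str() );
            return buf;
    }

    int main() {
            std::printf( "%s\n", newLabelName( "fallThrough" ).c_str() );   // __L0__fallThrough
            std::printf( "%s\n", newLabelName( "switchBreak" ).c_str() );   // __L1__switchBreak
    }
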
  • src/ControlStruct/LabelGeneratorNew.hpp

    r2e9b59b rba897d21  
    99// Author           : Rodolfo G. Esteves
    1010// Created On       : Mon May 18 07:44:20 2015
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Mar 25 15:40:00 2022
    13 // Update Count     : 28
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Mon Jan 31 18:03:09 2022
     13// Update Count     : 27
    1414//
    1515
     
    1818#include <string>                                                                               // for string
    1919
    20 class CodeLocation;
     20class Statement;
    2121
    2222namespace ast {
     23        class Stmt;
    2324        class Label;
    24         class Stmt;
    2525} // namespace ast
    2626
    2727namespace ControlStruct {
    2828        ast::Label newLabel( const std::string &, const ast::Stmt * );
    29         ast::Label newLabel( const std::string &, const CodeLocation & );
    3029} // namespace ControlStruct
    3130
  • src/ControlStruct/MultiLevelExit.cpp

    r2e9b59b rba897d21  
    99// Author           : Andrew Beach
    1010// Created On       : Mon Nov  1 13:48:00 2021
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Mon Mar 28  9:42:00 2022
    13 // Update Count     : 34
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Wed Feb  2 23:07:54 2022
     13// Update Count     : 33
    1414//
    1515
     
    4040
    4141        enum Kind {
    42                 ForStmtK, WhileDoStmtK, CompoundStmtK, IfStmtK, CaseClauseK, SwitchStmtK, TryStmtK
     42                ForStmtK, WhileDoStmtK, CompoundStmtK, IfStmtK, CaseStmtK, SwitchStmtK, TryStmtK
    4343        } kind;
    4444
     
    5858        Entry( const IfStmt *stmt, Label breakExit ) :
    5959                stmt( stmt ), firstTarget( breakExit ), secondTarget(), kind( IfStmtK ) {}
    60         Entry( const CaseClause *, const CompoundStmt *stmt, Label fallExit ) :
    61                 stmt( stmt ), firstTarget( fallExit ), secondTarget(), kind( CaseClauseK ) {}
     60        Entry( const CaseStmt *stmt, Label fallExit ) :
     61                stmt( stmt ), firstTarget( fallExit ), secondTarget(), kind( CaseStmtK ) {}
    6262        Entry( const SwitchStmt *stmt, Label breakExit, Label fallDefaultExit ) :
    6363                stmt( stmt ), firstTarget( breakExit ), secondTarget( fallDefaultExit ), kind( SwitchStmtK ) {}
     
    6666
    6767        bool isContTarget() const { return kind <= WhileDoStmtK; }
    68         bool isBreakTarget() const { return kind != CaseClauseK; }
    69         bool isFallTarget() const { return kind == CaseClauseK; }
     68        bool isBreakTarget() const { return kind != CaseStmtK; }
     69        bool isFallTarget() const { return kind == CaseStmtK; }
    7070        bool isFallDefaultTarget() const { return kind == SwitchStmtK; }
    7171
    7272        // These routines set a target as being "used" by a BranchStmt
    7373        Label useContExit() { assert( kind <= WhileDoStmtK ); return useTarget(secondTarget); }
    74         Label useBreakExit() { assert( kind != CaseClauseK ); return useTarget(firstTarget); }
    75         Label useFallExit() { assert( kind == CaseClauseK );  return useTarget(firstTarget); }
     74        Label useBreakExit() { assert( kind != CaseStmtK ); return useTarget(firstTarget); }
     75        Label useFallExit() { assert( kind == CaseStmtK );  return useTarget(firstTarget); }
    7676        Label useFallDefaultExit() { assert( kind == SwitchStmtK ); return useTarget(secondTarget); }
    7777
    7878        // These routines check if a specific label for a statement is used by a BranchStmt
    7979        bool isContUsed() const { assert( kind <= WhileDoStmtK ); return secondTarget.used; }
    80         bool isBreakUsed() const { assert( kind != CaseClauseK ); return firstTarget.used; }
    81         bool isFallUsed() const { assert( kind == CaseClauseK ); return firstTarget.used; }
     80        bool isBreakUsed() const { assert( kind != CaseStmtK ); return firstTarget.used; }
     81        bool isFallUsed() const { assert( kind == CaseStmtK ); return firstTarget.used; }
    8282        bool isFallDefaultUsed() const { assert( kind == SwitchStmtK ); return secondTarget.used; }
    8383        void seenDefault() { fallDefaultValid = false; }
     
    115115        void previsit( const ForStmt * );
    116116        const ForStmt * postvisit( const ForStmt * );
    117         const CaseClause * previsit( const CaseClause * );
     117        const CaseStmt * previsit( const CaseStmt * );
    118118        void previsit( const IfStmt * );
    119119        const IfStmt * postvisit( const IfStmt * );
     
    123123        void previsit( const TryStmt * );
    124124        void postvisit( const TryStmt * );
    125         void previsit( const FinallyClause * );
     125        void previsit( const FinallyStmt * );
    126126
    127127        const Stmt * mutateLoop( const Stmt * body, Entry& );
     
    288288                  auto switchStmt = strict_dynamic_cast< const SwitchStmt * >( targetEntry->stmt );
    289289                  bool foundDefault = false;
    290                   for ( auto caseStmt : switchStmt->cases ) {
     290                  for ( auto subStmt : switchStmt->stmts ) {
     291                          const CaseStmt * caseStmt = subStmt.strict_as<CaseStmt>();
    291292                          if ( caseStmt->isDefault() ) {
    292293                                  foundDefault = true;
     
    364365}
    365366
    366 const CaseClause * MultiLevelExitCore::previsit( const CaseClause * stmt ) {
     367const CaseStmt * MultiLevelExitCore::previsit( const CaseStmt * stmt ) {
    367368        visit_children = false;
    368369
     
    374375
    375376        // The cond may not exist, but if it does update it now.
    376         visitor->maybe_accept( stmt, &CaseClause::cond );
     377        visitor->maybe_accept( stmt, &CaseStmt::cond );
    377378
    378379        // Just save the mutated node for simplicity.
    379         CaseClause * mutStmt = mutate( stmt );
    380 
    381         Label fallLabel = newLabel( "fallThrough", stmt->location );
     380        CaseStmt * mutStmt = mutate( stmt );
     381
     382        Label fallLabel = newLabel( "fallThrough", stmt );
    382383        if ( ! mutStmt->stmts.empty() ) {
    383                 // These should already be in a block.
    384                 auto first = mutStmt->stmts.front().get_and_mutate();
    385                 auto block = strict_dynamic_cast<CompoundStmt *>( first );
    386 
    387384                // Ensure that the stack isn't corrupted by exceptions in fixBlock.
    388385                auto guard = makeFuncGuard(
    389                         [&](){ enclosing_control_structures.emplace_back( mutStmt, block, fallLabel ); },
     386                        [&](){ enclosing_control_structures.emplace_back( mutStmt, fallLabel ); },
    390387                        [this](){ enclosing_control_structures.pop_back(); }
    391388                        );
    392389
     390                // These should already be in a block.
     391                auto block = mutate( mutStmt->stmts.front().strict_as<CompoundStmt>() );
    393392                block->kids = fixBlock( block->kids, true );
    394393
     
    397396                Entry & entry = enclosing_control_structures.back();
    398397                if ( entry.isFallUsed() ) {
    399                         mutStmt->stmts.push_back( labelledNullStmt( block->location, entry.useFallExit() ) );
     398                        mutStmt->stmts.push_back( labelledNullStmt( mutStmt->location, entry.useFallExit() ) );
    400399                }
    401400        }
     
    434433}
    435434
    436 static bool isDefaultCase( const ptr<CaseClause> & caseClause ) {
    437         return caseClause->isDefault();
     435bool isDefaultCase( const ptr<Stmt> & stmt ) {
     436        const CaseStmt * caseStmt = stmt.strict_as<CaseStmt>();
     437        return caseStmt->isDefault();
    438438}
    439439
    440440void MultiLevelExitCore::previsit( const SwitchStmt * stmt ) {
    441441        Label label = newLabel( "switchBreak", stmt );
    442         auto it = find_if( stmt->cases.rbegin(), stmt->cases.rend(), isDefaultCase );
    443 
    444         const CaseClause * defaultCase = it != stmt->cases.rend() ? (*it) : nullptr;
    445         Label defaultLabel = defaultCase ? newLabel( "fallThroughDefault", defaultCase->location ) : Label( stmt->location, "" );
     442        auto it = find_if( stmt->stmts.rbegin(), stmt->stmts.rend(), isDefaultCase );
     443
     444        const CaseStmt * defaultCase = it != stmt->stmts.rend() ? (it)->strict_as<CaseStmt>() : nullptr;
     445        Label defaultLabel = defaultCase ? newLabel( "fallThroughDefault", defaultCase ) : Label( stmt->location, "" );
    446446        enclosing_control_structures.emplace_back( stmt, label, defaultLabel );
    447447        GuardAction( [this]() { enclosing_control_structures.pop_back(); } );
     
    449449        // Collect valid labels for fallthrough. It starts with all labels at this level, then remove as each is seen during
    450450        // traversal.
    451         for ( const CaseClause * caseStmt : stmt->cases ) {
     451        for ( const Stmt * stmt : stmt->stmts ) {
     452                auto * caseStmt = strict_dynamic_cast< const CaseStmt * >( stmt );
    452453                if ( caseStmt->stmts.empty() ) continue;
    453454                auto block = caseStmt->stmts.front().strict_as<CompoundStmt>();
     
    470471                // exit label and break to the last case, create a default case if no cases.
    471472                SwitchStmt * mutStmt = mutate( stmt );
    472                 if ( mutStmt->cases.empty() ) {
    473                         mutStmt->cases.push_back( new CaseClause( mutStmt->location, nullptr, {} ) );
    474                 }
    475 
    476                 auto caseStmt = mutStmt->cases.back().get();
     473                if ( mutStmt->stmts.empty() ) {
     474                        mutStmt->stmts.push_back( new CaseStmt( mutStmt->location, nullptr, {} ) );
     475                }
     476
     477                auto caseStmt = mutStmt->stmts.back().strict_as<CaseStmt>();
    477478                auto mutCase = mutate( caseStmt );
    478                 mutStmt->cases.back() = mutCase;
     479                mutStmt->stmts.back() = mutCase;
    479480
    480481                Label label( mutCase->location, "breakLabel" );
     
    513514}
    514515
    515 void MultiLevelExitCore::previsit( const FinallyClause * ) {
     516void MultiLevelExitCore::previsit( const FinallyStmt * ) {
    516517        GuardAction([this, old = move( enclosing_control_structures)](){ enclosing_control_structures = move(old); });
    517518        enclosing_control_structures = vector<Entry>();
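
The Entry table above is the heart of the multi-level-exit pass: each enclosing construct records which generated labels a BranchStmt may target (break, continue, case fallthrough, fallthrough to default), the use*/is*Used pairs mark a label as actually needed, and only needed labels are emitted as labelled null statements. The structural difference between the two sides is only whether cases are CaseClause children of a SwitchStmt or CaseStmt nodes mixed into its stmts. In generated-code terms a used labelled break becomes a plain goto; a sketch with an illustrative label name:

    #include <cstdio>

    int main() {
            for ( int i = 0; i < 3; i += 1 ) {
                    for ( int j = 0; j < 3; j += 1 ) {
                            if ( i + j == 2 ) goto __L0__breakLabel;   // was a labelled break
                            std::printf( "%d %d\n", i, j );
                    }
            }
            __L0__breakLabel: ;                    // emitted only because it was used
            std::puts( "after both loops" );
    }
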
  • src/InitTweak/FixGlobalInit.cc

    r2e9b59b rba897d21  
    113113                accept_all(translationUnit, fixer);
    114114
    115                 // Say these magic declarations come at the end of the file.
    116                 CodeLocation const & location = translationUnit.decls.back()->location;
    117 
    118115                if ( !fixer.core.initStmts.empty() ) {
    119116                        std::vector<ast::ptr<ast::Expr>> ctorParams;
    120                         if (inLibrary) ctorParams.emplace_back(ast::ConstantExpr::from_int(location, 200));
    121                         auto initFunction = new ast::FunctionDecl(location,
    122                                 "__global_init__", {}, {}, {},
    123                                 new ast::CompoundStmt(location, std::move(fixer.core.initStmts)),
    124                                 ast::Storage::Static, ast::Linkage::C,
    125                                 {new ast::Attribute("constructor", std::move(ctorParams))});
     117                        if (inLibrary) ctorParams.emplace_back(ast::ConstantExpr::from_int({}, 200));
     118                        auto initFunction = new ast::FunctionDecl({}, "__global_init__", {}, {}, {}, new ast::CompoundStmt({}, std::move(fixer.core.initStmts)),
     119                                ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("constructor", std::move(ctorParams))});
    126120
    127121                        translationUnit.decls.emplace_back( initFunction );
     
    130124                if ( !fixer.core.destroyStmts.empty() ) {
    131125                        std::vector<ast::ptr<ast::Expr>> dtorParams;
    132                         if (inLibrary) dtorParams.emplace_back(ast::ConstantExpr::from_int(location, 200));
    133                         auto destroyFunction = new ast::FunctionDecl( location,
    134                                 "__global_destroy__", {}, {}, {},
    135                                 new ast::CompoundStmt(location, std::move(fixer.core.destroyStmts)),
    136                                 ast::Storage::Static, ast::Linkage::C,
    137                                 {new ast::Attribute("destructor", std::move(dtorParams))});
     126                        if (inLibrary) dtorParams.emplace_back(ast::ConstantExpr::from_int({}, 200));
     127                        auto destroyFunction = new ast::FunctionDecl({}, "__global_destroy__", {}, {}, {}, new ast::CompoundStmt({}, std::move(fixer.core.destroyStmts)),
     128                                ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("destructor", std::move(dtorParams))});
    138129
    139130                        translationUnit.decls.emplace_back(destroyFunction);
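
Aside from whether real CodeLocations are attached, both sides of FixGlobalInit emit the same shape: a static __global_init__ carrying a "constructor" attribute and a __global_destroy__ carrying "destructor", with the priority argument 200 pushed only when building the library. A runnable sketch of the emitted pattern:

    #include <cstdio>

    // On the usual ELF setup, prioritized constructors run before
    // default-priority ones (lower number first); destructors reverse.
    __attribute__(( constructor( 200 ) ))
    static void __global_init__() { std::puts( "library globals constructed" ); }

    __attribute__(( destructor( 200 ) ))
    static void __global_destroy__() { std::puts( "library globals destroyed" ); }

    int main() { std::puts( "main" ); }
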
  • src/InitTweak/FixInitNew.cpp

    r2e9b59b rba897d21  
    1616#include "CodeGen/GenType.h"           // for genPrettyType
    1717#include "CodeGen/OperatorTable.h"
    18 #include "Common/CodeLocationTools.hpp"
    1918#include "Common/PassVisitor.h"        // for PassVisitor, WithStmtsToAdd
    2019#include "Common/SemanticError.h"      // for SemanticError
     
    8685        /// generate/resolve copy construction expressions for each, and generate/resolve destructors for both
    8786        /// arguments and return value temporaries
    88         struct ResolveCopyCtors final : public ast::WithGuards, public ast::WithStmtsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithVisitorRef<ResolveCopyCtors>, public ast::WithConstTranslationUnit {
     87        struct ResolveCopyCtors final : public ast::WithGuards, public ast::WithStmtsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithVisitorRef<ResolveCopyCtors> {
    8988                const ast::Expr * postvisit( const ast::ImplicitCopyCtorExpr * impCpCtorExpr );
    9089                const ast::StmtExpr * previsit( const ast::StmtExpr * stmtExpr );
     
    190189        /// for any member that is missing a corresponding ctor/dtor call.
    191190        /// error if a member is used before constructed
    192         struct GenStructMemberCalls final : public ast::WithGuards, public ast::WithShortCircuiting, public ast::WithSymbolTable, public ast::WithVisitorRef<GenStructMemberCalls>, public ast::WithConstTranslationUnit {
     191        struct GenStructMemberCalls final : public ast::WithGuards, public ast::WithShortCircuiting, public ast::WithSymbolTable, public ast::WithVisitorRef<GenStructMemberCalls> {
    193192                void previsit( const ast::FunctionDecl * funcDecl );
    194193                const ast::DeclWithType * postvisit( const ast::FunctionDecl * funcDecl );
     
    215214
    216215        /// expands ConstructorExpr nodes into comma expressions, using a temporary for the first argument
    217         struct FixCtorExprs final : public ast::WithDeclsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithConstTranslationUnit {
     216        struct FixCtorExprs final : public ast::WithDeclsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting {
    218217                const ast::Expr * postvisit( const ast::ConstructorExpr * ctorExpr );
    219218        };
     
    510509                // (VariableExpr and already resolved expression)
    511510                CP_CTOR_PRINT( std::cerr << "ResolvingCtorDtor " << untyped << std::endl; )
    512                 ast::ptr<ast::Expr> resolved = ResolvExpr::findVoidExpression(untyped, { symtab, transUnit().global } );
     511                ast::ptr<ast::Expr> resolved = ResolvExpr::findVoidExpression(untyped, symtab);
    513512                assert( resolved );
    514513                if ( resolved->env ) {
     
    554553                ast::ptr<ast::Expr> guard = mutArg;
    555554
    556                 ast::ptr<ast::ObjectDecl> tmp = new ast::ObjectDecl(loc, "__tmp", mutResult, nullptr );
     555                ast::ptr<ast::ObjectDecl> tmp = new ast::ObjectDecl({}, "__tmp", mutResult, nullptr );
    557556
    558557                // create and resolve copy constructor
     
    588587
    589588        ast::Expr * ResolveCopyCtors::destructRet( const ast::ObjectDecl * ret, const ast::Expr * arg ) {
    590                 auto global = transUnit().global;
    591589                // TODO: refactor code for generating cleanup attribute, since it's common and reused in ~3-4 places
    592590                // check for existing cleanup attribute before adding another(?)
    593591                // need to add __Destructor for _tmp_cp variables as well
    594592
    595                 assertf( global.dtorStruct, "Destructor generation requires __Destructor definition." );
    596                 assertf( global.dtorStruct->members.size() == 2, "__Destructor definition does not have expected fields." );
    597                 assertf( global.dtorDestroy, "Destructor generation requires __destroy_Destructor." );
     593                assertf( ast::dtorStruct, "Destructor generation requires __Destructor definition." );
     594                assertf( ast::dtorStruct->members.size() == 2, "__Destructor definition does not have expected fields." );
     595                assertf( ast::dtorStructDestroy, "Destructor generation requires __destroy_Destructor." );
    598596
    599597                const CodeLocation loc = ret->location;
     
    612610                auto dtorFunc = getDtorFunc( ret, new ast::ExprStmt(loc, dtor ), stmtsToAddBefore );
    613611
    614                 auto dtorStructType = new ast::StructInstType( global.dtorStruct );
     612                auto dtorStructType = new ast::StructInstType(ast::dtorStruct);
    615613
    616614                // what does this do???
     
    624622                static UniqueName namer( "_ret_dtor" );
    625623                auto retDtor = new ast::ObjectDecl(loc, namer.newName(), dtorStructType, new ast::ListInit(loc, { new ast::SingleInit(loc, ast::ConstantExpr::null(loc) ), new ast::SingleInit(loc, new ast::CastExpr( new ast::VariableExpr(loc, dtorFunc ), dtorType ) ) } ) );
    626                 retDtor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr(loc, global.dtorDestroy ) } ) );
     624                retDtor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr(loc, ast::dtorStructDestroy ) } ) );
    627625                stmtsToAddBefore.push_back( new ast::DeclStmt(loc, retDtor ) );
    628626
    629627                if ( arg ) {
    630                         auto member = new ast::MemberExpr(loc, global.dtorStruct->members.front().strict_as<ast::DeclWithType>(), new ast::VariableExpr(loc, retDtor ) );
     628                        auto member = new ast::MemberExpr(loc, ast::dtorStruct->members.front().strict_as<ast::DeclWithType>(), new ast::VariableExpr(loc, retDtor ) );
    631629                        auto object = new ast::CastExpr( new ast::AddressExpr( new ast::VariableExpr(loc, ret ) ), new ast::PointerType(new ast::VoidType() ) );
    632630                        ast::Expr * assign = createBitwiseAssignment( member, object );
     
    801799        // to prevent warnings ('_unq0' may be used uninitialized in this function),
    802800        // insert an appropriate zero initializer for UniqueExpr temporaries.
    803         ast::Init * makeInit( const ast::Type * t, CodeLocation const & loc ) {
     801        ast::Init * makeInit( const ast::Type * t ) {
    804802                if ( auto inst = dynamic_cast< const ast::StructInstType * >( t ) ) {
    805803                        // initializer for empty struct must be empty
    806                         if ( inst->base->members.empty() ) {
    807                                 return new ast::ListInit( loc, {} );
    808                         }
     804                        if ( inst->base->members.empty() ) return new ast::ListInit({}, {});
    809805                } else if ( auto inst = dynamic_cast< const ast::UnionInstType * >( t ) ) {
    810806                        // initializer for empty union must be empty
    811                         if ( inst->base->members.empty() ) {
    812                                 return new ast::ListInit( loc, {} );
    813                         }
    814                 }
    815 
    816                 return new ast::ListInit( loc, {
    817                         new ast::SingleInit( loc, ast::ConstantExpr::from_int( loc, 0 ) )
    818                 } );
     807                        if ( inst->base->members.empty() ) return new ast::ListInit({}, {});
     808                }
     809
     810                return new ast::ListInit( {}, { new ast::SingleInit( {}, ast::ConstantExpr::from_int({}, 0) ) } );
    819811        }
    820812
     
    840832                        } else {
    841833                                // expr isn't a call expr, so create a new temporary variable to use to hold the value of the unique expression
    842                                 mutExpr->object = new ast::ObjectDecl( mutExpr->location, toString("_unq", mutExpr->id), mutExpr->result, makeInit( mutExpr->result, mutExpr->location ) );
     834                                mutExpr->object = new ast::ObjectDecl( mutExpr->location, toString("_unq", mutExpr->id), mutExpr->result, makeInit( mutExpr->result ) );
    843835                                mutExpr->var = new ast::VariableExpr( mutExpr->location, mutExpr->object );
    844836                        }
     
    11801172                        auto guard = makeFuncGuard( [this]() { symtab.enterScope(); }, [this]() { symtab.leaveScope(); } );
    11811173                        symtab.addFunction( function );
    1182                         auto global = transUnit().global;
    11831174
    11841175                        // need to iterate through members in reverse in order for
     
    12261217
    12271218                                                        static UniqueName memberDtorNamer = { "__memberDtor" };
    1228                                                         assertf( global.dtorStruct, "builtin __Destructor not found." );
    1229                                                         assertf( global.dtorDestroy, "builtin __destroy_Destructor not found." );
     1219                                                        assertf( ast::dtorStruct, "builtin __Destructor not found." );
     1220                                                        assertf( ast::dtorStructDestroy, "builtin __destroy_Destructor not found." );
    12301221
    12311222                                                        ast::Expr * thisExpr = new ast::CastExpr( new ast::AddressExpr( new ast::VariableExpr(loc, thisParam ) ), new ast::PointerType( new ast::VoidType(), ast::CV::Qualifiers() ) );
     
    12371228                                                        auto dtorType = new ast::PointerType( dtorFtype );
    12381229
    1239                                                         auto destructor = new ast::ObjectDecl(loc, memberDtorNamer.newName(), new ast::StructInstType( global.dtorStruct ), new ast::ListInit(loc, { new ast::SingleInit(loc, thisExpr ), new ast::SingleInit(loc, new ast::CastExpr( dtorExpr, dtorType ) ) } ) );
    1240                                                         destructor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr( loc, global.dtorDestroy ) } ) );
     1230                                                        auto destructor = new ast::ObjectDecl(loc, memberDtorNamer.newName(), new ast::StructInstType( ast::dtorStruct ), new ast::ListInit(loc, { new ast::SingleInit(loc, thisExpr ), new ast::SingleInit(loc, new ast::CastExpr( dtorExpr, dtorType ) ) } ) );
     1231                                                        destructor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr({}, ast::dtorStructDestroy ) } ) );
    12411232                                                        mutStmts->push_front( new ast::DeclStmt(loc, destructor ) );
    12421233                                                        mutStmts->kids.splice( mutStmts->kids.begin(), stmtsToAdd );
     
    13321323
    13331324        const ast::Expr * GenStructMemberCalls::postvisit( const ast::UntypedExpr * untypedExpr ) {
     1325                // Expression * newExpr = untypedExpr;
    13341326                // xxx - functions returning ast::ptr seems wrong...
    1335                 auto res = ResolvExpr::findVoidExpression( untypedExpr, { symtab, transUnit().global } );
    1336                 // Fix CodeLocation (at least until resolver is fixed).
    1337                 auto fix = localFillCodeLocations( untypedExpr->location, res.release() );
    1338                 return strict_dynamic_cast<const ast::Expr *>( fix );
     1327                auto res = ResolvExpr::findVoidExpression( untypedExpr, symtab );
     1328                return res.release();
     1329                // return newExpr;
    13391330        }
    13401331
     
    13701361
    13711362                // resolve assignment and dispose of new env
    1372                 auto resolved = ResolvExpr::findVoidExpression( assign, { symtab, transUnit().global } );
     1363                auto resolved = ResolvExpr::findVoidExpression( assign, symtab );
    13731364                auto mut = resolved.get_and_mutate();
    13741365                assertf(resolved.get() == mut, "newly resolved expression must be unique");
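
Several hunks above consult a builtin __Destructor aggregate and a __destroy_Destructor function (ast::dtorStruct / ast::dtorStructDestroy on one side, transUnit().global.dtorStruct / dtorDestroy on the other) and hang a "cleanup" attribute off a generated _ret_dtor or __memberDtor object. A self-contained sketch of that runtime pattern; the two-field layout here is an assumption read off the members.size() == 2 assert, not the library's definition:

    #include <cstdio>

    struct __Destructor {                          // assumed: {object, function} pair
            void * object;
            void (*dtor)( void * );
    };

    static void __destroy_Destructor( __Destructor * d ) {
            if ( d->dtor ) d->dtor( d->object );   // invoke the stored destructor
    }

    static void destroy_int( void * p ) { std::printf( "destroyed %d\n", *(int *)p ); }

    int main() {
            int ret = 42;
            __Destructor _ret_dtor __attribute__(( cleanup( __destroy_Destructor ) ))
                    = { &ret, destroy_int };
            std::puts( "ret is live" );
    }                                              // cleanup fires: "destroyed 42"
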
  • src/InitTweak/GenInit.cc

    r2e9b59b rba897d21  
    402402                                        retVal->location, "?{}", retVal, stmt->expr );
    403403                                assertf( ctorStmt,
    404                                         "ReturnFixer: genCtorDtor returned nullptr: %s / %s",
     404                                        "ReturnFixer: genCtorDtor returned nullptr: %s / %s",
    405405                                        toString( retVal ).c_str(),
    406406                                        toString( stmt->expr ).c_str() );
    407                                 stmtsToAddBefore.push_back( ctorStmt );
     407                                        stmtsToAddBefore.push_back( ctorStmt );
    408408
    409409                                // Return the retVal object.
     
    421421        void genInit( ast::TranslationUnit & transUnit ) {
    422422                ast::Pass<HoistArrayDimension_NoResolve_New>::run( transUnit );
    423                 ast::Pass<ReturnFixer_New>::run( transUnit );
    424         }
    425 
    426         void fixReturnStatements( ast::TranslationUnit & transUnit ) {
    427423                ast::Pass<ReturnFixer_New>::run( transUnit );
    428424        }
  • src/InitTweak/GenInit.h

    r2e9b59b rba897d21  
    1010// Created On       : Mon May 18 07:44:20 2015
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Mar 18 14:22:00 2022
    13 // Update Count     : 7
     12// Last Modified On : Fri Oct 22 16:08:00 2021
     13// Update Count     : 6
    1414//
    1515
     
    3131        /// Converts return statements into copy constructor calls on the hidden return variable
    3232        void fixReturnStatements( std::list< Declaration * > & translationUnit );
    33         void fixReturnStatements( ast::TranslationUnit & translationUnit );
    3433
    3534        /// generates a single ctor/dtor statement using objDecl as the 'this' parameter and arg as the optional argument
  • src/InitTweak/InitTweak.cc

    r2e9b59b rba897d21  
    423423                                loc, targetLabel.newName(), { new ast::Attribute{ "unused" } } };
    424424
    425                         std::vector< ast::ptr< ast::CaseClause > > branches;
     425                        std::vector< ast::ptr< ast::Stmt > > branches;
    426426                        for ( const ast::Init * init : *listInit ) {
    427427                                auto condition = ast::ConstantExpr::from_ulong( loc, cond );
     
    432432                                stmts.emplace_back(
    433433                                        new ast::BranchStmt{ loc, ast::BranchStmt::Break, switchLabel } );
    434                                 branches.emplace_back( new ast::CaseClause{ loc, condition, std::move( stmts ) } );
     434                                branches.emplace_back( new ast::CaseStmt{ loc, condition, std::move( stmts ) } );
    435435                        }
    436436                        out.emplace_back( new ast::SwitchStmt{ loc, index, std::move( branches ) } );
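
The hunk above turns a list initializer into a jump table: one branch per initializer, each branch's statements followed by a break to the generated switchLabel, the whole thing wrapped in a SwitchStmt over an index expression. The emitted control flow is shaped like this (index variable and bodies illustrative):

    #include <cstdio>

    // One case per initializer, values counting up from the starting cond;
    // every clause ends in a break to the switch's generated exit label.
    static void initAt( unsigned long index ) {
            switch ( index ) {
              case 0: std::puts( "run initializer 0" ); break;
              case 1: std::puts( "run initializer 1" ); break;
              case 2: std::puts( "run initializer 2" ); break;
            }
    }

    int main() {
            for ( unsigned long i = 0; i < 3; i += 1 ) initAt( i );
    }
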
  • src/Parser/DeclarationNode.cc

    r2e9b59b rba897d21  
    7878        delete variable.initializer;
    7979
    80 //      delete type;
     80        delete type;
    8181        delete bitfieldWidth;
    8282
     
    253253} // DeclarationNode::newAggregate
    254254
    255 DeclarationNode * DeclarationNode::newEnum( const string * name, DeclarationNode * constants, bool body, bool typed) {
     255DeclarationNode * DeclarationNode::newEnum( const string * name, DeclarationNode * constants, bool body ) {
    256256        DeclarationNode * newnode = new DeclarationNode;
    257257        newnode->type = new TypeData( TypeData::Enum );
     
    263263} // DeclarationNode::newEnum
    264264
    265 
    266 
    267265DeclarationNode * DeclarationNode::newName( const string * name ) {
    268266        DeclarationNode * newnode = new DeclarationNode;
     
    272270} // DeclarationNode::newName
    273271
    274 DeclarationNode * DeclarationNode::newEnumConstant( const string * name, ExpressionNode * constant ) { // Marker
     272DeclarationNode * DeclarationNode::newEnumConstant( const string * name, ExpressionNode * constant ) {
    275273        DeclarationNode * newnode = newName( name );
    276274        newnode->enumeratorValue.reset( constant );
     
    667665}
    668666
    669 DeclarationNode * DeclarationNode::addEnumBase( DeclarationNode * o ) {
    670         if ( o && o -> type)  {
    671                 type->base= o->type;
    672         }
    673         delete o;
    674         return this;
    675 }
    676 
    677667DeclarationNode * DeclarationNode::addTypedef() {
    678668        TypeData * newtype = new TypeData( TypeData::Symbolic );
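
On the r2e9b59b side, newEnum gains a `typed` flag and DeclarationNode gains addEnumBase, which grafts a parsed base type onto the enum's TypeData and consumes the donor node; that sharing is presumably also why the destructor's `delete type;` is commented out in the first hunk of this file. A reduced sketch of the adopt-and-consume pattern, trimmed to the two fields involved:

    struct TypeData { TypeData * base = nullptr; };

    struct DeclarationNode {
            TypeData * type = nullptr;

            // Adopt the donor's type as this enum's base type, consume the donor.
            DeclarationNode * addEnumBase( DeclarationNode * o ) {
                    if ( o && o->type ) type->base = o->type;
                    delete o;                              // o->type deliberately survives
                    return this;
            }
    };

    int main() {
            DeclarationNode * e = new DeclarationNode{ new TypeData };
            DeclarationNode * b = new DeclarationNode{ new TypeData };
            e->addEnumBase( b );                           // e->type->base is now set
            delete e;                                      // sketch leaks the TypeData nodes
    }
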
  • src/Parser/ParseNode.h

    r2e9b59b rba897d21  
    235235        static DeclarationNode * newFunction( const std::string * name, DeclarationNode * ret, DeclarationNode * param, StatementNode * body );
    236236        static DeclarationNode * newAggregate( AggregateDecl::Aggregate kind, const std::string * name, ExpressionNode * actuals, DeclarationNode * fields, bool body );
    237         static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body, bool typed );
     237        static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body );
    238238        static DeclarationNode * newEnumConstant( const std::string * name, ExpressionNode * constant );
    239239        static DeclarationNode * newName( const std::string * );
     
    265265        DeclarationNode * addType( DeclarationNode * );
    266266        DeclarationNode * addTypedef();
    267         DeclarationNode * addEnumBase( DeclarationNode * );
    268267        DeclarationNode * addAssertions( DeclarationNode * );
    269268        DeclarationNode * addName( std::string * );
  • src/Parser/StatementNode.cc

    r2e9b59b rba897d21  
    366366} // maybe_build_compound
    367367
    368 // Question
    369368Statement * build_asm( bool voltile, Expression * instruction, ExpressionNode * output, ExpressionNode * input, ExpressionNode * clobber, LabelNode * gotolabels ) {
    370369        list< Expression * > out, in;
  • src/Parser/TypeData.cc

    r2e9b59b rba897d21  
    918918EnumDecl * buildEnum( const TypeData * td, std::list< Attribute * > attributes, LinkageSpec::Spec linkage ) {
    919919        assert( td->kind == TypeData::Enum );
    920         Type * baseType = td->base ? typebuild(td->base) : nullptr;
    921         EnumDecl * ret = new EnumDecl( *td->enumeration.name, attributes, linkage, baseType );
     920        EnumDecl * ret = new EnumDecl( *td->enumeration.name, attributes, linkage );
    922921        buildList( td->enumeration.constants, ret->get_members() );
    923922        list< Declaration * >::iterator members = ret->get_members().begin();
    924         for ( const DeclarationNode * cur = td->enumeration.constants; cur != nullptr; cur = dynamic_cast< DeclarationNode * >( cur->get_next() ), ++members ) {
     923        for ( const DeclarationNode * cur = td->enumeration. constants; cur != nullptr; cur = dynamic_cast< DeclarationNode * >( cur->get_next() ), ++members ) {
    925924                if ( cur->has_enumeratorValue() ) {
    926925                        ObjectDecl * member = dynamic_cast< ObjectDecl * >(* members);
    927926                        member->set_init( new SingleInit( maybeMoveBuild< Expression >( cur->consume_enumeratorValue() ) ) );
    928                 } else {
    929                         if ( baseType && (!dynamic_cast<BasicType *>(baseType) || !dynamic_cast<BasicType *>(baseType)->isWholeNumber())) {
    930                                 SemanticError( td->location, "A non whole number enum value decl must be explicitly initialized." );
    931                         }
    932927                } // if
    933928        } // for
    934         ret->set_body( td->enumeration.body ); // Boolean; if it has body
     929        ret->set_body( td->enumeration.body );
    935930        return ret;
    936931} // buildEnum
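
buildEnum on the r2e9b59b side threads that base type into the EnumDecl and adds the matching semantic check: when a base exists and is not a whole-number BasicType, an enumerator without an explicit initializer is an error, since "previous value plus one" only makes sense for integral bases. The rule in isolation:

    #include <stdexcept>

    enum class BaseKind { None, WholeNumber, Other };      // stand-in for the BasicType test

    // Mirrors the buildEnum check: reject only when a base exists, it is not a
    // whole-number type, and the enumerator carries no explicit value.
    static void checkEnumerator( BaseKind base, bool hasExplicitValue ) {
            if ( base == BaseKind::Other && ! hasExplicitValue )
                    throw std::runtime_error(
                            "A non whole number enum value decl must be explicitly initialized." );
    }

    int main() {
            checkEnumerator( BaseKind::WholeNumber, false );   // fine: auto-increment
            checkEnumerator( BaseKind::Other, true );          // fine: value given
            checkEnumerator( BaseKind::None, false );          // fine: untyped enum
    }
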
  • src/Parser/TypeData.h

    r2e9b59b rba897d21  
    132132                                                 Initializer * init = nullptr, std::list< class Attribute * > attributes = std::list< class Attribute * >() );
    133133FunctionType * buildFunction( const TypeData * );
    134 Declaration * addEnumBase( Declaration *, const TypeData * );
    135134void buildKRFunction( const TypeData::Function_t & function );
    136135
  • src/Parser/parser.yy

    r2e9b59b rba897d21  
    1010// Created On       : Sat Sep  1 20:22:55 2001
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Mar 14 16:35:29 2022
    13 // Update Count     : 5276
     12// Last Modified On : Fri Feb 25 17:54:56 2022
     13// Update Count     : 5262
    1414//
    1515
     
    652652                        // Historic, transitional: Disallow commas in subscripts.
    653653                        // Switching to this behaviour may help check if a C compatibilty case uses comma-exprs in subscripts.
     654                // { SemanticError( yylloc, "New array subscript is currently unimplemented." ); $$ = nullptr; }
    654655                        // Current: Commas in subscripts make tuples.
    655656                { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, new ExpressionNode( build_tuple( (ExpressionNode *)($3->set_last( $5 ) ) )) ) ); }
     
    660661                // equivalent to the old x[i,j].
    661662                { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, $3 ) ); }
    662         | constant '[' assignment_expression ']'                        // 3[a], 'a'[a], 3.5[a]
    663                 { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, $3 ) ); }
    664         | string_literal '[' assignment_expression ']'          // "abc"[3], 3["abc"]
    665                 { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, new ExpressionNode( $1 ), $3 ) ); }
    666663        | postfix_expression '{' argument_expression_list_opt '}' // CFA, constructor call
    667664                {
     
    23032300        ;
    23042301
    2305 enum_type: // static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body, bool typed );                                                                                         // enum
     2302enum_type:                                                                                              // enum
    23062303        ENUM attribute_list_opt '{' enumerator_list comma_opt '}'
    2307                 { $$ = DeclarationNode::newEnum( nullptr, $4, true, false )->addQualifiers( $2 ); }
     2304                { $$ = DeclarationNode::newEnum( nullptr, $4, true )->addQualifiers( $2 ); }
    23082305        | ENUM attribute_list_opt identifier
    23092306                { typedefTable.makeTypedef( *$3 ); }
    23102307          '{' enumerator_list comma_opt '}'
    2311                 { $$ = DeclarationNode::newEnum( $3, $6, true, false )->addQualifiers( $2 ); }
     2308                { $$ = DeclarationNode::newEnum( $3, $6, true )->addQualifiers( $2 ); }
    23122309        | ENUM attribute_list_opt typedef_name                          // unqualified type name
    23132310          '{' enumerator_list comma_opt '}'
    2314                 { $$ = DeclarationNode::newEnum( $3->name, $5, true, false )->addQualifiers( $2 ); }
     2311                { $$ = DeclarationNode::newEnum( $3->name, $5, true )->addQualifiers( $2 ); }
    23152312        | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt '{' enumerator_list comma_opt '}'
    23162313                {
    2317                         if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 )
    2318                         { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); }
    2319                         // SemanticError( yylloc, "Typed enumeration is currently unimplemented." ); $$ = nullptr;
    2320 
    2321                         $$ = DeclarationNode::newEnum( nullptr, $7, true, true ) ->addQualifiers( $5 )  -> addEnumBase( $3 );
    2322                         // $$ = DeclarationNode::newEnum( nullptr, $7, true, true ) ->addQualifiers( $5 );
    2323                 }
    2324         | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt identifier attribute_list_opt // Question: why attributes/qualifier after identifier
     2314                        if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 ) { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); }
     2315                        SemanticError( yylloc, "Typed enumeration is currently unimplemented." ); $$ = nullptr;
     2316                }
     2317        | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt identifier attribute_list_opt
    23252318                {
    23262319                        if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 ) { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); }
     
    23292322          '{' enumerator_list comma_opt '}'
    23302323                {
    2331                         $$ = DeclarationNode::newEnum( $6, $10, true, true ) -> addQualifiers( $5 ) -> addQualifiers( $7 ) -> addEnumBase( $3 );
    2332                         // $$ = DeclarationNode::newEnum( $6, $10, true, true ) -> addQualifiers( $5 ) -> addQualifiers( $7 );
     2324                        SemanticError( yylloc, "Typed enumeration is currently unimplemented." ); $$ = nullptr;
    23332325                }
    23342326        | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt typedef_name attribute_list_opt '{' enumerator_list comma_opt '}'
     
    23362328                        if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 ) { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); }
    23372329                        typedefTable.makeTypedef( *$6->name );
    2338                         $$ = DeclarationNode::newEnum( $6->name, $9, true, true ) -> addQualifiers( $5 ) -> addQualifiers( $7 ) -> addEnumBase( $3 );
    2339                         // $$ = DeclarationNode::newEnum( $6->name, $9, true, true ) -> addQualifiers( $5 ) -> addQualifiers( $7 );
     2330                        SemanticError( yylloc, "Typed enumeration is currently unimplemented." ); $$ = nullptr;
    23402331                }
    23412332        | enum_type_nobody
     
    23442335enum_type_nobody:                                                                               // enum - {...}
    23452336        ENUM attribute_list_opt identifier
    2346                 { typedefTable.makeTypedef( *$3 ); $$ = DeclarationNode::newEnum( $3, 0, false, false )->addQualifiers( $2 ); }
     2337                { typedefTable.makeTypedef( *$3 ); $$ = DeclarationNode::newEnum( $3, 0, false )->addQualifiers( $2 ); }
    23472338        | ENUM attribute_list_opt type_name                                     // qualified type name
    2348                 { typedefTable.makeTypedef( *$3->type->symbolic.name ); $$ = DeclarationNode::newEnum( $3->type->symbolic.name, 0, false, false )->addQualifiers( $2 ); }
     2339                { typedefTable.makeTypedef( *$3->type->symbolic.name ); $$ = DeclarationNode::newEnum( $3->type->symbolic.name, 0, false )->addQualifiers( $2 ); }
    23492340        ;
    23502341
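
The grammar hunks are the user-visible half of the same feature: r2e9b59b accepts the ENUM '(' cfa_abstract_parameter_declaration ')' forms and routes them through newEnum(..., typed = true) plus addEnumBase, while ba897d21 still rejects them with "Typed enumeration is currently unimplemented." (both sides keep the check that storage classes and CV qualifiers are meaningless on enumerators). Judging from the productions, the surface form is roughly `enum( T ) E { ... }`, analogous to fixing an underlying type in C++:

    #include <cstdio>

    // C++ analogue of an enum with an explicit base type: the base fixes the
    // representation, so sizeof(Colour) is sizeof(unsigned char).
    enum Colour : unsigned char { Red, Green, Blue };

    int main() {
            std::printf( "sizeof(Colour) = %zu\n", sizeof( Colour ) );   // prints 1
    }
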
  • src/ResolvExpr/CandidateFinder.cpp

    r2e9b59b rba897d21  
    1010// Created On       : Wed Jun 5 14:30:00 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Mar 16 11:58:00 2022
    13 // Update Count     : 3
     12// Last Modified On : Tue Oct  1 14:55:00 2019
     13// Update Count     : 2
    1414//
    1515
     
    595595        /// Actually visits expressions to find their candidate interpretations
    596596        class Finder final : public ast::WithShortCircuiting {
    597                 const ResolveContext & context;
    598597                const ast::SymbolTable & symtab;
    599598        public:
     
    619618
    620619                Finder( CandidateFinder & f )
    621                 : context( f.context ), symtab( context.symtab ), selfFinder( f ),
    622                   candidates( f.candidates ), tenv( f.env ), targetType( f.targetType ) {}
     620                : symtab( f.localSyms ), selfFinder( f ), candidates( f.candidates ), tenv( f.env ),
     621                  targetType( f.targetType ) {}
    623622
    624623                void previsit( const ast::Node * ) { visit_children = false; }
     
    873872                        Tuples::handleTupleAssignment( selfFinder, untypedExpr, argCandidates );
    874873
    875                         CandidateFinder funcFinder( context, tenv );
     874                        CandidateFinder funcFinder{ symtab, tenv };
    876875                        if (auto nameExpr = untypedExpr->func.as<ast::NameExpr>()) {
    877876                                auto kind = ast::SymbolTable::getSpecialFunctionKind(nameExpr->name);
     
    919918                        // find function operators
    920919                        ast::ptr< ast::Expr > opExpr = new ast::NameExpr{ untypedExpr->location, "?()" };
    921                         CandidateFinder opFinder( context, tenv );
     920                        CandidateFinder opFinder{ symtab, tenv };
    922921                        // okay if there aren't any function operations
    923922                        opFinder.find( opExpr, ResolvMode::withoutFailFast() );
     
    10601059
    10611060                void postvisit( const ast::AddressExpr * addressExpr ) {
    1062                         CandidateFinder finder( context, tenv );
     1061                        CandidateFinder finder{ symtab, tenv };
    10631062                        finder.find( addressExpr->arg );
    10641063
     
    10801079                        ast::ptr< ast::Type > toType = castExpr->result;
    10811080                        assert( toType );
    1082                         toType = resolveTypeof( toType, context );
     1081                        toType = resolveTypeof( toType, symtab );
    10831082                        // toType = SymTab::validateType( castExpr->location, toType, symtab );
    10841083                        toType = adjustExprType( toType, tenv, symtab );
    10851084
    1086                         CandidateFinder finder( context, tenv, toType );
     1085                        CandidateFinder finder{ symtab, tenv, toType };
    10871086                        finder.find( castExpr->arg, ResolvMode::withAdjustment() );
    10881087
     
    11371136                void postvisit( const ast::VirtualCastExpr * castExpr ) {
    11381137                        assertf( castExpr->result, "Implicit virtual cast targets not yet supported." );
    1139                         CandidateFinder finder( context, tenv );
     1138                        CandidateFinder finder{ symtab, tenv };
    11401139                        // don't prune here, all alternatives guaranteed to have same type
    11411140                        finder.find( castExpr->arg, ResolvMode::withoutPrune() );
     
    11541153                        auto target = inst->base.get();
    11551154
    1156                         CandidateFinder finder( context, tenv );
     1155                        CandidateFinder finder{ symtab, tenv };
    11571156
    11581157                        auto pick_alternatives = [target, this](CandidateList & found, bool expect_ref) {
     
    12031202
    12041203                void postvisit( const ast::UntypedMemberExpr * memberExpr ) {
    1205                         CandidateFinder aggFinder( context, tenv );
     1204                        CandidateFinder aggFinder{ symtab, tenv };
    12061205                        aggFinder.find( memberExpr->aggregate, ResolvMode::withAdjustment() );
    12071206                        for ( CandidateRef & agg : aggFinder.candidates ) {
     
    12881287                                addCandidate(
    12891288                                        new ast::SizeofExpr{
    1290                                                 sizeofExpr->location, resolveTypeof( sizeofExpr->type, context ) },
     1289                                                sizeofExpr->location, resolveTypeof( sizeofExpr->type, symtab ) },
    12911290                                        tenv );
    12921291                        } else {
    12931292                                // find all candidates for the argument to sizeof
    1294                                 CandidateFinder finder( context, tenv );
     1293                                CandidateFinder finder{ symtab, tenv };
    12951294                                finder.find( sizeofExpr->expr );
    12961295                                // find the lowest-cost candidate, otherwise ambiguous
     
    13121311                                addCandidate(
    13131312                                        new ast::AlignofExpr{
    1314                                                 alignofExpr->location, resolveTypeof( alignofExpr->type, context ) },
     1313                                                alignofExpr->location, resolveTypeof( alignofExpr->type, symtab ) },
    13151314                                        tenv );
    13161315                        } else {
    13171316                                // find all candidates for the argument to alignof
    1318                                 CandidateFinder finder( context, tenv );
     1317                                CandidateFinder finder{ symtab, tenv };
    13191318                                finder.find( alignofExpr->expr );
    13201319                                // find the lowest-cost candidate, otherwise ambiguous
     
    13551354
    13561355                void postvisit( const ast::LogicalExpr * logicalExpr ) {
    1357                         CandidateFinder finder1( context, tenv );
     1356                        CandidateFinder finder1{ symtab, tenv };
    13581357                        finder1.find( logicalExpr->arg1, ResolvMode::withAdjustment() );
    13591358                        if ( finder1.candidates.empty() ) return;
    13601359
    1361                         CandidateFinder finder2( context, tenv );
     1360                        CandidateFinder finder2{ symtab, tenv };
    13621361                        finder2.find( logicalExpr->arg2, ResolvMode::withAdjustment() );
    13631362                        if ( finder2.candidates.empty() ) return;
     
    13851384                void postvisit( const ast::ConditionalExpr * conditionalExpr ) {
    13861385                        // candidates for condition
    1387                         CandidateFinder finder1( context, tenv );
     1386                        CandidateFinder finder1{ symtab, tenv };
    13881387                        finder1.find( conditionalExpr->arg1, ResolvMode::withAdjustment() );
    13891388                        if ( finder1.candidates.empty() ) return;
    13901389
    13911390                        // candidates for true result
    1392                         CandidateFinder finder2( context, tenv );
     1391                        CandidateFinder finder2{ symtab, tenv };
    13931392                        finder2.find( conditionalExpr->arg2, ResolvMode::withAdjustment() );
    13941393                        if ( finder2.candidates.empty() ) return;
    13951394
    13961395                        // candidates for false result
    1397                         CandidateFinder finder3( context, tenv );
     1396                        CandidateFinder finder3{ symtab, tenv };
    13981397                        finder3.find( conditionalExpr->arg3, ResolvMode::withAdjustment() );
    13991398                        if ( finder3.candidates.empty() ) return;
     
    14461445                void postvisit( const ast::CommaExpr * commaExpr ) {
    14471446                        ast::TypeEnvironment env{ tenv };
    1448                         ast::ptr< ast::Expr > arg1 = resolveInVoidContext( commaExpr->arg1, context, env );
    1449 
    1450                         CandidateFinder finder2( context, env );
     1447                        ast::ptr< ast::Expr > arg1 = resolveInVoidContext( commaExpr->arg1, symtab, env );
     1448
     1449                        CandidateFinder finder2{ symtab, env };
    14511450                        finder2.find( commaExpr->arg2, ResolvMode::withAdjustment() );
    14521451
     
    14611460
    14621461                void postvisit( const ast::ConstructorExpr * ctorExpr ) {
    1463                         CandidateFinder finder( context, tenv );
     1462                        CandidateFinder finder{ symtab, tenv };
    14641463                        finder.find( ctorExpr->callExpr, ResolvMode::withoutPrune() );
    14651464                        for ( CandidateRef & r : finder.candidates ) {
     
    14701469                void postvisit( const ast::RangeExpr * rangeExpr ) {
    14711470                        // resolve low and high, accept candidates where low and high types unify
    1472                         CandidateFinder finder1( context, tenv );
     1471                        CandidateFinder finder1{ symtab, tenv };
    14731472                        finder1.find( rangeExpr->low, ResolvMode::withAdjustment() );
    14741473                        if ( finder1.candidates.empty() ) return;
    14751474
    1476                         CandidateFinder finder2( context, tenv );
     1475                        CandidateFinder finder2{ symtab, tenv };
    14771476                        finder2.find( rangeExpr->high, ResolvMode::withAdjustment() );
    14781477                        if ( finder2.candidates.empty() ) return;
     
    15501549
    15511550                void postvisit( const ast::UniqueExpr * unqExpr ) {
    1552                         CandidateFinder finder( context, tenv );
     1551                        CandidateFinder finder{ symtab, tenv };
    15531552                        finder.find( unqExpr->expr, ResolvMode::withAdjustment() );
    15541553                        for ( CandidateRef & r : finder.candidates ) {
     
    15591558
    15601559                void postvisit( const ast::StmtExpr * stmtExpr ) {
    1561                         addCandidate( resolveStmtExpr( stmtExpr, context ), tenv );
     1560                        addCandidate( resolveStmtExpr( stmtExpr, symtab ), tenv );
    15621561                }
    15631562
     
    15711570                        for ( const ast::InitAlternative & initAlt : initExpr->initAlts ) {
    15721571                                // calculate target type
    1573                                 const ast::Type * toType = resolveTypeof( initAlt.type, context );
     1572                                const ast::Type * toType = resolveTypeof( initAlt.type, symtab );
    15741573                                // toType = SymTab::validateType( initExpr->location, toType, symtab );
    15751574                                toType = adjustExprType( toType, tenv, symtab );
     
    15771576                                // types are not bound to the initialization type, since return type variables are
    15781577                                // only open for the duration of resolving the UntypedExpr.
    1579                                 CandidateFinder finder( context, tenv, toType );
     1578                                CandidateFinder finder{ symtab, tenv, toType };
    15801579                                finder.find( initExpr->expr, ResolvMode::withAdjustment() );
    15811580                                for ( CandidateRef & cand : finder.candidates ) {
     
    16941693                }
    16951694                else {
    1696                         satisfyAssertions(candidate, context.symtab, satisfied, errors);
     1695                        satisfyAssertions(candidate, localSyms, satisfied, errors);
    16971696                        needRecomputeKey = true;
    16981697                }
     
    18561855                        r->expr = ast::mutate_field(
    18571856                                r->expr.get(), &ast::Expr::result,
    1858                                 adjustExprType( r->expr->result, r->env, context.symtab ) );
     1857                                adjustExprType( r->expr->result, r->env, localSyms ) );
    18591858                }
    18601859        }
     
    18741873
    18751874        for ( const auto & x : xs ) {
    1876                 out.emplace_back( context, env );
     1875                out.emplace_back( localSyms, env );
    18771876                out.back().find( x, ResolvMode::withAdjustment() );
    18781877
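
Editor's note: every hunk above makes the same mechanical substitution. The r2e9b59b column threads a ResolveContext through each sub-resolution, while the rba897d21 column passes the symbol table alone. A minimal comparative fragment (not compilable on its own; names as they appear in the hunks):

        CandidateFinder finder( context, tenv );   // r2e9b59b: context = { symtab, global }
        CandidateFinder finder{ symtab, tenv };    // rba897d21: symbol table only
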
  • src/ResolvExpr/CandidateFinder.hpp

    r2e9b59b rba897d21  
    1010// Created On       : Wed Jun 5 14:30:00 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Mar 16 15:22:00 2022
    13 // Update Count     : 3
     12// Last Modified On : Tue Oct  1  9:51:00 2019
     13// Update Count     : 2
    1414//
    1515
     
    2525namespace ResolvExpr {
    2626
    27 struct ResolveContext;
    28 
    2927/// Data to perform expression resolution
    3028struct CandidateFinder {
    3129        CandidateList candidates;          ///< List of candidate resolutions
    32         const ResolveContext & context;  ///< Information about where the candidates are being found.
     30        const ast::SymbolTable & localSyms;   ///< Symbol table to lookup candidates
    3331        const ast::TypeEnvironment & env;  ///< Substitutions performed in this resolution
    3432        ast::ptr< ast::Type > targetType;  ///< Target type for resolution
     
    3634
    3735        CandidateFinder(
    38                 const ResolveContext & context, const ast::TypeEnvironment & env,
     36                const ast::SymbolTable & syms, const ast::TypeEnvironment & env,
    3937                const ast::Type * tt = nullptr )
    40         : candidates(), context( context ), env( env ), targetType( tt ) {}
     38        : candidates(), localSyms( syms ), env( env ), targetType( tt ) {}
    4139
    4240        /// Fill candidates with feasible resolutions for `expr`
  • src/ResolvExpr/CandidatePrinter.cpp

    r2e9b59b rba897d21  
    1010// Created On       : Tue Nov  9  9:54:00 2021
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Mar 16 13:56:00 2022
    13 // Update Count     : 1
     12// Last Modified On : Tue Nov  9 15:47:00 2021
     13// Update Count     : 0
    1414//
    1515
     
    2222#include "AST/TranslationUnit.hpp"
    2323#include "ResolvExpr/CandidateFinder.hpp"
    24 #include "ResolvExpr/Resolver.h"
    2524
    2625#include <iostream>
     
    3029namespace {
    3130
    32 class CandidatePrintCore : public ast::WithSymbolTable,
    33                 public ast::WithConstTranslationUnit {
     31class CandidatePrintCore : public ast::WithSymbolTable {
    3432        std::ostream & os;
    3533public:
     
    3836        void postvisit( const ast::ExprStmt * stmt ) {
    3937                ast::TypeEnvironment env;
    40                 CandidateFinder finder( { symtab, transUnit().global }, env );
     38                CandidateFinder finder( symtab, env );
    4139                finder.find( stmt->expr, ResolvMode::withAdjustment() );
    4240                int count = 1;
  • src/ResolvExpr/ConversionCost.cc

    r2e9b59b rba897d21  
    333333                } else if ( dynamic_cast< const EnumInstType * >( dest ) ) {
    334334                        // xxx - not positive this is correct, but appears to allow casting int => enum
    335                         // TODO
    336                         EnumDecl * decl = dynamic_cast< const EnumInstType * >( dest )->baseEnum;
    337                         if ( decl->base ) {
    338                                 cost = Cost::infinity;
    339                         } else {
    340                                 cost = Cost::unsafe;
    341                         } // if
     335                        cost = Cost::unsafe;
    342336                } // if
    343337                // no cases for zero_t/one_t because it should not be possible to convert int, etc. to zero_t/one_t.
     
    616610        } else if ( dynamic_cast< const ast::EnumInstType * >( dst ) ) {
    617611                // xxx - not positive this is correct, but appears to allow casting int => enum
    618                 const ast::EnumDecl * decl = (dynamic_cast< const ast::EnumInstType * >( dst ))->base.get();
    619                 if ( decl->base ) {
    620                         cost = Cost::infinity;
    621                 } else {
    622                         cost = Cost::unsafe;
    623                 } // if
     612                cost = Cost::unsafe;
    624613        }
    625614}
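
Editor's note: both hunks in ConversionCost.cc change the same rule for int => enum conversions. On the rba897d21 side the conversion always costs Cost::unsafe; on the r2e9b59b side it costs Cost::infinity (i.e. is rejected) whenever the enumeration declares a base type. A hedged CFA illustration, with hypothetical declarations:

        enum Colour { Red, Green };              // untyped enum: int => Colour is unsafe in both revisions
        enum(char *) Name { Fred = "Fred" };     // typed enum: r2e9b59b makes int => Name impossible
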
  • src/ResolvExpr/RenameVars.h

    r2e9b59b rba897d21  
    3636        };
    3737        const ast::Type * renameTyVars( const ast::Type *, RenameMode mode = GEN_USAGE, bool reset = true );
     38       
    3839
    3940        /// resets internal state of renamer to avoid overflow
    4041        void resetTyVarRenaming();
     42
     43       
    4144} // namespace ResolvExpr
    4245
  • src/ResolvExpr/ResolveTypeof.cc

    r2e9b59b rba897d21  
    99// Author           : Richard C. Bilson
    1010// Created On       : Sun May 17 12:12:20 2015
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Mar 16 16:09:00 2022
    13 // Update Count     : 4
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Tue May 19 16:49:04 2015
     13// Update Count     : 3
    1414//
    1515
     
    2222#include "AST/Node.hpp"
    2323#include "AST/Pass.hpp"
    24 #include "AST/TranslationUnit.hpp"
    2524#include "AST/Type.hpp"
    2625#include "AST/TypeEnvironment.hpp"
     
    120119namespace {
    121120        struct ResolveTypeof_new : public ast::WithShortCircuiting {
    122                 const ResolveContext & context;
    123 
    124                 ResolveTypeof_new( const ResolveContext & context ) :
    125                         context( context ) {}
     121                const ast::SymbolTable & localSymtab;
     122
     123                ResolveTypeof_new( const ast::SymbolTable & syms ) : localSymtab( syms ) {}
    126124
    127125                void previsit( const ast::TypeofType * ) { visit_children = false; }
     
    139137                                ast::TypeEnvironment dummy;
    140138                                ast::ptr< ast::Expr > newExpr =
    141                                         resolveInVoidContext( typeofType->expr, context, dummy );
     139                                        resolveInVoidContext( typeofType->expr, localSymtab, dummy );
    142140                                assert( newExpr->result && ! newExpr->result->isVoid() );
    143141                                newType = newExpr->result;
     
    163161} // anonymous namespace
    164162
    165 const ast::Type * resolveTypeof( const ast::Type * type , const ResolveContext & context ) {
    166         ast::Pass< ResolveTypeof_new > mutator( context );
     163const ast::Type * resolveTypeof( const ast::Type * type , const ast::SymbolTable & symtab ) {
     164        ast::Pass< ResolveTypeof_new > mutator{ symtab };
    167165        return type->accept( mutator );
    168166}
    169167
    170168struct FixArrayDimension {
    171         const ResolveContext & context;
    172         FixArrayDimension(const ResolveContext & context) : context( context ) {}
     169        // should not require a mutable symbol table - prevent pass template instantiation
     170        const ast::SymbolTable & _symtab;
     171        FixArrayDimension(const ast::SymbolTable & symtab): _symtab(symtab) {}
    173172
    174173        const ast::ArrayType * previsit (const ast::ArrayType * arrayType) {
    175174                if (!arrayType->dimension) return arrayType;
    176175                auto mutType = mutate(arrayType);
    177                 auto globalSizeType = context.global.sizeType;
    178                 ast::ptr<ast::Type> sizetype = globalSizeType ? globalSizeType : new ast::BasicType(ast::BasicType::LongUnsignedInt);
    179                 mutType->dimension = findSingleExpression(arrayType->dimension, sizetype, context );
     176                ast::ptr<ast::Type> sizetype = ast::sizeType ? ast::sizeType : new ast::BasicType(ast::BasicType::LongUnsignedInt);
     177                mutType->dimension = findSingleExpression(arrayType->dimension, sizetype, _symtab);
    180178
    181179                if (InitTweak::isConstExpr(mutType->dimension)) {
     
    189187};
    190188
    191 const ast::Type * fixArrayType( const ast::Type * type, const ResolveContext & context ) {
    192         ast::Pass<FixArrayDimension> visitor(context);
     189const ast::Type * fixArrayType( const ast::Type * type, const ast::SymbolTable & symtab) {
     190        ast::Pass<FixArrayDimension> visitor {symtab};
    193191        return type->accept(visitor);
    194192}
    195193
    196 const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ResolveContext & context ) {
    197         if (decl->isTypeFixed) {
    198                 return decl;
    199         }
    200 
    201         auto mutDecl = mutate(decl);
    202         {
    203                 auto resolvedType = resolveTypeof(decl->type, context);
    204                 resolvedType = fixArrayType(resolvedType, context);
     194const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ast::SymbolTable & symtab ) {
     195        if (!decl->isTypeFixed) {
     196                auto mutDecl = mutate(decl);
     197                auto resolvedType = resolveTypeof(decl->type, symtab);
     198                resolvedType = fixArrayType(resolvedType, symtab);
    205199                mutDecl->type = resolvedType;
    206         }
    207 
    208         // Do not mangle unnamed variables.
    209         if (!mutDecl->name.empty()) {
    210                 mutDecl->mangleName = Mangle::mangle(mutDecl);
    211         }
    212 
    213         mutDecl->type = renameTyVars(mutDecl->type, RenameMode::GEN_EXPR_ID);
    214         mutDecl->isTypeFixed = true;
    215         return mutDecl;
     200
     201                // check variable length if object is an array.
     202                // xxx - should this be part of fixObjectType?
     203
     204                /*
     205                if (auto arrayType = dynamic_cast<const ast::ArrayType *>(resolvedType)) {
     206                        auto dimExpr = findSingleExpression(arrayType->dimension, ast::sizeType, symtab);
     207                        if (auto varexpr = arrayType->dimension.as<ast::VariableExpr>()) {// hoisted previously
     208                                if (InitTweak::isConstExpr(varexpr->var.strict_as<ast::ObjectDecl>()->init)) {
     209                                        auto mutType = mutate(arrayType);
     210                                        mutType->isVarLen = ast::LengthFlag::VariableLen;
     211                                        mutDecl->type = mutType;
     212                                }
     213                        }
     214                }
     215                */
     216
     217
     218                if (!mutDecl->name.empty())
     219                        mutDecl->mangleName = Mangle::mangle(mutDecl); // do not mangle unnamed variables
     220               
     221                mutDecl->type = renameTyVars(mutDecl->type, RenameMode::GEN_EXPR_ID);
     222                mutDecl->isTypeFixed = true;
     223                return mutDecl;
     224        }
     225        return decl;
    216226}
    217227
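
Editor's note: both variants of fixObjectType normalize a declaration exactly once: resolve any typeof in its type, resolve array dimensions against the size type, mangle the name (unnamed variables excepted), rename type variables, and set isTypeFixed. A hedged before/after sketch with a hypothetical declaration:

        typeof(0) xs[42];   // before: typeof unresolved, dimension untyped
        int xs[42];         // conceptually after: type resolved, dimension resolved at the size type,
                            // mangleName assigned, isTypeFixed = true
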
  • src/ResolvExpr/ResolveTypeof.h

    r2e9b59b rba897d21  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // ResolveTypeof.h --
     7// ResolveTypeof.h -- 
    88//
    99// Author           : Richard C. Bilson
    1010// Created On       : Sun May 17 12:14:53 2015
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Mar 16 11:33:00 2022
    13 // Update Count     : 4
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Sat Jul 22 09:38:35 2017
     13// Update Count     : 3
    1414//
    1515
     
    2222namespace ast {
    2323        class Type;
     24        class SymbolTable;
    2425        class ObjectDecl;
    2526}
    2627
    2728namespace ResolvExpr {
    28         struct ResolveContext;
    29 
    3029        Type *resolveTypeof( Type*, const SymTab::Indexer &indexer );
    31         const ast::Type * resolveTypeof( const ast::Type *, const ResolveContext & );
    32         const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ResolveContext & );
     30        const ast::Type * resolveTypeof( const ast::Type *, const ast::SymbolTable & );
     31        const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ast::SymbolTable & symtab );
    3332} // namespace ResolvExpr
    3433
  • src/ResolvExpr/Resolver.cc

    r2e9b59b rba897d21  
    99// Author           : Aaron B. Moss
    1010// Created On       : Sun May 17 12:17:01 2015
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Mar 18 10:41:00 2022
    13 // Update Count     : 247
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Tue Feb  1 16:27:14 2022
     13// Update Count     : 245
    1414//
    1515
     
    997997                /// Calls the CandidateFinder and finds the single best candidate
    998998                CandidateRef findUnfinishedKindExpression(
    999                         const ast::Expr * untyped, const ResolveContext & context, const std::string & kind,
     999                        const ast::Expr * untyped, const ast::SymbolTable & symtab, const std::string & kind,
    10001000                        std::function<bool(const Candidate &)> pred = anyCandidate, ResolvMode mode = {}
    10011001                ) {
     
    10071007                        ++recursion_level;
    10081008                        ast::TypeEnvironment env;
    1009                         CandidateFinder finder( context, env );
     1009                        CandidateFinder finder{ symtab, env };
    10101010                        finder.find( untyped, recursion_level == 1 ? mode.atTopLevel() : mode );
    10111011                        --recursion_level;
     
    11291129
    11301130        ast::ptr< ast::Expr > resolveInVoidContext(
    1131                 const ast::Expr * expr, const ResolveContext & context,
    1132                 ast::TypeEnvironment & env
     1131                const ast::Expr * expr, const ast::SymbolTable & symtab, ast::TypeEnvironment & env
    11331132        ) {
    11341133                assertf( expr, "expected a non-null expression" );
     
    11371136                ast::ptr< ast::CastExpr > untyped = new ast::CastExpr{ expr };
    11381137                CandidateRef choice = findUnfinishedKindExpression(
    1139                         untyped, context, "", anyCandidate, ResolvMode::withAdjustment() );
     1138                        untyped, symtab, "", anyCandidate, ResolvMode::withAdjustment() );
    11401139
    11411140                // a cast expression has either 0 or 1 interpretations (by language rules);
     
    11501149                /// context.
    11511150                ast::ptr< ast::Expr > findVoidExpression(
    1152                         const ast::Expr * untyped, const ResolveContext & context
     1151                        const ast::Expr * untyped, const ast::SymbolTable & symtab
    11531152                ) {
    11541153                        ast::TypeEnvironment env;
    1155                         ast::ptr< ast::Expr > newExpr = resolveInVoidContext( untyped, context, env );
     1154                        ast::ptr< ast::Expr > newExpr = resolveInVoidContext( untyped, symtab, env );
    11561155                        finishExpr( newExpr, env, untyped->env );
    11571156                        return newExpr;
     
    11641163                /// lowest cost, returning the resolved version
    11651164                ast::ptr< ast::Expr > findKindExpression(
    1166                         const ast::Expr * untyped, const ResolveContext & context,
     1165                        const ast::Expr * untyped, const ast::SymbolTable & symtab,
    11671166                        std::function<bool(const Candidate &)> pred = anyCandidate,
    11681167                        const std::string & kind = "", ResolvMode mode = {}
     
    11701169                        if ( ! untyped ) return {};
    11711170                        CandidateRef choice =
    1172                                 findUnfinishedKindExpression( untyped, context, kind, pred, mode );
     1171                                findUnfinishedKindExpression( untyped, symtab, kind, pred, mode );
    11731172                        ResolvExpr::finishExpr( choice->expr, choice->env, untyped->env );
    11741173                        return std::move( choice->expr );
     
    11771176                /// Resolve `untyped` to the single expression whose candidate is the best match
    11781177                ast::ptr< ast::Expr > findSingleExpression(
    1179                         const ast::Expr * untyped, const ResolveContext & context
     1178                        const ast::Expr * untyped, const ast::SymbolTable & symtab
    11801179                ) {
    11811180                        Stats::ResolveTime::start( untyped );
    1182                         auto res = findKindExpression( untyped, context );
     1181                        auto res = findKindExpression( untyped, symtab );
    11831182                        Stats::ResolveTime::stop();
    11841183                        return res;
     
    11871186
    11881187        ast::ptr< ast::Expr > findSingleExpression(
    1189                 const ast::Expr * untyped, const ast::Type * type,
    1190                 const ResolveContext & context
     1188                const ast::Expr * untyped, const ast::Type * type, const ast::SymbolTable & symtab
    11911189        ) {
    11921190                assert( untyped && type );
    11931191                ast::ptr< ast::Expr > castExpr = new ast::CastExpr{ untyped, type };
    1194                 ast::ptr< ast::Expr > newExpr = findSingleExpression( castExpr, context );
    1195                 removeExtraneousCast( newExpr, context.symtab );
     1192                ast::ptr< ast::Expr > newExpr = findSingleExpression( castExpr, symtab );
     1193                removeExtraneousCast( newExpr, symtab );
    11961194                return newExpr;
    11971195        }
     
    12191217                /// Resolve `untyped` as an integral expression, returning the resolved version
    12201218                ast::ptr< ast::Expr > findIntegralExpression(
    1221                         const ast::Expr * untyped, const ResolveContext & context
     1219                        const ast::Expr * untyped, const ast::SymbolTable & symtab
    12221220                ) {
    1223                         return findKindExpression( untyped, context, hasIntegralType, "condition" );
     1221                        return findKindExpression( untyped, symtab, hasIntegralType, "condition" );
    12241222                }
    12251223
     
    12511249                // for work previously in GenInit
    12521250                static InitTweak::ManagedTypes_new managedTypes;
    1253                 ResolveContext context;
    12541251
    12551252                bool inEnumDecl = false;
     
    12571254        public:
    12581255                static size_t traceId;
    1259                 Resolver_new( const ast::TranslationGlobal & global ) :
    1260                         context{ symtab, global } {}
    1261                 Resolver_new( const ResolveContext & context ) :
    1262                         ast::WithSymbolTable{ context.symtab },
    1263                         context{ symtab, context.global } {}
     1256                Resolver_new() = default;
     1257                Resolver_new( const ast::SymbolTable & syms ) { symtab = syms; }
    12641258
    12651259                const ast::FunctionDecl * previsit( const ast::FunctionDecl * );
     
    12781272                const ast::AsmStmt *         previsit( const ast::AsmStmt * );
    12791273                const ast::IfStmt *          previsit( const ast::IfStmt * );
    1280                 const ast::WhileDoStmt *     previsit( const ast::WhileDoStmt * );
     1274                const ast::WhileDoStmt *       previsit( const ast::WhileDoStmt * );
    12811275                const ast::ForStmt *         previsit( const ast::ForStmt * );
    12821276                const ast::SwitchStmt *      previsit( const ast::SwitchStmt * );
    1283                 const ast::CaseClause *      previsit( const ast::CaseClause * );
     1277                const ast::CaseStmt *        previsit( const ast::CaseStmt * );
    12841278                const ast::BranchStmt *      previsit( const ast::BranchStmt * );
    12851279                const ast::ReturnStmt *      previsit( const ast::ReturnStmt * );
    12861280                const ast::ThrowStmt *       previsit( const ast::ThrowStmt * );
    1287                 const ast::CatchClause *     previsit( const ast::CatchClause * );
    1288                 const ast::CatchClause *     postvisit( const ast::CatchClause * );
     1281                const ast::CatchStmt *       previsit( const ast::CatchStmt * );
     1282                const ast::CatchStmt *       postvisit( const ast::CatchStmt * );
    12891283                const ast::WaitForStmt *     previsit( const ast::WaitForStmt * );
    12901284                const ast::WithStmt *        previsit( const ast::WithStmt * );
     
    13051299
    13061300        void resolve( ast::TranslationUnit& translationUnit ) {
    1307                 ast::Pass< Resolver_new >::run( translationUnit, translationUnit.global );
     1301                ast::Pass< Resolver_new >::run( translationUnit );
    13081302        }
    13091303
    13101304        ast::ptr< ast::Init > resolveCtorInit(
    1311                 const ast::ConstructorInit * ctorInit, const ResolveContext & context
     1305                const ast::ConstructorInit * ctorInit, const ast::SymbolTable & symtab
    13121306        ) {
    13131307                assert( ctorInit );
    1314                 ast::Pass< Resolver_new > resolver( context );
     1308                ast::Pass< Resolver_new > resolver{ symtab };
    13151309                return ctorInit->accept( resolver );
    13161310        }
    13171311
    13181312        const ast::Expr * resolveStmtExpr(
    1319                 const ast::StmtExpr * stmtExpr, const ResolveContext & context
     1313                const ast::StmtExpr * stmtExpr, const ast::SymbolTable & symtab
    13201314        ) {
    13211315                assert( stmtExpr );
    1322                 ast::Pass< Resolver_new > resolver( context );
     1316                ast::Pass< Resolver_new > resolver{ symtab };
    13231317                auto ret = mutate(stmtExpr->accept(resolver));
    13241318                strict_dynamic_cast< ast::StmtExpr * >( ret )->computeResult();
     
    13271321
    13281322        namespace {
    1329                 const ast::Attribute * handleAttribute(const CodeLocation & loc, const ast::Attribute * attr, const ResolveContext & context) {
     1323                const ast::Attribute * handleAttribute(const CodeLocation & loc, const ast::Attribute * attr, const ast::SymbolTable & symtab) {
    13301324                        std::string name = attr->normalizedName();
    13311325                        if (name == "constructor" || name == "destructor") {
    13321326                                if (attr->params.size() == 1) {
    13331327                                        auto arg = attr->params.front();
    1334                                         auto resolved = ResolvExpr::findSingleExpression( arg, new ast::BasicType( ast::BasicType::LongLongSignedInt ), context );
     1328                                        auto resolved = ResolvExpr::findSingleExpression( arg, new ast::BasicType( ast::BasicType::LongLongSignedInt ), symtab );
    13351329                                        auto result = eval(arg);
    13361330
     
    13751369
    13761370                        for (auto & attr: mutDecl->attributes) {
    1377                                 attr = handleAttribute(mutDecl->location, attr, context );
     1371                                attr = handleAttribute(mutDecl->location, attr, symtab);
    13781372                        }
    13791373
     
    13851379                        for (auto & typeParam : mutDecl->type_params) {
    13861380                                symtab.addType(typeParam);
    1387                                 mutType->forall.emplace_back(new ast::TypeInstType(typeParam));
     1381                                mutType->forall.emplace_back(new ast::TypeInstType(typeParam->name, typeParam));
    13881382                        }
    13891383                        for (auto & asst : mutDecl->assertions) {
    1390                                 asst = fixObjectType(asst.strict_as<ast::ObjectDecl>(), context);
     1384                                asst = fixObjectType(asst.strict_as<ast::ObjectDecl>(), symtab);
    13911385                                symtab.addId(asst);
    13921386                                mutType->assertions.emplace_back(new ast::VariableExpr(functionDecl->location, asst));
     
    14001394
    14011395                        for (auto & param : mutDecl->params) {
    1402                                 param = fixObjectType(param.strict_as<ast::ObjectDecl>(), context);
     1396                                param = fixObjectType(param.strict_as<ast::ObjectDecl>(), symtab);
    14031397                                symtab.addId(param);
    14041398                                paramTypes.emplace_back(param->get_type());
    14051399                        }
    14061400                        for (auto & ret : mutDecl->returns) {
    1407                                 ret = fixObjectType(ret.strict_as<ast::ObjectDecl>(), context);
     1401                                ret = fixObjectType(ret.strict_as<ast::ObjectDecl>(), symtab);
    14081402                                returnTypes.emplace_back(ret->get_type());
    14091403                        }
     
    14761470                        // enumerator initializers should not use the enum type to initialize, since the
    14771471                        // enum type is still incomplete at this point. Use `int` instead.
    1478 
    1479                         if (dynamic_cast< const ast::EnumInstType * >( objectDecl->get_type() )->base->base) { // const ast::PointerType &
    1480                                 // const ast::Type * enumBase =  (dynamic_cast< const ast::EnumInstType * >( objectDecl->get_type() )->base->base.get());
    1481                                 // const ast::PointerType * enumBaseAsPtr = dynamic_cast<const ast::PointerType *>(enumBase);
    1482 
    1483                                 // if ( enumBaseAsPtr ) {
    1484                                 //      const ast::Type * pointerBase = enumBaseAsPtr->base.get();
    1485                                 //      if ( dynamic_cast<const ast::BasicType *>(pointerBase) ) {
    1486                                 //              objectDecl = fixObjectType(objectDecl, context);
    1487                                 //              if (dynamic_cast<const ast::BasicType *>(pointerBase)->kind == ast::BasicType::Char)
    1488                                 //              currentObject = ast::CurrentObject{
    1489                                 //                      objectDecl->location,  new ast::PointerType{
    1490                                 //                              new ast::BasicType{ ast::BasicType::Char }
    1491                                 //                      } };
    1492                                 //      } else {
    1493                                 //              objectDecl = fixObjectType(objectDecl, context);
    1494                                 //              currentObject = ast::CurrentObject{objectDecl->location, new ast::BasicType{ ast::BasicType::SignedInt } };
    1495                                 //      }
    1496                                 // }
    1497                                 objectDecl = fixObjectType( objectDecl, context );
    1498                                 const ast::Type * enumBase =  (dynamic_cast< const ast::EnumInstType * >( objectDecl->get_type() )->base->base.get());
    1499                                 currentObject = ast::CurrentObject{
    1500                                         objectDecl->location,
    1501                                         enumBase
    1502                                 };
    1503                         } else {
    1504                                 objectDecl = fixObjectType( objectDecl, context );
    1505                                 currentObject = ast::CurrentObject{
    1506                                         objectDecl->location, new ast::BasicType{ ast::BasicType::SignedInt } };
    1507                         }
    1508 
     1472                        objectDecl = fixObjectType(objectDecl, symtab);
     1473                        currentObject = ast::CurrentObject{
     1474                                objectDecl->location, new ast::BasicType{ ast::BasicType::SignedInt } };
    15091475                }
    15101476                else {
    15111477                        if (!objectDecl->isTypeFixed) {
    1512                                 auto newDecl = fixObjectType(objectDecl, context);
     1478                                auto newDecl = fixObjectType(objectDecl, symtab);
    15131479                                auto mutDecl = mutate(newDecl);
    15141480
     
    15411507                        // nested type decls are hoisted already. no need to do anything
    15421508                        if (auto obj = member.as<ast::ObjectDecl>()) {
    1543                                 member = fixObjectType(obj, context);
     1509                                member = fixObjectType(obj, symtab);
    15441510                        }
    15451511                }
     
    15641530                return ast::mutate_field(
    15651531                        assertDecl, &ast::StaticAssertDecl::cond,
    1566                         findIntegralExpression( assertDecl->cond, context ) );
     1532                        findIntegralExpression( assertDecl->cond, symtab ) );
    15671533        }
    15681534
    15691535        template< typename PtrType >
    1570         const PtrType * handlePtrType( const PtrType * type, const ResolveContext & context ) {
     1536        const PtrType * handlePtrType( const PtrType * type, const ast::SymbolTable & symtab ) {
    15711537                if ( type->dimension ) {
    1572                         ast::ptr< ast::Type > sizeType = context.global.sizeType;
     1538                        ast::ptr< ast::Type > sizeType = ast::sizeType;
    15731539                        ast::mutate_field(
    15741540                                type, &PtrType::dimension,
    1575                                 findSingleExpression( type->dimension, sizeType, context ) );
     1541                                findSingleExpression( type->dimension, sizeType, symtab ) );
    15761542                }
    15771543                return type;
     
    15791545
    15801546        const ast::ArrayType * Resolver_new::previsit( const ast::ArrayType * at ) {
    1581                 return handlePtrType( at, context );
     1547                return handlePtrType( at, symtab );
    15821548        }
    15831549
    15841550        const ast::PointerType * Resolver_new::previsit( const ast::PointerType * pt ) {
    1585                 return handlePtrType( pt, context );
     1551                return handlePtrType( pt, symtab );
    15861552        }
    15871553
     
    15911557
    15921558                return ast::mutate_field(
    1593                         exprStmt, &ast::ExprStmt::expr, findVoidExpression( exprStmt->expr, context ) );
     1559                        exprStmt, &ast::ExprStmt::expr, findVoidExpression( exprStmt->expr, symtab ) );
    15941560        }
    15951561
     
    15981564
    15991565                asmExpr = ast::mutate_field(
    1600                         asmExpr, &ast::AsmExpr::operand, findVoidExpression( asmExpr->operand, context ) );
     1566                        asmExpr, &ast::AsmExpr::operand, findVoidExpression( asmExpr->operand, symtab ) );
    16011567
    16021568                return asmExpr;
     
    16121578        const ast::IfStmt * Resolver_new::previsit( const ast::IfStmt * ifStmt ) {
    16131579                return ast::mutate_field(
    1614                         ifStmt, &ast::IfStmt::cond, findIntegralExpression( ifStmt->cond, context ) );
     1580                        ifStmt, &ast::IfStmt::cond, findIntegralExpression( ifStmt->cond, symtab ) );
    16151581        }
    16161582
    16171583        const ast::WhileDoStmt * Resolver_new::previsit( const ast::WhileDoStmt * whileDoStmt ) {
    16181584                return ast::mutate_field(
    1619                         whileDoStmt, &ast::WhileDoStmt::cond, findIntegralExpression( whileDoStmt->cond, context ) );
     1585                        whileDoStmt, &ast::WhileDoStmt::cond, findIntegralExpression( whileDoStmt->cond, symtab ) );
    16201586        }
    16211587
     
    16231589                if ( forStmt->cond ) {
    16241590                        forStmt = ast::mutate_field(
    1625                                 forStmt, &ast::ForStmt::cond, findIntegralExpression( forStmt->cond, context ) );
     1591                                forStmt, &ast::ForStmt::cond, findIntegralExpression( forStmt->cond, symtab ) );
    16261592                }
    16271593
    16281594                if ( forStmt->inc ) {
    16291595                        forStmt = ast::mutate_field(
    1630                                 forStmt, &ast::ForStmt::inc, findVoidExpression( forStmt->inc, context ) );
     1596                                forStmt, &ast::ForStmt::inc, findVoidExpression( forStmt->inc, symtab ) );
    16311597                }
    16321598
     
    16381604                switchStmt = ast::mutate_field(
    16391605                        switchStmt, &ast::SwitchStmt::cond,
    1640                         findIntegralExpression( switchStmt->cond, context ) );
     1606                        findIntegralExpression( switchStmt->cond, symtab ) );
    16411607                currentObject = ast::CurrentObject{ switchStmt->location, switchStmt->cond->result };
    16421608                return switchStmt;
    16431609        }
    16441610
    1645         const ast::CaseClause * Resolver_new::previsit( const ast::CaseClause * caseStmt ) {
     1611        const ast::CaseStmt * Resolver_new::previsit( const ast::CaseStmt * caseStmt ) {
    16461612                if ( caseStmt->cond ) {
    16471613                        std::deque< ast::InitAlternative > initAlts = currentObject.getOptions();
     
    16511617                        ast::ptr< ast::Expr > untyped =
    16521618                                new ast::CastExpr{ caseStmt->location, caseStmt->cond, initAlts.front().type };
    1653                         ast::ptr< ast::Expr > newExpr = findSingleExpression( untyped, context );
     1619                        ast::ptr< ast::Expr > newExpr = findSingleExpression( untyped, symtab );
    16541620
    16551621                        // case condition cannot have a cast in C, so it must be removed here, regardless of
     
    16591625                        }
    16601626
    1661                         caseStmt = ast::mutate_field( caseStmt, &ast::CaseClause::cond, newExpr );
     1627                        caseStmt = ast::mutate_field( caseStmt, &ast::CaseStmt::cond, newExpr );
    16621628                }
    16631629                return caseStmt;
     
    16721638                        branchStmt = ast::mutate_field(
    16731639                                branchStmt, &ast::BranchStmt::computedTarget,
    1674                                 findSingleExpression( branchStmt->computedTarget, target, context ) );
     1640                                findSingleExpression( branchStmt->computedTarget, target, symtab ) );
    16751641                }
    16761642                return branchStmt;
     
    16821648                        returnStmt = ast::mutate_field(
    16831649                                returnStmt, &ast::ReturnStmt::expr,
    1684                                 findSingleExpression( returnStmt->expr, functionReturn, context ) );
     1650                                findSingleExpression( returnStmt->expr, functionReturn, symtab ) );
    16851651                }
    16861652                return returnStmt;
     
    16971663                        throwStmt = ast::mutate_field(
    16981664                                throwStmt, &ast::ThrowStmt::expr,
    1699                                 findSingleExpression( throwStmt->expr, exceptType, context ) );
     1665                                findSingleExpression( throwStmt->expr, exceptType, symtab ) );
    17001666                }
    17011667                return throwStmt;
    17021668        }
    17031669
    1704         const ast::CatchClause * Resolver_new::previsit( const ast::CatchClause * catchClause ) {
     1670        const ast::CatchStmt * Resolver_new::previsit( const ast::CatchStmt * catchStmt ) {
    17051671                // Until we are very sure this invariant (ifs that move between passes have a then branch)
    17061672                // holds, check it. This allows a check for when to decode the mangling.
    1707                 if ( auto ifStmt = catchClause->body.as<ast::IfStmt>() ) {
     1673                if ( auto ifStmt = catchStmt->body.as<ast::IfStmt>() ) {
    17081674                        assert( ifStmt->then );
    17091675                }
    17101676                // Encode the catchStmt so the condition can see the declaration.
    1711                 if ( catchClause->cond ) {
    1712                         ast::CatchClause * clause = mutate( catchClause );
    1713                         clause->body = new ast::IfStmt( clause->location, clause->cond, nullptr, clause->body );
    1714                         clause->cond = nullptr;
    1715                         return clause;
    1716                 }
    1717                 return catchClause;
    1718         }
    1719 
    1720         const ast::CatchClause * Resolver_new::postvisit( const ast::CatchClause * catchClause ) {
     1677                if ( catchStmt->cond ) {
     1678                        ast::CatchStmt * stmt = mutate( catchStmt );
     1679                        stmt->body = new ast::IfStmt( stmt->location, stmt->cond, nullptr, stmt->body );
     1680                        stmt->cond = nullptr;
     1681                        return stmt;
     1682                }
     1683                return catchStmt;
     1684        }
     1685
     1686        const ast::CatchStmt * Resolver_new::postvisit( const ast::CatchStmt * catchStmt ) {
    17211687                // Decode the catchStmt so everything is stored properly.
    1722                 const ast::IfStmt * ifStmt = catchClause->body.as<ast::IfStmt>();
     1688                const ast::IfStmt * ifStmt = catchStmt->body.as<ast::IfStmt>();
    17231689                if ( nullptr != ifStmt && nullptr == ifStmt->then ) {
    17241690                        assert( ifStmt->cond );
    17251691                        assert( ifStmt->else_ );
    1726                         ast::CatchClause * clause = ast::mutate( catchClause );
    1727                         clause->cond = ifStmt->cond;
    1728                         clause->body = ifStmt->else_;
     1692                        ast::CatchStmt * stmt = ast::mutate( catchStmt );
     1693                        stmt->cond = ifStmt->cond;
     1694                        stmt->body = ifStmt->else_;
    17291695                        // ifStmt should be implicitly deleted here.
    1730                         return clause;
    1731                 }
    1732                 return catchClause;
     1696                        return stmt;
     1697                }
     1698                return catchStmt;
    17331699        }
    17341700
     
    17411707
    17421708                        ast::TypeEnvironment env;
    1743                         CandidateFinder funcFinder( context, env );
     1709                        CandidateFinder funcFinder{ symtab, env };
    17441710
    17451711                        // Find all candidates for a function in canonical form
     
    19551921                                );
    19561922
    1957                                 clause2.target.args.emplace_back( findSingleExpression( init, context ) );
     1923                                clause2.target.args.emplace_back( findSingleExpression( init, symtab ) );
    19581924                        }
    19591925
    19601926                        // Resolve the conditions as if it were an IfStmt, statements normally
    1961                         clause2.cond = findSingleExpression( clause.cond, context );
     1927                        clause2.cond = findSingleExpression( clause.cond, symtab );
    19621928                        clause2.stmt = clause.stmt->accept( *visitor );
    19631929
     
    19741940                        ast::ptr< ast::Type > target =
    19751941                                new ast::BasicType{ ast::BasicType::LongLongUnsignedInt };
    1976                         timeout2.time = findSingleExpression( stmt->timeout.time, target, context );
    1977                         timeout2.cond = findSingleExpression( stmt->timeout.cond, context );
     1942                        timeout2.time = findSingleExpression( stmt->timeout.time, target, symtab );
     1943                        timeout2.cond = findSingleExpression( stmt->timeout.cond, symtab );
    19781944                        timeout2.stmt = stmt->timeout.stmt->accept( *visitor );
    19791945
     
    19881954                        ast::WaitForStmt::OrElse orElse2;
    19891955
    1990                         orElse2.cond = findSingleExpression( stmt->orElse.cond, context );
     1956                        orElse2.cond = findSingleExpression( stmt->orElse.cond, symtab );
    19911957                        orElse2.stmt = stmt->orElse.stmt->accept( *visitor );
    19921958
     
    20091975                for (auto & expr : exprs) {
    20101976                        // only struct- and union-typed expressions are viable candidates
    2011                         expr = findKindExpression( expr, context, structOrUnion, "with expression" );
     1977                        expr = findKindExpression( expr, symtab, structOrUnion, "with expression" );
    20121978
    20131979                        // if with expression might be impure, create a temporary so that it is evaluated once
     
    20352001                ast::ptr< ast::Expr > untyped = new ast::UntypedInitExpr{
    20362002                        singleInit->location, singleInit->value, currentObject.getOptions() };
    2037                 ast::ptr<ast::Expr> newExpr = findSingleExpression( untyped, context );
     2003                ast::ptr<ast::Expr> newExpr = findSingleExpression( untyped, symtab );
    20382004                const ast::InitExpr * initExpr = newExpr.strict_as< ast::InitExpr >();
    20392005
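
Editor's note: the catch-clause previsit/postvisit pair above is an encode/decode trick: a conditional catch is temporarily rewritten so its condition is resolved in a scope where the caught declaration is visible. A hedged sketch of the round trip (pseudocode, not CFA syntax):

        // source form:      catch ( E e ; cond ) body
        // previsit encodes: catch ( E e ) { if ( cond ) <then == nullptr> else body }
        // postvisit finds the marker IfStmt (null then-branch) and restores cond and body
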
  • src/ResolvExpr/Resolver.h

    r2e9b59b rba897d21  
    99// Author           : Richard C. Bilson
    1010// Created On       : Sun May 17 12:18:34 2015
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Mar 16 11:32:00 2022
    13 // Update Count     : 5
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Mon Feb 18 20:40:38 2019
     13// Update Count     : 4
    1414//
    1515
     
    2323class Declaration;
    2424class Expression;
    25 class DeletedExpr;
    2625class StmtExpr;
    27 class Type;
    2826namespace SymTab {
    2927        class Indexer;
     
    3735        class StmtExpr;
    3836        class SymbolTable;
    39         class TranslationGlobal;
    4037        class TranslationUnit;
    4138        class Type;
     
    5855        void resolveWithExprs( std::list< Declaration * > & translationUnit );
    5956
    60         /// Helper Type: Passes around information between various sub-calls.
    61         struct ResolveContext {
    62                 const ast::SymbolTable & symtab;
    63                 const ast::TranslationGlobal & global;
    64         };
    65 
    6657        /// Checks types and binds syntactic constructs to typed representations
    6758        void resolve( ast::TranslationUnit& translationUnit );
     
    7162        /// context.
    7263        ast::ptr< ast::Expr > resolveInVoidContext(
    73                 const ast::Expr * expr, const ResolveContext &, ast::TypeEnvironment & env );
     64                const ast::Expr * expr, const ast::SymbolTable & symtab, ast::TypeEnvironment & env );
    7465        /// Resolve `untyped` to the single expression whose candidate is the best match for the
    7566        /// given type.
    7667        ast::ptr< ast::Expr > findSingleExpression(
    77                 const ast::Expr * untyped, const ast::Type * type, const ResolveContext & );
     68                const ast::Expr * untyped, const ast::Type * type, const ast::SymbolTable & symtab );
    7869        ast::ptr< ast::Expr > findVoidExpression(
    79                 const ast::Expr * untyped, const ResolveContext & );
     70                const ast::Expr * untyped, const ast::SymbolTable & symtab);
    8071        /// Resolves a constructor init expression
    8172        ast::ptr< ast::Init > resolveCtorInit(
    82                 const ast::ConstructorInit * ctorInit, const ResolveContext & context );
     73                const ast::ConstructorInit * ctorInit, const ast::SymbolTable & symtab );
    8374        /// Resolves a statement expression
    8475        const ast::Expr * resolveStmtExpr(
    85                 const ast::StmtExpr * stmtExpr, const ResolveContext & context );
     76                const ast::StmtExpr * stmtExpr, const ast::SymbolTable & symtab );
    8677} // namespace ResolvExpr
    8778
  • src/ResolvExpr/Unify.cc

    r2e9b59b rba897d21  
    943943                        // check that the other type is compatible and named the same
    944944                        auto otherInst = dynamic_cast< const XInstType * >( other );
    945                         if (otherInst && inst->name == otherInst->name) this->result = otherInst;
     945                        this->result = otherInst && inst->name == otherInst->name;
    946946                        return otherInst;
    947947                }
  • src/SymTab/Validate.cc

    r2e9b59b rba897d21  
    395395                                TranslateDimensionGenericParameters::translateDimensions( translationUnit );
    396396                        });
    397                         if (!useNewAST) {
    398397                        Stats::Time::TimeBlock("Resolve Enum Initializers", [&]() {
    399398                                acceptAll( translationUnit, rei ); // must happen after translateDimensions because rei needs identifier lookup, which needs name mangling
    400399                        });
    401                         }
    402400                        Stats::Time::TimeBlock("Check Function Returns", [&]() {
    403401                                ReturnChecker::checkFunctionReturns( translationUnit );
     
    407405                        });
    408406                }
    409         }
    410 
    411         static void decayForallPointers( std::list< Declaration * > & translationUnit ) {
    412                 PassVisitor<TraitExpander_old> te;
    413                 acceptAll( translationUnit, te );
    414                 PassVisitor<AssertionFixer_old> af;
    415                 acceptAll( translationUnit, af );
    416                 PassVisitor<CheckOperatorTypes_old> cot;
    417                 acceptAll( translationUnit, cot );
    418                 PassVisitor<FixUniqueIds_old> fui;
    419                 acceptAll( translationUnit, fui );
    420407        }
    421408
     
    487474        }
    488475
     476        void decayForallPointers( std::list< Declaration * > & translationUnit ) {
     477                PassVisitor<TraitExpander_old> te;
     478                acceptAll( translationUnit, te );
     479                PassVisitor<AssertionFixer_old> af;
     480                acceptAll( translationUnit, af );
     481                PassVisitor<CheckOperatorTypes_old> cot;
     482                acceptAll( translationUnit, cot );
     483                PassVisitor<FixUniqueIds_old> fui;
     484                acceptAll( translationUnit, fui );
     485        }
     486
     487        void decayForallPointersA( std::list< Declaration * > & translationUnit ) {
     488                PassVisitor<TraitExpander_old> te;
     489                acceptAll( translationUnit, te );
     490        }
     491        void decayForallPointersB( std::list< Declaration * > & translationUnit ) {
     492                PassVisitor<AssertionFixer_old> af;
     493                acceptAll( translationUnit, af );
     494        }
     495        void decayForallPointersC( std::list< Declaration * > & translationUnit ) {
     496                PassVisitor<CheckOperatorTypes_old> cot;
     497                acceptAll( translationUnit, cot );
     498        }
     499        void decayForallPointersD( std::list< Declaration * > & translationUnit ) {
     500                PassVisitor<FixUniqueIds_old> fui;
     501                acceptAll( translationUnit, fui );
     502        }
     503
    489504        void validate( std::list< Declaration * > &translationUnit, __attribute__((unused)) bool doDebug ) {
    490505                validate_A( translationUnit );
     
    974989                                        // need to resolve enumerator initializers early so that other passes that determine if an expression is constexpr have the appropriate information.
    975990                                        SingleInit * init = strict_dynamic_cast<SingleInit *>( field->init );
    976                                         if ( !enumDecl->base || dynamic_cast<BasicType *>(enumDecl->base))
    977                                                 ResolvExpr::findSingleExpression( init->value, new BasicType( Type::Qualifiers(), BasicType::SignedInt ), indexer );
    978                                         else {
    979                                                 if (dynamic_cast<PointerType *>(enumDecl->base)) {
    980                                                         auto typePtr = dynamic_cast<PointerType *>(enumDecl->base);
    981                                                         ResolvExpr::findSingleExpression( init->value,
    982                                                          new PointerType( Type::Qualifiers(), typePtr->base ), indexer );
    983                                                 } else {
    984                                                         ResolvExpr::findSingleExpression( init->value, new BasicType( Type::Qualifiers(), BasicType::SignedInt ), indexer );
    985                                                 }
    986                                         }
    987                                        
     991                                        ResolvExpr::findSingleExpression( init->value, new BasicType( Type::Qualifiers(), BasicType::SignedInt ), indexer );
    988992                                }
    989993                        }
    990 
    991994                } // if
    992995        }
     
    12521255                        declsToAddBefore.push_back( new UnionDecl( aggDecl->name, noAttributes, tyDecl->linkage ) );
    12531256                } else if ( EnumInstType * enumDecl = dynamic_cast< EnumInstType * >( designatorType ) ) {
    1254                         // declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage, enumDecl->baseEnum->base ) );
    1255                         if (enumDecl->baseEnum) {
    1256                                 declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage, enumDecl->baseEnum->base ) );
    1257                         } else {
    1258                                 declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage ) );
    1259                         }
     1257                        declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage ) );
    12601258                } // if
    12611259                return tyDecl->clone();
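
Editor's note: the rba897d21 side of Validate.cc keeps the combined decayForallPointers and also splits it into four independently schedulable stages; running A through D in order applies the same four passes as the combined form. A fragment, assuming a declaration list named translationUnit:

        decayForallPointers( translationUnit );    // combined: TraitExpander, AssertionFixer,
                                                   // CheckOperatorTypes, FixUniqueIds (all _old)
        // staged equivalent:
        decayForallPointersA( translationUnit );
        decayForallPointersB( translationUnit );
        decayForallPointersC( translationUnit );
        decayForallPointersD( translationUnit );
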
  • src/SymTab/Validate.h

    r2e9b59b rba897d21  
    4242        void validate_E( std::list< Declaration * > &translationUnit );
    4343        void validate_F( std::list< Declaration * > &translationUnit );
     44        void decayForallPointers( std::list< Declaration * > & translationUnit );
     45        void decayForallPointersA( std::list< Declaration * > & translationUnit );
     46        void decayForallPointersB( std::list< Declaration * > & translationUnit );
     47        void decayForallPointersC( std::list< Declaration * > & translationUnit );
     48        void decayForallPointersD( std::list< Declaration * > & translationUnit );
    4449
    4550        const ast::Type * validateType(
  • src/SynTree/AggregateDecl.cc

    r2e9b59b rba897d21  
    5959        } // if
    6060        os << " with body " << has_body();
     61
    6162        if ( ! parameters.empty() ) {
    6263                os << endl << indent << "... with parameters" << endl;
     
    105106const char * EnumDecl::typeString() const { return aggrString( Enum ); }
    106107
    107 void EnumDecl::print( std::ostream & os, Indenter indent ) const {
    108         AggregateDecl::print(os, indent);
    109         os << " with base? " << (base? "True" : "False") << std::endl;
    110         if ( base ) {
    111                 os << "Base Type of Enum:" << std::endl;
    112                 base->print(os, indent);
    113         }
    114         os <<  std::endl << "End of EnumDecl::print" << std::endl;
    115 }
    116 
    117108const char * TraitDecl::typeString() const { return aggrString( Trait ); }
    118109
  • src/SynTree/BasicType.cc

    r2e9b59b rba897d21  
    2929}
    3030
    31 bool BasicType::isWholeNumber() const {
    32         return kind == Bool ||
    33                 kind == Char ||
    34                 kind == SignedChar ||
    35                 kind == UnsignedChar ||
    36                 kind == ShortSignedInt ||
    37                 kind == ShortUnsignedInt ||
    38                 kind == SignedInt ||
    39                 kind == UnsignedInt ||
    40                 kind == LongSignedInt ||
    41                 kind == LongUnsignedInt ||
    42                 kind == LongLongSignedInt ||
    43                 kind == LongLongUnsignedInt ||
    44                 kind == SignedInt128 ||
    45                 kind == UnsignedInt128;
    46 }
    47 
    4831bool BasicType::isInteger() const {
    4932        return kind <= UnsignedInt128;
  • src/SynTree/Declaration.h

    r2e9b59b rba897d21  
    144144        virtual void print( std::ostream & os, Indenter indent = {} ) const override;
    145145        virtual void printShort( std::ostream & os, Indenter indent = {} ) const override;
    146 
    147         // TODO: Move to the right place
    148         void checkAssignedValue() const;
    149146};
    150147
     
    290287        AggregateDecl * set_body( bool body ) { AggregateDecl::body = body; return this; }
    291288
    292         virtual void print( std::ostream & os, Indenter indent = {} ) const override;
     289        virtual void print( std::ostream & os, Indenter indent = {} ) const override final;
    293290        virtual void printShort( std::ostream & os, Indenter indent = {} ) const override;
    294291  protected:
     
    338335        typedef AggregateDecl Parent;
    339336  public:
    340         EnumDecl( const std::string & name,
    341          const std::list< Attribute * > & attributes = std::list< class Attribute * >(),
    342           LinkageSpec::Spec linkage = LinkageSpec::Cforall,
    343           Type * baseType = nullptr ) : Parent( name, attributes, linkage ) , base( baseType ){}
    344         EnumDecl( const EnumDecl & other ) : Parent( other ), base( other.base ) {}
     337        EnumDecl( const std::string & name, const std::list< Attribute * > & attributes = std::list< class Attribute * >(), LinkageSpec::Spec linkage = LinkageSpec::Cforall ) : Parent( name, attributes, linkage ) {}
     338        EnumDecl( const EnumDecl & other ) : Parent( other ) {}
    345339
    346340        bool valueOf( Declaration * enumerator, long long int & value );
     
    350344        virtual void accept( Visitor & v ) const override { v.visit( this ); }
    351345        virtual Declaration * acceptMutator( Mutator & m )  override { return m.mutate( this ); }
    352         Type * base;
     346  private:
    353347        std::unordered_map< std::string, long long int > enumValues;
    354         virtual void print( std::ostream & os, Indenter indent = {} ) const override final;
    355   private:
    356         // std::unordered_map< std::string, long long int > enumValues;
    357348        virtual const char * typeString() const override;
    358349};
  • src/SynTree/Type.h

    r2e9b59b rba897d21  
    268268        virtual Type *acceptMutator( Mutator & m ) override { return m.mutate( this ); }
    269269        virtual void print( std::ostream & os, Indenter indent = {} ) const override;
    270         bool isWholeNumber() const;
     270
    271271        bool isInteger() const;
    272272};
  • src/SynTree/Visitor.h

    r2e9b59b rba897d21  
    3535        virtual void visit( UnionDecl * node ) { visit( const_cast<const UnionDecl *>(node) ); }
    3636        virtual void visit( const UnionDecl * aggregateDecl ) = 0;
    37         virtual void visit( EnumDecl * node ) { visit( const_cast<const EnumDecl *>(node) ); } // Marker 1
     37        virtual void visit( EnumDecl * node ) { visit( const_cast<const EnumDecl *>(node) ); }
    3838        virtual void visit( const EnumDecl * aggregateDecl ) = 0;
    3939        virtual void visit( TraitDecl * node ) { visit( const_cast<const TraitDecl *>(node) ); }
     
    190190        virtual void visit( UnionInstType * node ) { visit( const_cast<const UnionInstType *>(node) ); }
    191191        virtual void visit( const UnionInstType * aggregateUseType ) = 0;
    192         virtual void visit( EnumInstType * node ) { visit( const_cast<const EnumInstType *>(node) ); } // Marker 2
     192        virtual void visit( EnumInstType * node ) { visit( const_cast<const EnumInstType *>(node) ); }
    193193        virtual void visit( const EnumInstType * aggregateUseType ) = 0;
    194194        virtual void visit( TraitInstType * node ) { visit( const_cast<const TraitInstType *>(node) ); }
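
These two hunks only strip the leftover `// Marker` comments, but the surrounding idiom is worth noting: each mutable `visit` overload forwards to the const one via `const_cast`, so a read-only visitor implements a single method yet still accepts mutable nodes. A self-contained sketch with a hypothetical node type:

    #include <iostream>

    struct EnumDecl { const char * name; };

    struct Visitor {
        virtual ~Visitor() = default;
        // the non-const overload forwards, so only the const one is pure
        virtual void visit( EnumDecl * node ) { visit( const_cast< const EnumDecl * >( node ) ); }
        virtual void visit( const EnumDecl * node ) = 0;
    };

    struct Printer : Visitor {
        using Visitor::visit;                 // keep the forwarding overload visible
        void visit( const EnumDecl * node ) override { std::cout << node->name << '\n'; }
    };

    int main() {
        EnumDecl e{ "Colour" };
        Printer p;
        p.visit( &e );                        // mutable pointer routes through the forwarder
    }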
  • src/Tuples/TupleAssignment.cc

    r2e9b59b rba897d21  
    99// Author           : Rodolfo G. Esteves
    1010// Created On       : Mon May 18 07:44:20 2015
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Mar 16 14:06:00 2022
    13 // Update Count     : 10
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Fri Dec 13 23:45:33 2019
     13// Update Count     : 9
    1414//
    1515
     
    465465                                        // resolve ctor/dtor for the new object
    466466                                        ast::ptr< ast::Init > ctorInit = ResolvExpr::resolveCtorInit(
    467                                                         InitTweak::genCtorInit( location, ret ), spotter.crntFinder.context );
     467                                                        InitTweak::genCtorInit( location, ret ), spotter.crntFinder.localSyms );
    468468                                        // remove environments from subexpressions of stmtExpr
    469469                                        ast::Pass< EnvRemover > rm{ env };
     
    560560                                        // resolve the cast expression so that rhsCand return type is bound by the cast
    561561                                        // type as needed, and transfer the resulting environment
    562                                         ResolvExpr::CandidateFinder finder( spotter.crntFinder.context, env );
     562                                        ResolvExpr::CandidateFinder finder{ spotter.crntFinder.localSyms, env };
    563563                                        finder.find( rhsCand->expr, ResolvExpr::ResolvMode::withAdjustment() );
    564564                                        assert( finder.candidates.size() == 1 );
     
    609609                                        // explode the LHS so that each field of a tuple-valued expr is assigned
    610610                                        ResolvExpr::CandidateList lhs;
    611                                         explode( *lhsCand, crntFinder.context.symtab, back_inserter(lhs), true );
     611                                        explode( *lhsCand, crntFinder.localSyms, back_inserter(lhs), true );
    612612                                        for ( ResolvExpr::CandidateRef & cand : lhs ) {
    613613                                                // each LHS value must be a reference - some come in with a cast, if not
     
    629629                                                        if ( isTuple( rhsCand->expr ) ) {
    630630                                                                // multiple assignment
    631                                                                 explode( *rhsCand, crntFinder.context.symtab, back_inserter(rhs), true );
     631                                                                explode( *rhsCand, crntFinder.localSyms, back_inserter(rhs), true );
    632632                                                                matcher.reset(
    633633                                                                        new MultipleAssignMatcher{ *this, expr->location, lhs, rhs } );
     
    648648                                                        // multiple assignment
    649649                                                        ResolvExpr::CandidateList rhs;
    650                                                         explode( rhsCand, crntFinder.context.symtab, back_inserter(rhs), true );
     650                                                        explode( rhsCand, crntFinder.localSyms, back_inserter(rhs), true );
    651651                                                        matcher.reset(
    652652                                                                new MultipleAssignMatcher{ *this, expr->location, lhs, rhs } );
     
    678678                                )
    679679
    680                                 ResolvExpr::CandidateFinder finder( crntFinder.context, matcher->env );
     680                                ResolvExpr::CandidateFinder finder{ crntFinder.localSyms, matcher->env };
    681681
    682682                                try {
  • src/Validate/Autogen.cpp

    r2e9b59b rba897d21  
    248248                structInst.params.push_back( new ast::TypeExpr(
    249249                        typeDecl->location,
    250                         new ast::TypeInstType( typeDecl )
     250                        new ast::TypeInstType( typeDecl->name, typeDecl )
    251251                ) );
    252252        }
     
    264264                unionInst.params.push_back( new ast::TypeExpr(
    265265                        unionDecl->location,
    266                         new ast::TypeInstType( typeDecl )
     266                        new ast::TypeInstType( typeDecl->name, typeDecl )
    267267                ) );
    268268        }
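
The left-hand side used a one-argument TypeInstType constructor that derives the instance name from the declaration; ba897d21 returns to spelling out both arguments. The convenience form is just a delegating constructor, sketched here with simplified stand-in types:

    #include <cassert>
    #include <string>

    struct TypeDecl { std::string name; };

    struct TypeInstType {
        std::string name;
        const TypeDecl * base;
        TypeInstType( std::string n, const TypeDecl * b ) : name( n ), base( b ) {}
        // convenience overload: delegate, deriving the name from the declaration
        explicit TypeInstType( const TypeDecl * b ) : TypeInstType( b->name, b ) {}
    };

    int main() {
        TypeDecl td{ "T" };
        assert( TypeInstType( &td ).name == TypeInstType( td.name, &td ).name );
    }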
  • src/Validate/FindSpecialDeclsNew.cpp

    r2e9b59b rba897d21  
    3030
    3131struct FindDeclsCore : public ast::WithShortCircuiting {
    32         ast::TranslationGlobal & global;
    33         FindDeclsCore( ast::TranslationGlobal & g ) : global( g ) {}
     32        ast::TranslationUnit::Global & global;
     33        FindDeclsCore( ast::TranslationUnit::Global & g ) : global( g ) {}
    3434
    3535        void previsit( const ast::Decl * decl );
     
    7474        ast::Pass<FindDeclsCore>::run( translationUnit, translationUnit.global );
    7575
     76        // TODO: When everything gets the globals from the translation unit,
     77        // remove these.
     78        ast::dereferenceOperator = translationUnit.global.dereference;
     79        ast::dtorStruct = translationUnit.global.dtorStruct;
     80        ast::dtorStructDestroy = translationUnit.global.dtorDestroy;
     81
    7682        // TODO: conditionally generate 'fake' declarations for missing features,
    7783        // so that translation can proceed in the event that builtins, prelude,
  • src/Validate/ForallPointerDecay.cpp

    r2e9b59b rba897d21  
    4141        for ( auto & type_param : decl->type_params ) {
    4242                type->forall.emplace_back(
    43                         new ast::TypeInstType( type_param ) );
     43                        new ast::TypeInstType( type_param->name, type_param ) );
    4444        }
    4545        for ( auto & assertion : decl->assertions ) {
     
    7070                AssertionList assertions;
    7171                // Substitute trait decl parameters for instance parameters.
    72                 ast::TypeSubstitution sub( inst->base->params, inst->params );
     72                ast::TypeSubstitution sub(
     73                        inst->base->params.begin(),
     74                        inst->base->params.end(),
     75                        inst->params.begin()
     76                );
    7377                for ( const ast::ptr<ast::Decl> & decl : inst->base->members ) {
    7478                        ast::ptr<ast::DeclWithType> copy =
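
Here the substitution switches from a constructor taking the two parameter lists whole to one taking an iterator range plus the begin iterator of the actuals. Both shapes build the same formal-to-actual mapping; a sketch with a hypothetical name-to-name substitution:

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    // hypothetical stand-in for ast::TypeSubstitution
    struct TypeSubstitution {
        std::map< std::string, std::string > bindings;

        // iterator-range form (right-hand side of the hunk)
        template< typename FIt, typename AIt >
        TypeSubstitution( FIt formal, FIt formalEnd, AIt actual ) {
            for ( ; formal != formalEnd; ++formal, ++actual ) bindings[ *formal ] = *actual;
        }

        // container-pair form (left-hand side), delegating to the range form
        TypeSubstitution( const std::vector< std::string > & formals,
                          const std::vector< std::string > & actuals )
            : TypeSubstitution( formals.begin(), formals.end(), actuals.begin() ) {}
    };

    int main() {
        std::vector< std::string > formals{ "T", "U" }, actuals{ "int", "bool" };
        TypeSubstitution sub( formals.begin(), formals.end(), actuals.begin() );
        assert( sub.bindings[ "T" ] == "int" && sub.bindings[ "U" ] == "bool" );
    }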
  • src/Validate/module.mk

    r2e9b59b rba897d21  
    2222        Validate/ForallPointerDecay.cpp \
    2323        Validate/ForallPointerDecay.hpp \
    24         Validate/GenericParameter.cpp \
    25         Validate/GenericParameter.hpp \
    2624        Validate/HandleAttributes.cc \
    2725        Validate/HandleAttributes.h \
     
    3028        Validate/LabelAddressFixer.cpp \
    3129        Validate/LabelAddressFixer.hpp \
    32         Validate/ReturnCheck.cpp \
    33         Validate/ReturnCheck.hpp \
    3430        Validate/FindSpecialDeclsNew.cpp \
    3531        Validate/FindSpecialDecls.cc \
  • src/Virtual/Tables.cc

    r2e9b59b rba897d21  
    1010// Created On       : Mon Aug 31 11:11:00 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Mar 11 10:40:00 2022
    13 // Update Count     : 3
     12// Last Modified On : Wed Apr 21 15:36:00 2021
     13// Update Count     : 2
    1414//
    1515
    16 #include "AST/Attribute.hpp"
    17 #include "AST/Copy.hpp"
    18 #include "AST/Decl.hpp"
    19 #include "AST/Expr.hpp"
    20 #include "AST/Init.hpp"
    21 #include "AST/Stmt.hpp"
    22 #include "AST/Type.hpp"
    2316#include <SynTree/Attribute.h>
    2417#include <SynTree/Declaration.h>
     
    8477}
    8578
    86 static ast::ObjectDecl * makeVtableDeclaration(
    87                 CodeLocation const & location, std::string const & name,
    88                 ast::StructInstType const * type, ast::Init const * init ) {
    89         ast::Storage::Classes storage;
    90         if ( nullptr == init ) {
    91                 storage.is_extern = true;
    92         }
    93         return new ast::ObjectDecl(
    94                 location,
    95                 name,
    96                 type,
    97                 init,
    98                 storage,
    99                 ast::Linkage::Cforall
    100         );
    101 }
    102 
    10379ObjectDecl * makeVtableForward( std::string const & name, StructInstType * type ) {
    10480        assert( type );
    10581        return makeVtableDeclaration( name, type, nullptr );
    106 }
    107 
    108 ast::ObjectDecl * makeVtableForward(
    109                 CodeLocation const & location, std::string const & name,
    110                 ast::StructInstType const * vtableType ) {
    111         assert( vtableType );
    112         return makeVtableDeclaration( location, name, vtableType, nullptr );
    11382}
    11483
     
    154123}
    155124
    156 static std::vector<ast::ptr<ast::Init>> buildInits(
    157                 CodeLocation const & location,
    158                 //std::string const & name,
    159                 ast::StructInstType const * vtableType,
    160                 ast::Type const * objectType ) {
    161         ast::StructDecl const * vtableStruct = vtableType->base;
    162 
    163         std::vector<ast::ptr<ast::Init>> inits;
    164         inits.reserve( vtableStruct->members.size() );
    165 
    166         // This is designed to run before the resolver.
    167         for ( auto field : vtableStruct->members ) {
    168                 if ( std::string( "parent" ) == field->name ) {
    169                         // This will not work with polymorphic state.
    170                         auto oField = field.strict_as<ast::ObjectDecl>();
    171                         auto fieldType = oField->type.strict_as<ast::PointerType>();
    172                         auto parentType = fieldType->base.strict_as<ast::StructInstType>();
    173                         std::string const & parentInstance = instanceName( parentType->name );
    174                         inits.push_back(
    175                                         new ast::SingleInit( location, new ast::AddressExpr( new ast::NameExpr( location, parentInstance ) ) ) );
    176                 } else if ( std::string( "__cfavir_typeid" ) == field->name ) {
    177                         std::string const & baseType = baseTypeName( vtableType->name );
    178                         std::string const & typeId = typeIdName( baseType );
    179                         inits.push_back( new ast::SingleInit( location, new ast::AddressExpr( new ast::NameExpr( location, typeId ) ) ) );
    180                 } else if ( std::string( "size" ) == field->name ) {
    181                         inits.push_back( new ast::SingleInit( location, new ast::SizeofExpr( location, objectType )
    182                         ) );
    183                 } else if ( std::string( "align" ) == field->name ) {
    184                         inits.push_back( new ast::SingleInit( location,
    185                                 new ast::AlignofExpr( location, objectType )
    186                         ) );
    187                 } else {
    188                         inits.push_back( new ast::SingleInit( location,
    189                                 new ast::NameExpr( location, field->name )
    190                         ) );
    191                 }
    192                 //ast::Expr * expr = buildInitExpr(...);
    193                 //inits.push_back( new ast::SingleInit( location, expr ) )
    194         }
    195 
    196         return inits;
    197 }
    198 
    199 ast::ObjectDecl * makeVtableInstance(
    200                 CodeLocation const & location,
    201                 std::string const & name,
    202                 ast::StructInstType const * vtableType,
    203                 ast::Type const * objectType,
    204                 ast::Init const * init ) {
    205         assert( vtableType );
    206         assert( objectType );
    207 
    208         // Build the initialization.
    209         if ( nullptr == init ) {
    210                 init = new ast::ListInit( location,
    211                         buildInits( location, vtableType, objectType ) );
    212 
    213         // The provided init should initialize everything except the parent
    214         // pointer, the size-of and align-of fields. These should be inserted.
    215         } else {
    216                 // Except this is not yet supported.
    217                 assert(false);
    218         }
    219         return makeVtableDeclaration( location, name, vtableType, init );
    220 }
    221 
    222125namespace {
    223126        std::string const functionName = "get_exception_vtable";
     
    237140                new ReferenceType( noQualifiers, vtableType ),
    238141                nullptr,
    239                 { new Attribute("unused") }
     142        { new Attribute("unused") }
    240143        ) );
    241144        type->parameters.push_back( new ObjectDecl(
     
    257160}
    258161
    259 ast::FunctionDecl * makeGetExceptionForward(
    260                 CodeLocation const & location,
    261                 ast::Type const * vtableType,
    262                 ast::Type const * exceptType ) {
    263         assert( vtableType );
    264         assert( exceptType );
    265         return new ast::FunctionDecl(
    266                 location,
    267                 functionName,
    268                 { /* forall */ },
    269                 { new ast::ObjectDecl(
    270                         location,
    271                         "__unused",
    272                         new ast::PointerType( exceptType )
    273                 ) },
    274                 { new ast::ObjectDecl(
    275                         location,
    276                         "_retvalue",
    277                         new ast::ReferenceType( vtableType )
    278                 ) },
    279                 nullptr,
    280                 ast::Storage::Classes(),
    281                 ast::Linkage::Cforall,
    282                 { new ast::Attribute( "unused" ) }
    283         );
    284 }
    285 
    286162FunctionDecl * makeGetExceptionFunction(
    287163                ObjectDecl * vtableInstance, Type * exceptType ) {
     
    292168        func->statements = new CompoundStmt( {
    293169                new ReturnStmt( new VariableExpr( vtableInstance ) ),
    294         } );
    295         return func;
    296 }
    297 
    298 ast::FunctionDecl * makeGetExceptionFunction(
    299                 CodeLocation const & location,
    300                 ast::ObjectDecl const * vtableInstance, ast::Type const * exceptType ) {
    301         assert( vtableInstance );
    302         assert( exceptType );
    303         ast::FunctionDecl * func = makeGetExceptionForward(
    304                         location, ast::deepCopy( vtableInstance->type ), exceptType );
    305         func->stmts = new ast::CompoundStmt( location, {
    306                 new ast::ReturnStmt( location, new ast::VariableExpr( location, vtableInstance ) )
    307170        } );
    308171        return func;
     
    328191}
    329192
    330 ast::ObjectDecl * makeTypeIdInstance(
    331                 CodeLocation const & location,
    332                 ast::StructInstType const * typeIdType ) {
    333         assert( typeIdType );
    334         ast::StructInstType * type = ast::mutate( typeIdType );
    335         type->set_const( true );
    336         std::string const & typeid_name = typeIdTypeToInstance( typeIdType->name );
    337         return new ast::ObjectDecl(
    338                 location,
    339                 typeid_name,
    340                 type,
    341                 new ast::ListInit( location, {
    342                         new ast::SingleInit( location,
    343                                 new ast::AddressExpr( location,
    344                                         new ast::NameExpr( location, "__cfatid_exception_t" ) ) )
    345                 } ),
    346                 ast::Storage::Classes(),
    347                 ast::Linkage::Cforall,
    348                 nullptr,
    349                 { new ast::Attribute( "cfa_linkonce" ) }
    350         );
    351193}
    352 
    353 }
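
The removed buildInits synthesizes one initializer per field of the vtable struct, dispatching on the field name: the parent pointer and type id become address-of expressions, size/align become sizeof/alignof of the object type, and every other field is referenced by name. A compact sketch of that dispatch, emitting the initializers as plain strings rather than AST nodes:

    #include <iostream>
    #include <string>
    #include <vector>

    // one initializer expression (textual, for illustration) per vtable field
    std::vector< std::string > buildInits( const std::vector< std::string > & fields,
                                           const std::string & objectType ) {
        std::vector< std::string > inits;
        inits.reserve( fields.size() );
        for ( const std::string & field : fields ) {
            if ( field == "parent" )               inits.push_back( "&parent_instance" );
            else if ( field == "__cfavir_typeid" ) inits.push_back( "&type_id" );
            else if ( field == "size" )            inits.push_back( "sizeof(" + objectType + ")" );
            else if ( field == "align" )           inits.push_back( "alignof(" + objectType + ")" );
            else                                   inits.push_back( field );  // member by name
        }
        return inits;
    }

    int main() {
        for ( const auto & init : buildInits(
                { "parent", "__cfavir_typeid", "size", "align", "copy" }, "my_exception" ) )
            std::cout << init << '\n';
    }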
  • src/Virtual/Tables.h

    r2e9b59b rba897d21  
    1010// Created On       : Mon Aug 31 11:07:00 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Dec  8 16:58:00 2021
    13 // Update Count     : 3
     12// Last Modified On : Wed Apr 21 10:30:00 2021
     13// Update Count     : 2
    1414//
    1515
    1616#include <list>  // for list
    1717
    18 #include <string>
    19 #include "AST/Fwd.hpp"
    2018class Declaration;
    2119class StructDecl;
     
    3735 * vtableType node is consumed.
    3836 */
    39 ast::ObjectDecl * makeVtableForward(
    40         CodeLocation const & location, std::string const & name,
    41         ast::StructInstType const * vtableType );
    4237
    4338ObjectDecl * makeVtableInstance(
     
    4843 * vtableType and init (if provided) nodes are consumed.
    4944 */
    50 ast::ObjectDecl * makeVtableInstance(
    51         CodeLocation const & location,
    52         std::string const & name,
    53         ast::StructInstType const * vtableType,
    54         ast::Type const * objectType,
    55         ast::Init const * init = nullptr );
    5645
    5746// Some special code for how exceptions interact with virtual tables.
     
    6049 * linking the vtableType to the exceptType. Both nodes are consumed.
    6150 */
    62 ast::FunctionDecl * makeGetExceptionForward(
    63         CodeLocation const & location,
    64         ast::Type const * vtableType,
    65         ast::Type const * exceptType );
    6651
    6752FunctionDecl * makeGetExceptionFunction(
     
    7055 * exceptType node is consumed.
    7156 */
    72 ast::FunctionDecl * makeGetExceptionFunction(
    73         CodeLocation const & location,
    74         ast::ObjectDecl const * vtableInstance, ast::Type const * exceptType );
    7557
    7658ObjectDecl * makeTypeIdInstance( StructInstType const * typeIdType );
     
    7860 * TODO: Should take the parent type. Currently locked to the exception_t.
    7961 */
    80 ast::ObjectDecl * makeTypeIdInstance(
    81         const CodeLocation & location, ast::StructInstType const * typeIdType );
    8262
    8363}
  • src/main.cc

    r2e9b59b rba897d21  
    1010// Created On       : Fri May 15 23:12:02 2015
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Apr 13 11:11:00 2022
    13 // Update Count     : 672
     12// Last Modified On : Wed Jan 26 14:09:00 2022
     13// Update Count     : 670
    1414//
    1515
     
    3232
    3333#include "AST/Convert.hpp"
     34#include "AST/Print.hpp"
    3435#include "CompilationState.h"
    3536#include "../config.h"                      // for CFA_LIBDIR
     
    7576#include "Tuples/Tuples.h"                  // for expandMemberTuples, expan...
    7677#include "Validate/Autogen.hpp"             // for autogenerateRoutines
    77 #include "Validate/GenericParameter.hpp"    // for fillGenericParameters, tr...
    7878#include "Validate/FindSpecialDecls.h"      // for findGlobalDecls
    7979#include "Validate/ForallPointerDecay.hpp"  // for decayForallPointers
     
    8181#include "Validate/InitializerLength.hpp"   // for setLengthFromInitializer
    8282#include "Validate/LabelAddressFixer.hpp"   // for fixLabelAddresses
    83 #include "Validate/ReturnCheck.hpp"         // for checkReturnStatements
    8483#include "Virtual/ExpandCasts.h"            // for expandCasts
    8584
     
    329328                PASS( "Validate-A", SymTab::validate_A( translationUnit ) );
    330329                PASS( "Validate-B", SymTab::validate_B( translationUnit ) );
     330                PASS( "Validate-C", SymTab::validate_C( translationUnit ) );
    331331
    332332                CodeTools::fillLocations( translationUnit );
    333333
    334334                if( useNewAST ) {
     335                        PASS( "Implement Concurrent Keywords", Concurrency::applyKeywords( translationUnit ) );
     336                        //PASS( "Forall Pointer Decay - A", SymTab::decayForallPointersA( translationUnit ) );
     337                        //PASS( "Forall Pointer Decay - B", SymTab::decayForallPointersB( translationUnit ) );
     338                        //PASS( "Forall Pointer Decay - C", SymTab::decayForallPointersC( translationUnit ) );
     339                        //PASS( "Forall Pointer Decay - D", SymTab::decayForallPointersD( translationUnit ) );
    335340                        CodeTools::fillLocations( translationUnit );
    336341
     
    342347
    343348                        forceFillCodeLocations( transUnit );
    344 
    345                         // Check as early as possible. Can't happen before
    346                         // LinkReferenceToType, observed failing when attempted
    347                         // before eliminateTypedef
    348                         PASS( "Validate Generic Parameters", Validate::fillGenericParameters( transUnit ) );
    349 
    350                         PASS( "Translate Dimensions", Validate::translateDimensionParameters( transUnit ) );
    351                         PASS( "Check Function Returns", Validate::checkReturnStatements( transUnit ) );
    352 
    353                         // Must happen before Autogen.
    354                         PASS( "Fix Return Statements", InitTweak::fixReturnStatements( transUnit ) );
    355 
    356                         PASS( "Implement Concurrent Keywords", Concurrency::implementKeywords( transUnit ) );
    357349
    358350                        // Must be after implement concurrent keywords; because uniqueIds
     
    438430                        translationUnit = convert( move( transUnit ) );
    439431                } else {
    440                         PASS( "Validate-C", SymTab::validate_C( translationUnit ) );
    441432                        PASS( "Validate-D", SymTab::validate_D( translationUnit ) );
    442433                        PASS( "Validate-E", SymTab::validate_E( translationUnit ) );
     
    506497                        PASS( "Translate Tries" , ControlStruct::translateTries( translationUnit ) );
    507498                }
     499
     500               
    508501
    509502                PASS( "Gen Waitfor" , Concurrency::generateWaitFor( translationUnit ) );
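
All of the main.cc churn is about pass ordering: Validate-C moves back in front of the new-AST branch, the generic-parameter and return-check passes disappear along with their files, and the surviving comments record ordering constraints ("Must happen before Autogen", "Check as early as possible"). In this style of driver the constraints live purely in list order, as in this toy sketch of a PASS-like pipeline (names are placeholders, not the real pass set):

    #include <functional>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    struct TranslationUnit { /* AST omitted */ };

    int main() {
        // hypothetical analogue of the PASS macro: phases run strictly in list
        // order, so "X must happen before Y" means X appears earlier here
        std::vector< std::pair< std::string, std::function< void( TranslationUnit & ) > > > passes = {
            { "Validate-A",            []( TranslationUnit & ) { /* ... */ } },
            { "Validate-B",            []( TranslationUnit & ) { /* ... */ } },
            { "Validate-C",            []( TranslationUnit & ) { /* ... */ } },
            { "Fix Return Statements", []( TranslationUnit & ) { /* must precede Autogen */ } },
            { "Autogen",               []( TranslationUnit & ) { /* ... */ } },
        };
        TranslationUnit tu;
        for ( auto & [ name, pass ] : passes ) {
            std::cout << "PASS " << name << '\n';
            pass( tu );
        }
    }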
  • tests/Makefile.am

    r2e9b59b rba897d21  
    2828DEBUG_FLAGS=-debug -g -O0
    2929
    30 quick_test=avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes meta/dumpable
     30quick_test=avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes
    3131
    3232concurrent=
     
    6666PRETTY_PATH=mkdir -p $(dir $(abspath ${@})) && cd ${srcdir} &&
    6767
    68 .PHONY: list .validate .test_makeflags
    69 .INTERMEDIATE: .validate .validate.cfa .test_makeflags
     68.PHONY: list .validate
     69.INTERMEDIATE: .validate .validate.cfa
    7070EXTRA_PROGRAMS = avl_test linkonce .dummy_hack # build but do not install
    7171EXTRA_DIST = test.py \
     
    123123        @+${TEST_PY} --list ${concurrent}
    124124
    125 .test_makeflags:
    126         @echo "${MAKEFLAGS}"
    127 
    128125.validate: .validate.cfa
    129126        $(CFACOMPILE) .validate.cfa -fsyntax-only -Wall -Wextra -Werror
  • tests/PRNG.cfa

    r2e9b59b rba897d21  
    88// Created On       : Wed Dec 29 09:38:12 2021
    99// Last Modified By : Peter A. Buhr
    10 // Last Modified On : Sat Apr  9 15:21:14 2022
    11 // Update Count     : 344
     10// Last Modified On : Sat Feb 12 12:23:57 2022
     11// Update Count     : 342
    1212//
    1313
     
    2222#include <mutex_stmt.hfa>
    2323
     24// FIX ME: spurious characters appear in output
     25Duration default_preemption() { return 0; }
     26
    2427#ifdef TIME                                                                                             // use -O2 -nodebug
    2528#define STARTTIME start = timeHiRes()
  • tests/collections/.expect/vector-err-pass-perm-it-byval.txt

    r2e9b59b rba897d21  
    1 collections/vector-demo.cfa:95:1 error: Unique best alternative includes deleted identifier in Generated Cast of:
     1error: Unique best alternative includes deleted identifier in Generated Cast of:
    22  Application of
    33    Deleted Expression
  • tests/concurrent/mutexstmt/.expect/locks.txt

    r2e9b59b rba897d21  
    33Start Test: multi lock deadlock/mutual exclusion
    44End Test: multi lock deadlock/mutual exclusion
    5 Start Test: multi polymorphic lock deadlock/mutual exclusion
    6 End Test: multi polymorphic lock deadlock/mutual exclusion
     5Start Test: single scoped lock mutual exclusion
     6End Test: single scoped lock mutual exclusion
     7Start Test: multi scoped lock deadlock/mutual exclusion
     8End Test: multi scoped lock deadlock/mutual exclusion
  • tests/concurrent/mutexstmt/locks.cfa

    r2e9b59b rba897d21  
    33
    44const unsigned int num_times = 10000;
    5 
    6 Duration default_preemption() { return 0; }
    75
    86single_acquisition_lock m1, m2, m3, m4, m5;
     
    2422}
    2523
    26 void refTest( single_acquisition_lock & m ) {
    27         mutex ( m ) {
    28                 assert(!insideFlag);
    29                 insideFlag = true;
    30                 assert(insideFlag);
    31                 insideFlag = false;
    32         }
    33 }
    34 
    3524thread T_Multi {};
    3625
    3726void main( T_Multi & this ) {
    3827        for (unsigned int i = 0; i < num_times; i++) {
    39                 refTest( m1 );
    4028                mutex ( m1 ) {
    4129                        assert(!insideFlag);
     
    7159}
    7260
    73 single_acquisition_lock l1;
    74 linear_backoff_then_block_lock l2;
    75 owner_lock l3;
     61thread T_Mutex_Scoped {};
    7662
    77 monitor monitor_t {};
    78 
    79 monitor_t l4;
    80 
    81 thread T_Multi_Poly {};
    82 
    83 void main( T_Multi_Poly & this ) {
     63void main( T_Mutex_Scoped & this ) {
    8464        for (unsigned int i = 0; i < num_times; i++) {
    85                 refTest( l1 );
    86                 mutex ( l1, l4 ) {
     65                {
     66                        scoped_lock(single_acquisition_lock) s{m1};
     67                        count++;
     68                }
     69                {
     70                        scoped_lock(single_acquisition_lock) s{m1};
    8771                        assert(!insideFlag);
    8872                        insideFlag = true;
     
    9074                        insideFlag = false;
    9175                }
    92                 mutex ( l1, l2, l3 ) {
     76        }
     77}
     78
     79thread T_Multi_Scoped {};
     80
     81void main( T_Multi_Scoped & this ) {
     82        for (unsigned int i = 0; i < num_times; i++) {
     83                {
     84                        scoped_lock(single_acquisition_lock) s{m1};
    9385                        assert(!insideFlag);
    9486                        insideFlag = true;
     
    9688                        insideFlag = false;
    9789                }
    98                 mutex ( l3, l1, l4 ) {
     90                {
     91                        scoped_lock(single_acquisition_lock) s1{m1};
     92                        scoped_lock(single_acquisition_lock) s2{m2};
     93                        scoped_lock(single_acquisition_lock) s3{m3};
     94                        scoped_lock(single_acquisition_lock) s4{m4};
     95                        scoped_lock(single_acquisition_lock) s5{m5};
    9996                        assert(!insideFlag);
    10097                        insideFlag = true;
     
    10299                        insideFlag = false;
    103100                }
    104                 mutex ( l1, l2, l4 ) {
     101                {
     102                        scoped_lock(single_acquisition_lock) s1{m1};
     103                        scoped_lock(single_acquisition_lock) s3{m3};
     104                        assert(!insideFlag);
     105                        insideFlag = true;
     106                        assert(insideFlag);
     107                        insideFlag = false;
     108                }
     109                {
     110                        scoped_lock(single_acquisition_lock) s1{m1};
     111                        scoped_lock(single_acquisition_lock) s2{m2};
     112                        scoped_lock(single_acquisition_lock) s4{m4};
     113                        assert(!insideFlag);
     114                        insideFlag = true;
     115                        assert(insideFlag);
     116                        insideFlag = false;
     117                }
     118                {
     119                        scoped_lock(single_acquisition_lock) s1{m1};
     120                        scoped_lock(single_acquisition_lock) s3{m3};
     121                        scoped_lock(single_acquisition_lock) s4{m4};
     122                        scoped_lock(single_acquisition_lock) s5{m5};
    105123                        assert(!insideFlag);
    106124                        insideFlag = true;
     
    113131int num_tasks = 10;
    114132int main() {
    115         processor p[num_tasks - 1];
     133        processor p[10];
    116134
    117135        printf("Start Test: single lock mutual exclusion\n");
    118136        {
    119                 T_Mutex t[num_tasks];
     137                T_Mutex t[10];
    120138        }
    121139        assert(count == num_tasks * num_times);
     
    123141        printf("Start Test: multi lock deadlock/mutual exclusion\n");
    124142        {
    125                 T_Multi t[num_tasks];
     143                T_Multi t[10];
    126144        }
    127145        printf("End Test: multi lock deadlock/mutual exclusion\n");
    128         printf("Start Test: multi polymorphic lock deadlock/mutual exclusion\n");
     146       
     147        count = 0;
     148        printf("Start Test: single scoped lock mutual exclusion\n");
    129149        {
    130                 T_Multi_Poly t[num_tasks];
     150                T_Mutex_Scoped t[10];
    131151        }
    132         printf("End Test: multi polymorphic lock deadlock/mutual exclusion\n");
     152        assert(count == num_tasks * num_times);
     153        printf("End Test: single scoped lock mutual exclusion\n");
     154        printf("Start Test: multi scoped lock deadlock/mutual exclusion\n");
     155        {
     156                T_Multi_Scoped t[10];
     157        }
     158        printf("End Test: multi scoped lock deadlock/mutual exclusion\n");     
    133159}
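
The test rewrite replaces CFA's block-structured `mutex ( m1, m2, ... ) { ... }` statements (and the polymorphic-lock variants) with scoped_lock guard objects, i.e., RAII. C++'s std::scoped_lock is the closest standard analogue, including multi-lock acquisition in one declaration; a sketch of the shape the rewritten tests take (an analogy, not the CFA library API):

    #include <cassert>
    #include <mutex>

    std::mutex m1, m2, m3;
    bool insideFlag = false;

    void critical() {
        // all three locks held for the scope and released on every exit path;
        // std::scoped_lock also orders acquisition internally to avoid deadlock
        std::scoped_lock guard( m1, m2, m3 );
        assert( !insideFlag );
        insideFlag = true;
        assert( insideFlag );
        insideFlag = false;
    }

    int main() { critical(); }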
  • tests/designations.cfa

    r2e9b59b rba897d21  
    1010// Created On       : Thu Jun 29 15:26:36 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Mar 28 22:41:55 2022
    13 // Update Count     : 15
     12// Last Modified On : Thu Jul 27 11:46:35 2017
     13// Update Count     : 3
    1414//
    1515
     
    1818// is used for the designation syntax
    1919#ifdef __cforall
    20 #define _ :
    21 #define AT @
     20#define DES :
    2221#else
    23 int printf( const char *, ...);
    24 #define _ =
    25 #define AT
     22int printf(const char *, ...);
     23#define DES =
    2624#endif
    2725
    2826const int indentAmt = 2;
    29 void indent( int level ) {
    30         for ( int i = 0; i < level; ++i ) {
    31                 printf( " " );
     27void indent(int level) {
     28        for (int i = 0; i < level; ++i) {
     29                printf(" ");
    3230        }
    3331}
     
    3836        int * ptr;
    3937};
    40 void printA( struct A a, int level ) {
    41         indent( level );
    42         printf( "(A){ %d %d %p }\n", a.x, a.y, a.ptr );
     38void printA(struct A a, int level) {
     39        indent(level);
     40        printf("(A){ %d %d %p }\n", a.x, a.y, a.ptr);
    4341}
    4442
     
    4745        struct A a0, a1;
    4846};
    49 void printB( struct B b, int level ) {
    50         indent( level );
    51         printf( "(B){\n" );
    52         printA( b.a0, level+indentAmt );
    53         printA( b.a1, level+indentAmt );
    54         indent( level );
    55         printf( "}\n" );
     47void printB(struct B b, int level) {
     48        indent(level);
     49        printf("(B){\n");
     50        printA(b.a0, level+indentAmt);
     51        printA(b.a1, level+indentAmt);
     52        indent(level);
     53        printf("}\n");
    5654}
    5755
     
    6159        struct B b;
    6260};
    63 void printC( struct C c, int level ) {
    64         indent( level );
    65         printf( "(C){\n" );
    66         indent( level+indentAmt );
    67         printf( "(int[]{ %d %d %d }\n", c.arr[0], c.arr[1], c.arr[2]);
    68         printB( c.b, level+indentAmt );
    69         indent( level );
    70         printf( "}\n" );
     61void printC(struct C c, int level) {
     62        indent(level);
     63        printf("(C){\n");
     64        indent(level+indentAmt);
     65        printf("(int[]{ %d %d %d }\n", c.arr[0], c.arr[1], c.arr[2]);
     66        printB(c.b, level+indentAmt);
     67        indent(level);
     68        printf("}\n");
    7169}
    7270
     
    7775        };
    7876};
    79 void printD( struct D d, int level ) {
    80         indent( level);
    81         printf( "(D ){ %d }\n", d.x );
     77void printD(struct D d, int level) {
     78        indent(level);
     79        printf("(D){ %d }\n", d.x);
    8280}
    8381
     
    10199    } m;
    102100};
    103 struct Fred s1 AT= { .m.j _ 3 };
    104 struct Fred s2 AT= { .i _ { [2] _ 2 } };
     101struct Fred s1 @= { .m.j : 3 };
     102struct Fred s2 @= { .i : { [2] : 2 } };
    105103
    106104int main() {
    107105        // simple designation case - starting from beginning of structure, leaves ptr default-initialized (zero)
    108106        struct A y0 = {
    109                 .x _ 2,
    110                 .y _ 3
     107                .x DES 2,
     108                .y DES 3
    111109        };
    112110
     
    119117        // use designation to move to member y, leaving x default-initialized (zero)
    120118        struct A y2 = {
    121                 .y _ 3,
     119                .y DES 3,
    122120                0
    123121        };
     
    129127#endif
    130128
    131         printf( "=====A=====\n" );
    132         printA( y0, 0 );
    133         printA( y1, 0 );
    134         printA( y2, 0 );
    135         printf( "=====A=====\n\n" );
     129        printf("=====A=====\n");
     130        printA(y0, 0);
     131        printA(y1, 0);
     132        printA(y2, 0);
     133        printf("=====A=====\n\n");
    136134
    137135        // initialize only first element (z0.a.x), leaving everything else default-initialized (zero), no nested curly-braces
     
    142140                { 3 }, // z1.a0
    143141                { 4 }, // z1.a1
    144                 .a0 _ { 5 }, // z1.a0
     142                .a0 DES { 5 }, // z1.a0
    145143                { 6 }, // z1.a1
    146                 .a0.y _ 2, // z1.a0.y
     144                .a0.y DES 2, // z1.a0.y
    147145                0, // z1.a0.ptr
    148146        };
     
    172170        };
    173171
    174         printf( "=====B=====\n" );
    175         printB( z0, 0 );
    176         printB( z1, 0 );
    177         printB( z2, 0 );
    178         printB( z3, 0 );
    179         printB( z5, 0 );
    180         printB( z6, 0 );
    181         printf( "=====B=====\n\n" );
     172        printf("=====B=====\n");
     173        printB(z0, 0);
     174        printB(z1, 0);
     175        printB(z2, 0);
     176        printB(z3, 0);
     177        printB(z5, 0);
     178        printB(z6, 0);
     179        printf("=====B=====\n\n");
    182180
    183181        // TODO: what about extra things in a nested init? are empty structs skipped??
     
    190188        };
    191189
    192         printf( "=====C=====\n" );
    193         printC( c1, 0 );
    194         printf( "=====C=====\n\n" );
     190        printf("=====C=====\n");
     191        printC(c1, 0);
     192        printf("=====C=====\n\n");
    195193
    196194#if ERROR
     
    215213#endif
    216214        // array designation
    217         int i[2] = { [1] _ 3 };
     215        int i[2] = { [1] : 3 };
    218216        // allowed to have 'too many' initialized lists - essentially they are ignored.
    219217        int i1 = { 3 };
     
    221219        // doesn't work yet.
    222220        // designate unnamed object's members
    223         // struct D d = { .x _ 3 };
     221        // struct D d = { .x DES 3 };
    224222#if ERROR
    225         struct D d1 = { .y _ 3 };
     223        struct D d1 = { .y DES 3 };
    226224#endif
    227225
     
    243241        // move cursor to e4.b.a0.x and initialize until e3.b.a1.ptr inclusive
    244242        union E e3 = {
    245                 .b.a0.x _ 2, 3, 0, 5, 6, 0
    246         };
    247 
    248         printf( "=====E=====\n" );
    249         printA( e0.a, 0 );
    250         printA( e1.a, 0 );
    251         printA( e2.a, 0 );
    252         printB( e3.b, 0 );
    253         printf( "=====E=====\n\n" );
     243                .b.a0.x DES 2, 3, 0, 5, 6, 0
     244        };
     245
     246        printf("=====E=====\n");
     247        printA(e0.a, 0);
     248        printA(e1.a, 0);
     249        printA(e2.a, 0);
     250        printB(e3.b, 0);
     251        printf("=====E=====\n\n");
    254252
    255253        // special case of initialization: char[] can be initialized with a string literal
    256254        const char * str0 = "hello";
    257255        char str1[] = "hello";
    258         const char c2[] = "abc";
    259         const char c3[] = { 'a', 'b', 'c' };
    260         const char c4[][2] = { { 'a', 'b' }, { 'c', 'd'}, { 'c', 'd'} };
    261 
    262         // more cases
    263 
    264 //      int widths[] = { [3 ... 9] _ 1, [10 ... 99] _ 2, [100] _ 3 };
    265 //      int widths[] = { [3 ~ 9] _ 1, [10 ~ 99] _ 2, [100] _ 3 };
    266         struct point { int x, y; };
    267         struct point p = { .y _ 5, .x _ 7 };
    268         union foo { int i; double d; };
    269         union foo f = { .d _ 4 };
    270         int v1, v2, v4;
    271         int w[6] = { [1] _ v1, v2, [4] _ v4 };
    272         int whitespace[256] = { [' '] _ 1, ['\t'] _ 1, ['\v'] _ 1, ['\f'] _ 1, ['\n'] _ 1, ['\r'] _ 1 };
    273         struct point ptarray[10] = { [2].y _ 34, [2].x _ 35, [0].x _ 36 };
     256        const char c1[] = "abc";
     257        const char c2[] = { 'a', 'b', 'c' };
     258        const char c3[][2] = { { 'a', 'b' }, { 'c', 'd'}, { 'c', 'd'} };
    274259}
    275260
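
The macro gymnastics in this test exist because CFA spells a designation with `:` (and `@=` for an unresolved initializer) while plain C spells it `=`, and the same source must compile as both. For comparison, standard C++ (since C++20) accepts only the in-order subset of C99's designators, sketched below; the out-of-order and array-index forms exercised by this test remain C-only:

    struct A { int x; int y; int * ptr; };

    int main() {
        A y0 = { .x = 2, .y = 3 };       // C++20: designators must follow declaration order
        // A y1 = { .y = 3, .x = 2 };    // ill-formed in C++ (out of order); fine in C99
        // int i[2] = { [1] = 3 };       // array designators are C-only
        return y0.x == 2 && y0.ptr == nullptr ? 0 : 1;  // ptr was zero-initialized
    }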
  • tests/io/away_fair.cfa

    r2e9b59b rba897d21  
    2020#include <thread.hfa>
    2121#include <iofwd.hfa>
     22#include <io/types.hfa>
    2223
    2324Duration default_preemption() {
     
    5051}
    5152
    52 // ----- Submitter -----
     53// ----- Spinner -----
    5354// try to submit io but yield so that it's likely we are moved to the slow path
    5455thread Submitter {};
  • tests/io/io-acquire.cfa

    r2e9b59b rba897d21  
    1010// Created On       : Mon Mar  1 18:40:09 2021
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Apr  9 15:22:03 2022
    13 // Update Count     : 76
     12// Last Modified On : Fri Jan 14 09:13:18 2022
     13// Update Count     : 74
    1414//
    1515
     
    1717#include <thread.hfa>
    1818#include <mutex_stmt.hfa>
     19
     20Duration default_preemption() { return 0; }
    1921
    2022thread T {};
  • tests/meta/dumpable.cfa

    r2e9b59b rba897d21  
    1414//
    1515
     16#include <limits.h>
    1617#include <errno.h>
    17 #include <limits.h>
    18 #include <string.h>
    1918
    2019#include <fstream.hfa>
    2120
    2221extern "C" {
    23         #include <fcntl.h>
    24         #include <unistd.h>
    2522        #include <sys/prctl.h>
    2623        #include <sys/resource.h>
    2724        #include <sys/statvfs.h>
    28         #include <sys/stat.h>
    29         #include <sys/types.h>
     25        #include <unistd.h>
    3026}
    3127
     
    106102}
    107103
    108 void check_core_pattern() {
    109         int ret;
    110         int cp = open("/proc/sys/kernel/core_pattern", 0, O_RDONLY);
    111         if(cp < 0) {
    112                 perror("open(/proc/sys/kernel/core_pattern, O_RDONLY) error");
    113                 return;
    114         }
    115 
    116         try {
    117                 const char * expected = "core\n";
    118                 const int sz = sizeof("core\n");
    119                 char buf[512];
    120                 ret = read(cp, buf, 512);
    121                 if(ret < 0) {
    122                         perror("first core pattern read error");
    123                         return;
    124                 }
    125                 ret = strncmp(expected, buf, sz - 1);
    126                 if(ret != 0) {
    127                         serr | "/proc/sys/kernel/core_pattern does not contain 'core', was:" | nl | nl | buf | nl
    128                              | "Test script expects core files to be dumped with name 'core' in the current working directory." | nl
    129                              | "Apport is not supported, it should be deactivated in /etc/default/apport for the test suite to work with core dumps.";
    130 
    131                         return;
    132                 }
    133         }
    134         finally {
    135                 ret = close(cp);
    136                 if(ret < 0) perror("close(/proc/sys/kernel/core_pattern) error");
    137         }
    138 
    139 }
    140 
    141104int main() {
    142105        check_ulimit();
     
    150113        check_dumpflag();
    151114
    152         check_core_pattern();
    153 
    154115        sout | "Done";
    155116}
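
The deleted check_core_pattern verified that /proc/sys/kernel/core_pattern is literally `core`, so dump files land in the working directory where the test script looks for them (an Apport-style pipe pattern breaks that, hence the hint about /etc/default/apport). A rough C++ equivalent of the removed check (POSIX, Linux-only):

    #include <cstdio>
    #include <cstring>
    #include <fcntl.h>
    #include <unistd.h>

    // true when cores are dumped as plain 'core' in the current working directory
    bool core_pattern_is_core() {
        int fd = open( "/proc/sys/kernel/core_pattern", O_RDONLY );
        if ( fd < 0 ) { perror( "open core_pattern" ); return false; }
        char buf[512] = {};
        ssize_t n = read( fd, buf, sizeof buf - 1 );
        close( fd );
        if ( n < 0 ) { perror( "read core_pattern" ); return false; }
        return 0 == strncmp( buf, "core\n", 5 );
    }

    int main() {
        if ( ! core_pattern_is_core() )
            fprintf( stderr, "core_pattern is not 'core'; core-dump tests cannot find their files\n" );
    }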
  • tests/pybin/settings.py

    r2e9b59b rba897d21  
    155155        global generating
    156156        global make
    157         global make_jobfds
    158157        global output_width
    159158        global timeout
     
    169168        generating   = options.regenerate_expected
    170169        make         = ['make']
    171         make_jobfds  = []
    172170        output_width = 24
    173171        timeout      = Timeouts(options.timeout, options.global_timeout)
     
    179177                os.putenv('DISTCC_LOG', os.path.join(BUILDDIR, 'distcc_error.log'))
    180178
    181 def update_make_cmd(flags):
     179def update_make_cmd(force, jobs):
    182180        global make
    183         make = ['make', *flags]
    184 
    185 def update_make_fds(r, w):
    186         global make_jobfds
    187         make_jobfds = (r, w)
     181
     182        make = ['make'] if not force else ['make', "-j%i" % jobs]
    188183
    189184def validate():
     
    192187        global distcc
    193188        distcc       = "DISTCC_CFA_PATH=~/.cfadistcc/%s/cfa" % tools.config_hash()
    194         make_ret, out, err = tools.make( ".validate", output_file=subprocess.PIPE, error=subprocess.PIPE )
     189        errf = os.path.join(BUILDDIR, ".validate.err")
     190        make_ret, out = tools.make( ".validate", error_file = errf, output_file=subprocess.DEVNULL, error=subprocess.DEVNULL )
    195191        if make_ret != 0:
     192                with open (errf, "r") as myfile:
     193                        error=myfile.read()
    196194                print("ERROR: Invalid configuration %s:%s" % (arch.string, debug.string), file=sys.stderr)
    197                 print("       verify returned : \n%s" % err, file=sys.stderr)
     195                print("       verify returned : \n%s" % error, file=sys.stderr)
     196                tools.rm(errf)
    198197                sys.exit(1)
     198
     199        tools.rm(errf)
    199200
    200201def prep_output(tests):
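
The make_jobfds / update_make_fds plumbing added here, together with prep_recursive_make in tools.py below, implements the setup side of GNU make's jobserver protocol: create a pipe, pre-fill it with N-1 tokens (the running process implicitly holds the Nth), advertise the descriptors via --jobserver-auth in MAKEFLAGS, and let every child inherit both fds. A POSIX C++ sketch of that handshake (N and the child-spawning step are placeholders):

    #include <cstdio>
    #include <cstdlib>
    #include <string>
    #include <unistd.h>

    int main() {
        int fds[2];
        if ( pipe( fds ) != 0 ) { perror( "pipe" ); return 1; }

        const int N = 8;                           // desired parallelism (placeholder)
        std::string tokens( N - 1, '+' );          // N-1 tokens; the Nth is implicit
        write( fds[1], tokens.data(), tokens.size() );

        // children find the pipe through MAKEFLAGS and must inherit both fds
        std::string flags = "-j" + std::to_string( N )
            + " --jobserver-auth=" + std::to_string( fds[0] )
            + "," + std::to_string( fds[1] );
        setenv( "MAKEFLAGS", flags.c_str(), 1 );
        // ... spawn make / test subprocesses here; each reads a token before
        // starting a job and writes it back when the job finishes ...
        return 0;
    }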
  • tests/pybin/tools.py

    r2e9b59b rba897d21  
    2323
    2424# helper functions to run terminal commands
    25 def sh(*cmd, timeout = False, output_file = None, input_file = None, input_text = None, error = subprocess.STDOUT, ignore_dry_run = False, pass_fds = []):
     25def sh(*cmd, timeout = False, output_file = None, input_file = None, input_text = None, error = subprocess.STDOUT, ignore_dry_run = False):
    2626        try:
    2727                cmd = list(cmd)
     
    6565                                **({'input' : bytes(input_text, encoding='utf-8')} if input_text else {'stdin' : input_file}),
    6666                                stdout  = output_file,
    67                                 stderr  = error,
    68                                 pass_fds = pass_fds
     67                                stderr  = error
    6968                        ) as proc:
    7069
    7170                                try:
    72                                         out, errout = proc.communicate(
     71                                        out, _ = proc.communicate(
    7372                                                timeout = settings.timeout.single if timeout else None
    7473                                        )
    7574
    76                                         return proc.returncode, out.decode("latin-1") if out else None, errout.decode("latin-1") if errout else None
     75                                        return proc.returncode, out.decode("latin-1") if out else None
    7776                                except subprocess.TimeoutExpired:
    7877                                        if settings.timeout2gdb:
    7978                                                print("Process {} timeout".format(proc.pid))
    8079                                                proc.communicate()
    81                                                 return 124, str(None), "Subprocess Timeout 2 gdb"
     80                                                return 124, str(None)
    8281                                        else:
    8382                                                proc.send_signal(signal.SIGABRT)
    8483                                                proc.communicate()
    85                                                 return 124, str(None), "Subprocess Timeout 2 gdb"
     84                                                return 124, str(None)
    8685
    8786        except Exception as ex:
     
    106105                return (False, "No file")
    107106
    108         code, out, err = sh("file", fname, output_file=subprocess.PIPE)
     107        code, out = sh("file", fname, output_file=subprocess.PIPE)
    109108        if code != 0:
    110                 return (False, "'file EXPECT' failed with code {} '{}'".format(code, err))
     109                return (False, "'file EXPECT' failed with code {}".format(code))
    111110
    112111        match = re.search(".*: (.*)", out)
     
    191190        ]
    192191        cmd = [s for s in cmd if s]
    193         return sh(*cmd, output_file=output_file, error=error, pass_fds=settings.make_jobfds)
     192        return sh(*cmd, output_file=output_file, error=error)
    194193
    195194def make_recon(target):
     
    242241# move a file
    243242def mv(source, dest):
    244         ret, _, _ = sh("mv", source, dest)
     243        ret, _ = sh("mv", source, dest)
    245244        return ret
    246245
    247246# cat one file into the other
    248247def cat(source, dest):
    249         ret, _, _ = sh("cat", source, output_file=dest)
     248        ret, _ = sh("cat", source, output_file=dest)
    250249        return ret
    251250
     
    290289#               system
    291290################################################################################
    292 def jobserver_version():
    293         make_ret, out, err = sh('make', '.test_makeflags', '-j2', output_file=subprocess.PIPE, error=subprocess.PIPE)
    294         if make_ret != 0:
    295                 print("ERROR: cannot find Makefile jobserver version", file=sys.stderr)
    296                 print("       test returned : {} '{}'".format(make_ret, err), file=sys.stderr)
     291# count number of jobs to create
     292def job_count( options, tests ):
     293        # check if the user already passed in a number of jobs for multi-threading
     294        if not options.jobs:
     295                make_flags = os.environ.get('MAKEFLAGS')
     296                force = bool(make_flags)
     297                make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
     298                if make_jobs_fds :
     299                        tokens = os.read(int(make_jobs_fds.group(2)), 1024)
     300                        options.jobs = len(tokens)
     301                        os.write(int(make_jobs_fds.group(3)), tokens)
     302                else :
     303                        if settings.distribute:
     304                                ret, jstr = sh("distcc", "-j", output_file=subprocess.PIPE, ignore_dry_run=True)
     305                                if ret == 0:
     306                                        options.jobs = int(jstr.strip())
     307                                else :
     308                                        options.jobs = multiprocessing.cpu_count()
     309                        else:
     310                                options.jobs = multiprocessing.cpu_count()
     311        else :
     312                force = True
     313
     314        # make sure we have a valid number of jobs that corresponds to user input
     315        if options.jobs <= 0 :
     316                print('ERROR: Invalid number of jobs', file=sys.stderr)
    297317                sys.exit(1)
    298318
    299         re_jobs = re.search("--jobserver-(auth|fds)", out)
    300         if not re_jobs:
    301                 print("ERROR: cannot find Makefile jobserver version", file=sys.stderr)
    302                 print("       MAKEFLAGS are : '{}'".format(out), file=sys.stderr)
    303                 sys.exit(1)
    304 
    305         return "--jobserver-{}".format(re_jobs.group(1))
    306 
    307 def prep_recursive_make(N):
    308         if N < 2:
    309                 return []
    310 
    311         # create the pipe
    312         (r, w) = os.pipe()
    313 
    314         # fill it with N-1 tokens (why N-1 rather than N is explained in the make manpage)
    315         os.write(w, b'+' * (N - 1));
    316 
    317         # prep the flags for make
    318         make_flags = ["-j{}".format(N), "--jobserver-auth={},{}".format(r, w)]
    319 
    320         # tell make about the pipes
    321         os.environ["MAKEFLAGS"] = os.environ["MFLAGS"] = " ".join(make_flags)
    322 
    323         # make sure to pass the pipes to our children
    324         settings.update_make_fds(r, w)
    325 
    326         return make_flags
    327 
    328 def prep_unlimited_recursive_make():
    329         # prep the flags for make
    330         make_flags = ["-j"]
    331 
    332         # tell make about the pipes
    333         os.environ["MAKEFLAGS"] = os.environ["MFLAGS"] = "-j"
    334 
    335         return make_flags
    336 
    337 
    338 def eval_hardware():
    339         # we can create as many things as we want
    340         # how much hardware do we have?
    341         if settings.distribute:
    342                 # remote hardware is allowed
    343                 # how much do we have?
    344                 ret, jstr, _ = sh("distcc", "-j", output_file=subprocess.PIPE, ignore_dry_run=True)
    345                 return int(jstr.strip()) if ret == 0 else multiprocessing.cpu_count()
    346         else:
    347                 # remote isn't allowed, use local cpus
    348                 return multiprocessing.cpu_count()
    349 
    350 # count number of jobs to create
    351 def job_count( options ):
    352         # check if the user already passed in a number of jobs for multi-threading
    353         make_env = os.environ.get('MAKEFLAGS')
    354         make_flags = make_env.split() if make_env else None
    355         jobstr = jobserver_version()
    356 
    357         if options.jobs and make_flags:
    358                 print('WARNING: the -j option should not be specified when called from make', file=sys.stderr)
    359 
    360         # Top level make is calling the shots, just follow
    361         if make_flags:
    362                 # do we have -j and --jobserver-...
    363                 jobopt = None
    364                 exists_fds = None
    365                 for f in make_flags:
    366                         jobopt = f if f.startswith("-j") else jobopt
    367                         exists_fds = f if f.startswith(jobstr) else exists_fds
    368 
    369                 # do we have limited parallelism?
    370                 if exists_fds :
    371                         try:
    372                                 rfd, wfd = tuple(exists_fds.split('=')[1].split(','))
    373                         except:
    374                                 print("ERROR: jobserver flag has unrecognizable format, was '{}'".format(exists_fds), file=sys.stderr)
    375                                 sys.exit(1)
    376 
    377                         # read the token pipe to count the available tokens, then restore the pipe
    378                         # this assumes the test suite script isn't invoked in parallel with something else
    379                         tokens = os.read(int(rfd), 65536)
    380                         os.write(int(wfd), tokens)
    381 
    382                         # the number of tokens is off by one for an obscure but well-documented reason:
    383                         # every make process holds one implicit token; see man make for more details
    384                         options.jobs = len(tokens) + 1
    385 
    386                 # do we have unlimited parallelism?
    387                 elif jobopt and jobopt != "-j1":
    388                         # check that this actually makes sense
    389                         if jobopt != "-j":
    390                                 print("ERROR: -j option passed by make but no {} flag, was '{}'".format(jobstr, jobopt), file=sys.stderr)
    391                                 sys.exit(1)
    392 
    393                         options.jobs = eval_hardware()
    394                         flags = prep_unlimited_recursive_make()
    395 
    396 
    397                 # then no parallelism
    398                 else:
    399                         options.jobs = 1
    400 
    401                 # keep all the flags make passed along, except 'w', which only concerns directory printing
    402                 flags = [f for f in make_flags if f != 'w']
    403 
    404         # Arguments are calling the shots, fake the top level make
    405         elif options.jobs :
    406 
    407                 # make sure the user passed a valid number of jobs
    408                 if options.jobs < 0 :
    409                         print('ERROR: Invalid number of jobs', file=sys.stderr)
    410                         sys.exit(1)
    411 
    412                 flags = prep_recursive_make(options.jobs)
    413 
    414         # Arguments are calling the shots, fake the top level make, but 0 is a special case
    415         elif options.jobs == 0:
    416                 options.jobs = eval_hardware()
    417                 flags = prep_unlimited_recursive_make()
    418 
    419         # No one asked to run in parallel, so don't
    420         else :
    421                 options.jobs = 1
    422                 flags = []
    423 
    424         # Make sure we call make as expected
    425         settings.update_make_cmd( flags )
    426 
    427         # return the job count
    428         return options.jobs
     319        return min( options.jobs, len(tests) ), force
    429320
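
Aside: both versions of job_count above implement a client of GNU make's jobserver protocol, in which a pipe pre-loaded with job tokens is shared with every recursive make. The sketch below is a minimal, POSIX-only illustration (the job count of 4 and the read size are arbitrary assumptions, not values from the suite) of the token accounting, including the off-by-one noted in the comments:

    import os

    N = 4                         # pretend the top-level make was started with -j4
    r, w = os.pipe()              # the jobserver token pipe
    os.set_inheritable(r, True)   # children must inherit both ends
    os.set_inheritable(w, True)
    os.write(w, b'+' * (N - 1))   # N-1 tokens: every make holds one implicit slot

    # count the free slots non-destructively: drain the pipe, then refill it
    tokens = os.read(r, 1024)
    os.write(w, tokens)
    print(len(tokens) + 1)        # prints 4: the +1 is the implicit slot

Between the read and the write the pipe is momentarily empty, which is why the comments above warn against running the script in parallel with another jobserver client.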
    430321# enable core dumps for all the test children
     
    443334        distcc_hash = os.path.join(settings.SRCDIR, '../tools/build/distcc_hash')
    444335        config = "%s-%s" % (settings.arch.target, settings.debug.path)
    445         _, out, _ = sh(distcc_hash, config, output_file=subprocess.PIPE, ignore_dry_run=True)
     336        _, out = sh(distcc_hash, config, output_file=subprocess.PIPE, ignore_dry_run=True)
    446337        return out.strip()
    447338
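
Many hunks in this changeset shrink the return value of the suite's sh() helper from (ret, out, err) to (ret, out), as in the distcc hash above. The helper's definition is outside this changeset, so the following is only a sketch of a subprocess wrapper with that shape; folding stderr into the captured output is an assumption, not something the diff confirms:

    import subprocess

    def sh(*cmd, output_file=None, input_file=None):
        # hypothetical stand-in for the suite's sh(): run a command and
        # return (returncode, captured output or None)
        proc = subprocess.run(
            list(cmd),
            stdin=input_file,
            stdout=output_file,
            stderr=subprocess.STDOUT if output_file == subprocess.PIPE else None,
            universal_newlines=True,
        )
        return proc.returncode, proc.stdout

    ret, out = sh('echo', 'hello', output_file=subprocess.PIPE)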
     
    483374
    484375        if not os.path.isfile(core):
    485                 return 1, "ERR No core dump, expected '{}' (limit soft: {} hard: {})".format(core, *resource.getrlimit(resource.RLIMIT_CORE))
     376                return 1, "ERR No core dump (limit soft: {} hard: {})".format(*resource.getrlimit(resource.RLIMIT_CORE))
    486377
    487378        try:
    488                 ret, out, err = sh('gdb', '-n', path, core, '-batch', '-x', cmd, output_file=subprocess.PIPE)
    489                 if ret == 0:
    490                         return 0, out
    491                 else:
    492                         return 1, err
     379                return sh('gdb', '-n', path, core, '-batch', '-x', cmd, output_file=subprocess.PIPE)
    493380        except:
    494381                return 1, "ERR Could not read core with gdb"
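
The simplified error path above delegates everything to gdb in batch mode and returns its (return code, output) pair directly. A standalone equivalent of that invocation, with hypothetical file names, would be:

    import subprocess

    # '-n' skips .gdbinit files; '-batch' exits after running the '-x' command file
    proc = subprocess.run(
        ['gdb', '-n', './mytest', './core', '-batch', '-x', 'backtrace.gdb'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True,
    )
    print(proc.returncode, proc.stdout)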
  • tests/test.py

    r2e9b59b rba897d21  
    140140        parser.add_argument('--regenerate-expected', help='Regenerate the .expect files by running the specified tests; can be used with the --all option', action='store_true')
    141141        parser.add_argument('--archive-errors', help='If given a valid path, the test script will copy the core dump and the executable there when a test crashes.', type=str, default='')
    142         parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously, 0 (default) for unlimited', nargs='?', const=0, type=int)
     142        parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
    143143        parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
    144144        parser.add_argument('--list-dist', help='List all tests for distribution', action='store_true')
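
The -j hunk above drops the optional-value form: previously, nargs='?' with const=0 let a bare -j mean "unlimited" (resolved against the hardware later by job_count), while after the change -j requires an explicit count. A minimal sketch of the removed argparse behavior:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-j', '--jobs', nargs='?', const=0, type=int)

    print(parser.parse_args([]).jobs)             # None: no parallelism requested
    print(parser.parse_args(['-j']).jobs)         # 0: bare -j stores const, meaning unlimited
    print(parser.parse_args(['-j', '4']).jobs)    # 4: explicit job count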
     
    195195        # build, skipping to next test on error
    196196        with Timed() as comp_dur:
    197                 make_ret, _, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )
     197                make_ret, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )
    198198
    199199        # ----------
     
    208208                                if settings.dry_run or is_exe(exe_file):
    209209                                        # run test
    210                                         retcode, _, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)
     210                                        retcode, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)
    211211                                else :
    212212                                        # simply cat the result into the output
     
    226226                        else :
    227227                                # fetch return code and error from the diff command
    228                                 retcode, error, _ = diff(cmp_file, out_file)
     228                                retcode, error = diff(cmp_file, out_file)
    229229
    230230                else:
     
    366366                        print(os.path.relpath(t.expect(), settings.SRCDIR), end=' ')
    367367                        print(os.path.relpath(t.input() , settings.SRCDIR), end=' ')
    368                         code, out, err = make_recon(t.target())
     368                        code, out = make_recon(t.target())
    369369
    370370                        if code != 0:
    371                                 print('ERROR: recon failed for test {}: {} \'{}\''.format(t.target(), code, err), file=sys.stderr)
     371                                print('ERROR: recon failed for test {}'.format(t.target()), file=sys.stderr)
    372372                                sys.exit(1)
    373373
     
    417417                        if is_empty(t.expect()):
    418418                                print('WARNING: test "{}" has empty .expect file'.format(t.target()), file=sys.stderr)
    419 
    420         options.jobs = job_count( options )
    421419
    422420        # for each build configurations, run the test
     
    432430                        local_tests = settings.ast.filter( tests )
    433431                        local_tests = settings.arch.filter( local_tests )
     432                        options.jobs, forceJobs = job_count( options, local_tests )
     433                        settings.update_make_cmd(forceJobs, options.jobs)
    434434
    435435                        # check the build configuration works
    436436                        settings.validate()
    437                         jobs = min(options.jobs, len(local_tests))
    438437
    439438                        # print configuration
     
    441440                                'Regenerating' if settings.generating else 'Running',
    442441                                len(local_tests),
    443                                 jobs,
     442                                options.jobs,
    444443                                settings.ast.string,
    445444                                settings.arch.string,
     
    451450
    452451                        # otherwise run all tests and make sure to return the correct error code
    453                         failed = run_tests(local_tests, jobs)
     452                        failed = run_tests(local_tests, options.jobs)
    454453                        if failed:
    455454                                if not settings.continue_: