Changeset 2e9b59b


Timestamp:
Apr 19, 2022, 3:00:04 PM
Author:
m3zulfiq <m3zulfiq@…>
Branches:
ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
Children:
5b84a321
Parents:
ba897d21 (diff), bb7c77d (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

added benchmark and evaluations chapter to thesis

Files:
70 added
135 edited
6 moved

  • Jenkinsfile

    rba897d21 r2e9b59b  
    108108
    109109                        // Configure libcfa
    110                         sh 'make -j 8 --no-print-directory configure-libcfa'
     110                        sh 'make -j $(nproc) --no-print-directory configure-libcfa'
    111111                }
    112112        }
     
    116116                dir (BuildDir) {
    117117                        // Build driver
    118                         sh 'make -j 8 --no-print-directory -C driver'
     118                        sh 'make -j $(nproc) --no-print-directory -C driver'
    119119
    120120                        // Build translator
    121                         sh 'make -j 8 --no-print-directory -C src'
     121                        sh 'make -j $(nproc) --no-print-directory -C src'
    122122                }
    123123        }
     
    126126                // Build outside of the src tree to ease cleaning
    127127                dir (BuildDir) {
    128                         sh "make -j 8 --no-print-directory -C libcfa/${Settings.Architecture.name}-debug"
     128                        sh "make -j \$(nproc) --no-print-directory -C libcfa/${Settings.Architecture.name}-debug"
    129129                }
    130130        }
     
    133133                // Build outside of the src tree to ease cleaning
    134134                dir (BuildDir) {
    135                         sh "make -j 8 --no-print-directory -C libcfa/${Settings.Architecture.name}-nodebug"
     135                        sh "make -j \$(nproc) --no-print-directory -C libcfa/${Settings.Architecture.name}-nodebug"
    136136                }
    137137        }
     
    140140                // Build outside of the src tree to ease cleaning
    141141                dir (BuildDir) {
    142                         sh "make -j 8 --no-print-directory install"
     142                        sh 'make -j $(nproc) --no-print-directory install'
    143143                }
    144144        }
     
    161161                Tools.BuildStage('Test: full', Settings.RunAllTests) {
    162162                        dir (BuildDir) {
    163                                         jopt = ""
     163                                        jopt = '-j $(nproc)'
    164164                                        if( Settings.Architecture.node == 'x86' ) {
    165                                                 jopt = "-j2"
     165                                                jopt = '-j2'
    166166                                        }
    167167                                        //Run the tests from the tests directory
  • benchmark/io/http/main.cfa

    rba897d21 r2e9b59b  
    3333//============================================================================================='
    3434
    35 thread StatsPrinter {};
     35thread StatsPrinter {
     36        Worker * workers;
     37        int worker_cnt;
     38};
    3639
    3740void ?{}( StatsPrinter & this, cluster & cl ) {
    3841        ((thread&)this){ "Stats Printer Thread", cl };
     42        this.worker_cnt = 0;
    3943}
    4044
    4145void ^?{}( StatsPrinter & mutex this ) {}
     46
     47#define eng3(X) (ws(3, 3, unit(eng( X ))))
    4248
    4349void main(StatsPrinter & this) {
     
    5157
    5258                print_stats_now( *active_cluster(), CFA_STATS_READY_Q | CFA_STATS_IO );
     59                if(this.worker_cnt != 0) {
     60                        uint64_t tries = 0;
     61                        uint64_t calls = 0;
     62                        uint64_t header = 0;
     63                        uint64_t splcin = 0;
     64                        uint64_t splcot = 0;
     65                        struct {
     66                                volatile uint64_t calls;
     67                                volatile uint64_t bytes;
     68                        } avgrd[zipf_cnts];
     69                        memset(avgrd, 0, sizeof(avgrd));
     70
     71                        for(i; this.worker_cnt) {
     72                                tries += this.workers[i].stats.sendfile.tries;
     73                                calls += this.workers[i].stats.sendfile.calls;
     74                                header += this.workers[i].stats.sendfile.header;
     75                                splcin += this.workers[i].stats.sendfile.splcin;
     76                                splcot += this.workers[i].stats.sendfile.splcot;
     77                                for(j; zipf_cnts) {
     78                                        avgrd[j].calls += this.workers[i].stats.sendfile.avgrd[j].calls;
     79                                        avgrd[j].bytes += this.workers[i].stats.sendfile.avgrd[j].bytes;
     80                                }
     81                        }
     82
     83                        double ratio = ((double)tries) / calls;
     84
     85                        sout | "----- Worker Stats -----";
     86                        sout | "sendfile  : " | calls | "calls," | tries | "tries (" | ratio | " try/call)";
     87                        sout | "            " | header | "header," | splcin | "splice in," | splcot | "splice out";
     88                        sout | " - zipf sizes:";
     89                        for(i; zipf_cnts) {
     90                                double written = avgrd[i].calls > 0 ? ((double)avgrd[i].bytes) / avgrd[i].calls : 0;
     91                                sout | "        " | zipf_sizes[i] | "bytes," | avgrd[i].calls | "shorts," | written | "written";
     92                        }
     93                }
     94                else {
     95                        sout | "No Workers!";
     96                }
    5397        }
    5498}
     
    218262                        {
    219263                                Worker * workers = anew(options.clopts.nworkers);
     264                                cl[0].prnt->workers = workers;
     265                                cl[0].prnt->worker_cnt = options.clopts.nworkers;
    220266                                for(i; options.clopts.nworkers) {
    221267                                        // if( options.file_cache.fixed_fds ) {
     
    311357        }
    312358}
     359
     360const size_t zipf_sizes[] = { 102, 204, 307, 409, 512, 614, 716, 819, 921, 1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192, 9216, 10240, 20480, 30720, 40960, 51200, 61440, 71680, 81920, 92160, 102400, 204800, 307200, 409600, 512000, 614400, 716800, 819200, 921600 };
     361static_assert(zipf_cnts == sizeof(zipf_sizes) / sizeof(zipf_sizes[0]));
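
The new zipf_idx bookkeeping added to protocol.cfa (below) classifies each request by the smallest zipf_sizes entry that fits it. A minimal C sketch of that classification, using the table committed above; pick_zipf_idx is a hypothetical name, not the committed code:

    #include <stddef.h>

    // Table and count as committed in main.cfa / worker.hfa.
    static const size_t zipf_sizes[] = { 102, 204, 307, 409, 512, 614, 716, 819,
        921, 1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192, 9216, 10240, 20480,
        30720, 40960, 51200, 61440, 71680, 81920, 92160, 102400, 204800, 307200,
        409600, 512000, 614400, 716800, 819200, 921600 };
    enum { zipf_cnts = sizeof(zipf_sizes) / sizeof(zipf_sizes[0]) };  // 36

    // Index of the smallest zipf bucket that holds len, or -1 if len is too
    // large, mirroring the loop in the splice_in_t constructor below.
    static int pick_zipf_idx(size_t len) {
        for (int i = 0; i < zipf_cnts; i++)
            if (len <= zipf_sizes[i]) return i;
        return -1;
    }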
  • benchmark/io/http/parhttperf

    rba897d21 r2e9b59b  
    66
    77mkdir -p out
    8 rm -v out/*
     8rm out/*
     9echo "httperf --client [0-$(($NTHREADS - 1))]/$NTHREADS $@ > out/result.[0-$(($NTHREADS - 1))].out"
    910for ((i=0; i<$NTHREADS; i++))
    1011do
    11         # echo "httperf --client $i/$NTHREADS $@ > out/result.$i.out"
    1212        httperf --client $i/$NTHREADS $@ > out/result.$i.out &
    1313done
  • benchmark/io/http/protocol.cfa

    rba897d21 r2e9b59b  
    2424
    2525#include "options.hfa"
     26#include "worker.hfa"
    2627
    2728#define PLAINTEXT_1WRITE
     
    156157
    157158                count -= ret;
    158                 offset += ret;
    159159                size_t in_pipe = ret;
    160160                SPLICE2: while(in_pipe > 0) {
     
    249249}
    250250
    251 static inline int wait_and_process(header_g & this) {
     251static inline int wait_and_process(header_g & this, sendfile_stats_t & stats) {
    252252        wait(this.f);
    253253
     
    278278        }
    279279
     280        stats.header++;
     281
    280282        // It must be a Short read
    281283        this.len  -= this.f.result;
     
    289291        io_future_t f;
    290292        int fd; int pipe; size_t len; off_t off;
     293        short zipf_idx;
    291294        FSM_Result res;
    292295};
     
    297300        this.len = len;
    298301        this.off = 0;
     302        this.zipf_idx = -1;
     303        STATS: for(i; zipf_cnts) {
     304                if(len <= zipf_sizes[i]) {
     305                        this.zipf_idx = i;
     306                        break STATS;
     307                }
     308        }
      309        if(this.zipf_idx < 0) mutex(serr) serr | "SPLICE IN" | len | " greater than biggest zipf file";
    299310}
    300311
     
    312323}
    313324
    314 static inline int wait_and_process(splice_in_t & this) {
     325static inline int wait_and_process(splice_in_t & this, sendfile_stats_t & stats ) {
    315326        wait(this.f);
    316327
     
    328339                        return error(this.res, -ECONNRESET);
    329340                }
     341                mutex(serr) serr | "SPLICE IN got" | error | ", WTF!";
     342                return error(this.res, -ECONNRESET);
    330343        }
    331344
     
    340353                return done(this.res);
    341354        }
     355
     356        stats.splcin++;
     357        stats.avgrd[this.zipf_idx].calls++;
     358        stats.avgrd[this.zipf_idx].bytes += this.f.result;
    342359
    343360        // It must be a Short read
     
    381398}
    382399
    383 static inline void wait_and_process(splice_out_g & this) {
     400static inline void wait_and_process(splice_out_g & this, sendfile_stats_t & stats ) {
    384401        wait(this.f);
    385402
     
    397414                        return error(this, -ECONNRESET);
    398415                }
     416                mutex(serr) serr | "SPLICE OUT got" | error | ", WTF!";
     417                return error(this, -ECONNRESET);
    399418        }
    400419
     
    411430
    412431SHORT_WRITE:
     432        stats.splcot++;
     433
    413434        // It must be a Short Write
    414435        this.len -= this.f.result;
     
    417438}
    418439
    419 int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t fsize ) {
     440int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t fsize, sendfile_stats_t & stats ) {
     441        stats.calls++;
    420442        #if defined(LINKED_IO)
    421443                char buffer[512];
     
    426448
    427449                RETRY_LOOP: for() {
     450                        stats.tries++;
    428451                        int have = need(header.res) + need(splice_in.res) + 1;
    429452                        int idx = 0;
     
    444467                        // we may need to kill the connection if it fails
    445468                        // If it already completed, this is a no-op
    446                         wait_and_process(splice_in);
     469                        wait_and_process(splice_in, stats);
    447470
    448471                        if(is_error(splice_in.res)) {
     
    452475
    453476                        // Process the other 2
    454                         wait_and_process(header);
    455                         wait_and_process(splice_out);
     477                        wait_and_process(header, stats);
     478                        wait_and_process(splice_out, stats);
    456479
    457480                        if(is_done(splice_out.res)) {
     
    473496                return len + fsize;
    474497        #else
     498                stats.tries++;
    475499                int ret = answer_header(fd, fsize);
    476500                if( ret < 0 ) { close(fd); return ret; }
  • benchmark/io/http/protocol.hfa

    rba897d21 r2e9b59b  
    11#pragma once
     2
     3struct sendfile_stats_t;
    24
    35enum HttpCode {
     
    1820int answer_plaintext( int fd );
    1921int answer_empty( int fd );
    20 int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t count );
     22int answer_sendfile( int pipe[2], int fd, int ans_fd, size_t count, struct sendfile_stats_t & );
    2123
    2224[HttpCode code, bool closed, * const char file, size_t len] http_read(int fd, []char buffer, size_t len);
  • benchmark/io/http/worker.cfa

    rba897d21 r2e9b59b  
    2323        this.pipe[1] = -1;
    2424        this.done = false;
     25
     26        this.stats.sendfile.calls = 0;
     27        this.stats.sendfile.tries = 0;
     28        this.stats.sendfile.header = 0;
     29        this.stats.sendfile.splcin = 0;
     30        this.stats.sendfile.splcot = 0;
     31        for(i; zipf_cnts) {
     32                this.stats.sendfile.avgrd[i].calls = 0;
     33                this.stats.sendfile.avgrd[i].bytes = 0;
     34        }
    2535}
    2636
     
    123133
    124134                        // Send the desired file
    125                         int ret = answer_sendfile( this.pipe, fd, ans_fd, count);
     135                        int ret = answer_sendfile( this.pipe, fd, ans_fd, count, this.stats.sendfile );
    126136                        if( ret == -ECONNRESET ) break REQUEST;
    127137
  • benchmark/io/http/worker.hfa

    rba897d21 r2e9b59b  
    1111//=============================================================================================
    1212
     13extern const size_t zipf_sizes[];
     14enum { zipf_cnts = 36, };
     15
     16struct sendfile_stats_t {
     17        volatile uint64_t calls;
     18        volatile uint64_t tries;
     19        volatile uint64_t header;
     20        volatile uint64_t splcin;
     21        volatile uint64_t splcot;
     22        struct {
     23                volatile uint64_t calls;
     24                volatile uint64_t bytes;
     25        } avgrd[zipf_cnts];
     26};
     27
    1328thread Worker {
    1429        int pipe[2];
     
    1833        int flags;
    1934        volatile bool done;
     35        struct {
     36                sendfile_stats_t sendfile;
     37        } stats;
    2038};
    2139void ?{}( Worker & this);
  • benchmark/plot.py

    rba897d21 r2e9b59b  
    4040}
    4141
    42 def plot(data, x, y):
     42def plot(data, x, y, out):
    4343        fig, ax = plt.subplots()
    4444        colors = itertools.cycle(['#0095e3','#006cb4','#69df00','#0aa000','#fb0300','#e30002','#fd8f00','#ff7f00','#8f00d6','#4b009a','#ffff00','#b13f00'])
     
    6767        ax.yaxis.set_major_formatter( EngFormatter(unit=field_names[y].unit) )
    6868        plt.legend(loc='upper left')
    69         plt.show()
     69        if out:
     70                plt.savefig(out)
     71        else:
     72                plt.show()
    7073
    7174
     
    7578        parser = argparse.ArgumentParser(description='Python Script to draw R.M.I.T. results')
    7679        parser.add_argument('-f', '--file', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
     80        parser.add_argument('-o', '--out', nargs='?', type=str, default=None)
     81        parser.add_argument('-y', nargs='?', type=str, default="")
    7782
    7883        try:
     
    103108                        fields.add(label)
    104109
    105         print(series)
    106         print("fields")
    107         for f in fields:
    108                 print("{}".format(f))
     110        if not options.out :
     111                print(series)
     112                print("fields")
     113                for f in fields:
     114                        print("{}".format(f))
    109115
    110         plot(data, "Number of processors", "ns per ops")
     116        if options.y and options.y in field_names.keys():
     117                plot(data, "Number of processors", options.y, options.out)
     118        else:
     119                if options.y:
     120                        print("Could not find key '{}', defaulting to 'ns per ops'".format(options.y))
     121                plot(data, "Number of processors", "ns per ops", options.out)
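
With the new flags, an invocation such as ./plot.py -f results.out -y "ns per ops" -o plot.svg (file name hypothetical) selects the plotted field and renders to a file via savefig rather than opening an interactive window; an unrecognized -y key falls back to "ns per ops" with a warning.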
  • doc/LaTeXmacros/common.sty

    rba897d21 r2e9b59b  
    1111%% Created On       : Sat Apr  9 10:06:17 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Mon Feb  7 23:00:46 2022
    14 %% Update Count     : 569
     13%% Last Modified On : Sat Apr  2 17:35:23 2022
     14%% Update Count     : 570
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
     
    301301  {=>}{$\Rightarrow$}2
    302302  {->}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex\textgreater}2,
    303 defaultdialect={CFA},
    304303}% lstset
    305304}% CFAStyle
  • doc/LaTeXmacros/common.tex

    rba897d21 r2e9b59b  
    1111%% Created On       : Sat Apr  9 10:06:17 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Mon Feb  7 23:00:08 2022
    14 %% Update Count     : 552
     13%% Last Modified On : Sat Apr  2 16:42:31 2022
     14%% Update Count     : 553
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
     
    306306  {=>}{$\Rightarrow$}2
    307307  {->}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex\textgreater}2,
    308 defaultdialect={CFA},
    309308}% lstset
    310309}% CFAStyle
  • doc/theses/mubeen_zulfiqar_MMath/Makefile

    rba897d21 r2e9b59b  
    1 # directory for latex clutter files
     1# Configuration variables
     2
    23Build = build
    34Figures = figures
    45Pictures = pictures
     6
     7LaTMac = ../../LaTeXmacros
     8BibRep = ../../bibliography
     9
    510TeXSRC = ${wildcard *.tex}
    611FigSRC = ${notdir ${wildcard ${Figures}/*.fig}}
    712PicSRC = ${notdir ${wildcard ${Pictures}/*.fig}}
    8 BIBSRC = ${wildcard *.bib}
    9 TeXLIB = .:../../LaTeXmacros:${Build}: # common latex macros
    10 BibLIB = .:../../bibliography # common citation repository
     13BibSRC = ${wildcard *.bib}
     14
     15TeXLIB = .:${LaTMac}:${Build}:
     16BibLIB = .:${BibRep}:
    1117
    1218MAKEFLAGS = --no-print-directory # --silent
    1319VPATH = ${Build} ${Figures} ${Pictures} # extra search path for file names used in document
    1420
    15 ### Special Rules:
     21DOCUMENT = uw-ethesis.pdf
     22BASE = ${basename ${DOCUMENT}}                  # remove suffix
    1623
    17 .PHONY: all clean
    18 .PRECIOUS: %.dvi %.ps # do not delete intermediate files
    19 
    20 ### Commands:
     24# Commands
    2125
    2226LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
    23 BibTeX = BIBINPUTS=${BibLIB} bibtex
     27BibTeX = BIBINPUTS=${BibLIB} && export BIBINPUTS && bibtex
    2428#Glossary = INDEXSTYLE=${Build} makeglossaries-lite
    2529
    26 ### Rules and Recipes:
     30# Rules and Recipes
    2731
    28 DOC = uw-ethesis.pdf
    29 BASE = ${DOC:%.pdf=%} # remove suffix
     32.PHONY : all clean                              # not file names
     33.PRECIOUS: %.dvi %.ps # do not delete intermediate files
     34.ONESHELL :
    3035
    31 all: ${DOC}
     36all : ${DOCUMENT}
    3237
    33 clean:
    34         @rm -frv ${DOC} ${Build}
     38clean :
     39        @rm -frv ${DOCUMENT} ${Build}
    3540
    36 # File Dependencies #
     41# File Dependencies
    3742
    38 ${Build}/%.dvi : ${TeXSRC} ${FigSRC:%.fig=%.tex} ${PicSRC:%.fig=%.pstex} ${BIBSRC} Makefile | ${Build}
     43%.dvi : ${TeXSRC} ${FigSRC:%.fig=%.tex} ${PicSRC:%.fig=%.pstex} ${BibSRC} ${BibRep}/pl.bib ${LaTMac}/common.tex Makefile | ${Build}
    3944        ${LaTeX} ${BASE}
    4045        ${BibTeX} ${Build}/${BASE}
    4146        ${LaTeX} ${BASE}
    42         # if nedded, run latex again to get citations
     47        # if needed, run latex again to get citations
    4348        if fgrep -s "LaTeX Warning: Citation" ${basename $@}.log ; then ${LaTeX} ${BASE} ; fi
    4449#       ${Glossary} ${Build}/${BASE}
     
    4651
    4752${Build}:
    48         mkdir $@
     53        mkdir -p $@
    4954
    5055%.pdf : ${Build}/%.ps | ${Build}
  • doc/theses/mubeen_zulfiqar_MMath/allocator.tex

    rba897d21 r2e9b59b  
    11\chapter{Allocator}
    22
    3 \section{uHeap}
    4 uHeap is a lightweight memory allocator. The objective behind uHeap is to design a minimal concurrent memory allocator that has new features and also fulfills GNU C Library requirements (FIX ME: cite requirements).
    5 
    6 The objective of uHeap's new design was to fulfill following requirements:
    7 \begin{itemize}
    8 \item It should be concurrent and thread-safe for multi-threaded programs.
    9 \item It should avoid global locks, on resources shared across all threads, as much as possible.
    10 \item It's performance (FIX ME: cite performance benchmarks) should be comparable to the commonly used allocators (FIX ME: cite common allocators).
    11 \item It should be a lightweight memory allocator.
    12 \end{itemize}
     3This chapter presents a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code), called llheap (low-latency heap), for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
     4The new allocator fulfills the GNU C Library allocator API~\cite{GNUallocAPI}.
     5
     6
     7\section{llheap}
     8
      9The primary design objective for llheap is low latency across all allocator calls independent of application access-patterns and/or number of threads, \ie very seldom does the allocator have a delay during an allocator call.
     10(Large allocations requiring initialization, \eg zero fill, and/or copying are not covered by the low-latency objective.)
     11A direct consequence of this objective is very simple or no storage coalescing;
     12hence, llheap's design is willing to use more storage to lower latency.
      13This objective is apropos because systems research and industrial applications are striving for low latency and computers have huge amounts of RAM.
     14Finally, llheap's performance should be comparable with the current best allocators (see performance comparison in \VRef[Chapter]{c:Performance}).
     15
     16% The objective of llheap's new design was to fulfill following requirements:
     17% \begin{itemize}
     18% \item It should be concurrent and thread-safe for multi-threaded programs.
     19% \item It should avoid global locks, on resources shared across all threads, as much as possible.
     20% \item It's performance (FIX ME: cite performance benchmarks) should be comparable to the commonly used allocators (FIX ME: cite common allocators).
     21% \item It should be a lightweight memory allocator.
     22% \end{itemize}
    1323
    1424%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1525
     26<<<<<<< HEAD
    1627\section{Design choices for uHeap}\label{sec:allocatorSec}
    1728uHeap's design was reviewed and changed to fulfill new requirements (FIX ME: cite allocator philosophy). For this purpose, following two designs of uHeapLmm were proposed:
    18 
    19 \paragraph{Design 1: Centralized}
    20 One heap, but lower bucket sizes are N-shared across KTs.
    21 This design leverages the fact that 95\% of allocation requests are less than 512 bytes and there are only 3--5 different request sizes.
    22 When KTs $\le$ N, the important bucket sizes are uncontented.
    23 When KTs $>$ N, the free buckets are contented.
    24 Therefore, threads are only contending for a small number of buckets, which are distributed among them to reduce contention.
    25 \begin{cquote}
     29=======
     30\section{Design Choices}
     31>>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961
     32
     33llheap's design was reviewed and changed multiple times throughout the thesis.
     34Some of the rejected designs are discussed because they show the path to the final design (see discussion in \VRef{s:MultipleHeaps}).
      35Note, a few simple tests for a design choice were compared with the current best allocators to determine the viability of a design.
     36
     37
     38\subsection{Allocation Fastpath}
     39\label{s:AllocationFastpath}
     40
     41These designs look at the allocation/free \newterm{fastpath}, \ie when an allocation can immediately return free storage or returned storage is not coalesced.
     42\paragraph{T:1 model}
     43\VRef[Figure]{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are N-shared across KTs.
     44This design leverages the fact that 95\% of allocation requests are less than 1024 bytes and there are only 3--5 different request sizes.
      45When KTs $\le$ N, the common bucket sizes are uncontended;
      46when KTs $>$ N, the free buckets are contended and latency increases significantly.
      47In all cases, a KT must acquire/release a lock, contended or uncontended, along the fast allocation path because a bucket is shared.
      48Therefore, while threads are contending for a small number of bucket sizes, the buckets are distributed among them to reduce contention, which lowers latency;
     49however, picking N is workload specific.
     50
     51\begin{figure}
     52\centering
     53\input{AllocDS1}
     54\caption{T:1 with Shared Buckets}
     55\label{f:T1SharedBuckets}
     56\end{figure}
     57
     58Problems:
     59\begin{itemize}
     60\item
     61Need to know when a KT is created/destroyed to assign/unassign a shared bucket-number from the memory allocator.
     62\item
     63When no thread is assigned a bucket number, its free storage is unavailable.
     64\item
     65All KTs contend for the global-pool lock for initial allocations, before free-lists get populated.
     66\end{itemize}
      67Tests showed having locks along the allocation fast-path produced a significant increase in allocation costs, and any contention among KTs produced a significant spike in latency.
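
The fastpath cost described here is easy to see in a sketch. A minimal C rendering of one T:1 shared bucket, where even the uncontended case pays a lock acquire/release per operation (the names and the pthread lock choice are illustrative assumptions, not llheap code):

    #include <pthread.h>
    #include <stddef.h>

    typedef struct FreeNode { struct FreeNode * next; } FreeNode;

    typedef struct {                    // one bucket per size class, shared by KTs
        pthread_mutex_t lock;           // taken on every pop, contended or not
        FreeNode * head;
    } Bucket;

    // T:1 fastpath: the bucket is shared, so allocation must lock.
    static void * bucket_pop( Bucket * b ) {
        pthread_mutex_lock( &b->lock );
        FreeNode * n = b->head;
        if ( n ) b->head = n->next;
        pthread_mutex_unlock( &b->lock );
        return n;
    }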
     68
     69\paragraph{T:H model}
     70\VRef[Figure]{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded across the KTs.
     71A KT can point directly to its assigned heap or indirectly through the corresponding heap bucket.
      72When KTs $\le$ N, the heaps are uncontended;
      73when KTs $>$ N, the heaps are contended.
      74In all cases, a KT must acquire/release a lock, contended or uncontended, along the fast allocation path because a heap is shared.
     75By adjusting N upwards, this approach reduces contention but increases storage (time versus space);
     76however, picking N is workload specific.
     77
     78\begin{figure}
    2679\centering
    2780\input{AllocDS2}
    28 \end{cquote}
    29 Problems: need to know when a kernel thread (KT) is created and destroyed to know when to assign a shared bucket-number.
    30 When no thread is assigned a bucket number, its free storage is unavailable. All KTs will be contended for one lock on sbrk for their initial allocations (before free-lists gets populated).
    31 
    32 \paragraph{Design 2: Decentralized N Heaps}
    33 Fixed number of heaps: shard the heap into N heaps each with a bump-area allocated from the @sbrk@ area.
    34 Kernel threads (KT) are assigned to the N heaps.
    35 When KTs $\le$ N, the heaps are uncontented.
    36 When KTs $>$ N, the heaps are contented.
    37 By adjusting N, this approach reduces storage at the cost of speed due to contention.
    38 In all cases, a thread acquires/releases a lock, contented or uncontented.
    39 \begin{cquote}
    40 \centering
    41 \input{AllocDS1}
    42 \end{cquote}
    43 Problems: need to know when a KT is created and destroyed to know when to assign/un-assign a heap to the KT.
    44 
    45 \paragraph{Design 3: Decentralized Per-thread Heaps}
    46 Design 3 is similar to design 2 but instead of having an M:N model, it uses a 1:1 model. So, instead of having N heaos and sharing them among M KTs, Design 3 has one heap for each KT.
    47 Dynamic number of heaps: create a thread-local heap for each kernel thread (KT) with a bump-area allocated from the @sbrk@ area.
    48 Each KT will have its own exclusive thread-local heap. Heap will be uncontended between KTs regardless how many KTs have been created.
    49 Operations on @sbrk@ area will still be protected by locks.
    50 %\begin{cquote}
    51 %\centering
    52 %\input{AllocDS3} FIXME add figs
    53 %\end{cquote}
    54 Problems: We cannot destroy the heap when a KT exits because our dynamic objects have ownership and they are returned to the heap that created them when the program frees a dynamic object. All dynamic objects point back to their owner heap. If a thread A creates an object O, passes it to another thread B, and A itself exits. When B will free object O, O should return to A's heap so A's heap should be preserved for the lifetime of the whole program as their might be objects in-use of other threads that were allocated by A. Also, we need to know when a KT is created and destroyed to know when to create/destroy a heap for the KT.
    55 
    56 \paragraph{Design 4: Decentralized Per-CPU Heaps}
    57 Design 4 is similar to Design 3 but instead of having a heap for each thread, it creates a heap for each CPU.
    58 Fixed number of heaps for a machine: create a heap for each CPU with a bump-area allocated from the @sbrk@ area.
    59 Each CPU will have its own CPU-local heap. When the program does a dynamic memory operation, it will be entertained by the heap of the CPU where the process is currently running on.
    60 Each CPU will have its own exclusive heap. Just like Design 3(FIXME cite), heap will be uncontended between KTs regardless how many KTs have been created.
    61 Operations on @sbrk@ area will still be protected by locks.
    62 To deal with preemtion during a dynamic memory operation, librseq(FIXME cite) will be used to make sure that the whole dynamic memory operation completes on one CPU. librseq's restartable sequences can make it possible to re-run a critical section and undo the current writes if a preemption happened during the critical section's execution.
    63 %\begin{cquote}
    64 %\centering
    65 %\input{AllocDS4} FIXME add figs
    66 %\end{cquote}
    67 
    68 Problems: This approach was slower than the per-thread model. Also, librseq does not provide such restartable sequences to detect preemtions in user-level threading system which is important to us as CFA(FIXME cite) has its own threading system that we want to support.
    69 
    70 Out of the four designs, Design 3 was chosen because of the following reasons.
    71 \begin{itemize}
    72 \item
    73 Decentralized designes are better in general as compared to centralized design because their concurrency is better across all bucket-sizes as design 1 shards a few buckets of selected sizes while other designs shards all the buckets. Decentralized designes shard the whole heap which has all the buckets with the addition of sharding sbrk area. So Design 1 was eliminated.
    74 \item
    75 Design 2 was eliminated because it has a possibility of contention in-case of KT > N while Design 3 and 4 have no contention in any scenerio.
    76 \item
    77 Design 4 was eliminated because it was slower than Design 3 and it provided no way to achieve user-threading safety using librseq. We had to use CFA interruption handling to achive user-threading safety which has some cost to it. Desing 4 was already slower than Design 3, adding cost of interruption handling on top of that would have made it even slower.
    78 \end{itemize}
    79 
    80 
    81 \subsection{Advantages of distributed design}
    82 
    83 The distributed design of uHeap is concurrent to work in multi-threaded applications.
    84 
    85 Some key benefits of the distributed design of uHeap are as follows:
    86 
    87 \begin{itemize}
    88 \item
    89 The bump allocation is concurrent as memory taken from sbrk is sharded across all heaps as bump allocation reserve. The call to sbrk will be protected using locks but bump allocation (on memory taken from sbrk) will not be contended once the sbrk call has returned.
    90 \item
    91 Low or almost no contention on heap resources.
    92 \item
    93 It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty.
    94 \item
    95 Distributed design avoids unnecassry locks on resources shared across all KTs.
    96 \end{itemize}
     81\caption{T:H with Shared Heaps}
     82\label{f:THSharedHeaps}
     83\end{figure}
     84
     85Problems:
     86\begin{itemize}
     87\item
     88Need to know when a KT is created/destroyed to assign/unassign a heap from the memory allocator.
     89\item
     90When no thread is assigned to a heap, its free storage is unavailable.
     91\item
     92Ownership issues arise (see \VRef{s:Ownership}).
     93\item
     94All KTs contend for the local/global-pool lock for initial allocations, before free-lists get populated.
     95\end{itemize}
     96Tests showed having locks along the allocation fast-path produced a significant increase in allocation costs and any contention among KTs produces a significant spike in latency.
     97
     98\paragraph{T:H model, H = number of CPUs}
     99This design is the T:H model but H is set to the number of CPUs on the computer or the number restricted to an application, \eg via @taskset@.
     100(See \VRef[Figure]{f:THSharedHeaps} but with a heap bucket per CPU.)
     101Hence, each CPU logically has its own private heap and local pool.
     102A memory operation is serviced from the heap associated with the CPU executing the operation.
     103This approach removes fastpath locking and contention, regardless of the number of KTs mapped across the CPUs, because only one KT is running on each CPU at a time (modulo operations on the global pool and ownership).
      104This approach is essentially an M:N model, where M is the number of KTs and N is the number of CPUs.
     105
     106Problems:
     107\begin{itemize}
     108\item
     109Need to know when a CPU is added/removed from the @taskset@.
     110\item
     111Need a fast way to determine the CPU a KT is executing on to access the appropriate heap.
     112\item
     113Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}.
     114\begin{quote}
     115A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}
     116\end{quote}
     117If a KT is preempted during an allocation operation, the operating system can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.
     118Note, the serially-reusable problem can occur in sequential programs with preemption, if the signal handler calls the preempted function, unless the function is serially reusable.
     119Essentially, the serially-reusable problem is a race condition on an unprotected critical section, where the operating system is providing the second thread via the signal handler.
     120
     121Library @librseq@~\cite{librseq} was used to perform a fast determination of the CPU and to ensure all memory operations complete on one CPU using @librseq@'s restartable sequences, which restart the critical section after undoing its writes, if the critical section is preempted.
     122\end{itemize}
     123Tests showed that @librseq@ can determine the particular CPU quickly but setting up the restartable critical-section along the allocation fast-path produced a significant increase in allocation costs.
     124Also, the number of undoable writes in @librseq@ is limited and restartable sequences cannot deal with user-level thread (UT) migration across KTs.
     125For example, UT$_1$ is executing a memory operation by KT$_1$ on CPU$_1$ and a time-slice preemption occurs.
     126The signal handler context switches UT$_1$ onto the user-level ready-queue and starts running UT$_2$ on KT$_1$, which immediately calls a memory operation.
     127Since KT$_1$ is still executing on CPU$_1$, @librseq@ takes no action because it assumes KT$_1$ is still executing the same critical section.
     128Then UT$_1$ is scheduled onto KT$_2$ by the user-level scheduler, and its memory operation continues in parallel with UT$_2$ using references into the heap associated with CPU$_1$, which corrupts CPU$_1$'s heap.
     129If @librseq@ had an @rseq_abort@ which:
     130\begin{enumerate}
     131\item
     132Marked the current restartable critical-section as cancelled so it restarts when attempting to commit.
     133\item
      134Did nothing if there is no current restartable critical-section in progress.
     135\end{enumerate}
      136Then @rseq_abort@ could be called on the backside of a user-level context switch.
     137A feature similar to this idea might exist for hardware transactional-memory.
     138A significant effort was made to make this approach work but its complexity, lack of robustness, and performance costs resulted in its rejection.
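
The serially-reusable problem is visible in a plain C sketch of the T:H=CPU lookup: sched_getcpu is the fast CPU query, and the commented window is the race that librseq's restartable sequences close (cpu_heaps and heap_alloc are hypothetical names):

    #define _GNU_SOURCE
    #include <sched.h>                  // sched_getcpu
    #include <stddef.h>

    typedef struct Heap Heap;
    extern Heap * cpu_heaps[];          // one private heap per CPU (hypothetical)
    extern void * heap_alloc( Heap * h, size_t size );   // hypothetical

    void * percpu_malloc( size_t size ) {
        int cpu = sched_getcpu();       // fast, but only a snapshot
        // RACE: a preemption here lets the OS run another KT on this CPU,
        // which can start an allocation on cpu_heaps[cpu] before this one
        // completes -- the serially-reusable problem.  librseq restarts the
        // critical section on preemption; without it this lookup is unsafe.
        return heap_alloc( cpu_heaps[cpu], size );
    }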
     139
     140\paragraph{1:1 model}
     141This design is the T:H model with T = H, where there is one thread-local heap for each KT.
     142(See \VRef[Figure]{f:THSharedHeaps} but with a heap bucket per KT and no bucket or local-pool lock.)
     143Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted.
      144Heaps are uncontended for a KT's memory operations to its heap (modulo operations on the global pool and ownership).
     145
     146Problems:
     147\begin{itemize}
     148\item
      149Need to know when a KT starts/terminates to create/delete its heap.
     150
     151\noindent
     152It is possible to leverage constructors/destructors for thread-local objects to get a general handle on when a KT starts/terminates.
     153\item
     154There is a classic \newterm{memory-reclamation} problem for ownership because storage passed to another thread can be returned to a terminated heap.
     155
     156\noindent
     157The classic solution only deletes a heap after all referents are returned, which is complex.
     158The cheap alternative is for heaps to persist for program duration to handle outstanding referent frees.
     159If old referents return storage to a terminated heap, it is handled in the same way as an active heap.
     160To prevent heap blowup, terminated heaps can be reused by new KTs, where a reused heap may be populated with free storage from a prior KT (external fragmentation).
     161In most cases, heap blowup is not a problem because programs have a small allocation set-size, so the free storage from a prior KT is apropos for a new KT.
     162\item
     163There can be significant external fragmentation as the number of KTs increases.
     164
     165\noindent
     166In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs.
      167Since the number of CPUs is relatively small, <~1024, and a heap relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
     168\item
     169There is the same serially-reusable problem with UTs migrating across KTs.
     170\end{itemize}
     171Tests showed this design produced the closest performance match with the best current allocators, and code inspection showed most of these allocators use different variations of this approach.
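
A C sketch of the 1:1 lifecycle hooks alluded to above, using a pthread key destructor to recycle a heap when its KT terminates (acquire_heap/release_heap are hypothetical; llheap's actual mechanism may differ):

    #include <pthread.h>

    typedef struct Heap Heap;
    extern Heap * acquire_heap( void );   // reuse a free heap or create one
    extern void release_heap( Heap * );   // chain heap onto the global free list

    static pthread_key_t heap_key;
    static pthread_once_t heap_once = PTHREAD_ONCE_INIT;
    static __thread Heap * my_heap;       // uncontended fastpath access

    static void on_kt_exit( void * h ) { release_heap( h ); }  // KT terminates
    static void make_key( void ) { pthread_key_create( &heap_key, on_kt_exit ); }

    Heap * get_heap( void ) {             // called on the allocation path
        if ( ! my_heap ) {                // KT's first allocation
            pthread_once( &heap_once, make_key );
            my_heap = acquire_heap();
            pthread_setspecific( heap_key, my_heap );  // arm the exit hook
        }
        return my_heap;
    }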
     172
     173
     174\vspace{5pt}
     175\noindent
     176The conclusion from this design exercise is: any atomic fence, atomic instruction (lock free), or lock along the allocation fastpath produces significant slowdown.
      177For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps may be shared by multiple threads, even when KTs $\le$ N.
     178For the T:H=CPU and 1:1 models, locking is eliminated along the allocation fastpath.
     179However, T:H=CPU has poor operating-system support to determine the CPU id (heap id) and prevent the serially-reusable problem for KTs.
     180More operating system support is required to make this model viable, but there is still the serially-reusable problem with user-level threading.
      181This leaves the 1:1 model, with no atomic actions along the fastpath and no special operating-system support required.
     182The 1:1 model still has the serially-reusable problem with user-level threading, which is addressed in \VRef{s:UserlevelThreadingSupport}, and the greatest potential for heap blowup for certain allocation patterns.
     183
     184
     185% \begin{itemize}
     186% \item
     187% A decentralized design is better to centralized design because their concurrency is better across all bucket-sizes as design 1 shards a few buckets of selected sizes while other designs shards all the buckets. Decentralized designs shard the whole heap which has all the buckets with the addition of sharding @sbrk@ area. So Design 1 was eliminated.
     188% \item
     189% Design 2 was eliminated because it has a possibility of contention in-case of KT > N while Design 3 and 4 have no contention in any scenario.
     190% \item
     191% Design 3 was eliminated because it was slower than Design 4 and it provided no way to achieve user-threading safety using librseq. We had to use CFA interruption handling to achieve user-threading safety which has some cost to it.
     192% that  because of 4 was already slower than Design 3, adding cost of interruption handling on top of that would have made it even slower.
     193% \end{itemize}
     194% Of the four designs for a low-latency memory allocator, the 1:1 model was chosen for the following reasons:
     195
     196% \subsection{Advantages of distributed design}
     197%
     198% The distributed design of llheap is concurrent to work in multi-threaded applications.
     199% Some key benefits of the distributed design of llheap are as follows:
     200% \begin{itemize}
     201% \item
     202% The bump allocation is concurrent as memory taken from @sbrk@ is sharded across all heaps as bump allocation reserve. The call to @sbrk@ will be protected using locks but bump allocation (on memory taken from @sbrk@) will not be contended once the @sbrk@ call has returned.
     203% \item
     204% Low or almost no contention on heap resources.
     205% \item
     206% It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty.
     207% \item
     208% Distributed design avoids unnecessary locks on resources shared across all KTs.
     209% \end{itemize}
     210
     211\subsection{Allocation Latency}
     212
     213A primary goal of llheap is low latency.
     214Two forms of latency are internal and external.
     215Internal latency is the time to perform an allocation, while external latency is time to obtain/return storage from/to the operating system.
     216Ideally latency is $O(1)$ with a small constant.
     217
      218To obtain $O(1)$ internal latency means no searching on the allocation fastpath, which largely prohibits coalescing and leads to external fragmentation.
     219The mitigating factor is that most programs have well behaved allocation patterns, where the majority of allocation operations can be $O(1)$, and heap blowup does not occur without coalescing (although the allocation footprint may be slightly larger).
     220
      221To obtain $O(1)$ external latency means obtaining one large storage area from the operating system and subdividing it across all program allocations, which requires a good guess at the program storage high-watermark and risks large external fragmentation.
     222Excluding real-time operating-systems, operating-system operations are unbounded, and hence some external latency is unavoidable.
     223The mitigating factor is that operating-system calls can often be reduced if a programmer has a sense of the storage high-watermark and the allocator is capable of using this information (see @malloc_expansion@ \VPageref{p:malloc_expansion}).
     224Furthermore, while operating-system calls are unbounded, many are now reasonably fast, so their latency is tolerable and infrequent.
     225
    97226
    98227%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    99228
    100 \section{uHeap Structure}
    101 
    102 As described in (FIXME cite 2.4) uHeap uses following features of multi-threaded memory allocators.
    103 \begin{itemize}
    104 \item
    105 uHeap has multiple heaps without a global heap and uses 1:1 model. (FIXME cite 2.5 1:1 model)
    106 \item
    107 uHeap uses object ownership. (FIXME cite 2.5.2)
    108 \item
    109 uHeap does not use object containers (FIXME cite 2.6) or any coalescing technique. Instead each dynamic object allocated by uHeap has a header than contains bookkeeping information.
    110 \item
    111 Each thread-local heap in uHeap has its own allocation buffer that is taken from the system using sbrk() call. (FIXME cite 2.7)
    112 \item
    113 Unless a heap is freeing an object that is owned by another thread's heap or heap is using sbrk() system call, uHeap is mostly lock-free which eliminates most of the contention on shared resources. (FIXME cite 2.8)
    114 \end{itemize}
    115 
    116 As uHeap uses a heap per-thread model to reduce contention on heap resources, we manage a list of heaps (heap-list) that can be used by threads. The list is empty at the start of the program. When a kernel thread (KT) is created, we check if heap-list is empty. If no then a heap is removed from the heap-list and is given to this new KT to use exclusively. If yes then a new heap object is created in dynamic memory and is given to this new KT to use exclusively. When a KT exits, its heap is not destroyed but instead its heap is put on the heap-list and is ready to be reused by new KTs.
    117 
    118 This reduces the memory footprint as the objects on free-lists of a KT that has exited can be reused by a new KT. Also, we preserve all the heaps that were created during the lifetime of the program till the end of the program. uHeap uses object ownership where an object is freed to the free-buckets of the heap that allocated it. Even after a KT A has exited, its heap has to be preserved as there might be objects in-use of other threads that were initially allocated by A and the passed to other threads.
     229\section{llheap Structure}
     230
     231\VRef[Figure]{f:llheapStructure} shows the design of llheap, which uses the following features:
     232\begin{itemize}
     233\item
     2341:1 multiple-heap model to minimize the fastpath,
     235\item
     236can be built with or without heap ownership,
     237\item
     238headers per allocation versus containers,
     239\item
     240no coalescing to minimize latency,
     241\item
     242global heap memory (pool) obtained from the operating system using @mmap@ to create and reuse heaps needed by threads,
     243\item
     244local reserved memory (pool) per heap obtained from global pool,
     245\item
     246global reserved memory (pool) obtained from the operating system using @sbrk@ call,
     247\item
     248optional fast-lookup table for converting allocation requests into bucket sizes,
     249\item
     250optional statistic-counters table for accumulating counts of allocation operations.
     251\end{itemize}
    119252
    120253\begin{figure}
    121254\centering
     255<<<<<<< HEAD
    122256\includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps}
    123257\caption{uHeap Structure}
    124258\label{fig:heapStructureFig}
     259=======
     260% \includegraphics[width=0.65\textwidth]{figures/NewHeapStructure.eps}
     261\input{llheap}
     262\caption{llheap Structure}
     263\label{f:llheapStructure}
     264>>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961
    125265\end{figure}
    126266
    127 Each heap uses seggregated free-buckets that have free objects of a specific size. Each free-bucket of a specific size has following 2 lists in it:
    128 \begin{itemize}
    129 \item
    130 Free list is used when a thread is freeing an object that is owned by its own heap so free list does not use any locks/atomic-operations as it is only used by the owner KT.
    131 \item
    132 Away list is used when a thread A is freeing an object that is owned by another KT B's heap. This object should be freed to the owner heap (B's heap) so A will place the object on the away list of B. Away list is lock protected as it is shared by all other threads.
    133 \end{itemize}
    134 
    135 When a dynamic object of a size S is requested. The thread-local heap will check if S is greater than or equal to the mmap threshhold. Any request larger than the mmap threshhold is fulfilled by allocating an mmap area of that size and such requests are not allocated on sbrk area. The value of this threshhold can be changed using mallopt routine but the new value should not be larger than our biggest free-bucket size.
    136 
    137 Algorithm~\ref{alg:heapObjectAlloc} briefly shows how an allocation request is fulfilled.
    138 
    139 \begin{algorithm}
    140 \caption{Dynamic object allocation of size S}\label{alg:heapObjectAlloc}
      267llheap starts by creating an array of $N$ global heaps, where $N$ is the number of computer cores, from storage obtained using @mmap@ that persists for program duration.
     268There is a global bump-pointer to the next free heap in the array.
     269When this array is exhausted, another array is allocated.
     270There is a global top pointer for a heap intrusive link to chain free heaps from terminated threads.
     271When statistics are turned on, there is a global top pointer for a heap intrusive link to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.
     272
     273When a KT starts, a heap is allocated from the current array for exclusive use by the KT.
     274When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of heaps.
      275The free heaps form a stack, so hot storage is reused first.
      276Preserving all heaps created during the program lifetime solves the storage-lifetime problem when ownership is used.
     277This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially.
     278llheap can be configured with object ownership, where an object is freed to the heap from which it is allocated, or object no-ownership, where an object is freed to the KT's current heap.
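
The global bookkeeping just described reduces to a bump pointer into an mmap'd array plus an intrusive stack of free heaps. A C sketch under those assumptions (the global-pool lock is elided and all names are hypothetical):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <stddef.h>

    typedef struct Heap {
        struct Heap * next;             // intrusive link: free list / statistics list
        // ... free buckets, local reserved pool ...
    } Heap;

    static Heap * heap_array;           // current mmap'd array of N heaps
    static long next_heap, N;           // bump pointer and array size
    static Heap * free_heaps;           // stack of heaps from terminated KTs

    static Heap * acquire_heap( void ) {          // caller holds the global-pool lock
        if ( free_heaps ) {                       // hot heaps from terminated KTs first
            Heap * h = free_heaps;
            free_heaps = h->next;
            return h;
        }
        if ( ! heap_array || next_heap == N ) {   // array exhausted: allocate another
            N = sysconf( _SC_NPROCESSORS_ONLN );  // number of cores
            heap_array = mmap( NULL, N * sizeof(Heap), PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
            next_heap = 0;
        }
        return &heap_array[next_heap++];
    }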
     279
     280Each heap uses segregated free-buckets that have free objects distributed across 91 different sizes from 16 to 4M.
     281The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the operating system.
     282Each free bucket of a specific size has the following two lists:
     283\begin{itemize}
     284\item
     285A free stack used solely by the KT heap-owner, so push/pop operations do not require locking.
      286The free objects form a stack, so hot storage is reused first.
     287\item
     288For ownership, a shared away-stack for KTs to return storage allocated by other KTs, so push/pop operations require locking.
     289When the free stack is empty, the entire ownership stack is removed and becomes the head of the corresponding free stack.
     290\end{itemize}
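
The two per-bucket lists might look like the following C sketch: the owner pops without locking, remote KTs push under a lock, and an empty free stack steals the entire away stack at once (a mutex stands in for whatever lock llheap uses; names are hypothetical):

    #include <pthread.h>

    typedef struct FreeNode { struct FreeNode * next; } FreeNode;

    typedef struct {
        FreeNode * free_head;           // owner-only: no locking needed
        pthread_mutex_t away_lock;      // protects pushes from other KTs
        FreeNode * away_head;
    } Bucket;

    // Owner allocation path: LIFO pop keeps hot storage in use.
    static FreeNode * bucket_pop( Bucket * b ) {
        if ( ! b->free_head ) {         // free stack empty: take whole away stack
            pthread_mutex_lock( &b->away_lock );
            b->free_head = b->away_head;
            b->away_head = NULL;
            pthread_mutex_unlock( &b->away_lock );
        }
        FreeNode * n = b->free_head;
        if ( n ) b->free_head = n->next;
        return n;
    }

    // Remote free path: another KT returns storage to the owning heap.
    static void bucket_push_away( Bucket * b, FreeNode * n ) {
        pthread_mutex_lock( &b->away_lock );
        n->next = b->away_head;
        b->away_head = n;
        pthread_mutex_unlock( &b->away_lock );
    }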
     291
     292Algorithm~\ref{alg:heapObjectAlloc} shows the allocation outline for an object of size $S$.
     293First, the allocation is divided into small (@sbrk@) or large (@mmap@).
     294For large allocations, the storage is mapped directly from the operating system.
     295For small allocations, $S$ is quantized into a bucket size.
     296Quantizing is performed using a binary search over the ordered bucket array.
     297An optional optimization is fast lookup $O(1)$ for sizes < 64K from a 64K array of type @char@, where each element has an index to the corresponding bucket.
     298(Type @char@ restricts the number of bucket sizes to 256.)
     299For $S$ > 64K, a binary search is used.
     300Then, the allocation storage is obtained from the following locations (in order), with increasing latency.
     301\begin{enumerate}[topsep=0pt,itemsep=0pt,parsep=0pt]
     302\item
     303bucket's free stack,
     304\item
     305bucket's away stack,
     306\item
      307heap's local pool,
      308\item
      309global pool,
      310\item
      311operating system (@sbrk@).
     312\end{enumerate}
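
The quantization step can be sketched in C as a binary search with the optional 64K char table in front (the bucket values are elided and all names are hypothetical):

    #include <stddef.h>

    enum { NBUCKETS = 91, FAST_LIMIT = 64 * 1024 };

    extern const size_t bucket_sizes[NBUCKETS];  // ordered, 16 ... 4M (values elided)
    static unsigned char fast_idx[FAST_LIMIT];   // size -> bucket index, built once
                                                 // at startup; char caps buckets at 256

    // Smallest bucket >= s, by binary search over the ordered array.
    static int bucket_search( size_t s ) {
        int lo = 0, hi = NBUCKETS - 1;
        while ( lo < hi ) {
            int mid = lo + ( hi - lo ) / 2;
            if ( bucket_sizes[mid] < s ) lo = mid + 1; else hi = mid;
        }
        return lo;
    }

    // O(1) fast lookup for sizes < 64K, binary search for the rest.
    static int quantize( size_t s ) {
        return s < FAST_LIMIT ? fast_idx[s] : bucket_search( s );
    }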
     313
     314\begin{figure}
     315\vspace*{-10pt}
     316\begin{algorithm}[H]
     317\small
     318\caption{Dynamic object allocation of size $S$}\label{alg:heapObjectAlloc}
    141319\begin{algorithmic}[1]
    142320\State $\textit{O} \gets \text{NULL}$
    143 \If {$S < \textit{mmap-threshhold}$}
    144         \State $\textit{B} \gets (\text{smallest free-bucket} \geq S)$
      321\If {$S \geq \textit{mmap-threshold}$}
     322        \State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$
     323\Else
     324        \State $\textit{B} \gets \text{smallest free-bucket} \geq S$
    145325        \If {$\textit{B's free-list is empty}$}
    146326                \If {$\textit{B's away-list is empty}$}
    147327                        \If {$\textit{heap's allocation buffer} < S$}
    148                                 \State $\text{get allocation buffer using system call sbrk()}$
     328                                \State $\text{get allocation from global pool (which might call \lstinline{sbrk})}$
    149329                        \EndIf
    150330                        \State $\textit{O} \gets \text{bump allocate an object of size S from allocation buffer}$
     
    157337        \EndIf
    158338        \State $\textit{O's owner} \gets \text{B}$
    159 \Else
    160         \State $\textit{O} \gets \text{allocate dynamic memory using system call mmap with size S}$
    161339\EndIf
    162340\State $\Return \textit{ O}$
     
    164342\end{algorithm}
    165343
     344<<<<<<< HEAD
    166345Algorithm~\ref{alg:heapObjectFreeOwn} shows how a free request is fulfilled if object ownership is turned on. Algorithm~\ref{alg:heapObjectFreeNoOwn} shows how the same free request is fulfilled without object ownership.
    167346
     
    171350\If {$\textit{A was mmap-ed}$}
    172351        \State $\text{return A's dynamic memory to system using system call munmap}$
     352=======
     353\vspace*{-15pt}
     354\begin{algorithm}[H]
     355\small
     356\caption{Dynamic object free at address $A$ with object ownership}\label{alg:heapObjectFreeOwn}
     357\begin{algorithmic}[1]
     358\If {$\textit{A mapped allocation}$}
     359        \State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
     360>>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961
    173361\Else
    174362        \State $\text{B} \gets \textit{O's owner}$
     
    181369\end{algorithmic}
    182370\end{algorithm}
     371<<<<<<< HEAD
    183372
    184373\begin{algorithm}
     
    199388\end{algorithm}
    200389
     390=======
     391>>>>>>> bb7c77dc425e289ed60aa638529b3e5c7c3e4961

\vspace*{-15pt}
\begin{algorithm}[H]
\small
\caption{Dynamic object free at address $A$ without object ownership}\label{alg:heapObjectFreeNoOwn}
\begin{algorithmic}[1]
\If {$\textit{A mapped allocation}$}
	\State $\text{return A's dynamic memory to system using system call \lstinline{munmap}}$
\Else
	\State $\text{B} \gets \textit{O's owner}$
	\If {$\textit{B is thread-local heap's bucket}$}
		\State $\text{push A to B's free-list}$
	\Else
		\State $\text{C} \gets \textit{thread local heap's bucket with same size as B}$
		\State $\text{push A to C's free-list}$
	\EndIf
\EndIf
\end{algorithmic}
\end{algorithm}
\end{figure}

Algorithm~\ref{alg:heapObjectFreeOwn} shows the deallocation (free) outline for an object at address $A$ with ownership.
First, the address is classified as small (@sbrk@) or large (@mmap@).
For large allocations, the storage is unmapped back to the operating system.
For small allocations, the bucket associated with the request size is retrieved.
If the bucket is local to the thread, the allocation is pushed onto the bucket's free stack.
If the bucket is not local to the thread, the allocation is pushed onto the owning thread's associated away stack.

Algorithm~\ref{alg:heapObjectFreeNoOwn} shows the deallocation (free) outline for an object at address $A$ without ownership.
The algorithm is the same as for ownership except when the bucket is not local to the thread.
Then, the deallocating thread finds its own local bucket with the same size as the owning bucket, and the allocation is pushed onto that bucket's free stack.

Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through the routines @malloc@/@free@, which are the only routines to directly access and manage the internal data structures of the heap.
Other allocation operations, \eg @calloc@, @memalign@, and @realloc@, are composed of calls to @malloc@ and possibly @free@, and may manipulate header information after storage is allocated.
This design simplifies heap-management code during development and maintenance.
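
For example, @calloc@ can be composed from the @malloc@ funnel; the following is a minimal sketch (overflow checking and setting the sticky zero-fill bit are noted but elided), not llheap's actual implementation:
\begin{lstlisting}
void * calloc( size_t dim, size_t elemSize ) {
	size_t size = dim * elemSize;			// multiplication overflow check elided
	void * addr = malloc( size );			// funnel routine manages heap data structures
	if ( addr != NULL ) memset( addr, 0, size );	// zero fill after allocation
	// a real implementation also sets the sticky zero-fill bit in the header
	return addr;
}
\end{lstlisting}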

\subsection{Alignment}

All dynamic memory allocations must have a minimum storage alignment for the contained object(s).
Often the minimum memory alignment, M, is the bus width (32 or 64-bit), the largest register (double, long double), the largest atomic instruction (DCAS), or vector data (MMX).
In general, the minimum storage alignment is an 8/16-byte boundary on 32/64-bit computers.
For consistency, the object header is normally aligned at this same boundary.
Larger alignments must be a power of 2, such as page alignment (4/8K).
Any alignment request, N, $\le$ the minimum alignment is handled as a normal allocation with minimal alignment.

For alignments greater than the minimum, the obvious approach for aligning to address @A@ is: compute the next address that is a multiple of @N@ after the current end of the heap, @E@, plus room for the header before @A@ and the size of the allocation after @A@, moving the end of the heap to @E'@.
\begin{center}
\input{Alignment1}
\end{center}
The storage between @E@ and @H@ is chained onto the appropriate free list for future allocations.
This approach is also valid within any sufficiently large free block, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage.
In this approach, the aligned address @A@ is the same as the allocated storage address @P@, \ie @P@ $=$ @A@ for all allocation routines, which simplifies deallocation.
However, if there are a large number of aligned requests, this approach leads to memory fragmentation from the small free areas around the aligned object.
As well, it does not work for large allocations, where many memory allocators switch from program @sbrk@ to operating-system @mmap@.
The reason is that @mmap@ only starts on a page boundary, and it is difficult to reuse the storage before the alignment boundary for other requests.
Finally, this approach is incompatible with allocator designs that funnel allocation requests through @malloc@, as it directly manipulates management information within the allocator to optimize the space/time of a request.

Instead, llheap alignment is accomplished by making a \emph{pessimistic} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg:
\begin{center}
\input{Alignment2}
\end{center}
The amount of storage necessary is @alignment - M + size@, which ensures there is an address, @A@, after the storage returned from @malloc@, @P@, that is a multiple of @alignment@ followed by sufficient storage for the data object.
The approach is pessimistic because if @P@ already has the correct alignment @N@, the initial allocation has still requested sufficient space to move to the next multiple of @N@.
For this special case, there are @alignment - M@ bytes of unused storage after the data object, which subsequently can be used by @realloc@.

Note, the address returned is @A@, which is subsequently returned to @free@.
However, to correctly free the allocated object, the value @P@ must be computable, since that is the value generated by @malloc@ and returned within @memalign@.
Hence, there must be a mechanism to detect when @P@ $\neq$ @A@ and how to compute @P@ from @A@.

The llheap approach uses two headers:
the \emph{original} header associated with a memory allocation from @malloc@, and a \emph{fake} header within this storage before the alignment boundary @A@, which is returned from @memalign@, \eg:
\begin{center}
\input{Alignment2Impl}
\end{center}
Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments greater than @M@.
When @P@ $\neq$ @A@, the minimum distance between @P@ and @A@ is @M@ bytes, due to the pessimistic storage allocation.
Therefore, there is always room for an @M@-byte fake header before @A@.

The fake header must supply an indicator to distinguish it from a normal header and the location of address @P@ generated by @malloc@.
This information is encoded as an offset from @A@ to @P@ and the initial alignment (discussed in \VRef{s:ReallocStickyProperties}).
To distinguish a fake header from a normal header, the least-significant bit of the alignment is used because the offset participates in multiple calculations, while the alignment is just remembered data.
\begin{center}
\input{FakeHeader}
\end{center}
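
The following sketch outlines the two-header technique, assuming a minimum alignment @M@ of 16 and an illustrative fake-header encoding; llheap's actual header layout differs in detail:
\begin{lstlisting}
enum { M = 16 };					// assumed minimum allocation alignment
void * memalign( size_t alignment, size_t size ) {
	if ( alignment <= M ) return malloc( size );	// normal allocation suffices
	char * P = malloc( alignment - M + size );	// pessimistic allocation request
	if ( P == NULL ) return NULL;
	// next multiple of alignment at or after P (alignment is a power of 2)
	char * A = (char *)(((uintptr_t)P + alignment - 1) & ~(uintptr_t)(alignment - 1));
	if ( A != P ) {					// A - P >= M, so a fake header fits before A
		((size_t *)A)[-1] = alignment | 1;	// remember alignment; low bit marks fake header
		((size_t *)A)[-2] = (size_t)(A - P);	// offset to recover P
	}
	return A;
}
\end{lstlisting}
On deallocation, @free@ tests the marker bit of a possible fake header; if set, the offset is subtracted from @A@ to recover @P@.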


\subsection{\lstinline{realloc} and Sticky Properties}
\label{s:ReallocStickyProperties}

The allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually.
\begin{flushleft}
\begin{tabular}{ll}
\multicolumn{1}{c}{\textbf{realloc pattern}} & \multicolumn{1}{c}{\textbf{manually}} \\
\begin{lstlisting}
T * naddr = realloc( oaddr, newSize );



\end{lstlisting}
&
\begin{lstlisting}
T * naddr = (T *)malloc( newSize ); $\C[2.4in]{// new storage}$
memcpy( naddr, oaddr, oldSize );  $\C{// copy old bytes}$
free( oaddr );                          $\C{// free old storage}$
oaddr = naddr;                          $\C{// change pointer}\CRT$
\end{lstlisting}
\end{tabular}
\end{flushleft}
The realloc pattern leverages available storage at the end of an allocation due to bucket sizes, possibly eliminating a new allocation and copying.
This pattern is underused, missing an opportunity to reduce storage-management costs.
In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the pattern.

The hidden problem for this pattern is the effect of zero fill and alignment with respect to reallocation.
Are these properties transient or persistent (``sticky'')?
For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size?
That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are the initial allocation properties preserved?
Currently, allocation properties are not preserved, so subsequent use of @realloc@ storage may cause inefficient execution or errors due to lack of zero fill or alignment.
This silent problem is unintuitive to programmers and difficult to locate because it is transient.
To prevent these problems, llheap preserves initial allocation properties for the lifetime of an allocation, and the semantics of @realloc@ are augmented to preserve these properties, with additional query routines.
This change makes the realloc pattern efficient and safe.
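
For example, the following fragment (standard C names only) illustrates the hazard that sticky properties eliminate:
\begin{lstlisting}
int * ia = calloc( 5, sizeof( int ) );		// zero-filled allocation
ia = realloc( ia, 10 * sizeof( int ) );		// enlarge array
// without sticky properties, ia[5..9] may contain garbage;
// llheap's realloc preserves zero fill, so the extension is also zeroed
\end{lstlisting}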


\subsection{Header}

Preserving allocation properties requires storing additional information with an allocation.
The only available location is the header, where \VRef[Figure]{f:llheapNormalHeader} shows the llheap storage layout.
The header has two data fields sized appropriately for 32/64-bit alignment requirements.
The first field is a union of three values:
\begin{description}
\item[bucket pointer]
is for allocated storage and points back to the bucket associated with this storage request (see \VRef[Figure]{f:llheapStructure} for the fields accessible in a bucket).
\item[mapped size]
is for mapped storage and is the storage size for use in unmapping.
\item[next free block]
is for free storage and is an intrusive pointer chaining same-size free blocks onto a bucket's free stack.
\end{description}
The second field remembers the request size versus the allocation (bucket) size, \eg a request for 42 bytes is rounded up to 64 bytes.
Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors.

\begin{figure}
\centering
\input{Header}
\caption{llheap Normal Header}
\label{f:llheapNormalHeader}
\end{figure}

The low-order 3 bits of the first field are \emph{unused} for any stored values, whereas the second field may use all of its bits.
The 3 unused bits are used to represent mapped allocation, zero filled, and alignment, respectively.
Note, the alignment bit is not used in the normal header and the zero-filled/mapped bits are not used in the fake header.
This implementation allows a fast test if any of the lower 3 bits are on (@&@ and compare).
If no bits are on, it implies a basic allocation, which is handled quickly;
otherwise, the bits are analysed and appropriate actions are taken for the complex cases.
Since most allocations are basic, this implementation results in a significant performance gain along the allocation and free fastpath.
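
A sketch of this header in C follows, where the field and type names are illustrative rather than llheap's actual identifiers:
\begin{lstlisting}
struct Header {
	union {					// interpretation depends on the storage's state
		struct Bucket * home;		// allocated: bucket owning this storage
		size_t mmapSize;		// mapped: size for munmap
		struct Header * next;		// free: next same-size block on the free stack
	} kind;					// low-order 3 bits: mapped, zero-fill, alignment flags
	size_t requestSize;			// request size, e.g., 42, versus bucket size 64
};
\end{lstlisting}
The fastpath test is then a single mask, where a zero result implies a basic allocation.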


\section{Statistics and Debugging}

llheap can be built to accumulate fast and largely contention-free allocation statistics to help understand allocation behaviour.
Incrementing statistic counters must appear on the allocation fastpath.
As noted, any atomic operation along the fastpath produces a significant increase in allocation costs.
To make statistics performant enough for use on running systems, each heap has its own set of statistic counters, so heap operations do not require atomic operations.

To locate all statistic counters, heaps are linked together in statistics mode, and this list is locked and traversed to sum all counters across heaps.
Note, the list is locked to prevent errors traversing an active list;
the statistic counters are not locked and can flicker during accumulation, which is not an issue with atomic read/write.
\VRef[Figure]{f:StatisticsOutput} shows an example of statistics output, which covers all allocation operations and information about deallocating storage not owned by a thread.
No other memory allocator studied provides as comprehensive statistical information.
Finally, these statistics were invaluable during the development of this thesis for debugging and verifying correctness, and hence, should be equally valuable to application developers.
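
The accumulation step can be sketched as follows, with assumed names for the heap list and counters:
\begin{lstlisting}
lock( heapListLock );				// prevent heaps being added/removed
HeapStatistics total = { 0 };
for ( Heap * h = heapList; h != NULL; h = h->next ) {
	total.mallocCalls += h->stats.mallocCalls;	// unlocked reads may flicker,
	total.mallocBytes += h->stats.mallocBytes;	// but word reads/writes are atomic
}
unlock( heapListLock );
\end{lstlisting}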

\begin{figure}
\begin{lstlisting}
Heap statistics: (storage request / allocation)
  malloc >0 calls 2,766; 0 calls 2,064; storage 12,715 / 13,367 bytes
  aalloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
  calloc >0 calls 6; 0 calls 0; storage 1,008 / 1,104 bytes
  memalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
  amemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
  cmemalign >0 calls 0; 0 calls 0; storage 0 / 0 bytes
  resize >0 calls 0; 0 calls 0; storage 0 / 0 bytes
  realloc >0 calls 0; 0 calls 0; storage 0 / 0 bytes
  free !null calls 2,766; null calls 4,064; storage 12,715 / 13,367 bytes
  away pulls 0; pushes 0; storage 0 / 0 bytes
  sbrk calls 1; storage 10,485,760 bytes
  mmap calls 10,000; storage 10,000 / 10,035 bytes
  munmap calls 10,000; storage 10,000 / 10,035 bytes
  threads started 4; exited 3
  heaps new 4; reused 0
\end{lstlisting}
\caption{Statistics Output}
\label{f:StatisticsOutput}
\end{figure}

llheap can also be built with debug checking, which inserts many asserts along all allocation paths.
These assertions detect incorrect allocation usage, like double frees, unfreed storage, or memory corruption where internal values (like header fields) are overwritten.
These checks are best effort, as opposed to the complete allocation checking of @valgrind@.
Nevertheless, the checks detect many allocation problems.
There is an unfortunate problem in detecting unfreed storage because some library routines assume their allocations have life-time duration, and hence, do not free their storage.
For example, @printf@ allocates a 1024-byte buffer on the first call and never deletes this buffer.
To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ \VPageref{p:malloc_unfreed}), and it is subtracted from the total allocate/free difference.
Determining the amount of never-freed storage is annoying, but once done, any warnings of unfreed storage are application related.

Tests indicate only a 30\% increase in run time when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistics is mitigated by limited calls, often only one at the end of the program.


\section{User-level Threading Support}
\label{s:UserlevelThreadingSupport}

The serially-reusable problem (see \VRef{s:AllocationFastpath}) occurs for kernel threads in the ``T:H model, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
The solution is to prevent interrupts that can result in a CPU or KT change during operations that are logically critical sections.
Locking these critical sections negates any attempt for a quick fastpath and results in high contention.
For user-level threading, the serially-reusable problem appears with time slicing for preemptive scheduling, as the signal handler context switches to another user-level thread.
Without time slicing, a user thread performing a long computation can prevent the execution of (starve) other threads.
To prevent starvation for an allocation-active thread, \ie the time slice always triggers in an allocation critical-section for one thread, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
The rollforward flag is tested at the end of each allocation funnel routine (see \VPageref{p:FunnelRoutine}), and if set, it is reset and a voluntary yield (context switch) is performed to allow other threads to execute.
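
A sketch of the rollforward check at the end of a funnel routine, with assumed names:
\begin{lstlisting}
static __thread volatile bool rollforward = false;	// set by the signal handler

void * malloc( size_t size ) {
	void * addr = doMalloc( size );		// illustrative internal funnel body
	if ( rollforward ) {			// time slice aborted during this operation?
		rollforward = false;
		yield();			// voluntary context switch to another thread
	}
	return addr;
}
\end{lstlisting}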

llheap uses two techniques to detect when execution is in an allocation operation or a routine called from an allocation operation, to abort any time slice during this period.
On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting thread-local flags so the signal handler aborts immediately.
On the fastpath, disabling/enabling interrupts is too expensive as accessing thread-local storage can be expensive and not thread-safe.
For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing.
Hence, there is a window between loading the thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.

The fast technique defines a special code section and places all non-interruptible routines in this section.
The linker places all code in this section into a contiguous block of memory, but the order of routines within the block is unspecified.
Then, the signal handler compares the program counter at the point of interrupt with the start and end address of the non-interruptible section, and if executing within this section, aborts the time slice and sets the rollforward flag.
This technique is fragile because any calls in the non-interruptible code outside of the non-interruptible section (like @sbrk@) must be bracketed with disable/enable interrupts and these calls must be along the slowpath.
Hence, for correctness, this approach requires inspection of generated assembler code for routines placed in the non-interruptible section.
This issue is mitigated by the llheap funnel design, so only the funnel routines and a few statistics routines are placed in the non-interruptible section and their assembler code examined.
These techniques are used in both the \uC and \CFA versions of llheap, as both of these systems have user-level threading.
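
The non-interruptible section can be built from a named linker section and its linker-generated bounds; the following sketch uses the GCC/Clang mechanism with illustrative names:
\begin{lstlisting}
#define NO_PREEMPT __attribute__(( section( "nopreempt" ) ))
extern const char __start_nopreempt[], __stop_nopreempt[];	// created by the linker

NO_PREEMPT void * malloc( size_t size ) {	// funnel routine placed in the section
	return doMalloc( size );		// assumed internal routine
}

static bool inNonInterruptSection( const void * pc ) {	// called from the signal handler
	return (const char *)pc >= __start_nopreempt && (const char *)pc < __stop_nopreempt;
}
\end{lstlisting}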


\section{Bootstrapping}

There are several problems bootstrapping a memory allocator.
\begin{enumerate}
\item
Programs can be statically or dynamically linked.
\item
The order in which the linker schedules startup code is poorly supported.
\item
Knowing a KT's start and end independently from the KT code is difficult.
\end{enumerate}

For static linking, the allocator is loaded with the program.
Hence, allocation calls immediately invoke the allocator operation defined by the loaded allocation library and there is only one memory allocator used in the program.
This approach allows allocator substitution by placing an allocation library before any other in the linked/load path.

Allocator substitution is similar for dynamic linking, but the problem is that the dynamic loader starts first and needs to perform dynamic allocations \emph{before} the substitution allocator is loaded.
As a result, the dynamic loader uses a default allocator until the substitution allocator is loaded, after which all allocation operations are handled by the substitution allocator, including those from the dynamic loader.
Hence, some part of the @sbrk@ area may be used by the default allocator and statistics about allocation operations cannot be correct.
Furthermore, dynamic linking goes through trampolines, so there is an additional cost along the allocator fastpath for all allocation operations.
Testing showed up to a 5\% performance degradation for dynamic linking over static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.

All allocator libraries need to perform startup code to initialize data structures, such as the heap array for llheap.
The problem is getting initialization done before the first allocator call.
However, there does not seem to be a mechanism to tell either the static or dynamic loader to first perform initialization code before any calls to a loaded library.
As a result, calls to allocation routines can occur without initialization.
To deal with this problem, it is necessary to put a conditional initialization check along the allocation fastpath to trigger initialization (singleton pattern), as sketched below.
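
A sketch of the fastpath check, with assumed names:
\begin{lstlisting}
static __thread bool heapManagerInitialized = false;

void * malloc( size_t size ) {
	if ( ! heapManagerInitialized ) heapManagerCtor();	// sets the flag; runs once per KT
	// ... normal fastpath ...
}
\end{lstlisting}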

Two other important execution points are program startup and termination, which include prologue and epilogue code to bootstrap a program, of which programmers are unaware.
For example, dynamic-memory allocations before/after the application starts should not be considered in statistics because the application does not make these calls.
llheap establishes these two points using the routines:
\begin{lstlisting}
__attribute__(( constructor( 100 ) )) static void startup( void ) {
	// clear statistic counters
	// reset allocUnfreed counter
}
__attribute__(( destructor( 100 ) )) static void shutdown( void ) {
	// sum allocUnfreed for all heaps
	// subtract global unfreed storage
	// if allocUnfreed > 0 then print warning message
}
\end{lstlisting}
which use global constructor/destructor priority 100, where the linker calls these routines at program prologue/epilogue in increasing/decreasing order of priority.
Application programs may only use global constructor/destructor priorities greater than 100.
Hence, @startup@ is called after the program prologue but before the application starts, and @shutdown@ is called after the program terminates but before the program epilogue.
By resetting counters in @startup@, prologue allocations are ignored, and checking unfreed storage in @shutdown@ checks only application memory management, ignoring the program epilogue.

While @startup@/@shutdown@ apply to the program KT, a concurrent program creates additional KTs that do not trigger these routines.
However, it is essential for the allocator to know when each KT is started/terminated.
One approach is to create a thread-local object with a constructor/destructor, which is triggered after a new KT starts and before it terminates, respectively.
\begin{lstlisting}
struct ThreadManager {
	volatile bool pgm_thread;
	ThreadManager() {} // unusable
	~ThreadManager() { if ( pgm_thread ) heapManagerDtor(); }
};
static thread_local ThreadManager threadManager;
\end{lstlisting}
Unfortunately, thread-local variables are created lazily, \ie on the first dereference of @threadManager@, which then triggers its constructor.
Therefore, the constructor is useless for knowing when a KT starts because the KT must reference it, and the allocator does not control the application KT.
Fortunately, the singleton pattern needed for initializing the program KT also triggers KT allocator initialization, which can then reference @pgm_thread@ to call @threadManager@'s constructor, otherwise its destructor is not called.
Now when a KT terminates, @~ThreadManager@ is called to chain it onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
The conditional destructor call prevents closing down the program heap, which must remain available because epilogue code may free more storage.

Finally, there is a recursive problem when the singleton pattern dereferences @pgm_thread@ to initialize the thread-local object, because its initialization calls @atExit@, which immediately calls @malloc@ to obtain storage.
This recursion is handled with another thread-local flag to prevent double initialization.
A similar problem exists when the KT terminates and calls member @~ThreadManager@, because immediately afterwards, the terminating KT calls @free@ to deallocate the storage obtained from the @atExit@.
In the meantime, the terminated heap has been put on the global-heap free-stack, and may be in use by a new KT, so the @atExit@ free is handled as a free to another heap and put onto the away list using locking.

For user threading systems, the KTs are controlled by the runtime, and hence, start/end points are known and interact directly with the llheap allocator for \uC and \CFA, which eliminates or simplifies several of these problems.
The following API was created to provide interaction between the language runtime and the allocator.
\begin{lstlisting}
void startTask();			$\C{// KT starts}$
void finishTask();			$\C{// KT ends}$
void startup();				$\C{// when application code starts}$
void shutdown();			$\C{// when application code ends}$
bool traceHeap();			$\C{// enable allocation/free printing for debugging}$
bool traceHeapOn();			$\C{// start printing allocation/free calls}$
bool traceHeapOff();			$\C{// stop printing allocation/free calls}$
\end{lstlisting}
This kind of API is necessary to allow concurrent runtime systems to interact with different memory allocators in a consistent way.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Added Features and Methods}

The C dynamic-allocation API (see \VRef[Figure]{f:CDynamicAllocationAPI}) is neither orthogonal nor complete.
For example,
\begin{itemize}
\item
It is possible to zero fill or align an allocation but not both.
\item
It is \emph{only} possible to zero fill an array allocation.
\item
It is not possible to resize a memory allocation without data copying.
\item
@realloc@ does not preserve initial allocation properties.
\end{itemize}
As a result, programmers must implement the missing combinations themselves, which is error prone, resulting in blame on the entire programming language for a poor dynamic-allocation API.
Furthermore, newer programming languages have better type systems that can provide safer and more powerful APIs for memory allocation.

\begin{figure}
\begin{lstlisting}
void * malloc( size_t size );
void * calloc( size_t nmemb, size_t size );
void * realloc( void * ptr, size_t size );
void * reallocarray( void * ptr, size_t nmemb, size_t size );
void free( void * ptr );
void * memalign( size_t alignment, size_t size );
void * aligned_alloc( size_t alignment, size_t size );
int posix_memalign( void ** memptr, size_t alignment, size_t size );
void * valloc( size_t size );
void * pvalloc( size_t size );

struct mallinfo mallinfo( void );
int mallopt( int param, int val );
int malloc_trim( size_t pad );
size_t malloc_usable_size( void * ptr );
void malloc_stats( void );
int malloc_info( int options, FILE * fp );
\end{lstlisting}
\caption{C Dynamic-Allocation API}
\label{f:CDynamicAllocationAPI}
\end{figure}

The following presents design and API changes for C, \CC (\uC), and \CFA, all of which are implemented in llheap.


\subsection{Out of Memory}

Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory;
hence the need to return an alternate value for a zero-sized allocation.
A different approach allowed by the C API is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.
In theory, notifying the programmer of memory failure allows recovery;
in practice, it is almost impossible to gracefully recover when out of memory.
Hence, the cheaper approach of returning @nullptr@ for a zero-sized allocation is chosen because no pseudo allocation is necessary.


\subsection{C Interface}

For C, it is possible to increase the functionality and orthogonality of the dynamic-memory API to make allocation better for programmers.

For the existing C allocation routines:
\begin{itemize}
\item
@calloc@ sets the sticky zero-fill property.
\item
@memalign@, @aligned_alloc@, @posix_memalign@, @valloc@ and @pvalloc@ set the sticky alignment property.
\item
@realloc@ and @reallocarray@ preserve sticky properties.
\end{itemize}

The C dynamic-memory API is extended with the following routines:

\paragraph{\lstinline{void * aalloc( size_t dim, size_t elemSize )}}
extends @calloc@ for allocating a dynamic array of objects without calculating the total array size explicitly but \emph{without} zero-filling the memory.
@aalloc@ is significantly faster than @calloc@, which is the only alternative.

\noindent\textbf{Usage}
@aalloc@ takes two parameters.
\begin{itemize}
\item
@dim@: number of array objects
\item
@elemSize@: size of an array object
\end{itemize}
It returns the address of the dynamic array or @NULL@ if either @dim@ or @elemSize@ are zero.
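
For example:
\begin{lstlisting}
int * ia = aalloc( 100, sizeof( int ) );	// 100 ints, uninitialized
// versus calloc( 100, sizeof( int ) ), which also zero fills
free( ia );
\end{lstlisting}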

\paragraph{\lstinline{void * resize( void * oaddr, size_t size )}}
extends @realloc@ for resizing an existing allocation \emph{without} copying previous data into the new allocation or preserving sticky properties.
@resize@ is significantly faster than @realloc@, which is the only alternative.

\noindent\textbf{Usage}
@resize@ takes two parameters.
\begin{itemize}
\item
@oaddr@: address to be resized
\item
@size@: new allocation size (smaller or larger than previous)
\end{itemize}
It returns the address of the old or new storage with the specified new size or @NULL@ if @size@ is zero.
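
For example:
\begin{lstlisting}
char * buf = malloc( 64 );
buf = resize( buf, 4096 );	// grow buffer; previous contents are NOT preserved
free( buf );
\end{lstlisting}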

\paragraph{\lstinline{void * amemalign( size_t alignment, size_t dim, size_t elemSize )}}
extends @aalloc@ and @memalign@ for allocating an aligned dynamic array of objects.
Sets the sticky alignment property.

\noindent\textbf{Usage}
@amemalign@ takes three parameters.
\begin{itemize}
\item
@alignment@: alignment requirement
\item
@dim@: number of array objects
\item
@elemSize@: size of an array object
\end{itemize}
It returns the address of the aligned dynamic array or @NULL@ if either @dim@ or @elemSize@ are zero.

\paragraph{\lstinline{void * cmemalign( size_t alignment, size_t dim, size_t elemSize )}}
extends @amemalign@ with zero fill and has the same usage as @amemalign@.
Sets the sticky zero-fill and alignment properties.
It returns the address of the aligned, zero-filled dynamic array or @NULL@ if either @dim@ or @elemSize@ are zero.
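
For example, allocating cache-line aligned arrays:
\begin{lstlisting}
double * d1 = amemalign( 64, 32, sizeof( double ) );	// 32 doubles, uninitialized
double * d2 = cmemalign( 64, 32, sizeof( double ) );	// 32 doubles, zero filled
free( d1 );  free( d2 );
\end{lstlisting}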

\paragraph{\lstinline{size_t malloc_alignment( void * addr )}}
returns the alignment of the dynamic object for use in aligning similar allocations.

\noindent\textbf{Usage}
@malloc_alignment@ takes one parameter.
\begin{itemize}
\item
@addr@: address of an allocated object.
\end{itemize}
It returns the alignment of the given object, where objects not allocated with alignment return the minimal allocation alignment.

\paragraph{\lstinline{bool malloc_zero_fill( void * addr )}}
returns true if the object has the zero-fill sticky property for use in zero filling similar allocations.

\noindent\textbf{Usage}
@malloc_zero_fill@ takes one parameter.

\begin{itemize}
\item
@addr@: address of an allocated object.
\end{itemize}
It returns true if the zero-fill sticky property is set and false otherwise.

\paragraph{\lstinline{size_t malloc_size( void * addr )}}
returns the request size of the dynamic object (updated when an object is resized) for use in similar allocations.
See also @malloc_usable_size@.

\noindent\textbf{Usage}
@malloc_size@ takes one parameter.
    316 
    317 \begin{itemize}
    318 \item
    319 @addr@: the address of the currently allocated dynamic object.
    320 \end{itemize}
    321 @malloc_size@ returns the allocation size of the given dynamic object. On failure, it return zero.
    322 
    323 \subsection{\lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}}
    324 This @realloc@ is an extension of the default @realloc@ (FIX ME: cite default @realloc@). In addition to reallocating an old object and preserving the data in old object, it can also realign the old object to a new alignment requirement.
    325 \paragraph{Usage}
    326 This @realloc@ takes three parameters. It takes an additional parameter of nalign as compared to the default @realloc@.
    327 
    328 \begin{itemize}
    329 \item
    330 @oaddr@: the address of the old object that needs to be reallocated.
    331 \item
    332 @nalign@: the new alignment to which the old object needs to be realigned.
    333 \item
    334 @size@: the new size requirement of the to which the old object needs to be resized.
    335 \end{itemize}
    336 It returns an object with the size and alignment given in the parameters that preserves the data in the old object. On failure, it returns a @NULL@ pointer.
    337 
    338 \subsection{\CFA Malloc Interface}
    339 We added some routines to the malloc interface of \CFA. These routines can only be used in \CFA and not in our standalone uHeap allocator as these routines use some features that are only provided by \CFA and not by C. It makes the allocator even more usable to the programmers.
    340 \CFA provides the liberty to know the returned type of a call to the allocator. So, mainly in these added routines, we removed the object size parameter from the routine as allocator can calculate the size of the object from the returned type.
    341 
    342 \subsection{\lstinline{T * malloc( void )}}
    343 This malloc is a simplified polymorphic form of defualt malloc (FIX ME: cite malloc). It does not take any parameter as compared to default malloc that takes one parameter.
    344 \paragraph{Usage}
    345 This malloc takes no parameters.
    346 It returns a dynamic object of the size of type @T@. On failure, it returns a @NULL@ pointer.
    347 
    348 \subsection{\lstinline{T * aalloc( size_t dim )}}
    349 This aalloc is a simplified polymorphic form of above aalloc (FIX ME: cite aalloc). It takes one parameter as compared to the above aalloc that takes two parameters.
    350 \paragraph{Usage}
    351 aalloc takes one parameters.
    352 
    353 \begin{itemize}
    354 \item
    355 @dim@: required number of objects in the array.
    356 \end{itemize}
    357 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer.
    358 
    359 \subsection{\lstinline{T * calloc( size_t dim )}}
    360 This calloc is a simplified polymorphic form of defualt calloc (FIX ME: cite calloc). It takes one parameter as compared to the default calloc that takes two parameters.
    361 \paragraph{Usage}
    362 This calloc takes one parameter.
    363 
    364 \begin{itemize}
    365 \item
    366 @dim@: required number of objects in the array.
    367 \end{itemize}
    368 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. On failure, it returns a @NULL@ pointer.
    369 
    370 \subsection{\lstinline{T * resize( T * ptr, size_t size )}}
    371 This resize is a simplified polymorphic form of above resize (FIX ME: cite resize with alignment). It takes two parameters as compared to the above resize that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation as \CFA provides gives allocator the liberty to get the alignment of the returned type.
    372 \paragraph{Usage}
    373 This resize takes two parameters.
    374 
    375 \begin{itemize}
    376 \item
    377 @ptr@: address of the old object.
    378 \item
    379 @size@: the required size of the new object.
    380 \end{itemize}
    381 It returns a dynamic object of the size given in paramters. The returned object is aligned to the alignemtn of type @T@. On failure, it returns a @NULL@ pointer.
    382 
    383 \subsection{\lstinline{T * realloc( T * ptr, size_t size )}}
    384 This @realloc@ is a simplified polymorphic form of defualt @realloc@ (FIX ME: cite @realloc@ with align). It takes two parameters as compared to the above @realloc@ that takes three parameters. It frees the programmer from explicitly mentioning the alignment of the allocation as \CFA provides gives allocator the liberty to get the alignment of the returned type.
    385 \paragraph{Usage}
    386 This @realloc@ takes two parameters.
    387 
    388 \begin{itemize}
    389 \item
    390 @ptr@: address of the old object.
    391 \item
    392 @size@: the required size of the new object.
    393 \end{itemize}
    394 It returns a dynamic object of the size given in paramters that preserves the data in the given object. The returned object is aligned to the alignemtn of type @T@. On failure, it returns a @NULL@ pointer.
    395 
    396 \subsection{\lstinline{T * memalign( size_t align )}}
    397 This memalign is a simplified polymorphic form of defualt memalign (FIX ME: cite memalign). It takes one parameters as compared to the default memalign that takes two parameters.
    398 \paragraph{Usage}
    399 memalign takes one parameters.
    400 
    401 \begin{itemize}
    402 \item
    403 @align@: the required alignment of the dynamic object.
    404 \end{itemize}
    405 It returns a dynamic object of the size of type @T@ that is aligned to given parameter align. On failure, it returns a @NULL@ pointer.
    406 
    407 \subsection{\lstinline{T * amemalign( size_t align, size_t dim )}}
    408 This amemalign is a simplified polymorphic form of above amemalign (FIX ME: cite amemalign). It takes two parameter as compared to the above amemalign that takes three parameters.
    409 \paragraph{Usage}
    410 amemalign takes two parameters.
    411 
    412 \begin{itemize}
    413 \item
    414 @align@: required alignment of the dynamic array.
    415 \item
    416 @dim@: required number of objects in the array.
    417 \end{itemize}
    418 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. The returned object is aligned to the given parameter align. On failure, it returns a @NULL@ pointer.
    419 
    420 \subsection{\lstinline{T * cmemalign( size_t align, size_t dim  )}}
    421 This cmemalign is a simplified polymorphic form of above cmemalign (FIX ME: cite cmemalign). It takes two parameter as compared to the above cmemalign that takes three parameters.
    422 \paragraph{Usage}
    423 cmemalign takes two parameters.
    424 
    425 \begin{itemize}
    426 \item
    427 @align@: required alignment of the dynamic array.
    428 \item
    429 @dim@: required number of objects in the array.
    430 \end{itemize}
    431 It returns a dynamic object that has the capacity to contain dim number of objects, each of the size of type @T@. The returned object is aligned to the given parameter align and is zero filled. On failure, it returns a @NULL@ pointer.
    432 
    433 \subsection{\lstinline{T * aligned_alloc( size_t align )}}
    434 This @aligned_alloc@ is a simplified polymorphic form of defualt @aligned_alloc@ (FIX ME: cite @aligned_alloc@). It takes one parameter as compared to the default @aligned_alloc@ that takes two parameters.
    435 \paragraph{Usage}
    436 This @aligned_alloc@ takes one parameter.
    437 
    438 \begin{itemize}
    439 \item
    440 @align@: required alignment of the dynamic object.
    441 \end{itemize}
    442 It returns a dynamic object of the size of type @T@ that is aligned to the given parameter. On failure, it returns a @NULL@ pointer.
    443 
    444 \subsection{\lstinline{int posix_memalign( T ** ptr, size_t align )}}
    445 This @posix_memalign@ is a simplified polymorphic form of defualt @posix_memalign@ (FIX ME: cite @posix_memalign@). It takes two parameters as compared to the default @posix_memalign@ that takes three parameters.
    446 \paragraph{Usage}
    447 This @posix_memalign@ takes two parameter.
    448 
    449 \begin{itemize}
    450 \item
    451 @ptr@: variable address to store the address of the allocated object.
    452 \item
    453 @align@: required alignment of the dynamic object.
    454 \end{itemize}
    455 
    456 It stores address of the dynamic object of the size of type @T@ in given parameter ptr. This object is aligned to the given parameter. On failure, it returns a @NULL@ pointer.
    457 
    458 \subsection{\lstinline{T * valloc( void )}}
    459 This @valloc@ is a simplified polymorphic form of defualt @valloc@ (FIX ME: cite @valloc@). It takes no parameters as compared to the default @valloc@ that takes one parameter.
    460 \paragraph{Usage}
    461 @valloc@ takes no parameters.
    462 It returns a dynamic object of the size of type @T@ that is aligned to the page size. On failure, it returns a @NULL@ pointer.
    463 
    464 \subsection{\lstinline{T * pvalloc( void )}}
    465 \paragraph{Usage}
    466 @pvalloc@ takes no parameters.
    467 It returns a dynamic object of the size that is calcutaed by rouding the size of type @T@. The returned object is also aligned to the page size. On failure, it returns a @NULL@ pointer.
    468 
    469 \subsection{Alloc Interface}
    470 In addition to improve allocator interface both for \CFA and our standalone allocator uHeap in C. We also added a new alloc interface in \CFA that increases usability of dynamic memory allocation.
    471 This interface helps programmers in three major ways.
    472 
    473 \begin{itemize}
    474 \item
    475 Routine Name: alloc interfce frees programmers from remmebring different routine names for different kind of dynamic allocations.
    476 \item
    477 Parametre Positions: alloc interface frees programmers from remembering parameter postions in call to routines.
    478 \item
    479 Object Size: alloc interface does not require programmer to mention the object size as \CFA allows allocator to determince the object size from returned type of alloc call.
    480 \end{itemize}
    481 
    482 Alloc interface uses polymorphism, backtick routines (FIX ME: cite backtick) and ttype parameters of \CFA (FIX ME: cite ttype) to provide a very simple dynamic memory allocation interface to the programmers. The new interfece has just one routine name alloc that can be used to perform a wide range of dynamic allocations. The parameters use backtick functions to provide a similar-to named parameters feature for our alloc interface so that programmers do not have to remember parameter positions in alloc call except the position of dimension (dim) parameter.
    483 
    484 \subsection{Routine: \lstinline{T * alloc( ... )}}
    485 Call to alloc wihout any parameter returns one object of size of type @T@ allocated dynamically.
    486 Only the dimension (dim) parameter for array allocation has the fixed position in the alloc routine. If programmer wants to allocate an array of objects that the required number of members in the array has to be given as the first parameter to the alloc routine.
    487 alocc routine accepts six kinds of arguments. Using different combinations of tha parameters, different kind of allocations can be performed. Any combincation of parameters can be used together except @`realloc@ and @`resize@ that should not be used simultanously in one call to routine as it creates ambiguity about whether to reallocate or resize a currently allocated dynamic object. If both @`resize@ and @`realloc@ are used in a call to alloc then the latter one will take effect or unexpected resulted might be produced.
    488 
    489 \paragraph{Dim}
    490 This is the only parameter in the alloc routine that has a fixed-position and it is also the only parameter that does not use a backtick function. It has to be passed at the first position to alloc call in-case of an array allocation of objects of type @T@.
    491 It represents the required number of members in the array allocation as in \CFA's aalloc (FIX ME: cite aalloc).
    492 This parameter should be of type @size_t@.
    493 
    494 Example: @int a = alloc( 5 )@
    495 This call will return a dynamic array of five integers.
    496 
    497 \paragraph{Align}
    498 This parameter is position-free and uses a backtick routine align (@`align@). The parameter passed with @`align@ should be of type @size_t@. If the alignment parameter is not a power of two or is less than the default alignment of the allocator (that can be found out using routine libAlign in \CFA) then the passed alignment parameter will be rejected and the default alignment will be used.
    499 
    500 Example: @int b = alloc( 5 , 64`align )@
    501 This call will return a dynamic array of five integers. It will align the allocated object to 64.
    502 
    503 \paragraph{Fill}
    504 This parameter is position-free and uses a backtick routine fill (@`fill@). In case of @realloc@, only the extra space after copying the data in the old object will be filled with given parameter.
    505 Three types of parameters can be passed using `fill.
    506 
    507 \begin{itemize}
    508 \item
    509 @char@: A char can be passed with @`fill@ to fill the whole dynamic allocation with the given char recursively till the end of required allocation.
    510 \item
    511 Object of returned type: An object of type of returned type can be passed with @`fill@ to fill the whole dynamic allocation with the given object recursively till the end of required allocation.
    512 \item
    513 Dynamic object of returned type: A dynamic object of type of returned type can be passed with @`fill@ to fill the dynamic allocation with the given dynamic object. In this case, the allocated memory is not filled recursively till the end of allocation. The filling happen untill the end object passed to @`fill@ or the end of requested allocation reaches.
    514 \end{itemize}
    515 
    516 Example: @int b = alloc( 5 , 'a'`fill )@
    517 This call will return a dynamic array of five integers. It will fill the allocated object with character 'a' recursively till the end of requested allocation size.
    518 
    519 Example: @int b = alloc( 5 , 4`fill )@
    520 This call will return a dynamic array of five integers. It will fill the allocated object with integer 4 recursively till the end of requested allocation size.
    521 
    522 Example: @int * b = alloc( 5 , a`fill )@ where @a@ is an int pointer
    523 This call returns a dynamic array of five integers, copying the data from a into the returned object, stopping at the end of a or of the newly allocated object, whichever comes first.
    524 
    525 \paragraph{Resize}
    526 This parameter is position-free and uses a backtick routine resize (@`resize@). It represents the old dynamic object (oaddr) that the programmer wants to
    527 \begin{itemize}
    528 \item
    529 resize to a new size.
    530 \item
    531 realign to a new alignment.
    532 \item
    533 fill with something.
    534 \end{itemize}
    535 The data in the old dynamic object is not preserved in the new object. The type of the object passed to @`resize@ and the return type of the alloc call can be different.
    536 
    537 Example: @int * b = alloc( 5 , a`resize )@
    538 This call resizes object a to a dynamic array that can contain 5 integers.
    539 
    540 Example: @int * b = alloc( 5 , a`resize , 32`align )@
    541 This call resizes object a to a dynamic array that can contain 5 integers, aligned on a 32-byte boundary.
    542 
    543 Example: @int * b = alloc( 5 , a`resize , 32`align , 2`fill )@
    544 This call resizes object a to a dynamic array that can contain 5 integers, aligned on a 32-byte boundary and filled with the value 2.
    545 
    546 \paragraph{Realloc}
    547 This parameter is position-free and uses a backtick routine @realloc@ (@`realloc@). It represents the old dynamic object (oaddr) that the programmer wants to
    548 \begin{itemize}
    549 \item
    550 realloc to a new size.
    551 \item
    552 realign to a new alignment.
    553 \item
    554 fill with something.
    555 \end{itemize}
    556 The data in the old dynamic object is preserved in the new object. The type of the object passed to @`realloc@ and the return type of the alloc call must be the same.
    557 
    558 Example: @int * b = alloc( 5 , a`realloc )@
    559 This call reallocates object a to a dynamic array that can contain 5 integers.
    560 
    561 Example: @int * b = alloc( 5 , a`realloc , 32`align )@
    562 This call reallocates object a to a dynamic array that can contain 5 integers, aligned on a 32-byte boundary.
    563 
    564 Example: @int * b = alloc( 5 , a`realloc , 32`align , 2`fill )@
    565 This call reallocates object a to a dynamic array that can contain 5 integers, aligned on a 32-byte boundary. The extra space after copying the data of a to the returned object is filled with 2.
     855\begin{itemize}
     856\item
     857@addr@: address of an allocated object.
     858\end{itemize}
     859It returns the request size or zero if @addr@ is @NULL@.
     860
     861\paragraph{\lstinline{int malloc_stats_fd( int fd )}}
     862changes the file descriptor where @malloc_stats@ writes statistics (default @stdout@).
     863
     864\noindent\textbf{Usage}
     865@malloc_stats_fd@ takes one parameter.
     866\begin{itemize}
     867\item
     868@fd@: file descriptor.
     869\end{itemize}
     870It returns the previous file descriptor.
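As an illustrative sketch, statistics can be redirected to standard error (file descriptor 2) and later restored:
\begin{cfa}
int prev = malloc_stats_fd( 2 );  // statistics now written to stderr
// ... allocation activity ...
malloc_stats_fd( prev );  // restore previous statistics file descriptor
\end{cfa}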
     871
     872\paragraph{\lstinline{size_t malloc_expansion()}}
     873\label{p:malloc_expansion}
     874sets the amount (in bytes) to extend the heap when there is insufficient free storage to service an allocation request.
     875It returns the heap extension size used throughout a program, \ie it is called once at heap initialization.
     876
     877\paragraph{\lstinline{size_t malloc_mmap_start()}}
     878sets the crossover between allocations occurring in the @sbrk@ area or separately mapped.
     879It returns the crossover point used throughout a program, \ie it is called once at heap initialization.
     880
     881\paragraph{\lstinline{size_t malloc_unfreed()}}
     882\label{p:malloc_unfreed}
     883sets the amount subtracted to adjust for unfreed program storage (debug only).
     884It returns the new subtraction amount and is called by @malloc_stats@.
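Assuming these amounts are program-definable defaults sampled once at heap initialization, as the descriptions above suggest, a hypothetical sketch of tailoring them is:
\begin{cfa}
size_t malloc_expansion() { return 16 * 1024 * 1024; }  // extend heap 16MB at a time
size_t malloc_mmap_start() { return 512 * 1024; }  // separately map allocations of 512KB or more
size_t malloc_unfreed() { return 1024; }  // ignore 1KB of known unfreed program storage
\end{cfa}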
     885
     886
     887\subsection{\CC Interface}
     888
     889The following extensions take advantage of overload polymorphism in the \CC type-system.
     890
     891\paragraph{\lstinline{void * resize( void * oaddr, size_t nalign, size_t size )}}
     892extends @resize@ with an alignment re\-quirement.
     893
     894\noindent\textbf{Usage}
     895takes three parameters.
     896\begin{itemize}
     897\item
     898@oaddr@: address to be resized
     899\item
     900@nalign@: alignment requirement
     901\item
     902@size@: new allocation size (smaller or larger than previous)
     903\end{itemize}
     904It returns the address of the old or new storage with the specified new size and alignment, or @NULL@ if @size@ is zero.
     905
     906\paragraph{\lstinline{void * realloc( void * oaddr, size_t nalign, size_t size )}}
     907extends @realloc@ with an alignment re\-quirement and has the same usage as aligned @resize@.
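A brief usage sketch of these \CC overloads (values illustrative):
\begin{lstlisting}
int * p = (int *)malloc( 5 * sizeof(int) );
p = (int *)resize( p, 64, 10 * sizeof(int) );  // 64-byte aligned, old values not preserved
p = (int *)realloc( p, 4096, 20 * sizeof(int) );  // page aligned, old values preserved
free( p );
\end{lstlisting}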
     908
     909
     910\subsection{\CFA Interface}
     911
     912The following extensions take advantage of overload polymorphism in the \CFA type-system.
     913The key safety advantage of the \CFA type system is using the return type to select overloads;
     914hence, a polymorphic routine knows the returned type and its size.
     915This capability is used to remove the object size parameter and correctly cast the return storage to match the result type.
     916For example, the following is the \CFA wrapper for C @malloc@:
     917\begin{cfa}
     918forall( T & | sized(T) ) {
     919        T * malloc( void ) {
     920                if ( _Alignof(T) <= libAlign() ) return @(T *)@malloc( @sizeof(T)@ ); // C allocation
     921                else return @(T *)@memalign( @_Alignof(T)@, @sizeof(T)@ ); // C allocation
     922        } // malloc
} // distribution
     923\end{cfa}
     924and is used as follows:
     925\begin{lstlisting}
     926int * i = malloc();
     927double * d = malloc();
     928struct Spinlock { ... } __attribute__(( aligned(128) ));
     929Spinlock * sl = malloc();
     930\end{lstlisting}
     931where each @malloc@ call provides the return type as @T@, which is used with @sizeof@, @_Alignof@, and casting the storage to the correct type.
     932This interface removes many of the common allocation errors in C programs.
     933\VRef[Figure]{f:CFADynamicAllocationAPI} shows the \CFA wrappers for the equivalent C/\CC allocation routines with the same semantic behaviour.
     934
     935\begin{figure}
     936\begin{lstlisting}
     937T * malloc( void );
     938T * aalloc( size_t dim );
     939T * calloc( size_t dim );
     940T * resize( T * ptr, size_t size );
     941T * realloc( T * ptr, size_t size );
     942T * memalign( size_t align );
     943T * amemalign( size_t align, size_t dim );
     944T * cmemalign( size_t align, size_t dim );
     945T * aligned_alloc( size_t align );
     946int posix_memalign( T ** ptr, size_t align );
     947T * valloc( void );
     948T * pvalloc( void );
     949\end{lstlisting}
     950\caption{\CFA C-Style Dynamic-Allocation API}
     951\label{f:CFADynamicAllocationAPI}
     952\end{figure}
     953
     954In addition to the \CFA C-style allocator interface, a new allocator interface is provided to further increase orthogonality and usability of dynamic-memory allocation.
     955This interface helps programmers in three ways.
     956\begin{itemize}
     957\item
     958naming: \CFA regular and @ttype@ polymorphism is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
     959\item
     960named arguments: individual allocation properties are specified using postfix function call, so programmers do not have to remember parameter positions in allocation calls.
     961\item
     962object size: like the \CFA C-style interface, programmers do not have to specify object size or cast allocation results.
     963\end{itemize}
     964Note, postfix function call is an alternative call syntax, using backtick @`@, where the argument appears before the function name, \eg
     965\begin{cfa}
     966duration ?@`@h( int h );                // ? denotes the position of the function operand
     967duration ?@`@m( int m );
     968duration ?@`@s( int s );
     969duration dur = 3@`@h + 42@`@m + 17@`@s;
     970\end{cfa}
     971@ttype@ polymorphism is similar to \CC variadic templates.
     972
     973\paragraph{\lstinline{T * alloc( ... )} or \lstinline{T * alloc( size_t dim, ... )}}
     974is overloaded with a variable number of specific allocation routines, or an integer dimension parameter followed by a variable number of specific allocation routines.
     975A call without parameters returns a dynamically allocated object of type @T@ (@malloc@).
     976A call with only the dimension (dim) parameter returns a dynamically allocated array of objects of type @T@ (@aalloc@).
     977The variable number of arguments consist of allocation properties, which can be combined to produce different kinds of allocations.
     978The only restriction is for properties @realloc@ and @resize@, which cannot be combined.
     979
     980The allocation property functions are:
     981\subparagraph{\lstinline{T_align ?`align( size_t alignment )}}
     982to align the allocation.
     983The alignment parameter must be $\ge$ the default alignment (@libAlign()@ in \CFA) and a power of two, \eg:
     984\begin{cfa}
     985int * i0 = alloc( @4096`align@ );  sout | i0 | nl;
     986int * i1 = alloc( 3, @4096`align@ );  sout | i1; for (i; 3 ) sout | &i1[i]; sout | nl;
     987
     9880x555555572000
     9890x555555574000 0x555555574000 0x555555574004 0x555555574008
     990\end{cfa}
     991returns a dynamic object and object array aligned on a 4096-byte boundary.
     992
     993\subparagraph{\lstinline{S_fill(T) ?`fill ( /* various types */ )}}
     994to initialize storage.
     995There are three ways to fill storage:
     996\begin{enumerate}
     997\item
     998A char fills each byte of each object.
     999\item
     1000An object of the returned type fills each object.
     1001\item
     1002An object array pointer fills some or all of the corresponding object array.
     1003\end{enumerate}
     1004For example:
     1005\begin{cfa}[numbers=left]
     1006int * i0 = alloc( @0n`fill@ );  sout | *i0 | nl;  // disambiguate 0
     1007int * i1 = alloc( @5`fill@ );  sout | *i1 | nl;
     1008int * i2 = alloc( @'\xfe'`fill@ ); sout | hex( *i2 ) | nl;
     1009int * i3 = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | i3[i]; sout | nl;
     1010int * i4 = alloc( 5, @0xdeadbeefN`fill@ );  for ( i; 5 ) sout | hex( i4[i] ); sout | nl;
     1011int * i5 = alloc( 5, @i3`fill@ );  for ( i; 5 ) sout | i5[i]; sout | nl;
     1012int * i6 = alloc( 5, @[i3, 3]`fill@ );  for ( i; 5 ) sout | i6[i]; sout | nl;
     1013\end{cfa}
     1014\begin{lstlisting}[numbers=left]
     10150
     10165
     10170xfefefefe
     10185 5 5 5 5
     10190xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef
     10205 5 5 5 5
     10215 5 5 -555819298 -555819298  // two undefined values
     1022\end{lstlisting}
     1023Examples 1 to 3 fill an object with a value or characters.
     1024Examples 4 to 7 fill an array of objects with values, another array, or part of an array.
     1025
     1026\subparagraph{\lstinline{S_resize(T) ?`resize( void * oaddr )}}
     1027used to resize, realign, and fill, where the old object data is not copied to the new object.
     1028The old object type may be different from the new object type, since the values are not used.
     1029For example:
     1030\begin{cfa}[numbers=left]
     1031int * i = alloc( @5`fill@ );  sout | i | *i;
     1032i = alloc( @i`resize@, @256`align@, @7`fill@ );  sout | i | *i;
     1033double * d = alloc( @i`resize@, @4096`align@, @13.5`fill@ );  sout | d | *d;
     1034\end{cfa}
     1035\begin{lstlisting}[numbers=left]
     10360x55555556d5c0 5
     10370x555555570000 7
     10380x555555571000 13.5
     1039\end{lstlisting}
     1040Examples 2 to 3 change the alignment, fill, and size for the initial storage of @i@.
     1041
     1042\begin{cfa}[numbers=left]
     1043int * ia = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | ia[i]; sout | nl;
     1044ia = alloc( 10, @ia`resize@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl;
     1045sout | ia; ia = alloc( 5, @ia`resize@, @512`align@, @13`fill@ ); sout | ia; for ( i; 5 ) sout | ia[i]; sout | nl;
     1046ia = alloc( 3, @ia`resize@, @4096`align@, @2`fill@ );  sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl;
     1047\end{cfa}
     1048\begin{lstlisting}[numbers=left]
     10495 5 5 5 5
     10507 7 7 7 7 7 7 7 7 7
     10510x55555556d560 0x555555571a00 13 13 13 13 13
     10520x555555572000 0x555555572000 2 0x555555572004 2 0x555555572008 2
     1053\end{lstlisting}
     1054Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
     1055
     1056\subparagraph{\lstinline{S_realloc(T) ?`realloc( T * a )}}
     1057used to resize, realign, and fill, where the old object data is copied to the new object.
     1058The old object type must be the same as the new object type, since the values are used.
     1059Note, for @fill@, only the extra space after copying the data from the old object is filled with the given parameter.
     1060For example:
     1061\begin{cfa}[numbers=left]
     1062int * i = alloc( @5`fill@ );  sout | i | *i;
     1063i = alloc( @i`realloc@, @256`align@ );  sout | i | *i;
     1064i = alloc( @i`realloc@, @4096`align@, @13`fill@ );  sout | i | *i;
     1065\end{cfa}
     1066\begin{lstlisting}[numbers=left]
     10670x55555556d5c0 5
     10680x555555570000 5
     10690x555555571000 5
     1070\end{lstlisting}
     1071Examples 2 to 3 change the alignment for the initial storage of @i@.
     1072The @13`fill@ for example 3 does nothing because no extra space is added.
     1073
     1074\begin{cfa}[numbers=left]
     1075int * ia = alloc( 5, @5`fill@ );  for ( i; 5 ) sout | ia[i]; sout | nl;
     1076ia = alloc( 10, @ia`realloc@, @7`fill@ ); for ( i; 10 ) sout | ia[i]; sout | nl;
     1077sout | ia; ia = alloc( 1, @ia`realloc@, @512`align@, @13`fill@ ); sout | ia; for ( i; 1 ) sout | ia[i]; sout | nl;
     1078ia = alloc( 3, @ia`realloc@, @4096`align@, @2`fill@ );  sout | ia; for ( i; 3 ) sout | &ia[i] | ia[i]; sout | nl;
     1079\end{cfa}
     1080\begin{lstlisting}[numbers=left]
     10815 5 5 5 5
     10825 5 5 5 5 7 7 7 7 7
     10830x55555556c560 0x555555570a00 5
     10840x555555571000 0x555555571000 5 0x555555571004 2 0x555555571008 2
     1085\end{lstlisting}
     1086Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
     1087The @13`fill@ for example 3 does nothing because no extra space is added.
     1088
     1089These \CFA allocation features are used extensively in the development of the \CFA runtime.
  • doc/theses/mubeen_zulfiqar_MMath/background.tex

    rba897d21 r2e9b59b  
    3434\VRef[Figure]{f:AllocatorComponents} shows the two important data components for a memory allocator, management and storage, collectively called the \newterm{heap}.
    3535The \newterm{management data} is a data structure located at a known memory address and contains all information necessary to manage the storage data.
    36 The management data starts with fixed-sized information in the static-data memory that flows into the dynamic-allocation memory.
     36The management data starts with fixed-sized information in the static-data memory that references components in the dynamic-allocation memory.
    3737The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}.
    38 Allocated objects (white) are variable sized, and allocated and maintained by the program;
     38Allocated objects (light grey) are variable sized, and allocated and maintained by the program;
    3939\ie only the program knows the location of allocated storage, not the memory allocator.
    4040\begin{figure}[h]
     
    4444\label{f:AllocatorComponents}
    4545\end{figure}
    46 Freed objects (light grey) are memory deallocated by the program, which are linked into one or more lists facilitating easy location for new allocations.
     46Freed objects (white) represent memory deallocated by the program, which are linked into one or more lists facilitating easy location of new allocations.
    4747Often the free list is chained internally so it does not consume additional storage, \ie the link fields are placed at known locations in the unused memory blocks.
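A minimal sketch of such an intrusive free-list, where the link field overlays the first bytes of each freed block (types and names are illustrative):
\begin{cfa}
struct Free { struct Free * next; };  // overlays the start of a freed block
static struct Free * head = 0;
static void push( void * block ) {  // free: store link at known location in block
	((struct Free *)block)->next = head;  head = (struct Free *)block;
}
static void * pop( void ) {  // allocate: unlink first block, if any
	struct Free * b = head;  if ( b ) head = b->next;  return b;
}
\end{cfa}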
    4848Reserved memory (dark grey) is one or more blocks of memory obtained from the operating system but not yet allocated to the program;
     
    5454The trailer may be used to simplify an allocation implementation, \eg coalescing, and/or for security purposes to mark the end of an object.
    5555An object may be preceded by padding to ensure proper alignment.
    56 Some algorithms quantize allocation requests into distinct sizes resulting in additional spacing after objects less than the quantized value.
     56Some algorithms quantize allocation requests into distinct sizes, called \newterm{buckets}, resulting in additional spacing after objects less than the quantized value.
     57(Note, the buckets are often organized as an array of ascending bucket sizes for fast searching, \eg binary search, and the array is stored in the heap management-area, where each bucket holds the top pointer to the freed objects of that size.)
    5758When padding and spacing are necessary, neither can be used to satisfy a future allocation request while the current allocation exists.
    5859A free object also contains management data, \eg size, chaining, etc.
     
    8182Fragmentation is memory requested from the operating system but not used by the program;
    8283hence, allocated objects are not fragmentation.
    83 \VRef[Figure]{f:InternalExternalFragmentation}) shows fragmentation is divided into two forms: internal or external.
     84\VRef[Figure]{f:InternalExternalFragmentation} shows fragmentation is divided into two forms: internal or external.
    8485
    8586\begin{figure}
     
    9697An allocator should strive to keep internal management information to a minimum.
    9798
    98 \newterm{External fragmentation} is all memory space reserved from the operating system but not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes freed objects, all external management data, and reserved memory.
     99\newterm{External fragmentation} is all memory space reserved from the operating system but not allocated to the program~\cite{Wilson95,Lim98,Siebert00}, which includes all external management data, freed objects, and reserved memory.
    99100This memory is problematic in two ways: heap blowup and highly fragmented memory.
    100101\newterm{Heap blowup} occurs when memory freed by the program is not reused for future allocations leading to potentially unbounded external fragmentation growth~\cite{Berger00}.
     
    125126\end{figure}
    126127
    127 For a single-threaded memory allocator, three basic approaches for controlling fragmentation have been identified~\cite{Johnstone99}.
     128For a single-threaded memory allocator, three basic approaches for controlling fragmentation are identified~\cite{Johnstone99}.
    128129The first approach is a \newterm{sequential-fit algorithm} with one list of free objects that is searched for a block large enough to fit a requested object size.
    129130Different search policies determine the free object selected, \eg the first free object large enough or closest to the requested size.
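A minimal first-fit sketch over a singly-linked free list (types and names are illustrative):
\begin{cfa}
struct Free { size_t size;  struct Free * next; };
static struct Free * firstFit( struct Free ** head, size_t request ) {
	for ( struct Free ** prev = head;  *prev;  prev = &(*prev)->next ) {
		if ( (*prev)->size >= request ) {  // first block large enough
			struct Free * block = *prev;
			*prev = block->next;  // unlink from free list
			return block;
		}
	}
	return 0;  // no block fits: extend the heap
}
\end{cfa}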
     
    132133
    133134The second approach is a \newterm{segregated} or \newterm{binning algorithm} with a set of lists for different sized freed objects.
    134 When an object is allocated, the requested size is rounded up to the nearest bin-size, possibly with spacing after the object.
     135When an object is allocated, the requested size is rounded up to the nearest bin-size, often leading to spacing after the object.
    135136A binning algorithm is fast at finding free memory of the appropriate size and allocating it, since the first free object on the free list is used.
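As a sketch, the ascending bucket array can be binary searched for the smallest bucket size that holds a request (sizes are illustrative):
\begin{cfa}
static const size_t bucketSizes[] = { 16, 32, 48, 64, 96, 128 };  // ascending bucket sizes
static unsigned findBucket( size_t request ) {  // assumes request <= largest bucket
	unsigned lo = 0, hi = sizeof(bucketSizes) / sizeof(bucketSizes[0]) - 1;
	while ( lo < hi ) {  // binary search for smallest bucket >= request
		unsigned mid = (lo + hi) / 2;
		if ( bucketSizes[mid] < request ) lo = mid + 1; else hi = mid;
	}
	return lo;
}
\end{cfa}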
    136137The fewer bin-sizes, the fewer lists need to be searched and maintained;
     
    158159Temporal locality commonly occurs during an iterative computation with a fix set of disjoint variables, while spatial locality commonly occurs when traversing an array.
    159160
    160 Hardware takes advantage of temporal and spatial locality through multiple levels of caching (\ie memory hierarchy).
     161Hardware takes advantage of temporal and spatial locality through multiple levels of caching, \ie memory hierarchy.
    161162When an object is accessed, the memory physically located around the object is also cached with the expectation that the current and nearby objects will be referenced within a short period of time.
    162163For example, entire cache lines are transferred between memory and cache and entire virtual-memory pages are transferred between disk and memory.
     
    171172
    172173There are a number of ways a memory allocator can degrade locality by increasing the working set.
    173 For example, a memory allocator may access multiple free objects before finding one to satisfy an allocation request (\eg sequential-fit algorithm).
     174For example, a memory allocator may access multiple free objects before finding one to satisfy an allocation request, \eg sequential-fit algorithm.
    174175If there are a (large) number of objects accessed in very different areas of memory, the allocator may perturb the program's memory hierarchy causing multiple cache or page misses~\cite{Grunwald93}.
    175176Another way locality can be degraded is by spatially separating related data.
     
    181182
    182183A multi-threaded memory-allocator does not run any threads itself, but is used by a multi-threaded program.
    183 In addition to single-threaded design issues of locality and fragmentation, a multi-threaded allocator may be simultaneously accessed by multiple threads, and hence, must deal with concurrency issues such as mutual exclusion, false sharing, and additional forms of heap blowup.
     184In addition to single-threaded design issues of fragmentation and locality, a multi-threaded allocator is simultaneously accessed by multiple threads, and hence, must deal with concurrency issues such as mutual exclusion, false sharing, and additional forms of heap blowup.
    184185
    185186
     
    192193Second is when multiple threads contend for a shared resource simultaneously, and hence, some threads must wait until the resource is released.
    193194Contention can be reduced in a number of ways:
     195\begin{itemize}[itemsep=0pt]
     196\item
    194197using multiple fine-grained locks versus a single lock, spreading the contention across a number of locks;
     198\item
    195199using trylock and generating new storage if the lock is busy, yielding a classic space versus time tradeoff;
     200\item
    196201using one of the many lock-free approaches for reducing contention on basic data-structure operations~\cite{Oyama99}.
    197 However, all of these approaches have degenerate cases where contention occurs.
     202\end{itemize}
     203However, all of these approaches have degenerate cases where program contention is high, which occurs outside of the allocator.
    198204
    199205
     
    275281\label{s:MultipleHeaps}
    276282
    277 A single-threaded allocator has at most one thread and heap, while a multi-threaded allocator has potentially multiple threads and heaps.
     283A multi-threaded allocator has potentially multiple threads and heaps.
    278284The multiple threads cause complexity, and multiple heaps are a mechanism for dealing with the complexity.
    279285The spectrum ranges from multiple threads using a single heap, denoted as T:1 (see \VRef[Figure]{f:SingleHeap}), to multiple threads sharing multiple heaps, denoted as T:H (see \VRef[Figure]{f:SharedHeaps}), to one thread per heap, denoted as 1:1 (see \VRef[Figure]{f:PerThreadHeap}), which is almost back to a single-threaded allocator.
     
    339345An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory.
    340346Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur.
    341 Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area.
     347Other storage-management options are to use @mmap@ to set aside (large) areas of virtual memory for each heap and suballocate each heap's storage within that area, pushing part of the storage management complexity back to the operating system.
    342348
    343349\begin{figure}
     
    368374
    369375
    370 \paragraph{1:1 model (thread heaps)} where each thread has its own heap, which eliminates most contention and locking because threads seldom accesses another thread's heap (see ownership in \VRef{s:Ownership}).
     376\paragraph{1:1 model (thread heaps)} where each thread has its own heap eliminating most contention and locking because threads seldom access another thread's heap (see ownership in \VRef{s:Ownership}).
    371377An additional benefit of thread heaps is improved locality due to better memory layout.
    372378As each thread only allocates from its heap, all objects for a thread are consolidated in the storage area for that heap, better utilizing each CPUs cache and accessing fewer pages.
     
    380386Second is to place the thread heap on a list of available heaps and reuse it for a new thread in the future.
    381387Destroying the thread heap immediately may reduce external fragmentation sooner, since all free objects are freed to the global heap and may be reused by other threads.
    382 Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap.
     388Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible.
    383389
    384390
     
    388394However, an important goal of user-level threading is for fast operations (creation/termination/context-switching) by not interacting with the operating system, which allows the ability to create large numbers of high-performance interacting threads ($>$ 10,000).
    389395It is difficult to retain this goal, if the user-threading model is directly involved with the heap model.
    390 \VRef[Figure]{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model provided by the language runtime.
     396\VRef[Figure]{f:UserLevelKernelHeaps} shows that virtually all user-level threading systems use whatever kernel-level heap-model is provided by the language runtime.
    391397Hence, a user thread allocates/deallocates from/to the heap of the kernel thread on which it is currently executing.
    392398
     
    400406Adopting this model results in a subtle problem with shared heaps.
    401407With kernel threading, an operation that is started by a kernel thread is always completed by that thread.
    402 For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap even if preempted.
    403 Any correctness locking associated with the shared heap is preserved across preemption.
     408For example, if a kernel thread starts an allocation/deallocation on a shared heap, it always completes that operation with that heap even if preempted, \ie any locking correctness associated with the shared heap is preserved across preemption.
    404409
    405410However, this correctness property is not preserved for user-level threading.
     
    409414However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is rare (10--100 milliseconds).
    410415Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically.
    411 Occasionally ignoring a preemption should be benign.
     416Occasionally ignoring a preemption should be benign, but a persistent lack of preemption can result in both short and long term starvation.
    412417
    413418
     
    430435
    431436\newterm{Ownership} defines which heap an object is returned-to on deallocation.
    432 If a thread returns an object to the heap it was originally allocated from, the heap has ownership of its objects.
    433 Alternatively, a thread can return an object to the heap it is currently allocating from, which can be any heap accessible during a thread's lifetime.
     437If a thread returns an object to the heap it was originally allocated from, a heap has ownership of its objects.
     438Alternatively, a thread can return an object to the heap it is currently associated with, which can be any heap accessible during a thread's lifetime.
    434439\VRef[Figure]{f:HeapsOwnership} shows an example of multiple heaps (minus the global heap) with and without ownership.
    435440Again, the arrows indicate the direction memory conceptually moves for each kind of operation.
     
    539544Only with the 1:1 model and ownership is active and passive false-sharing avoided (see \VRef{s:Ownership}).
    540545Passive false-sharing may still occur, if delayed ownership is used.
     546Finally, a completely free container can become reserved storage and be reset to allocate objects of a new size or freed to the global heap.
    541547
    542548\begin{figure}
     
    553559\caption{Free-list Structure with Container Ownership}
    554560\end{figure}
    555 
    556 A fragmented heap has multiple containers that may be partially or completely free.
    557 A completely free container can become reserved storage and be reset to allocate objects of a new size.
    558 When a heap reaches a threshold of free objects, it moves some free storage to the global heap for reuse to prevent heap blowup.
    559 Without ownership, when a heap frees objects to the global heap, individual objects must be passed, and placed on the global-heap's free-list.
    560 Containers cannot be freed to the global heap unless completely free because
    561561
    562562When a container changes ownership, the ownership of all objects within it change as well.
     
    569569Note, once the object is freed by Task$_1$, no more false sharing can occur until the container changes ownership again.
    570570To prevent this form of false sharing, container movement may be restricted to when all objects in the container are free.
    571 One implementation approach that increases the freedom to return a free container to the operating system involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area.
     571One implementation approach that increases the freedom to return a free container to the operating system involves allocating containers using a call like @mmap@, which allows memory at an arbitrary address to be returned versus only storage at the end of the contiguous @sbrk@ area, again pushing storage management complexity back to the operating system.
    572572
    573573\begin{figure}
     
    700700\end{figure}
    701701
    702 As mentioned, an implementation may have only one heap deal with the global heap, so the other heap can be simplified.
     702As mentioned, an implementation may have only one heap interact with the global heap, so the other heap can be simplified.
    703703For example, if only the private heap interacts with the global heap, the public heap can be reduced to a lock-protected free-list of objects deallocated by other threads due to ownership, called a \newterm{remote free-list}.
    704704To avoid heap blowup, the private heap allocates from the remote free-list when it reaches some threshold or it has no free storage.
     
    721721An allocation buffer is reserved memory (see~\VRef{s:AllocatorComponents}) not yet allocated to the program, and is used for allocating objects when the free list is empty.
    722722That is, rather than requesting new storage for a single object, an entire buffer is requested from which multiple objects are allocated later.
    723 Both any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or operating system, respectively.
     723Any heap may use an allocation buffer, resulting in allocation from the buffer before requesting objects (containers) from the global heap or operating system, respectively.
    724724The allocation buffer reduces contention and the number of global/operating-system calls.
    725725For coalescing, a buffer is split into smaller objects by allocations, and recomposed into larger buffer areas during deallocations.
    726726
    727 Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts.
     727Allocation buffers are useful initially when there are no freed objects in a heap because many allocations usually occur when a thread starts (simple bump allocation).
    728728Furthermore, to prevent heap blowup, objects should be reused before allocating a new allocation buffer.
    729 Thus, allocation buffers are often allocated more frequently at program/thread start, and then their use often diminishes.
     729Thus, allocation buffers are often allocated more frequently at program/thread start, and then allocations often diminish.
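A bump-allocation sketch from a reserved buffer (types and names are illustrative):
\begin{cfa}
struct Buffer { char * next, * end; };
static void * bump( struct Buffer * b, size_t size ) {
	size = (size + 15) & ~(size_t)15;  // round request to 16-byte alignment
	if ( b->next + size > b->end ) return 0;  // buffer exhausted: refill from global pool
	void * p = b->next;  b->next += size;  return p;
}
\end{cfa}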
    730730
    731731Using an allocation buffer with a thread heap avoids active false-sharing, since all objects in the allocation buffer are allocated to the same thread.
     
    746746\label{s:LockFreeOperations}
    747747
    748 A lock-free algorithm guarantees safe concurrent-access to a data structure, so that at least one thread can make progress in the system, but an individual task has no bound to execution, and hence, may starve~\cite[pp.~745--746]{Herlihy93}.
    749 % A wait-free algorithm puts a finite bound on the number of steps any thread takes to complete an operation, so an individual task cannot starve
     748A \newterm{lock-free algorithm} guarantees safe concurrent-access to a data structure, so that at least one thread makes progress, but an individual task has no execution bound and may starve~\cite[pp.~745--746]{Herlihy93}.
     749(A \newterm{wait-free algorithm} puts a bound on the number of steps any thread takes to complete an operation to prevent starvation.)
    750750Lock-free operations can be used in an allocator to reduce or eliminate the use of locks.
    751 Locks are a problem for high contention or if the thread holding the lock is preempted and other threads attempt to use that lock.
    752 With respect to the heap, these situations are unlikely unless all threads makes extremely high use of dynamic-memory allocation, which can be an indication of poor design.
     751While locks and lock-free data-structures often have equal performance, lock-free has the advantage of not holding a lock across preemption so other threads can continue to make progress.
     752With respect to the heap, these situations are unlikely unless all threads make extremely high use of dynamic-memory allocation, which can be an indication of poor design.
    753753Nevertheless, lock-free algorithms can reduce the number of context switches, since a thread does not yield/block while waiting for a lock;
    754 on the other hand, a thread may busy-wait for an unbounded period.
     754on the other hand, a thread may busy-wait for an unbounded period holding a processor.
    755755Finally, lock-free implementations have greater complexity and hardware dependency.
    756756Lock-free algorithms can be applied most easily to simple free-lists, \eg remote free-list, to allow lock-free insertion and removal from the head of a stack.
    757 Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is more complex.
     757Implementing lock-free operations for more complex data-structures (queue~\cite{Valois94}/deque~\cite{Sundell08}) is correspondingly more complex.
    758758Michael~\cite{Michael04} and Gidenstam \etal \cite{Gidenstam05} have created lock-free variations of the Hoard allocator.
    759759
  • doc/theses/mubeen_zulfiqar_MMath/figures/AllocDS1.fig

    rba897d21 r2e9b59b  
    88-2
    991200 2
    10 6 4200 1575 4500 1725
    11 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4275 1650 20 20 4275 1650 4295 1650
    12 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4350 1650 20 20 4350 1650 4370 1650
    13 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4425 1650 20 20 4425 1650 4445 1650
     106 2850 2100 3150 2250
     111 3 0 1 0 0 50 -1 20 0.000 1 0.0000 2925 2175 20 20 2925 2175 2945 2175
     121 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3000 2175 20 20 3000 2175 3020 2175
     131 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3075 2175 20 20 3075 2175 3095 2175
    1414-6
    15 6 2850 2475 3150 2850
     156 4050 2100 4350 2250
     161 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4125 2175 20 20 4125 2175 4145 2175
     171 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 2175 20 20 4200 2175 4220 2175
     181 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4275 2175 20 20 4275 2175 4295 2175
     19-6
     206 4650 2100 4950 2250
     211 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4725 2175 20 20 4725 2175 4745 2175
     221 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4800 2175 20 20 4800 2175 4820 2175
     231 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4875 2175 20 20 4875 2175 4895 2175
     24-6
     256 3450 2100 3750 2250
     261 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3525 2175 20 20 3525 2175 3545 2175
     271 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3600 2175 20 20 3600 2175 3620 2175
     281 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3675 2175 20 20 3675 2175 3695 2175
     29-6
     306 3300 2175 3600 2550
    16312 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    1732        1 1 1.00 45.00 90.00
    18          2925 2475 2925 2700
     33         3375 2175 3375 2400
    19342 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    20          2850 2700 3150 2700 3150 2850 2850 2850 2850 2700
     35         3300 2400 3600 2400 3600 2550 3300 2550 3300 2400
    2136-6
    22 6 4350 2475 4650 2850
     372 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     38         3150 1800 3150 2250
     392 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     40         2850 1800 2850 2250
     412 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     42         4650 1800 4650 2250
     432 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     44         4950 1800 4950 2250
     452 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     46         4500 1725 4500 2250
     472 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     48         5100 1725 5100 2250
     492 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     50         3450 1800 3450 2250
     512 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     52         3750 1800 3750 2250
     532 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     54         3300 1725 3300 2250
     552 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     56         3900 1725 3900 2250
     572 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     58         5250 1800 5250 2250
     592 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     60         5400 1800 5400 2250
     612 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     62         5550 1800 5550 2250
     632 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     64         5700 1800 5700 2250
     652 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     66         5850 1800 5850 2250
     672 1 0 3 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     68         2700 1725 2700 2250
    23692 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    2470        1 1 1.00 45.00 90.00
    25          4425 2475 4425 2700
    26 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    27          4350 2700 4650 2700 4650 2850 4350 2850 4350 2700
    28 -6
    29 6 3600 2475 3825 3150
     71         3375 1275 3375 1575
    30722 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    3173        1 1 1.00 45.00 90.00
    32          3675 2475 3675 2700
    33 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    34          3600 2700 3825 2700 3825 2850 3600 2850 3600 2700
    35 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    36          3600 3000 3825 3000 3825 3150 3600 3150 3600 3000
     74         2700 1275 2700 1575
     752 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 2
     76        1 1 1.00 45.00 90.00
     77         2775 1275 2775 1575
    37782 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    3879        1 1 1.00 45.00 90.00
    39          3675 2775 3675 3000
    40 -6
    41 6 4875 3600 5175 3750
    42 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4950 3675 20 20 4950 3675 4970 3675
    43 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5025 3675 20 20 5025 3675 5045 3675
    44 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5100 3675 20 20 5100 3675 5120 3675
    45 -6
    46 6 4875 2325 5175 2475
    47 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4950 2400 20 20 4950 2400 4970 2400
    48 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5025 2400 20 20 5025 2400 5045 2400
    49 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5100 2400 20 20 5100 2400 5120 2400
    50 -6
    51 6 5625 2325 5925 2475
    52 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5700 2400 20 20 5700 2400 5720 2400
    53 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5775 2400 20 20 5775 2400 5795 2400
    54 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5850 2400 20 20 5850 2400 5870 2400
    55 -6
    56 6 5625 3600 5925 3750
    57 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5700 3675 20 20 5700 3675 5720 3675
    58 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5775 3675 20 20 5775 3675 5795 3675
    59 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5850 3675 20 20 5850 3675 5870 3675
    60 -6
    61 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    62          2400 2100 2400 2550
    63 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    64          2550 2100 2550 2550
    65 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    66          2700 2100 2700 2550
    67 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    68          2850 2100 2850 2550
    69 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    70          3000 2100 3000 2550
    71 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    72          3600 2100 3600 2550
    73 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    74          3900 2100 3900 2550
    75 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    76          4050 2100 4050 2550
    77 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    78          4200 2100 4200 2550
    79 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    80          4350 2100 4350 2550
    81 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    82          4500 2100 4500 2550
    83 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    84          3300 1500 3300 1800
    85 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    86          3600 1500 3600 1800
    87 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    88          3900 1500 3900 1800
    89 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    90          3000 1500 4800 1500 4800 1800 3000 1800 3000 1500
     80         5175 1275 5175 1575
     812 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
     82        1 1 1.00 45.00 90.00
     83         5625 1275 5625 1575
     842 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
     85        1 1 1.00 45.00 90.00
     86         3750 1275 3750 1575
    91872 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 2
    9288        1 1 1.00 45.00 90.00
    93          3225 1650 2625 2100
     89         3825 1275 3825 1575
     902 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     91         2700 1950 6000 1950
     922 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     93         2700 2100 6000 2100
     942 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     95         2700 1800 6000 1800 6000 2250 2700 2250 2700 1800
    94962 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    9597        1 1 1.00 45.00 90.00
    96          3150 1650 2550 2100
     98         2775 2175 2775 2400
    97992 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    98100        1 1 1.00 45.00 90.00
    99          3450 1650 4050 2100
    100 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 2
    101         1 1 1.00 45.00 90.00
    102          3375 1650 3975 2100
    103 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    104          2100 2100 2100 2550
    105 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    106          1950 2250 3150 2250
    107 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    108          3450 2250 4650 2250
     101         2775 2475 2775 2700
    1091022 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    110          1950 2100 3150 2100 3150 2550 1950 2550 1950 2100
     103         2700 2700 2850 2700 2850 2850 2700 2850 2700 2700
    1111042 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    112          3450 2100 4650 2100 4650 2550 3450 2550 3450 2100
    113 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    114          2250 2100 2250 2550
    115 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    116          3750 2100 3750 2550
     105         2700 2400 2850 2400 2850 2550 2700 2550 2700 2400
    1171062 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    118107        1 1 1.00 45.00 90.00
    119          2025 2475 2025 2700
    120 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    121         1 1 1.00 45.00 90.00
    122          2025 2775 2025 3000
     108         4575 2175 4575 2400
    1231092 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    124          1950 3000 2100 3000 2100 3150 1950 3150 1950 3000
    125 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    126          1950 2700 2100 2700 2100 2850 1950 2850 1950 2700
     110         4500 2400 5025 2400 5025 2550 4500 2550 4500 2400
    1271112 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
    128112        1 1 1.00 45.00 90.00
    129          1950 3750 2700 3750 2700 3525
     113         3600 3375 4350 3375 4350 3150
    1301142 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    131          1950 3525 3150 3525 3150 3900 1950 3900 1950 3525
    132 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
    133         1 1 1.00 45.00 90.00
    134          3450 3750 4200 3750 4200 3525
    135 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    136          3450 3525 4650 3525 4650 3900 3450 3900 3450 3525
    137 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
    138         1 1 1.00 45.00 90.00
    139          3150 4650 4200 4650 4200 4275
    140 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    141          3150 4275 4650 4275 4650 4875 3150 4875 3150 4275
    142 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    143          1950 2400 3150 2400
    144 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    145          3450 2400 4650 2400
    146 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    147          5400 2100 5400 3900
    148 4 2 0 50 -1 0 11 0.0000 2 120 300 1875 2250 lock\001
    149 4 1 0 50 -1 0 12 0.0000 2 135 1935 3900 1425 N kernel-thread buckets\001
    150 4 1 0 50 -1 0 12 0.0000 2 195 810 4425 2025 heap$_2$\001
    151 4 1 0 50 -1 0 12 0.0000 2 195 810 2175 2025 heap$_1$\001
    152 4 2 0 50 -1 0 11 0.0000 2 120 270 1875 2400 size\001
    153 4 2 0 50 -1 0 11 0.0000 2 120 270 1875 2550 free\001
    154 4 1 0 50 -1 0 12 0.0000 2 180 825 2550 3450 local pool\001
    155 4 0 0 50 -1 0 12 0.0000 2 135 360 3525 3700 lock\001
    156 4 0 0 50 -1 0 12 0.0000 2 135 360 3225 4450 lock\001
    157 4 2 0 50 -1 0 12 0.0000 2 135 600 1875 3000 free list\001
    158 4 1 0 50 -1 0 12 0.0000 2 180 825 4050 3450 local pool\001
    159 4 1 0 50 -1 0 12 0.0000 2 180 1455 3900 4200 global pool (sbrk)\001
    160 4 0 0 50 -1 0 12 0.0000 2 135 360 2025 3700 lock\001
    161 4 1 0 50 -1 0 12 0.0000 2 180 720 6450 3150 free pool\001
    162 4 1 0 50 -1 0 12 0.0000 2 180 390 6450 2925 heap\001
     115         3600 3150 5100 3150 5100 3525 3600 3525 3600 3150
     1164 2 0 50 -1 0 11 0.0000 2 135 300 2625 1950 lock\001
     1174 1 0 50 -1 0 11 0.0000 2 150 1155 3000 1725 N$\\times$S$_1$\001
     1184 1 0 50 -1 0 11 0.0000 2 150 1155 3600 1725 N$\\times$S$_2$\001
     1194 1 0 50 -1 0 12 0.0000 2 180 390 4425 1500 heap\001
     1204 2 0 50 -1 0 12 0.0000 2 135 1140 2550 1425 kernel threads\001
     1214 2 0 50 -1 0 11 0.0000 2 120 270 2625 2100 size\001
     1224 2 0 50 -1 0 11 0.0000 2 120 270 2625 2250 free\001
     1234 2 0 50 -1 0 12 0.0000 2 135 600 2625 2700 free list\001
     1244 0 0 50 -1 0 12 0.0000 2 135 360 3675 3325 lock\001
     1254 1 0 50 -1 0 12 0.0000 2 180 1455 4350 3075 global pool (sbrk)\001
     1264 1 0 50 -1 0 11 0.0000 2 150 1110 4800 1725 N$\\times$S$_t$\001
  • doc/theses/mubeen_zulfiqar_MMath/figures/AllocDS2.fig

    rba897d21 r2e9b59b  
    88-2
    991200 2
    10 6 2850 2100 3150 2250
    11 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 2925 2175 20 20 2925 2175 2945 2175
    12 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3000 2175 20 20 3000 2175 3020 2175
    13 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3075 2175 20 20 3075 2175 3095 2175
    14 -6
    15 6 4050 2100 4350 2250
    16 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4125 2175 20 20 4125 2175 4145 2175
    17 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 2175 20 20 4200 2175 4220 2175
    18 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4275 2175 20 20 4275 2175 4295 2175
    19 -6
    20 6 4650 2100 4950 2250
    21 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4725 2175 20 20 4725 2175 4745 2175
    22 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4800 2175 20 20 4800 2175 4820 2175
    23 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4875 2175 20 20 4875 2175 4895 2175
    24 -6
    25 6 3450 2100 3750 2250
    26 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3525 2175 20 20 3525 2175 3545 2175
    27 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3600 2175 20 20 3600 2175 3620 2175
    28 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 3675 2175 20 20 3675 2175 3695 2175
    29 -6
    30 6 3300 2175 3600 2550
     106 2850 2475 3150 2850
    31112 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    3212        1 1 1.00 45.00 90.00
    33          3375 2175 3375 2400
     13         2925 2475 2925 2700
    34142 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    35          3300 2400 3600 2400 3600 2550 3300 2550 3300 2400
     15         2850 2700 3150 2700 3150 2850 2850 2850 2850 2700
     16-6
     176 4350 2475 4650 2850
     182 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
     19        1 1 1.00 45.00 90.00
     20         4425 2475 4425 2700
     212 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     22         4350 2700 4650 2700 4650 2850 4350 2850 4350 2700
     23-6
     246 3600 2475 3825 3150
     252 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
     26        1 1 1.00 45.00 90.00
     27         3675 2475 3675 2700
     282 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     29         3600 2700 3825 2700 3825 2850 3600 2850 3600 2700
     302 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     31         3600 3000 3825 3000 3825 3150 3600 3150 3600 3000
     322 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
     33        1 1 1.00 45.00 90.00
     34         3675 2775 3675 3000
     35-6
     366 1950 3525 3150 3900
     372 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
     38        1 1 1.00 45.00 90.00
     39         1950 3750 2700 3750 2700 3525
     402 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     41         1950 3525 3150 3525 3150 3900 1950 3900 1950 3525
     424 0 0 50 -1 0 12 0.0000 2 135 360 2025 3700 lock\001
     43-6
     446 4050 1575 4350 1725
     451 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4125 1650 20 20 4125 1650 4145 1650
     461 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4200 1650 20 20 4200 1650 4220 1650
     471 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4275 1650 20 20 4275 1650 4295 1650
     48-6
     496 4875 2325 6150 3750
     506 4875 2325 5175 2475
     511 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4950 2400 20 20 4950 2400 4970 2400
     521 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5025 2400 20 20 5025 2400 5045 2400
     531 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5100 2400 20 20 5100 2400 5120 2400
     54-6
     556 4875 3600 5175 3750
     561 3 0 1 0 0 50 -1 20 0.000 1 0.0000 4950 3675 20 20 4950 3675 4970 3675
     571 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5025 3675 20 20 5025 3675 5045 3675
     581 3 0 1 0 0 50 -1 20 0.000 1 0.0000 5100 3675 20 20 5100 3675 5120 3675
     59-6
     604 1 0 50 -1 0 12 0.0000 2 180 900 5700 3150 local pools\001
     614 1 0 50 -1 0 12 0.0000 2 180 465 5700 2925 heaps\001
     62-6
     636 3600 4050 5100 4650
     642 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
     65        1 1 1.00 45.00 90.00
     66         3600 4500 4350 4500 4350 4275
     672 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     68         3600 4275 5100 4275 5100 4650 3600 4650 3600 4275
     694 1 0 50 -1 0 12 0.0000 2 180 1455 4350 4200 global pool (sbrk)\001
     704 0 0 50 -1 0 12 0.0000 2 135 360 3675 4450 lock\001
    3671-6
    37722 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    38          3150 1800 3150 2250
     73         2400 2100 2400 2550
    [xfig coordinate data elided: this hunk redraws the allocator figure. Removed text labels: ``lock'', ``N$\times$S$_1$'', ``N$\times$S$_2$'', ``N$\times$S$_t$'', ``heap'', ``kernel threads'', ``size'', ``free'', ``free list'', ``lock'', ``global pool (sbrk)''. Added text labels: ``lock'', ``H heap buckets'', ``heap$_1$'', ``heap$_2$'', ``size'', ``free'', ``free list'', ``lock'', and two ``local pool'' boxes.]
  • doc/theses/mubeen_zulfiqar_MMath/intro.tex

    rba897d21 r2e9b59b  
    4848Attempts have been made to perform quasi garbage collection in C/\CC~\cite{Boehm88}, but it is a compromise.
    4949This thesis only examines dynamic memory-management with \emph{explicit} deallocation.
    50 While garbage collection and compaction are not part this work, many of the results are applicable to the allocation phase in any memory-management approach.
     50While garbage collection and compaction are not part of this work, many of the work's results are applicable to the allocation phase in any memory-management approach.
    5151
    5252Most programs use a general-purpose allocator, often the one provided implicitly by the programming-language's runtime.
     
    6565\begin{enumerate}[leftmargin=*]
    6666\item
    67 Implementation of a new stand-lone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
    68 
    69 \item
    70 Adopt returning of @nullptr@ for a zero-sized allocation, rather than an actual memory address, both of which can be passed to @free@.
    71 
    72 \item
    73 Extended the standard C heap functionality by preserving with each allocation its original request size versus the amount allocated, if an allocation is zero fill, and the allocation alignment.
    74 
    75 \item
    76 Use the zero fill and alignment as \emph{sticky} properties for @realloc@, to realign existing storage, or preserve existing zero-fill and alignment when storage is copied.
     67Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
     68
     69\item
     70Adopt @nullptr@ return for a zero-sized allocation, rather than an actual memory address, which can be passed to @free@.
     71
     72\item
     73Extend the standard C heap functionality by preserving with each allocation:
     74\begin{itemize}[itemsep=0pt]
     75\item
     76its request size plus the amount allocated,
     77\item
     78whether an allocation is zero fill,
     79\item
     80and allocation alignment.
     81\end{itemize}
     82
     83\item
     84Use the preserved zero fill and alignment as \emph{sticky} properties for @realloc@ to zero-fill and align when storage is extended or copied.
    7785Without this extension, it is unsafe to @realloc@ storage initially allocated with zero-fill/alignment as these properties are not preserved when copying.
    7886This silent generation of a problem is unintuitive to programmers and difficult to locate because it is transient.
     
    8694@resize( oaddr, alignment, size )@ re-purpose an old allocation with new alignment but \emph{without} preserving fill.
    8795\item
    88 @realloc( oaddr, alignment, size )@ same as previous @realloc@ but adding or changing alignment.
     96@realloc( oaddr, alignment, size )@ same as @realloc@ but adding or changing alignment.
    8997\item
    9098@aalloc( dim, elemSize )@ same as @calloc@ except memory is \emph{not} zero filled.
     
    96104
    97105\item
    98 Provide additional heap wrapper functions in \CFA to provide a complete orthogonal set of allocation operations and properties.
     106Provide additional heap wrapper functions in \CFA creating an orthogonal set of allocation operations and properties.
    99107
    100108\item
     
    109117@malloc_size( addr )@ returns the size of the memory allocation pointed-to by @addr@.
    110118\item
    111 @malloc_usable_size( addr )@ returns the usable size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.
     119@malloc_usable_size( addr )@ returns the usable (total) size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.
    112120\end{itemize}
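
To make the extended API concrete, the following is a minimal C/\CFA-style usage sketch (\CFA allows overloading, so the three-argument @realloc@ coexists with the C version; the assumption is that the \CFA allocator's header and library are available):

    #include <stdlib.h>          // assumed to declare aalloc, resize, malloc_size, etc.
    #include <stdio.h>

    int main( void ) {
        int * a = aalloc( 10, sizeof(int) );       // like calloc, but *not* zero filled
        a = realloc( a, 64, 20 * sizeof(int) );    // grow and (re)align to 64 bytes
        printf( "%zu <= %zu\n",                    // request size <= bin (usable) size
                malloc_size( a ), malloc_usable_size( a ) );
        a = resize( a, 128, 40 * sizeof(int) );    // re-purpose, fill *not* preserved
        free( a );
    }
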
    113121
     
    116124
    117125\item
    118 Provide complete, fast, and contention-free allocation statistics to help understand program behaviour:
     126Provide complete, fast, and contention-free allocation statistics to help understand allocation behaviour:
    119127\begin{itemize}
    120128\item
  • doc/theses/mubeen_zulfiqar_MMath/performance.tex

    rba897d21 r2e9b59b  
    11\chapter{Performance}
     2\label{c:Performance}
    23
    34\section{Machine Specification}
  • doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.bib

    rba897d21 r2e9b59b  
    124124}
    125125
    126 @misc{nedmalloc,
    127     author      = {Niall Douglas},
    128     title       = {nedmalloc version 1.06 Beta},
    129     month       = jan,
    130     year        = 2010,
    131     note        = {\textsf{http://\-prdownloads.\-sourceforge.\-net/\-nedmalloc/\-nedmalloc\_v1.06beta1\_svn1151.zip}},
     126@misc{ptmalloc2,
     127    author      = {Wolfram Gloger},
     128    title       = {ptmalloc version 2},
     129    month       = jun,
     130    year        = 2006,
     131    note        = {\href{http://www.malloc.de/malloc/ptmalloc2-current.tar.gz}{http://www.malloc.de/\-malloc/\-ptmalloc2-current.tar.gz}},
     132}
     133
     134@misc{GNUallocAPI,
     135    author      = {GNU},
     136    title       = {Summary of malloc-Related Functions},
     137    year        = 2020,
     138    note        = {\href{https://www.gnu.org/software/libc/manual/html\_node/Summary-of-Malloc.html}{https://www.gnu.org/\-software/\-libc/\-manual/\-html\_node/\-Summary-of-Malloc.html}},
     139}
     140
     141@misc{SeriallyReusable,
     142    author      = {IBM},
     143    title       = {Serially reusable programs},
     144    month       = mar,
     145    year        = 2021,
     146    note        = {\href{https://www.ibm.com/docs/en/ztpf/1.1.0.15?topic=structures-serially-reusable-programs}{https://www.ibm.com/\-docs/\-en/\-ztpf/\-1.1.0.15?\-topic=structures-serially-reusable-programs}},
     147}
     148
     149@misc{librseq,
     150    author      = {Mathieu Desnoyers},
     151    title       = {Library for Restartable Sequences},
     152    month       = mar,
     153    year        = 2022,
     154    note        = {\href{https://github.com/compudj/librseq}{https://github.com/compudj/librseq}},
    132155}
    133156
  • doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex

    rba897d21 r2e9b59b  
    6060% For hyperlinked PDF, suitable for viewing on a computer, use this:
    6161\documentclass[letterpaper,12pt,titlepage,oneside,final]{book}
     62\usepackage[T1]{fontenc}        % Latin-1 => 8-bit (256-glyph) characters, => | not dash, <> not Spanish question marks
    6263
    6364% For PDF, suitable for double-sided printing, change the PrintVersion variable below to "true" and use this \documentclass line instead of the one above:
     
    9495% Use the "hyperref" package
    9596% N.B. HYPERREF MUST BE THE LAST PACKAGE LOADED; ADD ADDITIONAL PKGS ABOVE
    96 \usepackage[pagebackref=true]{hyperref} % with basic options
     97\usepackage{url}
     98\usepackage[dvips,pagebackref=true]{hyperref} % with basic options
    9799%\usepackage[pdftex,pagebackref=true]{hyperref}
    98100% N.B. pagebackref=true provides links back from the References to the body text. This can cause trouble for printing.
     
    113115    citecolor=blue,        % color of links to bibliography
    114116    filecolor=magenta,      % color of file links
    115     urlcolor=blue           % color of external links
     117    urlcolor=blue,           % color of external links
     118    breaklinks=true
    116119}
    117120\ifthenelse{\boolean{PrintVersion}}{   % for improved print quality, change some hyperref options
     
    122125    urlcolor=black
    123126}}{} % end of ifthenelse (no else)
     127%\usepackage[dvips,plainpages=false,pdfpagelabels,pdfpagemode=UseNone,pagebackref=true,breaklinks=true,colorlinks=true,linkcolor=blue,citecolor=blue,urlcolor=blue]{hyperref}
     128\usepackage{breakurl}
     129\urlstyle{sf}
    124130
    125131%\usepackage[automake,toc,abbreviations]{glossaries-extra} % Exception to the rule of hyperref being the last add-on package
     
    171177\input{common}
    172178%\usepackageinput{common}
    173 \CFAStyle                                               % CFA code-style for all languages
     179\CFAStyle                                               % CFA code-style
     180\lstset{language=CFA}                                   % default language
    174181\lstset{basicstyle=\linespread{0.9}\sf}                 % CFA typewriter font
    175182\newcommand{\uC}{$\mu$\CC}
  • doc/theses/thierry_delisle_PhD/thesis/Makefile

    rba897d21 r2e9b59b  
    2929PICTURES = ${addsuffix .pstex, \
    3030        base \
     31        base_avg \
     32        cache-share \
     33        cache-noshare \
    3134        empty \
    3235        emptybit \
     
    3841        system \
    3942        cycle \
     43        result.cycle.jax.ops \
    4044}
    4145
     
    112116        python3 $< $@
    113117
     118build/result.%.ns.svg : data/% | ${Build}
     119        ../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops"
     120
     121build/result.%.ops.svg : data/% | ${Build}
     122        ../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second"
     123
    114124## pstex with inverted colors
    115125%.dark.pstex : fig/%.fig Makefile | ${Build}
  • doc/theses/thierry_delisle_PhD/thesis/fig/base.fig

    rba897d21 r2e9b59b  
    [xfig coordinate data elided: this hunk adds three dashed vertical separator lines to the figure; the existing ``Ready'' and ``Threads'' labels are unchanged.]
  • doc/theses/thierry_delisle_PhD/thesis/glossary.tex

    rba897d21 r2e9b59b  
    101101
    102102\longnewglossaryentry{at}
    103 {name={fred}}
     103{name={task}}
    104104{
    105105Abstract object representing a unit of work. Systems offer one or more concrete implementations of this concept (\eg \gls{kthrd}, \gls{job}); however, most scheduling concepts are independent of the particular implementation of the work representation. For this reason, this document uses the term \Gls{at} to mean any representation and not one in particular.
  • doc/theses/thierry_delisle_PhD/thesis/local.bib

    rba897d21 r2e9b59b  
    685685  note = "[Online; accessed 9-February-2021]"
    686686}
     687
     688@misc{wiki:rcu,
     689  author = "{Wikipedia contributors}",
     690  title = "Read-copy-update --- {W}ikipedia{,} The Free Encyclopedia",
     691  year = "2022",
     692  url = "https://en.wikipedia.org/wiki/Read-copy-update",
     693  note = "[Online; accessed 12-April-2022]"
     694}
     695
     696@misc{wiki:rwlock,
     697  author = "{Wikipedia contributors}",
     698  title = "Readers-writer lock --- {W}ikipedia{,} The Free Encyclopedia",
     699  year = "2021",
     700  url = "https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock",
     701  note = "[Online; accessed 12-April-2022]"
     702}
  • doc/theses/thierry_delisle_PhD/thesis/text/core.tex

    rba897d21 r2e9b59b  
    33Before discussing scheduling in general, where it is important to address systems that are changing states, this document discusses scheduling in a somewhat ideal scenario, where the system has reached a steady state. For this purpose, a steady state is loosely defined as a state where there are always \glspl{thrd} ready to run and the system has the resources necessary to accomplish the work, \eg, enough workers. In short, the system is neither overloaded nor underloaded.
    44
    5 I believe it is important to discuss the steady state first because it is the easiest case to handle and, relatedly, the case in which the best performance is to be expected. As such, when the system is either overloaded or underloaded, a common approach is to try to adapt the system to this new load and return to the steady state, \eg, by adding or removing workers. Therefore, flaws in scheduling the steady state can to be pervasive in all states.
     5It is important to discuss the steady state first because it is the easiest case to handle and, relatedly, the case in which the best performance is to be expected. As such, when the system is either overloaded or underloaded, a common approach is to try to adapt the system to this new load and return to the steady state, \eg, by adding or removing workers. Therefore, flaws in scheduling the steady state tend to be pervasive in all states.
    66
    77\section{Design Goals}
     
    2525It is important to note that these guarantees are expected only up to a point. \Glspl{thrd} that are ready to run should not be prevented from doing so, but they still share the limited hardware resources. Therefore, the guarantee is considered respected if a \gls{thrd} gets access to a \emph{fair share} of the hardware resources, even if that share is very small.
    2626
    27 Similarly the performance guarantee, the lack of interference among threads, is only relevant up to a point. Ideally, the cost of running and blocking should be constant regardless of contention, but the guarantee is considered satisfied if the cost is not \emph{too high} with or without contention. How much is an acceptable cost is obviously highly variable. For this document, the performance experimentation attempts to show the cost of scheduling is at worst equivalent to existing algorithms used in popular languages. This demonstration can be made by comparing applications built in \CFA to applications built with other languages or other models. Recall programmer expectation is that the impact of the scheduler can be ignored. Therefore, if the cost of scheduling is equivalent to or lower than other popular languages, I consider the guarantee achieved.
     27Similarly, the performance guarantee, the lack of interference among threads, is only relevant up to a point. Ideally, the cost of running and blocking should be constant regardless of contention, but the guarantee is considered satisfied if the cost is not \emph{too high} with or without contention. How much is an acceptable cost is obviously highly variable. For this document, the performance experimentation attempts to show the cost of scheduling is at worst equivalent to existing algorithms used in popular languages. This demonstration can be made by comparing applications built in \CFA to applications built with other languages or other models. Recall programmer expectation is that the impact of the scheduler can be ignored. Therefore, if the cost of scheduling is competitive with other popular languages, the guarantee is considered achieved.
    2828
    2929More precisely the scheduler should be:
     
    3333\end{itemize}
    3434
    35 \subsection{Fairness vs Scheduler Locality}
     35\subsection{Fairness Goals}
     36For this work, fairness is considered to have two strongly related requirements: true starvation freedom and ``fast'' load balancing.
     37
     38\paragraph{True starvation freedom} is more easily defined: as long as at least one \proc continues to dequeue \ats, all ready \ats should eventually be able to run.
     39In any running system, \procs can stop dequeuing \ats if they start running a \at that simply never parks.
     40Traditional workstealing schedulers do not have starvation freedom in these cases.
     41Now this requirement raises the question: what about preemption?
     42Generally speaking preemption happens on the timescale of several milliseconds, which brings us to the next requirement: ``fast'' load balancing.
     43
     44\paragraph{Fast load balancing} means that load balancing should happen faster than preemption would normally allow.
     45For interactive applications that need to run at 60, 90, or 120 frames per second, \ats having to wait several milliseconds to run are effectively starved.
     46Therefore load-balancing should be done at a faster pace, one that can detect starvation at the microsecond scale.
     47With that said, this is a much fuzzier requirement since it depends on the number of \procs, the number of \ats and the general load of the system.
     48
     49\subsection{Fairness vs Scheduler Locality} \label{fairnessvlocal}
    3650An important performance factor in modern architectures is cache locality. Waiting for data at lower levels or not present in the cache can have a major impact on performance. Having multiple \glspl{hthrd} writing to the same cache lines also leads to cache lines that must be waited on. It is therefore preferable to divide data among each \gls{hthrd}\footnote{This partitioning can be an explicit division up front or using data structures where different \glspl{hthrd} are naturally routed to different cache lines.}.
    3751
    38 For a scheduler, having good locality\footnote{This section discusses \emph{internal locality}, \ie, the locality of the data used by the scheduler versus \emph{external locality}, \ie, how the data used by the application is affected by scheduling. External locality is a much more complicated subject and is discussed in part~\ref{Evaluation} on evaluation.}, \ie, having the data local to each \gls{hthrd}, generally conflicts with fairness. Indeed, good locality often requires avoiding the movement of cache lines, while fairness requires dynamically moving a \gls{thrd}, and as consequence cache lines, to a \gls{hthrd} that is currently available.
     52For a scheduler, having good locality\footnote{This section discusses \emph{internal locality}, \ie, the locality of the data used by the scheduler versus \emph{external locality}, \ie, how the data used by the application is affected by scheduling. External locality is a much more complicated subject and is discussed in the next section.}, \ie, having the data local to each \gls{hthrd}, generally conflicts with fairness. Indeed, good locality often requires avoiding the movement of cache lines, while fairness requires dynamically moving a \gls{thrd}, and as consequence cache lines, to a \gls{hthrd} that is currently available.
    3953
    4054However, I claim that in practice it is possible to strike a balance between fairness and performance because these goals do not necessarily overlap temporally, where Figure~\ref{fig:fair} shows a visual representation of this behaviour. As mentioned, some unfairness is acceptable; therefore it is desirable to have an algorithm that prioritizes cache locality as long as thread delay does not exceed the execution mental-model.
     
    4862\end{figure}
    4963
    50 \section{Design}
     64\subsection{Performance Challenges}\label{pref:challenge}
     65While there exist a multitude of potential scheduling algorithms, they generally have to contend with the same performance challenges. Since these challenges are recurring themes in the design of a scheduler, it is relevant to describe the central ones here before looking at the design.
     66
     67\subsubsection{Scalability}
     68The most basic performance challenge of a scheduler is scalability.
     69Given a large number of \procs and an even larger number of \ats, scalability measures how fast \procs can enqueue and dequeue \ats.
     70One could expect that doubling the number of \procs would double the rate at which \ats are dequeued, but contention on the internal data structure of the scheduler can lead to worse scaling.
     71While the ready-queue itself can be sharded to alleviate the main source of contention, auxiliary scheduling features, \eg counting ready \ats, can also be sources of contention.
     72
     73\subsubsection{Migration Cost}
     74Another important source of latency in scheduling is migration.
     75An \at is said to have migrated if it is executed by two different \procs consecutively, which is the process discussed in \ref{fairnessvlocal}.
     76Migrations can have many different causes, but in certain programs it can be all but impossible to limit migrations.
     77Chapter~\ref{microbench}, for example, has a benchmark where any \at can potentially unblock any other \at, which can lead to \ats migrating more often than not.
     78Because of this, it is important to design the internal data structures of the scheduler to limit the latency penalty from migrations.
     79
     80
     81\section{Inspirations}
    5182In general, a na\"{i}ve \glsxtrshort{fifo} ready-queue does not scale with increased parallelism from \glspl{hthrd}, resulting in decreased performance. The problem is adding/removing \glspl{thrd} is a single point of contention. As shown in the evaluation sections, most production schedulers do scale when adding \glspl{hthrd}. The solution to this problem is to shard the ready-queue: create multiple sub-ready-queues that multiple \glspl{hthrd} can access and modify without interfering.
    5283
    53 Before going into the design of \CFA's scheduler proper, I want to discuss two sharding solutions which served as the inspiration scheduler in this thesis.
     84Before going into the design of \CFA's scheduler proper, it is relevant to discuss two sharding solutions which served as inspiration for the scheduler in this thesis.
    5485
    5586\subsection{Work-Stealing}
    5687
    57 As I mentioned in \ref{existing:workstealing}, a popular pattern shard the ready-queue is work-stealing. As mentionned, in this pattern each \gls{proc} has its own ready-queue and \glspl{proc} only access each other's ready-queue if they run out of work.
    58 The interesting aspect of workstealing happen in easier scheduling cases, \ie enough work for everyone but no more and no load balancing needed. In these cases, work-stealing is close to optimal scheduling: it can achieve perfect locality and have no contention.
     88As mentioned in \ref{existing:workstealing}, a popular pattern shard the ready-queue is work-stealing.
     89In this pattern each \gls{proc} has its own local ready-queue and \glspl{proc} only access each other's ready-queue if they run out of work on their local ready-queue.
     90The interesting aspect of workstealing happen in easier scheduling cases, \ie enough work for everyone but no more and no load balancing needed.
     91In these cases, work-stealing is close to optimal scheduling: it can achieve perfect locality and have no contention.
    5992On the other hand, work-stealing schedulers only attempt to do load-balancing when a \gls{proc} runs out of work.
    60 This means that the scheduler may never balance unfairness that does not result in a \gls{proc} running out of work.
     93This means that the scheduler never balances unfair loads unless they result in a \gls{proc} running out of work.
    6194Chapter~\ref{microbench} shows that in pathological cases this problem can lead to indefinite starvation.
    6295
    6396
    64 Based on these observation, I conclude that \emph{perfect} scheduler should behave very similarly to work-stealing in the easy cases, but should have more proactive load-balancing if the need arises.
     97Based on these observations, the conclusion is that a \emph{perfect} scheduler should behave very similarly to work-stealing in the easy cases, but should have more proactive load-balancing if the need arises.
    6598
    6699\subsection{Relaxed-Fifo}
    67100An entirely different scheme is to create a ``relaxed-FIFO'' queue as in \todo{cite Trevor's paper}. This approach forgoes any ownership between \gls{proc} and ready-queue, and simply creates a pool of ready-queues from which the \glspl{proc} can pick.
    68101\Glspl{proc} choose ready-queues at random, but timestamps are added to all elements of the queue and dequeues are done by picking two queues and dequeuing the oldest element.
     102All subqueues are protected by TryLocks and \procs simply pick a different subqueue if they fail to acquire the TryLock.
    69103The result is a queue that has both decent scalability and sufficient fairness.
    70104The lack of ownership means that as long as one \gls{proc} is still able to repeatedly dequeue elements, it is unlikely that any element will stay on the queue for much longer than any other element.
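
As a sketch, a pop on such a relaxed-FIFO queue could look as follows in C (helper names such as head_ts, trylock, and pop_head are hypothetical):

    // pick two random subqueues and dequeue the element with the oldest timestamp
    at_t * relaxed_pop( subqueue_t queues[], unsigned n ) {
        for ( ;; ) {
            unsigned a = random_below( n ), b = random_below( n );
            // prefer the subqueue whose head has the older (smaller) timestamp
            if ( head_ts( &queues[a] ) > head_ts( &queues[b] ) ) {
                unsigned t = a; a = b; b = t;
            }
            if ( ! trylock( &queues[a].lock ) ) continue;  // contended: pick again
            at_t * at = pop_head( &queues[a] );            // NULL if subqueue empty
            unlock( &queues[a].lock );
            if ( at ) return at;
        }
    }
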
     
    75109
    76110While the fairness, of this scheme is good, it does suffer in terms of performance.
    77 It requires very wide sharding, \eg at least 4 queues per \gls{hthrd}, and the randomness means locality can suffer significantly and finding non-empty queues can be difficult.
    78 
    79 \section{\CFA}
    80 The \CFA is effectively attempting to merge these two approaches, keeping the best of both.
    81 It is based on the
     111It requires very wide sharding, \eg at least 4 queues per \gls{hthrd}, and finding non-empty queues can be difficult if there are too few ready \ats.
     112
     113\section{Relaxed-FIFO++}
     114Since it has inherent fairness qualities and decent performance in the presence of many \ats, the relaxed-FIFO queue appears as a good candidate to form the basis of a scheduler.
     115The most obvious problem is workloads where the number of \ats is barely greater than the number of \procs.
     116In these situations, the wide sharding means most of the sub-queues from which the relaxed queue is formed will be empty.
     117The consequence is that when a dequeue operation attempts to pick a sub-queue at random, it is likely to pick an empty sub-queue and have to pick again.
     118This problem can repeat an unbounded number of times.
     119
     120As this is the most obvious challenge, it is worth addressing first.
     121The obvious solution is to supplement each subqueue with some sharded data structure that keeps track of which subqueues are empty.
     122This data structure can take many forms, for example, a simple bitmask or a binary tree that tracks which branches are empty.
     123Following a binary tree on each pick has fairly good big-O complexity, and many modern architectures have powerful bitmask-manipulation instructions.
     124However, precisely tracking which sub-queues are empty is actually fundamentally problematic.
     125The reason is that each subqueue is already a form of sharding and the sharding width has presumably already been chosen to avoid contention.
     126Tracking which sub-queues are empty is only useful if the tracking mechanism uses denser sharding than the sub-queues, but that denser sharding invariably creates a new source of contention.
     127Conversely, if the tracking mechanism is not denser than the sub-queues, then it generally does not prove useful, because reading this new data structure risks being as costly as simply picking a sub-queue at random.
     128Early experiments with this approach have shown that even with low success rates, randomly picking a sub-queue can be faster than a simple tree walk.
     129
     130The exception to this rule is using local tracking.
     131If each \proc keeps track locally of which sub-queue is empty, then this can be done with a very dense data structure without introducing a new source of contention.
     132The consequence of local tracking, however, is that the information is not complete.
     133Each \proc is only aware of the last state it saw for each subqueue, but has no information about freshness.
     134Even on systems with low \gls{hthrd} count, \eg 4 or 8, this can quickly lead to the local information being no better than the random pick.
     135This is due in part to the cost of maintaining this information and to its poor quality.
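
For illustration, a very low-cost form of local tracking could be a per-\proc bitmask, as in the following C sketch (all names hypothetical; assumes at most 64 subqueues):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        uint64_t maybe_nonempty;  // bit i set => subqueue i looked non-empty last visit
    } local_view;

    // cheap to maintain: record only what a pick already observed (may go stale)
    static inline void observe( local_view * v, unsigned q, bool empty ) {
        if ( empty ) v->maybe_nonempty &= ~(1ull << q);
        else         v->maybe_nonempty |=  (1ull << q);
    }

    // cheap to consult: veto one random pick that looked empty last time
    static inline unsigned pick( local_view * v, unsigned r1, unsigned r2 ) {
        return ( v->maybe_nonempty & (1ull << r1) ) ? r1 : r2;
    }
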
     136
     137However, using a very low cost approach to local tracking may actually be beneficial.
     138If the local tracking is no more costly than the random pick, then \emph{any} improvement to the success rate, however low, leads to a performance benefit.
     139This leads to the following approach:
     140
     141\subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/}
     142The Relaxed-FIFO approach can be made to handle the case of mostly empty sub-queues by tweaking the \glsxtrlong{prng}.
     143The \glsxtrshort{prng} state can be seen as containing a list of all the future sub-queues that will be accessed.
     144While this is not particularly useful on its own, the consequence is that if the \glsxtrshort{prng} algorithm can be run \emph{backwards}, then the state also contains a list of all the subqueues that were accessed.
     145Luckily, bidirectional \glsxtrshort{prng} algorithms do exist, for example some Linear Congruential Generators\cit{https://en.wikipedia.org/wiki/Linear\_congruential\_generator} support running the algorithm backwards while offering good quality and performance.
     146This particular \glsxtrshort{prng} can be used as follows:
     147
     148Each \proc maintains two \glsxtrshort{prng} states, which will be referred to as \texttt{F} and \texttt{B}.
     149
     150When a \proc attempts to dequeue a \at, it picks a subqueue by running \texttt{B} backwards.
     151When a \proc attempts to enqueue a \at, it runs \texttt{F} forward to pick the subqueue to enqueue to.
     152If the enqueue is successful, the state \texttt{B} is overwritten with the content of \texttt{F}.
     153
     154The result is that each \proc will tend to dequeue \ats that it has itself enqueued.
     155When most sub-queues are empty, this technique increases the odds of finding \ats at very low cost, while also offering an improvement on locality in many cases.
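
A C sketch of this scheme, using a 64-bit linear congruential generator (the constants are Knuth's well-known values; the surrounding names are hypothetical):

    #include <stdint.h>

    static const uint64_t A = 6364136223846793005ull, C = 1442695040888963407ull;

    static inline uint64_t fwd( uint64_t x ) { return A * x + C; }  // wraps mod 2^64

    static inline uint64_t bwd( uint64_t x ) {  // exact inverse of fwd
        uint64_t ia = A;                        // Newton iteration for A^-1 mod 2^64
        for ( int i = 0; i < 5; i += 1 ) ia *= 2 - A * ia;
        return ia * (x - C);
    }

    typedef struct { uint64_t F, B; } proc_rng;

    unsigned pick_enqueue( proc_rng * r, unsigned nqueues ) {  // run F forward
        r->F = fwd( r->F );
        return r->F % nqueues;
    }
    void enqueue_success( proc_rng * r ) { r->B = r->F; }      // B now replays F

    unsigned pick_dequeue( proc_rng * r, unsigned nqueues ) {  // run B backwards
        unsigned q = r->B % nqueues;
        r->B = bwd( r->B );
        return q;
    }
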
     156
     157However, while this approach does notably improve performance in many cases, this algorithm is still not competitive with work-stealing algorithms.
     158The fundamental problem is that the constant randomness limits how much locality the scheduler offers.
     159This becomes problematic both because the scheduler is likely to get cache misses on internal data-structures and because migrations become very frequent.
     160Therefore, since the approach of modifying the relaxed-FIFO algorithm to behave more like work stealing does not pan out, the alternative is to do it the other way around.
     161
     162\section{Work Stealing++}
     163To add stronger fairness guarantees to workstealing, a few changes are needed.
     164First, the relaxed-FIFO algorithm has fundamentally better fairness because each \proc always monitors all subqueues.
     165Therefore the workstealing algorithm must be prepended with some monitoring.
     166Before attempting to dequeue from a \proc's local queue, the \proc must make some effort to ensure remote queues are not being neglected.
     167To make this possible, \procs must be able to determine which \at has been on the ready-queue the longest.
     168This is the second aspect that must be added.
     169The relaxed-FIFO approach uses timestamps for each \at and this is also what is done here.
     170
    82171\begin{figure}
    83172        \centering
    84173        \input{base.pstex_t}
    85         \caption[Base \CFA design]{Base \CFA design \smallskip\newline A list of sub-ready queues offers the sharding, two per \glspl{proc}. However, \glspl{proc} can access any of the sub-queues.}
     174        \caption[Base \CFA design]{Base \CFA design \smallskip\newline A pool of sub-ready queues offers the sharding, two per \glspl{proc}. Each \gls{proc} has local subqueues; however, \glspl{proc} can access any of the sub-queues. Each \at is timestamped when enqueued.}
    86175        \label{fig:base}
    87176\end{figure}
    88 
    89 
    90 
    91 % The common solution to the single point of contention is to shard the ready-queue so each \gls{hthrd} can access the ready-queue without contention, increasing performance.
    92 
    93 % \subsection{Sharding} \label{sec:sharding}
    94 % An interesting approach to sharding a queue is presented in \cit{Trevors paper}. This algorithm presents a queue with a relaxed \glsxtrshort{fifo} guarantee using an array of strictly \glsxtrshort{fifo} sublists as shown in Figure~\ref{fig:base}. Each \emph{cell} of the array has a timestamp for the last operation and a pointer to a linked-list with a lock. Each node in the list is marked with a timestamp indicating when it is added to the list. A push operation is done by picking a random cell, acquiring the list lock, and pushing to the list. If the cell is locked, the operation is simply retried on another random cell until a lock is acquired. A pop operation is done in a similar fashion except two random cells are picked. If both cells are unlocked with non-empty lists, the operation pops the node with the oldest timestamp. If one of the cells is unlocked and non-empty, the operation pops from that cell. If both cells are either locked or empty, the operation picks two new random cells and tries again.
    95 
    96 % \begin{figure}
    97 %       \centering
    98 %       \input{base.pstex_t}
    99 %       \caption[Relaxed FIFO list]{Relaxed FIFO list \smallskip\newline List at the base of the scheduler: an array of strictly FIFO lists. The timestamp is in all nodes and cell arrays.}
    100 %       \label{fig:base}
    101 % \end{figure}
    102 
    103 % \subsection{Finding threads}
    104 % Once threads have been distributed onto multiple queues, identifying empty queues becomes a problem. Indeed, if the number of \glspl{thrd} does not far exceed the number of queues, it is probable that several of the cell queues are empty. Figure~\ref{fig:empty} shows an example with 2 \glspl{thrd} running on 8 queues, where the chances of getting an empty queue is 75\% per pick, meaning two random picks yield a \gls{thrd} only half the time. This scenario leads to performance problems since picks that do not yield a \gls{thrd} are not useful and do not necessarily help make more informed guesses.
    105 
    106 % \begin{figure}
    107 %       \centering
    108 %       \input{empty.pstex_t}
    109 %       \caption[``More empty'' Relaxed FIFO list]{``More empty'' Relaxed FIFO list \smallskip\newline Emptier state of the queue: the array contains many empty cells, that is strictly FIFO lists containing no elements.}
    110 %       \label{fig:empty}
    111 % \end{figure}
    112 
    113 % There are several solutions to this problem, but they ultimately all have to encode if a cell has an empty list. My results show the density and locality of this encoding is generally the dominating factor in these scheme. Classic solutions to this problem use one of three techniques to encode the information:
    114 
    115 % \paragraph{Dense Information} Figure~\ref{fig:emptybit} shows a dense bitmask to identify the cell queues currently in use. This approach means processors can often find \glspl{thrd} in constant time, regardless of how many underlying queues are empty. Furthermore, modern x86 CPUs have extended bit manipulation instructions (BMI2) that allow searching the bitmask with very little overhead compared to the randomized selection approach for a filled ready queue, offering good performance even in cases with many empty inner queues. However, this technique has its limits: with a single word\footnote{Word refers here to however many bits can be written atomically.} bitmask, the total amount of ready-queue sharding is limited to the number of bits in the word. With a multi-word bitmask, this maximum limit can be increased arbitrarily, but the look-up time increases. Finally, a dense bitmap, either single or multi-word, causes additional contention problems that reduces performance because of cache misses after updates. This central update bottleneck also means the information in the bitmask is more often stale before a processor can use it to find an item, \ie mask read says there are available \glspl{thrd} but none on queue when the subsequent atomic check is done.
    116 
    117 % \begin{figure}
    118 %       \centering
    119 %       \vspace*{-5pt}
    120 %       {\resizebox{0.75\textwidth}{!}{\input{emptybit.pstex_t}}}
    121 %       \vspace*{-5pt}
    122 %       \caption[Underloaded queue with bitmask]{Underloaded queue with bitmask indicating array cells with items.}
    123 %       \label{fig:emptybit}
    124 
    125 %       \vspace*{10pt}
    126 %       {\resizebox{0.75\textwidth}{!}{\input{emptytree.pstex_t}}}
    127 %       \vspace*{-5pt}
    128 %       \caption[Underloaded queue with binary search-tree]{Underloaded queue with binary search-tree indicating array cells with items.}
    129 %       \label{fig:emptytree}
    130 
    131 %       \vspace*{10pt}
    132 %       {\resizebox{0.95\textwidth}{!}{\input{emptytls.pstex_t}}}
    133 %       \vspace*{-5pt}
    134 %       \caption[Underloaded queue with per processor bitmask]{Underloaded queue with per processor bitmask indicating array cells with items.}
    135 %       \label{fig:emptytls}
    136 % \end{figure}
    137 
    138 % \paragraph{Sparse Information} Figure~\ref{fig:emptytree} shows an approach using a hierarchical tree data-structure to reduce contention and has been shown to work in similar cases~\cite{ellen2007snzi}. However, this approach may lead to poorer performance due to the inherent pointer chasing cost while still allowing significant contention on the nodes of the tree if the tree is shallow.
    139 
    140 % \paragraph{Local Information} Figure~\ref{fig:emptytls} shows an approach using dense information, similar to the bitmap, but each \gls{hthrd} keeps its own independent copy. While this approach can offer good scalability \emph{and} low latency, the liveliness and discovery of the information can become a problem. This case is made worst in systems with few processors where even blind random picks can find \glspl{thrd} in a few tries.
    141 
    142 % I built a prototype of these approaches and none of these techniques offer satisfying performance when few threads are present. All of these approach hit the same 2 problems. First, randomly picking sub-queues is very fast. That speed means any improvement to the hit rate can easily be countered by a slow-down in look-up speed, whether or not there are empty lists. Second, the array is already sharded to avoid contention bottlenecks, so any denser data structure tends to become a bottleneck. In all cases, these factors meant the best cases scenario, \ie many threads, would get worst throughput, and the worst-case scenario, few threads, would get a better hit rate, but an equivalent poor throughput. As a result I tried an entirely different approach.
    143 
    144 % \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/}
    145 % In the worst-case scenario there are only few \glspl{thrd} ready to run, or more precisely given $P$ \glspl{proc}\footnote{For simplicity, this assumes there is a one-to-one match between \glspl{proc} and \glspl{hthrd}.}, $T$ \glspl{thrd} and $\epsilon$ a very small number, than the worst case scenario can be represented by $T = P + \epsilon$, with $\epsilon \ll P$. It is important to note in this case that fairness is effectively irrelevant. Indeed, this case is close to \emph{actually matching} the model of the ``Ideal multi-tasking CPU'' on page \pageref{q:LinuxCFS}. In this context, it is possible to use a purely internal-locality based approach and still meet the fairness requirements. This approach simply has each \gls{proc} running a single \gls{thrd} repeatedly. Or from the shared ready-queue viewpoint, each \gls{proc} pushes to a given sub-queue and then pops from the \emph{same} subqueue. The challenge is for the the scheduler to achieve good performance in both the $T = P + \epsilon$ case and the $T \gg P$ case, without affecting the fairness guarantees in the later.
    146 
    147 % To handle this case, I use a \glsxtrshort{prng}\todo{Fix missing long form} in a novel way. There exist \glsxtrshort{prng}s that are fast, compact and can be run forward \emph{and} backwards.  Linear congruential generators~\cite{wiki:lcg} are an example of \glsxtrshort{prng}s of such \glsxtrshort{prng}s. The novel approach is to use the ability to run backwards to ``replay'' the \glsxtrshort{prng}. The scheduler uses an exclusive \glsxtrshort{prng} instance per \gls{proc}, the random-number seed effectively starts an encoding that produces a list of all accessed subqueues, from latest to oldest. Replaying the \glsxtrshort{prng} to identify cells accessed recently and which probably have data still cached.
    148 
    149 % The algorithm works as follows:
    150 % \begin{itemize}
    151 %       \item Each \gls{proc} has two \glsxtrshort{prng} instances, $F$ and $B$.
    152 %       \item Push and Pop operations occur as discussed in Section~\ref{sec:sharding} with the following exceptions:
    153 %       \begin{itemize}
    154 %               \item Push operations use $F$ going forward on each try and on success $F$ is copied into $B$.
    155 %               \item Pop operations use $B$ going backwards on each try.
    156 %       \end{itemize}
    157 % \end{itemize}
    158 
    159 % The main benefit of this technique is that it basically respects the desired properties of Figure~\ref{fig:fair}. When looking for work, a \gls{proc} first looks at the last cell they pushed to, if any, and then move backwards through its accessed cells. As the \gls{proc} continues looking for work, $F$ moves backwards and $B$ stays in place. As a result, the relation between the two becomes weaker, which means that the probablisitic fairness of the algorithm reverts to normal. Chapter~\ref{proofs} discusses more formally the fairness guarantees of this algorithm.
    160 
    161 % \section{Details}
     177The algorithm is structured as shown in Figure~\ref{fig:base}.
     178This is very similar to classic workstealing except the local queues are placed in an array so \procs can access each other's queues in constant time.
     179Sharding width can be adjusted based on need.
     180When a \proc attempts to dequeue a \at, it first picks a random remote queue and compares its timestamp to the timestamps of the local queue(s), dequeuing from the remote queue if needed.
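
In outline, one dequeue attempt could look like the following C sketch (helper names hypothetical; a smaller head timestamp means a longer wait):

    at_t * steal_pop( proc_t * self, subqueue_t queues[], unsigned n ) {
        unsigned remote = random_below( n );
        unsigned local  = self->my_queue;
        // help the remote queue only if its head has waited longer
        if ( head_ts( &queues[remote] ) < head_ts( &queues[local] ) )
            return try_pop( &queues[remote] );
        return try_pop( &queues[local] );
    }
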
     181
     182Implemented as naively stated above, this approach has some obvious performance problems.
     183First, it is necessary to have some damping effect on helping.
     184Random effects like cache misses and preemption can add spurious but short bursts of latency for which helping is not helpful, pun intended.
     185The effect of these bursts would be to cause more migrations than needed and make this workstealing approach slow down to match the relaxed-FIFO approach.
     186
     187\begin{figure}
     188        \centering
     189        \input{base_avg.pstex_t}
     190        \caption[\CFA design with Moving Average]{\CFA design with Moving Average \smallskip\newline A moving average is added to each subqueue.}
     191        \label{fig:base-ma}
     192\end{figure}
     193
     194A simple solution to this problem is to compare an exponential moving average\cit{https://en.wikipedia.org/wiki/Moving\_average\#Exponential\_moving\_average} instead of the raw timestamps, as shown in Figure~\ref{fig:base-ma}.
     195Note that this is slightly more complex than it sounds because, since the \at at the head of a subqueue is still waiting, its wait time has not ended.
     196Therefore the exponential moving average is actually an exponential moving average of how long each already-dequeued \at has waited.
     197To compare subqueues, the timestamp at the head must be compared to the current time, yielding the best-case wait time for the \at at the head of the queue.
     198This new wait time is averaged with the stored average.
     199To further limit unnecessary migration, a bias can be added to the local queue, where a remote queue is helped only if its moving average is more than \emph{X} times the local queue's average.
     200None of the experiments run with these schedulers indicate that the choice of the weight for the moving average or the choice of bias is particularly important.
     201Weights and biases of similar \emph{magnitudes} have similar effects.
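
The bookkeeping can be sketched as follows in C (ALPHA and BIAS are hypothetical constants; per the text, only their magnitudes matter):

    #include <stdbool.h>
    #include <stdint.h>

    #define ALPHA 0.10   // weight of the exponential moving average
    #define BIAS  2.00   // help a remote queue only if ~2x older than the local one

    typedef struct {
        uint64_t head_ts;  // enqueue time of the element currently at the head
        double   avg;      // moving average of completed waits
    } subqueue_stats;

    // on dequeue, fold the now-finished wait into the average
    void on_dequeue( subqueue_stats * s, uint64_t now, uint64_t enq_ts ) {
        s->avg = ALPHA * (double)(now - enq_ts) + (1.0 - ALPHA) * s->avg;
    }

    // best-case wait of the head element, averaged with the stored average
    double cost( const subqueue_stats * s, uint64_t now ) {
        return ALPHA * (double)(now - s->head_ts) + (1.0 - ALPHA) * s->avg;
    }

    bool should_help( const subqueue_stats * loc, const subqueue_stats * rem,
                      uint64_t now ) {
        return cost( rem, now ) > BIAS * cost( loc, now );
    }
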
     202
     203With these additions to workstealing, scheduling can be made as fair as the relaxed-FIFO approach, while avoiding the majority of unnecessary migrations.
     204Unfortunately, the performance of this approach does suffer in cases with no risk of starvation.
     205The problem is that the constant polling of remote subqueues generally entails a cache miss.
     206To make things worse, the more active a remote subqueue is, \ie the more frequently \ats are enqueued and dequeued from it, the higher the chance that polling incurs a cache miss.
     207Conversely, the active subqueues do not benefit much from helping since starvation is already a non-issue.
     208This puts the algorithm in an awkward situation where it is paying a cost, but the cost itself suggests the operation was unnecessary.
     209The good news is that this problem can be mitigated.
     210
     211\subsection{Redundant Timestamps}
     212The problem with polling remote queues is due to a tension in the consistency requirements on the subqueues.
     213For the subqueues, correctness is critical. There must be a consensus among \procs on which subqueues hold which \ats.
     214Since the timestamps are used for fairness, it is also important to have consensus on which \at is the oldest.
     215However, when deciding if a remote subqueue is worth polling, correctness is much less of a problem.
     216Since the only need is that a subqueue will eventually be polled, some data staleness is acceptable.
     217This leads to a tension where stale timestamps are only problematic in some cases.
     218Furthermore, stale timestamps can be somewhat desirable since lower freshness requirements mean less tension on the cache coherence protocol.
     219
     220
     221\begin{figure}
     222        \centering
     223        % \input{base_ts2.pstex_t}
     224        \caption[\CFA design with Redundant Timestamps]{\CFA design with Redundant Timestamps \smallskip\newline An array is added containing a copy of the timestamps. These timestamps are written with relaxed atomics, without fencing, leading to fewer cache invalidations.}
     225        \label{fig:base-ts2}
     226\end{figure}
     227A solution to this is to create a second array containing a copy of the timestamps and average.
     228This copy is updated \emph{after} the subqueue's critical sections using relaxed atomics.
     229\Glspl{proc} now check if polling is needed by comparing the copy of the remote timestamp instead of the actual timestamp.
     230The result is that since there is no fencing, the writes can be buffered and cause fewer cache invalidations.
     231
     232The correctness argument here is somewhat subtle.
     233The data used for deciding whether or not to poll a queue can be stale as long as it does not cause starvation.
     234Therefore, it is acceptable if stale data makes queues appear older than they really are, but not fresher.
     235For the timestamps, this means that missed writes to the timestamp are acceptable since they make the head \at look older.
     236For the moving average, as long as the operations are RW-safe, the average is guaranteed to yield a value between the oldest and newest values written.
     237Therefore these unprotected reads of the timestamp and average satisfy the limited correctness that is required.
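
A C11 sketch of the shadow copies (names and the fixed queue count are hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NQUEUES 64
    _Atomic uint64_t ts_copy[NQUEUES];  // redundant, unfenced timestamp copies

    // called *after* the subqueue's critical section
    void publish_ts( unsigned q, uint64_t authoritative_ts ) {
        // relaxed store: may be buffered, so fewer cache invalidations;
        // a delayed update only makes the queue look older, which is safe
        atomic_store_explicit( &ts_copy[q], authoritative_ts, memory_order_relaxed );
    }

    bool worth_polling( unsigned remote, uint64_t local_head_ts ) {
        uint64_t r = atomic_load_explicit( &ts_copy[remote], memory_order_relaxed );
        return r < local_head_ts;  // remote head appears older than the local one
    }
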
     238
     239\begin{figure}
     240        \centering
     241        \input{cache-share.pstex_t}
     242        \caption[CPU design with wide L3 sharing]{CPU design with wide L3 sharing \smallskip\newline A very simple CPU with 4 \glspl{hthrd}. L1 and L2 are private to each \gls{hthrd} but the L3 is shared across the entire CPU.}
     243        \label{fig:cache-share}
     244\end{figure}
     245
     246\begin{figure}
     247        \centering
     248        \input{cache-noshare.pstex_t}
     249        \caption[CPU design with a narrower L3 sharing]{CPU design with a narrower L3 sharing \smallskip\newline A different CPU design, still with 4 \glspl{hthrd}. L1 and L2 are still private to each \gls{hthrd}, but the L3 is shared by only part of the CPU, leaving two distinct L3 instances.}
     250        \label{fig:cache-noshare}
     251\end{figure}
     252
     253With redundant timestamps, this scheduling algorithm achieves both the fairness and performance requirements on some machines.
     254The problem is that the cost of polling and helping is not necessarily consistent across each \gls{hthrd}.
     255For example, on machines where the motherboard holds multiple CPUs, cache misses can be satisfied from a cache that belongs to the CPU that missed, the \emph{local} CPU, or by a different CPU, a \emph{remote} one.
     256Cache misses that are satisfied by a remote CPU have higher latency than those satisfied by the local CPU.
     257However, this is not specific to systems with multiple CPUs.
     258Depending on the cache structure, cache misses can have different latencies on the same CPU.
     259The AMD EPYC 7662 CPU described in Chapter~\ref{microbench} is an example of that.
     260Figure~\ref{fig:cache-share} and Figure~\ref{fig:cache-noshare} show two different cache topologies that highlight this difference.
     261In Figure~\ref{fig:cache-share}, all cache instances are either private to a \gls{hthrd} or shared with the entire system, which means latency due to cache misses is likely fairly consistent.
     262By comparison, in Figure~\ref{fig:cache-noshare} misses in the L2 cache can be satisfied by a hit in either instance of the L3.
     263However, the memory access latency to the remote L3 instance will be notably higher than the memory access latency to the local L3.
     264The impact of these different designs on this algorithm is that scheduling scales very well on architectures similar to Figure~\ref{fig:cache-share}, but has notably worse scaling with many narrower L3 instances.
     265This is simply because, as the number of L3 instances grows, so too does the chance that random helping causes significant latency.
     266The solution is to have the scheduler be aware of the cache topology.
     267
     268\subsection{Per CPU Sharding}
     269Building a scheduler that is aware of cache topology poses two main challenges: discovering cache topology and matching \procs to cache instance.
     270Sadly, there is no standard portable way to discover cache topology in C.
     271Therefore, while this is a significant portability challenge, it is outside the scope of this thesis to design a cross-platform cache-discovery mechanism.
     272The rest of this work assumes the cache topology is discovered via Linux's \texttt{/sys/devices/system/cpu} directory.
     273This leaves the challenge of matching \procs to cache instances, or more precisely identifying which subqueues of the ready queue are local to which cache instance.
     274Once this matching is available, the helping algorithm can be changed to add bias so that \procs more often help subqueues local to the same cache instance
     275\footnote{Note that like other biases mentioned in this section, the actual bias value does not appear to need precise tuning.}.
     276
     277The obvious approach to mapping cache instances to subqueues is to statically tie subqueues to CPUs.
     278Instead of having each subqueue local to a specific \proc, the system is initialized with subqueues for each \glspl{hthrd} up front.
     279Then \procs dequeue and enqueue by first asking which CPU id they are local to, in order to identify which subqueues are the local ones.
     280\Glspl{proc} can get the CPU id from \texttt{sched\_getcpu} or \texttt{librseq}.
     281
     282This approach solves the performance problems on systems with topologies similar to Figure~\ref{fig:cache-noshare}.
     283However, it actually causes some subtle fairness problems in some systems, specifically systems with few \procs and many \glspl{hthrd}.
     284In these cases, the large number of subqueues and the bias against subqueues tied to different cache instances make it very unlikely that any single subqueue is picked.
     285To make things worse, the small number of \procs means that few helping attempts are made.
     286This combination of few attempts and low odds means a \at stranded on a subqueue that is not actively dequeued from may wait a very long time before it is randomly helped.
     287On a system with 2 \procs, 256 \glspl{hthrd} with narrow cache sharing, and a 100:1 bias, it can actually take multiple seconds for a \at to get dequeued from a remote queue.
     288Therefore, a more dynamic matching of subqueues to cache instance is needed.
     289
     290\subsection{Topological Work Stealing}
     291The approach used in the \CFA scheduler is to have per-\proc subqueues, but with an explicit data structure that tracks which cache instance each subqueue is tied to.
     292This requires some finesse because reading this data structure must lead to fewer cache misses than not having the data structure in the first place.
     293A key element, however, is that, like the timestamps for helping, reading the cache-instance mapping only needs to give the correct result \emph{often enough}.
     294Therefore the algorithm can be built as follows: before enqueuing or dequeuing a \at, each \proc queries the CPU id and the corresponding cache instance.
     295Since subqueues are tied to \procs, each \proc can then update the cache instance mapped to the local subqueue(s).
     296To avoid unnecessary cache line invalidation, the map is only written to if the mapping changes.
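
A sketch of this lazy update, under the assumption of a \texttt{cache\_id\_of\_cpu} table built once from the discovered topology, is shown below; all names are illustrative rather than the actual \CFA implementation.
\begin{lstlisting}
// Sketch: lazily refresh which cache instance this subqueue is tied to.
extern unsigned cache_id_of_cpu[];     // built once from the sysfs topology

static void refresh_mapping( struct subqueue * q ) {
	unsigned inst = cache_id_of_cpu[ sched_getcpu() ];
	// write only on change, so the read-mostly map stays cleanly cached
	if( q->cache_inst != inst )
		__atomic_store_n( &q->cache_inst, inst, __ATOMIC_RELAXED );
}
\end{lstlisting}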
     297
  • doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex

    rba897d21 r2e9b59b  
    33The first step of evaluation is always to test out small controlled cases, to ensure that the basics are working properly.
    44This section presents five different experimental setups, evaluating some of the basic features of \CFA's scheduler.
     5
     6\section{Benchmark Environment}
      7All of these benchmarks are run on two distinct hardware environments, an AMD and an Intel machine.
     8
     9\paragraph{AMD} The AMD machine is a server with two AMD EPYC 7662 CPUs and 256GB of DDR4 RAM.
     10The server runs Ubuntu 20.04.2 LTS on top of Linux Kernel 5.8.0-55.
      11These EPYCs have 64 cores per CPU and 2 \glspl{hthrd} per core, for a total of 256 \glspl{hthrd}.
      12The CPUs each have 4 MB, 64 MB and 512 MB of L1, L2 and L3 cache respectively.
      13Each L1 and L2 instance is shared only by \glspl{hthrd} on a given core, but each L3 instance is shared by 4 cores, therefore 8 \glspl{hthrd}.
     14
     15\paragraph{Intel} The Intel machine is a server with four Intel Xeon Platinum 8160 CPUs and 384GB of DDR4 RAM.
     16The server runs Ubuntu 20.04.2 LTS on top of Linux Kernel 5.8.0-55.
      17These Xeon Platinums have 24 cores per CPU and 2 \glspl{hthrd} per core, for a total of 192 \glspl{hthrd}.
      18The CPUs each have 3 MB, 96 MB and 132 MB of L1, L2 and L3 cache respectively.
      19Each L1 and L2 instance is shared only by \glspl{hthrd} on a given core, but each L3 instance is shared across the entire CPU, therefore 48 \glspl{hthrd}.
     20
      21This limited sharing of the last-level cache on the AMD machine is markedly different from the Intel machine. Indeed, while on both architectures L2 cache misses that are served by L3 caches on a different CPU incur a significant latency, on AMD cache misses served by a different L3 instance on the same CPU also incur high latency.
     22
    523
    624\section{Cycling latency}
     
    3149\end{figure}
    3250
    33 \todo{check term ``idle sleep handling''}
     3451To prevent this benchmark from being dominated by the idle-sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available.
     3552Beyond this point, adding more rings further mitigates the idle-sleep handling.
    36 This is to avoid the case where one of the worker \glspl{at} runs out of work because of the variation on the number of ready \glspl{at} mentionned above.
      53This is to avoid the case where one of the \glspl{proc} runs out of work because of the variation in the number of ready \glspl{at} mentioned above.
    3754
     3855The actual benchmark is more complicated in order to handle termination, but that simply requires using a binary semaphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition.
    3956
    40 \todo{code, setup, results}
    4157\begin{lstlisting}
    4258        Thread.main() {
     
    5268\end{lstlisting}
    5369
     70\begin{figure}
     71        \centering
     72        \input{result.cycle.jax.ops.pstex_t}
     73        \vspace*{-10pt}
     74        \label{fig:cycle:ns:jax}
     75\end{figure}
    5476
    5577\section{Yield}
  • doc/theses/thierry_delisle_PhD/thesis/text/existing.tex

    rba897d21 r2e9b59b  
    22Scheduling is the process of assigning resources to incoming requests.
    33A very common form of this is assigning available workers to work-requests.
    4 The need for scheduling is very common in Computer Science, \eg Operating Systems and Hypervisors schedule available CPUs, NICs schedule available bamdwith, but it is also common in other fields.
    5 For example, assmebly lines are an example of scheduling where parts needed assembly are assigned to line workers.
      4The need for scheduling is very common in Computer Science, \eg Operating Systems and Hypervisors schedule available CPUs, NICs schedule available bandwidth, but scheduling is also common in other fields.
      5For example, in assembly lines, assigning parts in need of assembly to line workers is a form of scheduling.
    66
    77In all these cases, the choice of a scheduling algorithm generally depends first and foremost on how much information is available to the scheduler.
     
    1515
    1616\section{Naming Convention}
    17 Scheduling has been studied by various different communities concentrating on different incarnation of the same problems. As a result, their is no real naming convention for scheduling that is respected across these communities. For this document, I will use the term \newterm{task} to refer to the abstract objects being scheduled and the term \newterm{worker} to refer to the objects which will execute these tasks.
      17Scheduling has been studied by various communities concentrating on different incarnations of the same problem. As a result, there is no naming convention for scheduling that is respected across these communities. For this document, I will use the term \newterm{\Gls{at}} to refer to the abstract objects being scheduled and the term \newterm{\Gls{proc}} to refer to the objects which will execute these \glspl{at}.
    1818
    1919\section{Static Scheduling}
    20 Static schedulers require that tasks have their dependencies and costs explicitly and exhaustively specified prior schedule.
      20Static schedulers require that \glspl{at} have their dependencies and costs explicitly and exhaustively specified prior to scheduling.
     2121The scheduler then processes this input ahead of time and produces a \newterm{schedule} to which the system can later adhere.
    2222This approach is generally popular in real-time systems since the need for strong guarantees justifies the cost of supplying this information.
     
    2626
    2727\section{Dynamic Scheduling}
    28 It may be difficult to fulfill the requirements of static scheduler if dependencies are conditionnal. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of halting or suspending a task with unfulfilled dependencies and adding one or more new task(s) to the system. The new task(s) have the responsability of adding the dependent task back in the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only tasks we no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}.
      28It may be difficult to fulfill the requirements of a static scheduler if dependencies are conditional. In this case, it may be preferable to detect dependencies at runtime. This detection effectively takes the form of adding one or more new \gls{at}(s) to the system as their dependencies are resolved, as well as potentially halting or suspending a \gls{at} that dynamically detects unfulfilled dependencies. Each \gls{at} has the responsibility of adding the dependent \glspl{at} back into the system once completed. As a consequence, the scheduler may have an incomplete view of the system, seeing only \glspl{at} with no pending dependencies. Schedulers that support this detection at runtime are referred to as \newterm{Dynamic Schedulers}.
    2929
    3030\subsection{Explicitly Informed Dynamic Schedulers}
    31 While dynamic schedulers do not have access to an exhaustive list of dependencies for a task, they may require to provide more or less information about each task, including for example: expected duration, required ressources, relative importance, etc. The scheduler can then use this information to direct the scheduling decisions. \cit{Examples of schedulers with more information} Precisely providing this information can be difficult for programmers, especially \emph{predicted} behaviour, and the scheduler may need to support some amount of imprecision in the provided information. For example, specifying that a tasks takes approximately 5 seconds to complete, rather than exactly 5 seconds. User provided information can also become a significant burden depending how the effort to provide the information scales with the number of tasks and there complexity. For example, providing an exhaustive list of files read by 5 tasks is an easier requirement the providing an exhaustive list of memory addresses accessed by 10'000 distinct tasks.
      31While dynamic schedulers do not have access to an exhaustive list of dependencies for a \gls{at}, they may require the programmer to provide more or less information about each \gls{at}, including for example: expected duration, required resources, relative importance, etc. The scheduler can then use this information to direct the scheduling decisions. \cit{Examples of schedulers with more information} Precisely providing this information can be difficult for programmers, especially \emph{predicted} behaviour, and the scheduler may need to support some amount of imprecision in the provided information. For example, specifying that a \gls{at} takes approximately 5 seconds to complete, rather than exactly 5 seconds. User-provided information can also become a significant burden depending on how the effort to provide the information scales with the number of \glspl{at} and their complexity. For example, providing an exhaustive list of files read by 5 \glspl{at} is an easier requirement than providing an exhaustive list of memory addresses accessed by 10'000 distinct \glspl{at}.
    3232
     3333Since the goal of this thesis is to provide a scheduler as a replacement for \CFA's existing \emph{uninformed} scheduler, Explicitly Informed schedulers are less relevant to this project. Nevertheless, some strategies are worth mentioning.
    3434
     3535\subsubsection{Priority Scheduling}
    36 A commonly used information that schedulers used to direct the algorithm is priorities. Each Task is given a priority and higher-priority tasks are preferred to lower-priority ones. The simplest priority scheduling algorithm is to simply require that every task have a distinct pre-established priority and always run the available task with the highest priority. Asking programmers to provide an exhaustive set of unique priorities can be prohibitive when the system has a large number of tasks. It can therefore be diserable for schedulers to support tasks with identical priorities and/or automatically setting and adjusting priorites for tasks.
      36Priorities are a commonly used piece of information that schedulers use to direct their algorithm. Each \gls{at} is given a priority and higher-priority \glspl{at} are preferred to lower-priority ones. The simplest priority scheduling algorithm is to simply require that every \gls{at} have a distinct pre-established priority and always run the available \gls{at} with the highest priority. Asking programmers to provide an exhaustive set of unique priorities can be prohibitive when the system has a large number of \glspl{at}. It can therefore be desirable for schedulers to support \glspl{at} with identical priorities and/or automatically set and adjust priorities for \glspl{at}. Most common operating systems use some variation on priorities, with overlaps and dynamic priority adjustments. For example, Microsoft Windows uses a pair of priorities
     37\cit{https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities,https://docs.microsoft.com/en-us/windows/win32/taskschd/taskschedulerschema-priority-settingstype-element}, one specified by users out of ten possible options and one adjusted by the system.
    3738
    3839\subsection{Uninformed and Self-Informed Dynamic Schedulers}
    39 Several scheduling algorithms do not require programmers to provide additionnal information on each task, and instead make scheduling decisions based solely on internal state and/or information implicitly gathered by the scheduler.
      40Several scheduling algorithms do not require programmers to provide additional information on each \gls{at}, and instead make scheduling decisions based solely on internal state and/or information implicitly gathered by the scheduler.
    4041
    4142
    4243\subsubsection{Feedback Scheduling}
    43 As mentionned, Schedulers may also gather information about each tasks to direct their decisions. This design effectively moves the scheduler to some extent into the realm of \newterm{Control Theory}\cite{wiki:controltheory}. This gathering does not generally involve programmers and as such does not increase programmer burden the same way explicitly provided information may. However, some feedback schedulers do offer the option to programmers to offer additionnal information on certain tasks, in order to direct scheduling decision. The important distinction being whether or not the scheduler can function without this additionnal information.
      44As mentioned, schedulers may also gather information about each \gls{at} to direct their decisions. This design effectively moves the scheduler to some extent into the realm of \newterm{Control Theory}\cite{wiki:controltheory}. This gathering does not generally involve programmers and as such does not increase programmer burden the same way explicitly provided information may. However, some feedback schedulers do offer programmers the option to provide additional information on certain \glspl{at}, in order to direct scheduling decisions. The important distinction is whether or not the scheduler can function without this additional information.
    4445
    4546
    4647\section{Work Stealing}\label{existing:workstealing}
    47 One of the most popular scheduling algorithm in practice (see~\ref{existing:prod}) is work-stealing. This idea, introduce by \cite{DBLP:conf/fpca/BurtonS81}, effectively has each worker work on its local tasks first, but allows the possibility for other workers to steal local tasks if they run out of tasks. \cite{DBLP:conf/focs/Blumofe94} introduced the more familiar incarnation of this, where each workers has queue of tasks to accomplish and workers without tasks steal tasks from random workers. (The Burton and Sleep algorithm had trees of tasks and stole only among neighbours). Blumofe and Leiserson also prove worst case space and time requirements for well-structured computations.
      48One of the most popular scheduling algorithms in practice (see~\ref{existing:prod}) is work stealing. This idea, introduced by \cite{DBLP:conf/fpca/BurtonS81}, effectively has each worker work on its local \glspl{at} first, but allows the possibility for other workers to steal local \glspl{at} if they run out of \glspl{at}. \cite{DBLP:conf/focs/Blumofe94} introduced the more familiar incarnation of this, where each worker has a queue of \glspl{at} to accomplish and workers without \glspl{at} steal \glspl{at} from random workers. (The Burton and Sleep algorithm had trees of \glspl{at} and stole only among neighbours.) Blumofe and Leiserson also prove worst-case space and time requirements for well-structured computations.
    4849
     4950Many variations of this algorithm have been proposed over the years\cite{DBLP:journals/ijpp/YangH18}, both optimizations of existing implementations and approaches that account for new metrics.
     
     5152\paragraph{Granularity} A significant portion of early work-stealing research concentrated on \newterm{Implicit Parallelism}\cite{wiki:implicitpar}. Since the system is responsible for splitting the work, granularity is a challenge that cannot be left to the programmers (as opposed to \newterm{Explicit Parallelism}\cite{wiki:explicitpar} where the burden can be left to programmers). In general, fine granularity is better for load balancing and coarse granularity reduces communication overhead. The best performance generally means finding a middle ground between the two. Several methods can be employed, but I believe these are less relevant for threads, which are generally explicit and more coarse grained.
    5253
    53 \paragraph{Task Placement} Since modern computers rely heavily on cache hierarchies\cit{Do I need a citation for this}, migrating tasks from one core to another can be .  \cite{DBLP:journals/tpds/SquillanteL93}
      54\paragraph{Task Placement} Since modern computers rely heavily on cache hierarchies\cit{Do I need a citation for this}, migrating \glspl{at} from one core to another can be costly. \cite{DBLP:journals/tpds/SquillanteL93}
    5455
    5556\todo{The survey is not great on this subject}
     
    5859
    5960\subsection{Theoretical Results}
    60 There is also a large body of research on the theoretical aspects of work stealing. These evaluate, for example, the cost of migration\cite{DBLP:conf/sigmetrics/SquillanteN91,DBLP:journals/pe/EagerLZ86}, how affinity affects performance\cite{DBLP:journals/tpds/SquillanteL93,DBLP:journals/mst/AcarBB02,DBLP:journals/ipl/SuksompongLS16} and theoretical models for heterogenous systems\cite{DBLP:journals/jpdc/MirchandaneyTS90,DBLP:journals/mst/BenderR02,DBLP:conf/sigmetrics/GastG10}. \cite{DBLP:journals/jacm/BlellochGM99} examine the space bounds of Work Stealing and \cite{DBLP:journals/siamcomp/BerenbrinkFG03} show that for underloaded systems, the scheduler will complete computations in finite time, \ie is \newterm{stable}. Others show that Work-Stealing is applicable to various scheduling contexts\cite{DBLP:journals/mst/AroraBP01,DBLP:journals/anor/TchiboukdjianGT13,DBLP:conf/isaac/TchiboukdjianGTRB10,DBLP:conf/ppopp/AgrawalLS10,DBLP:conf/spaa/AgrawalFLSSU14}. \cite{DBLP:conf/ipps/ColeR13} also studied how Randomized Work Stealing affects false sharing among tasks.
      61There is also a large body of research on the theoretical aspects of work stealing. These evaluate, for example, the cost of migration\cite{DBLP:conf/sigmetrics/SquillanteN91,DBLP:journals/pe/EagerLZ86}, how affinity affects performance\cite{DBLP:journals/tpds/SquillanteL93,DBLP:journals/mst/AcarBB02,DBLP:journals/ipl/SuksompongLS16} and theoretical models for heterogeneous systems\cite{DBLP:journals/jpdc/MirchandaneyTS90,DBLP:journals/mst/BenderR02,DBLP:conf/sigmetrics/GastG10}. \cite{DBLP:journals/jacm/BlellochGM99} examine the space bounds of Work Stealing and \cite{DBLP:journals/siamcomp/BerenbrinkFG03} show that for underloaded systems, the scheduler will complete computations in finite time, \ie is \newterm{stable}. Others show that Work-Stealing is applicable to various scheduling contexts\cite{DBLP:journals/mst/AroraBP01,DBLP:journals/anor/TchiboukdjianGT13,DBLP:conf/isaac/TchiboukdjianGTRB10,DBLP:conf/ppopp/AgrawalLS10,DBLP:conf/spaa/AgrawalFLSSU14}. \cite{DBLP:conf/ipps/ColeR13} also studied how Randomized Work Stealing affects false sharing among \glspl{at}.
    6162
     6263However, as \cite{DBLP:journals/ijpp/YangH18} highlights, it is worth mentioning that this theoretical research has mainly focused on ``fully-strict'' computations, \ie workloads that can be fully represented with a Directed Acyclic Graph. It is unclear how well these distributions represent workloads in real-world scenarios.
    6364
    6465\section{Preemption}
    65 One last aspect of scheduling worth mentionning is preemption since many schedulers rely on it for some of their guarantees. Preemption is the idea of interrupting tasks that have been running for too long, effectively injecting suspend points in the applications. There are multiple techniques to achieve this but they all aim to have the effect of guaranteeing that suspend points in a task are never further apart than some fixed duration. While this helps schedulers guarantee that no tasks will unfairly monopolize a worker, preemption can effectively added to any scheduler. Therefore, the only interesting aspect of preemption for the design of scheduling is whether or not to require it.
      66One last aspect of scheduling worth mentioning is preemption, since many schedulers rely on it for some of their guarantees. Preemption is the idea of interrupting \glspl{at} that have been running for too long, effectively injecting suspend points into the application. There are multiple techniques to achieve this but they all aim to have the effect of guaranteeing that suspend points in a \gls{at} are never further apart than some fixed duration. While this helps schedulers guarantee that no \glspl{at} will unfairly monopolize a worker, preemption can effectively be added to any scheduler. Therefore, the only interesting aspect of preemption for the design of scheduling is whether or not to require it.
    6667
    6768\section{Schedulers in Production}\label{existing:prod}
     
    6970
    7071\subsection{Operating System Schedulers}
    71 Operating System Schedulers tend to be fairly complex schedulers, they generally support some amount of real-time, aim to balance interactive and non-interactive tasks and support for multiple users sharing hardware without requiring these users to cooperate. Here are more details on a few schedulers used in the common operating systems: Linux, FreeBsd, Microsoft Windows and Apple's OS X. The information is less complete for operating systems behind closed source.
      72Operating system schedulers tend to be fairly complex: they generally support some amount of real-time scheduling, aim to balance interactive and non-interactive \glspl{at}, and support multiple users sharing hardware without requiring these users to cooperate. Here are more details on a few schedulers used in common operating systems: Linux, FreeBSD, Microsoft Windows and Apple's OS X. The information is less complete for operating systems with closed source.
    7273
    7374\paragraph{Linux's CFS}
    74 The default scheduler used by Linux (the Completely Fair Scheduler)\cite{MAN:linux/cfs,MAN:linux/cfs2} is a feedback scheduler based on CPU time. For each processor, it constructs a Red-Black tree of tasks waiting to run, ordering them by amount of CPU time spent. The scheduler schedules the task that has spent the least CPU time. It also supports the concept of \newterm{Nice values}, which are effectively multiplicative factors on the CPU time spent. The ordering of tasks is also impacted by a group based notion of fairness, where tasks belonging to groups having spent less CPU time are preferred to tasks beloning to groups having spent more CPU time. Linux achieves load-balancing by regularly monitoring the system state\cite{MAN:linux/cfs/balancing} and using some heuristic on the load (currently CPU time spent in the last millisecond plus decayed version of the previous time slots\cite{MAN:linux/cfs/pelt}.).
      75The default scheduler used by Linux (the Completely Fair Scheduler)\cite{MAN:linux/cfs,MAN:linux/cfs2} is a feedback scheduler based on CPU time. For each processor, it constructs a Red-Black tree of \glspl{at} waiting to run, ordering them by amount of CPU time spent. The scheduler schedules the \gls{at} that has spent the least CPU time. It also supports the concept of \newterm{Nice values}, which are effectively multiplicative factors on the CPU time spent. The ordering of \glspl{at} is also impacted by a group-based notion of fairness, where \glspl{at} belonging to groups having spent less CPU time are preferred to \glspl{at} belonging to groups having spent more CPU time. Linux achieves load-balancing by regularly monitoring the system state\cite{MAN:linux/cfs/balancing} and using some heuristic on the load (currently CPU time spent in the last millisecond plus a decayed version of the previous time slots\cite{MAN:linux/cfs/pelt}).
    7576
    76 \cite{DBLP:conf/eurosys/LoziLFGQF16} shows that Linux's CFS also does work-stealing to balance the workload of each processors, but the paper argues this aspect can be improved significantly. The issues highlighted sem to stem from Linux's need to support fairness across tasks \emph{and} across users\footnote{Enforcing fairness across users means, for example, that given two users: one with a single task and the other with one thousand tasks, the user with a single task does not receive one one thousandth of the CPU time.}, increasing the complexity.
      77\cite{DBLP:conf/eurosys/LoziLFGQF16} shows that Linux's CFS also does work-stealing to balance the workload of each processor, but the paper argues this aspect can be improved significantly. The issues highlighted seem to stem from Linux's need to support fairness across \glspl{at} \emph{and} across users\footnote{Enforcing fairness across users means, for example, that given two users: one with a single \gls{at} and the other with one thousand \glspl{at}, the user with a single \gls{at} does not receive one one thousandth of the CPU time.}, increasing the complexity.
    7778
    78 Linux also offers a FIFO scheduler, a real-time schedulerwhich runs the highest-priority task, and a round-robin scheduler, which is an extension of the fifo-scheduler that adds fixed time slices. \cite{MAN:linux/sched}
      79Linux also offers a FIFO scheduler, a real-time scheduler which runs the highest-priority \gls{at}, and a round-robin scheduler, which is an extension of the FIFO scheduler that adds fixed time slices. \cite{MAN:linux/sched}
    7980
    8081\paragraph{FreeBSD}
     
    8283
    8384\paragraph{Windows(OS)}
    84 Microsoft's Operating System's Scheduler\cite{MAN:windows/scheduler} is a feedback scheduler with priorities. It supports 32 levels of priorities, some of which are reserved for real-time and prviliged applications. It schedules tasks based on the highest priorities (lowest number) and how much cpu time each tasks have used. The scheduler may also temporarily adjust priorities after certain effects like the completion of I/O requests.
      85Microsoft's Operating System's Scheduler\cite{MAN:windows/scheduler} is a feedback scheduler with priorities. It supports 32 levels of priorities, some of which are reserved for real-time and privileged applications. It schedules \glspl{at} based on the highest priorities (lowest number) and how much CPU time each \gls{at} has used. The scheduler may also temporarily adjust priorities after certain events, such as the completion of I/O requests.
    8586
    8687\todo{load balancing}
     
    99100
    100101\subsection{User-Level Schedulers}
    101 By comparison, user level schedulers tend to be simpler, gathering fewer metrics and avoid complex notions of fairness. Part of the simplicity is due to the fact that all tasks have the same user, and therefore cooperation is both feasible and probable.
      102By comparison, user-level schedulers tend to be simpler, gathering fewer metrics and avoiding complex notions of fairness. Part of the simplicity is due to the fact that all \glspl{at} have the same user, and therefore cooperation is both feasible and probable.
    102103\paragraph{Go}
     103104Go's scheduler uses a Randomized Work Stealing algorithm that has a global runqueue (\emph{GRQ}) and each processor (\emph{P}) has both a fixed-size runqueue (\emph{LRQ}) and a high-priority next ``chair'' holding a single element.\cite{GITHUB:go,YTUBE:go} Preemption is present, but only at function call boundaries.
     
    116117
    117118\paragraph{Intel\textregistered ~Threading Building Blocks}
    118 \newterm{Thread Building Blocks}(TBB) is Intel's task parellelism\cite{wiki:taskparallel} framework. It runs tasks or \newterm{jobs}, schedulable objects that must always run to completion, on a pool of worker threads. TBB's scheduler is a variation of Randomized Work Stealing that also supports higher-priority graph-like dependencies\cite{MAN:tbb/scheduler}. It schedules tasks as follows (where \textit{t} is the last task completed):
      119\newterm{Thread Building Blocks} (TBB) is Intel's task parallelism\cite{wiki:taskparallel} framework. It runs \newterm{jobs}, uninterruptible \glspl{at} that must always run to completion, on a pool of worker threads. TBB's scheduler is a variation of Randomized Work Stealing that also supports higher-priority graph-like dependencies\cite{MAN:tbb/scheduler}. It schedules \glspl{at} as follows (where \textit{t} is the last \gls{at} completed):
    119120\begin{displayquote}
    120121        \begin{enumerate}
     
    136137
    137138\paragraph{Grand Central Dispatch}
    138 This is an API produce by Apple\cit{Official GCD source} that offers task parellelism\cite{wiki:taskparallel}. Its distinctive aspect is that it uses multiple ``Dispatch Queues'', some of which are created by programmers. These queues each have their own local ordering guarantees, \eg tasks on queue $A$ are executed in \emph{FIFO} order.
      139This is an API produced by Apple\cit{Official GCD source} that offers task parallelism\cite{wiki:taskparallel}. Its distinctive aspect is that it uses multiple ``Dispatch Queues'', some of which are created by programmers. These queues each have their own local ordering guarantees, \eg \glspl{at} on queue $A$ are executed in \emph{FIFO} order.
    139140
    140141\todo{load balancing and scheduling}
  • doc/theses/thierry_delisle_PhD/thesis/text/io.tex

    rba897d21 r2e9b59b  
    173173The consequence is that the amount of parallelism used to prepare submissions for the next system call is limited.
    174174Beyond this limit, the length of the system call is the throughput limiting factor.
    175 I concluded from early experiments that preparing submissions seems to take about as long as the system call itself, which means that with a single @io_uring@ instance, there is no benefit in terms of \io throughput to having more than two \glspl{hthrd}.
     175I concluded from early experiments that preparing submissions seems to take at most as long as the system call itself, which means that with a single @io_uring@ instance, there is no benefit in terms of \io throughput to having more than two \glspl{hthrd}.
    176176Therefore the design of the submission engine must manage multiple instances of @io_uring@ running in parallel, effectively sharding @io_uring@ instances.
    177177Similarly to scheduling, this sharding can be done privately, \ie, one instance per \glspl{proc}, in decoupled pools, \ie, a pool of \glspl{proc} use a pool of @io_uring@ instances without one-to-one coupling between any given instance and any given \gls{proc}, or some mix of the two.
     
    200200The only added complexity is that the number of SQEs is fixed, which means allocation can fail.
    201201
    202 Allocation failures need to be pushed up to the routing algorithm: \glspl{thrd} attempting \io operations must not be directed to @io_uring@ instances without sufficient SQEs available.
     202Allocation failures need to be pushed up to a routing algorithm: \glspl{thrd} attempting \io operations must not be directed to @io_uring@ instances without sufficient SQEs available.
    203203Furthermore, the routing algorithm should block operations up-front if none of the instances have available SQEs.
    204204
     
    214214
    215215In the case of designating a \gls{thrd}, ideally, when multiple \glspl{thrd} attempt to submit operations to the same @io_uring@ instance, all requests would be batched together and one of the \glspl{thrd} would do the system call on behalf of the others, referred to as the \newterm{submitter}.
    216 In practice however, it is important that the \io requests are not left pending indefinitely and as such, it may be required to have a current submitter and a next submitter.
      216In practice however, it is important that the \io requests are not left pending indefinitely and as such, it may be required to have a ``next submitter'' that guarantees everything missed by the current submitter is seen by the next one.
    217217Indeed, as long as there is a ``next'' submitter, \glspl{thrd} submitting new \io requests can move on, knowing that some future system call will include their request.
     218218Once the system call is done, the submitter must also free SQEs so that the allocator can reuse them.
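
One possible shape for this handoff is sketched below; the flag names and helpers are illustrative, not the actual \CFA implementation. The @pending@ flag is what provides the ``next submitter'' guarantee: a \gls{thrd} that finds the submitter role taken can leave, knowing the flag forces another system call after its request was staged.
\begin{lstlisting}
void submit( struct io_ctx * ctx ) {
	stage_sqes( ctx );                 // make requests visible before the flag
	__atomic_store_n( &ctx->pending, true, __ATOMIC_SEQ_CST );
	while( __atomic_load_n( &ctx->pending, __ATOMIC_SEQ_CST ) ) {
		if( __atomic_test_and_set( &ctx->submit_lock, __ATOMIC_ACQUIRE ) )
			return;            // an active submitter will see our flag
		__atomic_store_n( &ctx->pending, false, __ATOMIC_SEQ_CST );
		io_uring_enter_all( ctx ); // submit everything staged so far
		__atomic_clear( &ctx->submit_lock, __ATOMIC_RELEASE );
	}
}
\end{lstlisting}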
     
    223223If the submission side does not designate submitters, polling can also submit all SQEs as it is polling events.
    224224A simple approach to polling is to allocate a \gls{thrd} per @io_uring@ instance and simply let the poller \glspl{thrd} poll their respective instances when scheduled.
    225 This design is especially convenient for reasons explained in Chapter~\ref{practice}.
    226225
    227226With this pool of instances approach, the big advantage is that it is fairly flexible.
    228227It does not impose restrictions on what \glspl{thrd} submitting \io operations can and cannot do between allocations and submissions.
    229 It also can gracefully handles running out of ressources, SQEs or the kernel returning @EBUSY@.
      228It can also gracefully handle running out of resources, \ie SQEs, or the kernel returning @EBUSY@.
    230229The down side to this is that many of the steps used for submitting need complex synchronization to work properly.
    231230The routing and allocation algorithm needs to keep track of which ring instances have available SQEs, block incoming requests if no instance is available, prevent barging if \glspl{thrd} are already queued up waiting for SQEs and handle SQEs being freed.
    232231The submission side needs to safely append SQEs to the ring buffer, correctly handle chains, make sure no SQE is dropped or left pending forever, notify the allocation side when SQEs can be reused and handle the kernel returning @EBUSY@.
    233 All this synchronization may have a significant cost and, compare to the next approach presented, this synchronization is entirely overhead.
     232All this synchronization may have a significant cost and, compared to the next approach presented, this synchronization is entirely overhead.
    234233
    235234\subsubsection{Private Instances}
    236235Another approach is to simply create one ring instance per \gls{proc}.
    237 This alleviate the need for synchronization on the submissions, requiring only that \glspl{thrd} are not interrupted in between two submission steps.
     236This alleviates the need for synchronization on the submissions, requiring only that \glspl{thrd} are not interrupted in between two submission steps.
    238237This is effectively the same requirement as using @thread_local@ variables.
    239238Since SQEs that are allocated must be submitted to the same ring, on the same \gls{proc}, this effectively forces the application to submit SQEs in allocation order
     
    331330\paragraph{Pending Allocations} can be more complicated to handle.
    332331If the arbiter has available instances, the arbiter can attempt to directly hand over the instance and satisfy the request.
    333 Otherwise
     332Otherwise it must hold onto the list of threads until SQEs are made available again.
      333This handling becomes that much more complex if pending allocations require more than one SQE, since the arbiter must decide between satisfying requests in FIFO order or satisfying requests for fewer SQEs first.
     334
      335While this arbiter has the potential to solve many of the problems mentioned above, it also introduces a significant amount of complexity.
      336Tracking which processors are borrowing which instances and which instances have SQEs available ends up adding a significant synchronization prelude to any I/O operation.
      337Any submission must start with a handshake that pins the currently borrowed instance, if available.
      338An attempt to allocate is then made, but the arbiter can concurrently be attempting to allocate from the same instance on behalf of a different \gls{hthrd}.
      339Once the allocation is completed, the submission must still check that the instance is still borrowed before attempting to flush.
      340These extra synchronization steps end up having a similar cost to the multiple shared instances approach.
      341Furthermore, if the number of instances does not match the number of processors actively submitting I/O, the system can fall into a state where instances are constantly being revoked, cycling among the processors, which leads to significant cache deterioration.
      342For these reasons, this approach, which sounds promising on paper, does not improve on the private instance approach in practice.
     343
     344\subsubsection{Private Instances V2}
     345
    334346
    335347
     
     394406Finally, the last important part of the \io subsystem is its interface. There are multiple approaches that can be offered to programmers, each with advantages and disadvantages. The new \io subsystem can replace the C runtime's API or extend it. In the latter case, the interface can go from very similar to vastly different. The following sections discuss some useful options using @read@ as an example. The standard Linux interface for C is:
    395407
    396 @ssize_t read(int fd, void *buf, size_t count);@.
     408@ssize_t read(int fd, void *buf, size_t count);@
    397409
    398410\subsection{Replacement}
    399 Replacing the C \glsxtrshort{api}
     411Replacing the C \glsxtrshort{api} is the more intrusive and draconian approach.
      412The goal is to convince the compiler and linker to redirect any calls to @read@ to the \CFA implementation instead of glibc's.
     413This has the advantage of potentially working transparently and supporting existing binaries without needing recompilation.
     414It also offers a, presumably, well known and familiar API that C programmers can simply continue to work with.
     415However, this approach also entails a plethora of subtle technical challenges which generally boils down to making a perfect replacement.
     416If the \CFA interface replaces only \emph{some} of the calls to glibc, then this can easily lead to esoteric concurrency bugs.
     417Since the gcc ecosystems does not offer a scheme for such perfect replacement, this approach was rejected as being laudable but infeasible.
    400418
    401419\subsection{Synchronous Extension}
      420Another interface option is to simply offer an interface that is different in name only. For example:
     421
     422@ssize_t cfa_read(int fd, void *buf, size_t count);@
     423
     424\noindent This is much more feasible but still familiar to C programmers.
     425It comes with the caveat that any code attempting to use it must be recompiled, which can be a big problem considering the amount of existing legacy C binaries.
     426However, it has the advantage of implementation simplicity.
    402427
    403428\subsection{Asynchronous Extension}
     429It is important to mention that there is a certain irony to using only synchronous, therefore blocking, interfaces for a feature often referred to as ``non-blocking'' \io.
     430A fairly traditional way of doing this is using futures\cit{wikipedia futures}.
      431A simple way of doing so is as follows:
     432
     433@future(ssize_t) read(int fd, void *buf, size_t count);@
     434
     435\noindent Note that this approach is not necessarily the most idiomatic usage of futures.
     436The definition of read above ``returns'' the read content through an output parameter which cannot be synchronized on.
     437A more classical asynchronous API could look more like:
     438
     439@future([ssize_t, void *]) read(int fd, size_t count);@
     440
     441\noindent However, this interface immediately introduces memory lifetime challenges since the call must effectively allocate a buffer to be returned.
     442Because of the performance implications of this, the first approach is considered preferable as it is more familiar to C programmers.
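
To illustrate, usage of the future-based form could look as follows, where @get@ stands in for whatever future-retrieval operation the runtime offers and is an assumption, not the actual \CFA name:
\begin{lstlisting}
char buf[4096];
future(ssize_t) f = read( fd, buf, sizeof(buf) );  // returns immediately
// ... overlap other work while the kernel performs the read ...
ssize_t ret = get( f );  // blocks this thread only, and only if not yet done
\end{lstlisting}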
    404443
    405444\subsection{Interface directly to \lstinline{io_uring}}
      445Finally, another relevant interface is to directly expose the underlying \texttt{io\_uring} interface. For example:
     446
     447@array(SQE, want) cfa_io_allocate(int want);@
     448
     449@void cfa_io_submit( const array(SQE, have) & );@
     450
     451\noindent This offers more flexibility to users wanting to fully use all of the \texttt{io\_uring} features.
     452However, it is not the most user-friendly option.
      453It obviously imposes a strong dependency between user code and \texttt{io\_uring}, while at the same time restricting users to usages that are compatible with how \CFA internally uses \texttt{io\_uring}.
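
A hypothetical usage of this interface, with the actual filling of the SQE elided, might look like:
\begin{lstlisting}
array(SQE, 1) sqes = cfa_io_allocate( 1 );
// fill sqes[0] as a regular io_uring request: opcode, fd, buffer, length, ...
cfa_io_submit( sqes );
\end{lstlisting}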
     454
     455
  • doc/theses/thierry_delisle_PhD/thesis/text/practice.tex

    rba897d21 r2e9b59b  
    22The scheduling algorithm described in Chapter~\ref{core} addresses scheduling in a stable state.
    33However, it does not address problems that occur when the system changes state.
    4 Indeed the \CFA runtime, supports expanding and shrinking the number of KTHREAD\_place \todo{add kthrd to glossary}, both manually and, to some extent automatically.
      4Indeed, the \CFA runtime supports expanding and shrinking the number of \procs, both manually and, to some extent, automatically.
    55This entails that the scheduling algorithm must support these transitions.
    66
    7 \section{Resizing}
      7More precisely, \CFA supports adding \procs using the RAII object @processor@.
      8These objects can be created at any time and can be destroyed at any time.
      9They are normally created as automatic stack variables, but this is not a requirement.
     10
      11The consequence is that the scheduler and \io subsystems must support \procs coming in and out of existence.
     12
     13\section{Manual Resizing}
      14The consequence of dynamically changing the number of \procs is that all internal arrays that are sized based on the number of \procs need to be \texttt{realloc}ed.
     15This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing because there is no guarantee the \proc causing the shrink had the highest index. Therefore indexes need to be reassigned to preserve contiguous indexes.}.
     16
     17There are no performance requirements, within reason, for resizing since this is usually considered as part of setup and teardown.
     18However, this operation has strict correctness requirements since shrinking and idle sleep can easily lead to deadlocks.
      19It should also avoid as much as possible any effect on performance when the number of \procs remains constant.
      20This latter requirement prohibits simple solutions, like adding a global lock to these arrays.
     21
     22\subsection{Read-Copy-Update}
     23One solution is to use the Read-Copy-Update\cite{wiki:rcu} pattern.
      24In this pattern, resizing is done by creating a copy of the internal data structures, updating the copy with the desired changes, and then attempting an Indiana Jones switch to replace the original with the copy.
     25This approach potentially has the advantage that it may not need any synchronization to do the switch.
     26The switch definitely implies a race where \procs could still use the previous, original, data structure after the copy was switched in.
     27The important question then becomes whether or not this race can be recovered from.
     28If the changes that arrived late can be transferred from the original to the copy then this solution works.
     29
      30For linked lists, dequeuing is somewhat of a problem.
      31Dequeuing from the original will not necessarily update the copy, which could lead to multiple \procs dequeuing the same \at.
     32Fixing this requires making the array contain pointers to subqueues rather than the subqueues themselves.
     33
     34Another challenge is that the original must be kept until all \procs have witnessed the change.
      35This is a straightforward memory reclamation challenge but it does mean that every operation will need \emph{some} form of synchronization.
      36If each of these operations needs synchronization, then it is possible a simpler solution achieves the same performance.
      37Moreover, in addition to the classic challenge of memory reclamation, transferring the original data to the copy before reclaiming it poses further challenges,
      38especially merging subqueues while having a minimal impact on fairness and locality.
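
A sketch of the pointer-based variant discussed above follows; the names are illustrative and the reclamation of the old array is elided.
\begin{lstlisting}
#include <stdatomic.h>
#include <stddef.h>

// Sketch: the array holds pointers to subqueues, so dequeues racing with the
// switch still operate on the same subqueue objects.
struct ready_queue { size_t cnt; struct subqueue ** queues; };
struct ready_queue * _Atomic rq;

void resize( size_t new_cnt ) {
	struct ready_queue * old  = atomic_load( &rq );
	struct ready_queue * copy = clone_and_resize( old, new_cnt );
	atomic_store( &rq, copy );  // the "Indiana Jones" switch
	// "old" may only be freed once all procs are done reading it
}
\end{lstlisting}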
     39
     40\subsection{Read-Writer Lock}
      41A simpler approach would be to use a \newterm{Readers-Writer Lock}\cite{wiki:rwlock} where resizing requires acquiring the lock as a writer while simply enqueuing/dequeuing \ats requires acquiring the lock as a reader.
      42Using a Readers-Writer lock solves the problem of dynamically resizing and leaves the challenge of finding or building a lock with sufficiently good read-side performance.
     43Since this is not a very complex challenge and an ad-hoc solution is perfectly acceptable, building a Readers-Writer lock was the path taken.
     44
      45To maximize reader scalability, the readers should not contend with each other when attempting to acquire and release the critical sections.
     46This effectively requires that each reader have its own piece of memory to mark as locked and unlocked.
      47Readers then acquire the lock by waiting for writers to finish the critical section and then acquiring their local spinlocks.
      48Writers acquire the global lock, so writers have mutual exclusion among themselves, and then acquire each of the local reader locks.
      49Acquiring all the local locks guarantees mutual exclusion between the readers and the writer, while the wait on the read side prevents readers from continuously starving the writer.
     50\todo{reference listings}
     51
     52\begin{lstlisting}
     53void read_lock() {
     54        // Step 1 : make sure no writers in
     55        while write_lock { Pause(); }
     56
      57        // fence needed here, or re-check write_lock after Step 2: a writer may enter between Steps 1 and 2
     58
     59        // Step 2 : acquire our local lock
     60        while atomic_xchg( tls.lock ) {
     61                Pause();
     62        }
     63}
     64
     65void read_unlock() {
     66        tls.lock = false;
     67}
     68\end{lstlisting}
     69
     70\begin{lstlisting}
     71void write_lock()  {
     72        // Step 1 : lock global lock
     73        while atomic_xchg( write_lock ) {
     74                Pause();
     75        }
     76
     77        // Step 2 : lock per-proc locks
     78        for t in all_tls {
     79                while atomic_xchg( t.lock ) {
     80                        Pause();
     81                }
     82        }
     83}
     84
     85void write_unlock() {
     86        // Step 1 : release local locks
     87        for t in all_tls {
     88                t.lock = false;
     89        }
     90
     91        // Step 2 : release global lock
     92        write_lock = false;
     93}
     94\end{lstlisting}
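
Typical usage then wraps the fast path in the read side and resizing in the write side; @local_subqueue@ and @push@ are illustrative:
\begin{lstlisting}
void enqueue( thread * t ) {
	read_lock();                 // scalable: only touches tls.lock
	push( local_subqueue(), t );
	read_unlock();
}

void resize_arrays( int nprocs ) {
	write_lock();                // excludes all readers and writers
	// safe to realloc the per-proc arrays here
	write_unlock();
}
\end{lstlisting}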
    895
    996\section{Idle-Sleep}
      97In addition to users manually changing the number of \procs, it is desirable to support ``removing'' \procs when there are not enough \ats for all the \procs to be useful.
      98While manual resizing is expected to be rare, the number of \ats is expected to vary much more, which means \procs may need to be ``removed'' for only short periods of time.
      99Furthermore, race conditions that spuriously lead to the impression that no \ats are ready are actually common in practice.
     100Therefore \procs should not be actually \emph{removed} but simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready.
     101This state is referred to as \newterm{Idle-Sleep}.
     102
     103Idle sleep effectively encompasses several challenges.
      104First, some data structure needs to keep track of all \procs that are in idle sleep.
      105Because idle sleep can be spurious, this data structure has strict performance requirements, in addition to the strict correctness requirements.
      106Next, some tool must be used to block \glspl{kthrd}, \eg \texttt{pthread\_cond\_wait} or POSIX semaphores.
      107The challenge here is to support \at parking and unparking, timers, \io operations and all other \CFA features with minimal complexity.
      108Finally, idle sleep also includes a heuristic to determine the appropriate number of \procs to be in idle sleep at any given time.
      109This third challenge is however outside the scope of this thesis because developing a general heuristic is involved enough to justify its own work.
      110The \CFA scheduler simply follows the ``Race-to-Idle''\cit{https://doi.org/10.1137/1.9781611973099.100} approach, where a sleeping \proc is woken any time an \at becomes ready and \procs go to idle sleep anytime they run out of work.
     111
     112
     113\section{Tracking Sleepers}
     114Tracking which \procs are in idle sleep requires a data structure holding all the sleeping \procs, but more importantly it requires a concurrent \emph{handshake} so that no \at is stranded on a ready-queue with no active \proc.
      115The classic challenge occurs when a \at is made ready while a \proc is going to sleep: there is a race where the new \at may not see the sleeping \proc and the sleeping \proc may not see the ready \at.
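
A sketch of the handshake used to close this race is shown below: the \proc publishes itself as idle \emph{before} re-checking the ready queue, while the waker enqueues \emph{before} checking for idle \procs, so at least one side always sees the other. All names are illustrative.
\begin{lstlisting}
void try_sleep( struct processor * proc ) {
	push_idle_list( proc );       // publish first (needs a full fence)
	if( !ready_queue_empty() ) {  // re-check: an at may have just arrived
		pop_idle_list( proc );
		return;               // go dequeue instead of sleeping
	}
	block( proc );                // actually sleep, e.g. on an event fd
}

void wake_one( void ) {           // called after enqueuing an at
	struct processor * proc = pop_idle_list_if_any();
	if( proc ) unblock( proc );
}
\end{lstlisting}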
     116
     117Furthermore, the ``Race-to-Idle'' approach means that there is some
     118
     119\section{Sleeping}
     120
     121\subsection{Event FDs}
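
The core mechanism here is Linux's @eventfd@: a read blocks while the counter is zero and any write wakes the reader. A minimal sketch of the system-level behaviour, independent of the actual \CFA code:
\begin{lstlisting}
#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

// one event fd per idle proc, created at startup
static int make_idle_fd( void ) { return eventfd( 0, 0 ); }

static void idle_sleep( int efd ) {
	uint64_t val;
	read( efd, &val, sizeof(val) );   // blocks while the counter is zero
}

static void idle_wake( int efd ) {
	uint64_t one = 1;
	write( efd, &one, sizeof(one) );  // increments counter, waking the reader
}
\end{lstlisting}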
     122
     123\subsection{Epoll}
     124
     125\subsection{\texttt{io\_uring}}
     126
     127\section{Reducing Latency}
  • doc/theses/thierry_delisle_PhD/thesis/thesis.tex

    rba897d21 r2e9b59b  
    202202
    203203\newcommand\io{\glsxtrshort{io}\xspace}%
     204\newcommand\at{\gls{at}\xspace}%
     205\newcommand\ats{\glspl{at}\xspace}%
     206\newcommand\proc{\gls{proc}\xspace}%
     207\newcommand\procs{\glspl{proc}\xspace}%
    204208
    205209%======================================================================
  • libcfa/src/Makefile.am

    rba897d21 r2e9b59b  
    5858        bits/queue.hfa \
    5959        bits/sequence.hfa \
     60        concurrency/iofwd.hfa \
     61        concurrency/barrier.hfa \
    6062        containers/array.hfa \
    61         concurrency/iofwd.hfa \
    6263        containers/list.hfa \
    6364        containers/queueLockFree.hfa \
     
    119120        concurrency/exception.hfa \
    120121        concurrency/kernel.hfa \
     122        concurrency/kernel/cluster.hfa \
    121123        concurrency/locks.hfa \
    122124        concurrency/monitor.hfa \
     
    134136        concurrency/io/call.cfa \
    135137        concurrency/iofwd.hfa \
    136         concurrency/kernel_private.hfa \
     138        concurrency/kernel/private.hfa \
    137139        concurrency/kernel/startup.cfa \
    138140        concurrency/preemption.cfa \
  • libcfa/src/concurrency/coroutine.cfa

    rba897d21 r2e9b59b  
    2727#include <unwind.h>
    2828
    29 #include "kernel_private.hfa"
     29#include "kernel/private.hfa"
    3030#include "exception.hfa"
    3131#include "math.hfa"
  • libcfa/src/concurrency/io.cfa

    rba897d21 r2e9b59b  
    4141        #include "kernel.hfa"
    4242        #include "kernel/fwd.hfa"
    43         #include "kernel_private.hfa"
     43        #include "kernel/private.hfa"
     44        #include "kernel/cluster.hfa"
    4445        #include "io/types.hfa"
    4546
     
    9394        extern void __kernel_unpark( thread$ * thrd, unpark_hint );
    9495
    95         bool __cfa_io_drain( processor * proc ) {
    96                 /* paranoid */ verify( ! __preemption_enabled() );
    97                 /* paranoid */ verify( ready_schedule_islocked() );
    98                 /* paranoid */ verify( proc );
    99                 /* paranoid */ verify( proc->io.ctx );
    100 
    101                 // Drain the queue
    102                 $io_context * ctx = proc->io.ctx;
    103                 unsigned head = *ctx->cq.head;
    104                 unsigned tail = *ctx->cq.tail;
    105                 const __u32 mask = *ctx->cq.mask;
    106 
    107                 __u32 count = tail - head;
    108                 __STATS__( false, io.calls.drain++; io.calls.completed += count; )
    109 
    110                 if(count == 0) return false;
    111 
    112                 for(i; count) {
    113                         unsigned idx = (head + i) & mask;
    114                         volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];
    115 
    116                         /* paranoid */ verify(&cqe);
    117 
    118                         struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
    119                         __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );
    120 
    121                         __kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
    122                 }
    123 
    124                 __cfadbg_print_safe(io, "Kernel I/O : %u completed\n", count);
    125 
    126                 // Mark to the kernel that the cqe has been seen
    127                 // Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
    128                 __atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );
    129 
    130                 /* paranoid */ verify( ready_schedule_islocked() );
    131                 /* paranoid */ verify( ! __preemption_enabled() );
    132 
    133                 return true;
    134         }
    135 
    136         bool __cfa_io_flush( processor * proc, int min_comp ) {
    137                 /* paranoid */ verify( ! __preemption_enabled() );
    138                 /* paranoid */ verify( proc );
    139                 /* paranoid */ verify( proc->io.ctx );
    140 
    141                 __attribute__((unused)) cluster * cltr = proc->cltr;
    142                 $io_context & ctx = *proc->io.ctx;
    143 
    144                 __ioarbiter_flush( ctx );
    145 
    146                 if(ctx.sq.to_submit != 0 || min_comp > 0) {
    147 
    148                         __STATS__( true, io.calls.flush++; )
    149                         int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, min_comp > 0 ? IORING_ENTER_GETEVENTS : 0, (sigset_t *)0p, _NSIG / 8);
     96        static void ioring_syscsll( struct $io_context & ctx, unsigned int min_comp, unsigned int flags ) {
     97                __STATS__( true, io.calls.flush++; )
     98                int ret;
     99                for() {
     100                        ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, flags, (sigset_t *)0p, _NSIG / 8);
    150101                        if( ret < 0 ) {
    151102                                switch((int)errno) {
     103                                case EINTR:
     104                                        continue;
    152105                                case EAGAIN:
    153                                 case EINTR:
    154106                                case EBUSY:
    155107                                        // Update statistics
     
    160112                                }
    161113                        }
    162 
    163                         __cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
    164                         __STATS__( true, io.calls.submitted += ret; )
    165                         /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
    166                         /* paranoid */ verify( ctx.sq.to_submit >= ret );
    167 
    168                         ctx.sq.to_submit -= ret;
    169 
    170                         /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
    171 
    172                         // Release the consumed SQEs
    173                         __release_sqes( ctx );
    174 
     114                        break;
     115                }
     116
     117                __cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
     118                __STATS__( true, io.calls.submitted += ret; )
     119                /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
     120                /* paranoid */ verify( ctx.sq.to_submit >= ret );
     121
     122                ctx.sq.to_submit -= ret;
     123
     124                /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
     125
     126                // Release the consumed SQEs
     127                __release_sqes( ctx );
     128
     129                /* paranoid */ verify( ! __preemption_enabled() );
     130
     131                __atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED);
     132        }
     133
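
The new helper centralises the enter syscall and retries on EINTR instead of surfacing the interruption as a submission failure, while EAGAIN and EBUSY still end the attempt. A minimal plain-C sketch of that retry protocol; ring_enter and its error policy are illustrative, not the libcfa API:

        #include <errno.h>
        #include <signal.h>        /* _NSIG */
        #include <sys/syscall.h>   /* __NR_io_uring_enter */
        #include <unistd.h>        /* syscall */
        #include <linux/io_uring.h>

        /* Hypothetical helper: submit 'to_submit' SQEs and optionally wait for
           'min_complete' CQEs, retrying while the syscall is interrupted. */
        static int ring_enter(int ring_fd, unsigned to_submit, unsigned min_complete, unsigned flags) {
                for (;;) {
                        int ret = syscall(__NR_io_uring_enter, ring_fd, to_submit,
                                          min_complete, flags, (sigset_t *)0, _NSIG / 8);
                        if (ret >= 0) return ret;                        /* SQEs consumed */
                        if (errno == EINTR) continue;                    /* retry transparently */
                        if (errno == EAGAIN || errno == EBUSY) return 0; /* ring busy: none submitted */
                        return -errno;                                   /* genuine error */
                }
        }
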
     134        static bool try_acquire( $io_context * ctx ) __attribute__((nonnull(1))) {
     135                /* paranoid */ verify( ! __preemption_enabled() );
     136                /* paranoid */ verify( ready_schedule_islocked() );
     137
     138
     139                {
     140                        const __u32 head = *ctx->cq.head;
     141                        const __u32 tail = *ctx->cq.tail;
     142
     143                        if(head == tail) return false;
     144                }
     145
     146                // Drain the queue
     147                if(!__atomic_try_acquire(&ctx->cq.lock)) {
     148                        __STATS__( false, io.calls.locked++; )
     149                        return false;
     150                }
     151
     152                return true;
     153        }
     154
     155        static bool __cfa_do_drain( $io_context * ctx, cluster * cltr ) __attribute__((nonnull(1, 2))) {
     156                /* paranoid */ verify( ! __preemption_enabled() );
     157                /* paranoid */ verify( ready_schedule_islocked() );
     158                /* paranoid */ verify( ctx->cq.lock == true );
     159
     160                const __u32 mask = *ctx->cq.mask;
     161                unsigned long long ts_prev = ctx->cq.ts;
     162
     163                // re-read the head and tail in case it already changed.
     164                const __u32 head = *ctx->cq.head;
     165                const __u32 tail = *ctx->cq.tail;
     166                const __u32 count = tail - head;
     167                __STATS__( false, io.calls.drain++; io.calls.completed += count; )
     168
     169                for(i; count) {
     170                        unsigned idx = (head + i) & mask;
     171                        volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];
     172
     173                        /* paranoid */ verify(&cqe);
     174
     175                        struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
     176                        // __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );
     177
     178                        __kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
     179                }
     180
     181                unsigned long long ts_next = ctx->cq.ts = rdtscl();
     182
     183                // Mark to the kernel that the cqe has been seen
     184                // Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
     185                __atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );
     186                ctx->proc->idle_wctx.drain_time = ts_next;
     187
     188                __cfadbg_print_safe(io, "Kernel I/O : %u completed age %llu\n", count, ts_next);
     189                /* paranoid */ verify( ready_schedule_islocked() );
     190                /* paranoid */ verify( ! __preemption_enabled() );
     191
     192                __atomic_unlock(&ctx->cq.lock);
     193
     194                touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next );
     195
     196                return true;
     197        }
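
On the completion side, __cfa_do_drain reads the kernel-published tail, hands each CQE's user_data back to the waiting future, and only then publishes the advanced head so the kernel may reuse those slots. A hedged C sketch of that ring discipline, with a simplified struct standing in for libcfa's $io_context:

        #include <stdatomic.h>
        #include <linux/io_uring.h>

        struct cq_view {                        /* simplified view of a mapped CQ ring */
                _Atomic unsigned *head;         /* advanced by userspace, read by the kernel */
                _Atomic unsigned *tail;         /* advanced by the kernel */
                unsigned mask;                  /* ring size - 1 */
                struct io_uring_cqe *cqes;
        };

        /* Consume every available completion; returns how many were processed. */
        static unsigned cq_drain(struct cq_view *cq,
                                 void (*complete)(unsigned long long user_data, int res)) {
                unsigned head = atomic_load_explicit(cq->head, memory_order_relaxed);
                unsigned tail = atomic_load_explicit(cq->tail, memory_order_acquire);
                unsigned count = tail - head;
                for (unsigned i = 0; i < count; i++) {
                        struct io_uring_cqe *cqe = &cq->cqes[(head + i) & cq->mask];
                        complete(cqe->user_data, cqe->res);  /* e.g. fulfil the future */
                }
                /* Publish the new head only after the CQEs have been read. */
                atomic_store_explicit(cq->head, head + count, memory_order_release);
                return count;
        }
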
     198
     199        bool __cfa_io_drain( processor * proc ) {
     200                bool local = false;
     201                bool remote = false;
     202
     203                ready_schedule_lock();
     204
     205                cluster * const cltr = proc->cltr;
     206                $io_context * const ctx = proc->io.ctx;
     207                /* paranoid */ verify( cltr );
     208                /* paranoid */ verify( ctx );
     209
     210                with(cltr->sched) {
     211                        const size_t ctxs_count = io.count;
     212
     213                        /* paranoid */ verify( ready_schedule_islocked() );
    175214                        /* paranoid */ verify( ! __preemption_enabled() );
    176 
    177                         ctx.proc->io.pending = false;
    178                 }
    179 
    180                 ready_schedule_lock();
    181                 bool ret = __cfa_io_drain( proc );
     215                        /* paranoid */ verify( active_processor() == proc );
     216                        /* paranoid */ verify( __shard_factor.io > 0 );
     217                        /* paranoid */ verify( ctxs_count > 0 );
     218                        /* paranoid */ verify( ctx->cq.id < ctxs_count );
     219
     220                        const unsigned this_cache = cache_id(cltr, ctx->cq.id / __shard_factor.io);
     221                        const unsigned long long ctsc = rdtscl();
     222
     223                        if(proc->io.target == MAX) {
     224                                uint64_t chaos = __tls_rand();
     225                                unsigned ext = chaos & 0xff;
     226                                unsigned other  = (chaos >> 8) % (ctxs_count);
     227
     228                                if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
     229                                        proc->io.target = other;
     230                                }
     231                        }
     232                        else {
     233                                const unsigned target = proc->io.target;
     234                                /* paranoid */ verify( io.tscs[target].tv != MAX );
     235                                HELP: if(target < ctxs_count) {
     236                                        const unsigned long long cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io);
     237                                        const unsigned long long age = moving_average(ctsc, io.tscs[target].tv, io.tscs[target].ma);
     238                                        __cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no");
     239                                        if(age <= cutoff) break HELP;
     240
     241                                        if(!try_acquire(io.data[target])) break HELP;
     242
     243                                        if(!__cfa_do_drain( io.data[target], cltr )) break HELP;
     244
     245                                        remote = true;
     246                                        __STATS__( false, io.calls.helped++; )
     247                                }
     248                                proc->io.target = MAX;
     249                        }
     250                }
     251
     252
     253                // Drain the local queue
     254                if(try_acquire( proc->io.ctx )) {
     255                        local = __cfa_do_drain( proc->io.ctx, cltr );
     256                }
     257
     258                /* paranoid */ verify( ready_schedule_islocked() );
     259                /* paranoid */ verify( ! __preemption_enabled() );
     260                /* paranoid */ verify( active_processor() == proc );
     261
    182262                ready_schedule_unlock();
    183                 return ret;
     263                return local || remote;
     264        }
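
The helping pass only drains a remote context when its age, smoothed with a moving average of drain timestamps, exceeds a cutoff derived from the local context; calc_cutoff and moving_average are defined elsewhere in the tree. The fragment below only illustrates the flavour of that staleness test, with arbitrary weights:

        #include <stdint.h>

        /* Illustrative only: exponentially weighted age of a queue since its
           last drain, and the decision of whether to help it. */
        static inline uint64_t ema_age(uint64_t now, uint64_t last_ts, uint64_t old_ma) {
                uint64_t sample = now - last_ts;
                return (old_ma * 7 + sample) / 8;  /* arbitrary 7/8 smoothing */
        }

        static inline int should_help(uint64_t age, uint64_t cutoff) {
                return age > cutoff;               /* only steal visibly stale work */
        }
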
     265
     266        bool __cfa_io_flush( processor * proc ) {
     267                /* paranoid */ verify( ! __preemption_enabled() );
     268                /* paranoid */ verify( proc );
     269                /* paranoid */ verify( proc->io.ctx );
     270
     271                $io_context & ctx = *proc->io.ctx;
     272
     273                __ioarbiter_flush( ctx );
     274
     275                if(ctx.sq.to_submit != 0) {
     276                        ioring_syscsll(ctx, 0, 0);
     277
     278                }
     279
     280                return __cfa_io_drain( proc );
    184281        }
    185282
     
    209306                struct io_uring_sqe * sqes = ctx->sq.sqes;
    210307                for(i; want) {
    211                         __cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
     308                        // __cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
    212309                        out_sqes[i] = &sqes[idxs[i]];
    213310                }
     
    227324                // copy all the indexes we want from the available list
    228325                for(i; want) {
    229                         __cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
     326                        // __cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
    230327                        idxs[i] = sq.free_ring.array[(fhead + i) & mask];
    231328                }
     
    244341        // sqe == &sqes[idx]
    245342        struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
    246                 __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
     343                // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
    247344
    248345                disable_interrupts();
     
    252349                /* paranoid */ verify( ctx );
    253350
    254                 __cfadbg_print_safe(io, "Kernel I/O : attempting to fast allocation\n");
     351                // __cfadbg_print_safe(io, "Kernel I/O : attempting to fast allocation\n");
    255352
    256353                // We can proceed to the fast path
     
    260357                        enable_interrupts();
    261358
    262                         __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);
     359                        // __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);
    263360
    264361                        __fill( sqes, want, idxs, ctx );
     
    275372                /* paranoid */ verify( ioarb );
    276373
    277                 __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");
     374                // __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");
    278375
    279376                struct $io_context * ret = __ioarbiter_allocate(*ioarb, idxs, want);
    280377
    281                 __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);
     378                // __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);
    282379
    283380                __fill( sqes, want, idxs, ret );
     
    296393                // Add the sqes to the array
    297394                for( i; have ) {
    298                         __cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
     395                        // __cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
    299396                        sq.kring.array[ (tail + i) & mask ] = idxs[i];
    300397                }
     
    304401                sq.to_submit += have;
    305402
    306                 ctx->proc->io.pending = true;
    307                 ctx->proc->io.dirty   = true;
     403                __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED);
     404                __atomic_store_n(&ctx->proc->io.dirty  , true, __ATOMIC_RELAXED);
    308405        }
    309406
     
    314411                if(sq.to_submit > 30) {
    315412                        __tls_stats()->io.flush.full++;
    316                         __cfa_io_flush( ctx->proc, 0 );
     413                        __cfa_io_flush( ctx->proc );
    317414                }
    318415                if(!lazy) {
    319416                        __tls_stats()->io.flush.eager++;
    320                         __cfa_io_flush( ctx->proc, 0 );
     417                        __cfa_io_flush( ctx->proc );
    321418                }
    322419        }
    323420
    324421        void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
    325                 __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
     422                // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
    326423
    327424                disable_interrupts();
     
    340437                        enable_interrupts();
    341438
    342                         __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
     439                        // __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
    343440                        return;
    344441                }
     
    348445                enable_interrupts();
    349446
    350                 __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");
     447                // __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");
    351448
    352449                __ioarbiter_submit(inctx, idxs, have, lazy);
     
    392489                // go through the range and release the sqes
    393490                for( i; count ) {
    394                         __cfadbg_print_safe(io, "Kernel I/O : release loop\n");
     491                        // __cfadbg_print_safe(io, "Kernel I/O : release loop\n");
    395492                        __u32 idx = ctx.sq.kring.array[ (phead + i) & mask ];
    396493                        ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx;
     
    432529
    433530        static $io_context * __ioarbiter_allocate( $io_arbiter & this, __u32 idxs[], __u32 want ) {
    434                 __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");
     531                // __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");
    435532
    436533                __STATS__( false, io.alloc.block += 1; )
     
    499596                bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei);
    500597
    501                 ctx->proc->io.pending = true;
     598                __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST);
    502599
    503600                if( we ) {
     
    544641
    545642                        // We can proceed to the fast path
    546                         if( !__alloc(ctx, &idx, 1) ) return false;
     643                        if( !__alloc(ctx, &idx, 1) ) {
     644                                /* paranoid */ verify( false ); // for now check if this happens, next time just abort the sleep.
     645                                return false;
     646                        }
    547647
    548648                        // Allocation was successful
     
    574674
    575675                        /* paranoid */ verify( sqe->user_data == (uintptr_t)&future );
    576                         __submit( ctx, &idx, 1, true );
     676                        __submit_only( ctx, &idx, 1 );
    577677
    578678                        /* paranoid */ verify( proc == __cfaabi_tls.this_processor );
     
    581681                        return true;
    582682                }
     683
     684                void __cfa_io_idle( processor * proc ) {
     685                        iovec iov;
     686                        __atomic_acquire( &proc->io.ctx->cq.lock );
     687
     688                        __attribute__((used)) volatile bool was_reset = false;
     689
     690                        with( proc->idle_wctx) {
     691
     692                                // Do we already have a pending read
     693                                if(available(*ftr)) {
     694                                        // There is no pending read, we need to add one
     695                                        reset(*ftr);
     696
     697                                        iov.iov_base = rdbuf;
     698                                        iov.iov_len  = sizeof(eventfd_t);
     699                                        __kernel_read(proc, *ftr, iov, evfd );
     700                                        ftr->result = 0xDEADDEAD;
     701                                        *((eventfd_t *)rdbuf) = 0xDEADDEADDEADDEAD;
     702                                        was_reset = true;
     703                                }
     704                        }
     705
     706                        if( !__atomic_load_n( &proc->do_terminate, __ATOMIC_SEQ_CST ) ) {
     707                                __ioarbiter_flush( *proc->io.ctx );
     708                                proc->idle_wctx.sleep_time = rdtscl();
     709                                ioring_syscsll( *proc->io.ctx, 1, IORING_ENTER_GETEVENTS);
     710                        }
     711
     712                        ready_schedule_lock();
     713                        __cfa_do_drain( proc->io.ctx, proc->cltr );
     714                        ready_schedule_unlock();
     715
     716                        asm volatile ("" :: "m" (was_reset));
     717                }
    583718        #endif
    584719#endif
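
__cfa_io_idle above parks the processor inside the ring itself: the eventfd read is queued as an ordinary SQE, and the processor then blocks in io_uring_enter with IORING_ENTER_GETEVENTS and a min_complete of one, so either a regular completion or an eventfd write from a waker ends the sleep. A liburing-based sketch of the same idea; idle_park and the zero user_data tag are invented for illustration:

        #include <liburing.h>
        #include <stdint.h>

        /* Park until any completion arrives, including a wakeup write to 'evfd'. */
        static void idle_park(struct io_uring *ring, int evfd) {
                static uint64_t buf;                   /* target of the eventfd read */
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
                io_uring_prep_read(sqe, evfd, &buf, sizeof buf, 0);
                sqe->user_data = 0;                    /* tag: this is the wakeup read */
                io_uring_submit_and_wait(ring, 1);     /* sleeps in io_uring_enter(GETEVENTS) */

                struct io_uring_cqe *cqe;
                while (io_uring_peek_cqe(ring, &cqe) == 0) {
                        /* dispatch real completions here; user_data 0 is the wakeup */
                        io_uring_cqe_seen(ring, cqe);
                }
        }
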
  • libcfa/src/concurrency/io/setup.cfa

    rba897d21 r2e9b59b  
    3232
    3333        void __cfa_io_start( processor * proc ) {}
    34         bool __cfa_io_flush( processor * proc, int ) { return false; }
     34        bool __cfa_io_flush( processor * proc ) { return false; }
     35        bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1))) { return false; }
     36        void __cfa_io_idle ( processor * ) __attribute__((nonnull (1))) {}
    3537        void __cfa_io_stop ( processor * proc ) {}
    3638
     
    3941
    4042#else
     43#pragma GCC diagnostic push
     44#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
    4145        #include <errno.h>
    4246        #include <stdint.h>
     
    5761        #include "bitmanip.hfa"
    5862        #include "fstream.hfa"
    59         #include "kernel_private.hfa"
     63        #include "kernel/private.hfa"
     64        #include "limits.hfa"
    6065        #include "thread.hfa"
     66#pragma GCC diagnostic pop
    6167
    6268        void ?{}(io_context_params & this) {
     
    112118                this.ext_sq.empty = true;
    113119                (this.ext_sq.queue){};
    114                 __io_uring_setup( this, cl.io.params, proc->idle_fd );
     120                __io_uring_setup( this, cl.io.params, proc->idle_wctx.evfd );
    115121                __cfadbg_print_safe(io_core, "Kernel I/O : Created ring for io_context %u (%p)\n", this.fd, &this);
    116122        }
     
    122128                __cfadbg_print_safe(io_core, "Kernel I/O : Destroyed ring for io_context %u\n", this.fd);
    123129        }
    124 
    125         extern void __disable_interrupts_hard();
    126         extern void __enable_interrupts_hard();
    127130
    128131        static void __io_uring_setup( $io_context & this, const io_context_params & params_in, int procfd ) {
     
    214217
    215218                // completion queue
     219                cq.lock      = false;
     220                cq.id        = MAX;
     221                cq.ts        = rdtscl();
    216222                cq.head      = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
    217223                cq.tail      = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
     
    227233                        __cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd);
    228234
    229                         __disable_interrupts_hard();
    230 
    231235                        int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1);
    232236                        if (ret < 0) {
    233237                                abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno));
    234238                        }
    235 
    236                         __enable_interrupts_hard();
    237239
    238240                        __cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd);
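
Registering the processor's eventfd with IORING_REGISTER_EVENTFD makes the kernel signal that fd whenever a completion is posted to the ring, which is what lets an idle processor sleep on a single file descriptor. The same registration through liburing, shown only as a sketch (attach_eventfd is an invented name):

        #include <liburing.h>
        #include <stdio.h>

        /* Ask the kernel to write 'evfd' whenever this ring posts a completion. */
        static int attach_eventfd(struct io_uring *ring, int evfd) {
                int ret = io_uring_register_eventfd(ring, evfd);  /* IORING_REGISTER_EVENTFD */
                if (ret < 0)
                        fprintf(stderr, "eventfd register failed: %d\n", ret);
                return ret;
        }
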
  • libcfa/src/concurrency/io/types.hfa

    rba897d21 r2e9b59b  
    2323#include "bits/locks.hfa"
    2424#include "bits/queue.hfa"
     25#include "iofwd.hfa"
    2526#include "kernel/fwd.hfa"
     27#include "limits.hfa"
    2628
    2729#if defined(CFA_HAVE_LINUX_IO_URING_H)
     
    7779
    7880        struct __cmp_ring_t {
     81                volatile bool lock;
     82
     83                unsigned id;
     84
     85                unsigned long long ts;
     86
    7987                // Head and tail of the ring
    8088                volatile __u32 * head;
     
    128136        };
    129137
     138        static inline unsigned long long ts($io_context *& this) {
     139                const __u32 head = *this->cq.head;
     140                const __u32 tail = *this->cq.tail;
     141
     142                if(head == tail) return MAX;
     143
     144                return this->cq.ts;
     145        }
     146
    130147        struct __pending_alloc {
    131148                inline __outstanding_io;
     
    170187        // void __ioctx_prepare_block($io_context & ctx);
    171188#endif
    172 
    173 //-----------------------------------------------------------------------
    174 // IO user data
    175 struct io_future_t {
    176         future_t self;
    177         __s32 result;
    178 };
    179 
    180 static inline {
    181         thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {
    182                 this.result = result;
    183                 return fulfil(this.self, do_unpark);
    184         }
    185 
    186         // Wait for the future to be fulfilled
    187         bool wait     ( io_future_t & this ) { return wait     (this.self); }
    188         void reset    ( io_future_t & this ) { return reset    (this.self); }
    189         bool available( io_future_t & this ) { return available(this.self); }
    190 }
  • libcfa/src/concurrency/iofwd.hfa

    rba897d21 r2e9b59b  
    2525}
    2626#include "bits/defs.hfa"
     27#include "kernel/fwd.hfa"
    2728#include "time.hfa"
    2829
     
    4849
    4950struct cluster;
    50 struct io_future_t;
    5151struct $io_context;
    5252
     
    5858
    5959struct io_uring_sqe;
     60
     61//-----------------------------------------------------------------------
     62// IO user data
     63struct io_future_t {
     64        future_t self;
     65        __s32 result;
     66};
     67
     68static inline {
     69        thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {
     70                this.result = result;
     71                return fulfil(this.self, do_unpark);
     72        }
     73
     74        // Wait for the future to be fulfilled
     75        bool wait     ( io_future_t & this ) { return wait     (this.self); }
     76        void reset    ( io_future_t & this ) { return reset    (this.self); }
     77        bool available( io_future_t & this ) { return available(this.self); }
     78}
    6079
    6180//----------
  • libcfa/src/concurrency/kernel.cfa

    rba897d21 r2e9b59b  
    1919// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__
    2020
     21#pragma GCC diagnostic push
     22#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
     23
    2124//C Includes
    2225#include <errno.h>
     
    2528#include <signal.h>
    2629#include <unistd.h>
     30
    2731extern "C" {
    2832        #include <sys/eventfd.h>
     
    3135
    3236//CFA Includes
    33 #include "kernel_private.hfa"
     37#include "kernel/private.hfa"
    3438#include "preemption.hfa"
    3539#include "strstream.hfa"
     
    4044#define __CFA_INVOKE_PRIVATE__
    4145#include "invoke.h"
     46#pragma GCC diagnostic pop
    4247
    4348#if !defined(__CFA_NO_STATISTICS__)
     
    127132static void __wake_one(cluster * cltr);
    128133
    129 static void idle_sleep(processor * proc, io_future_t & future, iovec & iov);
     134static void idle_sleep(processor * proc);
    130135static bool mark_idle (__cluster_proc_list & idles, processor & proc);
    131136static void mark_awake(__cluster_proc_list & idles, processor & proc);
    132137
    133 extern void __cfa_io_start( processor * );
    134 extern bool __cfa_io_drain( processor * );
    135 extern bool __cfa_io_flush( processor *, int min_comp );
    136 extern void __cfa_io_stop ( processor * );
    137 static inline bool __maybe_io_drain( processor * );
     138extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
     139extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));
     140extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1)));
    138141
    139142#if defined(CFA_WITH_IO_URING_IDLE)
     
    159162        verify(this);
    160163
    161         io_future_t future; // used for idle sleep when io_uring is present
    162         future.self.ptr = 1p;  // mark it as already fulfilled so we know if there is a pending request or not
    163         eventfd_t idle_val;
    164         iovec idle_iovec = { &idle_val, sizeof(idle_val) };
    165 
    166         __cfa_io_start( this );
     164        /* paranoid */ verify( this->idle_wctx.ftr   != 0p );
     165        /* paranoid */ verify( this->idle_wctx.rdbuf != 0p );
     166
     167        // used for idle sleep when io_uring is present
     168        // mark it as already fulfilled so we know if there is a pending request or not
     169        this->idle_wctx.ftr->self.ptr = 1p;
    167170
    168171        __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
     
    189192                for() {
    190193                        // Check if there is pending io
    191                         __maybe_io_drain( this );
     194                        __cfa_io_drain( this );
    192195
    193196                        // Try to get the next thread
     
    195198
    196199                        if( !readyThread ) {
     200                                // there is no point in holding submissions if we are idle
    197201                                __IO_STATS__(true, io.flush.idle++; )
    198                                 __cfa_io_flush( this, 0 );
     202                                __cfa_io_flush( this );
     203
     204                                // drain again in case something showed up
     205                                __cfa_io_drain( this );
    199206
    200207                                readyThread = __next_thread( this->cltr );
     
    202209
    203210                        if( !readyThread ) for(5) {
     211                                readyThread = __next_thread_slow( this->cltr );
     212
     213                                if( readyThread ) break;
     214
      215                                // It's unlikely we still have I/O to submit, but the arbiter could
    204216                                __IO_STATS__(true, io.flush.idle++; )
    205 
    206                                 readyThread = __next_thread_slow( this->cltr );
    207 
    208                                 if( readyThread ) break;
    209 
    210                                 __cfa_io_flush( this, 0 );
     217                                __cfa_io_flush( this );
     218
     219                                // drain again in case something showed up
     220                                __cfa_io_drain( this );
    211221                        }
    212222
     
    231241                                }
    232242
    233                                 idle_sleep( this, future, idle_iovec );
     243                                idle_sleep( this );
    234244
    235245                                // We were woken up, remove self from idle
     
    251261                        if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
    252262
    253                         if(this->io.pending && !this->io.dirty) {
     263                        if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
    254264                                __IO_STATS__(true, io.flush.dirty++; )
    255                                 __cfa_io_flush( this, 0 );
     265                                __cfa_io_flush( this );
    256266                        }
    257267                }
     
    259269                __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
    260270        }
    261 
    262         for(int i = 0; !available(future); i++) {
    263                 if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has been spinning on a flush after exit loop.\n", 60);
    264                 __cfa_io_flush( this, 1 );
    265         }
    266 
    267         __cfa_io_stop( this );
    268271
    269272        post( this->terminated );
     
    634637
    635638        int fd = 1;
    636         if( __atomic_load_n(&fdp->fd, __ATOMIC_SEQ_CST) != 1 ) {
    637                 fd = __atomic_exchange_n(&fdp->fd, 1, __ATOMIC_RELAXED);
     639        if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
     640                fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
    638641        }
    639642
    640643        switch(fd) {
     644                __attribute__((unused)) int ret;
    641645        case 0:
    642646                // If the processor isn't ready to sleep then the exchange will already wake it up
     
    656660                // If the processor was ready to sleep, we need to wake it up with an actual write
    657661                val = 1;
    658                 eventfd_write( fd, val );
     662                ret = eventfd_write( fd, val );
     663                /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
    659664
    660665                #if !defined(__CFA_NO_STATISTICS__)
     
    677682        __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
    678683
    679         this->idle_wctx.fd = 1;
     684        this->idle_wctx.sem = 1;
     685
     686        this->idle_wctx.wake__time = rdtscl();
    680687
    681688        eventfd_t val;
    682689        val = 1;
    683         eventfd_write( this->idle_fd, val );
    684 
    685         /* paranoid */ verify( ! __preemption_enabled() );
    686 }
    687 
    688 static void idle_sleep(processor * this, io_future_t & future, iovec & iov) {
     690        __attribute__((unused)) int ret = eventfd_write( this->idle_wctx.evfd, val );
     691
     692        /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
     693        /* paranoid */ verify( ! __preemption_enabled() );
     694}
     695
     696static void idle_sleep(processor * this) {
     697        /* paranoid */ verify( this->idle_wctx.evfd != 1 );
     698        /* paranoid */ verify( this->idle_wctx.evfd != 2 );
     699
    689700        // Tell everyone we are ready to go to sleep
    690701        for() {
    691                 int expected = this->idle_wctx.fd;
     702                int expected = this->idle_wctx.sem;
    692703
    693704                // Someone already told us to wake-up! No time for a nap.
     
    695706
    696707                // Try to mark that we are going to sleep
    697                 if(__atomic_compare_exchange_n(&this->idle_wctx.fd, &expected, this->idle_fd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
     708                if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
    698709                        // Everyone agreed, taking a nap
    699710                        break;
     
    713724                {
    714725                        eventfd_t val;
    715                         ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
     726                        ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
    716727                        if(ret < 0) {
    717728                                switch((int)errno) {
     
    735746                #endif
    736747        #else
    737                 // Do we already have a pending read
    738                 if(available(future)) {
    739                         // There is no pending read, we need to add one
    740                         reset(future);
    741 
    742                         __kernel_read(this, future, iov, this->idle_fd );
    743                 }
    744 
    745                 __cfa_io_flush( this, 1 );
     748                __cfa_io_idle( this );
    746749        #endif
    747750}
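
The sleep/wake race is resolved entirely through idle_wctx.sem: 0 means the processor is running, 1 means a wakeup already arrived, and any other value is the sleeper's eventfd, which is exactly how the CAS above and the exchange in __wake_one use it. A compressed C sketch of that handshake under those assumptions (go_to_sleep and wake_one are invented names):

        #include <stdatomic.h>
        #include <stdint.h>
        #include <sys/eventfd.h>
        #include <unistd.h>

        /* sem: 0 = awake, 1 = wakeup pending, anything else = a sleeper's eventfd. */
        static void go_to_sleep(_Atomic int *sem, int evfd) {
                int expected = 0;
                /* Advertise the fd; the CAS fails if a wakeup (1) raced in first. */
                if (atomic_compare_exchange_strong(sem, &expected, evfd)) {
                        uint64_t val;
                        read(evfd, &val, sizeof val);    /* block until woken */
                }
        }

        static void wake_one(_Atomic int *sem) {
                int prev = atomic_exchange(sem, 1);      /* claim the wakeup */
                if (prev != 0 && prev != 1)              /* a sleeper left its fd here */
                        eventfd_write(prev, 1);
        }
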
     
    750753        __STATS__(true, ready.sleep.halts++; )
    751754
    752         proc.idle_wctx.fd = 0;
     755        proc.idle_wctx.sem = 0;
    753756
    754757        /* paranoid */ verify( ! __preemption_enabled() );
     
    831834#endif
    832835
    833 static inline bool __maybe_io_drain( processor * proc ) {
    834         bool ret = false;
    835         #if defined(CFA_HAVE_LINUX_IO_URING_H)
    836                 __cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);
    837 
    838                 // Check if we should drain the queue
    839                 $io_context * ctx = proc->io.ctx;
    840                 unsigned head = *ctx->cq.head;
    841                 unsigned tail = *ctx->cq.tail;
    842                 if(head == tail) return false;
    843                 ready_schedule_lock();
    844                 ret = __cfa_io_drain( proc );
    845                 ready_schedule_unlock();
    846         #endif
    847         return ret;
    848 }
     836
    849837
    850838//-----------------------------------------------------------------------------
     
    903891        void print_stats_now( cluster & this, int flags ) {
    904892                crawl_cluster_stats( this );
    905                 __print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
     893                __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
    906894        }
    907895#endif
  • libcfa/src/concurrency/kernel.hfa

    rba897d21 r2e9b59b  
    4848extern struct cluster * mainCluster;
    4949
    50 // Processor id, required for scheduling threads
    51 
    52 
      50// Coroutine used by processors for the 2-step context switch
    5351coroutine processorCtx_t {
    5452        struct processor * proc;
    5553};
    5654
    57 
     55struct io_future_t;
     56
     57// Information needed for idle sleep
    5858struct __fd_waitctx {
    59         volatile int fd;
     59        // semaphore/future like object
     60        // values can be 0, 1 or some file descriptor.
     61        // 0 - is the default state
      62        // 1 - means the proc should wake up immediately
      63        // FD - means the proc is going to sleep and should be woken by writing to the FD.
     64        volatile int sem;
     65
     66        // The event FD that corresponds to this processor
     67        int evfd;
     68
     69        // buffer into which the proc will read from evfd
     70        // unused if not using io_uring for idle sleep
     71        void * rdbuf;
     72
      73        // future used to track the read of the eventfd
     74        // unused if not using io_uring for idle sleep
     75        io_future_t * ftr;
     76
     77        volatile unsigned long long wake__time;
     78        volatile unsigned long long sleep_time;
     79        volatile unsigned long long drain_time;
    6080};
    6181
     
    92112        struct {
    93113                $io_context * ctx;
    94                 bool pending;
    95                 bool dirty;
     114                unsigned target;
     115                volatile bool pending;
     116                volatile bool dirty;
    96117        } io;
    97118
     
    103124        bool pending_preemption;
    104125
    105         // Idle lock (kernel semaphore)
    106         int idle_fd;
    107 
    108         // Idle waitctx
     126        // context for idle sleep
    109127        struct __fd_waitctx idle_wctx;
    110128
     
    155173void ^?{}(__intrusive_lane_t & this);
    156174
    157 // Aligned timestamps which are used by the relaxed ready queue
     175// Aligned timestamps which are used by the ready queue and io subsystem
    158176struct __attribute__((aligned(128))) __timestamp_t {
    159177        volatile unsigned long long tv;
     
    161179};
    162180
     181static inline void  ?{}(__timestamp_t & this) { this.tv = 0; this.ma = 0; }
     182static inline void ^?{}(__timestamp_t &) {}
     183
     184
    163185struct __attribute__((aligned(16))) __cache_id_t {
    164186        volatile unsigned id;
    165187};
    166 
    167 // Aligned timestamps which are used by the relaxed ready queue
    168 struct __attribute__((aligned(128))) __help_cnts_t {
    169         volatile unsigned long long src;
    170         volatile unsigned long long dst;
    171         volatile unsigned long long tri;
    172 };
    173 
    174 static inline void  ?{}(__timestamp_t & this) { this.tv = 0; this.ma = 0; }
    175 static inline void ^?{}(__timestamp_t &) {}
    176 
    177 struct __attribute__((aligned(128))) __ready_queue_caches_t;
    178 void  ?{}(__ready_queue_caches_t & this);
    179 void ^?{}(__ready_queue_caches_t & this);
    180 
    181 //TODO adjust cache size to ARCHITECTURE
    182 // Structure holding the ready queue
    183 struct __ready_queue_t {
    184         // Data tracking the actual lanes
    185         // On a separate cacheline from the used struct since
    186         // used can change on each push/pop but this data
    187         // only changes on shrink/grow
    188         struct {
    189                 // Array of lanes
    190                 __intrusive_lane_t * volatile data;
    191 
    192                 // Array of times
    193                 __timestamp_t * volatile tscs;
    194 
    195                 __cache_id_t * volatile caches;
    196 
    197                 // Array of stats
    198                 __help_cnts_t * volatile help;
    199 
    200                 // Number of lanes (empty or not)
    201                 volatile size_t count;
    202         } lanes;
    203 };
    204 
    205 void  ?{}(__ready_queue_t & this);
    206 void ^?{}(__ready_queue_t & this);
    207 #if !defined(__CFA_NO_STATISTICS__)
    208         unsigned cnt(const __ready_queue_t & this, unsigned idx);
    209 #endif
    210188
    211189// Idle Sleep
     
    233211// Cluster
    234212struct __attribute__((aligned(128))) cluster {
    235         // Ready queue for threads
    236         __ready_queue_t ready_queue;
     213        struct {
     214                struct {
      215                        // Array of subqueues
     216                        __intrusive_lane_t * data;
     217
     218                        // Time since subqueues were processed
     219                        __timestamp_t * tscs;
     220
      221                        // Number of subqueues / timestamps
     222                        size_t count;
     223                } readyQ;
     224
     225                struct {
      226                        // Array of $io_context
     227                        $io_context ** data;
     228
     229                        // Time since subqueues were processed
     230                        __timestamp_t * tscs;
     231
     232                        // Number of I/O subqueues
     233                        size_t count;
     234                } io;
     235
     236                // Cache each kernel thread belongs to
     237                __cache_id_t * caches;
     238        } sched;
     239
     240        // // Ready queue for threads
     241        // __ready_queue_t ready_queue;
    237242
    238243        // Name of the cluster
  • libcfa/src/concurrency/kernel/fwd.hfa

    rba897d21 r2e9b59b  
    248248                        // check if the future is available
    249249                        bool available( future_t & this ) {
     250                                while( this.ptr == 2p ) Pause();
    250251                                return this.ptr == 1p;
    251252                        }
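
The added spin suggests the future's pointer doubles as a small state machine, with 2p a transient value that callers must wait out. A speculative C sketch of such an encoding; only the 1p (fulfilled) and 2p (transient) meanings are taken from the code above, the rest is assumption:

        #include <stdatomic.h>
        #include <stdint.h>

        enum { FUT_EMPTY = 0, FUT_DONE = 1, FUT_BUSY = 2 };  /* assumed encoding */

        static int future_available(_Atomic uintptr_t *ptr) {
                /* Spin past the transient state so callers never observe it. */
                while (atomic_load(ptr) == FUT_BUSY)
                        ;  /* ideally a CPU pause here */
                return atomic_load(ptr) == FUT_DONE;
        }
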
  • libcfa/src/concurrency/kernel/private.hfa

    rba897d21 r2e9b59b  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // kernel_private.hfa --
     7// kernel/private.hfa --
    88//
    99// Author           : Thierry Delisle
     
    1717
    1818#if !defined(__cforall_thread__)
    19         #error kernel_private.hfa should only be included in libcfathread source
     19        #error kernel/private.hfa should only be included in libcfathread source
    2020#endif
    2121
     
    3333#else
    3434        #ifndef _GNU_SOURCE
    35         #error kernel_private requires gnu_source
     35        #error kernel/private requires gnu_source
    3636        #endif
    3737        #include <sched.h>
     
    4040
    4141// Defines whether or not we *want* to use io_uring_enter as the idle_sleep blocking call
    42 #define CFA_WANT_IO_URING_IDLE
     42// #define CFA_WANT_IO_URING_IDLE
    4343
    4444// Defines whether or not we *can* use io_uring_enter as the idle_sleep blocking call
     
    365365void ready_queue_shrink(struct cluster * cltr);
    366366
     367//-----------------------------------------------------------------------
      368// Deallocate the ready queue arrays when the cluster is torn down
     369void ready_queue_close(struct cluster * cltr);
    367370
    368371// Local Variables: //
  • libcfa/src/concurrency/kernel/startup.cfa

    rba897d21 r2e9b59b  
    3232
    3333// CFA Includes
    34 #include "kernel_private.hfa"
     34#include "kernel/private.hfa"
     35#include "iofwd.hfa"
    3536#include "startup.hfa"                                  // STARTUP_PRIORITY_XXX
    3637#include "limits.hfa"
     
    9798extern void __kernel_alarm_startup(void);
    9899extern void __kernel_alarm_shutdown(void);
     100extern void __cfa_io_start( processor * );
     101extern void __cfa_io_stop ( processor * );
    99102
    100103//-----------------------------------------------------------------------------
     
    111114KERNEL_STORAGE(__stack_t,            mainThreadCtx);
    112115KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
     116KERNEL_STORAGE(eventfd_t,            mainIdleEventFd);
     117KERNEL_STORAGE(io_future_t,          mainIdleFuture);
    113118#if !defined(__CFA_NO_STATISTICS__)
    114119KERNEL_STORAGE(__stats_t, mainProcStats);
     
    224229        (*mainProcessor){};
    225230
     231        mainProcessor->idle_wctx.rdbuf = &storage_mainIdleEventFd;
     232        mainProcessor->idle_wctx.ftr   = (io_future_t*)&storage_mainIdleFuture;
     233        /* paranoid */ verify( sizeof(storage_mainIdleEventFd) == sizeof(eventfd_t) );
     234
     235        __cfa_io_start( mainProcessor );
    226236        register_tls( mainProcessor );
    227237
     
    305315
    306316        unregister_tls( mainProcessor );
     317        __cfa_io_stop( mainProcessor );
    307318
    308319        // Destroy the main processor and its context in reverse order of construction
     
    353364        proc->local_data = &__cfaabi_tls;
    354365
     366        __cfa_io_start( proc );
    355367        register_tls( proc );
     368
     369        // used for idle sleep when io_uring is present
     370        io_future_t future;
     371        eventfd_t idle_buf;
     372        proc->idle_wctx.ftr = &future;
     373        proc->idle_wctx.rdbuf = &idle_buf;
     374
    356375
    357376        // SKULLDUGGERY: We want to create a context for the processor coroutine
     
    395414
    396415        unregister_tls( proc );
     416        __cfa_io_stop( proc );
    397417
    398418        return 0p;
     
    515535        this.rdq.its = 0;
    516536        this.rdq.itr = 0;
    517         this.rdq.id  = MAX;
     537        this.rdq.id  = 0;
    518538        this.rdq.target = MAX;
    519539        this.rdq.last = MAX;
     
    532552        this.local_data = 0p;
    533553
    534         this.idle_fd = eventfd(0, 0);
    535         if (idle_fd < 0) {
     554        idle_wctx.evfd = eventfd(0, 0);
     555        if (idle_wctx.evfd < 0) {
    536556                abort("KERNEL ERROR: PROCESSOR EVENTFD - %s\n", strerror(errno));
    537557        }
    538558
    539         this.idle_wctx.fd = 0;
     559        idle_wctx.sem = 0;
     560        idle_wctx.wake__time = 0;
    540561
    541562        // I'm assuming these two are reserved for standard input and output
    542563        // so I'm using them as sentinels with idle_wctx.
    543         /* paranoid */ verify( this.idle_fd != 0 );
    544         /* paranoid */ verify( this.idle_fd != 1 );
     564        /* paranoid */ verify( idle_wctx.evfd != 0 );
     565        /* paranoid */ verify( idle_wctx.evfd != 1 );
    545566
    546567        #if !defined(__CFA_NO_STATISTICS__)
     
    554575// Not a ctor, it just preps the destruction but should not destroy members
    555576static void deinit(processor & this) {
    556         close(this.idle_fd);
     577        close(this.idle_wctx.evfd);
    557578}
    558579
     
    605626        this.name = name;
    606627        this.preemption_rate = preemption_rate;
    607         ready_queue{};
     628        this.sched.readyQ.data = 0p;
     629        this.sched.readyQ.tscs = 0p;
     630        this.sched.readyQ.count = 0;
     631        this.sched.io.tscs = 0p;
     632        this.sched.io.data = 0p;
     633        this.sched.caches = 0p;
    608634
    609635        #if !defined(__CFA_NO_STATISTICS__)
     
    644670        // Unlock the RWlock
    645671        ready_mutate_unlock( last_size );
     672
     673        ready_queue_close( &this );
     674        /* paranoid */ verify( this.sched.readyQ.data == 0p );
     675        /* paranoid */ verify( this.sched.readyQ.tscs == 0p );
     676        /* paranoid */ verify( this.sched.readyQ.count == 0 );
     677        /* paranoid */ verify( this.sched.io.tscs == 0p );
     678        /* paranoid */ verify( this.sched.caches == 0p );
     679
    646680        enable_interrupts( false ); // Don't poll, could be in main cluster
     681
    647682
    648683        #if !defined(__CFA_NO_STATISTICS__)
  • libcfa/src/concurrency/locks.cfa

    rba897d21 r2e9b59b  
    1919
    2020#include "locks.hfa"
    21 #include "kernel_private.hfa"
     21#include "kernel/private.hfa"
    2222
    2323#include <kernel.hfa>
  • libcfa/src/concurrency/locks.hfa

    rba897d21 r2e9b59b  
    164164}
    165165
    166 static inline bool lock(linear_backoff_then_block_lock & this) with(this) {
     166static inline void lock(linear_backoff_then_block_lock & this) with(this) {
    167167        // if owner just return
    168         if (active_thread() == owner) return true;
     168        if (active_thread() == owner) return;
    169169        size_t compare_val = 0;
    170170        int spin = spin_start;
     
    172172        for( ;; ) {
    173173                compare_val = 0;
    174                 if (internal_try_lock(this, compare_val)) return true;
     174                if (internal_try_lock(this, compare_val)) return;
    175175                if (2 == compare_val) break;
    176176                for (int i = 0; i < spin; i++) Pause();
     
    179179        }
    180180
    181         if(2 != compare_val && try_lock_contention(this)) return true;
     181        if(2 != compare_val && try_lock_contention(this)) return;
    182182        // block until signalled
    183         while (block(this)) if(try_lock_contention(this)) return true;
    184 
    185         // this should never be reached as block(this) always returns true
    186         return false;
     183        while (block(this)) if(try_lock_contention(this)) return;
    187184}
    188185
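
linear_backoff_then_block_lock spins with a growing pause count and then parks the thread; returning void reflects that the blocking path cannot fail, which is why the dead return false above disappears. A generic C sketch of that spin-then-block shape using a futex-style 0/1/2 state (park and unpark_one are abstract stand-ins, not libcfa's fields):

        #include <stdatomic.h>

        extern void park(void);        /* block the current thread (runtime-provided) */
        extern void unpark_one(void);  /* wake one parked waiter */
        extern void pause_cpu(void);   /* e.g. the x86 PAUSE instruction */

        /* value: 0 = unlocked, 1 = locked, 2 = locked and contended. */
        static void lock_acquire(_Atomic int *value) {
                int spin = 4;
                const int spin_max = 1024;
                while (spin <= spin_max) {               /* fast path: bounded spinning */
                        int expected = 0;
                        if (atomic_compare_exchange_weak(value, &expected, 1)) return;
                        if (expected == 2) break;        /* already contended, go park */
                        for (int i = 0; i < spin; i++) pause_cpu();
                        spin += 4;                       /* linear backoff */
                }
                while (atomic_exchange(value, 2) != 0)   /* slow path: mark contended */
                        park();                          /* lock_release unparks us */
        }

        static void lock_release(_Atomic int *value) {
                if (atomic_exchange(value, 0) == 2)      /* was anyone waiting? */
                        unpark_one();
        }
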
  • libcfa/src/concurrency/monitor.cfa

    rba897d21 r2e9b59b  
    2222#include <inttypes.h>
    2323
    24 #include "kernel_private.hfa"
     24#include "kernel/private.hfa"
    2525
    2626#include "bits/algorithm.hfa"
  • libcfa/src/concurrency/mutex.cfa

    rba897d21 r2e9b59b  
    2121#include "mutex.hfa"
    2222
    23 #include "kernel_private.hfa"
     23#include "kernel/private.hfa"
    2424
    2525//-----------------------------------------------------------------------------
  • libcfa/src/concurrency/mutex_stmt.hfa

    rba897d21 r2e9b59b  
    1212};
    1313
     14
     15struct __mutex_stmt_lock_guard {
     16    void ** lockarr;
     17    __lock_size_t count;
     18};
     19
     20static inline void ?{}( __mutex_stmt_lock_guard & this, void * lockarr [], __lock_size_t count  ) {
     21    this.lockarr = lockarr;
     22    this.count = count;
     23
     24    // Sort locks based on address
     25    __libcfa_small_sort(this.lockarr, count);
     26
     27    // acquire locks in order
     28    // for ( size_t i = 0; i < count; i++ ) {
     29    //     lock(*this.lockarr[i]);
     30    // }
     31}
     32
     33static inline void ^?{}( __mutex_stmt_lock_guard & this ) with(this) {
     34    // for ( size_t i = count; i > 0; i-- ) {
     35    //     unlock(*lockarr[i - 1]);
     36    // }
     37}
     38
    1439forall(L & | is_lock(L)) {
    15 
    16     struct __mutex_stmt_lock_guard {
    17         L ** lockarr;
    18         __lock_size_t count;
    19     };
    20    
    21     static inline void ?{}( __mutex_stmt_lock_guard(L) & this, L * lockarr [], __lock_size_t count  ) {
    22         this.lockarr = lockarr;
    23         this.count = count;
    24 
    25         // Sort locks based on address
    26         __libcfa_small_sort(this.lockarr, count);
    27 
    28         // acquire locks in order
    29         for ( size_t i = 0; i < count; i++ ) {
    30             lock(*this.lockarr[i]);
    31         }
    32     }
    33    
    34     static inline void ^?{}( __mutex_stmt_lock_guard(L) & this ) with(this) {
    35         for ( size_t i = count; i > 0; i-- ) {
    36             unlock(*lockarr[i - 1]);
    37         }
    38     }
    3940
    4041    struct scoped_lock {
     
    5152    }
    5253
    53     static inline L * __get_ptr( L & this ) {
     54    static inline void * __get_mutexstmt_lock_ptr( L & this ) {
    5455        return &this;
    5556    }
    5657
    57     static inline L __get_type( L & this );
     58    static inline L __get_mutexstmt_lock_type( L & this );
    5859
    59     static inline L __get_type( L * this );
     60    static inline L __get_mutexstmt_lock_type( L * this );
    6061}
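
The guard's real job is deadlock avoidance: sorting a mutex statement's locks by address gives every statement the same global acquisition order, so two statements that share locks cannot deadlock each other. The same idea in portable C, with pthread mutexes standing in for CFA's is_lock trait:

        #include <pthread.h>
        #include <stdint.h>
        #include <stdlib.h>

        static int by_address(const void *a, const void *b) {
                uintptr_t pa = (uintptr_t)*(void *const *)a;
                uintptr_t pb = (uintptr_t)*(void *const *)b;
                return (pa > pb) - (pa < pb);
        }

        /* Acquire a set of mutexes in global (address) order, release in reverse. */
        static void lock_all(pthread_mutex_t **locks, size_t n) {
                qsort(locks, n, sizeof *locks, by_address);
                for (size_t i = 0; i < n; i++) pthread_mutex_lock(locks[i]);
        }

        static void unlock_all(pthread_mutex_t **locks, size_t n) {
                for (size_t i = n; i > 0; i--) pthread_mutex_unlock(locks[i - 1]);
        }
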
  • libcfa/src/concurrency/preemption.cfa

    rba897d21 r2e9b59b  
    3131#include "bits/debug.hfa"
    3232#include "bits/signal.hfa"
    33 #include "kernel_private.hfa"
     33#include "kernel/private.hfa"
    3434
    3535
  • libcfa/src/concurrency/ready_queue.cfa

    rba897d21 r2e9b59b  
    2020
    2121
    22 // #define USE_RELAXED_FIFO
    23 // #define USE_WORK_STEALING
    24 // #define USE_CPU_WORK_STEALING
    2522#define USE_AWARE_STEALING
    2623
    2724#include "bits/defs.hfa"
    2825#include "device/cpu.hfa"
    29 #include "kernel_private.hfa"
    30 
    31 #include "stdlib.hfa"
    32 #include "limits.hfa"
    33 #include "math.hfa"
    34 
    35 #include <errno.h>
    36 #include <unistd.h>
    37 
    38 extern "C" {
    39         #include <sys/syscall.h>  // __NR_xxx
    40 }
     26#include "kernel/cluster.hfa"
     27#include "kernel/private.hfa"
     28
     29// #include <errno.h>
     30// #include <unistd.h>
    4131
    4232#include "ready_subqueue.hfa"
     
    5040#endif
    5141
    52 // No overriden function, no environment variable, no define
    53 // fall back to a magic number
    54 #ifndef __CFA_MAX_PROCESSORS__
    55         #define __CFA_MAX_PROCESSORS__ 1024
    56 #endif
    57 
    58 #if   defined(USE_AWARE_STEALING)
    59         #define READYQ_SHARD_FACTOR 2
    60         #define SEQUENTIAL_SHARD 2
    61 #elif defined(USE_CPU_WORK_STEALING)
    62         #define READYQ_SHARD_FACTOR 2
    63 #elif defined(USE_RELAXED_FIFO)
    64         #define BIAS 4
    65         #define READYQ_SHARD_FACTOR 4
    66         #define SEQUENTIAL_SHARD 1
    67 #elif defined(USE_WORK_STEALING)
    68         #define READYQ_SHARD_FACTOR 2
    69         #define SEQUENTIAL_SHARD 2
    70 #else
    71         #error no scheduling strategy selected
    72 #endif
    73 
    7442static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
    7543static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
    7644static inline struct thread$ * search(struct cluster * cltr);
    77 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);
    78 
    79 
    80 // returns the maximum number of processors the RWLock support
    81 __attribute__((weak)) unsigned __max_processors() {
    82         const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
    83         if(!max_cores_s) {
    84                 __cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
    85                 return __CFA_MAX_PROCESSORS__;
    86         }
    87 
    88         char * endptr = 0p;
    89         long int max_cores_l = strtol(max_cores_s, &endptr, 10);
    90         if(max_cores_l < 1 || max_cores_l > 65535) {
    91                 __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
    92                 return __CFA_MAX_PROCESSORS__;
    93         }
    94         if('\0' != *endptr) {
    95                 __cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
    96                 return __CFA_MAX_PROCESSORS__;
    97         }
    98 
    99         return max_cores_l;
    100 }
    101 
    102 #if   defined(CFA_HAVE_LINUX_LIBRSEQ)
    103         // No forward declaration needed
    104         #define __kernel_rseq_register rseq_register_current_thread
    105         #define __kernel_rseq_unregister rseq_unregister_current_thread
    106 #elif defined(CFA_HAVE_LINUX_RSEQ_H)
    107         static void __kernel_raw_rseq_register  (void);
    108         static void __kernel_raw_rseq_unregister(void);
    109 
    110         #define __kernel_rseq_register __kernel_raw_rseq_register
    111         #define __kernel_rseq_unregister __kernel_raw_rseq_unregister
    112 #else
    113         // No forward declaration needed
    114         // No initialization needed
    115         static inline void noop(void) {}
    116 
    117         #define __kernel_rseq_register noop
    118         #define __kernel_rseq_unregister noop
    119 #endif
    120 
    121 //=======================================================================
    122 // Cluster wide reader-writer lock
    123 //=======================================================================
    124 void  ?{}(__scheduler_RWLock_t & this) {
    125         this.max   = __max_processors();
    126         this.alloc = 0;
    127         this.ready = 0;
    128         this.data  = alloc(this.max);
    129         this.write_lock  = false;
    130 
    131         /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
    132         /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
    133 
    134 }
    135 void ^?{}(__scheduler_RWLock_t & this) {
    136         free(this.data);
    137 }
    138 
    139 
    140 //=======================================================================
    141 // Lock-Free registering/unregistering of threads
    142 unsigned register_proc_id( void ) with(*__scheduler_lock) {
    143         __kernel_rseq_register();
    144 
    145         bool * handle = (bool *)&kernelTLS().sched_lock;
    146 
     147         // Step - 1 : check if there is already space in the data array
    148         uint_fast32_t s = ready;
    149 
    150         // Check among all the ready
    151         for(uint_fast32_t i = 0; i < s; i++) {
     152                 bool * volatile * cell = (bool * volatile *)&data[i]; // Cforall is bugged and the double volatile causes problems
    153                 /* paranoid */ verify( handle != *cell );
    154 
    155                 bool * null = 0p; // Re-write every loop since compare thrashes it
    156                 if( __atomic_load_n(cell, (int)__ATOMIC_RELAXED) == null
    157                         && __atomic_compare_exchange_n( cell, &null, handle, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
    158                         /* paranoid */ verify(i < ready);
    159                         /* paranoid */ verify( (kernelTLS().sched_id = i, true) );
    160                         return i;
    161                 }
    162         }
    163 
     164         if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);
    165 
    166         // Step - 2 : F&A to get a new spot in the array.
    167         uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
     168         if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);
    169 
    170         // Step - 3 : Mark space as used and then publish it.
    171         data[n] = handle;
    172         while() {
    173                 unsigned copy = n;
    174                 if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
    175                         && __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    176                         break;
    177                 Pause();
    178         }
    179 
    180         // Return new spot.
    181         /* paranoid */ verify(n < ready);
    182         /* paranoid */ verify( (kernelTLS().sched_id = n, true) );
    183         return n;
    184 }
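The three steps above form a small lock-free protocol: reuse a slot freed by an unregistered processor, otherwise claim a fresh slot with fetch-and-add, then publish it by advancing ready only once all earlier slots are published. A minimal standalone sketch in C11 atomics, where slots, alloc_cnt and ready_cnt are hypothetical stand-ins for the data, alloc and ready fields of __scheduler_RWLock_t:

        #include <stdatomic.h>
        #include <stdlib.h>

        #define MAX_SLOTS 1024
        static _Atomic(_Bool *) slots[MAX_SLOTS];   // published handles
        static atomic_uint alloc_cnt;               // slots ever claimed
        static atomic_uint ready_cnt;               // slots visible to readers

        unsigned slot_register(_Bool * handle) {
                // Step 1 : try to reuse a slot freed by unregistration
                unsigned s = atomic_load_explicit(&ready_cnt, memory_order_acquire);
                for(unsigned i = 0; i < s; i++) {
                        _Bool * expected = NULL; // reset every loop, the CAS clobbers it
                        if(atomic_compare_exchange_strong(&slots[i], &expected, handle))
                                return i;
                }
                // Step 2 : claim a brand new slot with F&A
                unsigned n = atomic_fetch_add(&alloc_cnt, 1);
                if(n >= MAX_SLOTS) abort();
                // Step 3 : store the handle, then publish slots strictly in order
                slots[n] = handle;
                unsigned copy;
                do { copy = n; } // the CAS fails while earlier slots are unpublished
                while(!atomic_compare_exchange_weak(&ready_cnt, &copy, n + 1));
                return n;
        }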
    185 
    186 void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {
    187         /* paranoid */ verify(id < ready);
    188         /* paranoid */ verify(id == kernelTLS().sched_id);
    189         /* paranoid */ verify(data[id] == &kernelTLS().sched_lock);
    190 
     191         bool * volatile * cell = (bool * volatile *)&data[id]; // Cforall is bugged and the double volatile causes problems
    192 
    193         __atomic_store_n(cell, 0p, __ATOMIC_RELEASE);
    194 
    195         __kernel_rseq_unregister();
    196 }
    197 
    198 //-----------------------------------------------------------------------
    199 // Writer side : acquire when changing the ready queue, e.g. adding more
    200 //  queues or removing them.
    201 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
    202         /* paranoid */ verify( ! __preemption_enabled() );
    203 
    204         // Step 1 : lock global lock
     205         // It is needed to prevent processors that register mid critical-section
     206         //   from simply locking their own lock and entering.
    207         __atomic_acquire( &write_lock );
    208 
     209         // Make sure we won't deadlock ourselves
    210         // Checking before acquiring the writer lock isn't safe
    211         // because someone else could have locked us.
    212         /* paranoid */ verify( ! kernelTLS().sched_lock );
    213 
    214         // Step 2 : lock per-proc lock
    215         // Processors that are currently being registered aren't counted
    216         //   but can't be in read_lock or in the critical section.
    217         // All other processors are counted
    218         uint_fast32_t s = ready;
    219         for(uint_fast32_t i = 0; i < s; i++) {
    220                 volatile bool * llock = data[i];
    221                 if(llock) __atomic_acquire( llock );
    222         }
    223 
    224         /* paranoid */ verify( ! __preemption_enabled() );
    225         return s;
    226 }
    227 
    228 void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
    229         /* paranoid */ verify( ! __preemption_enabled() );
    230 
    231         // Step 1 : release local locks
    232         // This must be done while the global lock is held to avoid
     233         //   threads that were created mid critical-section
     234         //   racing to lock their local locks and having the writer
     235         //   immediately unlock them
    236         // Alternative solution : return s in write_lock and pass it to write_unlock
    237         for(uint_fast32_t i = 0; i < last_s; i++) {
    238                 volatile bool * llock = data[i];
    239                 if(llock) __atomic_store_n(llock, (bool)false, __ATOMIC_RELEASE);
    240         }
    241 
    242         // Step 2 : release global lock
    243         /*paranoid*/ assert(true == write_lock);
    244         __atomic_store_n(&write_lock, (bool)false, __ATOMIC_RELEASE);
    245 
    246         /* paranoid */ verify( ! __preemption_enabled() );
    247 }
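Writer-side usage is symmetric; a minimal sketch of the intended pattern (the mutation itself being whatever structural change the caller needs):

        // hypothetical writer-side critical section
        uint_fast32_t last_s = ready_mutate_lock();
        // ... grow/shrink lanes, reassign processor ids, etc. ...
        ready_mutate_unlock( last_s );

Feeding the count returned by the lock back into the unlock matters: processors registered during the critical section were never locked, so only the first last_s local locks may be released.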
    248 
    249 //=======================================================================
    250 // caches handling
    251 
    252 struct __attribute__((aligned(128))) __ready_queue_caches_t {
    253         // Count States:
    254         // - 0  : No one is looking after this cache
    255         // - 1  : No one is looking after this cache, BUT it's not empty
    256         // - 2+ : At least one processor is looking after this cache
    257         volatile unsigned count;
    258 };
    259 
    260 void  ?{}(__ready_queue_caches_t & this) { this.count = 0; }
    261 void ^?{}(__ready_queue_caches_t & this) {}
    262 
    263 static inline void depart(__ready_queue_caches_t & cache) {
    264         /* paranoid */ verify( cache.count > 1);
    265         __atomic_fetch_add(&cache.count, -1, __ATOMIC_SEQ_CST);
    266         /* paranoid */ verify( cache.count != 0);
     267         /* paranoid */ verify( cache.count < 65536 ); // This verify assumes no cluster maps more than 65,000 kernel threads to a single cache, which could be legitimate but would be highly unusual.
    268 }
    269 
    270 static inline void arrive(__ready_queue_caches_t & cache) {
    271         // for() {
    272         //      unsigned expected = cache.count;
    273         //      unsigned desired  = 0 == expected ? 2 : expected + 1;
    274         // }
    275 }
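Given the count states documented above (0 and 1 both mean unattended, 2 or more means attended), the commented-out loop suggests a compare-and-swap of roughly the following shape. This is a sketch of the apparent intent, not code shipped by this merge:

        static inline void arrive(__ready_queue_caches_t & cache) {
                for() {
                        unsigned expected = cache.count;
                        // an unattended cache (0) jumps straight to 2 so that
                        // 1 can keep meaning "unattended but not empty"
                        unsigned desired  = 0 == expected ? 2 : expected + 1;
                        if(__atomic_compare_exchange_n(&cache.count, &expected, desired,
                                        false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) break;
                        Pause();
                }
        }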
    27645
    27746//=======================================================================
    27847// Cforall Ready Queue used for scheduling
    27948//=======================================================================
    280 unsigned long long moving_average(unsigned long long currtsc, unsigned long long instsc, unsigned long long old_avg) {
    281         /* paranoid */ verifyf( currtsc < 45000000000000000, "Suspiciously large current time: %'llu (%llx)\n", currtsc, currtsc );
    282         /* paranoid */ verifyf( instsc  < 45000000000000000, "Suspiciously large insert time: %'llu (%llx)\n", instsc, instsc );
    283         /* paranoid */ verifyf( old_avg < 15000000000000, "Suspiciously large previous average: %'llu (%llx)\n", old_avg, old_avg );
    284 
    285         const unsigned long long new_val = currtsc > instsc ? currtsc - instsc : 0;
    286         const unsigned long long total_weight = 16;
    287         const unsigned long long new_weight   = 4;
    288         const unsigned long long old_weight = total_weight - new_weight;
    289         const unsigned long long ret = ((new_weight * new_val) + (old_weight * old_avg)) / total_weight;
    290         return ret;
    291 }
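Concretely this is an exponential moving average weighting the newest sample at 4/16 and the history at 12/16: with an old average of 800 cycles and a fresh sample of 1600 cycles the result is (4 * 1600 + 12 * 800) / 16 = 1000 cycles, i.e. each sample moves the estimate a quarter of the way toward the new value.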
    292 
    293 void ?{}(__ready_queue_t & this) with (this) {
    294         #if defined(USE_CPU_WORK_STEALING)
    295                 lanes.count = cpu_info.hthrd_count * READYQ_SHARD_FACTOR;
    296                 lanes.data = alloc( lanes.count );
    297                 lanes.tscs = alloc( lanes.count );
    298                 lanes.help = alloc( cpu_info.hthrd_count );
    299 
    300                 for( idx; (size_t)lanes.count ) {
    301                         (lanes.data[idx]){};
    302                         lanes.tscs[idx].tv = rdtscl();
    303                         lanes.tscs[idx].ma = rdtscl();
    304                 }
    305                 for( idx; (size_t)cpu_info.hthrd_count ) {
    306                         lanes.help[idx].src = 0;
    307                         lanes.help[idx].dst = 0;
    308                         lanes.help[idx].tri = 0;
    309                 }
    310         #else
    311                 lanes.data   = 0p;
    312                 lanes.tscs   = 0p;
    313                 lanes.caches = 0p;
    314                 lanes.help   = 0p;
    315                 lanes.count  = 0;
    316         #endif
    317 }
    318 
    319 void ^?{}(__ready_queue_t & this) with (this) {
    320         #if !defined(USE_CPU_WORK_STEALING)
    321                 verify( SEQUENTIAL_SHARD == lanes.count );
    322         #endif
    323 
    324         free(lanes.data);
    325         free(lanes.tscs);
    326         free(lanes.caches);
    327         free(lanes.help);
    328 }
    329 
    330 //-----------------------------------------------------------------------
    331 #if defined(USE_AWARE_STEALING)
    332         __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
    333                 processor * const proc = kernelTLS().this_processor;
    334                 const bool external = (!proc) || (cltr != proc->cltr);
    335                 const bool remote   = hint == UNPARK_REMOTE;
    336 
    337                 unsigned i;
    338                 if( external || remote ) {
     339                         // Figure out where the thread was last time and make sure it's valid
    340                         /* paranoid */ verify(thrd->preferred >= 0);
    341                         if(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count) {
    342                                 /* paranoid */ verify(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count);
    343                                 unsigned start = thrd->preferred * READYQ_SHARD_FACTOR;
    344                                 do {
    345                                         unsigned r = __tls_rand();
    346                                         i = start + (r % READYQ_SHARD_FACTOR);
    347                                         /* paranoid */ verify( i < lanes.count );
    348                                         // If we can't lock it retry
    349                                 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    350                         } else {
    351                                 do {
    352                                         i = __tls_rand() % lanes.count;
    353                                 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    354                         }
     49// void ?{}(__ready_queue_t & this) with (this) {
     50//      lanes.data   = 0p;
     51//      lanes.tscs   = 0p;
     52//      lanes.caches = 0p;
     53//      lanes.count  = 0;
     54// }
     55
     56// void ^?{}(__ready_queue_t & this) with (this) {
     57//      free(lanes.data);
     58//      free(lanes.tscs);
     59//      free(lanes.caches);
     60// }
     61
     62//-----------------------------------------------------------------------
     63__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->sched) {
     64        processor * const proc = kernelTLS().this_processor;
     65        const bool external = (!proc) || (cltr != proc->cltr);
     66        const bool remote   = hint == UNPARK_REMOTE;
     67        const size_t lanes_count = readyQ.count;
     68
     69        /* paranoid */ verify( __shard_factor.readyq > 0 );
     70        /* paranoid */ verify( lanes_count > 0 );
     71
     72        unsigned i;
     73        if( external || remote ) {
     74                // Figure out where thread was last time and make sure it's valid
     75                /* paranoid */ verify(thrd->preferred >= 0);
     76                unsigned start = thrd->preferred * __shard_factor.readyq;
     77                if(start < lanes_count) {
     78                        do {
     79                                unsigned r = __tls_rand();
     80                                i = start + (r % __shard_factor.readyq);
     81                                /* paranoid */ verify( i < lanes_count );
     82                                // If we can't lock it retry
     83                        } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
    35584                } else {
    35685                        do {
    357                                 unsigned r = proc->rdq.its++;
    358                                 i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
    359                                 /* paranoid */ verify( i < lanes.count );
    360                                 // If we can't lock it retry
    361                         } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    362                 }
    363 
    364                 // Actually push it
    365                 push(lanes.data[i], thrd);
    366 
    367                 // Unlock and return
    368                 __atomic_unlock( &lanes.data[i].lock );
    369 
    370                 #if !defined(__CFA_NO_STATISTICS__)
    371                         if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
    372                         else __tls_stats()->ready.push.local.success++;
    373                 #endif
    374         }
    375 
    376         static inline unsigned long long calc_cutoff(const unsigned long long ctsc, const processor * proc, __ready_queue_t & rdq) {
    377                 unsigned start = proc->rdq.id;
    378                 unsigned long long max = 0;
    379                 for(i; READYQ_SHARD_FACTOR) {
    380                         unsigned long long ptsc = ts(rdq.lanes.data[start + i]);
    381                         if(ptsc != -1ull) {
    382                                 /* paranoid */ verify( start + i < rdq.lanes.count );
    383                                 unsigned long long tsc = moving_average(ctsc, ptsc, rdq.lanes.tscs[start + i].ma);
    384                                 if(tsc > max) max = tsc;
    385                         }
    386                 }
    387                 return (max + 2 * max) / 2;
    388         }
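Note that (max + 2 * max) / 2 is simply 150% of the largest per-shard moving average: with max = 10000 cycles the cutoff is 15000 cycles, so a help target only qualifies once its age exceeds the local neighbourhood's worst average by half again.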
    389 
    390         __attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
    391                 /* paranoid */ verify( lanes.count > 0 );
    392                 /* paranoid */ verify( kernelTLS().this_processor );
    393                 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
    394 
    395                 processor * const proc = kernelTLS().this_processor;
    396                 unsigned this = proc->rdq.id;
    397                 /* paranoid */ verify( this < lanes.count );
    398                 __cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);
    399 
    400                 // Figure out the current cpu and make sure it is valid
    401                 const int cpu = __kernel_getcpu();
    402                 /* paranoid */ verify(cpu >= 0);
    403                 /* paranoid */ verify(cpu < cpu_info.hthrd_count);
    404                 unsigned this_cache = cpu_info.llc_map[cpu].cache;
    405 
    406                 // Super important: don't write the same value over and over again
     407                 // We want to maximise our chances that this particular value stays in cache
    408                 if(lanes.caches[this / READYQ_SHARD_FACTOR].id != this_cache)
    409                         __atomic_store_n(&lanes.caches[this / READYQ_SHARD_FACTOR].id, this_cache, __ATOMIC_RELAXED);
    410 
    411                 const unsigned long long ctsc = rdtscl();
    412 
    413                 if(proc->rdq.target == MAX) {
    414                         uint64_t chaos = __tls_rand();
    415                         unsigned ext = chaos & 0xff;
    416                         unsigned other  = (chaos >> 8) % (lanes.count);
    417 
    418                         if(ext < 3 || __atomic_load_n(&lanes.caches[other / READYQ_SHARD_FACTOR].id, __ATOMIC_RELAXED) == this_cache) {
    419                                 proc->rdq.target = other;
    420                         }
    421                 }
    422                 else {
    423                         const unsigned target = proc->rdq.target;
     424                 __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tsc %llu\n", this, target, lanes.tscs[target].tv);
    425                         /* paranoid */ verify( lanes.tscs[target].tv != MAX );
    426                         if(target < lanes.count) {
    427                                 const unsigned long long cutoff = calc_cutoff(ctsc, proc, cltr->ready_queue);
    428                                 const unsigned long long age = moving_average(ctsc, lanes.tscs[target].tv, lanes.tscs[target].ma);
    429                                 __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
    430                                 if(age > cutoff) {
    431                                         thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
    432                                         if(t) return t;
    433                                 }
    434                         }
    435                         proc->rdq.target = MAX;
    436                 }
    437 
    438                 for(READYQ_SHARD_FACTOR) {
    439                         unsigned i = this + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
    440                         if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
    441                 }
    442 
     443                 // All lanes were empty, return 0p
    444                 return 0p;
    445 
    446         }
    447         __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
    448                 unsigned i = __tls_rand() % lanes.count;
    449                 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
    450         }
    451         __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
    452                 return search(cltr);
    453         }
    454 #endif
    455 #if defined(USE_CPU_WORK_STEALING)
    456         __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
    457                 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
    458 
    459                 processor * const proc = kernelTLS().this_processor;
    460                 const bool external = (!proc) || (cltr != proc->cltr);
    461 
    462                 // Figure out the current cpu and make sure it is valid
    463                 const int cpu = __kernel_getcpu();
    464                 /* paranoid */ verify(cpu >= 0);
    465                 /* paranoid */ verify(cpu < cpu_info.hthrd_count);
    466                 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);
    467 
     468                 // Figure out where the thread was last time and make sure it's valid
    469                 /* paranoid */ verify(thrd->preferred >= 0);
    470                 /* paranoid */ verify(thrd->preferred < cpu_info.hthrd_count);
    471                 /* paranoid */ verify(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count);
    472                 const int prf = thrd->preferred * READYQ_SHARD_FACTOR;
    473 
    474                 const cpu_map_entry_t & map;
    475                 choose(hint) {
    476                         case UNPARK_LOCAL : &map = &cpu_info.llc_map[cpu];
    477                         case UNPARK_REMOTE: &map = &cpu_info.llc_map[prf];
    478                 }
    479                 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
    480                 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
    481                 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);
    482 
    483                 const int start = map.self * READYQ_SHARD_FACTOR;
    484                 unsigned i;
     86                                i = __tls_rand() % lanes_count;
     87                        } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
     88                }
     89        } else {
    48590                do {
    486                         unsigned r;
    487                         if(unlikely(external)) { r = __tls_rand(); }
    488                         else { r = proc->rdq.its++; }
    489                         choose(hint) {
    490                                 case UNPARK_LOCAL : i = start + (r % READYQ_SHARD_FACTOR);
    491                                 case UNPARK_REMOTE: i = prf   + (r % READYQ_SHARD_FACTOR);
    492                         }
     91                        unsigned r = proc->rdq.its++;
     92                        i = proc->rdq.id + (r % __shard_factor.readyq);
     93                        /* paranoid */ verify( i < lanes_count );
    49394                        // If we can't lock it retry
    494                 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    495 
    496                 // Actually push it
    497                 push(lanes.data[i], thrd);
    498 
    499                 // Unlock and return
    500                 __atomic_unlock( &lanes.data[i].lock );
    501 
    502                 #if !defined(__CFA_NO_STATISTICS__)
    503                         if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
    504                         else __tls_stats()->ready.push.local.success++;
    505                 #endif
    506 
    507                 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
    508 
    509         }
    510 
    511         // Pop from the ready queue from a given cluster
    512         __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
    513                 /* paranoid */ verify( lanes.count > 0 );
    514                 /* paranoid */ verify( kernelTLS().this_processor );
    515 
    516                 processor * const proc = kernelTLS().this_processor;
    517                 const int cpu = __kernel_getcpu();
    518                 /* paranoid */ verify(cpu >= 0);
    519                 /* paranoid */ verify(cpu < cpu_info.hthrd_count);
    520                 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);
    521 
    522                 const cpu_map_entry_t & map = cpu_info.llc_map[cpu];
    523                 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
    524                 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
    525                 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);
    526 
    527                 const int start = map.self * READYQ_SHARD_FACTOR;
    528                 const unsigned long long ctsc = rdtscl();
    529 
     530                 // Do we already have a help target?
    531                 if(proc->rdq.target == MAX) {
    532                         unsigned long long max = 0;
    533                         for(i; READYQ_SHARD_FACTOR) {
    534                                 unsigned long long tsc = moving_average(ctsc, ts(lanes.data[start + i]), lanes.tscs[start + i].ma);
    535                                 if(tsc > max) max = tsc;
    536                         }
    537                         //  proc->rdq.cutoff = (max + 2 * max) / 2;
    538                         /* paranoid */ verify(lanes.count < 65536); // The following code assumes max 65536 cores.
    539                         /* paranoid */ verify(map.count < 65536); // The following code assumes max 65536 cores.
    540 
    541                         if(0 == (__tls_rand() % 100)) {
    542                                 proc->rdq.target = __tls_rand() % lanes.count;
    543                         } else {
    544                                 unsigned cpu_chaos = map.start + (__tls_rand() % map.count);
    545                                 proc->rdq.target = (cpu_chaos * READYQ_SHARD_FACTOR) + (__tls_rand() % READYQ_SHARD_FACTOR);
    546                                 /* paranoid */ verify(proc->rdq.target >= (map.start * READYQ_SHARD_FACTOR));
    547                                 /* paranoid */ verify(proc->rdq.target <  ((map.start + map.count) * READYQ_SHARD_FACTOR));
    548                         }
    549 
    550                         /* paranoid */ verify(proc->rdq.target != MAX);
    551                 }
    552                 else {
    553                         unsigned long long max = 0;
    554                         for(i; READYQ_SHARD_FACTOR) {
    555                                 unsigned long long tsc = moving_average(ctsc, ts(lanes.data[start + i]), lanes.tscs[start + i].ma);
    556                                 if(tsc > max) max = tsc;
    557                         }
    558                         const unsigned long long cutoff = (max + 2 * max) / 2;
    559                         {
    560                                 unsigned target = proc->rdq.target;
    561                                 proc->rdq.target = MAX;
    562                                 lanes.help[target / READYQ_SHARD_FACTOR].tri++;
    563                                 if(moving_average(ctsc, lanes.tscs[target].tv, lanes.tscs[target].ma) > cutoff) {
    564                                         thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
    565                                         proc->rdq.last = target;
    566                                         if(t) return t;
    567                                 }
    568                                 proc->rdq.target = MAX;
    569                         }
    570 
    571                         unsigned last = proc->rdq.last;
    572                         if(last != MAX && moving_average(ctsc, lanes.tscs[last].tv, lanes.tscs[last].ma) > cutoff) {
    573                                 thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));
    574                                 if(t) return t;
    575                         }
    576                         else {
    577                                 proc->rdq.last = MAX;
    578                         }
    579                 }
    580 
    581                 for(READYQ_SHARD_FACTOR) {
    582                         unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
    583                         if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
    584                 }
    585 
     586                 // All lanes were empty, return 0p
    587                 return 0p;
    588         }
    589 
    590         __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
    591                 processor * const proc = kernelTLS().this_processor;
    592                 unsigned last = proc->rdq.last;
    593                 if(last != MAX) {
    594                         struct thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal));
    595                         if(t) return t;
    596                         proc->rdq.last = MAX;
    597                 }
    598 
    599                 unsigned i = __tls_rand() % lanes.count;
    600                 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
    601         }
    602         __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
    603                 return search(cltr);
    604         }
    605 #endif
    606 #if defined(USE_RELAXED_FIFO)
    607         //-----------------------------------------------------------------------
    608         // get index from random number with or without bias towards queues
    609         static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
    610                 unsigned i;
    611                 bool local;
    612                 unsigned rlow  = r % BIAS;
    613                 unsigned rhigh = r / BIAS;
    614                 if((0 != rlow) && preferred >= 0) {
    615                         // (BIAS - 1) out of BIAS chances
     616                         // Use preferred queues
    617                         i = preferred + (rhigh % READYQ_SHARD_FACTOR);
    618                         local = true;
    619                 }
    620                 else {
    621                         // 1 out of BIAS chances
    622                         // Use all queues
    623                         i = rhigh;
    624                         local = false;
    625                 }
    626                 return [i, local];
    627         }
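For example, with BIAS 4 and READYQ_SHARD_FACTOR 4, r = 10 gives rlow = 2 and rhigh = 2, so the push lands in the preferred shard as [preferred + 2, true]; r = 8 gives rlow = 0 and falls through to the global pick [2, false]. Three draws out of four therefore stay on the processor's own queues.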
    628 
    629         __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
    630                 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
    631 
    632                 const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
    633                 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
    634 
    635                 bool local;
    636                 int preferred = external ? -1 : kernelTLS().this_processor->rdq.id;
    637 
    638                 // Try to pick a lane and lock it
    639                 unsigned i;
    640                 do {
    641                         // Pick the index of a lane
    642                         unsigned r = __tls_rand_fwd();
    643                         [i, local] = idx_from_r(r, preferred);
    644 
    645                         i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
    646 
    647                         #if !defined(__CFA_NO_STATISTICS__)
    648                                 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
    649                                 else if(local) __tls_stats()->ready.push.local.attempt++;
    650                                 else __tls_stats()->ready.push.share.attempt++;
    651                         #endif
    652 
    653                         // If we can't lock it retry
    654                 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    655 
    656                 // Actually push it
    657                 push(lanes.data[i], thrd);
    658 
    659                 // Unlock and return
    660                 __atomic_unlock( &lanes.data[i].lock );
    661 
    662                 // Mark the current index in the tls rng instance as having an item
    663                 __tls_rand_advance_bck();
    664 
    665                 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
    666 
    667                 // Update statistics
    668                 #if !defined(__CFA_NO_STATISTICS__)
    669                         if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
    670                         else if(local) __tls_stats()->ready.push.local.success++;
    671                         else __tls_stats()->ready.push.share.success++;
    672                 #endif
    673         }
    674 
    675         // Pop from the ready queue from a given cluster
    676         __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
    677                 /* paranoid */ verify( lanes.count > 0 );
    678                 /* paranoid */ verify( kernelTLS().this_processor );
    679                 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
    680 
    681                 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
    682                 int preferred = kernelTLS().this_processor->rdq.id;
    683 
    684 
    685                 // As long as the list is not empty, try finding a lane that isn't empty and pop from it
    686                 for(25) {
    687                         // Pick two lists at random
    688                         unsigned ri = __tls_rand_bck();
    689                         unsigned rj = __tls_rand_bck();
    690 
    691                         unsigned i, j;
    692                         __attribute__((unused)) bool locali, localj;
    693                         [i, locali] = idx_from_r(ri, preferred);
    694                         [j, localj] = idx_from_r(rj, preferred);
    695 
    696                         i %= count;
    697                         j %= count;
    698 
    699                         // try popping from the 2 picked lists
    700                         struct thread$ * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
    701                         if(thrd) {
    702                                 return thrd;
    703                         }
    704                 }
    705 
     706                 // All lanes were empty, return 0p
    707                 return 0p;
    708         }
    709 
    710         __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
    711         __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
    712                 return search(cltr);
    713         }
    714 #endif
    715 #if defined(USE_WORK_STEALING)
    716         __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
    717                 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
    718 
    719                 // #define USE_PREFERRED
    720                 #if !defined(USE_PREFERRED)
    721                 const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr);
    722                 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count );
    723                 #else
    724                         unsigned preferred = thrd->preferred;
    725                         const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || preferred == MAX || thrd->curr_cluster != cltr;
    726                         /* paranoid */ verifyf(external || preferred < lanes.count, "Invalid preferred queue %u for %u lanes", preferred, lanes.count );
    727 
    728                         unsigned r = preferred % READYQ_SHARD_FACTOR;
    729                         const unsigned start = preferred - r;
    730                 #endif
    731 
    732                 // Try to pick a lane and lock it
    733                 unsigned i;
    734                 do {
    735                         #if !defined(__CFA_NO_STATISTICS__)
    736                                 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.attempt, 1, __ATOMIC_RELAXED);
    737                                 else __tls_stats()->ready.push.local.attempt++;
    738                         #endif
    739 
    740                         if(unlikely(external)) {
    741                                 i = __tls_rand() % lanes.count;
    742                         }
    743                         else {
    744                                 #if !defined(USE_PREFERRED)
    745                                         processor * proc = kernelTLS().this_processor;
    746                                         unsigned r = proc->rdq.its++;
    747                                         i =  proc->rdq.id + (r % READYQ_SHARD_FACTOR);
    748                                 #else
    749                                         i = start + (r++ % READYQ_SHARD_FACTOR);
    750                                 #endif
    751                         }
    752                         // If we can't lock it retry
    753                 } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
    754 
    755                 // Actually push it
    756                 push(lanes.data[i], thrd);
    757 
    758                 // Unlock and return
    759                 __atomic_unlock( &lanes.data[i].lock );
    760 
    761                 #if !defined(__CFA_NO_STATISTICS__)
    762                         if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
    763                         else __tls_stats()->ready.push.local.success++;
    764                 #endif
    765 
    766                 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
    767         }
    768 
    769         // Pop from the ready queue from a given cluster
    770         __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
    771                 /* paranoid */ verify( lanes.count > 0 );
    772                 /* paranoid */ verify( kernelTLS().this_processor );
    773                 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
    774 
    775                 processor * proc = kernelTLS().this_processor;
    776 
    777                 if(proc->rdq.target == MAX) {
    778                         unsigned long long min = ts(lanes.data[proc->rdq.id]);
    779                         for(int i = 0; i < READYQ_SHARD_FACTOR; i++) {
    780                                 unsigned long long tsc = ts(lanes.data[proc->rdq.id + i]);
    781                                 if(tsc < min) min = tsc;
    782                         }
    783                         proc->rdq.cutoff = min;
    784                         proc->rdq.target = __tls_rand() % lanes.count;
    785                 }
    786                 else {
    787                         unsigned target = proc->rdq.target;
    788                         proc->rdq.target = MAX;
    789                         const unsigned long long bias = 0; //2_500_000_000;
    790                         const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff;
    791                         if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
     95                } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
     96        }
     97
     98        // Actually push it
     99        push(readyQ.data[i], thrd);
     100
     101        // Unlock and return
     102        __atomic_unlock( &readyQ.data[i].lock );
     103
     104        #if !defined(__CFA_NO_STATISTICS__)
     105                if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
     106                else __tls_stats()->ready.push.local.success++;
     107        #endif
     108}
     109
     110__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->sched) {
     111        const size_t lanes_count = readyQ.count;
     112
     113        /* paranoid */ verify( __shard_factor.readyq > 0 );
     114        /* paranoid */ verify( lanes_count > 0 );
     115        /* paranoid */ verify( kernelTLS().this_processor );
     116        /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes_count );
     117
     118        processor * const proc = kernelTLS().this_processor;
     119        unsigned this = proc->rdq.id;
     120        /* paranoid */ verify( this < lanes_count );
     121        __cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);
     122
      123        // Figure out which cache we are currently on
     124        const unsigned this_cache = cache_id(cltr, this / __shard_factor.readyq);
     125        const unsigned long long ctsc = rdtscl();
     126
     127        if(proc->rdq.target == MAX) {
     128                uint64_t chaos = __tls_rand();
     129                unsigned ext = chaos & 0xff;
     130                unsigned other  = (chaos >> 8) % (lanes_count);
     131
     132                if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.readyq].id, __ATOMIC_RELAXED) == this_cache) {
     133                        proc->rdq.target = other;
     134                }
     135        }
     136        else {
     137                const unsigned target = proc->rdq.target;
      138                __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tsc %llu\n", this, target, readyQ.tscs[target].tv);
     139                /* paranoid */ verify( readyQ.tscs[target].tv != MAX );
     140                if(target < lanes_count) {
     141                        const unsigned long long cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq);
     142                        const unsigned long long age = moving_average(ctsc, readyQ.tscs[target].tv, readyQ.tscs[target].ma);
     143                        __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
     144                        if(age > cutoff) {
    792145                                thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
    793146                                if(t) return t;
    794147                        }
    795148                }
    796 
    797                 for(READYQ_SHARD_FACTOR) {
    798                         unsigned i = proc->rdq.id + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
    799                         if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
    800                 }
    801                 return 0p;
    802         }
    803 
    804         __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
    805                 unsigned i = __tls_rand() % lanes.count;
    806                 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
    807         }
    808 
    809         __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
    810                 return search(cltr);
    811         }
    812 #endif
     149                proc->rdq.target = MAX;
     150        }
     151
     152        for(__shard_factor.readyq) {
     153                unsigned i = this + (proc->rdq.itr++ % __shard_factor.readyq);
     154                if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
     155        }
     156
      157        // All lanes were empty, return 0p
     158        return 0p;
     159
     160}
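The target-selection branch deserves a number: ext is a uniformly random byte, so ext < 3 holds with probability 3/256, roughly 1.2%; the remaining ~98.8% of the time a new help target is only adopted when it shares the current last-level cache, keeping helping traffic cache-local by default.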
     161__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) {
     162        unsigned i = __tls_rand() % (cltr->sched.readyQ.count);
     163        return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
     164}
     165__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
     166        return search(cltr);
     167}
    813168
    814169//=======================================================================
     
    820175//-----------------------------------------------------------------------
    821176// try to pop from a lane given by index w
    822 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
    823         /* paranoid */ verify( w < lanes.count );
     177static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
     178        /* paranoid */ verify( w < readyQ.count );
    824179        __STATS( stats.attempt++; )
    825180
    826181        // Get relevant elements locally
    827         __intrusive_lane_t & lane = lanes.data[w];
     182        __intrusive_lane_t & lane = readyQ.data[w];
    828183
    829184        // If list looks empty retry
     
    845200        // Actually pop the list
    846201        struct thread$ * thrd;
    847         #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING)
    848                 unsigned long long tsc_before = ts(lane);
    849         #endif
    850         unsigned long long tsv;
    851         [thrd, tsv] = pop(lane);
     202        unsigned long long ts_prev = ts(lane);
     203        unsigned long long ts_next;
     204        [thrd, ts_next] = pop(lane);
    852205
    853206        /* paranoid */ verify(thrd);
    854         /* paranoid */ verify(tsv);
     207        /* paranoid */ verify(ts_next);
    855208        /* paranoid */ verify(lane.lock);
    856209
     
    861214        __STATS( stats.success++; )
    862215
    863         #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING)
    864                 if (tsv != MAX) {
    865                         unsigned long long now = rdtscl();
    866                         unsigned long long pma = __atomic_load_n(&lanes.tscs[w].ma, __ATOMIC_RELAXED);
    867                         __atomic_store_n(&lanes.tscs[w].tv, tsv, __ATOMIC_RELAXED);
    868                         __atomic_store_n(&lanes.tscs[w].ma, moving_average(now, tsc_before, pma), __ATOMIC_RELAXED);
    869                 }
    870         #endif
    871 
    872         #if defined(USE_AWARE_STEALING) || defined(USE_CPU_WORK_STEALING)
    873                 thrd->preferred = w / READYQ_SHARD_FACTOR;
    874         #else
    875                 thrd->preferred = w;
    876         #endif
     216        touch_tsc(readyQ.tscs, w, ts_prev, ts_next);
     217
     218        thrd->preferred = w / __shard_factor.readyq;
    877219
    878220        // return the popped thread
     
     883225// try to pop from any lanes making sure you don't miss any threads pushed
    884226// before the start of the function
    885 static inline struct thread$ * search(struct cluster * cltr) with (cltr->ready_queue) {
    886         /* paranoid */ verify( lanes.count > 0 );
    887         unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
     227static inline struct thread$ * search(struct cluster * cltr) {
     228        const size_t lanes_count = cltr->sched.readyQ.count;
     229        /* paranoid */ verify( lanes_count > 0 );
     230        unsigned count = __atomic_load_n( &lanes_count, __ATOMIC_RELAXED );
    888231        unsigned offset = __tls_rand();
    889232        for(i; count) {
     
     902245// get preferred ready queue for new thread
    903246unsigned ready_queue_new_preferred() {
    904         unsigned pref = 0;
     247        unsigned pref = MAX;
    905248        if(struct thread$ * thrd = publicTLS_get( this_thread )) {
    906249                pref = thrd->preferred;
    907250        }
    908         else {
    909                 #if defined(USE_CPU_WORK_STEALING)
    910                         pref = __kernel_getcpu();
    911                 #endif
    912         }
    913 
    914         #if defined(USE_CPU_WORK_STEALING)
    915                 /* paranoid */ verify(pref >= 0);
    916                 /* paranoid */ verify(pref < cpu_info.hthrd_count);
    917         #endif
    918251
    919252        return pref;
     
    921254
    922255//-----------------------------------------------------------------------
    923 // Check that all the intrusive queues in the data structure are still consistent
    924 static void check( __ready_queue_t & q ) with (q) {
    925         #if defined(__CFA_WITH_VERIFY__)
    926                 {
    927                         for( idx ; lanes.count ) {
    928                                 __intrusive_lane_t & sl = lanes.data[idx];
    929                                 assert(!lanes.data[idx].lock);
    930 
    931                                         if(is_empty(sl)) {
    932                                                 assert( sl.anchor.next == 0p );
    933                                                 assert( sl.anchor.ts   == -1llu );
    934                                                 assert( mock_head(sl)  == sl.prev );
    935                                         } else {
    936                                                 assert( sl.anchor.next != 0p );
    937                                                 assert( sl.anchor.ts   != -1llu );
    938                                                 assert( mock_head(sl)  != sl.prev );
    939                                         }
    940                         }
    941                 }
    942         #endif
    943 }
    944 
    945 //-----------------------------------------------------------------------
     946256// Given 2 indexes, pick the list with the oldest push and try to pop from it
    947 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
     257static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
     948258        // Pick the best list
    949259        int w = i;
    950         if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
    951                 w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
     260        if( __builtin_expect(!is_empty(readyQ.data[j]), true) ) {
     261                w = (ts(readyQ.data[i]) < ts(readyQ.data[j])) ? i : j;
    952262        }
    953263
    954264        return try_pop(cltr, w __STATS(, stats));
    955265}
    956 
     957 // Call this function if the intrusive list was moved using memcpy
     958 // it fixes the list so that the pointers back to the anchors aren't left dangling
    959 static inline void fix(__intrusive_lane_t & ll) {
    960                         if(is_empty(ll)) {
    961                                 verify(ll.anchor.next == 0p);
    962                                 ll.prev = mock_head(ll);
    963                         }
    964 }
    965 
    966 static void assign_list(unsigned & value, dlist(processor) & list, unsigned count) {
    967         processor * it = &list`first;
    968         for(unsigned i = 0; i < count; i++) {
    969                 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
    970                 it->rdq.id = value;
    971                 it->rdq.target = MAX;
    972                 value += READYQ_SHARD_FACTOR;
    973                 it = &(*it)`next;
    974         }
    975 }
    976 
    977 static void reassign_cltr_id(struct cluster * cltr) {
    978         unsigned preferred = 0;
    979         assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
    980         assign_list(preferred, cltr->procs.idles  , cltr->procs.idle );
    981 }
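assign_list hands each processor a contiguous block of READYQ_SHARD_FACTOR lanes: with a shard factor of 2, for example, three active processors receive rdq.id 0, 2 and 4, and idle processors continue the numbering after the active ones, so every lane has exactly one owning processor.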
    982 
    983 static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
    984         #if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING)
    985                 lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
    986                 for(i; lanes.count) {
    987                         lanes.tscs[i].tv = rdtscl();
    988                         lanes.tscs[i].ma = 0;
    989                 }
    990         #endif
    991 }
    992 
    993 #if defined(USE_CPU_WORK_STEALING)
    994         // ready_queue size is fixed in this case
    995         void ready_queue_grow(struct cluster * cltr) {}
    996         void ready_queue_shrink(struct cluster * cltr) {}
    997 #else
    998         // Grow the ready queue
    999         void ready_queue_grow(struct cluster * cltr) {
    1000                 size_t ncount;
    1001                 int target = cltr->procs.total;
    1002 
    1003                 /* paranoid */ verify( ready_mutate_islocked() );
    1004                 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");
    1005 
    1006                 // Make sure that everything is consistent
    1007                 /* paranoid */ check( cltr->ready_queue );
    1008 
    1009                 // grow the ready queue
    1010                 with( cltr->ready_queue ) {
    1011                         // Find new count
     1012                         // Make sure we always have at least 1 list
    1013                         if(target >= 2) {
    1014                                 ncount = target * READYQ_SHARD_FACTOR;
    1015                         } else {
    1016                                 ncount = SEQUENTIAL_SHARD;
    1017                         }
    1018 
    1019                         // Allocate new array (uses realloc and memcpies the data)
    1020                         lanes.data = alloc( ncount, lanes.data`realloc );
    1021 
    1022                         // Fix the moved data
    1023                         for( idx; (size_t)lanes.count ) {
    1024                                 fix(lanes.data[idx]);
    1025                         }
    1026 
    1027                         // Construct new data
    1028                         for( idx; (size_t)lanes.count ~ ncount) {
    1029                                 (lanes.data[idx]){};
    1030                         }
    1031 
    1032                         // Update original
    1033                         lanes.count = ncount;
    1034 
    1035                         lanes.caches = alloc( target, lanes.caches`realloc );
    1036                 }
    1037 
    1038                 fix_times(cltr);
    1039 
    1040                 reassign_cltr_id(cltr);
    1041 
    1042                 // Make sure that everything is consistent
    1043                 /* paranoid */ check( cltr->ready_queue );
    1044 
    1045                 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");
    1046 
    1047                 /* paranoid */ verify( ready_mutate_islocked() );
    1048         }
    1049 
    1050         // Shrink the ready queue
    1051         void ready_queue_shrink(struct cluster * cltr) {
    1052                 /* paranoid */ verify( ready_mutate_islocked() );
    1053                 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
    1054 
    1055                 // Make sure that everything is consistent
    1056                 /* paranoid */ check( cltr->ready_queue );
    1057 
    1058                 int target = cltr->procs.total;
    1059 
    1060                 with( cltr->ready_queue ) {
    1061                         // Remember old count
    1062                         size_t ocount = lanes.count;
    1063 
    1064                         // Find new count
    1065                         // Make sure we always have at least 1 list
    1066                         lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD;
    1067                         /* paranoid */ verify( ocount >= lanes.count );
    1068                         /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );
    1069 
    1070                         // for printing, count the number of displaced threads
    1071                         #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
    1072                                 __attribute__((unused)) size_t displaced = 0;
    1073                         #endif
    1074 
    1075                         // redistribute old data
    1076                         for( idx; (size_t)lanes.count ~ ocount) {
    1077                                 // Lock is not strictly needed but makes checking invariants much easier
    1078                                 __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
    1079                                 verify(locked);
    1080 
    1081                                 // While we can still pop from this lane, push the threads somewhere else in the queue
    1082                                 while(!is_empty(lanes.data[idx])) {
    1083                                         struct thread$ * thrd;
    1084                                         unsigned long long _;
    1085                                         [thrd, _] = pop(lanes.data[idx]);
    1086 
    1087                                         push(cltr, thrd, true);
    1088 
    1089                                         // for printing, count the number of displaced threads
    1090                                         #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
    1091                                                 displaced++;
    1092                                         #endif
    1093                                 }
    1094 
    1095                                 // Unlock the lane
    1096                                 __atomic_unlock(&lanes.data[idx].lock);
    1097 
    1098                                 // TODO print the queue statistics here
    1099 
    1100                                 ^(lanes.data[idx]){};
    1101                         }
    1102 
    1103                         __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);
    1104 
    1105                         // Allocate new array (uses realloc and memcpies the data)
    1106                         lanes.data = alloc( lanes.count, lanes.data`realloc );
    1107 
    1108                         // Fix the moved data
    1109                         for( idx; (size_t)lanes.count ) {
    1110                                 fix(lanes.data[idx]);
    1111                         }
    1112 
    1113                         lanes.caches = alloc( target, lanes.caches`realloc );
    1114                 }
    1115 
    1116                 fix_times(cltr);
    1117 
    1118 
    1119                 reassign_cltr_id(cltr);
    1120 
    1121                 // Make sure that everything is consistent
    1122                 /* paranoid */ check( cltr->ready_queue );
    1123 
    1124                 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
    1125                 /* paranoid */ verify( ready_mutate_islocked() );
    1126         }
    1127 #endif
    1128 
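For reference, the lane-count rule shared by ready_queue_grow and ready_queue_shrink above reduces to one small pure function. A minimal C++ sketch follows; the constant values are assumptions, standing in for the runtime's actual READYQ_SHARD_FACTOR and SEQUENTIAL_SHARD configuration.

        #include <cstddef>

        // Assumed placeholder values for the runtime's configuration constants.
        static const std::size_t READYQ_SHARD_FACTOR = 4;
        static const std::size_t SEQUENTIAL_SHARD    = 2;

        // With two or more processors, each processor gets READYQ_SHARD_FACTOR
        // lanes to spread contention; a lone processor falls back to a fixed,
        // small shard count.
        std::size_t lane_count( int procs ) {
                return procs >= 2 ? procs * READYQ_SHARD_FACTOR : SEQUENTIAL_SHARD;
        }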
    1129 #if !defined(__CFA_NO_STATISTICS__)
    1130         unsigned cnt(const __ready_queue_t & this, unsigned idx) {
    1131                 /* paranoid */ verify(this.lanes.count > idx);
    1132                 return this.lanes.data[idx].cnt;
    1133         }
    1134 #endif
    1135 
    1136 
    1137 #if   defined(CFA_HAVE_LINUX_LIBRSEQ)
    1138         // No definition needed
    1139 #elif defined(CFA_HAVE_LINUX_RSEQ_H)
    1140 
    1141         #if defined( __x86_64 ) || defined( __i386 )
    1142                 #define RSEQ_SIG        0x53053053
    1143         #elif defined( __ARM_ARCH )
    1144                 #ifdef __ARMEB__
    1145                 #define RSEQ_SIG    0xf3def5e7      /* udf    #24035    ; 0x5de3 (ARMv6+) */
    1146                 #else
    1147                 #define RSEQ_SIG    0xe7f5def3      /* udf    #24035    ; 0x5de3 */
    1148                 #endif
    1149         #endif
    1150 
    1151         extern void __disable_interrupts_hard();
    1152         extern void __enable_interrupts_hard();
    1153 
    1154         static void __kernel_raw_rseq_register  (void) {
    1155                 /* paranoid */ verify( __cfaabi_rseq.cpu_id == RSEQ_CPU_ID_UNINITIALIZED );
    1156 
    1157                 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, (sigset_t *)0p, _NSIG / 8);
    1158                 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, RSEQ_SIG);
    1159                 if(ret != 0) {
    1160                         int e = errno;
    1161                         switch(e) {
    1162                         case EINVAL: abort("KERNEL ERROR: rseq register invalid argument");
    1163                         case ENOSYS: abort("KERNEL ERROR: rseq register not supported");
    1164                         case EFAULT: abort("KERNEL ERROR: rseq register with invalid argument");
    1165                         case EBUSY : abort("KERNEL ERROR: rseq register already registered");
    1166                         case EPERM : abort("KERNEL ERROR: rseq register sig argument on unregistration does not match the signature received on registration");
    1167                         default: abort("KERNEL ERROR: rseq register unexpected return %d", e);
    1168                         }
    1169                 }
    1170         }
    1171 
    1172         static void __kernel_raw_rseq_unregister(void) {
    1173                 /* paranoid */ verify( __cfaabi_rseq.cpu_id >= 0 );
    1174 
    1175                 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, (sigset_t *)0p, _NSIG / 8);
    1176                 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, RSEQ_SIG);
    1177                 if(ret != 0) {
    1178                         int e = errno;
    1179                         switch(e) {
    1180                         case EINVAL: abort("KERNEL ERROR: rseq unregister invalid argument");
    1181                         case ENOSYS: abort("KERNEL ERROR: rseq unregister not supported");
    1182                         case EFAULT: abort("KERNEL ERROR: rseq unregister with invalid argument");
    1183                         case EBUSY : abort("KERNEL ERROR: rseq unregister already registered");
    1184                         case EPERM : abort("KERNEL ERROR: rseq unregister sig argument on unregistration does not match the signature received on registration");
    1185                         default: abort("KERNEL ERROR: rseq unregister unexpected return %d", e);
    1186                         }
    1187                 }
    1188         }
    1189 #else
    1190         // No definition needed
    1191 #endif
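For orientation, the raw registration above is the plain rseq(2) system call. A minimal, self-contained C++ sketch under the same assumptions (Linux >= 4.18 with the <linux/rseq.h> UAPI header; note that recent glibc versions register rseq themselves, in which case this call fails with EBUSY):

        #include <linux/rseq.h>
        #include <sys/syscall.h>
        #include <unistd.h>
        #include <cerrno>
        #include <cstdio>
        #include <cstdlib>

        // Per-thread rseq area; the kernel requires 32-byte alignment and
        // writes the current cpu_id into it once registration succeeds.
        static __thread struct rseq rseq_area __attribute__((aligned(32)));

        // Arbitrary example signature; must match on later unregistration.
        static const unsigned RSEQ_SIG_EXAMPLE = 0x53053053;

        void rseq_register_current_thread() {
                int ret = syscall( __NR_rseq, &rseq_area, sizeof(rseq_area), 0, RSEQ_SIG_EXAMPLE );
                if ( ret != 0 ) {
                        std::perror( "rseq register" );   // EINVAL/ENOSYS/EFAULT/EBUSY/EPERM
                        std::abort();
                }
        }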
  • libcfa/src/concurrency/ready_subqueue.hfa

    rba897d21 r2e9b59b  
    33#define __CFA_NO_SCHED_STATS__
    44
    5 #include "containers/queueLockFree.hfa"
     5#include "limits.hfa"
    66
    77// Intrusive lanes which are used by the relaxed ready queue
     
    2727}
    2828
    29 // Ctor
    30 void ?{}( __intrusive_lane_t & this ) {
    31         this.lock = false;
    32         this.prev = mock_head(this);
    33         this.anchor.next = 0p;
    34         this.anchor.ts   = -1llu;
    35         #if !defined(__CFA_NO_STATISTICS__)
    36                 this.cnt  = 0;
    37         #endif
    38 
    39         // We add a boat-load of assertions here because the anchor code is very fragile
    40         /* paranoid */ _Static_assert( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
    41         /* paranoid */ verify( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
    42         /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.anchor) );
    43         /* paranoid */ verify( &mock_head(this)->link.next == &this.anchor.next );
    44         /* paranoid */ verify( &mock_head(this)->link.ts   == &this.anchor.ts   );
    45         /* paranoid */ verify( mock_head(this)->link.next == 0p );
    46         /* paranoid */ verify( mock_head(this)->link.ts   == -1llu  );
    47         /* paranoid */ verify( mock_head(this) == this.prev );
    48         /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 );
    49         /* paranoid */ verify( __alignof__(this) == 128 );
    50         /* paranoid */ verifyf( ((intptr_t)(&this) % 128) == 0, "Expected address to be aligned %p %% 128 == %zd", &this, ((intptr_t)(&this) % 128) );
    51 }
    52 
    53 // Dtor is trivial
    54 void ^?{}( __intrusive_lane_t & this ) {
    55         // Make sure the list is empty
    56         /* paranoid */ verify( this.anchor.next == 0p );
    57         /* paranoid */ verify( this.anchor.ts   == -1llu );
    58         /* paranoid */ verify( mock_head(this)  == this.prev );
    59 }
    60 
    6129// Push a thread onto this lane
    6230// returns true if the lane was empty before the push, false otherwise
     
    6432        /* paranoid */ verify( this.lock );
    6533        /* paranoid */ verify( node->link.next == 0p );
    66         /* paranoid */ verify( node->link.ts   == -1llu  );
     34        /* paranoid */ verify( node->link.ts   == MAX  );
    6735        /* paranoid */ verify( this.prev->link.next == 0p );
    68         /* paranoid */ verify( this.prev->link.ts   == -1llu  );
     36        /* paranoid */ verify( this.prev->link.ts   == MAX  );
    6937        if( this.anchor.next == 0p ) {
    7038                /* paranoid */ verify( this.anchor.next == 0p );
    71                 /* paranoid */ verify( this.anchor.ts   == -1llu );
     39                /* paranoid */ verify( this.anchor.ts   == MAX );
    7240                /* paranoid */ verify( this.anchor.ts   != 0  );
    7341                /* paranoid */ verify( this.prev == mock_head( this ) );
    7442        } else {
    7543                /* paranoid */ verify( this.anchor.next != 0p );
    76                 /* paranoid */ verify( this.anchor.ts   != -1llu );
     44                /* paranoid */ verify( this.anchor.ts   != MAX );
    7745                /* paranoid */ verify( this.anchor.ts   != 0  );
    7846                /* paranoid */ verify( this.prev != mock_head( this ) );
     
    9462        /* paranoid */ verify( this.lock );
    9563        /* paranoid */ verify( this.anchor.next != 0p );
    96         /* paranoid */ verify( this.anchor.ts   != -1llu );
     64        /* paranoid */ verify( this.anchor.ts   != MAX );
    9765        /* paranoid */ verify( this.anchor.ts   != 0  );
    9866
     
    10371        bool is_empty = this.anchor.next == 0p;
    10472        node->link.next = 0p;
    105         node->link.ts   = -1llu;
     73        node->link.ts   = MAX;
    10674        #if !defined(__CFA_NO_STATISTICS__)
    10775                this.cnt--;
     
    11280
    11381        /* paranoid */ verify( node->link.next == 0p );
    114         /* paranoid */ verify( node->link.ts   == -1llu  );
     82        /* paranoid */ verify( node->link.ts   == MAX  );
    11583        /* paranoid */ verify( node->link.ts   != 0  );
    11684        /* paranoid */ verify( this.anchor.ts  != 0  );
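The hunks above replace the bare -1llu sentinel with MAX from limits.hfa: a node whose link.ts equals MAX is, by invariant, not queued on any lane. A minimal C++ restatement of that invariant (the node layout and function names are assumptions, not the runtime's types):

        #include <cassert>
        #include <cstdint>
        #include <limits>

        // Sentinel timestamp meaning "this node is not linked into any lane".
        static const std::uint64_t TS_MAX = std::numeric_limits<std::uint64_t>::max();

        struct Node {
                Node *        next = nullptr;
                std::uint64_t ts   = TS_MAX;
        };

        void on_push( Node & n, std::uint64_t now ) {
                assert( n.next == nullptr && n.ts == TS_MAX );  // must be unqueued
                n.ts = now;                                     // stamped, now queued
        }

        void on_pop( Node & n ) {
                n.next = nullptr;
                n.ts   = TS_MAX;                                // back to unqueued
        }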
  • libcfa/src/concurrency/stats.cfa

    rba897d21 r2e9b59b  
    5555                        stats->io.calls.drain       = 0;
    5656                        stats->io.calls.completed   = 0;
     57                        stats->io.calls.locked      = 0;
     58                        stats->io.calls.helped      = 0;
    5759                        stats->io.calls.errors.busy = 0;
    5860                        stats->io.ops.sockread      = 0;
     
    123125                        tally_one( &cltr->io.calls.drain      , &proc->io.calls.drain       );
    124126                        tally_one( &cltr->io.calls.completed  , &proc->io.calls.completed   );
     127                        tally_one( &cltr->io.calls.locked     , &proc->io.calls.locked      );
     128                        tally_one( &cltr->io.calls.helped     , &proc->io.calls.helped      );
    125129                        tally_one( &cltr->io.calls.errors.busy, &proc->io.calls.errors.busy );
    126130                        tally_one( &cltr->io.ops.sockread     , &proc->io.ops.sockread      );
     
    205209                                     |   " sub " | eng3(io.calls.submitted) | "/" | eng3(io.calls.flush) | "(" | ws(3, 3, avgsubs) | "/flush)"
    206210                                     | " - cmp " | eng3(io.calls.completed) | "/" | eng3(io.calls.drain) | "(" | ws(3, 3, avgcomp) | "/drain)"
     211                                     | " - cmp " | eng3(io.calls.locked) | "locked, " | eng3(io.calls.helped) | "helped"
    207212                                     | " - " | eng3(io.calls.errors.busy) | " EBUSY";
    208213                                sstr | " - sub: " | eng3(io.flush.full) | "full, " | eng3(io.flush.dirty) | "drty, " | eng3(io.flush.idle) | "idle, " | eng3(io.flush.eager) | "eagr, " | eng3(io.flush.external) | "ext";
  • libcfa/src/concurrency/stats.hfa

    rba897d21 r2e9b59b  
    103103                                volatile uint64_t drain;
    104104                                volatile uint64_t completed;
     105                                volatile uint64_t locked;
     106                                volatile uint64_t helped;
    105107                                volatile uint64_t flush;
    106108                                volatile uint64_t submitted;
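The new locked and helped counters follow the existing pattern: zeroed per processor in stats.cfa, then folded into the cluster totals via tally_one. tally_one's definition is not part of this changeset, so the sketch below is only an assumed shape for that fold:

        #include <cstdint>

        // Assumed shape: drain a processor-local counter into the cluster-wide
        // total with one relaxed atomic add (GCC/Clang builtin).
        static void tally_one( std::uint64_t * cltr, const std::uint64_t * proc ) {
                __atomic_fetch_add( cltr, *proc, __ATOMIC_RELAXED );
        }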
  • libcfa/src/concurrency/thread.cfa

    rba897d21 r2e9b59b  
    1919#include "thread.hfa"
    2020
    21 #include "kernel_private.hfa"
     21#include "kernel/private.hfa"
    2222#include "exception.hfa"
    2323
  • libcfa/src/containers/array.hfa

    rba897d21 r2e9b59b  
     1#include <assert.h>
    12
    23
     
    3435
    3536    static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, int i ) {
     37        assert( i < N );
    3638        return (Timmed &) a.strides[i];
    3739    }
    3840
    3941    static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, unsigned int i ) {
     42        assert( i < N );
    4043        return (Timmed &) a.strides[i];
    4144    }
    4245
    4346    static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, long int i ) {
     47        assert( i < N );
    4448        return (Timmed &) a.strides[i];
    4549    }
    4650
    4751    static inline Timmed & ?[?]( arpk(N, S, Timmed, Tbase) & a, unsigned long int i ) {
     52        assert( i < N );
    4853        return (Timmed &) a.strides[i];
    4954    }
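The array.hfa hunks guard every subscript overload with assert( i < N ). A C++ analogue of the same guard is sketched below; note that for the signed overloads an explicit lower-bound check is also worth asserting, since a negative index converted to an unsigned type can otherwise slip past i < N:

        #include <cassert>
        #include <cstddef>

        template< typename T, std::size_t N >
        struct checked_array {
                T data[N];

                // Mirrors the diff's assert( i < N ); the i >= 0 half matters for
                // signed indices.
                T & operator[]( long i ) {
                        assert( i >= 0 && static_cast<std::size_t>(i) < N );
                        return data[i];
                }
        };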
  • libcfa/src/device/cpu.hfa

    rba897d21 r2e9b59b  
    1313// Update Count     :
    1414//
     15
     16#pragma once
    1517
    1618#include <stddef.h>
  • libcfa/src/fstream.cfa

    rba897d21 r2e9b59b  
    1010// Created On       : Wed May 27 17:56:53 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Jan 10 08:45:05 2022
    13 // Update Count     : 513
     12// Last Modified On : Sat Apr  9 14:55:54 2022
     13// Update Count     : 515
    1414//
    1515
     
    161161    for ( cnt; 10 ) {
    162162                errno = 0;
     163                disable_interrupts();
    163164                len = vfprintf( (FILE *)(os.file$), format, args );
     165                enable_interrupts();
    164166          if ( len != EOF || errno != EINTR ) break;            // timer interrupt ?
    165167          if ( cnt == 9 ) abort( "ofstream fmt EINTR spinning exceeded" );
     
    293295    for () {                                                                                    // no check for EINTR limit waiting for keyboard input
    294296                errno = 0;
     297                disable_interrupts();
    295298                len = vfscanf( (FILE *)(is.file$), format, args );
     299                enable_interrupts();
    296300          if ( len != EOF || errno != EINTR ) break;            // timer interrupt ?
    297301    } // for
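Both fstream.cfa hunks bracket the blocking libc call with disable_interrupts()/enable_interrupts() so the runtime's timer signal cannot land inside vfprintf/vfscanf, then retry on EINTR. A standalone C++ restatement of the retry shape, with signal masking standing in for the runtime's interrupt toggles (an assumption; multithreaded code would use pthread_sigmask):

        #include <cerrno>
        #include <csignal>
        #include <cstdio>
        #include <cstdlib>

        // Retry a stdio call that a timer signal may interrupt with EINTR.
        int write_line( std::FILE * f, const char * s ) {
                sigset_t block, old;
                sigemptyset( &block );
                sigaddset( &block, SIGALRM );
                for ( int cnt = 0; cnt < 10; cnt += 1 ) {
                        errno = 0;
                        sigprocmask( SIG_BLOCK, &block, &old );  // keep the signal out of fputs
                        int len = std::fputs( s, f );
                        sigprocmask( SIG_SETMASK, &old, nullptr );
                        if ( len != EOF || errno != EINTR ) return len;  // success or a real error
                }
                std::abort();  // EINTR spinning exceeded, as in the ofstream path
        }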
  • src/AST/Convert.cpp

    rba897d21 r2e9b59b  
    99// Author           : Thierry Delisle
    1010// Created On       : Thu May 09 15::37::05 2019
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed Feb  2 13:19:22 2022
    13 // Update Count     : 41
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Wed Mar 16 15:01:00 2022
     13// Update Count     : 42
    1414//
    1515
     
    4949//================================================================================================
    5050namespace ast {
    51 
    52 // This is to preserve the FindSpecialDecls hack. It does not (and perhaps should not)
    53 // allow us to use the same stratagy in the new ast.
    54 // xxx - since convert back pass works, this concern seems to be unnecessary.
    55 
    56 // these need to be accessed in new FixInit now
    57 ast::ptr<ast::Type> sizeType = nullptr;
    58 const ast::FunctionDecl * dereferenceOperator = nullptr;
    59 const ast::StructDecl   * dtorStruct = nullptr;
    60 const ast::FunctionDecl * dtorStructDestroy = nullptr;
     51// This is the shared local information used by ConverterNewToOld and
     52// ConverterOldToNew to update the global information in the two versions.
     53
     54static ast::ptr<ast::Type> sizeType = nullptr;
     55static const ast::FunctionDecl * dereferenceOperator = nullptr;
     56static const ast::StructDecl   * dtorStruct = nullptr;
     57static const ast::FunctionDecl * dtorStructDestroy = nullptr;
    6158
    6259}
     
    276273                decl->parent = get<AggregateDecl>().accept1( node->parent );
    277274                declPostamble( decl, node );
    278                 return nullptr;
     275                return nullptr; // ??
    279276        }
    280277
     
    310307                        node->name,
    311308                        get<Attribute>().acceptL( node->attributes ),
    312                         LinkageSpec::Spec( node->linkage.val )
    313                 );
    314                 return aggregatePostamble( decl, node );
     309                        LinkageSpec::Spec( node->linkage.val ),
     310                        get<Type>().accept1(node->base)
     311                );
     312                return aggregatePostamble( decl, node ); // Node info, including members, processed in aggregatePostamble
    315313        }
    316314
     
    354352                this->node = stmt;
    355353                return nullptr;
     354        }
     355
     356        void clausePostamble( Statement * stmt, const ast::StmtClause * node ) {
     357                stmt->location = node->location;
     358                this->node = stmt;
    356359        }
    357360
     
    404407                auto stmt = new SwitchStmt(
    405408                        get<Expression>().accept1( node->cond ),
    406                         get<Statement>().acceptL( node->stmts )
     409                        get<Statement>().acceptL( node->cases )
    407410                );
    408411                return stmtPostamble( stmt, node );
    409412        }
    410413
    411         const ast::Stmt * visit( const ast::CaseStmt * node ) override final {
     414        const ast::CaseClause * visit( const ast::CaseClause * node ) override final {
    412415                if ( inCache( node ) ) return nullptr;
    413416                auto stmt = new CaseStmt(
     
    416419                        node->isDefault()
    417420                );
    418                 return stmtPostamble( stmt, node );
     421                clausePostamble( stmt, node );
     422                return nullptr;
    419423        }
    420424
     
    512516        }
    513517
    514         const ast::Stmt * visit( const ast::CatchStmt * node ) override final {
     518        const ast::CatchClause * visit( const ast::CatchClause * node ) override final {
    515519                if ( inCache( node ) ) return nullptr;
    516520                CatchStmt::Kind kind;
     
    523527                        break;
    524528                default:
    525                         assertf(false, "Invalid ast::CatchStmt::Kind: %d\n", node->kind);
     529                        assertf(false, "Invalid ast::ExceptionKind: %d\n", node->kind);
    526530                }
    527531                auto stmt = new CatchStmt(
     
    531535                        get<Statement>().accept1( node->body )
    532536                );
    533                 return stmtPostamble( stmt, node );
    534         }
    535 
    536         const ast::Stmt * visit( const ast::FinallyStmt * node ) override final {
     537                return clausePostamble( stmt, node ), nullptr;
     538        }
     539
     540        const ast::FinallyClause * visit( const ast::FinallyClause * node ) override final {
    537541                if ( inCache( node ) ) return nullptr;
    538542                auto stmt = new FinallyStmt( get<CompoundStmt>().accept1( node->body ) );
    539                 return stmtPostamble( stmt, node );
     543                return clausePostamble( stmt, node ), nullptr;
    540544        }
    541545
     
    947951        }
    948952
     953        const ast::Expr * visit( const ast::DimensionExpr * node ) override final {
     954                auto expr = visitBaseExpr( node, new DimensionExpr( node->name ) );
     955                this->node = expr;
     956                return nullptr;
     957        }
     958
    949959        const ast::Expr * visit( const ast::AsmExpr * node ) override final {
    950960                auto expr = visitBaseExpr( node,
     
    14671477                return strict_dynamic_cast< ast::Decl * >( node );
    14681478        }
    1469 
     1479       
    14701480        ConverterOldToNew() = default;
    14711481        ConverterOldToNew(const ConverterOldToNew &) = delete;
     
    14951505                getAccept1< ast::type, decltype( old->child ) >( old->child )
    14961506
     1507
    14971508        template<typename NewT, typename OldC>
    14981509        std::vector< ast::ptr<NewT> > getAcceptV( const OldC& old ) {
     
    15091520#       define GET_ACCEPT_V(child, type) \
    15101521                getAcceptV< ast::type, decltype( old->child ) >( old->child )
     1522
     1523#       define GET_ACCEPT_E(child, type) \
     1524                getAccept1< ast::type, decltype( old->base ) >( old->base )
    15111525
    15121526        template<typename NewT, typename OldC>
     
    17101724        }
    17111725
     1726        // Convert SynTree::EnumDecl to AST::EnumDecl
    17121727        virtual void visit( const EnumDecl * old ) override final {
    17131728                if ( inCache( old ) ) return;
     
    17161731                        old->name,
    17171732                        GET_ACCEPT_V(attributes, Attribute),
    1718                         { old->linkage.val }
     1733                        { old->linkage.val },
     1734                        GET_ACCEPT_1(base, Type),
     1735                        old->enumValues
    17191736                );
    17201737                cache.emplace( old, decl );
     
    17261743                decl->uniqueId   = old->uniqueId;
    17271744                decl->storage    = { old->storageClasses.val };
    1728 
    17291745                this->node = decl;
    17301746        }
     
    18871903                        old->location,
    18881904                        GET_ACCEPT_1(condition, Expr),
    1889                         GET_ACCEPT_V(statements, Stmt),
     1905                        GET_ACCEPT_V(statements, CaseClause),
    18901906                        GET_LABELS_V(old->labels)
    18911907                );
     
    18951911        virtual void visit( const CaseStmt * old ) override final {
    18961912                if ( inCache( old ) ) return;
    1897                 this->node = new ast::CaseStmt(
     1913                this->node = new ast::CaseClause(
    18981914                        old->location,
    18991915                        GET_ACCEPT_1(condition, Expr),
    1900                         GET_ACCEPT_V(stmts, Stmt),
    1901                         GET_LABELS_V(old->labels)
    1902                 );
     1916                        GET_ACCEPT_V(stmts, Stmt)
     1917                );
     1918                auto labels = GET_LABELS_V(old->labels);
     1919                assertf(labels.empty(), "Labels found on CaseStmt.");
    19031920                cache.emplace( old, this->node );
    19041921        }
     
    20082025                        old->location,
    20092026                        GET_ACCEPT_1(block, CompoundStmt),
    2010                         GET_ACCEPT_V(handlers, CatchStmt),
    2011                         GET_ACCEPT_1(finallyBlock, FinallyStmt),
     2027                        GET_ACCEPT_V(handlers, CatchClause),
     2028                        GET_ACCEPT_1(finallyBlock, FinallyClause),
    20122029                        GET_LABELS_V(old->labels)
    20132030                );
     
    20292046                }
    20302047
    2031                 this->node = new ast::CatchStmt(
     2048                this->node = new ast::CatchClause(
    20322049                        old->location,
    20332050                        kind,
    20342051                        GET_ACCEPT_1(decl, Decl),
    20352052                        GET_ACCEPT_1(cond, Expr),
    2036                         GET_ACCEPT_1(body, Stmt),
    2037                         GET_LABELS_V(old->labels)
    2038                 );
     2053                        GET_ACCEPT_1(body, Stmt)
     2054                );
     2055                auto labels = GET_LABELS_V(old->labels);
     2056                assertf(labels.empty(), "Labels found on CatchStmt.");
    20392057                cache.emplace( old, this->node );
    20402058        }
     
    20422060        virtual void visit( const FinallyStmt * old ) override final {
    20432061                if ( inCache( old ) ) return;
    2044                 this->node = new ast::FinallyStmt(
    2045                         old->location,
    2046                         GET_ACCEPT_1(block, CompoundStmt),
    2047                         GET_LABELS_V(old->labels)
    2048                 );
     2062                this->node = new ast::FinallyClause(
     2063                        old->location,
     2064                        GET_ACCEPT_1(block, CompoundStmt)
     2065                );
     2066                auto labels = GET_LABELS_V(old->labels);
     2067                assertf(labels.empty(), "Labels found on FinallyStmt.");
    20492068                cache.emplace( old, this->node );
    20502069        }
     
    24502469
    24512470        virtual void visit( const DimensionExpr * old ) override final {
    2452                 // DimensionExpr gets desugared away in Validate.
    2453                 // As long as new-AST passes don't use it, this cheap-cheerful error
    2454                 // detection helps ensure that these occurrences have been compiled
    2455                 // away, as expected.  To move the DimensionExpr boundary downstream
    2456                 // or move the new-AST translation boundary upstream, implement
    2457                 // DimensionExpr in the new AST and implement a conversion.
    2458                 (void) old;
    2459                 assert(false && "DimensionExpr should not be present at new-AST boundary");
     2471                this->node = visitBaseExpr( old,
     2472                        new ast::DimensionExpr( old->location, old->name )
     2473                );
    24602474        }
    24612475
     
    27112725
    27122726                for (auto & param : foralls) {
    2713                         ty->forall.emplace_back(new ast::TypeInstType(param->name, param));
     2727                        ty->forall.emplace_back(new ast::TypeInstType(param));
    27142728                        for (auto asst : param->assertions) {
    27152729                                ty->assertions.emplace_back(new ast::VariableExpr({}, asst));
     
    27612775        }
    27622776
    2763         virtual void visit( const EnumInstType * old ) override final {
    2764                 ast::EnumInstType * ty;
     2777        virtual void visit( const EnumInstType * old ) override final { // visits the EnumInstType declaration, not its usage
     2778                ast::EnumInstType * ty;
    27652779                if ( old->baseEnum ) {
    2766                         ty = new ast::EnumInstType{
      2780                        ty = new ast::EnumInstType{ // note: the base specification may be missing here
    27672781                                GET_ACCEPT_1( baseEnum, EnumDecl ),
    27682782                                cv( old ),
  • src/AST/Decl.cpp

    rba897d21 r2e9b59b  
    6868        }
    6969        for (auto & tp : this->type_params) {
    70                 ftype->forall.emplace_back(new TypeInstType(tp->name, tp));
     70                ftype->forall.emplace_back(new TypeInstType(tp));
    7171                for (auto & ap: tp->assertions) {
    7272                        ftype->assertions.emplace_back(new VariableExpr(loc, ap));
     
    136136
    137137        auto it = enumValues.find( enumerator->name );
     138       
    138139        if ( it != enumValues.end() ) {
    139                 value = it->second;
     140                       
     141                // Handle typed enum by casting the value in (C++) compiler
     142                // if ( base ) { // A typed enum
     143                //      if ( const BasicType * bt = dynamic_cast<const BasicType *>(base) ) {
     144                //              switch( bt->kind ) {
     145                //                      case BasicType::Kind::Bool:     value = (bool) it->second; break;
     146                //                      case BasicType::Kind::Char: value = (char) it->second; break;
     147                //                      case BasicType::Kind::SignedChar: value = (signed char) it->second; break;
     148                //                      case BasicType::Kind::UnsignedChar: value = (unsigned char) it->second; break;
     149                //                      case BasicType::Kind::ShortSignedInt: value = (short signed int) it->second; break;
     150                //                      case BasicType::Kind::SignedInt: value = (signed int) it->second; break;
     151                //                      case BasicType::Kind::UnsignedInt: value = (unsigned int) it->second; break;
     152                //                      case BasicType::Kind::LongSignedInt: value = (long signed int) it->second; break;
     153                //                      case BasicType::Kind::LongUnsignedInt: value = (long unsigned int) it->second; break;
     154                //                      case BasicType::Kind::LongLongSignedInt: value = (long long signed int) it->second; break;
     155                //                      case BasicType::Kind::LongLongUnsignedInt: value = (long long unsigned int) it->second; break;
     156                //                      // TODO: value should be able to handle long long unsigned int
     157
     158                //                      default:
     159                //                      value = it->second;
     160                //              }
     161                //      }
     162                // } else {
     163                        value = it->second;
     164                //}
     165
    140166                return true;
    141167        }
  • src/AST/Decl.hpp

    rba897d21 r2e9b59b  
    302302class EnumDecl final : public AggregateDecl {
    303303public:
     304        ptr<Type> base;
     305
    304306        EnumDecl( const CodeLocation& loc, const std::string& name,
    305                 std::vector<ptr<Attribute>>&& attrs = {}, Linkage::Spec linkage = Linkage::Cforall )
    306         : AggregateDecl( loc, name, std::move(attrs), linkage ), enumValues() {}
     307                std::vector<ptr<Attribute>>&& attrs = {}, Linkage::Spec linkage = Linkage::Cforall, Type * base = nullptr,
     308                 std::unordered_map< std::string, long long > enumValues = std::unordered_map< std::string, long long >() )
     309        : AggregateDecl( loc, name, std::move(attrs), linkage ), base(base), enumValues(enumValues) {}
    307310
    308311        /// gets the integer value for this enumerator, returning true iff value found
      312        // Note: may be unused when producing the enumerator's value
    309313        bool valueOf( const Decl * enumerator, long long& value ) const;
    310314
     
    312316
    313317        const char * typeString() const override { return aggrString( Enum ); }
     318
      319        bool isTyped() { return base && base.get(); }
    314320
    315321private:
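The new base field records an underlying type on EnumDecl, i.e. CFA's typed enums. For orientation only, the C++ analogue of the feature being modelled (not CFA surface syntax):

        // A C++ enum with an explicit base type fixes both the value range and
        // the representation; EnumDecl::base now records the same information
        // for CFA declarations.
        enum class Letter : char {
                A = 'a',
                B = 'b',
        };

        static_assert( sizeof(Letter) == sizeof(char), "base type fixes the representation" );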
  • src/AST/Expr.hpp

    rba897d21 r2e9b59b  
    604604};
    605605
     606class DimensionExpr final : public Expr {
     607public:
     608        std::string name;
     609
     610        DimensionExpr( const CodeLocation & loc, std::string name )
     611        : Expr( loc ), name( name ) {}
     612
     613        const Expr * accept( Visitor & v ) const override { return v.visit( this ); }
     614private:
     615        DimensionExpr * clone() const override { return new DimensionExpr{ *this }; }
     616        MUTATE_FRIEND
     617};
     618
    606619/// A GCC "asm constraint operand" used in an asm statement, e.g. `[output] "=f" (result)`.
    607620/// https://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/Machine-Constraints.html#Machine-Constraints
  • src/AST/Fwd.hpp

    rba897d21 r2e9b59b  
    4747class ForStmt;
    4848class SwitchStmt;
    49 class CaseStmt;
     49class CaseClause;
    5050class BranchStmt;
    5151class ReturnStmt;
    5252class ThrowStmt;
    5353class TryStmt;
    54 class CatchStmt;
    55 class FinallyStmt;
     54class CatchClause;
     55class FinallyClause;
    5656class SuspendStmt;
    5757class WaitForStmt;
     
    8484class CommaExpr;
    8585class TypeExpr;
     86class DimensionExpr;
    8687class AsmExpr;
    8788class ImplicitCopyCtorExpr;
     
    141142
    142143class TranslationUnit;
    143 // TODO: Get from the TranslationUnit:
    144 extern ptr<Type> sizeType;
    145 extern const FunctionDecl * dereferenceOperator;
    146 extern const StructDecl   * dtorStruct;
    147 extern const FunctionDecl * dtorStructDestroy;
     144class TranslationGlobal;
    148145
    149146}
  • src/AST/GenericSubstitution.cpp

    rba897d21 r2e9b59b  
    4545                        visit_children = false;
    4646                        const AggregateDecl * aggr = ty->aggr();
    47                         sub = TypeSubstitution{ aggr->params.begin(), aggr->params.end(), ty->params.begin() };
     47                        sub = TypeSubstitution( aggr->params, ty->params );
    4848                }
    4949
  • src/AST/Node.cpp

    rba897d21 r2e9b59b  
    99// Author           : Thierry Delisle
    1010// Created On       : Thu May 16 14:16:00 2019
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue Feb  1 09:09:39 2022
    13 // Update Count     : 3
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Fri Mar 25 10:30:00 2022
     13// Update Count     : 4
    1414//
    1515
     
    1919#include <csignal>  // MEMORY DEBUG -- for raise
    2020#include <iostream>
     21#include <utility>
    2122
    2223#include "Attribute.hpp"
     
    7677void ast::ptr_base<node_t, ref_t>::_check() const {
    7778        // if(node) assert(node->was_ever_strong == false || node->strong_count > 0);
     79}
     80
     81template< typename node_t, enum ast::Node::ref_type ref_t >
     82void ast::ptr_base<node_t, ref_t>::swap( ptr_base & other ) noexcept {
     83        std::swap( this->node, other.node );
     84        _trap( this->node );
     85        _trap( other.node );
    7886}
    7987
     
    152160template class ast::ptr_base< ast::SwitchStmt, ast::Node::ref_type::weak >;
    153161template class ast::ptr_base< ast::SwitchStmt, ast::Node::ref_type::strong >;
    154 template class ast::ptr_base< ast::CaseStmt, ast::Node::ref_type::weak >;
    155 template class ast::ptr_base< ast::CaseStmt, ast::Node::ref_type::strong >;
     162template class ast::ptr_base< ast::CaseClause, ast::Node::ref_type::weak >;
     163template class ast::ptr_base< ast::CaseClause, ast::Node::ref_type::strong >;
    156164template class ast::ptr_base< ast::BranchStmt, ast::Node::ref_type::weak >;
    157165template class ast::ptr_base< ast::BranchStmt, ast::Node::ref_type::strong >;
     
    162170template class ast::ptr_base< ast::TryStmt, ast::Node::ref_type::weak >;
    163171template class ast::ptr_base< ast::TryStmt, ast::Node::ref_type::strong >;
    164 template class ast::ptr_base< ast::CatchStmt, ast::Node::ref_type::weak >;
    165 template class ast::ptr_base< ast::CatchStmt, ast::Node::ref_type::strong >;
    166 template class ast::ptr_base< ast::FinallyStmt, ast::Node::ref_type::weak >;
    167 template class ast::ptr_base< ast::FinallyStmt, ast::Node::ref_type::strong >;
     172template class ast::ptr_base< ast::CatchClause, ast::Node::ref_type::weak >;
     173template class ast::ptr_base< ast::CatchClause, ast::Node::ref_type::strong >;
     174template class ast::ptr_base< ast::FinallyClause, ast::Node::ref_type::weak >;
     175template class ast::ptr_base< ast::FinallyClause, ast::Node::ref_type::strong >;
    168176template class ast::ptr_base< ast::WaitForStmt, ast::Node::ref_type::weak >;
    169177template class ast::ptr_base< ast::WaitForStmt, ast::Node::ref_type::strong >;
  • src/AST/Node.hpp

    rba897d21 r2e9b59b  
    1010// Created On       : Wed May 8 10:27:04 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Jun 5 9:47:00 2020
    13 // Update Count     : 6
     12// Last Modified On : Fri Mar 25 10:33:00 2022
     13// Update Count     : 7
    1414//
    1515
     
    103103
    104104/// Mutate a node field (only clones if not equal to existing value)
    105 template<typename node_t, typename parent_t, typename field_t, typename assn_t>
    106 const node_t * mutate_field( const node_t * node, field_t parent_t::* field, assn_t && val ) {
     105template<typename node_t, typename super_t, typename field_t, typename assn_t>
     106const node_t * mutate_field( const node_t * node, field_t super_t::* field, assn_t && val ) {
    107107        // skip mutate if equivalent
    108108        if ( node->*field == val ) return node;
     
    115115
    116116/// Mutate a single index of a node field (only clones if not equal to existing value)
    117 template<typename node_t, typename parent_t, typename coll_t, typename ind_t, typename field_t>
     117template<typename node_t, typename super_t, typename coll_t, typename ind_t, typename field_t>
    118118const node_t * mutate_field_index(
    119         const node_t * node, coll_t parent_t::* field, ind_t i, field_t && val
     119        const node_t * node, coll_t super_t::* field, ind_t i, field_t && val
    120120) {
    121121        // skip mutate if equivalent
     
    129129
    130130/// Mutate an entire indexed collection by cloning to accepted value
    131 template<typename node_t, typename parent_t, typename coll_t>
    132 const node_t * mutate_each( const node_t * node, coll_t parent_t::* field, Visitor & v ) {
     131template<typename node_t, typename super_t, typename coll_t>
     132const node_t * mutate_each( const node_t * node, coll_t super_t::* field, Visitor & v ) {
    133133        for ( unsigned i = 0; i < (node->*field).size(); ++i ) {
    134134                node = mutate_field_index( node, field, i, (node->*field)[i]->accept( v ) );
     
    230230        }
    231231
     232        /// Swaps the nodes contained within two pointers.
     233        void swap( ptr_base & other ) noexcept;
     234
    232235        const node_t * get() const { _check(); return  node; }
    233236        const node_t * operator->() const { _check(); return  node; }
     
    292295template< typename node_t >
    293296using readonly = ptr_base< node_t, Node::ref_type::weak >;
     297
      298/// Non-member swap that can participate in overload resolution.
     299template< typename node_t, enum Node::ref_type ref_t >
     300void swap( ptr_base< node_t, ref_t > & l, ptr_base< node_t, ref_t > & r ) {
     301        l.swap( r );
     302}
     303
    294304}
    295305
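The member swap added in Node.cpp plus this non-member forwarder is the standard ADL-swap pattern: an unqualified swap( a, b ) on ptr_base operands now resolves to ast::swap, while std::swap stays available as the generic fallback. A minimal sketch of the pattern outside the AST:

        #include <utility>

        namespace demo {
                struct Handle {
                        int * p = nullptr;
                        void swap( Handle & other ) noexcept { std::swap( p, other.p ); }
                };

                // Non-member overload found by argument-dependent lookup.
                void swap( Handle & l, Handle & r ) noexcept { l.swap( r ); }
        }

        void use( demo::Handle & a, demo::Handle & b ) {
                using std::swap;  // generic fallback for types without their own swap
                swap( a, b );     // ADL selects demo::swap here
        }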
  • src/AST/Pass.hpp

    rba897d21 r2e9b59b  
    149149        const ast::Stmt *             visit( const ast::ForStmt              * ) override final;
    150150        const ast::Stmt *             visit( const ast::SwitchStmt           * ) override final;
    151         const ast::Stmt *             visit( const ast::CaseStmt             * ) override final;
     151        const ast::CaseClause *       visit( const ast::CaseClause           * ) override final;
    152152        const ast::Stmt *             visit( const ast::BranchStmt           * ) override final;
    153153        const ast::Stmt *             visit( const ast::ReturnStmt           * ) override final;
    154154        const ast::Stmt *             visit( const ast::ThrowStmt            * ) override final;
    155155        const ast::Stmt *             visit( const ast::TryStmt              * ) override final;
    156         const ast::Stmt *             visit( const ast::CatchStmt            * ) override final;
    157         const ast::Stmt *             visit( const ast::FinallyStmt          * ) override final;
     156        const ast::CatchClause *      visit( const ast::CatchClause          * ) override final;
     157        const ast::FinallyClause *    visit( const ast::FinallyClause        * ) override final;
    158158        const ast::Stmt *             visit( const ast::SuspendStmt          * ) override final;
    159159        const ast::Stmt *             visit( const ast::WaitForStmt          * ) override final;
     
    184184        const ast::Expr *             visit( const ast::CommaExpr            * ) override final;
    185185        const ast::Expr *             visit( const ast::TypeExpr             * ) override final;
     186        const ast::Expr *             visit( const ast::DimensionExpr        * ) override final;
    186187        const ast::Expr *             visit( const ast::AsmExpr              * ) override final;
    187188        const ast::Expr *             visit( const ast::ImplicitCopyCtorExpr * ) override final;
  • src/AST/Pass.impl.hpp

    rba897d21 r2e9b59b  
    354354                        // Take all the elements that are different in 'values'
    355355                        // and swap them into 'container'
    356                         if( values[i] != nullptr ) std::swap(container[i], values[i]);
     356                        if( values[i] != nullptr ) swap(container[i], values[i]);
    357357                }
    358358
     
    399399
    400400        template< typename core_t >
    401         template<typename node_t, typename parent_t, typename child_t>
     401        template<typename node_t, typename super_t, typename field_t>
    402402        void ast::Pass< core_t >::maybe_accept(
    403403                const node_t * & parent,
    404                 child_t parent_t::*child
     404                field_t super_t::*field
    405405        ) {
    406                 static_assert( std::is_base_of<parent_t, node_t>::value, "Error deducing member object" );
    407 
    408                 if(__pass::skip(parent->*child)) return;
    409                 const auto & old_val = __pass::get(parent->*child, 0);
     406                static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" );
     407
     408                if(__pass::skip(parent->*field)) return;
     409                const auto & old_val = __pass::get(parent->*field, 0);
    410410
    411411                static_assert( !std::is_same<const ast::Node * &, decltype(old_val)>::value, "ERROR");
     
    417417                if( new_val.differs ) {
    418418                        auto new_parent = __pass::mutate<core_t>(parent);
    419                         new_val.apply(new_parent, child);
     419                        new_val.apply(new_parent, field);
    420420                        parent = new_parent;
    421421                }
     
    423423
    424424        template< typename core_t >
    425         template<typename node_t, typename parent_t, typename child_t>
     425        template<typename node_t, typename super_t, typename field_t>
    426426        void ast::Pass< core_t >::maybe_accept_as_compound(
    427427                const node_t * & parent,
    428                 child_t parent_t::*child
     428                field_t super_t::*child
    429429        ) {
    430                 static_assert( std::is_base_of<parent_t, node_t>::value, "Error deducing member object" );
     430                static_assert( std::is_base_of<super_t, node_t>::value, "Error deducing member object" );
    431431
    432432                if(__pass::skip(parent->*child)) return;
     
    575575                        __pass::symtab::addId( core, 0, func );
    576576                        if ( __visit_children() ) {
    577                                 // parameter declarations
     577                                maybe_accept( node, &FunctionDecl::type_params );
     578                                maybe_accept( node, &FunctionDecl::assertions );
    578579                                maybe_accept( node, &FunctionDecl::params );
    579580                                maybe_accept( node, &FunctionDecl::returns );
    580                                 // type params and assertions
    581                                 maybe_accept( node, &FunctionDecl::type_params );
    582                                 maybe_accept( node, &FunctionDecl::assertions );
     581                                maybe_accept( node, &FunctionDecl::type );
    583582                                // First remember that we are now within a function.
    584583                                ValueGuard< bool > oldInFunction( inFunction );
     
    893892        if ( __visit_children() ) {
    894893                maybe_accept( node, &SwitchStmt::cond  );
    895                 maybe_accept( node, &SwitchStmt::stmts );
     894                maybe_accept( node, &SwitchStmt::cases );
    896895        }
    897896
     
    900899
    901900//--------------------------------------------------------------------------
    902 // CaseStmt
    903 template< typename core_t >
    904 const ast::Stmt * ast::Pass< core_t >::visit( const ast::CaseStmt * node ) {
    905         VISIT_START( node );
    906 
    907         if ( __visit_children() ) {
    908                 maybe_accept( node, &CaseStmt::cond  );
    909                 maybe_accept( node, &CaseStmt::stmts );
    910         }
    911 
    912         VISIT_END( Stmt, node );
     901// CaseClause
     902template< typename core_t >
     903const ast::CaseClause * ast::Pass< core_t >::visit( const ast::CaseClause * node ) {
     904        VISIT_START( node );
     905
     906        if ( __visit_children() ) {
     907                maybe_accept( node, &CaseClause::cond  );
     908                maybe_accept( node, &CaseClause::stmts );
     909        }
     910
     911        VISIT_END( CaseClause, node );
    913912}
    914913
     
    964963
    965964//--------------------------------------------------------------------------
    966 // CatchStmt
    967 template< typename core_t >
    968 const ast::Stmt * ast::Pass< core_t >::visit( const ast::CatchStmt * node ) {
     965// CatchClause
     966template< typename core_t >
     967const ast::CatchClause * ast::Pass< core_t >::visit( const ast::CatchClause * node ) {
    969968        VISIT_START( node );
    970969
     
    972971                // catch statements introduce a level of scope (for the caught exception)
    973972                guard_symtab guard { *this };
    974                 maybe_accept( node, &CatchStmt::decl );
    975                 maybe_accept( node, &CatchStmt::cond );
    976                 maybe_accept_as_compound( node, &CatchStmt::body );
    977         }
    978 
    979         VISIT_END( Stmt, node );
    980 }
    981 
    982 //--------------------------------------------------------------------------
    983 // FinallyStmt
    984 template< typename core_t >
    985 const ast::Stmt * ast::Pass< core_t >::visit( const ast::FinallyStmt * node ) {
    986         VISIT_START( node );
    987 
    988         if ( __visit_children() ) {
    989                 maybe_accept( node, &FinallyStmt::body );
    990         }
    991 
    992         VISIT_END( Stmt, node );
     973                maybe_accept( node, &CatchClause::decl );
     974                maybe_accept( node, &CatchClause::cond );
     975                maybe_accept_as_compound( node, &CatchClause::body );
     976        }
     977
     978        VISIT_END( CatchClause, node );
     979}
     980
     981//--------------------------------------------------------------------------
     982// FinallyClause
     983template< typename core_t >
     984const ast::FinallyClause * ast::Pass< core_t >::visit( const ast::FinallyClause * node ) {
     985        VISIT_START( node );
     986
     987        if ( __visit_children() ) {
     988                maybe_accept( node, &FinallyClause::body );
     989        }
     990
     991        VISIT_END( FinallyClause, node );
    993992}
    994993
     
    10541053                        auto n = __pass::mutate<core_t>(node);
    10551054                        for(size_t i = 0; i < new_clauses.size(); i++) {
    1056                                 if(new_clauses.at(i).target.func != nullptr) std::swap(n->clauses.at(i).target.func, new_clauses.at(i).target.func);
     1055                                if(new_clauses.at(i).target.func != nullptr) swap(n->clauses.at(i).target.func, new_clauses.at(i).target.func);
    10571056
    10581057                                for(size_t j = 0; j < new_clauses.at(i).target.args.size(); j++) {
    1059                                         if(new_clauses.at(i).target.args.at(j) != nullptr) std::swap(n->clauses.at(i).target.args.at(j), new_clauses.at(i).target.args.at(j));
     1058                                        if(new_clauses.at(i).target.args.at(j) != nullptr) swap(n->clauses.at(i).target.args.at(j), new_clauses.at(i).target.args.at(j));
    10601059                                }
    10611060
    1062                                 if(new_clauses.at(i).stmt != nullptr) std::swap(n->clauses.at(i).stmt, new_clauses.at(i).stmt);
    1063                                 if(new_clauses.at(i).cond != nullptr) std::swap(n->clauses.at(i).cond, new_clauses.at(i).cond);
     1061                                if(new_clauses.at(i).stmt != nullptr) swap(n->clauses.at(i).stmt, new_clauses.at(i).stmt);
     1062                                if(new_clauses.at(i).cond != nullptr) swap(n->clauses.at(i).cond, new_clauses.at(i).cond);
    10641063                        }
    10651064                        node = n;
     
    15161515                }
    15171516                maybe_accept( node, &TypeExpr::type );
     1517        }
     1518
     1519        VISIT_END( Expr, node );
     1520}
     1521
     1522//--------------------------------------------------------------------------
     1523// DimensionExpr
     1524template< typename core_t >
     1525const ast::Expr * ast::Pass< core_t >::visit( const ast::DimensionExpr * node ) {
     1526        VISIT_START( node );
     1527
     1528        if ( __visit_children() ) {
     1529                guard_symtab guard { *this };
     1530                maybe_accept( node, &DimensionExpr::result );
    15181531        }
    15191532
     
    18591872
    18601873        if ( __visit_children() ) {
    1861                 // xxx - should PointerType visit/mutate dimension?
     1874                maybe_accept( node, &PointerType::dimension );
    18621875                maybe_accept( node, &PointerType::base );
    18631876        }
     
    21512164
    21522165        if ( __visit_children() ) {
    2153                 {
    2154                         bool mutated = false;
    2155                         std::unordered_map< ast::TypeInstType::TypeEnvKey, ast::ptr< ast::Type > > new_map;
    2156                         for ( const auto & p : node->typeEnv ) {
    2157                                 guard_symtab guard { *this };
    2158                                 auto new_node = p.second->accept( *this );
    2159                                 if (new_node != p.second) mutated = true;
    2160                                 new_map.insert({ p.first, new_node });
    2161                         }
    2162                         if (mutated) {
    2163                                 auto new_node = __pass::mutate<core_t>( node );
    2164                                 new_node->typeEnv.swap( new_map );
    2165                                 node = new_node;
    2166                         }
     2166                bool mutated = false;
     2167                std::unordered_map< ast::TypeInstType::TypeEnvKey, ast::ptr< ast::Type > > new_map;
     2168                for ( const auto & p : node->typeEnv ) {
     2169                        guard_symtab guard { *this };
     2170                        auto new_node = p.second->accept( *this );
     2171                        if (new_node != p.second) mutated = true;
     2172                        new_map.insert({ p.first, new_node });
     2173                }
     2174                if (mutated) {
     2175                        auto new_node = __pass::mutate<core_t>( node );
     2176                        new_node->typeEnv.swap( new_map );
     2177                        node = new_node;
    21672178                }
    21682179        }
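The typeEnv hunk keeps the pass framework's copy-on-write discipline: visit every entry, and clone the parent node (via __pass::mutate) only when at least one child actually changed. A condensed C++ sketch of that discipline, with placeholder types:

        #include <vector>

        struct Type { };

        // Copy-on-write child visit: return the original container when nothing
        // changed; otherwise build and return a replacement.
        const std::vector<const Type *> *
        visit_children( const std::vector<const Type *> * node,
                        const Type * (*visit)( const Type * ) ) {
                std::vector<const Type *> copy = *node;
                bool mutated = false;
                for ( const Type * & t : copy ) {
                        const Type * n = visit( t );
                        if ( n != t ) { t = n; mutated = true; }
                }
                if ( ! mutated ) return node;  // share the original, no allocation
                return new std::vector<const Type *>( std::move( copy ) );
        }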
  • src/AST/Pass.proto.hpp

    rba897d21 r2e9b59b  
    2626
    2727struct PureVisitor;
     28
     29template<typename node_t>
     30node_t * deepCopy( const node_t * localRoot );
    2831
    2932namespace __pass {
     
    396399                static inline auto addStructFwd( core_t & core, int, const ast::StructDecl * decl ) -> decltype( core.symtab.addStruct( decl ), void() ) {
    397400                        ast::StructDecl * fwd = new ast::StructDecl( decl->location, decl->name );
    398                         fwd->params = decl->params;
     401                        for ( const auto & param : decl->params ) {
     402                                fwd->params.push_back( deepCopy( param.get() ) );
     403                        }
    399404                        core.symtab.addStruct( fwd );
    400405                }
     
    405410                template<typename core_t>
    406411                static inline auto addUnionFwd( core_t & core, int, const ast::UnionDecl * decl ) -> decltype( core.symtab.addUnion( decl ), void() ) {
    407                         UnionDecl * fwd = new UnionDecl( decl->location, decl->name );
    408                         fwd->params = decl->params;
     412                        ast::UnionDecl * fwd = new ast::UnionDecl( decl->location, decl->name );
     413                        for ( const auto & param : decl->params ) {
     414                                fwd->params.push_back( deepCopy( param.get() ) );
     415                        }
    409416                        core.symtab.addUnion( fwd );
    410417                }
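
    Replacing `fwd->params = decl->params;` with a per-parameter deepCopy matters because AST nodes are reference counted: assigning the container shares the underlying parameter nodes between the forward declaration and the definition. A small self-contained demonstration of the aliasing this avoids, with plain shared_ptr standing in for ast::ptr:

        #include <cassert>
        #include <memory>
        #include <vector>

        struct Param { int value = 0; };

        int main() {
            std::vector<std::shared_ptr<Param>> original { std::make_shared<Param>() };

            // Shared: both declarations alias the same node.
            std::vector<std::shared_ptr<Param>> shared = original;
            shared[0]->value = 1;
            assert( original[0]->value == 1 );  // the edit is visible through the original

            // Deep-copied: the forward declaration owns independent clones.
            std::vector<std::shared_ptr<Param>> copied;
            for ( const auto & p : original ) copied.push_back( std::make_shared<Param>( *p ) );
            copied[0]->value = 2;
            assert( original[0]->value == 1 );  // the original is untouched
        }
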
  • src/AST/Print.cpp

    rba897d21 r2e9b59b  
    210210                }
    211211
     212                auto ptrToEnum = dynamic_cast<const ast::EnumDecl *>(node);
     213                if ( ! short_mode && ptrToEnum && ptrToEnum->base ) {
     214                        os << endl << indent << ".. with (enum) base" << endl;
     215                        ++indent;
     216                        ptrToEnum->base->accept( *this );
     217                        --indent; 
     218                }
     219
    212220                os << endl;
    213221        }
     
    589597
    590598                ++indent;
    591                 for ( const ast::Stmt * stmt : node->stmts ) {
     599                for ( const ast::CaseClause * stmt : node->cases ) {
    592600                        stmt->accept( *this );
    593601                }
     
    597605        }
    598606
    599         virtual const ast::Stmt * visit( const ast::CaseStmt * node ) override final {
     607        virtual const ast::CaseClause * visit( const ast::CaseClause * node ) override final {
    600608                if ( node->isDefault() ) {
    601609                        os << indent << "Default ";
     
    679687
    680688                os << indent-1 << "... and handlers:" << endl;
    681                 for ( const ast::CatchStmt * stmt : node->handlers ) {
     689                for ( const ast::CatchClause * stmt : node->handlers ) {
    682690                        os << indent;
    683691                        stmt->accept( *this );
     
    693701        }
    694702
    695         virtual const ast::Stmt * visit( const ast::CatchStmt * node ) override final {
     703        virtual const ast::CatchClause * visit( const ast::CatchClause * node ) override final {
    696704                os << "Catch ";
    697705                switch ( node->kind ) {
     
    718726        }
    719727
    720         virtual const ast::Stmt * visit( const ast::FinallyStmt * node ) override final {
     728        virtual const ast::FinallyClause * visit( const ast::FinallyClause * node ) override final {
    721729                os << "Finally Statement" << endl;
    722730                os << indent << "... with block:" << endl;
     
    10881096        virtual const ast::Expr * visit( const ast::TypeExpr * node ) override final {
    10891097                safe_print( node->type );
     1098                postprint( node );
     1099
     1100                return node;
     1101        }
     1102
     1103        virtual const ast::Expr * visit( const ast::DimensionExpr * node ) override final {
     1104                os << "Type-Sys Value: " << node->name;
    10901105                postprint( node );
    10911106
  • src/AST/Stmt.hpp

    rba897d21 r2e9b59b  
    99// Author           : Aaron B. Moss
    1010// Created On       : Wed May  8 13:00:00 2019
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed Feb  2 20:06:41 2022
    13 // Update Count     : 34
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Mon Mar 28  9:50:00 2022
     13// Update Count     : 35
    1414//
    1515
     
    4747  private:
    4848        Stmt * clone() const override = 0;
     49        MUTATE_FRIEND
     50};
     51
     52// Base statement component node (only serves to group them).
     53class StmtClause : public ParseNode {
     54  public:
     55        // This is for non-statements that still belong with the statements,
     56        // but are not statements, usually some sort of clause. Often these can
     57        // (and should) be folded into the approprate parent node, but if they
     58        // cannot be, they are sub-types of this type, for organization.
     59
     60    StmtClause( const CodeLocation & loc )
     61                : ParseNode(loc) {}
     62
     63  private:
     64        StmtClause * clone() const override = 0;
    4965        MUTATE_FRIEND
    5066};
     
    158174  public:
    159175        ptr<Expr> cond;
     176        std::vector<ptr<CaseClause>> cases;
     177
     178        SwitchStmt( const CodeLocation & loc, const Expr * cond,
     179                                const std::vector<ptr<CaseClause>> && cases,
     180                                const std::vector<Label> && labels = {} )
     181                : Stmt(loc, std::move(labels)), cond(cond), cases(std::move(cases)) {}
     182
     183        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     184  private:
     185        SwitchStmt * clone() const override { return new SwitchStmt{ *this }; }
     186        MUTATE_FRIEND
     187};
     188
     189// Case label: case ...: or default:
     190class CaseClause final : public StmtClause {
     191  public:
     192        // Null for the default label.
     193        ptr<Expr> cond;
    160194        std::vector<ptr<Stmt>> stmts;
    161195
    162         SwitchStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts,
    163                                 const std::vector<Label> && labels = {} )
    164                 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}
    165 
    166         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    167   private:
    168         SwitchStmt * clone() const override { return new SwitchStmt{ *this }; }
    169         MUTATE_FRIEND
    170 };
    171 
    172 // Case label: case ...: or default:
    173 class CaseStmt final : public Stmt {
    174   public:
    175         // Null for the default label.
    176         ptr<Expr> cond;
    177         std::vector<ptr<Stmt>> stmts;
    178 
    179         CaseStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts,
    180                           const std::vector<Label> && labels = {} )
    181                 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}
     196        CaseClause( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts )
     197                : StmtClause(loc), cond(cond), stmts(std::move(stmts)) {}
    182198
    183199        bool isDefault() const { return !cond; }
    184200
    185         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    186   private:
    187         CaseStmt * clone() const override { return new CaseStmt{ *this }; }
     201        const CaseClause * accept( Visitor & v ) const override { return v.visit( this ); }
     202  private:
     203        CaseClause * clone() const override { return new CaseClause{ *this }; }
    188204        MUTATE_FRIEND
    189205};
     
    298314  public:
    299315        ptr<CompoundStmt> body;
    300         std::vector<ptr<CatchStmt>> handlers;
    301         ptr<FinallyStmt> finally;
     316        std::vector<ptr<CatchClause>> handlers;
     317        ptr<FinallyClause> finally;
    302318
    303319        TryStmt( const CodeLocation & loc, const CompoundStmt * body,
    304                          const std::vector<ptr<CatchStmt>> && handlers, const FinallyStmt * finally,
     320                         const std::vector<ptr<CatchClause>> && handlers, const FinallyClause * finally,
    305321                         const std::vector<Label> && labels = {} )
    306322                : Stmt(loc, std::move(labels)), body(body), handlers(std::move(handlers)), finally(finally) {}
     
    313329
    314330// Catch clause of try statement
    315 class CatchStmt final : public Stmt {
     331class CatchClause final : public StmtClause {
    316332  public:
    317333        ptr<Decl> decl;
     
    320336        ExceptionKind kind;
    321337
    322         CatchStmt( const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond,
    323                            const Stmt * body, const std::vector<Label> && labels = {} )
    324                 : Stmt(loc, std::move(labels)), decl(decl), cond(cond), body(body), kind(kind) {}
    325 
    326         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    327   private:
    328         CatchStmt * clone() const override { return new CatchStmt{ *this }; }
     338        CatchClause( const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond,
     339                           const Stmt * body )
     340                : StmtClause(loc), decl(decl), cond(cond), body(body), kind(kind) {}
     341
     342        const CatchClause * accept( Visitor & v ) const override { return v.visit( this ); }
     343  private:
     344        CatchClause * clone() const override { return new CatchClause{ *this }; }
    329345        MUTATE_FRIEND
    330346};
    331347
    332348// Finally clause of try statement
    333 class FinallyStmt final : public Stmt {
     349class FinallyClause final : public StmtClause {
    334350  public:
    335351        ptr<CompoundStmt> body;
    336352
    337         FinallyStmt( const CodeLocation & loc, const CompoundStmt * body,
    338                                  std::vector<Label> && labels = {} )
    339                 : Stmt(loc, std::move(labels)), body(body) {}
    340 
    341         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    342   private:
    343         FinallyStmt * clone() const override { return new FinallyStmt{ *this }; }
     353        FinallyClause( const CodeLocation & loc, const CompoundStmt * body )
     354                : StmtClause(loc), body(body) {}
     355
     356        const FinallyClause * accept( Visitor & v ) const override { return v.visit( this ); }
     357  private:
     358        FinallyClause * clone() const override { return new FinallyClause{ *this }; }
    344359        MUTATE_FRIEND
    345360};
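
    The new StmtClause subtree lets switch and try own precisely typed clauses instead of generic statements, and the visitor (see src/AST/Visitor.hpp below) can return the matching clause type. A reduced sketch of the resulting shape, with simplified stand-ins for the real classes:

        #include <memory>
        #include <vector>

        struct ParseNode { virtual ~ParseNode() = default; };
        struct Stmt       : ParseNode {};
        struct StmtClause : ParseNode {};  // groups with statements without being one

        struct CaseClause final : StmtClause {
            std::vector<std::unique_ptr<Stmt>> stmts;  // the statements under one label
        };

        struct SwitchStmt final : Stmt {
            std::vector<std::unique_ptr<CaseClause>> cases;  // only clauses, never loose Stmts
        };

    With this split, a visitor signature like `const CaseClause * visit( const CaseClause * )` statically prevents a pass from replacing a case label with an arbitrary statement.
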
  • src/AST/TranslationUnit.hpp

    rba897d21 r2e9b59b  
    1010// Created On       : Tue Jun 11 15:30:00 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Tue Jun 11 15:42:00 2019
    13 // Update Count     : 0
     12// Last Modified On : Tue Mar 11 11:19:00 2022
     13// Update Count     : 1
    1414//
    1515
     
    2323namespace ast {
    2424
     25class TranslationGlobal {
     26public:
     27        std::map< UniqueId, Decl * > idMap;
     28
     29        ptr<Type> sizeType;
     30        const FunctionDecl * dereference;
     31        const StructDecl * dtorStruct;
     32        const FunctionDecl * dtorDestroy;
     33};
     34
    2535class TranslationUnit {
    2636public:
    2737        std::list< ptr< Decl > > decls;
    28 
    29         struct Global {
    30                 std::map< UniqueId, Decl * > idMap;
    31 
    32                 ptr<Type> sizeType;
    33                 const FunctionDecl * dereference;
    34                 const StructDecl * dtorStruct;
    35                 const FunctionDecl * dtorDestroy;
    36         } global;
     38        TranslationGlobal global;
    3739};
    3840
  • src/AST/Type.cpp

    rba897d21 r2e9b59b  
    147147// --- TypeInstType
    148148
     149TypeInstType::TypeInstType( const TypeDecl * b,
     150        CV::Qualifiers q, std::vector<ptr<Attribute>> && as )
     151: BaseInstType( b->name, q, move(as) ), base( b ), kind( b->kind ) {}
     152
    149153void TypeInstType::set_base( const TypeDecl * b ) {
    150154        base = b;
  • src/AST/Type.hpp

    rba897d21 r2e9b59b  
    421421                std::vector<ptr<Attribute>> && as = {} )
    422422        : BaseInstType( n, q, std::move(as) ), base( b ), kind( b->kind ) {}
     423
     424        TypeInstType( const TypeDecl * b,
     425                CV::Qualifiers q = {}, std::vector<ptr<Attribute>> && as = {} );
     426
    423427        TypeInstType( const std::string& n, TypeDecl::Kind k, CV::Qualifiers q = {},
    424428                std::vector<ptr<Attribute>> && as = {} )
  • src/AST/TypeSubstitution.hpp

    rba897d21 r2e9b59b  
    3737  public:
    3838        TypeSubstitution();
     39        template< typename FormalContainer, typename ActualContainer >
     40        TypeSubstitution( FormalContainer formals, ActualContainer actuals );
    3941        template< typename FormalIterator, typename ActualIterator >
    4042        TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin );
     
    7678        bool empty() const;
    7779
     80        template< typename FormalContainer, typename ActualContainer >
     81        void addAll( FormalContainer formals, ActualContainer actuals );
    7882        template< typename FormalIterator, typename ActualIterator >
    79         void add( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin );
     83        void addAll( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin );
    8084
    8185        /// create a new TypeSubstitution using bindings from env containing all of the type variables in expr
     
    112116};
    113117
     118template< typename FormalContainer, typename ActualContainer >
     119TypeSubstitution::TypeSubstitution( FormalContainer formals, ActualContainer actuals ) {
     120        assert( formals.size() == actuals.size() );
     121        addAll( formals.begin(), formals.end(), actuals.begin() );
     122}
     123
     124template< typename FormalIterator, typename ActualIterator >
     125TypeSubstitution::TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
     126        addAll( formalBegin, formalEnd, actualBegin );
     127}
     128
     129template< typename FormalContainer, typename ActualContainer >
     130void TypeSubstitution::addAll( FormalContainer formals, ActualContainer actuals ) {
     131        assert( formals.size() == actuals.size() );
     132        addAll( formals.begin(), formals.end(), actuals.begin() );
     133}
     134
    114135// this is the only place where type parameters outside a function formal may be substituted.
    115136template< typename FormalIterator, typename ActualIterator >
    116 void TypeSubstitution::add( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
     137void TypeSubstitution::addAll( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
    117138        // FormalIterator points to a TypeDecl
    118139        // ActualIterator points to a Type
     
    129150                        } // if
    130151                } else {
    131                        
     152                        // Is this an error?
    132153                } // if
    133154        } // for
    134155}
    135 
    136 
    137 
    138 template< typename FormalIterator, typename ActualIterator >
    139 TypeSubstitution::TypeSubstitution( FormalIterator formalBegin, FormalIterator formalEnd, ActualIterator actualBegin ) {
    140         add( formalBegin, formalEnd, actualBegin );
    141 }
    142 
    143156
    144157} // namespace ast
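
    The new container overloads only add a size check before delegating to the iterator form. That delegation pattern, reduced to a self-contained sketch (Subst here is a stand-in, not the real TypeSubstitution):

        #include <cassert>
        #include <map>
        #include <string>
        #include <vector>

        struct Subst {
            std::map<std::string, std::string> bindings;

            template< typename FormalIterator, typename ActualIterator >
            void addAll( FormalIterator fb, FormalIterator fe, ActualIterator ab ) {
                for ( ; fb != fe; ++fb, ++ab ) bindings[ *fb ] = *ab;
            }

            template< typename FormalContainer, typename ActualContainer >
            void addAll( FormalContainer formals, ActualContainer actuals ) {
                assert( formals.size() == actuals.size() );  // container form adds the size check
                addAll( formals.begin(), formals.end(), actuals.begin() );
            }
        };

        int main() {
            std::vector<std::string> formals { "T", "U" };
            std::vector<std::string> actuals { "int", "char" };
            Subst sub;
            sub.addAll( formals, actuals );  // delegates to the iterator overload
        }
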
  • src/AST/Util.cpp

    rba897d21 r2e9b59b  
    1010// Created On       : Wed Jan 19  9:46:00 2022
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Feb 18  9:42:00 2022
    13 // Update Count     : 0
     12// Last Modified On : Fri Mar 11 18:07:00 2022
     13// Update Count     : 1
    1414//
    1515
    1616#include "Util.hpp"
    1717
    18 #include "Decl.hpp"
    1918#include "Node.hpp"
     19#include "ParseNode.hpp"
    2020#include "Pass.hpp"
    2121#include "TranslationUnit.hpp"
    22 #include "Common/ScopedMap.h"
    2322
    2423#include <vector>
     
    4645};
    4746
      47/// Check that every node that can have a CodeLocation has it set.
     48struct SetCodeLocationsCore {
     49        void previsit( const ParseNode * node ) {
     50                assert( node->location.isSet() );
     51        }
     52};
     53
    4854struct InvariantCore {
    4955        // To save on the number of visits: this is a kind of composed core.
     5157        // None of the passes should make changes, so ordering doesn't matter.
    5157        NoStrongCyclesCore no_strong_cycles;
     58        SetCodeLocationsCore set_code_locations;
    5259
    5360        void previsit( const Node * node ) {
    5461                no_strong_cycles.previsit( node );
     62        }
     63
     64        void previsit( const ParseNode * node ) {
     65                no_strong_cycles.previsit( node );
     66                set_code_locations.previsit( node );
    5567        }
    5668
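
    InvariantCore composes the checks so the tree is walked only once; the most-derived previsit overload fans out to every member core that understands that node type. A compressed sketch of the composition pattern, with the actual checks elided:

        #include <cassert>

        struct Node {};
        struct ParseNode : Node { bool located = true; };

        struct NoCycles    { void previsit( const Node * ) { /* cycle check elided */ } };
        struct HasLocation { void previsit( const ParseNode * n ) { assert( n->located ); } };

        struct Composed {
            NoCycles    no_cycles;
            HasLocation has_location;

            void previsit( const Node * n ) { no_cycles.previsit( n ); }
            void previsit( const ParseNode * n ) {  // most-derived overload fans out to both
                no_cycles.previsit( n );
                has_location.previsit( n );
            }
        };
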
  • src/AST/Visitor.hpp

    rba897d21 r2e9b59b  
    4141    virtual const ast::Stmt *             visit( const ast::ForStmt              * ) = 0;
    4242    virtual const ast::Stmt *             visit( const ast::SwitchStmt           * ) = 0;
    43     virtual const ast::Stmt *             visit( const ast::CaseStmt             * ) = 0;
     43    virtual const ast::CaseClause *       visit( const ast::CaseClause           * ) = 0;
    4444    virtual const ast::Stmt *             visit( const ast::BranchStmt           * ) = 0;
    4545    virtual const ast::Stmt *             visit( const ast::ReturnStmt           * ) = 0;
    4646    virtual const ast::Stmt *             visit( const ast::ThrowStmt            * ) = 0;
    4747    virtual const ast::Stmt *             visit( const ast::TryStmt              * ) = 0;
    48     virtual const ast::Stmt *             visit( const ast::CatchStmt            * ) = 0;
    49     virtual const ast::Stmt *             visit( const ast::FinallyStmt          * ) = 0;
     48    virtual const ast::CatchClause *      visit( const ast::CatchClause          * ) = 0;
     49    virtual const ast::FinallyClause *    visit( const ast::FinallyClause        * ) = 0;
    5050    virtual const ast::Stmt *             visit( const ast::SuspendStmt          * ) = 0;
    5151    virtual const ast::Stmt *             visit( const ast::WaitForStmt          * ) = 0;
     
    7676    virtual const ast::Expr *             visit( const ast::CommaExpr            * ) = 0;
    7777    virtual const ast::Expr *             visit( const ast::TypeExpr             * ) = 0;
     78    virtual const ast::Expr *             visit( const ast::DimensionExpr        * ) = 0;
    7879    virtual const ast::Expr *             visit( const ast::AsmExpr              * ) = 0;
    7980    virtual const ast::Expr *             visit( const ast::ImplicitCopyCtorExpr * ) = 0;
  • src/CodeGen/CodeGenerator.cc

    rba897d21 r2e9b59b  
    274274        void CodeGenerator::postvisit( EnumDecl * enumDecl ) {
    275275                extension( enumDecl );
    276                 output << "enum ";
    277                 genAttributes( enumDecl->get_attributes() );
    278 
    279                 output << enumDecl->get_name();
    280 
    281276                std::list< Declaration* > &memb = enumDecl->get_members();
    282 
    283                 if ( ! memb.empty() ) {
    284                         output << " {" << endl;
    285 
    286                         ++indent;
     277                if (enumDecl->base && ! memb.empty()) {
     278                        unsigned long long last_val = -1;
    287279                        for ( std::list< Declaration* >::iterator i = memb.begin(); i != memb.end();  i++) {
    288280                                ObjectDecl * obj = dynamic_cast< ObjectDecl* >( *i );
    289281                                assert( obj );
    290                                 output << indent << mangleName( obj );
    291                                 if ( obj->get_init() ) {
    292                                         output << " = ";
    293                                         obj->get_init()->accept( *visitor );
    294                                 } // if
    295                                 output << "," << endl;
     282                                output << "static const ";
     283                                output << genType(enumDecl->base, "", options) << " ";
     284                                output << mangleName( obj ) << " ";
     285                                output << " = ";
     286                                output << "(" << genType(enumDecl->base, "", options) << ")";
     287                                if ( (BasicType *)(enumDecl->base) && ((BasicType *)(enumDecl->base))->isWholeNumber() ) {
     288                                        if ( obj->get_init() ) {
     289                                                obj->get_init()->accept( *visitor );
     290                                                last_val = ((ConstantExpr *)(((SingleInit *)(obj->init))->value))->constant.get_ival();
     291                                        } else {
     292                                                output << ++last_val;
     293                                        } // if
     294                                } else {
     295                                        if ( obj->get_init() ) {
     296                                                obj->get_init()->accept( *visitor );
     297                                        } else {
     298                                                // Should not reach here!
     299                                        }
     300                                }
     301                                output << ";" << endl;
    296302                        } // for
    297 
     303                } else {
     304                        output << "enum ";
     305                        genAttributes( enumDecl->get_attributes() );
     306
     307                        output << enumDecl->get_name();
     308
     309                        if ( ! memb.empty() ) {
     310                                output << " {" << endl;
     311
     312                                ++indent;
     313                                for ( std::list< Declaration* >::iterator i = memb.begin(); i != memb.end();  i++) {
     314                                        ObjectDecl * obj = dynamic_cast< ObjectDecl* >( *i );
     315                                        assert( obj );
     316                                        output << indent << mangleName( obj );
     317                                        if ( obj->get_init() ) {
     318                                                output << " = ";
     319                                                obj->get_init()->accept( *visitor );
     320                                        } // if
     321                                        output << "," << endl;
     322                                } // for
    298323                        --indent;
    299 
    300324                        output << indent << "}";
     325                        } // if
    301326                } // if
    302327        }
     
    347372                                des->accept( *visitor );
    348373                        } else {
    349                                 // otherwise, it has to be a ConstantExpr or CastExpr, initializing array eleemnt
     374                                // otherwise, it has to be a ConstantExpr or CastExpr, initializing array element
    350375                                output << "[";
    351376                                des->accept( *visitor );
     
    661686                        output << opInfo->symbol;
    662687                } else {
     688                        // if (dynamic_cast<EnumInstType *>(variableExpr->get_var()->get_type())
     689                        // && dynamic_cast<EnumInstType *>(variableExpr->get_var()->get_type())->baseEnum->base) {
     690                        //      output << '(' <<genType(dynamic_cast<EnumInstType *>(variableExpr->get_var()->get_type())->baseEnum->base, "", options) << ')';
     691                        // }
    663692                        output << mangleName( variableExpr->get_var() );
    664693                } // if
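
    For an enum with a base type the generator now bypasses C's enum entirely: each member becomes a static const object of the base type, cast from its initializer, and whole-number bases count up from the last explicit value. Roughly, for a hypothetical input `enum(char) Letter { A = 'a', B };` (and assuming char is treated as a whole-number base), the emitted C would look like:

        // Hypothetical input:  enum(char) Letter { A = 'a', B };
        static const char A = (char)'a';
        static const char B = (char)98;   // no initializer: previous value + 1

    The matching change in src/CodeGen/GenType.cc below makes uses of such an enum type print as the base type instead of `enum Letter`.
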
  • src/CodeGen/FixMain.cc

    rba897d21 r2e9b59b  
    9191}
    9292
    93 ObjectDecl * charStarObj() {
     93ObjectDecl * makeArgvObj() {
    9494        return new ObjectDecl(
    9595                "", Type::StorageClasses(), LinkageSpec::Cforall, 0,
     
    117117        main_type->get_returnVals().push_back( signedIntObj() );
    118118        main_type->get_parameters().push_back( signedIntObj() );
    119         main_type->get_parameters().push_back( charStarObj() );
     119        main_type->get_parameters().push_back( makeArgvObj() );
    120120        return create_mangled_main_function_name( main_type );
    121121}
  • src/CodeGen/GenType.cc

    rba897d21 r2e9b59b  
    253253
    254254        void GenType::postvisit( EnumInstType * enumInst ) {
    255                 typeString = enumInst->name + " " + typeString;
    256                 if ( options.genC ) typeString = "enum " + typeString;
     255                if ( enumInst->baseEnum->base ) {
     256                        typeString = genType(enumInst->baseEnum->base, "", options) + typeString;
     257                } else {
     258                        typeString = enumInst->name + " " + typeString;
     259                        if ( options.genC ) {
     260                                typeString = "enum " + typeString;
     261                        }
     262                }
    257263                handleQualifiers( enumInst );
    258264        }
  • src/Common/CodeLocationTools.cpp

    rba897d21 r2e9b59b  
    99// Author           : Andrew Beach
    1010// Created On       : Fri Dec  4 15:42:00 2020
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue Feb  1 09:14:39 2022
    13 // Update Count     : 3
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Mon Mar 14 15:14:00 2022
     13// Update Count     : 4
    1414//
    1515
     
    112112    macro(ForStmt, Stmt) \
    113113    macro(SwitchStmt, Stmt) \
    114     macro(CaseStmt, Stmt) \
     114    macro(CaseClause, CaseClause) \
    115115    macro(BranchStmt, Stmt) \
    116116    macro(ReturnStmt, Stmt) \
    117117    macro(ThrowStmt, Stmt) \
    118118    macro(TryStmt, Stmt) \
    119     macro(CatchStmt, Stmt) \
    120     macro(FinallyStmt, Stmt) \
     119    macro(CatchClause, CatchClause) \
     120    macro(FinallyClause, FinallyClause) \
    121121    macro(SuspendStmt, Stmt) \
    122122    macro(WaitForStmt, Stmt) \
     
    147147    macro(CommaExpr, Expr) \
    148148    macro(TypeExpr, Expr) \
     149    macro(DimensionExpr, Expr) \
    149150    macro(AsmExpr, Expr) \
    150151    macro(ImplicitCopyCtorExpr, Expr) \
     
    239240};
    240241
     242class LocalFillCore : public ast::WithGuards {
     243        CodeLocation const * parent;
     244public:
     245        LocalFillCore( CodeLocation const & location ) : parent( &location ) {
     246                assert( location.isSet() );
     247        }
     248
     249        template<typename node_t>
     250        auto previsit( node_t const * node )
     251                        -> typename std::enable_if<has_code_location<node_t>::value, node_t const *>::type {
     252                if ( node->location.isSet() ) {
     253                        GuardValue( parent ) = &node->location;
     254                        return node;
     255                } else {
     256                        node_t * mut = ast::mutate( node );
     257                        mut->location = *parent;
     258                        return mut;
     259                }
     260        }
     261};
     262
    241263} // namespace
    242264
     
    278300        ast::Pass<FillCore>::run( unit );
    279301}
     302
     303ast::Node const * localFillCodeLocations(
     304                CodeLocation const & location , ast::Node const * node ) {
     305        ast::Pass<LocalFillCore> visitor( location );
     306        return node->accept( visitor );
     307}
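
    localFillCodeLocations gives every node in a subtree that lacks a location the nearest ancestor's location, seeded by the argument; the GuardValue in LocalFillCore restores the previous parent as the traversal leaves each node. The fill rule itself, restated as a tiny standalone recursion (simplified types, not the real pass):

        #include <string>
        #include <vector>

        struct Node {
            std::string location;          // empty means "unset"
            std::vector<Node> kids;
        };

        void fill( Node & n, const std::string & parent ) {
            if ( n.location.empty() ) n.location = parent;        // inherit the nearest set location
            for ( Node & kid : n.kids ) fill( kid, n.location );  // this node is now the parent
        }

        int main() {
            Node root { "foo.cfa:10", { Node{ "", {} } } };
            fill( root, "foo.cfa:1" );  // the seed plays the role of the location argument
        }
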
  • src/Common/CodeLocationTools.hpp

    rba897d21 r2e9b59b  
    1010// Created On       : Fri Dec  4 15:35:00 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Dec  9  9:53:00 2020
    13 // Update Count     : 1
     12// Last Modified On : Mon Mar 14 15:14:00 2022
     13// Update Count     : 2
    1414//
    1515
    1616#pragma once
    1717
     18struct CodeLocation;
    1819namespace ast {
     20        class Node;
    1921        class TranslationUnit;
    2022}
     
    2830// Assign a nearby code-location to any unset code locations in the forest.
    2931void forceFillCodeLocations( ast::TranslationUnit & unit );
     32
     33// Fill in code-locations with a parent code location,
     34// using the provided CodeLocation as the base.
     35ast::Node const *
     36        localFillCodeLocations( CodeLocation const &, ast::Node const * );
  • src/Common/Eval.cc

    rba897d21 r2e9b59b  
    112112        }
    113113
    114         void postvisit( const ast::VariableExpr * expr ) {
     114        void postvisit( const ast::VariableExpr * expr ) { // No hit
    115115                if ( const ast::EnumInstType * inst = dynamic_cast<const ast::EnumInstType *>(expr->result.get()) ) {
    116116                        if ( const ast::EnumDecl * decl = inst->base ) {
  • src/Common/Examine.cc

    rba897d21 r2e9b59b  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // Examine.h --
     7// Examine.cc -- Helpers for examining AST code.
    88//
    99// Author           : Andrew Beach
    1010// Created On       : Wed Sept 2 14:02 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Sep  8 12:15 2020
    13 // Update Count     : 0
     12// Last Modified On : Fri Dec 10 10:27 2021
     13// Update Count     : 1
    1414//
    1515
    1616#include "Common/Examine.h"
    1717
     18#include "AST/Type.hpp"
    1819#include "CodeGen/OperatorTable.h"
     20#include "InitTweak/InitTweak.h"
    1921
    2022DeclarationWithType * isMainFor( FunctionDecl * func, AggregateDecl::Aggregate kind ) {
     
    3638
    3739namespace {
     40
      41// Like getTypeofThis, but with some extra checks used in this module.
     42const ast::Type * getTypeofThisSolo( const ast::FunctionDecl * func ) {
     43        if ( 1 != func->params.size() ) {
     44                return nullptr;
     45        }
     46        auto ref = func->type->params.front().as<ast::ReferenceType>();
     47        return (ref) ? ref->base : nullptr;
     48}
     49
     50}
     51
     52const ast::DeclWithType * isMainFor(
     53                const ast::FunctionDecl * func, ast::AggregateDecl::Aggregate kind ) {
     54        if ( "main" != func->name ) return nullptr;
     55        if ( 1 != func->params.size() ) return nullptr;
     56
     57        auto param = func->params.front();
     58
     59        auto type = dynamic_cast<const ast::ReferenceType *>( param->get_type() );
     60        if ( !type ) return nullptr;
     61
     62        auto obj = type->base.as<ast::StructInstType>();
     63        if ( !obj ) return nullptr;
     64
     65        if ( kind != obj->base->kind ) return nullptr;
     66
     67        return param;
     68}
     69
     70namespace {
    3871        Type * getDestructorParam( FunctionDecl * func ) {
    3972                if ( !CodeGen::isDestructor( func->name ) ) return nullptr;
     
    4881                return nullptr;
    4982        }
     83
     84const ast::Type * getDestructorParam( const ast::FunctionDecl * func ) {
     85        if ( !CodeGen::isDestructor( func->name ) ) return nullptr;
     86        //return InitTweak::getParamThis( func )->type;
     87        return getTypeofThisSolo( func );
     88}
     89
    5090}
    5191
     
    5797        return false;
    5898}
     99
     100bool isDestructorFor(
     101                const ast::FunctionDecl * func, const ast::StructDecl * type_decl ) {
     102        if ( const ast::Type * type = getDestructorParam( func ) ) {
     103                auto stype = dynamic_cast<const ast::StructInstType *>( type );
     104                return stype && stype->base.get() == type_decl;
     105        }
     106        return false;
     107}
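
    The new-AST isMainFor boils down to a shape test on the signature: named main, exactly one parameter, passed by reference to an aggregate of the requested kind. The same test as a standalone predicate over simplified stand-in types (illustrative only):

        #include <string>
        #include <vector>

        enum class Kind { Struct, Monitor, Thread, Coroutine };

        struct ParamType { bool is_reference; Kind pointee; };
        struct Func { std::string name; std::vector<ParamType> params; };

        const ParamType * isMainFor( const Func & f, Kind kind ) {
            if ( f.name != "main" ) return nullptr;      // must literally be named main
            if ( f.params.size() != 1 ) return nullptr;  // exactly one parameter
            const ParamType & p = f.params.front();
            if ( ! p.is_reference ) return nullptr;      // must be passed by reference
            if ( p.pointee != kind ) return nullptr;     // aggregate kind must match
            return &p;
        }
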
  • src/Common/Examine.h

    rba897d21 r2e9b59b  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // Examine.h --
     7// Examine.h -- Helpers for examining AST code.
    88//
    99// Author           : Andrew Beach
    1010// Created On       : Wed Sept 2 13:57 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Sep  8 12:08 2020
    13 // Update Count     : 0
     12// Last Modified On : Fri Dec 10 10:28 2021
     13// Update Count     : 1
    1414//
    1515
     16#include "AST/Decl.hpp"
    1617#include "SynTree/Declaration.h"
    1718
    1819/// Check if this is a main function for a type of an aggregate kind.
    1920DeclarationWithType * isMainFor( FunctionDecl * func, AggregateDecl::Aggregate kind );
     21const ast::DeclWithType * isMainFor(
     22        const ast::FunctionDecl * func, ast::AggregateDecl::Aggregate kind );
    2023// Returns a pointer to the parameter if true, nullptr otherwise.
    2124
    2225/// Check if this function is a destructor for the given structure.
    2326bool isDestructorFor( FunctionDecl * func, StructDecl * type_decl );
     27bool isDestructorFor(
     28        const ast::FunctionDecl * func, const ast::StructDecl * type );
  • src/Common/PassVisitor.impl.h

    rba897d21 r2e9b59b  
    754754
    755755        // unlike structs, traits, and unions, enums inject their members into the global scope
     756        // if ( node->base ) maybeAccept_impl( node->base, *this ); // Need this? Maybe not?
    756757        maybeAccept_impl( node->parameters, *this );
    757758        maybeAccept_impl( node->members   , *this );
  • src/Concurrency/Keywords.cc

    rba897d21 r2e9b59b  
    12041204                                        //new TypeofType( noQualifiers, args.front()->clone() )
    12051205                                        new TypeofType( noQualifiers, new UntypedExpr(
    1206                                                         new NameExpr( "__get_type" ),
     1206                                                        new NameExpr( "__get_mutexstmt_lock_type" ),
    12071207                                                        { args.front()->clone() }
    12081208                                                )
     
    12161216                                map_range < std::list<Initializer*> > ( args, [](Expression * var ){
    12171217                                        return new SingleInit( new UntypedExpr(
    1218                                                         new NameExpr( "__get_ptr" ),
     1218                                                        new NameExpr( "__get_mutexstmt_lock_ptr" ),
    12191219                                                        { var }
    12201220                                        ) );
     
    12271227                TypeExpr * lock_type_expr = new TypeExpr(
    12281228                        new TypeofType( noQualifiers, new UntypedExpr(
    1229                                 new NameExpr( "__get_type" ),
     1229                                new NameExpr( "__get_mutexstmt_lock_type" ),
    12301230                                { args.front()->clone() }
    12311231                                )
  • src/Concurrency/KeywordsNew.cpp

    rba897d21 r2e9b59b  
    1010// Created On       : Tue Nov 16  9:53:00 2021
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Dec  1 11:24:00 2021
    13 // Update Count     : 1
     12// Last Modified On : Fri Mar 11 10:40:00 2022
     13// Update Count     : 2
    1414//
     15
     16#include <iostream>
    1517
    1618#include "Concurrency/Keywords.h"
     
    1820#include "AST/Copy.hpp"
    1921#include "AST/Decl.hpp"
     22#include "AST/Expr.hpp"
    2023#include "AST/Pass.hpp"
    2124#include "AST/Stmt.hpp"
     25#include "AST/DeclReplacer.hpp"
    2226#include "AST/TranslationUnit.hpp"
    2327#include "CodeGen/OperatorTable.h"
     28#include "Common/Examine.h"
    2429#include "Common/utility.h"
     30#include "Common/UniqueName.h"
     31#include "ControlStruct/LabelGeneratorNew.hpp"
    2532#include "InitTweak/InitTweak.h"
     33#include "Virtual/Tables.h"
    2634
    2735namespace Concurrency {
     
    2937namespace {
    3038
    31 inline static bool isThread( const ast::DeclWithType * decl ) {
     39// --------------------------------------------------------------------------
     40// Loose Helper Functions:
     41
     42/// Detect threads constructed with the keyword thread.
     43bool isThread( const ast::DeclWithType * decl ) {
    3244        auto baseType = decl->get_type()->stripDeclarator();
    3345        auto instType = dynamic_cast<const ast::StructInstType *>( baseType );
     
    3648}
    3749
     50/// Get the virtual type id if given a type name.
     51std::string typeIdType( std::string const & exception_name ) {
     52        return exception_name.empty() ? std::string()
     53                : Virtual::typeIdType( exception_name );
     54}
     55
     56/// Get the vtable type name if given a type name.
     57std::string vtableTypeName( std::string const & exception_name ) {
     58        return exception_name.empty() ? std::string()
     59                : Virtual::vtableTypeName( exception_name );
     60}
     61
     62static ast::Type * mutate_under_references( ast::ptr<ast::Type>& type ) {
     63        ast::Type * mutType = type.get_and_mutate();
     64        for ( ast::ReferenceType * mutRef
     65                ; (mutRef = dynamic_cast<ast::ReferenceType *>( mutType ))
     66                ; mutType = mutRef->base.get_and_mutate() );
     67        return mutType;
     68}
     69
      70// Add the aggregate's generic parameters to the function, and apply them
      71// to the function type and its first "this" parameter.
     72ast::FunctionDecl * fixupGenerics(
     73                const ast::FunctionDecl * func, const ast::StructDecl * decl ) {
     74        const CodeLocation & location = decl->location;
      75        // We have to update both the declaration and the type.
     76        auto mutFunc = ast::mutate( func );
     77        auto mutType = mutFunc->type.get_and_mutate();
     78
     79        if ( decl->params.empty() ) {
     80                return mutFunc;
     81        }
     82
     83        assert( 0 != mutFunc->params.size() );
     84        assert( 0 != mutType->params.size() );
     85
     86        // Add the "forall" clause information.
     87        for ( const ast::ptr<ast::TypeDecl> & typeParam : decl->params ) {
     88                auto typeDecl = ast::deepCopy( typeParam );
     89                mutFunc->type_params.push_back( typeDecl );
     90                mutType->forall.push_back( new ast::TypeInstType( typeDecl ) );
     91                for ( auto & assertion : typeDecl->assertions ) {
     92                        mutFunc->assertions.push_back( assertion );
     93                        mutType->assertions.emplace_back(
     94                                new ast::VariableExpr( location, assertion ) );
     95                }
     96                typeDecl->assertions.clear();
     97        }
     98
     99        // Even chain_mutate is not powerful enough for this:
     100        ast::ptr<ast::Type>& paramType = strict_dynamic_cast<ast::ObjectDecl *>(
     101                mutFunc->params[0].get_and_mutate() )->type;
     102        auto paramTypeInst = strict_dynamic_cast<ast::StructInstType *>(
     103                mutate_under_references( paramType ) );
     104        auto typeParamInst = strict_dynamic_cast<ast::StructInstType *>(
     105                mutate_under_references( mutType->params[0] ) );
     106
     107        for ( const ast::ptr<ast::TypeDecl> & typeDecl : mutFunc->type_params ) {
     108                paramTypeInst->params.push_back(
     109                        new ast::TypeExpr( location, new ast::TypeInstType( typeDecl ) ) );
     110                typeParamInst->params.push_back(
     111                        new ast::TypeExpr( location, new ast::TypeInstType( typeDecl ) ) );
     112        }
     113
     114        return mutFunc;
     115}
     116
    38117// --------------------------------------------------------------------------
    39 struct MutexKeyword final {
     118struct ConcurrentSueKeyword : public ast::WithDeclsToAdd<> {
     119        ConcurrentSueKeyword(
     120                std::string&& type_name, std::string&& field_name,
     121                std::string&& getter_name, std::string&& context_error,
     122                std::string&& exception_name,
     123                bool needs_main, ast::AggregateDecl::Aggregate cast_target
     124        ) :
     125                type_name( type_name ), field_name( field_name ),
     126                getter_name( getter_name ), context_error( context_error ),
     127                exception_name( exception_name ),
     128                typeid_name( typeIdType( exception_name ) ),
     129                vtable_name( vtableTypeName( exception_name ) ),
     130                needs_main( needs_main ), cast_target( cast_target )
     131        {}
     132
     133        virtual ~ConcurrentSueKeyword() {}
     134
     135        const ast::Decl * postvisit( const ast::StructDecl * decl );
     136        const ast::DeclWithType * postvisit( const ast::FunctionDecl * decl );
     137        const ast::Expr * postvisit( const ast::KeywordCastExpr * expr );
     138
     139        struct StructAndField {
     140                const ast::StructDecl * decl;
     141                const ast::ObjectDecl * field;
     142        };
     143
     144        const ast::StructDecl * handleStruct( const ast::StructDecl * );
     145        void handleMain( const ast::FunctionDecl *, const ast::StructInstType * );
     146        void addTypeId( const ast::StructDecl * );
     147        void addVtableForward( const ast::StructDecl * );
     148        const ast::FunctionDecl * forwardDeclare( const ast::StructDecl * );
     149        StructAndField addField( const ast::StructDecl * );
     150        void addGetRoutines( const ast::ObjectDecl *, const ast::FunctionDecl * );
     151        void addLockUnlockRoutines( const ast::StructDecl * );
     152
     153private:
     154        const std::string type_name;
     155        const std::string field_name;
     156        const std::string getter_name;
     157        const std::string context_error;
     158        const std::string exception_name;
     159        const std::string typeid_name;
     160        const std::string vtable_name;
     161        const bool needs_main;
     162        const ast::AggregateDecl::Aggregate cast_target;
     163
     164        const ast::StructDecl   * type_decl = nullptr;
     165        const ast::FunctionDecl * dtor_decl = nullptr;
     166        const ast::StructDecl * except_decl = nullptr;
     167        const ast::StructDecl * typeid_decl = nullptr;
     168        const ast::StructDecl * vtable_decl = nullptr;
     169
     170};
     171
     172// Handles thread type declarations:
     173//
      174// thread MyThread {                         struct MyThread {
     175//  int data;                                  int data;
     176//  a_struct_t more_data;                      a_struct_t more_data;
     177//                                =>             thread$ __thrd_d;
     178// };                                        };
     179//                                           static inline thread$ * get_thread( MyThread * this ) { return &this->__thrd_d; }
     180//
     181struct ThreadKeyword final : public ConcurrentSueKeyword {
     182        ThreadKeyword() : ConcurrentSueKeyword(
     183                "thread$",
     184                "__thrd",
     185                "get_thread",
     186                "thread keyword requires threads to be in scope, add #include <thread.hfa>\n",
     187                "ThreadCancelled",
     188                true,
     189                ast::AggregateDecl::Thread )
     190        {}
     191
     192        virtual ~ThreadKeyword() {}
     193};
     194
     195// Handles coroutine type declarations:
     196//
     197// coroutine MyCoroutine {                   struct MyCoroutine {
     198//  int data;                                  int data;
     199//  a_struct_t more_data;                      a_struct_t more_data;
     200//                                =>             coroutine$ __cor_d;
     201// };                                        };
     202//                                           static inline coroutine$ * get_coroutine( MyCoroutine * this ) { return &this->__cor_d; }
     203//
     204struct CoroutineKeyword final : public ConcurrentSueKeyword {
     205        CoroutineKeyword() : ConcurrentSueKeyword(
     206                "coroutine$",
     207                "__cor",
     208                "get_coroutine",
     209                "coroutine keyword requires coroutines to be in scope, add #include <coroutine.hfa>\n",
     210                "CoroutineCancelled",
     211                true,
     212                ast::AggregateDecl::Coroutine )
     213        {}
     214
     215        virtual ~CoroutineKeyword() {}
     216};
     217
     218// Handles monitor type declarations:
     219//
     220// monitor MyMonitor {                       struct MyMonitor {
     221//  int data;                                  int data;
     222//  a_struct_t more_data;                      a_struct_t more_data;
     223//                                =>             monitor$ __mon_d;
     224// };                                        };
      225//                                           static inline monitor$ * get_monitor( MyMonitor * this ) {
      226//                                               return &this->__mon_d;
     227//                                           }
     228//                                           void lock(MyMonitor & this) {
     229//                                               lock(get_monitor(this));
     230//                                           }
     231//                                           void unlock(MyMonitor & this) {
     232//                                               unlock(get_monitor(this));
     233//                                           }
     234//
     235struct MonitorKeyword final : public ConcurrentSueKeyword {
     236        MonitorKeyword() : ConcurrentSueKeyword(
     237                "monitor$",
     238                "__mon",
     239                "get_monitor",
     240                "monitor keyword requires monitors to be in scope, add #include <monitor.hfa>\n",
     241                "",
     242                false,
     243                ast::AggregateDecl::Monitor )
     244        {}
     245
     246        virtual ~MonitorKeyword() {}
     247};
     248
     249// Handles generator type declarations:
     250//
     251// generator MyGenerator {                   struct MyGenerator {
     252//  int data;                                  int data;
     253//  a_struct_t more_data;                      a_struct_t more_data;
     254//                                =>             int __generator_state;
     255// };                                        };
     256//
     257struct GeneratorKeyword final : public ConcurrentSueKeyword {
     258        GeneratorKeyword() : ConcurrentSueKeyword(
     259                "generator$",
     260                "__generator_state",
     261                "get_generator",
     262                "Unable to find builtin type generator$\n",
     263                "",
     264                true,
     265                ast::AggregateDecl::Generator )
     266        {}
     267
     268        virtual ~GeneratorKeyword() {}
     269};
     270
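
    Each keyword struct above only supplies configuration strings; ConcurrentSueKeyword does the actual rewriting. Restating the thread diagram as compilable C++ (thread$ is a stand-in here for the runtime handle declared in <thread.hfa>, and the $-identifiers follow the extension the generated code relies on):

        struct thread$ { /* stand-in for the runtime handle from <thread.hfa> */ };

        // What the rewrite produces for `thread MyThread { int data; };`:
        struct MyThread {
            int data;              // user members stay as written
            thread$ __thrd_d;      // injected field; name built from the "__thrd" config
        };

        static inline thread$ * get_thread( MyThread * this_ ) {  // injected getter (getter_name)
            return &this_->__thrd_d;  // what a (thread &) cast lowers to via concrete_target
        }
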
     271const ast::Decl * ConcurrentSueKeyword::postvisit(
     272                const ast::StructDecl * decl ) {
     273        if ( !decl->body ) {
     274                return decl;
     275        } else if ( cast_target == decl->kind ) {
     276                return handleStruct( decl );
     277        } else if ( type_name == decl->name ) {
     278                assert( !type_decl );
     279                type_decl = decl;
     280        } else if ( exception_name == decl->name ) {
     281                assert( !except_decl );
     282                except_decl = decl;
     283        } else if ( typeid_name == decl->name ) {
     284                assert( !typeid_decl );
     285                typeid_decl = decl;
     286        } else if ( vtable_name == decl->name ) {
     287                assert( !vtable_decl );
     288                vtable_decl = decl;
     289        }
     290        return decl;
     291}
     292
     293// Try to get the full definition, but raise an error on conflicts.
     294const ast::FunctionDecl * getDefinition(
     295                const ast::FunctionDecl * old_decl,
     296                const ast::FunctionDecl * new_decl ) {
     297        if ( !new_decl->stmts ) {
     298                return old_decl;
     299        } else if ( !old_decl->stmts ) {
     300                return new_decl;
     301        } else {
     302                assert( !old_decl->stmts || !new_decl->stmts );
     303                return nullptr;
     304        }
     305}
     306
     307const ast::DeclWithType * ConcurrentSueKeyword::postvisit(
     308                const ast::FunctionDecl * decl ) {
     309        if ( type_decl && isDestructorFor( decl, type_decl ) ) {
     310                // Check for forward declarations, try to get the full definition.
     311                dtor_decl = (dtor_decl) ? getDefinition( dtor_decl, decl ) : decl;
     312        } else if ( !vtable_name.empty() && decl->has_body() ) {
     313                if (const ast::DeclWithType * param = isMainFor( decl, cast_target )) {
     314                        if ( !vtable_decl ) {
     315                                SemanticError( decl, context_error );
     316                        }
     317                        // Should be safe because of isMainFor.
     318                        const ast::StructInstType * struct_type =
     319                                static_cast<const ast::StructInstType *>(
     320                                        static_cast<const ast::ReferenceType *>(
     321                                                param->get_type() )->base.get() );
     322
     323                        handleMain( decl, struct_type );
     324                }
     325        }
     326        return decl;
     327}
     328
     329const ast::Expr * ConcurrentSueKeyword::postvisit(
     330                const ast::KeywordCastExpr * expr ) {
     331        if ( cast_target == expr->target ) {
     332                // Convert `(thread &)ex` to `(thread$ &)*get_thread(ex)`, etc.
     333                if ( !type_decl || !dtor_decl ) {
     334                        SemanticError( expr, context_error );
     335                }
     336                assert( nullptr == expr->result );
     337                auto cast = ast::mutate( expr );
     338                cast->result = new ast::ReferenceType( new ast::StructInstType( type_decl ) );
     339                cast->concrete_target.field  = field_name;
     340                cast->concrete_target.getter = getter_name;
     341                return cast;
     342        }
     343        return expr;
     344}
     345
     346const ast::StructDecl * ConcurrentSueKeyword::handleStruct(
     347                const ast::StructDecl * decl ) {
     348        assert( decl->body );
     349
     350        if ( !type_decl || !dtor_decl ) {
     351                SemanticError( decl, context_error );
     352        }
     353
     354        if ( !exception_name.empty() ) {
     355                if( !typeid_decl || !vtable_decl ) {
     356                        SemanticError( decl, context_error );
     357                }
     358                addTypeId( decl );
     359                addVtableForward( decl );
     360        }
     361
     362        const ast::FunctionDecl * func = forwardDeclare( decl );
     363        StructAndField addFieldRet = addField( decl );
     364        decl = addFieldRet.decl;
     365        const ast::ObjectDecl * field = addFieldRet.field;
     366
     367        addGetRoutines( field, func );
     368        // Add routines to monitors for use by mutex stmt.
     369        if ( ast::AggregateDecl::Monitor == cast_target ) {
     370                addLockUnlockRoutines( decl );
     371        }
     372
     373        return decl;
     374}
     375
     376void ConcurrentSueKeyword::handleMain(
     377                const ast::FunctionDecl * decl, const ast::StructInstType * type ) {
     378        assert( vtable_decl );
     379        assert( except_decl );
     380
     381        const CodeLocation & location = decl->location;
     382
     383        std::vector<ast::ptr<ast::Expr>> poly_args = {
     384                new ast::TypeExpr( location, type ),
     385        };
     386        ast::ObjectDecl * vtable_object = Virtual::makeVtableInstance(
     387                location,
     388                "_default_vtable_object_declaration",
     389                new ast::StructInstType( vtable_decl, copy( poly_args ) ),
     390                type,
     391                nullptr
     392        );
     393        declsToAddAfter.push_back( vtable_object );
     394        declsToAddAfter.push_back(
     395                new ast::ObjectDecl(
     396                        location,
     397                        Virtual::concurrentDefaultVTableName(),
     398                        new ast::ReferenceType( vtable_object->type, ast::CV::Const ),
     399                        new ast::SingleInit( location,
     400                                new ast::VariableExpr( location, vtable_object ) ),
     401                        ast::Storage::Classes(),
     402                        ast::Linkage::Cforall
     403                )
     404        );
     405        declsToAddAfter.push_back( Virtual::makeGetExceptionFunction(
     406                location,
     407                vtable_object,
     408                new ast::StructInstType( except_decl, copy( poly_args ) )
     409        ) );
     410}
     411
     412void ConcurrentSueKeyword::addTypeId( const ast::StructDecl * decl ) {
     413        assert( typeid_decl );
     414        const CodeLocation & location = decl->location;
     415
     416        ast::StructInstType * typeid_type =
     417                new ast::StructInstType( typeid_decl, ast::CV::Const );
     418        typeid_type->params.push_back(
     419                new ast::TypeExpr( location, new ast::StructInstType( decl ) ) );
     420        declsToAddBefore.push_back(
     421                Virtual::makeTypeIdInstance( location, typeid_type ) );
     422        // If the typeid_type is going to be kept, the other reference will have
     423        // been made by now, but we also get to avoid extra mutates.
     424        ast::ptr<ast::StructInstType> typeid_cleanup = typeid_type;
     425}
     426
     427void ConcurrentSueKeyword::addVtableForward( const ast::StructDecl * decl ) {
     428        assert( vtable_decl );
     429        const CodeLocation& location = decl->location;
     430
     431        std::vector<ast::ptr<ast::Expr>> poly_args = {
     432                new ast::TypeExpr( location, new ast::StructInstType( decl ) ),
     433        };
     434        declsToAddBefore.push_back( Virtual::makeGetExceptionForward(
     435                location,
     436                new ast::StructInstType( vtable_decl, copy( poly_args ) ),
     437                new ast::StructInstType( except_decl, copy( poly_args ) )
     438        ) );
     439        ast::ObjectDecl * vtable_object = Virtual::makeVtableForward(
     440                location,
     441                "_default_vtable_object_declaration",
     442                new ast::StructInstType( vtable_decl, std::move( poly_args ) )
     443        );
     444        declsToAddBefore.push_back( vtable_object );
     445        declsToAddBefore.push_back(
     446                new ast::ObjectDecl(
     447                        location,
     448                        Virtual::concurrentDefaultVTableName(),
     449                        new ast::ReferenceType( vtable_object->type, ast::CV::Const ),
     450                        nullptr,
     451                        ast::Storage::Extern,
     452                        ast::Linkage::Cforall
     453                )
     454        );
     455}
     456
     457const ast::FunctionDecl * ConcurrentSueKeyword::forwardDeclare(
     458                const ast::StructDecl * decl ) {
     459        const CodeLocation & location = decl->location;
     460
     461        ast::StructDecl * forward = ast::deepCopy( decl );
     462        {
      463                // If removing members drops the ref-count to zero, do not free.
     464                ast::ptr<ast::StructDecl> forward_ptr = forward;
     465                forward->body = false;
     466                forward->members.clear();
     467                forward_ptr.release();
     468        }
     469
     470        ast::ObjectDecl * this_decl = new ast::ObjectDecl(
     471                location,
     472                "this",
     473                new ast::ReferenceType( new ast::StructInstType( decl ) ),
     474                nullptr,
     475                ast::Storage::Classes(),
     476                ast::Linkage::Cforall
     477        );
     478
     479        ast::ObjectDecl * ret_decl = new ast::ObjectDecl(
     480                location,
     481                "ret",
     482                new ast::PointerType( new ast::StructInstType( type_decl ) ),
     483                nullptr,
     484                ast::Storage::Classes(),
     485                ast::Linkage::Cforall
     486        );
     487
     488        ast::FunctionDecl * get_decl = new ast::FunctionDecl(
     489                location,
     490                getter_name,
     491                {}, // forall
     492                { this_decl }, // params
     493                { ret_decl }, // returns
     494                nullptr, // stmts
     495                ast::Storage::Static,
     496                ast::Linkage::Cforall,
     497                { new ast::Attribute( "const" ) },
     498                ast::Function::Inline
     499        );
     500        get_decl = fixupGenerics( get_decl, decl );
     501
     502        ast::FunctionDecl * main_decl = nullptr;
     503        if ( needs_main ) {
     504                // `this_decl` is copied here because the original was used above.
     505                main_decl = new ast::FunctionDecl(
     506                        location,
     507                        "main",
     508                        {},
     509                        { ast::deepCopy( this_decl ) },
     510                        {},
     511                        nullptr,
     512                        ast::Storage::Classes(),
     513                        ast::Linkage::Cforall
     514                );
     515                main_decl = fixupGenerics( main_decl, decl );
     516        }
     517
     518        declsToAddBefore.push_back( forward );
     519        if ( needs_main ) declsToAddBefore.push_back( main_decl );
     520        declsToAddBefore.push_back( get_decl );
     521
     522        return get_decl;
     523}
     524
     525ConcurrentSueKeyword::StructAndField ConcurrentSueKeyword::addField(
     526                const ast::StructDecl * decl ) {
     527        const CodeLocation & location = decl->location;
     528
     529        ast::ObjectDecl * field = new ast::ObjectDecl(
     530                location,
     531                field_name,
     532                new ast::StructInstType( type_decl ),
     533                nullptr,
     534                ast::Storage::Classes(),
     535                ast::Linkage::Cforall
     536        );
     537
     538        auto mutDecl = ast::mutate( decl );
     539        mutDecl->members.push_back( field );
     540
     541        return {mutDecl, field};
     542}
     543
     544void ConcurrentSueKeyword::addGetRoutines(
     545                const ast::ObjectDecl * field, const ast::FunctionDecl * forward ) {
      546        // Say it is generated at the "same" place as the forward declaration.
     547        const CodeLocation & location = forward->location;
     548
     549        const ast::DeclWithType * param = forward->params.front();
     550        ast::Stmt * stmt = new ast::ReturnStmt( location,
     551                new ast::AddressExpr( location,
     552                        new ast::MemberExpr( location,
     553                                field,
     554                                new ast::CastExpr( location,
     555                                        new ast::VariableExpr( location, param ),
     556                                        ast::deepCopy( param->get_type()->stripReferences() ),
     557                                        ast::ExplicitCast
     558                                )
     559                        )
     560                )
     561        );
     562
     563        ast::FunctionDecl * decl = ast::deepCopy( forward );
     564        decl->stmts = new ast::CompoundStmt( location, { stmt } );
     565        declsToAddAfter.push_back( decl );
     566}
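// The completed getter is roughly equivalent to (hedged sketch, using the
// coroutine keyword as an example; C is the user's aggregate and __cor the
// added field, both names illustrative):
//   static inline coroutine$ * get_coroutine( C & this ) {
//       return &((C)this).__cor;   // explicit cast drops the reference, matching stripReferences()
//   }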
     567
     568void ConcurrentSueKeyword::addLockUnlockRoutines(
     569                const ast::StructDecl * decl ) {
     570        // This should only be used on monitors.
     571        assert( ast::AggregateDecl::Monitor == cast_target );
     572
     573        const CodeLocation & location = decl->location;
     574
     575        // The parameter for both routines.
     576        ast::ObjectDecl * this_decl = new ast::ObjectDecl(
     577                location,
     578                "this",
     579                new ast::ReferenceType( new ast::StructInstType( decl ) ),
     580                nullptr,
     581                ast::Storage::Classes(),
     582                ast::Linkage::Cforall
     583        );
     584
     585        ast::FunctionDecl * lock_decl = new ast::FunctionDecl(
     586                location,
     587                "lock",
     588                { /* forall */ },
     589                {
     590                        // Copy the declaration of this.
     591                        ast::deepCopy( this_decl ),
     592                },
     593                { /* returns */ },
     594                nullptr,
     595                ast::Storage::Static,
     596                ast::Linkage::Cforall,
     597                { /* attributes */ },
     598                ast::Function::Inline
     599        );
     600        lock_decl = fixupGenerics( lock_decl, decl );
     601
     602        lock_decl->stmts = new ast::CompoundStmt( location, {
     603                new ast::ExprStmt( location,
     604                        new ast::UntypedExpr( location,
     605                                new ast::NameExpr( location, "lock" ),
     606                                {
     607                                        new ast::UntypedExpr( location,
     608                                                new ast::NameExpr( location, "get_monitor" ),
     609                                                { new ast::VariableExpr( location,
     610                                                        InitTweak::getParamThis( lock_decl ) ) }
     611                                        )
     612                                }
     613                        )
     614                )
     615        } );
     616
     617        ast::FunctionDecl * unlock_decl = new ast::FunctionDecl(
     618                location,
     619                "unlock",
     620                { /* forall */ },
     621                {
     622                        // Last use, consume the declaration of this.
     623                        this_decl,
     624                },
     625                { /* returns */ },
     626                nullptr,
     627                ast::Storage::Static,
     628                ast::Linkage::Cforall,
     629                { /* attributes */ },
     630                ast::Function::Inline
     631        );
     632        unlock_decl = fixupGenerics( unlock_decl, decl );
     633
     634        unlock_decl->stmts = new ast::CompoundStmt( location, {
     635                new ast::ExprStmt( location,
     636                        new ast::UntypedExpr( location,
     637                                new ast::NameExpr( location, "unlock" ),
     638                                {
     639                                        new ast::UntypedExpr( location,
     640                                                new ast::NameExpr( location, "get_monitor" ),
     641                                                { new ast::VariableExpr( location,
     642                                                        InitTweak::getParamThis( unlock_decl ) ) }
     643                                        )
     644                                }
     645                        )
     646                )
     647        } );
     648
     649        declsToAddAfter.push_back( lock_decl );
     650        declsToAddAfter.push_back( unlock_decl );
     651}
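// The generated forwarding routines are equivalent to (hedged sketch, for
// a monitor type M):
//   static inline void lock( M & this ) { lock( get_monitor( this ) ); }
//   static inline void unlock( M & this ) { unlock( get_monitor( this ) ); }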
     652
     653
     654// --------------------------------------------------------------------------
     655struct SuspendKeyword final :
     656                public ast::WithStmtsToAdd<>, public ast::WithGuards {
     657        SuspendKeyword() = default;
     658        virtual ~SuspendKeyword() = default;
     659
     660        void previsit( const ast::FunctionDecl * );
     661        const ast::DeclWithType * postvisit( const ast::FunctionDecl * );
     662        const ast::Stmt * postvisit( const ast::SuspendStmt * );
     663
     664private:
     665        bool is_real_suspend( const ast::FunctionDecl * );
     666
     667        const ast::Stmt * make_generator_suspend( const ast::SuspendStmt * );
     668        const ast::Stmt * make_coroutine_suspend( const ast::SuspendStmt * );
     669
     670        struct LabelPair {
     671                ast::Label obj;
     672                int idx;
     673        };
     674
      675        LabelPair make_label( const ast::Stmt * stmt ) {
     676                labels.push_back( ControlStruct::newLabel( "generator", stmt ) );
     677                return { labels.back(), int(labels.size()) };
     678        }
     679
     680        const ast::DeclWithType * in_generator = nullptr;
     681        const ast::FunctionDecl * decl_suspend = nullptr;
     682        std::vector<ast::Label> labels;
     683};
     684
     685void SuspendKeyword::previsit( const ast::FunctionDecl * decl ) {
     686        GuardValue( in_generator ); in_generator = nullptr;
     687
     688        // If it is the real suspend, grab it if we don't have one already.
     689        if ( is_real_suspend( decl ) ) {
     690                decl_suspend = decl_suspend ? decl_suspend : decl;
     691                return;
     692        }
     693
     694        // Otherwise check if this is a generator main and, if so, handle it.
     695        auto param = isMainFor( decl, ast::AggregateDecl::Generator );
     696        if ( !param ) return;
     697
     698        if ( 0 != decl->returns.size() ) {
     699                SemanticError( decl->location, "Generator main must return void" );
     700        }
     701
     702        in_generator = param;
     703        GuardValue( labels ); labels.clear();
     704}
     705
     706const ast::DeclWithType * SuspendKeyword::postvisit(
     707                const ast::FunctionDecl * decl ) {
     708        // Only modify a full definition of a generator with states.
     709        if ( !decl->stmts || !in_generator || labels.empty() ) return decl;
     710
     711        const CodeLocation & location = decl->location;
     712
     713        // Create a new function body:
     714        // static void * __generator_labels[] = {&&s0, &&s1, ...};
     715        // void * __generator_label = __generator_labels[GEN.__generator_state];
     716        // goto * __generator_label;
     717        // s0: ;
     718        // OLD_BODY
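	// For a generator main with two suspends, the rewritten body behaves
	// like this plain C computed-goto dispatch (hedged sketch; label names
	// follow the "__L%d__generator" pattern produced by newLabel):
	//   static void * __generator_labels[] = { &&__L0__generator, &&__L1__generator };
	//   goto * __generator_labels[ GEN.__generator_state ];
	//   __L0__generator: ;   // state 0: first entry, falls into OLD_BODY
	//   ... each suspend sets __generator_state and returns, leaving a
	//   __LN__generator label behind as its resume point ...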
     719
     720        // This is the null statement inserted right before the body.
     721        ast::NullStmt * noop = new ast::NullStmt( location );
     722        noop->labels.push_back( ControlStruct::newLabel( "generator", noop ) );
     723        const ast::Label & first_label = noop->labels.back();
     724
     725        // Add each label to the init, starting with the first label.
     726        std::vector<ast::ptr<ast::Init>> inits = {
     727                new ast::SingleInit( location,
     728                        new ast::LabelAddressExpr( location, copy( first_label ) ) ) };
     729        // Then go through all the stored labels, and clear the store.
     730        for ( auto && label : labels ) {
     731                inits.push_back( new ast::SingleInit( label.location,
     732                        new ast::LabelAddressExpr( label.location, std::move( label )
     733                        ) ) );
     734        }
     735        labels.clear();
     736        // Then construct the initializer itself.
     737        auto init = new ast::ListInit( location, std::move( inits ) );
     738
     739        ast::ObjectDecl * generatorLabels = new ast::ObjectDecl(
     740                location,
     741                "__generator_labels",
     742                new ast::ArrayType(
     743                        new ast::PointerType( new ast::VoidType() ),
     744                        nullptr,
     745                        ast::FixedLen,
     746                        ast::DynamicDim
     747                ),
     748                init,
     749                ast::Storage::Classes(),
     750                ast::Linkage::AutoGen
     751        );
     752
     753        ast::ObjectDecl * generatorLabel = new ast::ObjectDecl(
     754                location,
     755                "__generator_label",
     756                new ast::PointerType( new ast::VoidType() ),
     757                new ast::SingleInit( location,
     758                        new ast::UntypedExpr( location,
     759                                new ast::NameExpr( location, "?[?]" ),
     760                                {
     761                                        // TODO: Could be a variable expr.
     762                                        new ast::NameExpr( location, "__generator_labels" ),
     763                                        new ast::UntypedMemberExpr( location,
     764                                                new ast::NameExpr( location, "__generator_state" ),
     765                                                new ast::VariableExpr( location, in_generator )
     766                                        )
     767                                }
     768                        )
     769                ),
     770                ast::Storage::Classes(),
     771                ast::Linkage::AutoGen
     772        );
     773
     774        ast::BranchStmt * theGoTo = new ast::BranchStmt(
     775                location, new ast::VariableExpr( location, generatorLabel )
     776        );
     777
      778        // The labelled noop goes here, in order, marking the start of the original body.
     779
     780        ast::CompoundStmt * body = new ast::CompoundStmt( location, {
     781                { new ast::DeclStmt( location, generatorLabels ) },
     782                { new ast::DeclStmt( location, generatorLabel ) },
     783                { theGoTo },
     784                { noop },
     785                { decl->stmts },
     786        } );
     787
     788        auto mutDecl = ast::mutate( decl );
     789        mutDecl->stmts = body;
     790        return mutDecl;
     791}
     792
     793const ast::Stmt * SuspendKeyword::postvisit( const ast::SuspendStmt * stmt ) {
     794        switch ( stmt->type ) {
     795        case ast::SuspendStmt::None:
      796                // Use the context to determine the implicit target.
     797                if ( in_generator ) {
     798                        return make_generator_suspend( stmt );
     799                } else {
     800                        return make_coroutine_suspend( stmt );
     801                }
     802        case ast::SuspendStmt::Coroutine:
     803                return make_coroutine_suspend( stmt );
     804        case ast::SuspendStmt::Generator:
     805                // Generator suspends must be directly in a generator.
      806                if ( !in_generator ) SemanticError( stmt->location, "'suspend generator' must be used inside the main of a generator type." );
     807                return make_generator_suspend( stmt );
     808        }
     809        assert( false );
     810        return stmt;
     811}
     812
     813/// Find the real/official suspend declaration.
     814bool SuspendKeyword::is_real_suspend( const ast::FunctionDecl * decl ) {
     815        return ( !decl->linkage.is_mangled
     816                && 0 == decl->params.size()
     817                && 0 == decl->returns.size()
     818                && "__cfactx_suspend" == decl->name );
     819}
     820
     821const ast::Stmt * SuspendKeyword::make_generator_suspend(
     822                const ast::SuspendStmt * stmt ) {
     823        assert( in_generator );
     824        // Target code is:
     825        //   GEN.__generator_state = X;
     826        //   THEN
     827        //   return;
     828        //   __gen_X:;
     829
     830        const CodeLocation & location = stmt->location;
     831
     832        LabelPair label = make_label( stmt );
     833
     834        // This is the context saving statement.
     835        stmtsToAddBefore.push_back( new ast::ExprStmt( location,
     836                new ast::UntypedExpr( location,
     837                        new ast::NameExpr( location, "?=?" ),
     838                        {
     839                                new ast::UntypedMemberExpr( location,
     840                                        new ast::NameExpr( location, "__generator_state" ),
     841                                        new ast::VariableExpr( location, in_generator )
     842                                ),
     843                                ast::ConstantExpr::from_int( location, label.idx ),
     844                        }
     845                )
     846        ) );
     847
     848        // The THEN component is conditional (return is not).
     849        if ( stmt->then ) {
     850                stmtsToAddBefore.push_back( stmt->then.get() );
     851        }
     852        stmtsToAddBefore.push_back( new ast::ReturnStmt( location, nullptr ) );
     853
     854        // The null statement replaces the old suspend statement.
     855        return new ast::NullStmt( location, { label.obj } );
     856}
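// Putting it together, a "suspend" inside a generator main lowers roughly to
// (hedged sketch; 3 stands for this suspend's label index and GEN for the
// generator parameter):
//   GEN.__generator_state = 3;   // save the resume state
//   /* optional THEN block */
//   return;                      // leave main until the next resume
//   __L3__generator: ;           // resume point targeted by the dispatch goto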
     857
     858const ast::Stmt * SuspendKeyword::make_coroutine_suspend(
     859                const ast::SuspendStmt * stmt ) {
     860        // The only thing we need from the old statement is the location.
     861        const CodeLocation & location = stmt->location;
     862
     863        if ( !decl_suspend ) {
      864                SemanticError( location, "suspend keyword applied to a coroutine requires coroutines to be in scope, add #include <coroutine.hfa>\n" );
     865        }
     866        if ( stmt->then ) {
      867                SemanticError( location, "A compound statement following a coroutine suspend is not implemented." );
     868        }
     869
     870        return new ast::ExprStmt( location,
     871                new ast::UntypedExpr( location,
     872                        ast::VariableExpr::functionPointer( location, decl_suspend ) )
     873        );
     874}
     875
     876// --------------------------------------------------------------------------
     877struct MutexKeyword final : public ast::WithDeclsToAdd<> {
    40878        const ast::FunctionDecl * postvisit( const ast::FunctionDecl * decl );
    41879        void postvisit( const ast::StructDecl * decl );
     
    50888        ast::CompoundStmt * addStatements( const ast::CompoundStmt * body, const std::vector<ast::ptr<ast::Expr>> & args );
    51889        ast::CompoundStmt * addThreadDtorStatements( const ast::FunctionDecl* func, const ast::CompoundStmt * body, const std::vector<const ast::DeclWithType *> & args );
    52 
     890        ast::ExprStmt * genVirtLockUnlockExpr( const std::string & fnName, ast::ptr<ast::Expr> expr, const CodeLocation & location, ast::Expr * param);
     891        ast::IfStmt * genTypeDiscrimLockUnlock( const std::string & fnName, const std::vector<ast::ptr<ast::Expr>> & args, const CodeLocation & location, ast::UntypedExpr * thisParam );
    53892private:
    54893        const ast::StructDecl * monitor_decl = nullptr;
     
    59898
    60899        static ast::ptr<ast::Type> generic_func;
     900
     901        UniqueName mutex_func_namer = UniqueName("__lock_unlock_curr");
    61902};
    62903
     
    1601001
    1611002const ast::Stmt * MutexKeyword::postvisit( const ast::MutexStmt * stmt ) {
     1003        if ( !lock_guard_decl ) {
     1004                SemanticError( stmt->location, "mutex stmt requires a header, add #include <mutex_stmt.hfa>\n" );
     1005        }
    1621006        ast::CompoundStmt * body =
    1631007                        new ast::CompoundStmt( stmt->location, { stmt->stmt } );
    164         addStatements( body, stmt->mutexObjs );
    165         return body;
      1008
      1009        return addStatements( body, stmt->mutexObjs );
    1661010}
    1671011
     
    2511095                                {
    2521096                                        new ast::SingleInit( location,
    253                                                 new ast::AddressExpr(
     1097                                                new ast::AddressExpr( location,
    2541098                                                        new ast::VariableExpr( location, monitor ) ) ),
    2551099                                        new ast::SingleInit( location,
     
    3581202}
    3591203
      1204// Generates a cast from the stored void pointer to the appropriate lock type and dereferences it before calling lock or unlock on it.
      1205// Used to undo the type erasure introduced by storing all the lock pointers as void *.
     1206ast::ExprStmt * MutexKeyword::genVirtLockUnlockExpr( const std::string & fnName, ast::ptr<ast::Expr> expr, const CodeLocation & location, ast::Expr * param ) {
     1207        return new ast::ExprStmt( location,
     1208                new ast::UntypedExpr( location,
     1209                        new ast::NameExpr( location, fnName ), {
     1210                                ast::UntypedExpr::createDeref(
     1211                                        location,
     1212                                        new ast::CastExpr( location,
     1213                                                param,
     1214                                                new ast::PointerType( new ast::TypeofType( new ast::UntypedExpr(
     1215                                                        expr->location,
     1216                                                        new ast::NameExpr( expr->location, "__get_mutexstmt_lock_type" ),
     1217                                                        { expr }
     1218                                                ) ) ),
     1219                                                ast::GeneratedFlag::ExplicitCast
     1220                                        )
     1221                                )
     1222                        }
     1223                )
     1224        );
     1225}
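// The generated call has roughly this shape (hedged sketch, for a mutex
// argument a and the stored pointer p):
//   lock( *(typeof(__get_mutexstmt_lock_type( a )) *)p );
// i.e. the void * is cast back to a pointer to the lock's real type and
// dereferenced before lock/unlock is applied.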
     1226
     1227ast::IfStmt * MutexKeyword::genTypeDiscrimLockUnlock( const std::string & fnName, const std::vector<ast::ptr<ast::Expr>> & args, const CodeLocation & location, ast::UntypedExpr * thisParam ) {
     1228        ast::IfStmt * outerLockIf = nullptr;
     1229        ast::IfStmt * lastLockIf = nullptr;
     1230
      1231        // Adds an if/else-if clause for each lock, recovering its type from the void pointer by discriminating on the pointer's address.
     1232        for ( long unsigned int i = 0; i < args.size(); i++ ) {
     1233               
     1234                ast::UntypedExpr * ifCond = new ast::UntypedExpr( location,
     1235                        new ast::NameExpr( location, "?==?" ), {
     1236                                ast::deepCopy( thisParam ),
     1237                                new ast::CastExpr( location, new ast::AddressExpr( location, args.at(i) ), new ast::PointerType( new ast::VoidType() ))
     1238                        }
     1239                );
     1240
     1241                ast::IfStmt * currLockIf = new ast::IfStmt(
     1242                        location,
     1243                        ifCond,
     1244                        genVirtLockUnlockExpr( fnName, args.at(i), location, ast::deepCopy( thisParam ) )
     1245                );
     1246               
     1247                if ( i == 0 ) {
     1248                        outerLockIf = currLockIf;
     1249                } else {
     1250                        // add ifstmt to else of previous stmt
     1251                        lastLockIf->else_ = currLockIf;
     1252                }
     1253
     1254                lastLockIf = currLockIf;
     1255        }
     1256        return outerLockIf;
     1257}
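// For mutex( a, b ) the returned chain is roughly (hedged sketch, where p
// is the current void * element of the __monitors array):
//   if ( p == (void *)&a )      lock( *(typeof(__get_mutexstmt_lock_type( a )) *)p );
//   else if ( p == (void *)&b ) lock( *(typeof(__get_mutexstmt_lock_type( b )) *)p );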
     1258
    3601259ast::CompoundStmt * MutexKeyword::addStatements(
    3611260                const ast::CompoundStmt * body,
    3621261                const std::vector<ast::ptr<ast::Expr>> & args ) {
    363         ast::CompoundStmt * mutBody = ast::mutate( body );
    3641262
    3651263        // Code is generated near the beginning of the compound statement.
    366         const CodeLocation & location = mutBody->location;
     1264        const CodeLocation & location = body->location;
     1265
      1266        // Final body to return.
     1267        ast::CompoundStmt * newBody = new ast::CompoundStmt( location );
     1268
     1269        // std::string lockFnName = mutex_func_namer.newName();
     1270        // std::string unlockFnName = mutex_func_namer.newName();
    3671271
    3681272        // Make pointer to the monitors.
     
    3721276                new ast::ArrayType(
    3731277                        new ast::PointerType(
    374                                 new ast::TypeofType(
    375                                         new ast::UntypedExpr(
    376                                                 location,
    377                                                 new ast::NameExpr( location, "__get_type" ),
    378                                                 { args.front() }
    379                                         )
    380                                 )
     1278                                new ast::VoidType()
    3811279                        ),
    3821280                        ast::ConstantExpr::from_ulong( location, args.size() ),
     
    3921290                                                new ast::UntypedExpr(
    3931291                                                        expr->location,
    394                                                         new ast::NameExpr( expr->location, "__get_ptr" ),
     1292                                                        new ast::NameExpr( expr->location, "__get_mutexstmt_lock_ptr" ),
    3951293                                                        { expr }
    3961294                                                )
     
    4051303        ast::StructInstType * lock_guard_struct =
    4061304                        new ast::StructInstType( lock_guard_decl );
    407         ast::TypeExpr * lock_type_expr = new ast::TypeExpr(
    408                 location,
    409                 new ast::TypeofType(
    410                         new ast::UntypedExpr(
    411                                 location,
    412                                 new ast::NameExpr( location, "__get_type" ),
    413                                 { args.front() }
    414                         )
    415                 )
    416         );
    417 
    418         lock_guard_struct->params.push_back( lock_type_expr );
    419 
    420         // In reverse order:
     1305
      1306        // Use try statements to lock and finally clauses to unlock.
     1307        ast::TryStmt * outerTry = nullptr;
      1308        ast::TryStmt * currentTry = nullptr;
     1309        ast::CompoundStmt * lastBody = nullptr;
     1310
      1311        // Adds a nested try stmt for each lock being acquired.
     1312        for ( long unsigned int i = 0; i < args.size(); i++ ) {
     1313                ast::UntypedExpr * innerAccess = new ast::UntypedExpr(
     1314                        location,
     1315                        new ast::NameExpr( location,"?[?]" ), {
     1316                                new ast::NameExpr( location, "__monitors" ),
     1317                                ast::ConstantExpr::from_int( location, i )
     1318                        }
     1319                );
     1320
     1321                // make the try body
     1322                ast::CompoundStmt * currTryBody = new ast::CompoundStmt( location );
     1323                ast::IfStmt * lockCall = genTypeDiscrimLockUnlock( "lock", args, location, innerAccess );
     1324                currTryBody->push_back( lockCall );
     1325
     1326                // make the finally stmt
     1327                ast::CompoundStmt * currFinallyBody = new ast::CompoundStmt( location );
     1328                ast::IfStmt * unlockCall = genTypeDiscrimLockUnlock( "unlock", args, location, innerAccess );
     1329                currFinallyBody->push_back( unlockCall );
     1330
     1331                // construct the current try
     1332                currentTry = new ast::TryStmt(
     1333                        location,
     1334                        currTryBody,
     1335                        {},
     1336                        new ast::FinallyClause( location, currFinallyBody )
     1337                );
     1338                if ( i == 0 ) outerTry = currentTry;
     1339                else {
      1340                        // nest this try inside the body of the previous try
     1341                        lastBody->push_back( currentTry );
     1342                }
     1343                lastBody = currTryBody;
     1344        }
     1345
      1346        // Push the user body into the innermost try body.
     1347        if ( lastBody != nullptr ) {
     1348                lastBody->push_back( body );
     1349                newBody->push_front( outerTry );
     1350        }
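	// The resulting nesting for mutex( a, b ) is therefore roughly
	// (hedged sketch):
	//   try { /* lock __monitors[0] */
	//       try { /* lock __monitors[1] */
	//           /* user body */
	//       } finally { /* unlock __monitors[1] */ }
	//   } finally { /* unlock __monitors[0] */ }
	// so acquired locks are released during unwinding if the body throws.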
     1351
    4211352        // monitor_guard_t __guard = { __monitors, # };
    422         mutBody->push_front(
     1353        newBody->push_front(
    4231354                new ast::DeclStmt(
    4241355                        location,
     
    4471378
    4481379        // monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
    449         mutBody->push_front( new ast::DeclStmt( location, monitors ) );
    450 
    451         return mutBody;
     1380        newBody->push_front( new ast::DeclStmt( location, monitors ) );
     1381
     1382        // // The parameter for both __lock_curr/__unlock_curr routines.
     1383        // ast::ObjectDecl * this_decl = new ast::ObjectDecl(
     1384        //      location,
     1385        //      "this",
     1386        //      new ast::PointerType( new ast::VoidType() ),
     1387        //      nullptr,
     1388        //      {},
     1389        //      ast::Linkage::Cforall
     1390        // );
     1391
     1392        // ast::FunctionDecl * lock_decl = new ast::FunctionDecl(
     1393        //      location,
     1394        //      lockFnName,
     1395        //      { /* forall */ },
     1396        //      {
     1397        //              // Copy the declaration of this.
     1398        //              this_decl,
     1399        //      },
     1400        //      { /* returns */ },
     1401        //      nullptr,
     1402        //      0,
     1403        //      ast::Linkage::Cforall,
     1404        //      { /* attributes */ },
     1405        //      ast::Function::Inline
     1406        // );
     1407
     1408        // ast::FunctionDecl * unlock_decl = new ast::FunctionDecl(
     1409        //      location,
     1410        //      unlockFnName,
     1411        //      { /* forall */ },
     1412        //      {
     1413        //              // Copy the declaration of this.
     1414        //              ast::deepCopy( this_decl ),
     1415        //      },
     1416        //      { /* returns */ },
     1417        //      nullptr,
     1418        //      0,
     1419        //      ast::Linkage::Cforall,
     1420        //      { /* attributes */ },
     1421        //      ast::Function::Inline
     1422        // );
     1423
     1424        // ast::IfStmt * outerLockIf = nullptr;
     1425        // ast::IfStmt * outerUnlockIf = nullptr;
     1426        // ast::IfStmt * lastLockIf = nullptr;
     1427        // ast::IfStmt * lastUnlockIf = nullptr;
     1428
     1429        // //adds an if/elif clause for each lock to assign type from void ptr based on ptr address
     1430        // for ( long unsigned int i = 0; i < args.size(); i++ ) {
     1431        //      ast::VariableExpr * thisParam = new ast::VariableExpr( location, InitTweak::getParamThis( lock_decl ) );
     1432        //      ast::UntypedExpr * ifCond = new ast::UntypedExpr( location,
     1433        //              new ast::NameExpr( location, "?==?" ), {
     1434        //                      thisParam,
     1435        //                      new ast::CastExpr( location, new ast::AddressExpr( location, args.at(i) ), new ast::PointerType( new ast::VoidType() ))
     1436        //              }
     1437        //      );
     1438
     1439        //      ast::IfStmt * currLockIf = new ast::IfStmt(
     1440        //              location,
     1441        //              ast::deepCopy( ifCond ),
     1442        //              genVirtLockUnlockExpr( "lock", args.at(i), location, ast::deepCopy( thisParam ) )
     1443        //      );
     1444
     1445        //      ast::IfStmt * currUnlockIf = new ast::IfStmt(
     1446        //              location,
     1447        //              ifCond,
     1448        //              genVirtLockUnlockExpr( "unlock", args.at(i), location, ast::deepCopy( thisParam ) )
     1449        //      );
     1450               
     1451        //      if ( i == 0 ) {
     1452        //              outerLockIf = currLockIf;
     1453        //              outerUnlockIf = currUnlockIf;
     1454        //      } else {
     1455        //              // add ifstmt to else of previous stmt
     1456        //              lastLockIf->else_ = currLockIf;
     1457        //              lastUnlockIf->else_ = currUnlockIf;
     1458        //      }
     1459
     1460        //      lastLockIf = currLockIf;
     1461        //      lastUnlockIf = currUnlockIf;
     1462        // }
     1463       
     1464        // // add pointer typing if/elifs to body of routines
     1465        // lock_decl->stmts = new ast::CompoundStmt( location, { outerLockIf } );
     1466        // unlock_decl->stmts = new ast::CompoundStmt( location, { outerUnlockIf } );
     1467
     1468        // // add routines to scope
     1469        // declsToAddBefore.push_back( lock_decl );
     1470        // declsToAddBefore.push_back( unlock_decl );
     1471
     1472        // newBody->push_front(new ast::DeclStmt( location, lock_decl ));
     1473        // newBody->push_front(new ast::DeclStmt( location, unlock_decl ));
     1474
     1475        return newBody;
    4521476}
    4531477
     
    5641588
    5651589// --------------------------------------------------------------------------
     1590// Interface Functions:
    5661591
    5671592void implementKeywords( ast::TranslationUnit & translationUnit ) {
    568         (void)translationUnit;
    569         assertf(false, "Apply Keywords not implemented." );
     1593        ast::Pass<ThreadKeyword>::run( translationUnit );
     1594        ast::Pass<CoroutineKeyword>::run( translationUnit );
     1595        ast::Pass<MonitorKeyword>::run( translationUnit );
     1596        ast::Pass<GeneratorKeyword>::run( translationUnit );
     1597        ast::Pass<SuspendKeyword>::run( translationUnit );
    5701598}
    5711599
  • src/ControlStruct/ExceptTranslateNew.cpp

    rba897d21 r2e9b59b  
    99// Author           : Andrew Beach
    1010// Created On       : Mon Nov  8 11:53:00 2021
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Jan 31 18:49:58 2022
    13 // Update Count     : 1
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Fri Mar 11 17:51:00 2022
     13// Update Count     : 2
    1414//
    1515
     
    2626namespace {
    2727
    28         typedef std::list<ast::CatchStmt*> CatchList;
    29 
    30         void split( CatchList& allHandlers, CatchList& terHandlers,
    31                                 CatchList& resHandlers ) {
    32                 while ( !allHandlers.empty() ) {
    33                         ast::CatchStmt * stmt = allHandlers.front();
    34                         allHandlers.pop_front();
    35                         if (stmt->kind == ast::ExceptionKind::Terminate) {
    36                                 terHandlers.push_back(stmt);
    37                         } else {
    38                                 resHandlers.push_back(stmt);
    39                         }
    40                 }
    41         }
     28        typedef std::list<ast::CatchClause*> CatchList;
    4229
    4330        void appendDeclStmt( ast::CompoundStmt * block, ast::DeclWithType * item ) {
     
    5845        {}
    5946
    60         void previsit( const ast::CatchStmt * stmt );
     47        void previsit( const ast::CatchClause * stmt );
    6148        const ast::Stmt * postvisit( const ast::ThrowStmt * stmt );
    6249};
     
    10188}
    10289
    103 void TranslateThrowsCore::previsit( const ast::CatchStmt * stmt ) {
     90void TranslateThrowsCore::previsit( const ast::CatchClause * stmt ) {
    10491        // Validate the statement's form.
    10592        const ast::ObjectDecl * decl = stmt->decl.as<ast::ObjectDecl>();
     
    160147        ast::FunctionDecl * create_terminate_catch( CatchList &handlers );
    161148        ast::CompoundStmt * create_single_matcher(
    162                 const ast::DeclWithType * except_obj, ast::CatchStmt * modded_handler );
     149                const ast::DeclWithType * except_obj, ast::CatchClause * modded_handler );
    163150        ast::FunctionDecl * create_terminate_match( CatchList &handlers );
    164151        ast::CompoundStmt * create_terminate_caller( CodeLocation loc, ast::FunctionDecl * try_wrapper,
     
    171158        ast::Stmt * create_resume_rethrow( const ast::ThrowStmt * throwStmt );
    172159
    173         // Types used in translation, make sure to use clone.
     160        // Types used in translation, first group are internal.
     161        ast::ObjectDecl * make_index_object( CodeLocation const & ) const;
     162        ast::ObjectDecl * make_exception_object( CodeLocation const & ) const;
     163        ast::ObjectDecl * make_bool_object( CodeLocation const & ) const;
     164        ast::ObjectDecl * make_voidptr_object( CodeLocation const & ) const;
     165        ast::ObjectDecl * make_unused_index_object( CodeLocation const & ) const;
    174166        // void (*function)();
    175         ast::FunctionDecl * try_func_t;
     167        ast::FunctionDecl * make_try_function( CodeLocation const & ) const;
    176168        // void (*function)(int, exception);
    177         ast::FunctionDecl * catch_func_t;
     169        ast::FunctionDecl * make_catch_function( CodeLocation const & ) const;
    178170        // int (*function)(exception);
    179         ast::FunctionDecl * match_func_t;
     171        ast::FunctionDecl * make_match_function( CodeLocation const & ) const;
    180172        // bool (*function)(exception);
    181         ast::FunctionDecl * handle_func_t;
     173        ast::FunctionDecl * make_handle_function( CodeLocation const & ) const;
    182174        // void (*function)(__attribute__((unused)) void *);
    183         ast::FunctionDecl * finally_func_t;
    184 
    185         ast::StructInstType * create_except_type() {
    186                 assert( except_decl );
    187                 return new ast::StructInstType( except_decl );
    188         }
    189         void init_func_types();
     175        ast::FunctionDecl * make_finally_function( CodeLocation const & ) const;
    190176
    191177public:
     
    199185};
    200186
    201 void TryMutatorCore::init_func_types() {
     187ast::ObjectDecl * TryMutatorCore::make_index_object(
     188                CodeLocation const & location ) const {
     189        return new ast::ObjectDecl(
     190                location,
     191                "__handler_index",
     192                new ast::BasicType(ast::BasicType::SignedInt),
     193                nullptr, //init
     194                ast::Storage::Classes{},
     195                ast::Linkage::Cforall
     196                );
     197}
     198
     199ast::ObjectDecl * TryMutatorCore::make_exception_object(
     200                CodeLocation const & location ) const {
    202201        assert( except_decl );
    203 
    204         ast::ObjectDecl index_obj(
    205                 {},
    206                 "__handler_index",
    207                 new ast::BasicType(ast::BasicType::SignedInt)
    208                 );
    209         ast::ObjectDecl exception_obj(
    210                 {},
     202        return new ast::ObjectDecl(
     203                location,
    211204                "__exception_inst",
    212205                new ast::PointerType(
    213206                        new ast::StructInstType( except_decl )
    214207                        ),
    215                 NULL
    216                 );
    217         ast::ObjectDecl bool_obj(
    218                 {},
     208                nullptr, //init
     209                ast::Storage::Classes{},
     210                ast::Linkage::Cforall
     211                );
     212}
     213
     214ast::ObjectDecl * TryMutatorCore::make_bool_object(
     215                CodeLocation const & location ) const {
     216        return new ast::ObjectDecl(
     217                location,
    219218                "__ret_bool",
    220219                new ast::BasicType( ast::BasicType::Bool ),
     
    225224                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
    226225                );
    227         ast::ObjectDecl voidptr_obj(
    228                 {},
     226}
     227
     228ast::ObjectDecl * TryMutatorCore::make_voidptr_object(
     229                CodeLocation const & location ) const {
     230        return new ast::ObjectDecl(
     231                location,
    229232                "__hook",
    230233                new ast::PointerType(
     
    237240                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
    238241                );
    239 
    240         ast::ObjectDecl unused_index_obj(
    241                 {},
     242}
     243
     244ast::ObjectDecl * TryMutatorCore::make_unused_index_object(
     245                CodeLocation const & location ) const {
     246        return new ast::ObjectDecl(
     247                location,
    242248                "__handler_index",
    243249                new ast::BasicType(ast::BasicType::SignedInt),
     
    248254                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
    249255        );
    250         //unused_index_obj->attributes.push_back( new Attribute( "unused" ) );
    251 
    252         try_func_t = new ast::FunctionDecl(
    253                 {},
     256}
     257
     258ast::FunctionDecl * TryMutatorCore::make_try_function(
     259                CodeLocation const & location ) const {
     260        return new ast::FunctionDecl(
     261                location,
    254262                "try",
    255263                {}, //forall
     
    260268                ast::Linkage::Cforall
    261269        );
    262 
    263         catch_func_t = new ast::FunctionDecl(
    264                 {},
     270}
     271
     272ast::FunctionDecl * TryMutatorCore::make_catch_function(
     273                CodeLocation const & location ) const {
     274        return new ast::FunctionDecl(
     275                location,
    265276                "catch",
    266277                {}, //forall
    267                 {ast::deepCopy(&index_obj), ast::deepCopy(&exception_obj)},//param
     278                { make_index_object( location ), make_exception_object( location ) },
    268279                {}, //return void
    269280                nullptr,
     
    271282                ast::Linkage::Cforall
    272283        );
    273 
    274         match_func_t = new ast::FunctionDecl(
    275                 {},
     284}
     285
     286ast::FunctionDecl * TryMutatorCore::make_match_function(
     287                CodeLocation const & location ) const {
     288        return new ast::FunctionDecl(
     289                location,
    276290                "match",
    277291                {}, //forall
    278                 {ast::deepCopy(&exception_obj)},
    279                 {ast::deepCopy(&unused_index_obj)},
     292                { make_exception_object( location ) },
     293                { make_unused_index_object( location ) },
    280294                nullptr,
    281295                ast::Storage::Classes{},
    282296                ast::Linkage::Cforall
    283297        );
    284 
    285         handle_func_t = new ast::FunctionDecl(
    286                 {},
     298}
     299
     300ast::FunctionDecl * TryMutatorCore::make_handle_function(
     301                CodeLocation const & location ) const {
     302        return new ast::FunctionDecl(
     303                location,
    287304                "handle",
    288305                {}, //forall
    289                 {ast::deepCopy(&exception_obj)},
    290                 {ast::deepCopy(&bool_obj)},
     306                { make_exception_object( location ) },
     307                { make_bool_object( location ) },
    291308                nullptr,
    292309                ast::Storage::Classes{},
    293310                ast::Linkage::Cforall
    294311        );
    295 
    296         finally_func_t = new ast::FunctionDecl(
    297                 {},
     312}
     313
     314ast::FunctionDecl * TryMutatorCore::make_finally_function(
     315                CodeLocation const & location ) const {
     316        return new ast::FunctionDecl(
     317                location,
    298318                "finally",
    299319                {}, //forall
    300                 {ast::deepCopy(&voidptr_obj)},
     320                { make_voidptr_object( location ) },
    301321                {}, //return void
    302322                nullptr,
     
    304324                ast::Linkage::Cforall
    305325        );
    306 
    307         //catch_func_t.get_parameters().push_back( index_obj.clone() );
    308         //catch_func_t.get_parameters().push_back( exception_obj.clone() );
    309         //match_func_t.get_returnVals().push_back( unused_index_obj );
    310         //match_func_t.get_parameters().push_back( exception_obj.clone() );
    311         //handle_func_t.get_returnVals().push_back( bool_obj.clone() );
    312         //handle_func_t.get_parameters().push_back( exception_obj.clone() );
    313         //finally_func_t.get_parameters().push_back( voidptr_obj.clone() );
    314326}
    315327
    316328// TryStmt Mutation Helpers
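// For reference, the overall lowering outsources each piece of a try
// statement to generated functions with the signatures built above
// (hedged sketch; exception_t stands for the runtime exception struct):
//   try { B } catch ( E & e ) { H } finally { F }
// becomes, roughly:
//   void try() { B }
//   int match( exception_t * e ) { return /* e is an E ? */ 1 : 0; }
//   void catch( int index, exception_t * e ) { switch ( index ) { case 1: H; return; } }
//   void finally( __attribute__((unused)) void * hook ) { F }
// which the EHM runtime then drives.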
    317 
    318 /*
    319 ast::CompoundStmt * TryMutatorCore::take_try_block( ast::TryStmt *tryStmt ) {
    320         ast::CompoundStmt * block = tryStmt->body;
    321         tryStmt->body = nullptr;
    322         return block;
    323 }
    324 */
    325329
    326330ast::FunctionDecl * TryMutatorCore::create_try_wrapper(
    327331                const ast::CompoundStmt *body ) {
    328332
    329         ast::FunctionDecl * ret = ast::deepCopy(try_func_t);
     333        ast::FunctionDecl * ret = make_try_function( body->location );
    330334        ret->stmts = body;
    331335        return ret;
     
    334338ast::FunctionDecl * TryMutatorCore::create_terminate_catch(
    335339                CatchList &handlers ) {
    336         std::vector<ast::ptr<ast::Stmt>> handler_wrappers;
     340        std::vector<ast::ptr<ast::CaseClause>> handler_wrappers;
    337341
    338342        assert (!handlers.empty());
    339343        const CodeLocation loc = handlers.front()->location;
    340344
    341         ast::FunctionDecl * func_t = ast::deepCopy(catch_func_t);
     345        ast::FunctionDecl * func_t = make_catch_function( loc );
    342346        const ast::DeclWithType * index_obj = func_t->params.front();
    343347        const ast::DeclWithType * except_obj = func_t->params.back();
     
    348352        for ( ; it != handlers.end() ; ++it ) {
    349353                ++index;
    350                 ast::CatchStmt * handler = *it;
     354                ast::CatchClause * handler = *it;
    351355                const CodeLocation loc = handler->location;
    352356
     
    386390                // handler->body = nullptr;
    387391
    388                 handler_wrappers.push_back( new ast::CaseStmt(loc,
     392                handler_wrappers.push_back( new ast::CaseClause(loc,
    389393                        ast::ConstantExpr::from_int(loc, index) ,
    390394                        { block, new ast::ReturnStmt( loc, nullptr ) }
     
    393397        // TODO: Some sort of meaningful error on default perhaps?
    394398
    395         /*
    396         std::list<Statement*> stmt_handlers;
    397         while ( !handler_wrappers.empty() ) {
    398                 stmt_handlers.push_back( handler_wrappers.front() );
    399                 handler_wrappers.pop_front();
    400         }
    401         */
    402 
    403         ast::SwitchStmt * handler_lookup = new ast::SwitchStmt(loc,
     399        ast::SwitchStmt * handler_lookup = new ast::SwitchStmt( loc,
    404400                new ast::VariableExpr( loc, index_obj ),
    405401                std::move(handler_wrappers)
    406402                );
    407         ast::CompoundStmt * body = new ast::CompoundStmt(loc,
    408                 {handler_lookup});
     403        ast::CompoundStmt * body = new ast::CompoundStmt( loc, {handler_lookup} );
    409404
    410405        func_t->stmts = body;
     
    415410// except_obj is referenced, modded_handler will be freed.
    416411ast::CompoundStmt * TryMutatorCore::create_single_matcher(
    417                 const ast::DeclWithType * except_obj, ast::CatchStmt * modded_handler ) {
     412                const ast::DeclWithType * except_obj, ast::CatchClause * modded_handler ) {
    418413        // {
    419414        //     `modded_handler.decl`
     
    433428
    434429        // Check for type match.
    435         ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc, 
     430        ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc,
    436431                new ast::VariableExpr(loc, except_obj ),
    437432                local_except->get_type()
     
    445440        }
    446441        // Construct the match condition.
    447         block->push_back( new ast::IfStmt(loc, 
     442        block->push_back( new ast::IfStmt(loc,
    448443                cond, modded_handler->body, nullptr ) );
    449444
    450         // xxx - how does this work in new ast
    451         //modded_handler->set_decl( nullptr );
    452         //modded_handler->set_cond( nullptr );
    453         //modded_handler->set_body( nullptr );
    454         //delete modded_handler;
    455445        return block;
    456446}
     
    467457        ast::CompoundStmt * body = new ast::CompoundStmt(loc);
    468458
    469         ast::FunctionDecl * func_t = ast::deepCopy(match_func_t);
     459        ast::FunctionDecl * func_t = make_match_function( loc );
    470460        const ast::DeclWithType * except_obj = func_t->params.back();
    471461
     
    475465        for ( it = handlers.begin() ; it != handlers.end() ; ++it ) {
    476466                ++index;
    477                 ast::CatchStmt * handler = *it;
     467                ast::CatchClause * handler = *it;
    478468
    479469                // Body should have been taken by create_terminate_catch.
     
    490480        }
    491481
    492         body->push_back( new ast::ReturnStmt(loc, 
     482        body->push_back( new ast::ReturnStmt(loc,
    493483                ast::ConstantExpr::from_int( loc, 0 ) ));
    494484
     
    525515        ast::CompoundStmt * body = new ast::CompoundStmt(loc);
    526516
    527         ast::FunctionDecl * func_t = ast::deepCopy(handle_func_t);
     517        ast::FunctionDecl * func_t = make_handle_function( loc );
    528518        const ast::DeclWithType * except_obj = func_t->params.back();
    529519
    530520        CatchList::iterator it;
    531521        for ( it = handlers.begin() ; it != handlers.end() ; ++it ) {
    532                 ast::CatchStmt * handler = *it;
     522                ast::CatchClause * handler = *it;
    533523                const CodeLocation loc = handler->location;
    534524                // Modifiy body.
    535525                ast::CompoundStmt * handling_code;
    536526                if (handler->body.as<ast::CompoundStmt>()) {
    537                         handling_code =
    538                         strict_dynamic_cast<ast::CompoundStmt*>( handler->body.get_and_mutate() );
     527                        handling_code = strict_dynamic_cast<ast::CompoundStmt*>(
     528                                handler->body.get_and_mutate() );
    539529                } else {
    540530                        handling_code = new ast::CompoundStmt(loc);
     
    597587                ast::TryStmt * tryStmt ) {
    598588        // void finally() { `finally->block` }
    599         const ast::FinallyStmt * finally = tryStmt->finally;
     589        const ast::FinallyClause * finally = tryStmt->finally;
    600590        const ast::CompoundStmt * body = finally->body;
    601591
    602         ast::FunctionDecl * func_t = ast::deepCopy(finally_func_t);
     592        ast::FunctionDecl * func_t = make_finally_function( tryStmt->location );
    603593        func_t->stmts = body;
    604594
    605         // finally->set_block( nullptr );
    606         // delete finally;
    607595        tryStmt->finally = nullptr;
    608 
    609596
    610597        return func_t;
     
    617604
    618605        const CodeLocation loc = finally_wrapper->location;
    619         // Make Cleanup Attribute.
    620         /*
    621         std::list< ast::Attribute * > attributes;
    622         {
    623                 std::list<  > attr_params;
    624                 attr_params.push_back( nameOf( finally_wrapper ) );
    625                 attributes.push_back( new Attribute( "cleanup", attr_params ) );
    626         }
    627         */
    628 
    629606        return new ast::ObjectDecl(
    630607                loc,
     
    644621        // return false;
    645622        const CodeLocation loc = throwStmt->location;
    646         ast::Stmt * result = new ast::ReturnStmt(loc, 
     623        ast::Stmt * result = new ast::ReturnStmt(loc,
    647624                ast::ConstantExpr::from_bool( loc, false )
    648625                );
    649626        result->labels = throwStmt->labels;
    650         // delete throwStmt; done by postvisit
    651627        return result;
    652628}
     
    660636                assert( nullptr == except_decl );
    661637                except_decl = structDecl;
    662                 init_func_types();
    663638        } else if ( structDecl->name == "__cfaehm_try_resume_node" ) {
    664639                assert( nullptr == node_decl );
     
    706681                }
    707682        }
    708         // split( mutStmt->handlers,
    709         //              termination_handlers, resumption_handlers );
    710683
    711684        if ( resumption_handlers.size() ) {
  • src/ControlStruct/LabelGeneratorNew.cpp

    rba897d21 r2e9b59b  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // LabelGenerator.cc --
     7// LabelGeneratorNew.cpp --
    88//
    99// Author           : Peter A. Buhr
    1010// Created On       : Mon May 18 07:44:20 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed Feb  2 09:11:17 2022
    13 // Update Count     : 72
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Mon Mar 28 10:03:00 2022
     13// Update Count     : 73
    1414//
    1515
     
    2525namespace ControlStruct {
    2626
    27 Label newLabel( const string & suffix, const Stmt * stmt ) {
     27enum { size = 128 };
     28
     29static int newLabelPre( char buf[size], const string & suffix ) {
    2830        static int current = 0;
    2931
    30         assertf( stmt, "CFA internal error: parameter statement cannot be null pointer" );
    31 
    32         enum { size = 128 };
    33         char buf[size];                                                                         // space to build label
    3432        int len = snprintf( buf, size, "__L%d__%s", current++, suffix.c_str() );
    3533        assertf( len < size, "CFA Internal error: buffer overflow creating label" );
     34        return len;
     35}
     36
     37static Label newLabelPost( char buf[size], const CodeLocation & location ) {
     38        Label ret_label( location, buf );
     39        ret_label.attributes.push_back( new Attribute( "unused" ) );
     40        return ret_label;
     41}
     42
     43Label newLabel( const string & suffix, const Stmt * stmt ) {
     44        // Buffer for string manipulation.
     45        char buf[size];
     46
     47        assertf( stmt, "CFA internal error: parameter statement cannot be null pointer" );
     48        int len = newLabelPre( buf, suffix );
    3649
    3750        // What does this do?
     
    4154        } // if
    4255
    43         Label ret_label( stmt->location, buf );
    44         ret_label.attributes.push_back( new Attribute( "unused" ) );
    45         return ret_label;
     56        return newLabelPost( buf, stmt->location );
     57}
     58
     59Label newLabel( const string & suffix, const CodeLocation & location ) {
     60        // Buffer for string manipulation.
     61        char buf[size];
     62
     63        newLabelPre( buf, suffix );
     64        return newLabelPost( buf, location );
    4665}
    4766
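
    Note: the split into newLabelPre/newLabelPost above lets a label be generated from either a
    statement or a bare CodeLocation while sharing one counter and the "unused" attribute. A minimal
    usage sketch (assuming a pass with an ast::Stmt * stmt in scope; names follow the diff):

        // Statement overload: the label's location is taken from the statement itself.
        ast::Label l1 = ControlStruct::newLabel( "fallThrough", stmt );
        // CodeLocation overload: for clauses that are no longer statements.
        ast::Label l2 = ControlStruct::newLabel( "fallThrough", stmt->location );

    Both calls draw from the same static counter, so generated names (__L0__fallThrough,
    __L1__fallThrough, ...) stay unique across call sites.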
  • src/ControlStruct/LabelGeneratorNew.hpp

    rba897d21 r2e9b59b  
    99// Author           : Rodolfo G. Esteves
    1010// Created On       : Mon May 18 07:44:20 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Jan 31 18:03:09 2022
    13 // Update Count     : 27
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Fri Mar 25 15:40:00 2022
     13// Update Count     : 28
    1414//
    1515
     
    1818#include <string>                                                                               // for string
    1919
    20 class Statement;
     20class CodeLocation;
    2121
    2222namespace ast {
     23        class Label;
    2324        class Stmt;
    24         class Label;
    2525} // namespace ast
    2626
    2727namespace ControlStruct {
    2828        ast::Label newLabel( const std::string &, const ast::Stmt * );
     29        ast::Label newLabel( const std::string &, const CodeLocation & );
    2930} // namespace ControlStruct
    3031
  • src/ControlStruct/MultiLevelExit.cpp

    rba897d21 r2e9b59b  
    99// Author           : Andrew Beach
    1010// Created On       : Mon Nov  1 13:48:00 2021
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed Feb  2 23:07:54 2022
    13 // Update Count     : 33
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Mon Mar 28  9:42:00 2022
     13// Update Count     : 34
    1414//
    1515
     
    4040
    4141        enum Kind {
    42                 ForStmtK, WhileDoStmtK, CompoundStmtK, IfStmtK, CaseStmtK, SwitchStmtK, TryStmtK
     42                ForStmtK, WhileDoStmtK, CompoundStmtK, IfStmtK, CaseClauseK, SwitchStmtK, TryStmtK
    4343        } kind;
    4444
     
    5858        Entry( const IfStmt *stmt, Label breakExit ) :
    5959                stmt( stmt ), firstTarget( breakExit ), secondTarget(), kind( IfStmtK ) {}
    60         Entry( const CaseStmt *stmt, Label fallExit ) :
    61                 stmt( stmt ), firstTarget( fallExit ), secondTarget(), kind( CaseStmtK ) {}
     60        Entry( const CaseClause *, const CompoundStmt *stmt, Label fallExit ) :
     61                stmt( stmt ), firstTarget( fallExit ), secondTarget(), kind( CaseClauseK ) {}
    6262        Entry( const SwitchStmt *stmt, Label breakExit, Label fallDefaultExit ) :
    6363                stmt( stmt ), firstTarget( breakExit ), secondTarget( fallDefaultExit ), kind( SwitchStmtK ) {}
     
    6666
    6767        bool isContTarget() const { return kind <= WhileDoStmtK; }
    68         bool isBreakTarget() const { return kind != CaseStmtK; }
    69         bool isFallTarget() const { return kind == CaseStmtK; }
     68        bool isBreakTarget() const { return kind != CaseClauseK; }
     69        bool isFallTarget() const { return kind == CaseClauseK; }
    7070        bool isFallDefaultTarget() const { return kind == SwitchStmtK; }
    7171
    7272        // These routines set a target as being "used" by a BranchStmt
    7373        Label useContExit() { assert( kind <= WhileDoStmtK ); return useTarget(secondTarget); }
    74         Label useBreakExit() { assert( kind != CaseStmtK ); return useTarget(firstTarget); }
    75         Label useFallExit() { assert( kind == CaseStmtK );  return useTarget(firstTarget); }
     74        Label useBreakExit() { assert( kind != CaseClauseK ); return useTarget(firstTarget); }
     75        Label useFallExit() { assert( kind == CaseClauseK );  return useTarget(firstTarget); }
    7676        Label useFallDefaultExit() { assert( kind == SwitchStmtK ); return useTarget(secondTarget); }
    7777
    7878        // These routines check if a specific label for a statement is used by a BranchStmt
    7979        bool isContUsed() const { assert( kind <= WhileDoStmtK ); return secondTarget.used; }
    80         bool isBreakUsed() const { assert( kind != CaseStmtK ); return firstTarget.used; }
    81         bool isFallUsed() const { assert( kind == CaseStmtK ); return firstTarget.used; }
     80        bool isBreakUsed() const { assert( kind != CaseClauseK ); return firstTarget.used; }
     81        bool isFallUsed() const { assert( kind == CaseClauseK ); return firstTarget.used; }
    8282        bool isFallDefaultUsed() const { assert( kind == SwitchStmtK ); return secondTarget.used; }
    8383        void seenDefault() { fallDefaultValid = false; }
     
    115115        void previsit( const ForStmt * );
    116116        const ForStmt * postvisit( const ForStmt * );
    117         const CaseStmt * previsit( const CaseStmt * );
     117        const CaseClause * previsit( const CaseClause * );
    118118        void previsit( const IfStmt * );
    119119        const IfStmt * postvisit( const IfStmt * );
     
    123123        void previsit( const TryStmt * );
    124124        void postvisit( const TryStmt * );
    125         void previsit( const FinallyStmt * );
     125        void previsit( const FinallyClause * );
    126126
    127127        const Stmt * mutateLoop( const Stmt * body, Entry& );
     
    288288                  auto switchStmt = strict_dynamic_cast< const SwitchStmt * >( targetEntry->stmt );
    289289                  bool foundDefault = false;
    290                   for ( auto subStmt : switchStmt->stmts ) {
    291                           const CaseStmt * caseStmt = subStmt.strict_as<CaseStmt>();
     290                  for ( auto caseStmt : switchStmt->cases ) {
    292291                          if ( caseStmt->isDefault() ) {
    293292                                  foundDefault = true;
     
    365364}
    366365
    367 const CaseStmt * MultiLevelExitCore::previsit( const CaseStmt * stmt ) {
     366const CaseClause * MultiLevelExitCore::previsit( const CaseClause * stmt ) {
    368367        visit_children = false;
    369368
     
    375374
    376375        // The cond may not exist, but if it does update it now.
    377         visitor->maybe_accept( stmt, &CaseStmt::cond );
     376        visitor->maybe_accept( stmt, &CaseClause::cond );
    378377
    379378        // Just save the mutated node for simplicity.
    380         CaseStmt * mutStmt = mutate( stmt );
    381 
    382         Label fallLabel = newLabel( "fallThrough", stmt );
     379        CaseClause * mutStmt = mutate( stmt );
     380
     381        Label fallLabel = newLabel( "fallThrough", stmt->location );
    383382        if ( ! mutStmt->stmts.empty() ) {
     383                // These should already be in a block.
     384                auto first = mutStmt->stmts.front().get_and_mutate();
     385                auto block = strict_dynamic_cast<CompoundStmt *>( first );
     386
    384387                // Ensure that the stack isn't corrupted by exceptions in fixBlock.
    385388                auto guard = makeFuncGuard(
    386                         [&](){ enclosing_control_structures.emplace_back( mutStmt, fallLabel ); },
     389                        [&](){ enclosing_control_structures.emplace_back( mutStmt, block, fallLabel ); },
    387390                        [this](){ enclosing_control_structures.pop_back(); }
    388391                        );
    389392
    390                 // These should already be in a block.
    391                 auto block = mutate( mutStmt->stmts.front().strict_as<CompoundStmt>() );
    392393                block->kids = fixBlock( block->kids, true );
    393394
     
    396397                Entry & entry = enclosing_control_structures.back();
    397398                if ( entry.isFallUsed() ) {
    398                         mutStmt->stmts.push_back( labelledNullStmt( mutStmt->location, entry.useFallExit() ) );
     399                        mutStmt->stmts.push_back( labelledNullStmt( block->location, entry.useFallExit() ) );
    399400                }
    400401        }
     
    433434}
    434435
    435 bool isDefaultCase( const ptr<Stmt> & stmt ) {
    436         const CaseStmt * caseStmt = stmt.strict_as<CaseStmt>();
    437         return caseStmt->isDefault();
     436static bool isDefaultCase( const ptr<CaseClause> & caseClause ) {
     437        return caseClause->isDefault();
    438438}
    439439
    440440void MultiLevelExitCore::previsit( const SwitchStmt * stmt ) {
    441441        Label label = newLabel( "switchBreak", stmt );
    442         auto it = find_if( stmt->stmts.rbegin(), stmt->stmts.rend(), isDefaultCase );
    443 
    444         const CaseStmt * defaultCase = it != stmt->stmts.rend() ? (it)->strict_as<CaseStmt>() : nullptr;
    445         Label defaultLabel = defaultCase ? newLabel( "fallThroughDefault", defaultCase ) : Label( stmt->location, "" );
     442        auto it = find_if( stmt->cases.rbegin(), stmt->cases.rend(), isDefaultCase );
     443
     444        const CaseClause * defaultCase = it != stmt->cases.rend() ? (*it) : nullptr;
     445        Label defaultLabel = defaultCase ? newLabel( "fallThroughDefault", defaultCase->location ) : Label( stmt->location, "" );
    446446        enclosing_control_structures.emplace_back( stmt, label, defaultLabel );
    447447        GuardAction( [this]() { enclosing_control_structures.pop_back(); } );
     
    449449        // Collect valid labels for fallthrough. It starts with all labels at this level, then remove as each is seen during
    450450        // traversal.
    451         for ( const Stmt * stmt : stmt->stmts ) {
    452                 auto * caseStmt = strict_dynamic_cast< const CaseStmt * >( stmt );
     451        for ( const CaseClause * caseStmt : stmt->cases ) {
    453452                if ( caseStmt->stmts.empty() ) continue;
    454453                auto block = caseStmt->stmts.front().strict_as<CompoundStmt>();
     
    471470                // exit label and break to the last case, create a default case if no cases.
    472471                SwitchStmt * mutStmt = mutate( stmt );
    473                 if ( mutStmt->stmts.empty() ) {
    474                         mutStmt->stmts.push_back( new CaseStmt( mutStmt->location, nullptr, {} ) );
    475                 }
    476 
    477                 auto caseStmt = mutStmt->stmts.back().strict_as<CaseStmt>();
     472                if ( mutStmt->cases.empty() ) {
     473                        mutStmt->cases.push_back( new CaseClause( mutStmt->location, nullptr, {} ) );
     474                }
     475
     476                auto caseStmt = mutStmt->cases.back().get();
    478477                auto mutCase = mutate( caseStmt );
    479                 mutStmt->stmts.back() = mutCase;
     478                mutStmt->cases.back() = mutCase;
    480479
    481480                Label label( mutCase->location, "breakLabel" );
     
    514513}
    515514
    516 void MultiLevelExitCore::previsit( const FinallyStmt * ) {
     515void MultiLevelExitCore::previsit( const FinallyClause * ) {
    517516        GuardAction([this, old = move( enclosing_control_structures)](){ enclosing_control_structures = move(old); });
    518517        enclosing_control_structures = vector<Entry>();
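
    Note: with CaseStmt replaced by CaseClause, the fallthrough label is now built from the clause's
    location and appended as a labelled null statement after the clause's compound block. A hedged
    before/after sketch of the transformation (label name illustrative, CFA fallthrough statement
    assumed):

        switch ( x ) {
          case 1: { f(); fallthrough; }               // source
          case 2: { g(); }
        }

        switch ( x ) {
          case 1: { f(); goto __L0__fallThrough; }    // transformed: branch to ...
                  __L0__fallThrough: ;                // ... the labelled null statement
          case 2: { g(); }
        }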
  • src/InitTweak/FixGlobalInit.cc

    rba897d21 r2e9b59b  
    113113                accept_all(translationUnit, fixer);
    114114
     115                // Say these magic declarations come at the end of the file.
     116                CodeLocation const & location = translationUnit.decls.back()->location;
     117
    115118                if ( !fixer.core.initStmts.empty() ) {
    116119                        std::vector<ast::ptr<ast::Expr>> ctorParams;
    117                         if (inLibrary) ctorParams.emplace_back(ast::ConstantExpr::from_int({}, 200));
    118                         auto initFunction = new ast::FunctionDecl({}, "__global_init__", {}, {}, {}, new ast::CompoundStmt({}, std::move(fixer.core.initStmts)),
    119                                 ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("constructor", std::move(ctorParams))});
     120                        if (inLibrary) ctorParams.emplace_back(ast::ConstantExpr::from_int(location, 200));
     121                        auto initFunction = new ast::FunctionDecl(location,
     122                                "__global_init__", {}, {}, {},
     123                                new ast::CompoundStmt(location, std::move(fixer.core.initStmts)),
     124                                ast::Storage::Static, ast::Linkage::C,
     125                                {new ast::Attribute("constructor", std::move(ctorParams))});
    120126
    121127                        translationUnit.decls.emplace_back( initFunction );
     
    124130                if ( !fixer.core.destroyStmts.empty() ) {
    125131                        std::vector<ast::ptr<ast::Expr>> dtorParams;
    126                         if (inLibrary) dtorParams.emplace_back(ast::ConstantExpr::from_int({}, 200));
    127                         auto destroyFunction = new ast::FunctionDecl({}, "__global_destroy__", {}, {}, {}, new ast::CompoundStmt({}, std::move(fixer.core.destroyStmts)),
    128                                 ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("destructor", std::move(dtorParams))});
     132                        if (inLibrary) dtorParams.emplace_back(ast::ConstantExpr::from_int(location, 200));
     133                        auto destroyFunction = new ast::FunctionDecl( location,
     134                                "__global_destroy__", {}, {}, {},
     135                                new ast::CompoundStmt(location, std::move(fixer.core.destroyStmts)),
     136                                ast::Storage::Static, ast::Linkage::C,
     137                                {new ast::Attribute("destructor", std::move(dtorParams))});
    129138
    130139                        translationUnit.decls.emplace_back(destroyFunction);
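
    Note: aside from attaching a real CodeLocation, the generated declarations are unchanged; when
    inLibrary is set they carry priority 200. A hedged sketch of the C these declarations lower to
    (bodies elided):

        __attribute__(( constructor(200) )) static void __global_init__( void ) { /* initStmts */ }
        __attribute__(( destructor(200) )) static void __global_destroy__( void ) { /* destroyStmts */ }

    Outside a library build the attributes take no priority argument.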
  • src/InitTweak/FixInitNew.cpp

    rba897d21 r2e9b59b  
    1616#include "CodeGen/GenType.h"           // for genPrettyType
    1717#include "CodeGen/OperatorTable.h"
     18#include "Common/CodeLocationTools.hpp"
    1819#include "Common/PassVisitor.h"        // for PassVisitor, WithStmtsToAdd
    1920#include "Common/SemanticError.h"      // for SemanticError
     
    8586        /// generate/resolve copy construction expressions for each, and generate/resolve destructors for both
    8687        /// arguments and return value temporaries
    87         struct ResolveCopyCtors final : public ast::WithGuards, public ast::WithStmtsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithVisitorRef<ResolveCopyCtors> {
     88        struct ResolveCopyCtors final : public ast::WithGuards, public ast::WithStmtsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithVisitorRef<ResolveCopyCtors>, public ast::WithConstTranslationUnit {
    8889                const ast::Expr * postvisit( const ast::ImplicitCopyCtorExpr * impCpCtorExpr );
    8990                const ast::StmtExpr * previsit( const ast::StmtExpr * stmtExpr );
     
    189190        /// for any member that is missing a corresponding ctor/dtor call.
    190191        /// error if a member is used before constructed
    191         struct GenStructMemberCalls final : public ast::WithGuards, public ast::WithShortCircuiting, public ast::WithSymbolTable, public ast::WithVisitorRef<GenStructMemberCalls> {
     192        struct GenStructMemberCalls final : public ast::WithGuards, public ast::WithShortCircuiting, public ast::WithSymbolTable, public ast::WithVisitorRef<GenStructMemberCalls>, public ast::WithConstTranslationUnit {
    192193                void previsit( const ast::FunctionDecl * funcDecl );
    193194                const ast::DeclWithType * postvisit( const ast::FunctionDecl * funcDecl );
     
    214215
    215216        /// expands ConstructorExpr nodes into comma expressions, using a temporary for the first argument
    216         struct FixCtorExprs final : public ast::WithDeclsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting {
     217        struct FixCtorExprs final : public ast::WithDeclsToAdd<>, public ast::WithSymbolTable, public ast::WithShortCircuiting, public ast::WithConstTranslationUnit {
    217218                const ast::Expr * postvisit( const ast::ConstructorExpr * ctorExpr );
    218219        };
     
    509510                // (VariableExpr and already resolved expression)
    510511                CP_CTOR_PRINT( std::cerr << "ResolvingCtorDtor " << untyped << std::endl; )
    511                 ast::ptr<ast::Expr> resolved = ResolvExpr::findVoidExpression(untyped, symtab);
     512                ast::ptr<ast::Expr> resolved = ResolvExpr::findVoidExpression(untyped, { symtab, transUnit().global } );
    512513                assert( resolved );
    513514                if ( resolved->env ) {
     
    553554                ast::ptr<ast::Expr> guard = mutArg;
    554555
    555                 ast::ptr<ast::ObjectDecl> tmp = new ast::ObjectDecl({}, "__tmp", mutResult, nullptr );
     556                ast::ptr<ast::ObjectDecl> tmp = new ast::ObjectDecl(loc, "__tmp", mutResult, nullptr );
    556557
    557558                // create and resolve copy constructor
     
    587588
    588589        ast::Expr * ResolveCopyCtors::destructRet( const ast::ObjectDecl * ret, const ast::Expr * arg ) {
     590                auto global = transUnit().global;
    589591                // TODO: refactor code for generating cleanup attribute, since it's common and reused in ~3-4 places
    590592                // check for existing cleanup attribute before adding another(?)
    591593                // need to add __Destructor for _tmp_cp variables as well
    592594
    593                 assertf( ast::dtorStruct, "Destructor generation requires __Destructor definition." );
    594                 assertf( ast::dtorStruct->members.size() == 2, "__Destructor definition does not have expected fields." );
    595                 assertf( ast::dtorStructDestroy, "Destructor generation requires __destroy_Destructor." );
     595                assertf( global.dtorStruct, "Destructor generation requires __Destructor definition." );
     596                assertf( global.dtorStruct->members.size() == 2, "__Destructor definition does not have expected fields." );
     597                assertf( global.dtorDestroy, "Destructor generation requires __destroy_Destructor." );
    596598
    597599                const CodeLocation loc = ret->location;
     
    610612                auto dtorFunc = getDtorFunc( ret, new ast::ExprStmt(loc, dtor ), stmtsToAddBefore );
    611613
    612                 auto dtorStructType = new ast::StructInstType(ast::dtorStruct);
     614                auto dtorStructType = new ast::StructInstType( global.dtorStruct );
    613615
    614616                // what does this do???
     
    622624                static UniqueName namer( "_ret_dtor" );
    623625                auto retDtor = new ast::ObjectDecl(loc, namer.newName(), dtorStructType, new ast::ListInit(loc, { new ast::SingleInit(loc, ast::ConstantExpr::null(loc) ), new ast::SingleInit(loc, new ast::CastExpr( new ast::VariableExpr(loc, dtorFunc ), dtorType ) ) } ) );
    624                 retDtor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr(loc, ast::dtorStructDestroy ) } ) );
     626                retDtor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr(loc, global.dtorDestroy ) } ) );
    625627                stmtsToAddBefore.push_back( new ast::DeclStmt(loc, retDtor ) );
    626628
    627629                if ( arg ) {
    628                         auto member = new ast::MemberExpr(loc, ast::dtorStruct->members.front().strict_as<ast::DeclWithType>(), new ast::VariableExpr(loc, retDtor ) );
     630                        auto member = new ast::MemberExpr(loc, global.dtorStruct->members.front().strict_as<ast::DeclWithType>(), new ast::VariableExpr(loc, retDtor ) );
    629631                        auto object = new ast::CastExpr( new ast::AddressExpr( new ast::VariableExpr(loc, ret ) ), new ast::PointerType(new ast::VoidType() ) );
    630632                        ast::Expr * assign = createBitwiseAssignment( member, object );
     
    799801        // to prevent warnings ('_unq0' may be used uninitialized in this function),
    800802        // insert an appropriate zero initializer for UniqueExpr temporaries.
    801         ast::Init * makeInit( const ast::Type * t ) {
     803        ast::Init * makeInit( const ast::Type * t, CodeLocation const & loc ) {
    802804                if ( auto inst = dynamic_cast< const ast::StructInstType * >( t ) ) {
    803805                        // initizer for empty struct must be empty
    804                         if ( inst->base->members.empty() ) return new ast::ListInit({}, {});
     806                        if ( inst->base->members.empty() ) {
     807                                return new ast::ListInit( loc, {} );
     808                        }
    805809                } else if ( auto inst = dynamic_cast< const ast::UnionInstType * >( t ) ) {
    806810                        // initizer for empty union must be empty
    807                         if ( inst->base->members.empty() ) return new ast::ListInit({}, {});
    808                 }
    809 
    810                 return new ast::ListInit( {}, { new ast::SingleInit( {}, ast::ConstantExpr::from_int({}, 0) ) } );
     811                        if ( inst->base->members.empty() ) {
     812                                return new ast::ListInit( loc, {} );
     813                        }
     814                }
     815
     816                return new ast::ListInit( loc, {
     817                        new ast::SingleInit( loc, ast::ConstantExpr::from_int( loc, 0 ) )
     818                } );
    811819        }
    812820
     
    832840                        } else {
    833841                                // expr isn't a call expr, so create a new temporary variable to use to hold the value of the unique expression
    834                                 mutExpr->object = new ast::ObjectDecl( mutExpr->location, toString("_unq", mutExpr->id), mutExpr->result, makeInit( mutExpr->result ) );
     842                                mutExpr->object = new ast::ObjectDecl( mutExpr->location, toString("_unq", mutExpr->id), mutExpr->result, makeInit( mutExpr->result, mutExpr->location ) );
    835843                                mutExpr->var = new ast::VariableExpr( mutExpr->location, mutExpr->object );
    836844                        }
     
    11721180                        auto guard = makeFuncGuard( [this]() { symtab.enterScope(); }, [this]() { symtab.leaveScope(); } );
    11731181                        symtab.addFunction( function );
     1182                        auto global = transUnit().global;
    11741183
    11751184                        // need to iterate through members in reverse in order for
     
    12171226
    12181227                                                        static UniqueName memberDtorNamer = { "__memberDtor" };
    1219                                                         assertf( ast::dtorStruct, "builtin __Destructor not found." );
    1220                                                         assertf( ast::dtorStructDestroy, "builtin __destroy_Destructor not found." );
     1228                                                        assertf( global.dtorStruct, "builtin __Destructor not found." );
     1229                                                        assertf( global.dtorDestroy, "builtin __destroy_Destructor not found." );
    12211230
    12221231                                                        ast::Expr * thisExpr = new ast::CastExpr( new ast::AddressExpr( new ast::VariableExpr(loc, thisParam ) ), new ast::PointerType( new ast::VoidType(), ast::CV::Qualifiers() ) );
     
    12281237                                                        auto dtorType = new ast::PointerType( dtorFtype );
    12291238
    1230                                                         auto destructor = new ast::ObjectDecl(loc, memberDtorNamer.newName(), new ast::StructInstType( ast::dtorStruct ), new ast::ListInit(loc, { new ast::SingleInit(loc, thisExpr ), new ast::SingleInit(loc, new ast::CastExpr( dtorExpr, dtorType ) ) } ) );
    1231                                                         destructor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr({}, ast::dtorStructDestroy ) } ) );
     1239                                                        auto destructor = new ast::ObjectDecl(loc, memberDtorNamer.newName(), new ast::StructInstType( global.dtorStruct ), new ast::ListInit(loc, { new ast::SingleInit(loc, thisExpr ), new ast::SingleInit(loc, new ast::CastExpr( dtorExpr, dtorType ) ) } ) );
     1240                                                        destructor->attributes.push_back( new ast::Attribute( "cleanup", { new ast::VariableExpr( loc, global.dtorDestroy ) } ) );
    12321241                                                        mutStmts->push_front( new ast::DeclStmt(loc, destructor ) );
    12331242                                                        mutStmts->kids.splice( mutStmts->kids.begin(), stmtsToAdd );
     
    13231332
    13241333        const ast::Expr * GenStructMemberCalls::postvisit( const ast::UntypedExpr * untypedExpr ) {
    1325                 // Expression * newExpr = untypedExpr;
    13261334                // xxx - functions returning ast::ptr seems wrong...
    1327                 auto res = ResolvExpr::findVoidExpression( untypedExpr, symtab );
    1328                 return res.release();
    1329                 // return newExpr;
     1335                auto res = ResolvExpr::findVoidExpression( untypedExpr, { symtab, transUnit().global } );
     1336                // Fix CodeLocation (at least until resolver is fixed).
     1337                auto fix = localFillCodeLocations( untypedExpr->location, res.release() );
     1338                return strict_dynamic_cast<const ast::Expr *>( fix );
    13301339        }
    13311340
     
    13611370
    13621371                // resolve assignment and dispose of new env
    1363                 auto resolved = ResolvExpr::findVoidExpression( assign, symtab );
     1372                auto resolved = ResolvExpr::findVoidExpression( assign, { symtab, transUnit().global } );
    13641373                auto mut = resolved.get_and_mutate();
    13651374                assertf(resolved.get() == mut, "newly resolved expression must be unique");
  • src/InitTweak/GenInit.cc

    rba897d21 r2e9b59b  
    402402                                        retVal->location, "?{}", retVal, stmt->expr );
    403403                                assertf( ctorStmt,
    404                                         "ReturnFixer: genCtorDtor returned nllptr: %s / %s",
     404                                        "ReturnFixer: genCtorDtor returned nullptr: %s / %s",
    405405                                        toString( retVal ).c_str(),
    406406                                        toString( stmt->expr ).c_str() );
    407                                         stmtsToAddBefore.push_back( ctorStmt );
     407                                stmtsToAddBefore.push_back( ctorStmt );
    408408
    409409                                // Return the retVal object.
     
    421421        void genInit( ast::TranslationUnit & transUnit ) {
    422422                ast::Pass<HoistArrayDimension_NoResolve_New>::run( transUnit );
     423                ast::Pass<ReturnFixer_New>::run( transUnit );
     424        }
     425
     426        void fixReturnStatements( ast::TranslationUnit & transUnit ) {
    423427                ast::Pass<ReturnFixer_New>::run( transUnit );
    424428        }
  • src/InitTweak/GenInit.h

    rba897d21 r2e9b59b  
    1010// Created On       : Mon May 18 07:44:20 2015
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Fri Oct 22 16:08:00 2021
    13 // Update Count     : 6
     12// Last Modified On : Fri Mar 18 14:22:00 2022
     13// Update Count     : 7
    1414//
    1515
     
    3131        /// Converts return statements into copy constructor calls on the hidden return variable
    3232        void fixReturnStatements( std::list< Declaration * > & translationUnit );
     33        void fixReturnStatements( ast::TranslationUnit & translationUnit );
    3334
    3435        /// generates a single ctor/dtor statement using objDecl as the 'this' parameter and arg as the optional argument
  • src/InitTweak/InitTweak.cc

    rba897d21 r2e9b59b  
    423423                                loc, targetLabel.newName(), { new ast::Attribute{ "unused" } } };
    424424
    425                         std::vector< ast::ptr< ast::Stmt > > branches;
     425                        std::vector< ast::ptr< ast::CaseClause > > branches;
    426426                        for ( const ast::Init * init : *listInit ) {
    427427                                auto condition = ast::ConstantExpr::from_ulong( loc, cond );
     
    432432                                stmts.emplace_back(
    433433                                        new ast::BranchStmt{ loc, ast::BranchStmt::Break, switchLabel } );
    434                                 branches.emplace_back( new ast::CaseStmt{ loc, condition, std::move( stmts ) } );
     434                                branches.emplace_back( new ast::CaseClause{ loc, condition, std::move( stmts ) } );
    435435                        }
    436436                        out.emplace_back( new ast::SwitchStmt{ loc, index, std::move( branches ) } );
  • src/Parser/DeclarationNode.cc

    rba897d21 r2e9b59b  
    7878        delete variable.initializer;
    7979
    80         delete type;
     80//      delete type;
    8181        delete bitfieldWidth;
    8282
     
    253253} // DeclarationNode::newAggregate
    254254
    255 DeclarationNode * DeclarationNode::newEnum( const string * name, DeclarationNode * constants, bool body ) {
     255DeclarationNode * DeclarationNode::newEnum( const string * name, DeclarationNode * constants, bool body, bool typed ) {
    256256        DeclarationNode * newnode = new DeclarationNode;
    257257        newnode->type = new TypeData( TypeData::Enum );
     
    263263} // DeclarationNode::newEnum
    264264
     265
     266
    265267DeclarationNode * DeclarationNode::newName( const string * name ) {
    266268        DeclarationNode * newnode = new DeclarationNode;
     
    270272} // DeclarationNode::newName
    271273
    272 DeclarationNode * DeclarationNode::newEnumConstant( const string * name, ExpressionNode * constant ) {
     274DeclarationNode * DeclarationNode::newEnumConstant( const string * name, ExpressionNode * constant ) { // Marker
    273275        DeclarationNode * newnode = newName( name );
    274276        newnode->enumeratorValue.reset( constant );
     
    665667}
    666668
     669DeclarationNode * DeclarationNode::addEnumBase( DeclarationNode * o ) {
     670        if ( o && o->type ) {
     671                type->base = o->type;
     672        }
     673        delete o;
     674        return this;
     675}
     676
    667677DeclarationNode * DeclarationNode::addTypedef() {
    668678        TypeData * newtype = new TypeData( TypeData::Symbolic );
  • src/Parser/ParseNode.h

    rba897d21 r2e9b59b  
    235235        static DeclarationNode * newFunction( const std::string * name, DeclarationNode * ret, DeclarationNode * param, StatementNode * body );
    236236        static DeclarationNode * newAggregate( AggregateDecl::Aggregate kind, const std::string * name, ExpressionNode * actuals, DeclarationNode * fields, bool body );
    237         static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body );
     237        static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body, bool typed );
    238238        static DeclarationNode * newEnumConstant( const std::string * name, ExpressionNode * constant );
    239239        static DeclarationNode * newName( const std::string * );
     
    265265        DeclarationNode * addType( DeclarationNode * );
    266266        DeclarationNode * addTypedef();
     267        DeclarationNode * addEnumBase( DeclarationNode * );
    267268        DeclarationNode * addAssertions( DeclarationNode * );
    268269        DeclarationNode * addName( std::string * );
  • src/Parser/StatementNode.cc

    rba897d21 r2e9b59b  
    366366} // maybe_build_compound
    367367
     368// Question
    368369Statement * build_asm( bool voltile, Expression * instruction, ExpressionNode * output, ExpressionNode * input, ExpressionNode * clobber, LabelNode * gotolabels ) {
    369370        list< Expression * > out, in;
  • src/Parser/TypeData.cc

    rba897d21 r2e9b59b  
    918918EnumDecl * buildEnum( const TypeData * td, std::list< Attribute * > attributes, LinkageSpec::Spec linkage ) {
    919919        assert( td->kind == TypeData::Enum );
    920         EnumDecl * ret = new EnumDecl( *td->enumeration.name, attributes, linkage );
     920        Type * baseType = td->base ? typebuild(td->base) : nullptr;
     921        EnumDecl * ret = new EnumDecl( *td->enumeration.name, attributes, linkage, baseType );
    921922        buildList( td->enumeration.constants, ret->get_members() );
    922923        list< Declaration * >::iterator members = ret->get_members().begin();
    923         for ( const DeclarationNode * cur = td->enumeration. constants; cur != nullptr; cur = dynamic_cast< DeclarationNode * >( cur->get_next() ), ++members ) {
     924        for ( const DeclarationNode * cur = td->enumeration.constants; cur != nullptr; cur = dynamic_cast< DeclarationNode * >( cur->get_next() ), ++members ) {
    924925                if ( cur->has_enumeratorValue() ) {
    925926                        ObjectDecl * member = dynamic_cast< ObjectDecl * >(* members);
    926927                        member->set_init( new SingleInit( maybeMoveBuild< Expression >( cur->consume_enumeratorValue() ) ) );
     928                } else {
     929                        if ( baseType && (!dynamic_cast<BasicType *>(baseType) || !dynamic_cast<BasicType *>(baseType)->isWholeNumber())) {
     930                                SemanticError( td->location, "A non-whole-number enum value declaration must be explicitly initialized." );
     931                        }
    927932                } // if
    928933        } // for
    929         ret->set_body( td->enumeration.body );
     934        ret->set_body( td->enumeration.body ); // Boolean: true when the enum has a body
    930935        return ret;
    931936} // buildEnum
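
    Note: buildEnum now threads the base type into EnumDecl and rejects implicitly valued
    enumerators when that base is not a whole-number type. A hedged sketch of the three cases
    (typed-enum syntax per the parser.yy changes below):

        enum( int ) E { A, B };          // accepted: whole-number base, implicit values allowed
        enum( double ) F { X };          // rejected by the new SemanticError: no explicit initializer
        enum( double ) G { X = 1.5 };    // accepted: explicitly initialized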
  • src/Parser/TypeData.h

    rba897d21 r2e9b59b  
    132132                                                 Initializer * init = nullptr, std::list< class Attribute * > attributes = std::list< class Attribute * >() );
    133133FunctionType * buildFunction( const TypeData * );
     134Declaration * addEnumBase( Declaration *, const TypeData * );
    134135void buildKRFunction( const TypeData::Function_t & function );
    135136
  • src/Parser/parser.yy

    rba897d21 r2e9b59b  
    1010// Created On       : Sat Sep  1 20:22:55 2001
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Feb 25 17:54:56 2022
    13 // Update Count     : 5262
     12// Last Modified On : Mon Mar 14 16:35:29 2022
     13// Update Count     : 5276
    1414//
    1515
     
    652652                        // Historic, transitional: Disallow commas in subscripts.
    653653                        // Switching to this behaviour may help check if a C compatibility case uses comma-exprs in subscripts.
    654                 // { SemanticError( yylloc, "New array subscript is currently unimplemented." ); $$ = nullptr; }
    655654                        // Current: Commas in subscripts make tuples.
    656655                { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, new ExpressionNode( build_tuple( (ExpressionNode *)($3->set_last( $5 ) ) )) ) ); }
     
    661660                // equivalent to the old x[i,j].
    662661                { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, $3 ) ); }
     662        | constant '[' assignment_expression ']'                        // 3[a], 'a'[a], 3.5[a]
     663                { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, $3 ) ); }
     664        | string_literal '[' assignment_expression ']'          // "abc"[3], 3["abc"]
     665                { $$ = new ExpressionNode( build_binary_val( OperKinds::Index, new ExpressionNode( $1 ), $3 ) ); }
    663666        | postfix_expression '{' argument_expression_list_opt '}' // CFA, constructor call
    664667                {
     
    23002303        ;
    23012304
    2302 enum_type:                                                                                              // enum
     2305enum_type: // enum; static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body, bool typed );
    23032306        ENUM attribute_list_opt '{' enumerator_list comma_opt '}'
    2304                 { $$ = DeclarationNode::newEnum( nullptr, $4, true )->addQualifiers( $2 ); }
     2307                { $$ = DeclarationNode::newEnum( nullptr, $4, true, false )->addQualifiers( $2 ); }
    23052308        | ENUM attribute_list_opt identifier
    23062309                { typedefTable.makeTypedef( *$3 ); }
    23072310          '{' enumerator_list comma_opt '}'
    2308                 { $$ = DeclarationNode::newEnum( $3, $6, true )->addQualifiers( $2 ); }
     2311                { $$ = DeclarationNode::newEnum( $3, $6, true, false )->addQualifiers( $2 ); }
    23092312        | ENUM attribute_list_opt typedef_name                          // unqualified type name
    23102313          '{' enumerator_list comma_opt '}'
    2311                 { $$ = DeclarationNode::newEnum( $3->name, $5, true )->addQualifiers( $2 ); }
     2314                { $$ = DeclarationNode::newEnum( $3->name, $5, true, false )->addQualifiers( $2 ); }
    23122315        | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt '{' enumerator_list comma_opt '}'
    23132316                {
    2314                         if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 ) { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); }
    2315                         SemanticError( yylloc, "Typed enumeration is currently unimplemented." ); $$ = nullptr;
    2316                 }
    2317         | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt identifier attribute_list_opt
     2317                        if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 )
     2318                        { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); }
     2319                        // SemanticError( yylloc, "Typed enumeration is currently unimplemented." ); $$ = nullptr;
     2320
     2321                        $$ = DeclarationNode::newEnum( nullptr, $7, true, true )->addQualifiers( $5 )->addEnumBase( $3 );
     2322                        // $$ = DeclarationNode::newEnum( nullptr, $7, true, true ) ->addQualifiers( $5 );
     2323                }
     2324        | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt identifier attribute_list_opt // Question: why attributes/qualifier after identifier
    23182325                {
    23192326                        if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 ) { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); }
     
    23222329          '{' enumerator_list comma_opt '}'
    23232330                {
    2324                         SemanticError( yylloc, "Typed enumeration is currently unimplemented." ); $$ = nullptr;
     2331                        $$ = DeclarationNode::newEnum( $6, $10, true, true )->addQualifiers( $5 )->addQualifiers( $7 )->addEnumBase( $3 );
     2332                        // $$ = DeclarationNode::newEnum( $6, $10, true, true ) -> addQualifiers( $5 ) -> addQualifiers( $7 );
    23252333                }
    23262334        | ENUM '(' cfa_abstract_parameter_declaration ')' attribute_list_opt typedef_name attribute_list_opt '{' enumerator_list comma_opt '}'
     
    23282336                        if ( $3->storageClasses.val != 0 || $3->type->qualifiers.val != 0 ) { SemanticError( yylloc, "storage-class and CV qualifiers are not meaningful for enumeration constants, which are const." ); }
    23292337                        typedefTable.makeTypedef( *$6->name );
    2330                         SemanticError( yylloc, "Typed enumeration is currently unimplemented." ); $$ = nullptr;
     2338                        $$ = DeclarationNode::newEnum( $6->name, $9, true, true )->addQualifiers( $5 )->addQualifiers( $7 )->addEnumBase( $3 );
     2339                        // $$ = DeclarationNode::newEnum( $6->name, $9, true, true ) -> addQualifiers( $5 ) -> addQualifiers( $7 );
    23312340                }
    23322341        | enum_type_nobody
     
    23352344enum_type_nobody:                                                                               // enum - {...}
    23362345        ENUM attribute_list_opt identifier
    2337                 { typedefTable.makeTypedef( *$3 ); $$ = DeclarationNode::newEnum( $3, 0, false )->addQualifiers( $2 ); }
     2346                { typedefTable.makeTypedef( *$3 ); $$ = DeclarationNode::newEnum( $3, 0, false, false )->addQualifiers( $2 ); }
    23382347        | ENUM attribute_list_opt type_name                                     // qualified type name
    2339                 { typedefTable.makeTypedef( *$3->type->symbolic.name ); $$ = DeclarationNode::newEnum( $3->type->symbolic.name, 0, false )->addQualifiers( $2 ); }
     2348                { typedefTable.makeTypedef( *$3->type->symbolic.name ); $$ = DeclarationNode::newEnum( $3->type->symbolic.name, 0, false, false )->addQualifiers( $2 ); }
    23402349        ;
    23412350
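    Note: the rules above replace the former "Typed enumeration is currently unimplemented" error
    with real actions. A minimal sketch of the forms the grammar now accepts (names illustrative):

        enum( int ) { A, B };                       // anonymous typed enumeration
        enum( int ) Colour { Red, Green, Blue };    // named typed enumeration
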
  • src/ResolvExpr/CandidateFinder.cpp

    rba897d21 r2e9b59b  
    1010// Created On       : Wed Jun 5 14:30:00 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Tue Oct  1 14:55:00 2019
    13 // Update Count     : 2
     12// Last Modified On : Wed Mar 16 11:58:00 2022
     13// Update Count     : 3
    1414//
    1515
     
    595595        /// Actually visits expressions to find their candidate interpretations
    596596        class Finder final : public ast::WithShortCircuiting {
     597                const ResolveContext & context;
    597598                const ast::SymbolTable & symtab;
    598599        public:
     
    618619
    619620                Finder( CandidateFinder & f )
    620                 : symtab( f.localSyms ), selfFinder( f ), candidates( f.candidates ), tenv( f.env ),
    621                   targetType( f.targetType ) {}
     621                : context( f.context ), symtab( context.symtab ), selfFinder( f ),
     622                  candidates( f.candidates ), tenv( f.env ), targetType( f.targetType ) {}
    622623
    623624                void previsit( const ast::Node * ) { visit_children = false; }
     
    872873                        Tuples::handleTupleAssignment( selfFinder, untypedExpr, argCandidates );
    873874
    874                         CandidateFinder funcFinder{ symtab, tenv };
     875                        CandidateFinder funcFinder( context, tenv );
    875876                        if (auto nameExpr = untypedExpr->func.as<ast::NameExpr>()) {
    876877                                auto kind = ast::SymbolTable::getSpecialFunctionKind(nameExpr->name);
     
    918919                        // find function operators
    919920                        ast::ptr< ast::Expr > opExpr = new ast::NameExpr{ untypedExpr->location, "?()" };
    920                         CandidateFinder opFinder{ symtab, tenv };
     921                        CandidateFinder opFinder( context, tenv );
    921922                        // okay if there aren't any function operations
    922923                        opFinder.find( opExpr, ResolvMode::withoutFailFast() );
     
    10591060
    10601061                void postvisit( const ast::AddressExpr * addressExpr ) {
    1061                         CandidateFinder finder{ symtab, tenv };
     1062                        CandidateFinder finder( context, tenv );
    10621063                        finder.find( addressExpr->arg );
    10631064
     
    10791080                        ast::ptr< ast::Type > toType = castExpr->result;
    10801081                        assert( toType );
    1081                         toType = resolveTypeof( toType, symtab );
     1082                        toType = resolveTypeof( toType, context );
    10821083                        // toType = SymTab::validateType( castExpr->location, toType, symtab );
    10831084                        toType = adjustExprType( toType, tenv, symtab );
    10841085
    1085                         CandidateFinder finder{ symtab, tenv, toType };
     1086                        CandidateFinder finder( context, tenv, toType );
    10861087                        finder.find( castExpr->arg, ResolvMode::withAdjustment() );
    10871088
     
    11361137                void postvisit( const ast::VirtualCastExpr * castExpr ) {
    11371138                        assertf( castExpr->result, "Implicit virtual cast targets not yet supported." );
    1138                         CandidateFinder finder{ symtab, tenv };
     1139                        CandidateFinder finder( context, tenv );
    11391140                        // don't prune here, all alternatives guaranteed to have same type
    11401141                        finder.find( castExpr->arg, ResolvMode::withoutPrune() );
     
    11531154                        auto target = inst->base.get();
    11541155
    1155                         CandidateFinder finder{ symtab, tenv };
     1156                        CandidateFinder finder( context, tenv );
    11561157
    11571158                        auto pick_alternatives = [target, this](CandidateList & found, bool expect_ref) {
     
    12021203
    12031204                void postvisit( const ast::UntypedMemberExpr * memberExpr ) {
    1204                         CandidateFinder aggFinder{ symtab, tenv };
     1205                        CandidateFinder aggFinder( context, tenv );
    12051206                        aggFinder.find( memberExpr->aggregate, ResolvMode::withAdjustment() );
    12061207                        for ( CandidateRef & agg : aggFinder.candidates ) {
     
    12871288                                addCandidate(
    12881289                                        new ast::SizeofExpr{
    1289                                                 sizeofExpr->location, resolveTypeof( sizeofExpr->type, symtab ) },
     1290                                                sizeofExpr->location, resolveTypeof( sizeofExpr->type, context ) },
    12901291                                        tenv );
    12911292                        } else {
    12921293                                // find all candidates for the argument to sizeof
    1293                                 CandidateFinder finder{ symtab, tenv };
     1294                                CandidateFinder finder( context, tenv );
    12941295                                finder.find( sizeofExpr->expr );
    12951296                                // find the lowest-cost candidate, otherwise ambiguous
     
    13111312                                addCandidate(
    13121313                                        new ast::AlignofExpr{
    1313                                                 alignofExpr->location, resolveTypeof( alignofExpr->type, symtab ) },
     1314                                                alignofExpr->location, resolveTypeof( alignofExpr->type, context ) },
    13141315                                        tenv );
    13151316                        } else {
    13161317                                // find all candidates for the argument to alignof
    1317                                 CandidateFinder finder{ symtab, tenv };
     1318                                CandidateFinder finder( context, tenv );
    13181319                                finder.find( alignofExpr->expr );
    13191320                                // find the lowest-cost candidate, otherwise ambiguous
     
    13541355
    13551356                void postvisit( const ast::LogicalExpr * logicalExpr ) {
    1356                         CandidateFinder finder1{ symtab, tenv };
     1357                        CandidateFinder finder1( context, tenv );
    13571358                        finder1.find( logicalExpr->arg1, ResolvMode::withAdjustment() );
    13581359                        if ( finder1.candidates.empty() ) return;
    13591360
    1360                         CandidateFinder finder2{ symtab, tenv };
     1361                        CandidateFinder finder2( context, tenv );
    13611362                        finder2.find( logicalExpr->arg2, ResolvMode::withAdjustment() );
    13621363                        if ( finder2.candidates.empty() ) return;
     
    13841385                void postvisit( const ast::ConditionalExpr * conditionalExpr ) {
    13851386                        // candidates for condition
    1386                         CandidateFinder finder1{ symtab, tenv };
     1387                        CandidateFinder finder1( context, tenv );
    13871388                        finder1.find( conditionalExpr->arg1, ResolvMode::withAdjustment() );
    13881389                        if ( finder1.candidates.empty() ) return;
    13891390
    13901391                        // candidates for true result
    1391                         CandidateFinder finder2{ symtab, tenv };
     1392                        CandidateFinder finder2( context, tenv );
    13921393                        finder2.find( conditionalExpr->arg2, ResolvMode::withAdjustment() );
    13931394                        if ( finder2.candidates.empty() ) return;
    13941395
    13951396                        // candidates for false result
    1396                         CandidateFinder finder3{ symtab, tenv };
     1397                        CandidateFinder finder3( context, tenv );
    13971398                        finder3.find( conditionalExpr->arg3, ResolvMode::withAdjustment() );
    13981399                        if ( finder3.candidates.empty() ) return;
     
    14451446                void postvisit( const ast::CommaExpr * commaExpr ) {
    14461447                        ast::TypeEnvironment env{ tenv };
    1447                         ast::ptr< ast::Expr > arg1 = resolveInVoidContext( commaExpr->arg1, symtab, env );
    1448 
    1449                         CandidateFinder finder2{ symtab, env };
     1448                        ast::ptr< ast::Expr > arg1 = resolveInVoidContext( commaExpr->arg1, context, env );
     1449
     1450                        CandidateFinder finder2( context, env );
    14501451                        finder2.find( commaExpr->arg2, ResolvMode::withAdjustment() );
    14511452
     
    14601461
    14611462                void postvisit( const ast::ConstructorExpr * ctorExpr ) {
    1462                         CandidateFinder finder{ symtab, tenv };
     1463                        CandidateFinder finder( context, tenv );
    14631464                        finder.find( ctorExpr->callExpr, ResolvMode::withoutPrune() );
    14641465                        for ( CandidateRef & r : finder.candidates ) {
     
    14691470                void postvisit( const ast::RangeExpr * rangeExpr ) {
    14701471                        // resolve low and high, accept candidates where low and high types unify
    1471                         CandidateFinder finder1{ symtab, tenv };
     1472                        CandidateFinder finder1( context, tenv );
    14721473                        finder1.find( rangeExpr->low, ResolvMode::withAdjustment() );
    14731474                        if ( finder1.candidates.empty() ) return;
    14741475
    1475                         CandidateFinder finder2{ symtab, tenv };
     1476                        CandidateFinder finder2( context, tenv );
    14761477                        finder2.find( rangeExpr->high, ResolvMode::withAdjustment() );
    14771478                        if ( finder2.candidates.empty() ) return;
     
    15491550
    15501551                void postvisit( const ast::UniqueExpr * unqExpr ) {
    1551                         CandidateFinder finder{ symtab, tenv };
     1552                        CandidateFinder finder( context, tenv );
    15521553                        finder.find( unqExpr->expr, ResolvMode::withAdjustment() );
    15531554                        for ( CandidateRef & r : finder.candidates ) {
     
    15581559
    15591560                void postvisit( const ast::StmtExpr * stmtExpr ) {
    1560                         addCandidate( resolveStmtExpr( stmtExpr, symtab ), tenv );
     1561                        addCandidate( resolveStmtExpr( stmtExpr, context ), tenv );
    15611562                }
    15621563
     
    15701571                        for ( const ast::InitAlternative & initAlt : initExpr->initAlts ) {
    15711572                                // calculate target type
    1572                                 const ast::Type * toType = resolveTypeof( initAlt.type, symtab );
     1573                                const ast::Type * toType = resolveTypeof( initAlt.type, context );
    15731574                                // toType = SymTab::validateType( initExpr->location, toType, symtab );
    15741575                                toType = adjustExprType( toType, tenv, symtab );
     
    15761577                                // types are not bound to the initialization type, since return type variables are
    15771578                                // only open for the duration of resolving the UntypedExpr.
    1578                                 CandidateFinder finder{ symtab, tenv, toType };
     1579                                CandidateFinder finder( context, tenv, toType );
    15791580                                finder.find( initExpr->expr, ResolvMode::withAdjustment() );
    15801581                                for ( CandidateRef & cand : finder.candidates ) {
     
    16931694                }
    16941695                else {
    1695                         satisfyAssertions(candidate, localSyms, satisfied, errors);
     1696                        satisfyAssertions(candidate, context.symtab, satisfied, errors);
    16961697                        needRecomputeKey = true;
    16971698                }
     
    18551856                        r->expr = ast::mutate_field(
    18561857                                r->expr.get(), &ast::Expr::result,
    1857                                 adjustExprType( r->expr->result, r->env, localSyms ) );
     1858                                adjustExprType( r->expr->result, r->env, context.symtab ) );
    18581859                }
    18591860        }
     
    18731874
    18741875        for ( const auto & x : xs ) {
    1875                 out.emplace_back( localSyms, env );
     1876                out.emplace_back( context, env );
    18761877                out.back().find( x, ResolvMode::withAdjustment() );
    18771878
  • src/ResolvExpr/CandidateFinder.hpp

    rba897d21 r2e9b59b  
    1010// Created On       : Wed Jun 5 14:30:00 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Tue Oct  1  9:51:00 2019
    13 // Update Count     : 2
     12// Last Modified On : Wed Mar 16 15:22:00 2022
     13// Update Count     : 3
    1414//
    1515
     
    2525namespace ResolvExpr {
    2626
     27struct ResolveContext;
     28
    2729/// Data to perform expression resolution
    2830struct CandidateFinder {
    2931        CandidateList candidates;          ///< List of candidate resolutions
    30         const ast::SymbolTable & localSyms;   ///< Symbol table to lookup candidates
      32        const ResolveContext & context;  ///< Information about where the candidates are being found.
    3133        const ast::TypeEnvironment & env;  ///< Substitutions performed in this resolution
    3234        ast::ptr< ast::Type > targetType;  ///< Target type for resolution
     
    3436
    3537        CandidateFinder(
    36                 const ast::SymbolTable & syms, const ast::TypeEnvironment & env,
     38                const ResolveContext & context, const ast::TypeEnvironment & env,
    3739                const ast::Type * tt = nullptr )
    38         : candidates(), localSyms( syms ), env( env ), targetType( tt ) {}
     40        : candidates(), context( context ), env( env ), targetType( tt ) {}
    3941
    4042        /// Fill candidates with feasible resolutions for `expr`
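For readers tracking the refactor: every construction site switches from passing a bare symbol table to passing the new ResolveContext. A minimal before/after sketch of a call site, where `symtab`, `unit`, and `expr` are hypothetical caller state rather than names from this changeset:

        // ResolveContext bundles the symbol table with the translation
        // unit's global information (struct defined in Resolver.h below).
        ResolvExpr::ResolveContext context{ symtab, unit.global };
        ast::TypeEnvironment env;

        // before: CandidateFinder finder{ symtab, env };
        CandidateFinder finder( context, env );
        finder.find( expr, ResolvMode::withAdjustment() );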
  • src/ResolvExpr/CandidatePrinter.cpp

    rba897d21 r2e9b59b  
    1010// Created On       : Tue Nov  9  9:54:00 2021
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Tue Nov  9 15:47:00 2021
    13 // Update Count     : 0
     12// Last Modified On : Wed Mar 16 13:56:00 2022
     13// Update Count     : 1
    1414//
    1515
     
    2222#include "AST/TranslationUnit.hpp"
    2323#include "ResolvExpr/CandidateFinder.hpp"
     24#include "ResolvExpr/Resolver.h"
    2425
    2526#include <iostream>
     
    2930namespace {
    3031
    31 class CandidatePrintCore : public ast::WithSymbolTable {
     32class CandidatePrintCore : public ast::WithSymbolTable,
     33                public ast::WithConstTranslationUnit {
    3234        std::ostream & os;
    3335public:
     
    3638        void postvisit( const ast::ExprStmt * stmt ) {
    3739                ast::TypeEnvironment env;
    38                 CandidateFinder finder( symtab, env );
     40                CandidateFinder finder( { symtab, transUnit().global }, env );
    3941                finder.find( stmt->expr, ResolvMode::withAdjustment() );
    4042                int count = 1;
  • src/ResolvExpr/ConversionCost.cc

    rba897d21 r2e9b59b  
    333333                } else if ( dynamic_cast< const EnumInstType * >( dest ) ) {
    334334                        // xxx - not positive this is correct, but appears to allow casting int => enum
    335                         cost = Cost::unsafe;
     335                        // TODO
     336                        EnumDecl * decl = dynamic_cast< const EnumInstType * >( dest )->baseEnum;
     337                        if ( decl->base ) {
     338                                cost = Cost::infinity;
     339                        } else {
     340                                cost = Cost::unsafe;
     341                        } // if
    336342                } // if
    337343                // no cases for zero_t/one_t because it should not be possible to convert int, etc. to zero_t/one_t.
     
    610616        } else if ( dynamic_cast< const ast::EnumInstType * >( dst ) ) {
    611617                // xxx - not positive this is correct, but appears to allow casting int => enum
    612                 cost = Cost::unsafe;
     618                const ast::EnumDecl * decl = (dynamic_cast< const ast::EnumInstType * >( dst ))->base.get();
     619                if ( decl->base ) {
     620                        cost = Cost::infinity;
     621                } else {
     622                        cost = Cost::unsafe;
     623                } // if
    613624        }
    614625}
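Both hunks encode the same rule, once for each AST. Condensed, the new-AST version amounts to the following (a sketch, not verbatim from the change):

        // int => enum remains an unsafe conversion only for plain enums; an
        // enum with a declared base type can no longer be reached from int.
        if ( auto instType = dynamic_cast< const ast::EnumInstType * >( dst ) ) {
                cost = instType->base->base ? Cost::infinity : Cost::unsafe;
        }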
  • src/ResolvExpr/RenameVars.h

    rba897d21 r2e9b59b  
    3636        };
    3737        const ast::Type * renameTyVars( const ast::Type *, RenameMode mode = GEN_USAGE, bool reset = true );
    38        
    3938
    4039        /// resets internal state of renamer to avoid overflow
    4140        void resetTyVarRenaming();
    42 
    43        
    4441} // namespace ResolvExpr
    4542
  • src/ResolvExpr/ResolveTypeof.cc

    rba897d21 r2e9b59b  
    99// Author           : Richard C. Bilson
    1010// Created On       : Sun May 17 12:12:20 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue May 19 16:49:04 2015
    13 // Update Count     : 3
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Wed Mar 16 16:09:00 2022
     13// Update Count     : 4
    1414//
    1515
     
    2222#include "AST/Node.hpp"
    2323#include "AST/Pass.hpp"
     24#include "AST/TranslationUnit.hpp"
    2425#include "AST/Type.hpp"
    2526#include "AST/TypeEnvironment.hpp"
     
    119120namespace {
    120121        struct ResolveTypeof_new : public ast::WithShortCircuiting {
    121                 const ast::SymbolTable & localSymtab;
    122 
    123                 ResolveTypeof_new( const ast::SymbolTable & syms ) : localSymtab( syms ) {}
     122                const ResolveContext & context;
     123
     124                ResolveTypeof_new( const ResolveContext & context ) :
     125                        context( context ) {}
    124126
    125127                void previsit( const ast::TypeofType * ) { visit_children = false; }
     
    137139                                ast::TypeEnvironment dummy;
    138140                                ast::ptr< ast::Expr > newExpr =
    139                                         resolveInVoidContext( typeofType->expr, localSymtab, dummy );
     141                                        resolveInVoidContext( typeofType->expr, context, dummy );
    140142                                assert( newExpr->result && ! newExpr->result->isVoid() );
    141143                                newType = newExpr->result;
     
    161163} // anonymous namespace
    162164
    163 const ast::Type * resolveTypeof( const ast::Type * type , const ast::SymbolTable & symtab ) {
    164         ast::Pass< ResolveTypeof_new > mutator{ symtab };
     165const ast::Type * resolveTypeof( const ast::Type * type , const ResolveContext & context ) {
     166        ast::Pass< ResolveTypeof_new > mutator( context );
    165167        return type->accept( mutator );
    166168}
    167169
    168170struct FixArrayDimension {
    169         // should not require a mutable symbol table - prevent pass template instantiation
    170         const ast::SymbolTable & _symtab;
    171         FixArrayDimension(const ast::SymbolTable & symtab): _symtab(symtab) {}
     171        const ResolveContext & context;
     172        FixArrayDimension(const ResolveContext & context) : context( context ) {}
    172173
    173174        const ast::ArrayType * previsit (const ast::ArrayType * arrayType) {
    174175                if (!arrayType->dimension) return arrayType;
    175176                auto mutType = mutate(arrayType);
    176                 ast::ptr<ast::Type> sizetype = ast::sizeType ? ast::sizeType : new ast::BasicType(ast::BasicType::LongUnsignedInt);
    177                 mutType->dimension = findSingleExpression(arrayType->dimension, sizetype, _symtab);
     177                auto globalSizeType = context.global.sizeType;
     178                ast::ptr<ast::Type> sizetype = globalSizeType ? globalSizeType : new ast::BasicType(ast::BasicType::LongUnsignedInt);
     179                mutType->dimension = findSingleExpression(arrayType->dimension, sizetype, context );
    178180
    179181                if (InitTweak::isConstExpr(mutType->dimension)) {
     
    187189};
    188190
    189 const ast::Type * fixArrayType( const ast::Type * type, const ast::SymbolTable & symtab) {
    190         ast::Pass<FixArrayDimension> visitor {symtab};
     191const ast::Type * fixArrayType( const ast::Type * type, const ResolveContext & context ) {
     192        ast::Pass<FixArrayDimension> visitor(context);
    191193        return type->accept(visitor);
    192194}
    193195
    194 const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ast::SymbolTable & symtab ) {
    195         if (!decl->isTypeFixed) {
    196                 auto mutDecl = mutate(decl);
    197                 auto resolvedType = resolveTypeof(decl->type, symtab);
    198                 resolvedType = fixArrayType(resolvedType, symtab);
     196const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ResolveContext & context ) {
     197        if (decl->isTypeFixed) {
     198                return decl;
     199        }
     200
     201        auto mutDecl = mutate(decl);
     202        {
     203                auto resolvedType = resolveTypeof(decl->type, context);
     204                resolvedType = fixArrayType(resolvedType, context);
    199205                mutDecl->type = resolvedType;
    200 
    201                 // check variable length if object is an array.
    202                 // xxx - should this be part of fixObjectType?
    203 
    204                 /*
    205                 if (auto arrayType = dynamic_cast<const ast::ArrayType *>(resolvedType)) {
    206                         auto dimExpr = findSingleExpression(arrayType->dimension, ast::sizeType, symtab);
    207                         if (auto varexpr = arrayType->dimension.as<ast::VariableExpr>()) {// hoisted previously
    208                                 if (InitTweak::isConstExpr(varexpr->var.strict_as<ast::ObjectDecl>()->init)) {
    209                                         auto mutType = mutate(arrayType);
    210                                         mutType->isVarLen = ast::LengthFlag::VariableLen;
    211                                         mutDecl->type = mutType;
    212                                 }
    213                         }
    214                 }
    215                 */
    216 
    217 
    218                 if (!mutDecl->name.empty())
    219                         mutDecl->mangleName = Mangle::mangle(mutDecl); // do not mangle unnamed variables
    220                
    221                 mutDecl->type = renameTyVars(mutDecl->type, RenameMode::GEN_EXPR_ID);
    222                 mutDecl->isTypeFixed = true;
    223                 return mutDecl;
    224         }
    225         return decl;
     206        }
     207
     208        // Do not mangle unnamed variables.
     209        if (!mutDecl->name.empty()) {
     210                mutDecl->mangleName = Mangle::mangle(mutDecl);
     211        }
     212
     213        mutDecl->type = renameTyVars(mutDecl->type, RenameMode::GEN_EXPR_ID);
     214        mutDecl->isTypeFixed = true;
     215        return mutDecl;
    226216}
    227217
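One consequence of the isTypeFixed guard above is that fixObjectType is idempotent. A usage sketch under that reading, where `decl` and `context` are hypothetical caller state:

        const ast::ObjectDecl * fixed = fixObjectType( decl, context );
        // A second call takes the isTypeFixed early return and hands back
        // its argument unchanged.
        assert( fixObjectType( fixed, context ) == fixed );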
  • src/ResolvExpr/ResolveTypeof.h

    rba897d21 r2e9b59b  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // ResolveTypeof.h -- 
     7// ResolveTypeof.h --
    88//
    99// Author           : Richard C. Bilson
    1010// Created On       : Sun May 17 12:14:53 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Jul 22 09:38:35 2017
    13 // Update Count     : 3
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Wed Mar 16 11:33:00 2022
     13// Update Count     : 4
    1414//
    1515
     
    2222namespace ast {
    2323        class Type;
    24         class SymbolTable;
    2524        class ObjectDecl;
    2625}
    2726
    2827namespace ResolvExpr {
     28        struct ResolveContext;
     29
    2930        Type *resolveTypeof( Type*, const SymTab::Indexer &indexer );
    30         const ast::Type * resolveTypeof( const ast::Type *, const ast::SymbolTable & );
    31         const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ast::SymbolTable & symtab );
     31        const ast::Type * resolveTypeof( const ast::Type *, const ResolveContext & );
     32        const ast::ObjectDecl * fixObjectType( const ast::ObjectDecl * decl , const ResolveContext & );
    3233} // namespace ResolvExpr
    3334
  • src/ResolvExpr/Resolver.cc

    rba897d21 r2e9b59b  
    99// Author           : Aaron B. Moss
    1010// Created On       : Sun May 17 12:17:01 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue Feb  1 16:27:14 2022
    13 // Update Count     : 245
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Fri Mar 18 10:41:00 2022
     13// Update Count     : 247
    1414//
    1515
     
    997997                /// Calls the CandidateFinder and finds the single best candidate
    998998                CandidateRef findUnfinishedKindExpression(
    999                         const ast::Expr * untyped, const ast::SymbolTable & symtab, const std::string & kind,
     999                        const ast::Expr * untyped, const ResolveContext & context, const std::string & kind,
    10001000                        std::function<bool(const Candidate &)> pred = anyCandidate, ResolvMode mode = {}
    10011001                ) {
     
    10071007                        ++recursion_level;
    10081008                        ast::TypeEnvironment env;
    1009                         CandidateFinder finder{ symtab, env };
     1009                        CandidateFinder finder( context, env );
    10101010                        finder.find( untyped, recursion_level == 1 ? mode.atTopLevel() : mode );
    10111011                        --recursion_level;
     
    11291129
    11301130        ast::ptr< ast::Expr > resolveInVoidContext(
    1131                 const ast::Expr * expr, const ast::SymbolTable & symtab, ast::TypeEnvironment & env
     1131                const ast::Expr * expr, const ResolveContext & context,
     1132                ast::TypeEnvironment & env
    11321133        ) {
    11331134                assertf( expr, "expected a non-null expression" );
     
    11361137                ast::ptr< ast::CastExpr > untyped = new ast::CastExpr{ expr };
    11371138                CandidateRef choice = findUnfinishedKindExpression(
    1138                         untyped, symtab, "", anyCandidate, ResolvMode::withAdjustment() );
     1139                        untyped, context, "", anyCandidate, ResolvMode::withAdjustment() );
    11391140
    11401141                // a cast expression has either 0 or 1 interpretations (by language rules);
     
    11491150                /// context.
    11501151                ast::ptr< ast::Expr > findVoidExpression(
    1151                         const ast::Expr * untyped, const ast::SymbolTable & symtab
     1152                        const ast::Expr * untyped, const ResolveContext & context
    11521153                ) {
    11531154                        ast::TypeEnvironment env;
    1154                         ast::ptr< ast::Expr > newExpr = resolveInVoidContext( untyped, symtab, env );
     1155                        ast::ptr< ast::Expr > newExpr = resolveInVoidContext( untyped, context, env );
    11551156                        finishExpr( newExpr, env, untyped->env );
    11561157                        return newExpr;
     
    11631164                /// lowest cost, returning the resolved version
    11641165                ast::ptr< ast::Expr > findKindExpression(
    1165                         const ast::Expr * untyped, const ast::SymbolTable & symtab,
     1166                        const ast::Expr * untyped, const ResolveContext & context,
    11661167                        std::function<bool(const Candidate &)> pred = anyCandidate,
    11671168                        const std::string & kind = "", ResolvMode mode = {}
     
    11691170                        if ( ! untyped ) return {};
    11701171                        CandidateRef choice =
    1171                                 findUnfinishedKindExpression( untyped, symtab, kind, pred, mode );
     1172                                findUnfinishedKindExpression( untyped, context, kind, pred, mode );
    11721173                        ResolvExpr::finishExpr( choice->expr, choice->env, untyped->env );
    11731174                        return std::move( choice->expr );
     
    11761177                /// Resolve `untyped` to the single expression whose candidate is the best match
    11771178                ast::ptr< ast::Expr > findSingleExpression(
    1178                         const ast::Expr * untyped, const ast::SymbolTable & symtab
     1179                        const ast::Expr * untyped, const ResolveContext & context
    11791180                ) {
    11801181                        Stats::ResolveTime::start( untyped );
    1181                         auto res = findKindExpression( untyped, symtab );
     1182                        auto res = findKindExpression( untyped, context );
    11821183                        Stats::ResolveTime::stop();
    11831184                        return res;
     
    11861187
    11871188        ast::ptr< ast::Expr > findSingleExpression(
    1188                 const ast::Expr * untyped, const ast::Type * type, const ast::SymbolTable & symtab
     1189                const ast::Expr * untyped, const ast::Type * type,
     1190                const ResolveContext & context
    11891191        ) {
    11901192                assert( untyped && type );
    11911193                ast::ptr< ast::Expr > castExpr = new ast::CastExpr{ untyped, type };
    1192                 ast::ptr< ast::Expr > newExpr = findSingleExpression( castExpr, symtab );
    1193                 removeExtraneousCast( newExpr, symtab );
     1194                ast::ptr< ast::Expr > newExpr = findSingleExpression( castExpr, context );
     1195                removeExtraneousCast( newExpr, context.symtab );
    11941196                return newExpr;
    11951197        }
     
    12171219                /// Resolve `untyped` as an integral expression, returning the resolved version
    12181220                ast::ptr< ast::Expr > findIntegralExpression(
    1219                         const ast::Expr * untyped, const ast::SymbolTable & symtab
     1221                        const ast::Expr * untyped, const ResolveContext & context
    12201222                ) {
    1221                         return findKindExpression( untyped, symtab, hasIntegralType, "condition" );
     1223                        return findKindExpression( untyped, context, hasIntegralType, "condition" );
    12221224                }
    12231225
     
    12491251                // for work previously in GenInit
    12501252                static InitTweak::ManagedTypes_new managedTypes;
     1253                ResolveContext context;
    12511254
    12521255                bool inEnumDecl = false;
     
    12541257        public:
    12551258                static size_t traceId;
    1256                 Resolver_new() = default;
    1257                 Resolver_new( const ast::SymbolTable & syms ) { symtab = syms; }
     1259                Resolver_new( const ast::TranslationGlobal & global ) :
     1260                        context{ symtab, global } {}
     1261                Resolver_new( const ResolveContext & context ) :
     1262                        ast::WithSymbolTable{ context.symtab },
     1263                        context{ symtab, context.global } {}
    12581264
    12591265                const ast::FunctionDecl * previsit( const ast::FunctionDecl * );
     
    12721278                const ast::AsmStmt *         previsit( const ast::AsmStmt * );
    12731279                const ast::IfStmt *          previsit( const ast::IfStmt * );
    1274                 const ast::WhileDoStmt *       previsit( const ast::WhileDoStmt * );
     1280                const ast::WhileDoStmt *     previsit( const ast::WhileDoStmt * );
    12751281                const ast::ForStmt *         previsit( const ast::ForStmt * );
    12761282                const ast::SwitchStmt *      previsit( const ast::SwitchStmt * );
    1277                 const ast::CaseStmt *        previsit( const ast::CaseStmt * );
     1283                const ast::CaseClause *      previsit( const ast::CaseClause * );
    12781284                const ast::BranchStmt *      previsit( const ast::BranchStmt * );
    12791285                const ast::ReturnStmt *      previsit( const ast::ReturnStmt * );
    12801286                const ast::ThrowStmt *       previsit( const ast::ThrowStmt * );
    1281                 const ast::CatchStmt *       previsit( const ast::CatchStmt * );
    1282                 const ast::CatchStmt *       postvisit( const ast::CatchStmt * );
     1287                const ast::CatchClause *     previsit( const ast::CatchClause * );
     1288                const ast::CatchClause *     postvisit( const ast::CatchClause * );
    12831289                const ast::WaitForStmt *     previsit( const ast::WaitForStmt * );
    12841290                const ast::WithStmt *        previsit( const ast::WithStmt * );
     
    12991305
    13001306        void resolve( ast::TranslationUnit& translationUnit ) {
    1301                 ast::Pass< Resolver_new >::run( translationUnit );
     1307                ast::Pass< Resolver_new >::run( translationUnit, translationUnit.global );
    13021308        }
    13031309
    13041310        ast::ptr< ast::Init > resolveCtorInit(
    1305                 const ast::ConstructorInit * ctorInit, const ast::SymbolTable & symtab
     1311                const ast::ConstructorInit * ctorInit, const ResolveContext & context
    13061312        ) {
    13071313                assert( ctorInit );
    1308                 ast::Pass< Resolver_new > resolver{ symtab };
     1314                ast::Pass< Resolver_new > resolver( context );
    13091315                return ctorInit->accept( resolver );
    13101316        }
    13111317
    13121318        const ast::Expr * resolveStmtExpr(
    1313                 const ast::StmtExpr * stmtExpr, const ast::SymbolTable & symtab
     1319                const ast::StmtExpr * stmtExpr, const ResolveContext & context
    13141320        ) {
    13151321                assert( stmtExpr );
    1316                 ast::Pass< Resolver_new > resolver{ symtab };
     1322                ast::Pass< Resolver_new > resolver( context );
    13171323                auto ret = mutate(stmtExpr->accept(resolver));
    13181324                strict_dynamic_cast< ast::StmtExpr * >( ret )->computeResult();
     
    13211327
    13221328        namespace {
    1323                 const ast::Attribute * handleAttribute(const CodeLocation & loc, const ast::Attribute * attr, const ast::SymbolTable & symtab) {
     1329                const ast::Attribute * handleAttribute(const CodeLocation & loc, const ast::Attribute * attr, const ResolveContext & context) {
    13241330                        std::string name = attr->normalizedName();
    13251331                        if (name == "constructor" || name == "destructor") {
    13261332                                if (attr->params.size() == 1) {
    13271333                                        auto arg = attr->params.front();
    1328                                         auto resolved = ResolvExpr::findSingleExpression( arg, new ast::BasicType( ast::BasicType::LongLongSignedInt ), symtab );
     1334                                        auto resolved = ResolvExpr::findSingleExpression( arg, new ast::BasicType( ast::BasicType::LongLongSignedInt ), context );
    13291335                                        auto result = eval(arg);
    13301336
     
    13691375
    13701376                        for (auto & attr: mutDecl->attributes) {
    1371                                 attr = handleAttribute(mutDecl->location, attr, symtab);
     1377                                attr = handleAttribute(mutDecl->location, attr, context );
    13721378                        }
    13731379
     
    13791385                        for (auto & typeParam : mutDecl->type_params) {
    13801386                                symtab.addType(typeParam);
    1381                                 mutType->forall.emplace_back(new ast::TypeInstType(typeParam->name, typeParam));
     1387                                mutType->forall.emplace_back(new ast::TypeInstType(typeParam));
    13821388                        }
    13831389                        for (auto & asst : mutDecl->assertions) {
    1384                                 asst = fixObjectType(asst.strict_as<ast::ObjectDecl>(), symtab);
     1390                                asst = fixObjectType(asst.strict_as<ast::ObjectDecl>(), context);
    13851391                                symtab.addId(asst);
    13861392                                mutType->assertions.emplace_back(new ast::VariableExpr(functionDecl->location, asst));
     
    13941400
    13951401                        for (auto & param : mutDecl->params) {
    1396                                 param = fixObjectType(param.strict_as<ast::ObjectDecl>(), symtab);
     1402                                param = fixObjectType(param.strict_as<ast::ObjectDecl>(), context);
    13971403                                symtab.addId(param);
    13981404                                paramTypes.emplace_back(param->get_type());
    13991405                        }
    14001406                        for (auto & ret : mutDecl->returns) {
    1401                                 ret = fixObjectType(ret.strict_as<ast::ObjectDecl>(), symtab);
     1407                                ret = fixObjectType(ret.strict_as<ast::ObjectDecl>(), context);
    14021408                                returnTypes.emplace_back(ret->get_type());
    14031409                        }
     
    14701476                        // enumerator initializers should not use the enum type to initialize, since the
    14711477                        // enum type is still incomplete at this point. Use `int` instead.
    1472                         objectDecl = fixObjectType(objectDecl, symtab);
    1473                         currentObject = ast::CurrentObject{
    1474                                 objectDecl->location, new ast::BasicType{ ast::BasicType::SignedInt } };
     1478
     1479                        if (dynamic_cast< const ast::EnumInstType * >( objectDecl->get_type() )->base->base) { // const ast::PointerType &
     1480                                // const ast::Type * enumBase =  (dynamic_cast< const ast::EnumInstType * >( objectDecl->get_type() )->base->base.get());
     1481                                // const ast::PointerType * enumBaseAsPtr = dynamic_cast<const ast::PointerType *>(enumBase);
     1482
     1483                                // if ( enumBaseAsPtr ) {
     1484                                //      const ast::Type * pointerBase = enumBaseAsPtr->base.get();
     1485                                //      if ( dynamic_cast<const ast::BasicType *>(pointerBase) ) {
     1486                                //              objectDecl = fixObjectType(objectDecl, context);
     1487                                //              if (dynamic_cast<const ast::BasicType *>(pointerBase)->kind == ast::BasicType::Char)
     1488                                //              currentObject = ast::CurrentObject{
     1489                                //                      objectDecl->location,  new ast::PointerType{
     1490                                //                              new ast::BasicType{ ast::BasicType::Char }
     1491                                //                      } };
     1492                                //      } else {
     1493                                //              objectDecl = fixObjectType(objectDecl, context);
     1494                                //              currentObject = ast::CurrentObject{objectDecl->location, new ast::BasicType{ ast::BasicType::SignedInt } };
     1495                                //      }
     1496                                // }
     1497                                objectDecl = fixObjectType( objectDecl, context );
     1498                                const ast::Type * enumBase =  (dynamic_cast< const ast::EnumInstType * >( objectDecl->get_type() )->base->base.get());
     1499                                currentObject = ast::CurrentObject{
     1500                                        objectDecl->location,
     1501                                        enumBase
     1502                                };
     1503                        } else {
     1504                                objectDecl = fixObjectType( objectDecl, context );
     1505                                currentObject = ast::CurrentObject{
     1506                                        objectDecl->location, new ast::BasicType{ ast::BasicType::SignedInt } };
     1507                        }
     1508
    14751509                }
    14761510                else {
    14771511                        if (!objectDecl->isTypeFixed) {
    1478                                 auto newDecl = fixObjectType(objectDecl, symtab);
     1512                                auto newDecl = fixObjectType(objectDecl, context);
    14791513                                auto mutDecl = mutate(newDecl);
    14801514
     
    15071541                        // nested type decls are hoisted already. no need to do anything
    15081542                        if (auto obj = member.as<ast::ObjectDecl>()) {
    1509                                 member = fixObjectType(obj, symtab);
     1543                                member = fixObjectType(obj, context);
    15101544                        }
    15111545                }
     
    15301564                return ast::mutate_field(
    15311565                        assertDecl, &ast::StaticAssertDecl::cond,
    1532                         findIntegralExpression( assertDecl->cond, symtab ) );
     1566                        findIntegralExpression( assertDecl->cond, context ) );
    15331567        }
    15341568
    15351569        template< typename PtrType >
    1536         const PtrType * handlePtrType( const PtrType * type, const ast::SymbolTable & symtab ) {
     1570        const PtrType * handlePtrType( const PtrType * type, const ResolveContext & context ) {
    15371571                if ( type->dimension ) {
    1538                         ast::ptr< ast::Type > sizeType = ast::sizeType;
     1572                        ast::ptr< ast::Type > sizeType = context.global.sizeType;
    15391573                        ast::mutate_field(
    15401574                                type, &PtrType::dimension,
    1541                                 findSingleExpression( type->dimension, sizeType, symtab ) );
     1575                                findSingleExpression( type->dimension, sizeType, context ) );
    15421576                }
    15431577                return type;
     
    15451579
    15461580        const ast::ArrayType * Resolver_new::previsit( const ast::ArrayType * at ) {
    1547                 return handlePtrType( at, symtab );
     1581                return handlePtrType( at, context );
    15481582        }
    15491583
    15501584        const ast::PointerType * Resolver_new::previsit( const ast::PointerType * pt ) {
    1551                 return handlePtrType( pt, symtab );
     1585                return handlePtrType( pt, context );
    15521586        }
    15531587
     
    15571591
    15581592                return ast::mutate_field(
    1559                         exprStmt, &ast::ExprStmt::expr, findVoidExpression( exprStmt->expr, symtab ) );
     1593                        exprStmt, &ast::ExprStmt::expr, findVoidExpression( exprStmt->expr, context ) );
    15601594        }
    15611595
     
    15641598
    15651599                asmExpr = ast::mutate_field(
    1566                         asmExpr, &ast::AsmExpr::operand, findVoidExpression( asmExpr->operand, symtab ) );
     1600                        asmExpr, &ast::AsmExpr::operand, findVoidExpression( asmExpr->operand, context ) );
    15671601
    15681602                return asmExpr;
     
    15781612        const ast::IfStmt * Resolver_new::previsit( const ast::IfStmt * ifStmt ) {
    15791613                return ast::mutate_field(
    1580                         ifStmt, &ast::IfStmt::cond, findIntegralExpression( ifStmt->cond, symtab ) );
     1614                        ifStmt, &ast::IfStmt::cond, findIntegralExpression( ifStmt->cond, context ) );
    15811615        }
    15821616
    15831617        const ast::WhileDoStmt * Resolver_new::previsit( const ast::WhileDoStmt * whileDoStmt ) {
    15841618                return ast::mutate_field(
    1585                         whileDoStmt, &ast::WhileDoStmt::cond, findIntegralExpression( whileDoStmt->cond, symtab ) );
     1619                        whileDoStmt, &ast::WhileDoStmt::cond, findIntegralExpression( whileDoStmt->cond, context ) );
    15861620        }
    15871621
     
    15891623                if ( forStmt->cond ) {
    15901624                        forStmt = ast::mutate_field(
    1591                                 forStmt, &ast::ForStmt::cond, findIntegralExpression( forStmt->cond, symtab ) );
     1625                                forStmt, &ast::ForStmt::cond, findIntegralExpression( forStmt->cond, context ) );
    15921626                }
    15931627
    15941628                if ( forStmt->inc ) {
    15951629                        forStmt = ast::mutate_field(
    1596                                 forStmt, &ast::ForStmt::inc, findVoidExpression( forStmt->inc, symtab ) );
     1630                                forStmt, &ast::ForStmt::inc, findVoidExpression( forStmt->inc, context ) );
    15971631                }
    15981632
     
    16041638                switchStmt = ast::mutate_field(
    16051639                        switchStmt, &ast::SwitchStmt::cond,
    1606                         findIntegralExpression( switchStmt->cond, symtab ) );
     1640                        findIntegralExpression( switchStmt->cond, context ) );
    16071641                currentObject = ast::CurrentObject{ switchStmt->location, switchStmt->cond->result };
    16081642                return switchStmt;
    16091643        }
    16101644
    1611         const ast::CaseStmt * Resolver_new::previsit( const ast::CaseStmt * caseStmt ) {
     1645        const ast::CaseClause * Resolver_new::previsit( const ast::CaseClause * caseStmt ) {
    16121646                if ( caseStmt->cond ) {
    16131647                        std::deque< ast::InitAlternative > initAlts = currentObject.getOptions();
     
    16171651                        ast::ptr< ast::Expr > untyped =
    16181652                                new ast::CastExpr{ caseStmt->location, caseStmt->cond, initAlts.front().type };
    1619                         ast::ptr< ast::Expr > newExpr = findSingleExpression( untyped, symtab );
     1653                        ast::ptr< ast::Expr > newExpr = findSingleExpression( untyped, context );
    16201654
    16211655                        // case condition cannot have a cast in C, so it must be removed here, regardless of
     
    16251659                        }
    16261660
    1627                         caseStmt = ast::mutate_field( caseStmt, &ast::CaseStmt::cond, newExpr );
     1661                        caseStmt = ast::mutate_field( caseStmt, &ast::CaseClause::cond, newExpr );
    16281662                }
    16291663                return caseStmt;
     
    16381672                        branchStmt = ast::mutate_field(
    16391673                                branchStmt, &ast::BranchStmt::computedTarget,
    1640                                 findSingleExpression( branchStmt->computedTarget, target, symtab ) );
     1674                                findSingleExpression( branchStmt->computedTarget, target, context ) );
    16411675                }
    16421676                return branchStmt;
     
    16481682                        returnStmt = ast::mutate_field(
    16491683                                returnStmt, &ast::ReturnStmt::expr,
    1650                                 findSingleExpression( returnStmt->expr, functionReturn, symtab ) );
     1684                                findSingleExpression( returnStmt->expr, functionReturn, context ) );
    16511685                }
    16521686                return returnStmt;
     
    16631697                        throwStmt = ast::mutate_field(
    16641698                                throwStmt, &ast::ThrowStmt::expr,
    1665                                 findSingleExpression( throwStmt->expr, exceptType, symtab ) );
     1699                                findSingleExpression( throwStmt->expr, exceptType, context ) );
    16661700                }
    16671701                return throwStmt;
    16681702        }
    16691703
    1670         const ast::CatchStmt * Resolver_new::previsit( const ast::CatchStmt * catchStmt ) {
     1704        const ast::CatchClause * Resolver_new::previsit( const ast::CatchClause * catchClause ) {
    16711705                // Until we are very sure this invariant (ifs that move between passes have a then clause)
    16721706                // holds, check it. This allows a check for when to decode the mangling.
    1673                 if ( auto ifStmt = catchStmt->body.as<ast::IfStmt>() ) {
     1707                if ( auto ifStmt = catchClause->body.as<ast::IfStmt>() ) {
    16741708                        assert( ifStmt->then );
    16751709                }
    16761710                // Encode the catchStmt so the condition can see the declaration.
    1677                 if ( catchStmt->cond ) {
    1678                         ast::CatchStmt * stmt = mutate( catchStmt );
    1679                         stmt->body = new ast::IfStmt( stmt->location, stmt->cond, nullptr, stmt->body );
    1680                         stmt->cond = nullptr;
    1681                         return stmt;
    1682                 }
    1683                 return catchStmt;
    1684         }
    1685 
    1686         const ast::CatchStmt * Resolver_new::postvisit( const ast::CatchStmt * catchStmt ) {
     1711                if ( catchClause->cond ) {
     1712                        ast::CatchClause * clause = mutate( catchClause );
     1713                        clause->body = new ast::IfStmt( clause->location, clause->cond, nullptr, clause->body );
     1714                        clause->cond = nullptr;
     1715                        return clause;
     1716                }
     1717                return catchClause;
     1718        }
     1719
     1720        const ast::CatchClause * Resolver_new::postvisit( const ast::CatchClause * catchClause ) {
    16871721                // Decode the catchStmt so everything is stored properly.
    1688                 const ast::IfStmt * ifStmt = catchStmt->body.as<ast::IfStmt>();
     1722                const ast::IfStmt * ifStmt = catchClause->body.as<ast::IfStmt>();
    16891723                if ( nullptr != ifStmt && nullptr == ifStmt->then ) {
    16901724                        assert( ifStmt->cond );
    16911725                        assert( ifStmt->else_ );
    1692                         ast::CatchStmt * stmt = ast::mutate( catchStmt );
    1693                         stmt->cond = ifStmt->cond;
    1694                         stmt->body = ifStmt->else_;
     1726                        ast::CatchClause * clause = ast::mutate( catchClause );
     1727                        clause->cond = ifStmt->cond;
     1728                        clause->body = ifStmt->else_;
    16951729                        // ifStmt should be implicitly deleted here.
    1696                         return stmt;
    1697                 }
    1698                 return catchStmt;
     1730                        return clause;
     1731                }
     1732                return catchClause;
    16991733        }
    17001734
     
    17071741
    17081742                        ast::TypeEnvironment env;
    1709                         CandidateFinder funcFinder{ symtab, env };
     1743                        CandidateFinder funcFinder( context, env );
    17101744
    17111745                        // Find all candidates for a function in canonical form
     
    19211955                                );
    19221956
    1923                                 clause2.target.args.emplace_back( findSingleExpression( init, symtab ) );
     1957                                clause2.target.args.emplace_back( findSingleExpression( init, context ) );
    19241958                        }
    19251959
    19261960                        // Resolve the conditions as if it were an IfStmt, statements normally
    1927                         clause2.cond = findSingleExpression( clause.cond, symtab );
     1961                        clause2.cond = findSingleExpression( clause.cond, context );
    19281962                        clause2.stmt = clause.stmt->accept( *visitor );
    19291963
     
    19401974                        ast::ptr< ast::Type > target =
    19411975                                new ast::BasicType{ ast::BasicType::LongLongUnsignedInt };
    1942                         timeout2.time = findSingleExpression( stmt->timeout.time, target, symtab );
    1943                         timeout2.cond = findSingleExpression( stmt->timeout.cond, symtab );
     1976                        timeout2.time = findSingleExpression( stmt->timeout.time, target, context );
     1977                        timeout2.cond = findSingleExpression( stmt->timeout.cond, context );
    19441978                        timeout2.stmt = stmt->timeout.stmt->accept( *visitor );
    19451979
     
    19541988                        ast::WaitForStmt::OrElse orElse2;
    19551989
    1956                         orElse2.cond = findSingleExpression( stmt->orElse.cond, symtab );
     1990                        orElse2.cond = findSingleExpression( stmt->orElse.cond, context );
    19571991                        orElse2.stmt = stmt->orElse.stmt->accept( *visitor );
    19581992
     
    19752009                for (auto & expr : exprs) {
    19762010                        // only struct- and union-typed expressions are viable candidates
    1977                         expr = findKindExpression( expr, symtab, structOrUnion, "with expression" );
     2011                        expr = findKindExpression( expr, context, structOrUnion, "with expression" );
    19782012
    19792013                        // if with expression might be impure, create a temporary so that it is evaluated once
     
    20012035                ast::ptr< ast::Expr > untyped = new ast::UntypedInitExpr{
    20022036                        singleInit->location, singleInit->value, currentObject.getOptions() };
    2003                 ast::ptr<ast::Expr> newExpr = findSingleExpression( untyped, symtab );
     2037                ast::ptr<ast::Expr> newExpr = findSingleExpression( untyped, context );
    20042038                const ast::InitExpr * initExpr = newExpr.strict_as< ast::InitExpr >();
    20052039
  • src/ResolvExpr/Resolver.h

    rba897d21 r2e9b59b  
    99// Author           : Richard C. Bilson
    1010// Created On       : Sun May 17 12:18:34 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Feb 18 20:40:38 2019
    13 // Update Count     : 4
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Wed Mar 16 11:32:00 2022
     13// Update Count     : 5
    1414//
    1515
     
    2323class Declaration;
    2424class Expression;
     25class DeletedExpr;
    2526class StmtExpr;
     27class Type;
    2628namespace SymTab {
    2729        class Indexer;
     
    3537        class StmtExpr;
    3638        class SymbolTable;
     39        class TranslationGlobal;
    3740        class TranslationUnit;
    3841        class Type;
     
    5558        void resolveWithExprs( std::list< Declaration * > & translationUnit );
    5659
     60        /// Helper Type: Passes around information between various sub-calls.
     61        struct ResolveContext {
     62                const ast::SymbolTable & symtab;
     63                const ast::TranslationGlobal & global;
     64        };
     65
    5766        /// Checks types and binds syntactic constructs to typed representations
    5867        void resolve( ast::TranslationUnit& translationUnit );
     
    6271        /// context.
    6372        ast::ptr< ast::Expr > resolveInVoidContext(
    64                 const ast::Expr * expr, const ast::SymbolTable & symtab, ast::TypeEnvironment & env );
     73                const ast::Expr * expr, const ResolveContext &, ast::TypeEnvironment & env );
    6574        /// Resolve `untyped` to the single expression whose candidate is the best match for the
    6675        /// given type.
    6776        ast::ptr< ast::Expr > findSingleExpression(
    68                 const ast::Expr * untyped, const ast::Type * type, const ast::SymbolTable & symtab );
     77                const ast::Expr * untyped, const ast::Type * type, const ResolveContext & );
    6978        ast::ptr< ast::Expr > findVoidExpression(
    70                 const ast::Expr * untyped, const ast::SymbolTable & symtab);
     79                const ast::Expr * untyped, const ResolveContext & );
    7180        /// Resolves a constructor init expression
    7281        ast::ptr< ast::Init > resolveCtorInit(
    73                 const ast::ConstructorInit * ctorInit, const ast::SymbolTable & symtab );
     82                const ast::ConstructorInit * ctorInit, const ResolveContext & context );
    7483        /// Resolves a statement expression
    7584        const ast::Expr * resolveStmtExpr(
    76                 const ast::StmtExpr * stmtExpr, const ast::SymbolTable & symtab );
     85                const ast::StmtExpr * stmtExpr, const ResolveContext & context );
    7786} // namespace ResolvExpr
    7887
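ResolveContext is the pivot of this changeset: rather than threading a bare ast::SymbolTable through every resolver signature, callers now hand over the table together with the translation unit's globals (notably sizeType). A construction sketch, assuming a populated ast::TranslationUnit named `unit` and some untyped expression `expr`:

        ast::SymbolTable symtab;
        ResolvExpr::ResolveContext context{ symtab, unit.global };

        ast::TypeEnvironment env;
        ast::ptr< ast::Expr > resolved =
                ResolvExpr::resolveInVoidContext( expr, context, env );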
  • src/ResolvExpr/Unify.cc

    rba897d21 r2e9b59b  
    943943                        // check that the other type is compatible and named the same
    944944                        auto otherInst = dynamic_cast< const XInstType * >( other );
    945                         this->result = otherInst && inst->name == otherInst->name;
     945                        if (otherInst && inst->name == otherInst->name) this->result = otherInst;
    946946                        return otherInst;
    947947                }
  • src/SymTab/Validate.cc

    rba897d21 r2e9b59b  
    395395                                TranslateDimensionGenericParameters::translateDimensions( translationUnit );
    396396                        });
     397                        if (!useNewAST) {
    397398                        Stats::Time::TimeBlock("Resolve Enum Initializers", [&]() {
    398399                                acceptAll( translationUnit, rei ); // must happen after translateDimensions because rei needs identifier lookup, which needs name mangling
    399400                        });
     401                        }
    400402                        Stats::Time::TimeBlock("Check Function Returns", [&]() {
    401403                                ReturnChecker::checkFunctionReturns( translationUnit );
     
    405407                        });
    406408                }
     409        }
     410
     411        static void decayForallPointers( std::list< Declaration * > & translationUnit ) {
     412                PassVisitor<TraitExpander_old> te;
     413                acceptAll( translationUnit, te );
     414                PassVisitor<AssertionFixer_old> af;
     415                acceptAll( translationUnit, af );
     416                PassVisitor<CheckOperatorTypes_old> cot;
     417                acceptAll( translationUnit, cot );
     418                PassVisitor<FixUniqueIds_old> fui;
     419                acceptAll( translationUnit, fui );
    407420        }
    408421
     
    474487        }
    475488
    476         void decayForallPointers( std::list< Declaration * > & translationUnit ) {
    477                 PassVisitor<TraitExpander_old> te;
    478                 acceptAll( translationUnit, te );
    479                 PassVisitor<AssertionFixer_old> af;
    480                 acceptAll( translationUnit, af );
    481                 PassVisitor<CheckOperatorTypes_old> cot;
    482                 acceptAll( translationUnit, cot );
    483                 PassVisitor<FixUniqueIds_old> fui;
    484                 acceptAll( translationUnit, fui );
    485         }
    486 
    487         void decayForallPointersA( std::list< Declaration * > & translationUnit ) {
    488                 PassVisitor<TraitExpander_old> te;
    489                 acceptAll( translationUnit, te );
    490         }
    491         void decayForallPointersB( std::list< Declaration * > & translationUnit ) {
    492                 PassVisitor<AssertionFixer_old> af;
    493                 acceptAll( translationUnit, af );
    494         }
    495         void decayForallPointersC( std::list< Declaration * > & translationUnit ) {
    496                 PassVisitor<CheckOperatorTypes_old> cot;
    497                 acceptAll( translationUnit, cot );
    498         }
    499         void decayForallPointersD( std::list< Declaration * > & translationUnit ) {
    500                 PassVisitor<FixUniqueIds_old> fui;
    501                 acceptAll( translationUnit, fui );
    502         }
    503 
    504489        void validate( std::list< Declaration * > &translationUnit, __attribute__((unused)) bool doDebug ) {
    505490                validate_A( translationUnit );
     
    989974                                        // need to resolve enumerator initializers early so that other passes that determine if an expression is constexpr have the appropriate information.
    990975                                        SingleInit * init = strict_dynamic_cast<SingleInit *>( field->init );
    991                                         ResolvExpr::findSingleExpression( init->value, new BasicType( Type::Qualifiers(), BasicType::SignedInt ), indexer );
     976                                        if ( !enumDecl->base || dynamic_cast<BasicType *>(enumDecl->base))
     977                                                ResolvExpr::findSingleExpression( init->value, new BasicType( Type::Qualifiers(), BasicType::SignedInt ), indexer );
     978                                        else {
     979                                                if (dynamic_cast<PointerType *>(enumDecl->base)) {
     980                                                        auto typePtr = dynamic_cast<PointerType *>(enumDecl->base);
     981                                                        ResolvExpr::findSingleExpression( init->value,
     982                                                         new PointerType( Type::Qualifiers(), typePtr->base ), indexer );
     983                                                } else {
     984                                                        ResolvExpr::findSingleExpression( init->value, new BasicType( Type::Qualifiers(), BasicType::SignedInt ), indexer );
     985                                                }
     986                                        }
     987                                       
    992988                                }
    993989                        }
     990
    994991                } // if
    995992        }
     
    12551252                        declsToAddBefore.push_back( new UnionDecl( aggDecl->name, noAttributes, tyDecl->linkage ) );
    12561253                } else if ( EnumInstType * enumDecl = dynamic_cast< EnumInstType * >( designatorType ) ) {
    1257                         declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage ) );
     1254                        // declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage, enumDecl->baseEnum->base ) );
     1255                        if (enumDecl->baseEnum) {
     1256                                declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage, enumDecl->baseEnum->base ) );
     1257                        } else {
     1258                                declsToAddBefore.push_back( new EnumDecl( enumDecl->name, noAttributes, tyDecl->linkage ) );
     1259                        }
    12581260                } // if
    12591261                return tyDecl->clone();
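The new enumerator-initializer logic keys the resolution target off the enum's base type. Condensed (a sketch of the rule the hunk spells out inline):

        // Enumerators of a pointer-based enum resolve against that pointer
        // type; every other enum keeps resolving against plain int.
        Type * target = nullptr;
        if ( auto ptrBase = dynamic_cast< PointerType * >( enumDecl->base ) ) {
                target = new PointerType( Type::Qualifiers(), ptrBase->base );
        } else {
                target = new BasicType( Type::Qualifiers(), BasicType::SignedInt );
        }
        ResolvExpr::findSingleExpression( init->value, target, indexer );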
  • src/SymTab/Validate.h

    rba897d21 r2e9b59b  
    4242        void validate_E( std::list< Declaration * > &translationUnit );
    4343        void validate_F( std::list< Declaration * > &translationUnit );
    44         void decayForallPointers( std::list< Declaration * > & translationUnit );
    45         void decayForallPointersA( std::list< Declaration * > & translationUnit );
    46         void decayForallPointersB( std::list< Declaration * > & translationUnit );
    47         void decayForallPointersC( std::list< Declaration * > & translationUnit );
    48         void decayForallPointersD( std::list< Declaration * > & translationUnit );
    4944
    5045        const ast::Type * validateType(
  • src/SynTree/AggregateDecl.cc

    rba897d21 r2e9b59b  
    5959        } // if
    6060        os << " with body " << has_body();
    61 
    6261        if ( ! parameters.empty() ) {
    6362                os << endl << indent << "... with parameters" << endl;
     
    106105const char * EnumDecl::typeString() const { return aggrString( Enum ); }
    107106
     107void EnumDecl::print( std::ostream & os, Indenter indent ) const {
     108        AggregateDecl::print(os, indent);
     109        os << " with base? " << (base? "True" : "False") << std::endl;
     110        if ( base ) {
     111                os << "Base Type of Enum:" << std::endl;
     112                base->print(os, indent);
     113        }
     114        os << std::endl << "End of EnumDecl::print" << std::endl;
     115}
     116
    108117const char * TraitDecl::typeString() const { return aggrString( Trait ); }
    109118
  • src/SynTree/BasicType.cc

    rba897d21 r2e9b59b  
    2929}
    3030
     31bool BasicType::isWholeNumber() const {
     32        return kind == Bool ||
     33                kind == Char ||
     34                kind == SignedChar ||
     35                kind == UnsignedChar ||
     36                kind == ShortSignedInt ||
     37                kind == ShortUnsignedInt ||
     38                kind == SignedInt ||
     39                kind == UnsignedInt ||
     40                kind == LongSignedInt ||
     41                kind == LongUnsignedInt ||
     42                kind == LongLongSignedInt ||
     43                kind == LongLongUnsignedInt ||
     44                kind == SignedInt128 ||
     45                kind == UnsignedInt128;
     46}
     47
    3148bool BasicType::isInteger() const {
    3249        return kind <= UnsignedInt128;
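
isWholeNumber complements the existing isInteger classifier; it appears alongside the enum base-type work elsewhere in this changeset, presumably so a base expressed as a basic type can be checked for being a whole number. A hypothetical use (the error-reporting call is illustrative only, not an API added by this changeset):

    // Hypothetical check: a basic-type enum base must be a whole number.
    if ( auto bt = dynamic_cast< BasicType * >( enumDecl->base ) ) {
        if ( ! bt->isWholeNumber() ) {
            SemanticError( enumDecl, "enum base must be a whole-number type" );
        }
    }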
  • src/SynTree/Declaration.h

    rba897d21 r2e9b59b  
    144144        virtual void print( std::ostream & os, Indenter indent = {} ) const override;
    145145        virtual void printShort( std::ostream & os, Indenter indent = {} ) const override;
     146
     147        // TODO: Move to the right place
     148        void checkAssignedValue() const;
    146149};
    147150
     
    287290        AggregateDecl * set_body( bool body ) { AggregateDecl::body = body; return this; }
    288291
    289         virtual void print( std::ostream & os, Indenter indent = {} ) const override final;
     292        virtual void print( std::ostream & os, Indenter indent = {} ) const override;
    290293        virtual void printShort( std::ostream & os, Indenter indent = {} ) const override;
    291294  protected:
     
    335338        typedef AggregateDecl Parent;
    336339  public:
    337         EnumDecl( const std::string & name, const std::list< Attribute * > & attributes = std::list< class Attribute * >(), LinkageSpec::Spec linkage = LinkageSpec::Cforall ) : Parent( name, attributes, linkage ) {}
    338         EnumDecl( const EnumDecl & other ) : Parent( other ) {}
     340        EnumDecl( const std::string & name,
     341                const std::list< Attribute * > & attributes = std::list< class Attribute * >(),
     342                LinkageSpec::Spec linkage = LinkageSpec::Cforall,
     343                Type * baseType = nullptr ) : Parent( name, attributes, linkage ), base( baseType ) {}
     344        EnumDecl( const EnumDecl & other ) : Parent( other ), base( other.base ) {}
    339345
    340346        bool valueOf( Declaration * enumerator, long long int & value );
     
    344350        virtual void accept( Visitor & v ) const override { v.visit( this ); }
    345351        virtual Declaration * acceptMutator( Mutator & m )  override { return m.mutate( this ); }
    346   private:
     352        Type * base;
    347353        std::unordered_map< std::string, long long int > enumValues;
     354        virtual void print( std::ostream & os, Indenter indent = {} ) const override final;
     355  private:
     356        // std::unordered_map< std::string, long long int > enumValues;
    348357        virtual const char * typeString() const override;
    349358};
  • src/SynTree/Type.h

    rba897d21 r2e9b59b  
    268268        virtual Type *acceptMutator( Mutator & m ) override { return m.mutate( this ); }
    269269        virtual void print( std::ostream & os, Indenter indent = {} ) const override;
    270 
     270        bool isWholeNumber() const;
    271271        bool isInteger() const;
    272272};
  • src/SynTree/Visitor.h

    rba897d21 r2e9b59b  
    3535        virtual void visit( UnionDecl * node ) { visit( const_cast<const UnionDecl *>(node) ); }
    3636        virtual void visit( const UnionDecl * aggregateDecl ) = 0;
    37         virtual void visit( EnumDecl * node ) { visit( const_cast<const EnumDecl *>(node) ); }
     37        virtual void visit( EnumDecl * node ) { visit( const_cast<const EnumDecl *>(node) ); } // Marker 1
    3838        virtual void visit( const EnumDecl * aggregateDecl ) = 0;
    3939        virtual void visit( TraitDecl * node ) { visit( const_cast<const TraitDecl *>(node) ); }
     
    190190        virtual void visit( UnionInstType * node ) { visit( const_cast<const UnionInstType *>(node) ); }
    191191        virtual void visit( const UnionInstType * aggregateUseType ) = 0;
    192         virtual void visit( EnumInstType * node ) { visit( const_cast<const EnumInstType *>(node) ); }
     192        virtual void visit( EnumInstType * node ) { visit( const_cast<const EnumInstType *>(node) ); } // Marker 2
    193193        virtual void visit( const EnumInstType * aggregateUseType ) = 0;
    194194        virtual void visit( TraitInstType * node ) { visit( const_cast<const TraitInstType *>(node) ); }
  • src/Tuples/TupleAssignment.cc

    rba897d21 r2e9b59b  
    99// Author           : Rodolfo G. Esteves
    1010// Created On       : Mon May 18 07:44:20 2015
    11 // Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Dec 13 23:45:33 2019
    13 // Update Count     : 9
     11// Last Modified By : Andrew Beach
     12// Last Modified On : Wed Mar 16 14:06:00 2022
     13// Update Count     : 10
    1414//
    1515
     
    465465                                        // resolve ctor/dtor for the new object
    466466                                        ast::ptr< ast::Init > ctorInit = ResolvExpr::resolveCtorInit(
    467                                                         InitTweak::genCtorInit( location, ret ), spotter.crntFinder.localSyms );
     467                                                        InitTweak::genCtorInit( location, ret ), spotter.crntFinder.context );
    468468                                        // remove environments from subexpressions of stmtExpr
    469469                                        ast::Pass< EnvRemover > rm{ env };
     
    560560                                        // resolve the cast expression so that rhsCand return type is bound by the cast
    561561                                        // type as needed, and transfer the resulting environment
    562                                         ResolvExpr::CandidateFinder finder{ spotter.crntFinder.localSyms, env };
     562                                        ResolvExpr::CandidateFinder finder( spotter.crntFinder.context, env );
    563563                                        finder.find( rhsCand->expr, ResolvExpr::ResolvMode::withAdjustment() );
    564564                                        assert( finder.candidates.size() == 1 );
     
    609609                                        // explode the LHS so that each field of a tuple-valued expr is assigned
    610610                                        ResolvExpr::CandidateList lhs;
    611                                         explode( *lhsCand, crntFinder.localSyms, back_inserter(lhs), true );
     611                                        explode( *lhsCand, crntFinder.context.symtab, back_inserter(lhs), true );
    612612                                        for ( ResolvExpr::CandidateRef & cand : lhs ) {
    613613                                                // each LHS value must be a reference - some come in with a cast, if not
     
    629629                                                        if ( isTuple( rhsCand->expr ) ) {
    630630                                                                // multiple assignment
    631                                                                 explode( *rhsCand, crntFinder.localSyms, back_inserter(rhs), true );
     631                                                                explode( *rhsCand, crntFinder.context.symtab, back_inserter(rhs), true );
    632632                                                                matcher.reset(
    633633                                                                        new MultipleAssignMatcher{ *this, expr->location, lhs, rhs } );
     
    648648                                                        // multiple assignment
    649649                                                        ResolvExpr::CandidateList rhs;
    650                                                         explode( rhsCand, crntFinder.localSyms, back_inserter(rhs), true );
     650                                                        explode( rhsCand, crntFinder.context.symtab, back_inserter(rhs), true );
    651651                                                        matcher.reset(
    652652                                                                new MultipleAssignMatcher{ *this, expr->location, lhs, rhs } );
     
    678678                                )
    679679
    680                                 ResolvExpr::CandidateFinder finder{ crntFinder.localSyms, matcher->env };
     680                                ResolvExpr::CandidateFinder finder( crntFinder.context, matcher->env );
    681681
    682682                                try {
  • src/Validate/Autogen.cpp

    rba897d21 r2e9b59b  
    248248                structInst.params.push_back( new ast::TypeExpr(
    249249                        typeDecl->location,
    250                         new ast::TypeInstType( typeDecl->name, typeDecl )
     250                        new ast::TypeInstType( typeDecl )
    251251                ) );
    252252        }
     
    264264                unionInst.params.push_back( new ast::TypeExpr(
    265265                        unionDecl->location,
    266                         new ast::TypeInstType( typeDecl->name, typeDecl )
     266                        new ast::TypeInstType( typeDecl )
    267267                ) );
    268268        }
  • src/Validate/FindSpecialDeclsNew.cpp

    rba897d21 r2e9b59b  
    3030
    3131struct FindDeclsCore : public ast::WithShortCircuiting {
    32         ast::TranslationUnit::Global & global;
    33         FindDeclsCore( ast::TranslationUnit::Global & g ) : global( g ) {}
     32        ast::TranslationGlobal & global;
     33        FindDeclsCore( ast::TranslationGlobal & g ) : global( g ) {}
    3434
    3535        void previsit( const ast::Decl * decl );
     
    7474        ast::Pass<FindDeclsCore>::run( translationUnit, translationUnit.global );
    7575
    76         // TODO: When everything gets the globals from the translation unit,
    77         // remove these.
    78         ast::dereferenceOperator = translationUnit.global.dereference;
    79         ast::dtorStruct = translationUnit.global.dtorStruct;
    80         ast::dtorStructDestroy = translationUnit.global.dtorDestroy;
    81 
    8276        // TODO: conditionally generate 'fake' declarations for missing features,
    8377        // so that translation can proceed in the event that builtins, prelude,
  • src/Validate/ForallPointerDecay.cpp

    rba897d21 r2e9b59b  
    4141        for ( auto & type_param : decl->type_params ) {
    4242                type->forall.emplace_back(
    43                         new ast::TypeInstType( type_param->name, type_param ) );
     43                        new ast::TypeInstType( type_param ) );
    4444        }
    4545        for ( auto & assertion : decl->assertions ) {
     
    7070                AssertionList assertions;
    7171                // Substitute trait decl parameters for instance parameters.
    72                 ast::TypeSubstitution sub(
    73                         inst->base->params.begin(),
    74                         inst->base->params.end(),
    75                         inst->params.begin()
    76                 );
     72                ast::TypeSubstitution sub( inst->base->params, inst->params );
    7773                for ( const ast::ptr<ast::Decl> & decl : inst->base->members ) {
    7874                        ast::ptr<ast::DeclWithType> copy =
  • src/Validate/module.mk

    rba897d21 r2e9b59b  
    2222        Validate/ForallPointerDecay.cpp \
    2323        Validate/ForallPointerDecay.hpp \
     24        Validate/GenericParameter.cpp \
     25        Validate/GenericParameter.hpp \
    2426        Validate/HandleAttributes.cc \
    2527        Validate/HandleAttributes.h \
     
    2830        Validate/LabelAddressFixer.cpp \
    2931        Validate/LabelAddressFixer.hpp \
     32        Validate/ReturnCheck.cpp \
     33        Validate/ReturnCheck.hpp \
    3034        Validate/FindSpecialDeclsNew.cpp \
    3135        Validate/FindSpecialDecls.cc \
  • src/Virtual/Tables.cc

    rba897d21 r2e9b59b  
    1010// Created On       : Mon Aug 31 11:11:00 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Apr 21 15:36:00 2021
    13 // Update Count     : 2
    14 //
    15 
     12// Last Modified On : Fri Mar 11 10:40:00 2022
     13// Update Count     : 3
     14//
     15
     16#include "AST/Attribute.hpp"
     17#include "AST/Copy.hpp"
     18#include "AST/Decl.hpp"
     19#include "AST/Expr.hpp"
     20#include "AST/Init.hpp"
     21#include "AST/Stmt.hpp"
     22#include "AST/Type.hpp"
    1623#include <SynTree/Attribute.h>
    1724#include <SynTree/Declaration.h>
     
    7784}
    7885
     86static ast::ObjectDecl * makeVtableDeclaration(
     87                CodeLocation const & location, std::string const & name,
     88                ast::StructInstType const * type, ast::Init const * init ) {
     89        ast::Storage::Classes storage;
     90        if ( nullptr == init ) {
     91                storage.is_extern = true;
     92        }
     93        return new ast::ObjectDecl(
     94                location,
     95                name,
     96                type,
     97                init,
     98                storage,
     99                ast::Linkage::Cforall
     100        );
     101}
     102
    79103ObjectDecl * makeVtableForward( std::string const & name, StructInstType * type ) {
    80104        assert( type );
    81105        return makeVtableDeclaration( name, type, nullptr );
     106}
     107
     108ast::ObjectDecl * makeVtableForward(
     109                CodeLocation const & location, std::string const & name,
     110                ast::StructInstType const * vtableType ) {
     111        assert( vtableType );
     112        return makeVtableDeclaration( location, name, vtableType, nullptr );
    82113}
    83114
     
    123154}
    124155
     156static std::vector<ast::ptr<ast::Init>> buildInits(
     157                CodeLocation const & location,
     158                //std::string const & name,
     159                ast::StructInstType const * vtableType,
     160                ast::Type const * objectType ) {
     161        ast::StructDecl const * vtableStruct = vtableType->base;
     162
     163        std::vector<ast::ptr<ast::Init>> inits;
     164        inits.reserve( vtableStruct->members.size() );
     165
     166        // This is designed to run before the resolver.
     167        for ( auto field : vtableStruct->members ) {
     168                if ( std::string( "parent" ) == field->name ) {
     169                        // This will not work with polymorphic state.
     170                        auto oField = field.strict_as<ast::ObjectDecl>();
     171                        auto fieldType = oField->type.strict_as<ast::PointerType>();
     172                        auto parentType = fieldType->base.strict_as<ast::StructInstType>();
     173                        std::string const & parentInstance = instanceName( parentType->name );
     174                        inits.push_back(
     175                                        new ast::SingleInit( location, new ast::AddressExpr( new ast::NameExpr( location, parentInstance ) ) ) );
     176                } else if ( std::string( "__cfavir_typeid" ) == field->name ) {
     177                        std::string const & baseType = baseTypeName( vtableType->name );
     178                        std::string const & typeId = typeIdName( baseType );
     179                        inits.push_back( new ast::SingleInit( location, new ast::AddressExpr( new ast::NameExpr( location, typeId ) ) ) );
     180                } else if ( std::string( "size" ) == field->name ) {
     181                        inits.push_back( new ast::SingleInit( location, new ast::SizeofExpr( location, objectType )
     182                        ) );
     183                } else if ( std::string( "align" ) == field->name ) {
     184                        inits.push_back( new ast::SingleInit( location,
     185                                new ast::AlignofExpr( location, objectType )
     186                        ) );
     187                } else {
     188                        inits.push_back( new ast::SingleInit( location,
     189                                new ast::NameExpr( location, field->name )
     190                        ) );
     191                }
     192                //ast::Expr * expr = buildInitExpr(...);
     193                //inits.push_back( new ast::SingleInit( location, expr ) )
     194        }
     195
     196        return inits;
     197}
     198
     199ast::ObjectDecl * makeVtableInstance(
     200                CodeLocation const & location,
     201                std::string const & name,
     202                ast::StructInstType const * vtableType,
     203                ast::Type const * objectType,
     204                ast::Init const * init ) {
     205        assert( vtableType );
     206        assert( objectType );
     207
     208        // Build the initialization.
     209        if ( nullptr == init ) {
     210                init = new ast::ListInit( location,
     211                        buildInits( location, vtableType, objectType ) );
     212
     213        // The provided init should initialize everything except the parent
     214        // pointer, the size-of and align-of fields. These should be inserted.
     215        } else {
     216                // Except this is not yet supported.
     217                assert(false);
     218        }
     219        return makeVtableDeclaration( location, name, vtableType, init );
     220}
     221
    125222namespace {
    126223        std::string const functionName = "get_exception_vtable";
     
    140237                new ReferenceType( noQualifiers, vtableType ),
    141238                nullptr,
    142         { new Attribute("unused") }
     239                { new Attribute("unused") }
    143240        ) );
    144241        type->parameters.push_back( new ObjectDecl(
     
    157254                type,
    158255                nullptr
     256        );
     257}
     258
     259ast::FunctionDecl * makeGetExceptionForward(
     260                CodeLocation const & location,
     261                ast::Type const * vtableType,
     262                ast::Type const * exceptType ) {
     263        assert( vtableType );
     264        assert( exceptType );
     265        return new ast::FunctionDecl(
     266                location,
     267                functionName,
     268                { /* forall */ },
     269                { new ast::ObjectDecl(
     270                        location,
     271                        "__unused",
     272                        new ast::PointerType( exceptType )
     273                ) },
     274                { new ast::ObjectDecl(
     275                        location,
     276                        "_retvalue",
     277                        new ast::ReferenceType( vtableType )
     278                ) },
     279                nullptr,
     280                ast::Storage::Classes(),
     281                ast::Linkage::Cforall,
     282                { new ast::Attribute( "unused" ) }
    159283        );
    160284}
     
    172296}
    173297
     298ast::FunctionDecl * makeGetExceptionFunction(
     299                CodeLocation const & location,
     300                ast::ObjectDecl const * vtableInstance, ast::Type const * exceptType ) {
     301        assert( vtableInstance );
     302        assert( exceptType );
     303        ast::FunctionDecl * func = makeGetExceptionForward(
     304                        location, ast::deepCopy( vtableInstance->type ), exceptType );
     305        func->stmts = new ast::CompoundStmt( location, {
     306                new ast::ReturnStmt( location, new ast::VariableExpr( location, vtableInstance ) )
     307        } );
     308        return func;
     309}
     310
    174311ObjectDecl * makeTypeIdInstance( StructInstType const * typeIdType ) {
    175312        assert( typeIdType );
     
    191328}
    192329
    193 }
     330ast::ObjectDecl * makeTypeIdInstance(
     331                CodeLocation const & location,
     332                ast::StructInstType const * typeIdType ) {
     333        assert( typeIdType );
     334        ast::StructInstType * type = ast::mutate( typeIdType );
     335        type->set_const( true );
     336        std::string const & typeid_name = typeIdTypeToInstance( typeIdType->name );
     337        return new ast::ObjectDecl(
     338                location,
     339                typeid_name,
     340                type,
     341                new ast::ListInit( location, {
     342                        new ast::SingleInit( location,
     343                                new ast::AddressExpr( location,
     344                                        new ast::NameExpr( location, "__cfatid_exception_t" ) ) )
     345                } ),
     346                ast::Storage::Classes(),
     347                ast::Linkage::Cforall,
     348                nullptr,
     349                { new ast::Attribute( "cfa_linkonce" ) }
     350        );
     351}
     352
     353}
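
buildInits above walks the vtable struct's members in declaration order and synthesizes one SingleInit per field: the four special names (parent, __cfavir_typeid, size, align) get dedicated expressions, and every other field becomes a NameExpr to be resolved by name lookup later. The ListInit handed to makeVtableInstance therefore denotes an aggregate of roughly the following shape (field and instance names are hypothetical; shown as C-style designated initializers for orientation only):

    // Illustration, not generated code:
    //   struct vtable_T const instance = {
    //       .parent          = &parent_instance,    // vtable of the parent type
    //       .__cfavir_typeid = &typeid_instance,    // type-id object of the base type
    //       .size            = sizeof(T),
    //       .align           = alignof(T),
    //       .copy            = copy,                // remaining fields resolve by name
    //   };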
  • src/Virtual/Tables.h

    rba897d21 r2e9b59b  
    1010// Created On       : Mon Aug 31 11:07:00 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Apr 21 10:30:00 2021
    13 // Update Count     : 2
     12// Last Modified On : Wed Dec  8 16:58:00 2021
     13// Update Count     : 3
    1414//
    1515
    1616#include <list>  // for list
    1717
     18#include <string>
     19#include "AST/Fwd.hpp"
    1820class Declaration;
    1921class StructDecl;
     
    3537 * vtableType node is consumed.
    3638 */
     39ast::ObjectDecl * makeVtableForward(
     40        CodeLocation const & location, std::string const & name,
     41        ast::StructInstType const * vtableType );
    3742
    3843ObjectDecl * makeVtableInstance(
     
    4348 * vtableType and init (if provided) nodes are consumed.
    4449 */
     50ast::ObjectDecl * makeVtableInstance(
     51        CodeLocation const & location,
     52        std::string const & name,
     53        ast::StructInstType const * vtableType,
     54        ast::Type const * objectType,
     55        ast::Init const * init = nullptr );
    4556
    4657// Some special code for how exceptions interact with virtual tables.
     
    4960 * linking the vtableType to the exceptType. Both nodes are consumed.
    5061 */
     62ast::FunctionDecl * makeGetExceptionForward(
     63        CodeLocation const & location,
     64        ast::Type const * vtableType,
     65        ast::Type const * exceptType );
    5166
    5267FunctionDecl * makeGetExceptionFunction(
     
    5570 * exceptType node is consumed.
    5671 */
     72ast::FunctionDecl * makeGetExceptionFunction(
     73        CodeLocation const & location,
     74        ast::ObjectDecl const * vtableInstance, ast::Type const * exceptType );
    5775
    5876ObjectDecl * makeTypeIdInstance( StructInstType const * typeIdType );
     
    6078 * TODO: Should take the parent type. Currently locked to the exception_t.
    6179 */
     80ast::ObjectDecl * makeTypeIdInstance(
     81        const CodeLocation & location, ast::StructInstType const * typeIdType );
    6282
    6383}
  • src/main.cc

    rba897d21 r2e9b59b  
    1010// Created On       : Fri May 15 23:12:02 2015
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Jan 26 14:09:00 2022
    13 // Update Count     : 670
     12// Last Modified On : Wed Apr 13 11:11:00 2022
     13// Update Count     : 672
    1414//
    1515
     
    3232
    3333#include "AST/Convert.hpp"
    34 #include "AST/Print.hpp"
    3534#include "CompilationState.h"
    3635#include "../config.h"                      // for CFA_LIBDIR
     
    7675#include "Tuples/Tuples.h"                  // for expandMemberTuples, expan...
    7776#include "Validate/Autogen.hpp"             // for autogenerateRoutines
     77#include "Validate/GenericParameter.hpp"    // for fillGenericParameters, tr...
    7878#include "Validate/FindSpecialDecls.h"      // for findGlobalDecls
    7979#include "Validate/ForallPointerDecay.hpp"  // for decayForallPointers
     
    8181#include "Validate/InitializerLength.hpp"   // for setLengthFromInitializer
    8282#include "Validate/LabelAddressFixer.hpp"   // for fixLabelAddresses
     83#include "Validate/ReturnCheck.hpp"         // for checkReturnStatements
    8384#include "Virtual/ExpandCasts.h"            // for expandCasts
    8485
     
    328329                PASS( "Validate-A", SymTab::validate_A( translationUnit ) );
    329330                PASS( "Validate-B", SymTab::validate_B( translationUnit ) );
    330                 PASS( "Validate-C", SymTab::validate_C( translationUnit ) );
    331331
    332332                CodeTools::fillLocations( translationUnit );
    333333
    334334                if( useNewAST ) {
    335                         PASS( "Implement Concurrent Keywords", Concurrency::applyKeywords( translationUnit ) );
    336                         //PASS( "Forall Pointer Decay - A", SymTab::decayForallPointersA( translationUnit ) );
    337                         //PASS( "Forall Pointer Decay - B", SymTab::decayForallPointersB( translationUnit ) );
    338                         //PASS( "Forall Pointer Decay - C", SymTab::decayForallPointersC( translationUnit ) );
    339                         //PASS( "Forall Pointer Decay - D", SymTab::decayForallPointersD( translationUnit ) );
    340335                        CodeTools::fillLocations( translationUnit );
    341336
     
    347342
    348343                        forceFillCodeLocations( transUnit );
     344
     345                        // Check as early as possible. Can't happen before
     346                        // LinkReferenceToType, observed failing when attempted
     347                        // before eliminateTypedef
     348                        PASS( "Validate Generic Parameters", Validate::fillGenericParameters( transUnit ) );
     349
     350                        PASS( "Translate Dimensions", Validate::translateDimensionParameters( transUnit ) );
     351                        PASS( "Check Function Returns", Validate::checkReturnStatements( transUnit ) );
     352
     353                        // Must happen before Autogen.
     354                        PASS( "Fix Return Statements", InitTweak::fixReturnStatements( transUnit ) );
     355
     356                        PASS( "Implement Concurrent Keywords", Concurrency::implementKeywords( transUnit ) );
    349357
    350358                        // Must be after implement concurrent keywords; because uniqueIds
     
    430438                        translationUnit = convert( move( transUnit ) );
    431439                } else {
     440                        PASS( "Validate-C", SymTab::validate_C( translationUnit ) );
    432441                        PASS( "Validate-D", SymTab::validate_D( translationUnit ) );
    433442                        PASS( "Validate-E", SymTab::validate_E( translationUnit ) );
     
    497506                        PASS( "Translate Tries" , ControlStruct::translateTries( translationUnit ) );
    498507                }
    499 
    500                
    501508
    502509                PASS( "Gen Waitfor" , Concurrency::generateWaitFor( translationUnit ) );
  • tests/Makefile.am

    rba897d21 r2e9b59b  
    2828DEBUG_FLAGS=-debug -g -O0
    2929
    30 quick_test=avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes
     30quick_test=avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes meta/dumpable
    3131
    3232concurrent=
     
    6666PRETTY_PATH=mkdir -p $(dir $(abspath ${@})) && cd ${srcdir} &&
    6767
    68 .PHONY: list .validate
    69 .INTERMEDIATE: .validate .validate.cfa
     68.PHONY: list .validate .test_makeflags
     69.INTERMEDIATE: .validate .validate.cfa .test_makeflags
    7070EXTRA_PROGRAMS = avl_test linkonce .dummy_hack # build but do not install
    7171EXTRA_DIST = test.py \
     
    123123        @+${TEST_PY} --list ${concurrent}
    124124
     125.test_makeflags:
     126        @echo "${MAKEFLAGS}"
     127
    125128.validate: .validate.cfa
    126129        $(CFACOMPILE) .validate.cfa -fsyntax-only -Wall -Wextra -Werror
  • tests/PRNG.cfa

    rba897d21 r2e9b59b  
    88// Created On       : Wed Dec 29 09:38:12 2021
    99// Last Modified By : Peter A. Buhr
    10 // Last Modified On : Sat Feb 12 12:23:57 2022
    11 // Update Count     : 342
     10// Last Modified On : Sat Apr  9 15:21:14 2022
     11// Update Count     : 344
    1212//
    1313
     
    2222#include <mutex_stmt.hfa>
    2323
    24 // FIX ME: spurious characters appear in output
    25 Duration default_preemption() { return 0; }
    26 
    2724#ifdef TIME                                                                                             // use -O2 -nodebug
    2825#define STARTTIME start = timeHiRes()
  • tests/collections/.expect/vector-err-pass-perm-it-byval.txt

    rba897d21 r2e9b59b  
    1 error: Unique best alternative includes deleted identifier in Generated Cast of:
     1collections/vector-demo.cfa:95:1 error: Unique best alternative includes deleted identifier in Generated Cast of:
    22  Application of
    33    Deleted Expression
  • tests/concurrent/mutexstmt/.expect/locks.txt

    rba897d21 r2e9b59b  
    33Start Test: multi lock deadlock/mutual exclusion
    44End Test: multi lock deadlock/mutual exclusion
    5 Start Test: single scoped lock mutual exclusion
    6 End Test: single scoped lock mutual exclusion
    7 Start Test: multi scoped lock deadlock/mutual exclusion
    8 End Test: multi scoped lock deadlock/mutual exclusion
     5Start Test: multi polymorphic lock deadlock/mutual exclusion
     6End Test: multi polymorphic lock deadlock/mutual exclusion
  • tests/concurrent/mutexstmt/locks.cfa

    rba897d21 r2e9b59b  
    33
    44const unsigned int num_times = 10000;
     5
     6Duration default_preemption() { return 0; }
    57
    68single_acquisition_lock m1, m2, m3, m4, m5;
     
    2224}
    2325
     26void refTest( single_acquisition_lock & m ) {
     27        mutex ( m ) {
     28                assert(!insideFlag);
     29                insideFlag = true;
     30                assert(insideFlag);
     31                insideFlag = false;
     32        }
     33}
     34
    2435thread T_Multi {};
    2536
    2637void main( T_Multi & this ) {
    2738        for (unsigned int i = 0; i < num_times; i++) {
     39                refTest( m1 );
    2840                mutex ( m1 ) {
    2941                        assert(!insideFlag);
     
    5971}
    6072
    61 thread T_Mutex_Scoped {};
     73single_acquisition_lock l1;
     74linear_backoff_then_block_lock l2;
     75owner_lock l3;
    6276
    63 void main( T_Mutex_Scoped & this ) {
     77monitor monitor_t {};
     78
     79monitor_t l4;
     80
     81thread T_Multi_Poly {};
     82
     83void main( T_Multi_Poly & this ) {
    6484        for (unsigned int i = 0; i < num_times; i++) {
    65                 {
    66                         scoped_lock(single_acquisition_lock) s{m1};
    67                         count++;
    68                 }
    69                 {
    70                         scoped_lock(single_acquisition_lock) s{m1};
     85                refTest( l1 );
     86                mutex ( l1, l4 ) {
    7187                        assert(!insideFlag);
    7288                        insideFlag = true;
     
    7490                        insideFlag = false;
    7591                }
    76         }
    77 }
    78 
    79 thread T_Multi_Scoped {};
    80 
    81 void main( T_Multi_Scoped & this ) {
    82         for (unsigned int i = 0; i < num_times; i++) {
    83                 {
    84                         scoped_lock(single_acquisition_lock) s{m1};
     92                mutex ( l1, l2, l3 ) {
    8593                        assert(!insideFlag);
    8694                        insideFlag = true;
     
    8896                        insideFlag = false;
    8997                }
    90                 {
    91                         scoped_lock(single_acquisition_lock) s1{m1};
    92                         scoped_lock(single_acquisition_lock) s2{m2};
    93                         scoped_lock(single_acquisition_lock) s3{m3};
    94                         scoped_lock(single_acquisition_lock) s4{m4};
    95                         scoped_lock(single_acquisition_lock) s5{m5};
     98                mutex ( l3, l1, l4 ) {
    9699                        assert(!insideFlag);
    97100                        insideFlag = true;
     
    99102                        insideFlag = false;
    100103                }
    101                 {
    102                         scoped_lock(single_acquisition_lock) s1{m1};
    103                         scoped_lock(single_acquisition_lock) s3{m3};
    104                         assert(!insideFlag);
    105                         insideFlag = true;
    106                         assert(insideFlag);
    107                         insideFlag = false;
    108                 }
    109                 {
    110                         scoped_lock(single_acquisition_lock) s1{m1};
    111                         scoped_lock(single_acquisition_lock) s2{m2};
    112                         scoped_lock(single_acquisition_lock) s4{m4};
    113                         assert(!insideFlag);
    114                         insideFlag = true;
    115                         assert(insideFlag);
    116                         insideFlag = false;
    117                 }
    118                 {
    119                         scoped_lock(single_acquisition_lock) s1{m1};
    120                         scoped_lock(single_acquisition_lock) s3{m3};
    121                         scoped_lock(single_acquisition_lock) s4{m4};
    122                         scoped_lock(single_acquisition_lock) s5{m5};
     104                mutex ( l1, l2, l4 ) {
    123105                        assert(!insideFlag);
    124106                        insideFlag = true;
     
    131113int num_tasks = 10;
    132114int main() {
    133         processor p[10];
     115        processor p[num_tasks - 1];
    134116
    135117        printf("Start Test: single lock mutual exclusion\n");
    136118        {
    137                 T_Mutex t[10];
     119                T_Mutex t[num_tasks];
    138120        }
    139121        assert(count == num_tasks * num_times);
     
    141123        printf("Start Test: multi lock deadlock/mutual exclusion\n");
    142124        {
    143                 T_Multi t[10];
     125                T_Multi t[num_tasks];
    144126        }
    145127        printf("End Test: multi lock deadlock/mutual exclusion\n");
    146        
    147         count = 0;
    148         printf("Start Test: single scoped lock mutual exclusion\n");
     128        printf("Start Test: multi polymorphic lock deadlock/mutual exclusion\n");
    149129        {
    150                 T_Mutex_Scoped t[10];
     130                T_Multi_Poly t[num_tasks];
    151131        }
    152         assert(count == num_tasks * num_times);
    153         printf("End Test: single scoped lock mutual exclusion\n");
    154         printf("Start Test: multi scoped lock deadlock/mutual exclusion\n");
    155         {
    156                 T_Multi_Scoped t[10];
    157         }
    158         printf("End Test: multi scoped lock deadlock/mutual exclusion\n");     
     132        printf("End Test: multi polymorphic lock deadlock/mutual exclusion\n");
    159133}
  • tests/designations.cfa

    rba897d21 r2e9b59b  
    1010// Created On       : Thu Jun 29 15:26:36 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Thu Jul 27 11:46:35 2017
    13 // Update Count     : 3
     12// Last Modified On : Mon Mar 28 22:41:55 2022
     13// Update Count     : 15
    1414//
    1515
     
    1818// is used for the designation syntax
    1919#ifdef __cforall
    20 #define DES :
     20#define _ :
     21#define AT @
    2122#else
    22 int printf(const char *, ...);
    23 #define DES =
     23int printf( const char *, ... );
     24#define _ =
     25#define AT
    2426#endif
    2527
    2628const int indentAmt = 2;
    27 void indent(int level) {
    28         for (int i = 0; i < level; ++i) {
    29                 printf(" ");
     29void indent( int level ) {
     30        for ( int i = 0; i < level; ++i ) {
     31                printf( " " );
    3032        }
    3133}
     
    3638        int * ptr;
    3739};
    38 void printA(struct A a, int level) {
    39         indent(level);
    40         printf("(A){ %d %d %p }\n", a.x, a.y, a.ptr);
     40void printA( struct A a, int level ) {
     41        indent( level );
     42        printf( "(A){ %d %d %p }\n", a.x, a.y, a.ptr );
    4143}
    4244
     
    4547        struct A a0, a1;
    4648};
    47 void printB(struct B b, int level) {
    48         indent(level);
    49         printf("(B){\n");
    50         printA(b.a0, level+indentAmt);
    51         printA(b.a1, level+indentAmt);
    52         indent(level);
    53         printf("}\n");
     49void printB( struct B b, int level ) {
     50        indent( level );
     51        printf( "(B){\n" );
     52        printA( b.a0, level+indentAmt );
     53        printA( b.a1, level+indentAmt );
     54        indent( level );
     55        printf( "}\n" );
    5456}
    5557
     
    5961        struct B b;
    6062};
    61 void printC(struct C c, int level) {
    62         indent(level);
    63         printf("(C){\n");
    64         indent(level+indentAmt);
    65         printf("(int[]{ %d %d %d }\n", c.arr[0], c.arr[1], c.arr[2]);
    66         printB(c.b, level+indentAmt);
    67         indent(level);
    68         printf("}\n");
     63void printC( struct C c, int level ) {
     64        indent( level );
     65        printf( "(C){\n" );
     66        indent( level+indentAmt );
     67        printf( "(int[]{ %d %d %d }\n", c.arr[0], c.arr[1], c.arr[2] );
     68        printB( c.b, level+indentAmt );
     69        indent( level );
     70        printf( "}\n" );
    6971}
    7072
     
    7577        };
    7678};
    77 void printD(struct D d, int level) {
    78         indent(level);
    79         printf("(D){ %d }\n", d.x);
     79void printD( struct D d, int level ) {
     80        indent( level);
     81        printf( "(D ){ %d }\n", d.x );
    8082}
    8183
     
    99101    } m;
    100102};
    101 struct Fred s1 @= { .m.j : 3 };
    102 struct Fred s2 @= { .i : { [2] : 2 } };
     103struct Fred s1 AT= { .m.j _ 3 };
     104struct Fred s2 AT= { .i _ { [2] _ 2 } };
    103105
    104106int main() {
    105107        // simple designation case - starting from beginning of structure, leaves ptr default-initialized (zero)
    106108        struct A y0 = {
    107                 .x DES 2,
    108                 .y DES 3
     109                .x _ 2,
     110                .y _ 3
    109111        };
    110112
     
    117119        // use designation to move to member y, leaving x default-initialized (zero)
    118120        struct A y2 = {
    119                 .y DES 3,
     121                .y _ 3,
    120122                0
    121123        };
     
    127129#endif
    128130
    129         printf("=====A=====\n");
    130         printA(y0, 0);
    131         printA(y1, 0);
    132         printA(y2, 0);
    133         printf("=====A=====\n\n");
     131        printf( "=====A=====\n" );
     132        printA( y0, 0 );
     133        printA( y1, 0 );
     134        printA( y2, 0 );
     135        printf( "=====A=====\n\n" );
    134136
    135137        // initialize only first element (z0.a.x), leaving everything else default-initialized (zero), no nested curly-braces
     
    140142                { 3 }, // z1.a0
    141143                { 4 }, // z1.a1
    142                 .a0 DES { 5 }, // z1.a0
     144                .a0 _ { 5 }, // z1.a0
    143145                { 6 }, // z1.a1
    144                 .a0.y DES 2, // z1.a0.y
     146                .a0.y _ 2, // z1.a0.y
    145147                0, // z1.a0.ptr
    146148        };
     
    170172        };
    171173
    172         printf("=====B=====\n");
    173         printB(z0, 0);
    174         printB(z1, 0);
    175         printB(z2, 0);
    176         printB(z3, 0);
    177         printB(z5, 0);
    178         printB(z6, 0);
    179         printf("=====B=====\n\n");
     174        printf( "=====B=====\n" );
     175        printB( z0, 0 );
     176        printB( z1, 0 );
     177        printB( z2, 0 );
     178        printB( z3, 0 );
     179        printB( z5, 0 );
     180        printB( z6, 0 );
     181        printf( "=====B=====\n\n" );
    180182
    181183        // TODO: what about extra things in a nested init? are empty structs skipped??
     
    188190        };
    189191
    190         printf("=====C=====\n");
    191         printC(c1, 0);
    192         printf("=====C=====\n\n");
     192        printf( "=====C=====\n" );
     193        printC( c1, 0 );
     194        printf( "=====C=====\n\n" );
    193195
    194196#if ERROR
     
    213215#endif
    214216        // array designation
    215         int i[2] = { [1] : 3 };
     217        int i[2] = { [1] _ 3 };
    216218        // allowed to have 'too many' initialized lists - essentially they are ignored.
    217219        int i1 = { 3 };
     
    219221        // doesn't work yet.
    220222        // designate unnamed object's members
    221         // struct D d = { .x DES 3 };
     223        // struct D d = { .x _ 3 };
    222224#if ERROR
    223         struct D d1 = { .y DES 3 };
     225        struct D d1 = { .y _ 3 };
    224226#endif
    225227
     
    241243        // move cursor to e4.b.a0.x and initialize until e3.b.a1.ptr inclusive
    242244        union E e3 = {
    243                 .b.a0.x DES 2, 3, 0, 5, 6, 0
    244         };
    245 
    246         printf("=====E=====\n");
    247         printA(e0.a, 0);
    248         printA(e1.a, 0);
    249         printA(e2.a, 0);
    250         printB(e3.b, 0);
    251         printf("=====E=====\n\n");
     245                .b.a0.x _ 2, 3, 0, 5, 6, 0
     246        };
     247
     248        printf( "=====E=====\n" );
     249        printA( e0.a, 0 );
     250        printA( e1.a, 0 );
     251        printA( e2.a, 0 );
     252        printB( e3.b, 0 );
     253        printf( "=====E=====\n\n" );
    252254
    253255        // special case of initialization: char[] can be initialized with a string literal
    254256        const char * str0 = "hello";
    255257        char str1[] = "hello";
    256         const char c1[] = "abc";
    257         const char c2[] = { 'a', 'b', 'c' };
    258         const char c3[][2] = { { 'a', 'b' }, { 'c', 'd'}, { 'c', 'd'} };
     258        const char c2[] = "abc";
     259        const char c3[] = { 'a', 'b', 'c' };
     260        const char c4[][2] = { { 'a', 'b' }, { 'c', 'd'}, { 'c', 'd'} };
     261
     262        // more cases
     263
     264//      int widths[] = { [3 ... 9] _ 1, [10 ... 99] _ 2, [100] _ 3 };
     265//      int widths[] = { [3 ~ 9] _ 1, [10 ~ 99] _ 2, [100] _ 3 };
     266        struct point { int x, y; };
     267        struct point p = { .y _ 5, .x _ 7 };
     268        union foo { int i; double d; };
     269        union foo f = { .d _ 4 };
     270        int v1, v2, v4;
     271        int w[6] = { [1] _ v1, v2, [4] _ v4 };
     272        int whitespace[256] = { [' '] _ 1, ['\t'] _ 1, ['\v'] _ 1, ['\f'] _ 1, ['\n'] _ 1, ['\r'] _ 1 };
     273        struct point ptarray[10] = { [2].y _ 34, [2].x _ 35, [0].x _ 36 };
    259274}
    260275
  • tests/io/away_fair.cfa

    rba897d21 r2e9b59b  
    2020#include <thread.hfa>
    2121#include <iofwd.hfa>
    22 #include <io/types.hfa>
    2322
    2423Duration default_preemption() {
     
    5150}
    5251
    53 // ----- Spinner -----
     52// ----- Submitter -----
    5453// try to submit io but yield so that it's likely we are moved to the slow path
    5554thread Submitter {};
  • tests/io/io-acquire.cfa

    rba897d21 r2e9b59b  
    1010// Created On       : Mon Mar  1 18:40:09 2021
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Jan 14 09:13:18 2022
    13 // Update Count     : 74
     12// Last Modified On : Sat Apr  9 15:22:03 2022
     13// Update Count     : 76
    1414//
    1515
     
    1717#include <thread.hfa>
    1818#include <mutex_stmt.hfa>
    19 
    20 Duration default_preemption() { return 0; }
    2119
    2220thread T {};
  • tests/meta/dumpable.cfa

    rba897d21 r2e9b59b  
    1414//
    1515
     16#include <errno.h>
    1617#include <limits.h>
    17 #include <errno.h>
     18#include <string.h>
    1819
    1920#include <fstream.hfa>
    2021
    2122extern "C" {
     23        #include <fcntl.h>
     24        #include <unistd.h>
    2225        #include <sys/prctl.h>
    2326        #include <sys/resource.h>
    2427        #include <sys/statvfs.h>
    25         #include <unistd.h>
     28        #include <sys/stat.h>
     29        #include <sys/types.h>
    2630}
    2731
     
    102106}
    103107
     108void check_core_pattern() {
     109        int ret;
     110        int cp = open("/proc/sys/kernel/core_pattern", O_RDONLY);
     111        if(cp < 0) {
     112                perror("open(/proc/sys/kernel/core_pattern, O_RDONLY) error");
     113                return;
     114        }
     115
     116        try {
     117                const char * expected = "core\n";
     118                const int sz = sizeof("core\n");
     119                char buf[512] = { 0 };  // zero-filled so the read result is null-terminated
     120                ret = read(cp, buf, sizeof(buf) - 1);
     121                if(ret < 0) {
     122                        perror("first core pattern read error");
     123                        return;
     124                }
     125                ret = strncmp(expected, buf, sz - 1);
     126                if(ret != 0) {
     127                        serr | "/proc/sys/kernel/core_pattern does not contain 'core', was:" | nl | nl | buf | nl
     128                             | "The test script expects core files to be dumped with the name 'core' in the current working directory." | nl
     129                             | "Apport is not supported; it should be deactivated in /etc/default/apport for the test suite to work with core dumps.";
     130
     131                        return;
     132                }
     133        }
     134        finally {
     135                ret = close(cp);
     136                if(ret < 0) perror("close(/proc/sys/kernel/core_pattern) error");
     137        }
     138
     139}
     140
    104141int main() {
    105142        check_ulimit();
     
    113150        check_dumpflag();
    114151
     152        check_core_pattern();
     153
    115154        sout | "Done";
    116155}
  • tests/pybin/settings.py

    rba897d21 r2e9b59b  
    155155        global generating
    156156        global make
     157        global make_jobfds
    157158        global output_width
    158159        global timeout
     
    168169        generating   = options.regenerate_expected
    169170        make         = ['make']
     171        make_jobfds  = []
    170172        output_width = 24
    171173        timeout      = Timeouts(options.timeout, options.global_timeout)
     
    177179                os.putenv('DISTCC_LOG', os.path.join(BUILDDIR, 'distcc_error.log'))
    178180
    179 def update_make_cmd(force, jobs):
     181def update_make_cmd(flags):
    180182        global make
    181 
    182         make = ['make'] if not force else ['make', "-j%i" % jobs]
     183        make = ['make', *flags]
     184
     185def update_make_fds(r, w):
     186        global make_jobfds
     187        make_jobfds = (r, w)
    183188
    184189def validate():
     
    187192        global distcc
    188193        distcc       = "DISTCC_CFA_PATH=~/.cfadistcc/%s/cfa" % tools.config_hash()
    189         errf = os.path.join(BUILDDIR, ".validate.err")
    190         make_ret, out = tools.make( ".validate", error_file = errf, output_file=subprocess.DEVNULL, error=subprocess.DEVNULL )
     194        make_ret, out, err = tools.make( ".validate", output_file=subprocess.PIPE, error=subprocess.PIPE )
    191195        if make_ret != 0:
    192                 with open (errf, "r") as myfile:
    193                         error=myfile.read()
    194196                print("ERROR: Invalid configuration %s:%s" % (arch.string, debug.string), file=sys.stderr)
    195                 print("       verify returned : \n%s" % error, file=sys.stderr)
    196                 tools.rm(errf)
     197                print("       verify returned : \n%s" % err, file=sys.stderr)
    197198                sys.exit(1)
    198 
    199         tools.rm(errf)
    200199
    201200def prep_output(tests):
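
update_make_cmd and update_make_fds above let the test runner take part in GNU make's jobserver protocol, which prep_recursive_make in tools.py below sets up: a pipe preloaded with N-1 tokens (as the make manual prescribes) and advertised to sub-makes through MAKEFLAGS. The same handshake as a standalone sketch, assuming POSIX; the function name is illustrative:

    #include <string>
    #include <cstdlib>      // setenv
    #include <unistd.h>     // pipe, write

    // Sketch: create a jobserver pipe for N parallel jobs and advertise
    // it to child makes through MAKEFLAGS.
    void prepJobserver( int N ) {
        int fds[2];
        if ( N < 2 || pipe( fds ) != 0 ) return;          // nothing to share
        std::string tokens( N - 1, '+' );                 // one token per extra job
        (void)write( fds[1], tokens.data(), tokens.size() );
        std::string flags = "-j" + std::to_string( N )
            + " --jobserver-auth=" + std::to_string( fds[0] )
            + "," + std::to_string( fds[1] );
        setenv( "MAKEFLAGS", flags.c_str(), 1 );          // inherited by children
    }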
  • tests/pybin/tools.py

    rba897d21 r2e9b59b  
    2323
    2424# helper functions to run terminal commands
    25 def sh(*cmd, timeout = False, output_file = None, input_file = None, input_text = None, error = subprocess.STDOUT, ignore_dry_run = False):
     25def sh(*cmd, timeout = False, output_file = None, input_file = None, input_text = None, error = subprocess.STDOUT, ignore_dry_run = False, pass_fds = []):
    2626        try:
    2727                cmd = list(cmd)
     
    6565                                **({'input' : bytes(input_text, encoding='utf-8')} if input_text else {'stdin' : input_file}),
    6666                                stdout  = output_file,
    67                                 stderr  = error
     67                                stderr  = error,
     68                                pass_fds = pass_fds
    6869                        ) as proc:
    6970
    7071                                try:
    71                                         out, _ = proc.communicate(
     72                                        out, errout = proc.communicate(
    7273                                                timeout = settings.timeout.single if timeout else None
    7374                                        )
    7475
    75                                         return proc.returncode, out.decode("latin-1") if out else None
     76                                        return proc.returncode, out.decode("latin-1") if out else None, errout.decode("latin-1") if errout else None
    7677                                except subprocess.TimeoutExpired:
    7778                                        if settings.timeout2gdb:
    7879                                                print("Process {} timeout".format(proc.pid))
    7980                                                proc.communicate()
    80                                                 return 124, str(None)
     81                                                return 124, str(None), "Subprocess Timeout 2 gdb"
    8182                                        else:
    8283                                                proc.send_signal(signal.SIGABRT)
    8384                                                proc.communicate()
    84                                                 return 124, str(None)
     85                                                return 124, str(None), "Subprocess Timeout"
    8586
    8687        except Exception as ex:
     
    105106                return (False, "No file")
    106107
    107         code, out = sh("file", fname, output_file=subprocess.PIPE)
     108        code, out, err = sh("file", fname, output_file=subprocess.PIPE)
    108109        if code != 0:
    109                 return (False, "'file EXPECT' failed with code {}".format(code))
     110                return (False, "'file EXPECT' failed with code {} '{}'".format(code, err))
    110111
    111112        match = re.search(".*: (.*)", out)
     
    190191        ]
    191192        cmd = [s for s in cmd if s]
    192         return sh(*cmd, output_file=output_file, error=error)
     193        return sh(*cmd, output_file=output_file, error=error, pass_fds=settings.make_jobfds)
    193194
    194195def make_recon(target):
     
    241242# move a file
    242243def mv(source, dest):
    243         ret, _ = sh("mv", source, dest)
     244        ret, _, _ = sh("mv", source, dest)
    244245        return ret
    245246
    246247# cat one file into the other
    247248def cat(source, dest):
    248         ret, _ = sh("cat", source, output_file=dest)
     249        ret, _, _ = sh("cat", source, output_file=dest)
    249250        return ret
    250251
     
    289290#               system
    290291################################################################################
     292def jobserver_version():
     293        make_ret, out, err = sh('make', '.test_makeflags', '-j2', output_file=subprocess.PIPE, error=subprocess.PIPE)
     294        if make_ret != 0:
     295                print("ERROR: cannot find Makefile jobserver version", file=sys.stderr)
     296                print("       test returned : {} '{}'".format(make_ret, err), file=sys.stderr)
     297                sys.exit(1)
     298
     299        re_jobs = re.search("--jobserver-(auth|fds)", out)
     300        if not re_jobs:
     301                print("ERROR: cannot find Makefile jobserver version", file=sys.stderr)
     302                print("       MAKEFLAGS are : '{}'".format(out), file=sys.stderr)
     303                sys.exit(1)
     304
     305        return "--jobserver-{}".format(re_jobs.group(1))
     306
     307def prep_recursive_make(N):
     308        if N < 2:
     309                return []
     310
     311        # create the pipe
     312        (r, w) = os.pipe()
     313
     314        # fill it with N-1 tokens; the make manpage specifies N-1 because each make process may run one job without holding a token
     315        os.write(w, b'+' * (N - 1))
     316
     317        # prep the flags for make
     318        make_flags = ["-j{}".format(N), "--jobserver-auth={},{}".format(r, w)]
     319
     320        # tell make about the pipes
     321        os.environ["MAKEFLAGS"] = os.environ["MFLAGS"] = " ".join(make_flags)
     322
      323        # make sure the pipes are passed on to our children
     324        settings.update_make_fds(r, w)
     325
     326        return make_flags
     327
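Assuming `prep_recursive_make` has populated the pipe and exported `--jobserver-auth=r,w`, a cooperating child follows the usual GNU make jobserver protocol: take a byte before starting a job, put it back when done. A minimal sketch of that client side, where `r` and `w` are the fds parsed out of MAKEFLAGS:

        import os

        def run_with_job_token(r, w, job):
            # block until a token is free; each byte in the pipe is one job slot
            token = os.read(r, 1)
            try:
                job()  # run exactly one parallel job while holding the token
            finally:
                # always return the token, even if the job raised
                os.write(w, token)

A real client also gets one "free" job without taking a token, which is the same implicit token that explains the N-1 above.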
     328def prep_unlimited_recursive_make():
     329        # prep the flags for make
     330        make_flags = ["-j"]
     331
     332        # tell make about the pipes
     333        os.environ["MAKEFLAGS"] = os.environ["MFLAGS"] = "-j"
     334
     335        return make_flags
     336
     337
     338def eval_hardware():
      339        # we may create as many jobs as we want,
      340        # so size the count to the available hardware
     341        if settings.distribute:
     342                # remote hardware is allowed
     343                # how much do we have?
     344                ret, jstr, _ = sh("distcc", "-j", output_file=subprocess.PIPE, ignore_dry_run=True)
     345                return int(jstr.strip()) if ret == 0 else multiprocessing.cpu_count()
     346        else:
     347                # remote isn't allowed, use local cpus
     348                return multiprocessing.cpu_count()
     349
    291350# count number of jobs to create
    292 def job_count( options, tests ):
     351def job_count( options ):
    293352        # check if the user already passed in a number of jobs for multi-threading
    294         if not options.jobs:
    295                 make_flags = os.environ.get('MAKEFLAGS')
    296                 force = bool(make_flags)
    297                 make_jobs_fds = re.search("--jobserver-(auth|fds)=\s*([0-9]+),([0-9]+)", make_flags) if make_flags else None
    298                 if make_jobs_fds :
    299                         tokens = os.read(int(make_jobs_fds.group(2)), 1024)
    300                         options.jobs = len(tokens)
    301                         os.write(int(make_jobs_fds.group(3)), tokens)
    302                 else :
    303                         if settings.distribute:
    304                                 ret, jstr = sh("distcc", "-j", output_file=subprocess.PIPE, ignore_dry_run=True)
    305                                 if ret == 0:
    306                                         options.jobs = int(jstr.strip())
    307                                 else :
    308                                         options.jobs = multiprocessing.cpu_count()
    309                         else:
    310                                 options.jobs = multiprocessing.cpu_count()
     353        make_env = os.environ.get('MAKEFLAGS')
     354        make_flags = make_env.split() if make_env else None
     355        jobstr = jobserver_version()
     356
     357        if options.jobs and make_flags:
      358                print('WARNING: the -j option should not be specified when called from Make', file=sys.stderr)
     359
     360        # Top level make is calling the shots, just follow
     361        if make_flags:
     362                # do we have -j and --jobserver-...
     363                jobopt = None
     364                exists_fds = None
     365                for f in make_flags:
     366                        jobopt = f if f.startswith("-j") else jobopt
     367                        exists_fds = f if f.startswith(jobstr) else exists_fds
     368
     369                # do we have limited parallelism?
     370                if exists_fds :
     371                        try:
     372                                rfd, wfd = tuple(exists_fds.split('=')[1].split(','))
      373                        except (IndexError, ValueError):
      374                                print("ERROR: jobserver has unrecognizable format, was '{}'".format(exists_fds), file=sys.stderr)
     375                                sys.exit(1)
     376
     377                        # read the token pipe to count number of available tokens and restore the pipe
      378                        # this assumes the test suite script isn't invoked in parallel with something else
     379                        tokens = os.read(int(rfd), 65536)
     380                        os.write(int(wfd), tokens)
     381
      382                        # the token count is off by one: each make process holds one implicit
      383                        # token that never enters the pipe (see man make for details)
     384                        options.jobs = len(tokens) + 1
     385
     386                # do we have unlimited parallelism?
     387                elif jobopt and jobopt != "-j1":
      388                        # check that this actually makes sense
     389                        if jobopt != "-j":
     390                                print("ERROR: -j option passed by make but no {}, was '{}'".format(jobstr, jobopt), file=sys.stderr)
     391                                sys.exit(1)
     392
     393                        options.jobs = eval_hardware()
     394                        flags = prep_unlimited_recursive_make()
     395
     396
     397                # then no parallelism
     398                else:
     399                        options.jobs = 1
     400
      401                # keep all the flags make passed along, except 'w', which only controls directory printing
     402                flags = [f for f in make_flags if f != 'w']
     403
      404        # Arguments are calling the shots, fake the top-level make
     405        elif options.jobs :
     406
     407                # make sure we have a valid number of jobs that corresponds to user input
     408                if options.jobs < 0 :
     409                        print('ERROR: Invalid number of jobs', file=sys.stderr)
     410                        sys.exit(1)
     411
     412                flags = prep_recursive_make(options.jobs)
     413
      414        # Arguments are calling the shots, fake the top-level make; 0 is a special case meaning unlimited
     415        elif options.jobs == 0:
     416                options.jobs = eval_hardware()
     417                flags = prep_unlimited_recursive_make()
     418
      419        # No one asked for parallelism, so don't run in parallel
    311420        else :
    312                 force = True
    313 
    314         # make sure we have a valid number of jobs that corresponds to user input
    315         if options.jobs <= 0 :
    316                 print('ERROR: Invalid number of jobs', file=sys.stderr)
    317                 sys.exit(1)
    318 
    319         return min( options.jobs, len(tests) ), force
     421                options.jobs = 1
     422                flags = []
     423
     424        # Make sure we call make as expected
     425        settings.update_make_cmd( flags )
     426
     427        # return the job count
     428        return options.jobs
    320429
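The off-by-one rule is easy to check in isolation; this tiny self-contained sketch mirrors what the limited-parallelism branch above does with the real jobserver pipe:

        import os

        # a '-j4' jobserver pipe carries only 3 tokens, so count + 1 == 4
        r, w = os.pipe()
        os.write(w, b'+' * 3)        # what prep_recursive_make(4) would write
        tokens = os.read(r, 65536)   # drain the pipe to count the tokens
        os.write(w, tokens)          # restore it, exactly as job_count does
        assert len(tokens) + 1 == 4
        os.close(r); os.close(w)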
    321430# enable core dumps for all the test children
     
    334443        distcc_hash = os.path.join(settings.SRCDIR, '../tools/build/distcc_hash')
    335444        config = "%s-%s" % (settings.arch.target, settings.debug.path)
    336         _, out = sh(distcc_hash, config, output_file=subprocess.PIPE, ignore_dry_run=True)
     445        _, out, _ = sh(distcc_hash, config, output_file=subprocess.PIPE, ignore_dry_run=True)
    337446        return out.strip()
    338447
     
    374483
    375484        if not os.path.isfile(core):
    376                 return 1, "ERR No core dump (limit soft: {} hard: {})".format(*resource.getrlimit(resource.RLIMIT_CORE))
     485                return 1, "ERR No core dump, expected '{}' (limit soft: {} hard: {})".format(core, *resource.getrlimit(resource.RLIMIT_CORE))
    377486
    378487        try:
    379                 return sh('gdb', '-n', path, core, '-batch', '-x', cmd, output_file=subprocess.PIPE)
     488                ret, out, err = sh('gdb', '-n', path, core, '-batch', '-x', cmd, output_file=subprocess.PIPE)
     489                if ret == 0:
     490                        return 0, out
     491                else:
     492                        return 1, err
    380493        except:
    381494                return 1, "ERR Could not read core with gdb"
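When debugging a failure by hand, the `sh()` call above corresponds roughly to the following invocation; the test binary, core file, and gdb script names here are hypothetical (`-n` skips init files, `-batch` exits once the `-x` script finishes):

        import subprocess

        # hand-run equivalent of the gdb call above, with made-up file names
        p = subprocess.run(['gdb', '-n', './broken-test', 'core',
                            '-batch', '-x', 'print-core.gdb'],
                           capture_output=True, text=True)
        print(p.stdout if p.returncode == 0 else p.stderr)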
  • tests/test.py

    rba897d21 r2e9b59b  
    140140        parser.add_argument('--regenerate-expected', help='Regenerate the .expect by running the specified tests, can be used with --all option', action='store_true')
    141141        parser.add_argument('--archive-errors', help='If called with a valid path, on test crashes the test script will copy the core dump and the executable to the specified path.', type=str, default='')
    142         parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously', type=int)
     142        parser.add_argument('-j', '--jobs', help='Number of tests to run simultaneously, 0 (default) for unlimited', nargs='?', const=0, type=int)
    143143        parser.add_argument('--list-comp', help='List all valid arguments', action='store_true')
    144144        parser.add_argument('--list-dist', help='List all tests for distribution', action='store_true')
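The reworked `-j` flag distinguishes three cases that `job_count` later relies on: absent (defer to MAKEFLAGS, else run serially), bare `-j` (0, meaning size to the hardware), and an explicit count. A small sketch of how `nargs='?'` with `const=0` produces those values:

        import argparse

        p = argparse.ArgumentParser()
        p.add_argument('-j', '--jobs', nargs='?', const=0, type=int)
        assert p.parse_args([]).jobs is None         # absent: defer to MAKEFLAGS
        assert p.parse_args(['-j']).jobs == 0        # bare -j: unlimited/hardware
        assert p.parse_args(['-j', '8']).jobs == 8   # explicit job count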
     
    195195        # build, skipping to next test on error
    196196        with Timed() as comp_dur:
    197                 make_ret, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )
     197                make_ret, _, _ = make( test.target(), output_file=subprocess.DEVNULL, error=out_file, error_file = err_file )
    198198
    199199        # ----------
     
    208208                                if settings.dry_run or is_exe(exe_file):
    209209                                        # run test
    210                                         retcode, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)
     210                                        retcode, _, _ = sh(exe_file, output_file=out_file, input_file=in_file, timeout=True)
    211211                                else :
    212212                                        # simply cat the result into the output
     
    226226                        else :
    227227                                # fetch return code and error from the diff command
    228                                 retcode, error = diff(cmp_file, out_file)
     228                                retcode, error, _ = diff(cmp_file, out_file)
    229229
    230230                else:
     
    366366                        print(os.path.relpath(t.expect(), settings.SRCDIR), end=' ')
    367367                        print(os.path.relpath(t.input() , settings.SRCDIR), end=' ')
    368                         code, out = make_recon(t.target())
     368                        code, out, err = make_recon(t.target())
    369369
    370370                        if code != 0:
    371                                 print('ERROR: recond failed for test {}'.format(t.target()), file=sys.stderr)
      371                                print('ERROR: recon failed for test {}: {} \'{}\''.format(t.target(), code, err), file=sys.stderr)
    372372                                sys.exit(1)
    373373
     
    417417                        if is_empty(t.expect()):
    418418                                print('WARNING: test "{}" has empty .expect file'.format(t.target()), file=sys.stderr)
     419
     420        options.jobs = job_count( options )
    419421
    420422        # for each build configurations, run the test
     
    430432                        local_tests = settings.ast.filter( tests )
    431433                        local_tests = settings.arch.filter( local_tests )
    432                         options.jobs, forceJobs = job_count( options, local_tests )
    433                         settings.update_make_cmd(forceJobs, options.jobs)
    434434
    435435                        # check the build configuration works
    436436                        settings.validate()
     437                        jobs = min(options.jobs, len(local_tests))
    437438
    438439                        # print configuration
     
    440441                                'Regenerating' if settings.generating else 'Running',
    441442                                len(local_tests),
    442                                 options.jobs,
     443                                jobs,
    443444                                settings.ast.string,
    444445                                settings.arch.string,
     
    450451
    451452                        # otherwise run all tests and make sure to return the correct error code
    452                         failed = run_tests(local_tests, options.jobs)
     453                        failed = run_tests(local_tests, jobs)
    453454                        if failed:
    454455                                if not settings.continue_: