Changeset e5d9274


Timestamp:
Jun 2, 2022, 3:11:21 PM
Author:
caparsons <caparson@…>
Branches:
master, pthread-emulation, qualifiedEnum
Children:
ced5e2a
Parents:
015925a (diff), fc134a48 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Files:
25 added
1 deleted
133 edited

  • benchmark/plot.py

    r015925a re5d9274  
    2222
    2323class Field:
    24         def __init__(self, unit, _min, _log):
     24        def __init__(self, unit, _min, _log, _name=None):
    2525                self.unit = unit
    2626                self.min  = _min
    2727                self.log  = _log
     28                self.name = _name
    2829
    2930field_names = {
     
    3233        "Ops per procs"         : Field('Ops'   , 0, False),
    3334        "Ops per threads"       : Field('Ops'   , 0, False),
    34         "ns per ops/procs"      : Field('ns'    , 0, False),
     35        "ns per ops/procs"      : Field(''    , 0, False, _name = "Latency (ns $/$ (Processor $\\times$ Operation))" ),
    3536        "Number of threads"     : Field(''      , 1, False),
    3637        "Total Operations(ops)" : Field('Ops'   , 0, False),
    3738        "Ops/sec/procs"         : Field('Ops'   , 0, False),
    3839        "Total blocks"          : Field('Blocks', 0, False),
    39         "Ops per second"        : Field('Ops'   , 0, False),
     40        "Ops per second"        : Field(''   , 0, False),
    4041        "Cycle size (# thrds)"  : Field('thrd'  , 1, False),
    4142        "Duration (ms)"         : Field('ms'    , 0, False),
     
    5152}
    5253
    53 def plot(in_data, x, y, out):
     54def plot(in_data, x, y, options):
    5455        fig, ax = plt.subplots()
    5556        colors = itertools.cycle(['#0095e3','#006cb4','#69df00','#0aa000','#fb0300','#e30002','#fd8f00','#ff7f00','#8f00d6','#4b009a','#ffff00','#b13f00'])
     
    109110        print("Finishing Plots")
    110111
    111         plt.ylabel(y)
     112        plt.ylabel(field_names[y].name if field_names[y].name else y)
    112113        # plt.xticks(range(1, math.ceil(mx) + 1))
    113         plt.xlabel(x)
     114        plt.xlabel(field_names[x].name if field_names[x].name else x)
    114115        plt.grid(b = True)
    115116        ax.xaxis.set_major_formatter( EngFormatter(unit=field_names[x].unit) )
    116         if field_names[x].log:
     117        if options.logx:
     118                ax.set_xscale('log')
     119        elif field_names[x].log:
    117120                ax.set_xscale('log')
    118121        else:
     
    120123
    121124        ax.yaxis.set_major_formatter( EngFormatter(unit=field_names[y].unit) )
    122         if field_names[y].log:
     125        if options.logy:
     126                ax.set_yscale('log')
     127        elif field_names[y].log:
    123128                ax.set_yscale('log')
    124129        else:
    125                 plt.ylim(field_names[y].min, my*1.2)
     130                plt.ylim(field_names[y].min, options.MaxY if options.MaxY else my*1.2)
    126131
    127132        plt.legend(loc='upper left')
    128133
    129134        print("Results Ready")
    130         if out:
    131                 plt.savefig(out)
     135        if options.out:
     136                plt.savefig(options.out, bbox_inches='tight')
    132137        else:
    133138                plt.show()
     
    142147        parser.add_argument('-y', nargs='?', type=str, default="", help="Which field to use as the Y axis")
    143148        parser.add_argument('-x', nargs='?', type=str, default="", help="Which field to use as the X axis")
     149        parser.add_argument('--logx', action='store_true', help="if set, makes the x-axis logscale")
     150        parser.add_argument('--logy', action='store_true', help="if set, makes the y-axis logscale")
     151        parser.add_argument('--MaxY', nargs='?', type=int, help="maximum value of the y-axis")
    144152
    145153        options =  parser.parse_args()
     
    185193
    186194
    187         plot(data, wantx, wanty, options.out)
     195        plot(data, wantx, wanty, options)
  • doc/theses/mubeen_zulfiqar_MMath/allocator.tex

    r015925a re5d9274  
    2929llheap's design was reviewed and changed multiple times throughout the thesis.
    3030Some of the rejected designs are discussed because they show the path to the final design (see discussion in \VRef{s:MultipleHeaps}).
    31 Note, a few simples tests for a design choice were compared with the current best allocators to determine the viability of a design.
 31Note, a few simple tests of a design choice were compared with the current best allocators to determine the viability of the design.
    3232
    3333
     
    3737These designs look at the allocation/free \newterm{fastpath}, \ie when an allocation can immediately return free storage or returned storage is not coalesced.
    3838\paragraph{T:1 model}
    39 \VRef[Figure]{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are N-shared across KTs.
    40 This design leverages the fact that 95\% of allocation requests are less than 1024 bytes and there are only 3--5 different request sizes.
     39\VRef[Figure]{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are shared among N KTs.
     40This design leverages the fact that usually the allocation requests are less than 1024 bytes and there are only a few different request sizes.
    4141When KTs $\le$ N, the common bucket sizes are uncontended;
    4242when KTs $>$ N, the free buckets are contended and latency increases significantly.
     
    6464
    6565\paragraph{T:H model}
    66 \VRef[Figure]{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded across the KTs.
     66\VRef[Figure]{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded (distributed) across the KTs.
    6767A KT can point directly to its assigned heap or indirectly through the corresponding heap bucket.
    68 When KT $\le$ N, the heaps are uncontented;
     68When KTs $\le$ N, the heaps might be uncontended;
    6969when KTs $>$ N, the heaps are contended.
    7070In all cases, a KT must acquire/release a lock, contended or uncontended, along the fast allocation path because a heap is shared.
    71 By adjusting N upwards, this approach reduces contention but increases storage (time versus space);
     71By increasing N, this approach reduces contention but increases storage (time versus space);
    7272however, picking N is workload specific.
    7373
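For illustration, here is a minimal C sketch (not llheap code; the heap count, hash, and names are assumptions) of the T:H fastpath cost, where every allocation must take a lock even when uncontended:

    #include <pthread.h>
    #include <stddef.h>

    #define N 8                                     // fixed number of shared heaps

    typedef struct {                                // one shared heap
        pthread_mutex_t lock;                       // initialized with pthread_mutex_init at startup
        void * freelist;                            // intrusive free list: first word of each object links to the next
    } Heap;

    static Heap heaps[N];

    static Heap * my_heap( void ) {                 // map a KT to its assigned heap
        return &heaps[ (size_t)pthread_self() % N ];
    }

    void * heap_alloc( void ) {
        Heap * h = my_heap();
        pthread_mutex_lock( &h->lock );             // required even when uncontended (KTs <= N)
        void * obj = h->freelist;
        if ( obj ) h->freelist = *(void **)obj;     // pop the free list
        pthread_mutex_unlock( &h->lock );
        return obj;                                 // NULL => take the slowpath (elided)
    }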
     
    109109Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}.
    110110\begin{quote}
    111 A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}
     111A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable}
    112112\end{quote}
    113113If a KT is preempted during an allocation operation, the operating system can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.
     
    138138(See \VRef[Figure]{f:THSharedHeaps} but with a heap bucket per KT and no bucket or local-pool lock.)
    139139Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted.
    140 Heaps are uncontended for a KTs memory operations to its heap (modulo operations on the global pool and ownership).
 140Heaps are uncontended for a KT's memory operations as every KT has its own thread-local heap, modulo operations on the global pool and ownership.
    141141
    142142Problems:
    143143\begin{itemize}
    144144\item
    145 Need to know when a KT is starts/terminates to create/delete its heap.
     145Need to know when a KT starts/terminates to create/delete its heap.
    146146
    147147\noindent
     
    161161\noindent
    162162In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs.
    163 Since the number of CPUs is relatively small, >~1024, and a heap relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
     163Since the number of CPUs is relatively small, and a heap is also relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
    164164\item
    165165There is the same serially-reusable problem with UTs migrating across KTs.
     
    171171\noindent
    172172The conclusion from this design exercise is: any atomic fence, atomic instruction (lock free), or lock along the allocation fastpath produces significant slowdown.
    173 For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps maybe shared by multiple threads, even when KTs $\le$ N.
     173For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps might be shared by multiple threads, even when KTs $\le$ N.
    174174For the T:H=CPU and 1:1 models, locking is eliminated along the allocation fastpath.
    175175However, T:H=CPU has poor operating-system support to determine the CPU id (heap id) and prevent the serially-reusable problem for KTs.
    176176More operating system support is required to make this model viable, but there is still the serially-reusable problem with user-level threading.
    177 Leaving the 1:1 model with no atomic actions along the fastpath and no special operating-system support required.
 177So the 1:1 model has no atomic actions along the fastpath and no special operating-system support requirements.
    178178The 1:1 model still has the serially-reusable problem with user-level threading, which is addressed in \VRef{s:UserlevelThreadingSupport}, and the greatest potential for heap blowup for certain allocation patterns.
    179179
     
    212212Ideally latency is $O(1)$ with a small constant.
    213213
    214 To obtain $O(1)$ internal latency means no searching on the allocation fastpath, largely prohibits coalescing, which leads to external fragmentation.
 214Obtaining $O(1)$ internal latency means no searching on the allocation fastpath and largely prohibits coalescing, which leads to external fragmentation.
    215215The mitigating factor is that most programs have well behaved allocation patterns, where the majority of allocation operations can be $O(1)$, and heap blowup does not occur without coalescing (although the allocation footprint may be slightly larger).
    216216
     
    257257llheap starts by creating an array of $N$ global heaps from storage obtained using @mmap@, where $N$ is the number of computer cores, that persists for program duration.
    258258There is a global bump-pointer to the next free heap in the array.
    259 When this array is exhausted, another array is allocated.
    260 There is a global top pointer for a heap intrusive link to chain free heaps from terminated threads.
    261 When statistics are turned on, there is a global top pointer for a heap intrusive link to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.
     259When this array is exhausted, another array of heaps is allocated.
 260There is a global top pointer for an intrusive linked-list to chain free heaps from terminated threads.
 261When statistics are turned on, there is a global top pointer for an intrusive linked-list to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.
    262262
    263263When a KT starts, a heap is allocated from the current array for exclusive use by the KT.
    264 When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of heaps.
    265 The free heaps is a stack so hot storage is reused first.
    266 Preserving all heaps created during the program lifetime, solves the storage lifetime problem, when ownership is used.
 264When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of the number of heaps.
 265The free heaps are stored on a stack so hot storage is reused first.
 266Preserving all heaps created during the program lifetime solves the storage lifetime problem when ownership is used.
    267267This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially.
    268268llheap can be configured with object ownership, where an object is freed to the heap from which it is allocated, or object no-ownership, where an object is freed to the KT's current heap.
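The following C sketch illustrates the global heap-array and free-heap stack just described (a hypothetical rendering: the field and function names are invented, and a mutex stands in for whatever synchronization llheap actually uses):

    #include <sys/mman.h>
    #include <pthread.h>
    #include <stddef.h>

    typedef struct Heap {
        struct Heap * next;                         // intrusive link for the free-heap stack
        /* buckets, statistics, ... */
    } Heap;

    static Heap * heap_array;                       // current array of heaps
    static size_t next_heap, nheaps;                // bump pointer and capacity
    static Heap * free_heaps;                       // stack of heaps from terminated KTs (LIFO => hot reuse)
    static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;

    void heaps_init( size_t cores ) {               // once at program start (error checking elided)
        nheaps = cores;
        heap_array = mmap( NULL, nheaps * sizeof(Heap), PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
    }

    Heap * heap_acquire( void ) {                   // at KT start
        pthread_mutex_lock( &heap_lock );
        Heap * h = free_heaps;
        if ( h ) free_heaps = h->next;              // reuse hot heap first
        else h = &heap_array[ next_heap++ ];        // bump pointer; exhausted array => mmap another (elided)
        pthread_mutex_unlock( &heap_lock );
        return h;
    }

    void heap_release( Heap * h ) {                 // at KT termination: heap storage persists for reuse
        pthread_mutex_lock( &heap_lock );
        h->next = free_heaps;  free_heaps = h;
        pthread_mutex_unlock( &heap_lock );
    }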
    269269
    270270Each heap uses segregated free-buckets that have free objects distributed across 91 different sizes from 16 to 4M.
     271All objects in a bucket are of the same size.
    271272The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the operating system.
    272273Each free bucket of a specific size has the following two lists:
     
    286287Quantizing is performed using a binary search over the ordered bucket array.
    287288An optional optimization is fast lookup $O(1)$ for sizes < 64K from a 64K array of type @char@, where each element has an index to the corresponding bucket.
    288 (Type @char@ restricts the number of bucket sizes to 256.)
     289The @char@ type restricts the number of bucket sizes to 256.
    289290For $S$ > 64K, a binary search is used.
    290291Then, the allocation storage is obtained from the following locations (in order), with increasing latency.
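A hedged C sketch of this quantizing step, using an illustrative subset of bucket sizes (llheap's real table has 91 sizes up to 4M):

    #include <stddef.h>

    static const size_t bucket_sizes[] = {          // illustrative subset, ascending
        16, 32, 48, 64, 96, 128, 192, 256, 512, 1024, 4096, 16384, 65536
    };
    #define NBUCKETS (sizeof(bucket_sizes) / sizeof(bucket_sizes[0]))

    static unsigned char lookup[64 * 1024];         // size -> bucket index; char caps buckets at 256

    void build_lookup( void ) {                     // filled once at startup
        size_t b = 0;
        for ( size_t s = 0; s < sizeof(lookup); s += 1 ) {
            while ( bucket_sizes[b] < s ) b += 1;   // first bucket size >= s
            lookup[s] = (unsigned char)b;
        }
    }

    size_t size2bucket( size_t s ) {                // returns NBUCKETS => too large, mmap directly
        if ( s < sizeof(lookup) ) return lookup[s]; // O(1) for sizes < 64K
        if ( s > bucket_sizes[NBUCKETS - 1] ) return NBUCKETS;
        size_t lo = 0, hi = NBUCKETS - 1;           // binary search: first bucket size >= s
        while ( lo < hi ) {
            size_t mid = (lo + hi) / 2;
            if ( bucket_sizes[mid] < s ) lo = mid + 1; else hi = mid;
        }
        return lo;
    }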
     
    381382Then the corresponding bucket of the owner thread is computed for the deallocating thread, and the allocation is pushed onto the deallocating thread's bucket.
    382383
    383 Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through routines @malloc@/@free@, which are the only routines to directly access and manage the internal data structures of the heap.
     384Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through the @malloc@ and @free@ routines, which are the only routines to directly access and manage the internal data structures of the heap.
    384385Other allocation operations, \eg @calloc@, @memalign@, and @realloc@, are composed of calls to @malloc@ and possibly @free@, and may manipulate header information after storage is allocated.
    385386This design simplifies heap-management code during development and maintenance.
     
    388389\subsection{Alignment}
    389390
    390 All dynamic memory allocations must have a minimum storage alignment for the contained object(s).
     391Most dynamic memory allocations have a minimum storage alignment for the contained object(s).
    391392Often the minimum memory alignment, M, is the bus width (32 or 64-bit) or the largest register (double, long double) or largest atomic instruction (DCAS) or vector data (MMX).
    392393In general, the minimum storage alignment is 8/16-byte boundary on 32/64-bit computers.
    393394For consistency, the object header is normally aligned at this same boundary.
    394 Larger alignments must be a power of 2, such page alignment (4/8K).
     395Larger alignments must be a power of 2, such as page alignment (4/8K).
    395396Any alignment request, N, $\le$ the minimum alignment is handled as a normal allocation with minimal alignment.
    396397
     
    400401\end{center}
    401402The storage between @E@ and @H@ is chained onto the appropriate free list for future allocations.
    402 This approach is also valid within any sufficiently large free block, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage.
     403The same approach is used for sufficiently large free blocks, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage.
    403404In this approach, the aligned address @A@ is the same as the allocated storage address @P@, \ie @P@ $=$ @A@ for all allocation routines, which simplifies deallocation.
    404405However, if there are a large number of aligned requests, this approach leads to memory fragmentation from the small free areas around the aligned object.
     
    407408Finally, this approach is incompatible with allocator designs that funnel allocation requests through @malloc@ as it directly manipulates management information within the allocator to optimize the space/time of a request.
    408409
    409 Instead, llheap alignment is accomplished by making a \emph{pessimistically} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg:
     410Instead, llheap alignment is accomplished by making a \emph{pessimistic} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg:
    410411\begin{center}
    411412\input{Alignment2}
     
    424425\input{Alignment2Impl}
    425426\end{center}
    426 Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments of @M@ or greater.
     427Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments greater than @M@.
    427428When @P@ $\neq$ @A@, the minimum distance between @P@ and @A@ is @M@ bytes, due to the pessimistic storage-allocation.
    428429Therefore, there is always room for an @M@-byte fake header before @A@.
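A minimal C sketch of this pessimistic scheme, assuming @align@ is a power of 2 at least the minimum alignment M. Unlike llheap, every aligned request here gets a fake header, so @afree@ applies only to pointers from @amemalign@ (llheap instead tests the alignment bit to distinguish fake from real headers):

    #include <stdlib.h>
    #include <stdint.h>

    typedef struct { uint32_t offset; } FakeHeader; // distance back to the real allocation P

    void * amemalign( size_t align, size_t size ) { // align: power of 2, >= minimum alignment M
        // pessimistic request: worst-case padding plus room for the fake header
        char * P = malloc( size + align - 1 + sizeof(FakeHeader) );
        if ( P == NULL ) return NULL;
        uintptr_t A = ( (uintptr_t)P + sizeof(FakeHeader) + align - 1 ) & ~(uintptr_t)(align - 1);
        ((FakeHeader *)A)[-1].offset = (uint32_t)( A - (uintptr_t)P );  // fake header just before A
        return (void *)A;
    }

    void afree( void * A ) {
        free( (char *)A - ((FakeHeader *)A)[-1].offset );  // recover P, then normal free
    }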
     
    439440\label{s:ReallocStickyProperties}
    440441
    441 Allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually.
     442The allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually.
    442443\begin{flushleft}
    443444\begin{tabular}{ll}
     
    460461The realloc pattern leverages available storage at the end of an allocation due to bucket sizes, possibly eliminating a new allocation and copying.
    461462This pattern is not used enough to reduce storage management costs.
    462 In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the pattern.
     463In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the allocation pattern.
    463464
    464465The hidden problem for this pattern is the effect of zero fill and alignment with respect to reallocation.
    465466Are these properties transient or persistent (``sticky'')?
    466 For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size.
    467 That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are initial allocation properties preserve?
     467For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size?
     468That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are initial allocation properties preserved?
    468469Currently, allocation properties are not preserved, so subsequent use of @realloc@ storage may cause inefficient execution or errors due to lack of zero fill or alignment.
    469470This silent problem is unintuitive to programmers and difficult to locate because it is transient.
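The transient zero-fill problem can be demonstrated with standard C alone:

    #include <stdlib.h>
    #include <stdio.h>

    int main( void ) {
        int * a = calloc( 10, sizeof(int) );        // zero filled at allocation
        a = realloc( a, 20 * sizeof(int) );         // grow (error checking elided)
        // zero fill was transient, not sticky: a[0..9] are still 0 (copied),
        // but a[10..19] are indeterminate because realloc does not know the
        // storage was originally allocated with calloc
        for ( int i = 0; i < 20; i += 1 ) printf( "%d ", a[i] );
        printf( "\n" );
        free( a );
    }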
     
    475476
    476477To preserve allocation properties requires storing additional information with an allocation.
    477 The only available location is the header, where \VRef[Figure]{f:llheapNormalHeader} shows the llheap storage layout.
     478The best available option is the header, where \VRef[Figure]{f:llheapNormalHeader} shows the llheap storage layout.
    478479The header has two data fields sized appropriately for 32/64-bit alignment requirements.
    479480The first field is a union of three values:
     
    487488\end{description}
    488489The second field remembers the request size versus the allocation (bucket) size, \eg request 42 bytes which is rounded up to 64 bytes.
    489 Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors.
     490Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors and also helps in memory management.
    490491
    491492\begin{figure}
     
    496497\end{figure}
    497498
    498 The low-order 3-bits of the first field are \emph{unused} for any stored values, whereas the second field may use all of its bits.
 499The low-order 3 bits of the first field are \emph{unused} for any stored values as these values are 16-byte aligned by default, whereas the second field may use all of its bits.
    499500The 3 unused bits are used to represent mapped allocation, zero filled, and alignment, respectively.
    500501Note, the alignment bit is not used in the normal header and the zero-filled/mapped bits are not used in the fake header.
     
    502503If no bits are on, it implies a basic allocation, which is handled quickly;
    503504otherwise, the bits are analysed and appropriate actions are taken for the complex cases.
    504 Since most allocations are basic, this implementation results in a significant performance gain along the allocation and free fastpath.
 505Since most allocations are basic, their memory operations take significantly less time because they are handled along the allocation and free fastpath.
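A sketch of the flag encoding in C (the bit values and field names are illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    // values stored in the first header field are at least 8-byte aligned,
    // leaving the low-order 3 bits free for the three flags
    enum { MAPPED = 0x1, ZERO_FILL = 0x2, ALIGN = 0x4, FLAGS = 0x7 };

    typedef struct {
        uintptr_t field1;                           // union value (pointer/size) plus flag bits
        uint32_t  request;                          // requested size vs bucket size
    } Header;

    static inline bool is_basic( const Header * h ) {
        return ( h->field1 & FLAGS ) == 0;          // no bits on => fastpath
    }
    static inline uintptr_t field_value( const Header * h ) {
        return h->field1 & ~(uintptr_t)FLAGS;       // strip flags to recover the stored value
    }
    static inline void mark( Header * h, uintptr_t flags ) {
        h->field1 |= flags;                         // e.g. mark( h, ZERO_FILL | ALIGN )
    }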
    505506
    506507
     
    514515To locate all statistic counters, heaps are linked together in statistics mode, and this list is locked and traversed to sum all counters across heaps.
    515516Note, the list is locked to prevent errors traversing an active list;
    516 the statistics counters are not locked and can flicker during accumulation, which is not an issue with atomic read/write.
     517the statistics counters are not locked and can flicker during accumulation.
    517518\VRef[Figure]{f:StatiticsOutput} shows an example of statistics output, which covers all allocation operations and information about deallocating storage not owned by a thread.
    518519No other memory allocator studied provides as comprehensive statistical information.
    519 Finally, these statistics were invaluable during the development of this thesis for debugging and verifying correctness, and hence, should be equally valuable to application developers.
     520Finally, these statistics were invaluable during the development of this thesis for debugging and verifying correctness and should be equally valuable to application developers.
    520521
    521522\begin{figure}
     
    547548Nevertheless, the checks detect many allocation problems.
    548549There is an unfortunate problem in detecting unfreed storage because some library routines assume their allocations have program-lifetime duration, and hence, do not free their storage.
    549 For example, @printf@ allocates a 1024 buffer on first call and never deletes this buffer.
     550For example, @printf@ allocates a 1024-byte buffer on the first call and never deletes this buffer.
    550551To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ \VPageref{p:malloc_unfreed}), and it is subtracted from the total allocate/free difference.
    551552Determining the amount of never-freed storage is annoying, but once done, any warnings of unfreed storage are application related.
    552553
    553 Tests indicate only a 30\% performance increase when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistic is mitigated by limited calls, often only one at the end of the program.
 554Tests indicate only a 30\% performance decrease when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistics is mitigated by limited calls, often only one at the end of the program.
    554555
    555556
     
    557558\label{s:UserlevelThreadingSupport}
    558559
    559 The serially-reusable problem (see \VRef{s:AllocationFastpath}) occurs for kernel threads in the ``T:H model, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
    560 The solution is to prevent interrupts that can result in CPU or KT change during operations that are logically critical sections.
     560The serially-reusable problem (see \VPageref{p:SeriallyReusable}) occurs for kernel threads in the ``T:H model, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
     561The solution is to prevent interrupts that can result in a CPU or KT change during operations that are logically critical sections such as starting a memory operation on one KT and completing it on another.
    561562Locking these critical sections negates any attempt for a quick fastpath and results in high contention.
    562563For user-level threading, the serially-reusable problem appears with time slicing for preemptable scheduling, as the signal handler context switches to another user-level thread.
    563 Without time slicing, a user thread performing a long computation can prevent execution (starve) other threads.
    564 To prevent starvation for an allocation-active thread, \ie the time slice always triggers in an allocation critical-section for one thread, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
     564Without time slicing, a user thread performing a long computation can prevent the execution of (starve) other threads.
     565To prevent starvation for a memory-allocation-intensive thread, \ie the time slice always triggers in an allocation critical-section for one thread so the thread never gets time sliced, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
    565566The rollforward flag is tested at the end of each allocation funnel routine (see \VPageref{p:FunnelRoutine}), and if set, it is reset and a volunteer yield (context switch) is performed to allow other threads to execute.
    566567
    567 llheap uses two techniques to detect when execution is in a allocation operation or routine called from allocation operation, to abort any time slice during this period.
    568 On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting thread-local flags so the signal handler aborts immediately.
    569 On the fastpath, disabling/enabling interrupts is too expensive as accessing thread-local storage can be expensive and not thread-safe.
 568llheap uses two techniques to detect when execution is in an allocation operation or a routine called from an allocation operation, to abort any time slice during this period.
     569On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting kernel-thread-local flags so the signal handler aborts immediately.
     570On the fastpath, disabling/enabling interrupts is too expensive as accessing kernel-thread-local storage can be expensive and not user-thread-safe.
    570571For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing.
    571 Hence, there is a window between loading the thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.
    572 
    573 The fast technique defines a special code section and places all non-interruptible routines in this section.
     572Hence, there is a window between loading the kernel-thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.
     573
 573The fast technique (with lower run-time cost) is to define a special code section and place all non-interruptible routines in this section.
    574575The linker places all code in this section into a contiguous block of memory, but the order of routines within the block is unspecified.
    575576Then, the signal handler compares the program counter at the point of interrupt with the start and end addresses of the non-interruptible section, and if executing within this section, aborts the time slice and sets the rollforward flag.
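A heavily hedged C sketch of the mechanism (the section name, linker symbols, and program-counter extraction are illustrative and architecture specific):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <signal.h>

    extern char nonint_start[], nonint_end[];       // linker-provided bounds of the special section
    static __thread volatile bool rollforward;      // set instead of preempting

    static void timeslice_handler( int sig, siginfo_t * info, void * uctx ) {
        uintptr_t pc = 0 /* extract from (ucontext_t *)uctx: architecture specific */;
        if ( pc >= (uintptr_t)nonint_start && pc < (uintptr_t)nonint_end ) {
            rollforward = true;                     // abort this time slice; yield later
            return;
        }
        /* otherwise perform the normal user-thread context switch ... */
    }

    __attribute__(( section( ".nonint" ) ))         // funnel routines live in the special section
    void * malloc_funnel( size_t size ) {
        void * p = 0 /* ... actual allocation ... */;
        if ( rollforward ) { rollforward = false; /* yield(): volunteer context switch */ }
        return p;
    }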
     
    577578Hence, for correctness, this approach requires inspection of generated assembler code for routines placed in the non-interruptible section.
    578579This issue is mitigated by the llheap funnel design so only funnel routines and a few statistics routines are placed in the non-interruptible section and their assembler code examined.
    579 These techniques are used in both the \uC and \CFA versions of llheap, where both of these systems have user-level threading.
     580These techniques are used in both the \uC and \CFA versions of llheap as both of these systems have user-level threading.
    580581
    581582
     
    587588Programs can be statically or dynamically linked.
    588589\item
    589 The order the linker schedules startup code is poorly supported.
 590The order in which the linker schedules startup code is poorly supported, so it cannot be controlled entirely.
    590591\item
    591592Knowing a KT's start and end independently from the KT code is difficult.
     
    600601Hence, some part of the @sbrk@ area may be used by the default allocator and statistics about allocation operations cannot be correct.
    601602Furthermore, dynamic linking goes through trampolines, so there is an additional cost along the allocator fastpath for all allocation operations.
    602 Testing showed up to a 5\% performance increase for dynamic linking over static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.
     603Testing showed up to a 5\% performance decrease with dynamic linking as compared to static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.
    603604
    604605All allocator libraries need to perform startup code to initialize data structures, such as the heap array for llheap.
    605 The problem is getting initialized done before the first allocator call.
     606The problem is getting initialization done before the first allocator call.
    606607However, there does not seem to be a mechanism to tell either the static or dynamic loader to first perform initialization code before any calls to a loaded library.
     608Also, initialization code of other libraries and the run-time environment may call memory allocation routines such as \lstinline{malloc}.
     609This compounds the situation as there is no mechanism to tell either the static or dynamic loader to first perform the initialization code of the memory allocator before any other initialization that may involve a dynamic memory allocation call.
    607610As a result, calls to allocation routines occur without initialization.
    608611To deal with this problem, it is necessary to put a conditional initialization check along the allocation fastpath to trigger initialization (singleton pattern).
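A minimal C sketch of the singleton check (names are illustrative):

    #include <stddef.h>
    #include <stdbool.h>

    static bool heap_initialized;                   // false in static storage at program start

    static void heap_init( void ) {
        /* create heap array, buckets, lookup table ... */
        heap_initialized = true;
    }

    void * malloc( size_t size ) {                  // interposed allocator entry point
        if ( __builtin_expect( ! heap_initialized, false ) )  // singleton check: rarely true
            heap_init();                            // whichever call arrives first initializes
        /* ... normal fastpath ... */
        return NULL;                                // allocation elided in this sketch
    }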
     
    641644Therefore, the constructor is useless for knowing when a KT starts because the KT must reference it, and the allocator does not control the application KT.
    642645Fortunately, the singleton pattern needed for initializing the program KT also triggers KT allocator initialization, which can then reference @pgm_thread@ to call @threadManager@'s constructor, otherwise its destructor is not called.
    643 Now when a KT terminates, @~ThreadManager@ is called to chained it onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
     646Now when a KT terminates, @~ThreadManager@ is called to chain it onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
    644647The conditional destructor call prevents closing down the program heap, which must remain available because epilogue code may free more storage.
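The thesis detects KT termination with a thread-local object's destructor; a roughly analogous plain-C sketch (an analogy, not llheap's code) uses @pthread_key_create@'s destructor callback:

    #include <pthread.h>

    static pthread_key_t heap_key;

    static void kt_terminates( void * heap ) {      // destructor runs when a KT with a value exits
        /* chain this KT's heap onto the global-heap free-stack,
           unless this is the program KT (its heap must outlive main) */
    }

    static void make_key( void ) {
        pthread_key_create( &heap_key, kt_terminates );
    }

    static void kt_starts( void * heap ) {          // called from the singleton initialization check
        static pthread_once_t once = PTHREAD_ONCE_INIT;
        pthread_once( &once, make_key );
        pthread_setspecific( heap_key, heap );      // a non-NULL value arms the destructor
    }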
    645648
     
    660663bool traceHeapOff();                    $\C{// stop printing allocation/free calls}$
    661664\end{lstlisting}
    662 This kind of API is necessary to allow concurrent runtime systems to interact with difference memory allocators in a consistent way.
     665This kind of API is necessary to allow concurrent runtime systems to interact with different memory allocators in a consistent way.
    663666
    664667%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
     
    712715Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory;
    713716hence the need to return an alternate value for a zero-sized allocation.
    714 A different approach allowed by the C API is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.
 717A different approach allowed by the C API is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.
    715718In theory, notifying the programmer of memory failure allows recovery;
    716719in practice, it is almost impossible to gracefully recover when out of memory.
     
    736739\paragraph{\lstinline{void * aalloc( size_t dim, size_t elemSize )}}
    737740extends @calloc@ for allocating a dynamic array of objects without calculating the total size of array explicitly but \emph{without} zero-filling the memory.
    738 @aalloc@ is significantly faster than @calloc@, which is the only alternative.
     741@aalloc@ is significantly faster than @calloc@, which is the only alternative given by the standard memory-allocation routines.
    739742
    740743\noindent\textbf{Usage}
     
    825828\begin{itemize}
    826829\item
    827 @fd@: files description.
     830@fd@: file descriptor.
    828831\end{itemize}
    829832It returns the previous file descriptor.
     
    832835\label{p:malloc_expansion}
    833836set the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation request.
    834 It returns the heap extension size used throughout a program, \ie called once at heap initialization.
 837It returns the heap extension size used throughout a program when requesting more memory from the system using the @sbrk@ system-call, \ie called once at heap initialization.
    835838
    836839\paragraph{\lstinline{size_t malloc_mmap_start()}}
     
    915918\begin{itemize}
    916919\item
    917 naming: \CFA regular and @ttype@ polymorphism is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
    918 \item
    919 named arguments: individual allocation properties are specified using postfix function call, so programmers do have to remember parameter positions in allocation calls.
    920 \item
    921 object size: like the \CFA C-style interface, programmers do not have to specify object size or cast allocation results.
     920naming: \CFA regular and @ttype@ polymorphism (@ttype@ polymorphism in \CFA is similar to \CC variadic templates) is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
     921\item
 922named arguments: individual allocation properties are specified using postfix function call, so programmers do not have to remember parameter positions in allocation calls.
     923\item
 924object size: like \CFA's C-style interface, programmers do not have to specify object size or cast allocation results.
    922925\end{itemize}
    923926Note, postfix function call is an alternative call syntax, using backtick @`@, where the argument appears before the function name, \eg
     
    928931duration dur = 3@`@h + 42@`@m + 17@`@s;
    929932\end{cfa}
    930 @ttype@ polymorphism is similar to \CC variadic templates.
    931933
    932934\paragraph{\lstinline{T * alloc( ... )} or \lstinline{T * alloc( size_t dim, ... )}}
    933 is overloaded with a variable number of specific allocation routines, or an integer dimension parameter followed by a variable number specific allocation routines.
     935is overloaded with a variable number of specific allocation operations, or an integer dimension parameter followed by a variable number of specific allocation operations.
     936These allocation operations can be passed as named arguments when calling the \lstinline{alloc} routine.
    934937A call without parameters returns a dynamically allocated object of type @T@ (@malloc@).
    935938A call with only the dimension (dim) parameter returns a dynamically allocated array of objects of type @T@ (@aalloc@).
     
    9809835 5 5 -555819298 -555819298  // two undefined values
    981984\end{lstlisting}
    982 Examples 1 to 3, fill an object with a value or characters.
    983 Examples 4 to 7, fill an array of objects with values, another array, or part of an array.
     985Examples 1 to 3 fill an object with a value or characters.
     986Examples 4 to 7 fill an array of objects with values, another array, or part of an array.
    984987
    985988\subparagraph{\lstinline{S_resize(T) ?`resize( void * oaddr )}}
     
    10151018\subparagraph{\lstinline{S_realloc(T) ?`realloc( T * a ))}}
    10161019used to resize, realign, and fill, where the old object data is copied to the new object.
    1017 The old object type must be the same as the new object type, since the values used.
 1020The old object type must be the same as the new object type, since the old values are used in the new object.
    10181021Note, for @fill@, only the extra space after copying the data from the old object is filled with the given parameter.
    10191022For example:
     
    10291032\end{lstlisting}
    10301033Examples 2 to 3 change the alignment for the initial storage of @i@.
    1031 The @13`fill@ for example 3 does nothing because no extra space is added.
     1034The @13`fill@ in example 3 does nothing because no extra space is added.
    10321035
    10331036\begin{cfa}[numbers=left]
     
    10441047\end{lstlisting}
    10451048Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
    1046 The @13`fill@ for example 3 does nothing because no extra space is added.
     1049The @13`fill@ in example 3 does nothing because no extra space is added.
    10471050
    10481051These \CFA allocation features are used extensively in the development of the \CFA runtime.
  • doc/theses/mubeen_zulfiqar_MMath/background.tex

    r015925a re5d9274  
    3636The management data starts with fixed-sized information in the static-data memory that references components in the dynamic-allocation memory.
    3737The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}.
    38 Allocated objects (light grey) are variable sized, and allocated and maintained by the program;
    39 \ie only the program knows the location of allocated storage, not the memory allocator.
     38Allocated objects (light grey) are variable sized, and are allocated and maintained by the program;
 39\ie only the program knows the location of allocated storage, not the memory allocator.
    4040\begin{figure}[h]
    4141\centering
     
    4949if there are multiple reserved blocks, they are also chained together, usually internally.
    5050
    51 Allocated and freed objects typically have additional management data embedded within them.
     51In some allocator designs, allocated and freed objects have additional management data embedded within them.
    5252\VRef[Figure]{f:AllocatedObject} shows an allocated object with a header, trailer, and alignment padding and spacing around the object.
    5353The header contains information about the object, \eg size, type, etc.
     
    104104\VRef[Figure]{f:MemoryFragmentation} shows an example of how a small block of memory fragments as objects are allocated and deallocated over time.
    105105Blocks of free memory become smaller and non-contiguous making them less useful in serving allocation requests.
    106 Memory is highly fragmented when the sizes of most free blocks are unusable.
     106Memory is highly fragmented when most free blocks are unusable because of their sizes.
    107107For example, \VRef[Figure]{f:Contiguous} and \VRef[Figure]{f:HighlyFragmented} have the same quantity of external fragmentation, but \VRef[Figure]{f:HighlyFragmented} is highly fragmented.
    108108If there is a request to allocate a large object, \VRef[Figure]{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while \VRef[Figure]{f:HighlyFragmented} likely has to request more memory from the operating system.
     
    137137The fewer bin sizes, the fewer lists need to be searched and maintained;
    138138however, the bin sizes are less likely to closely fit the requested object size, leading to more internal fragmentation.
    139 The more bin-sizes, the longer the search and the less likely free objects are to be reused, leading to more external fragmentation and potentially heap blowup.
     139The more bin sizes, the longer the search and the less likely free objects are to be reused, leading to more external fragmentation and potentially heap blowup.
    140140A variation of the binning algorithm allows objects to be allocated to the requested size, but when an object is freed, it is placed on the free list of the next smallest or equal bin-size.
    141141For example, with bin sizes of 8 and 16 bytes, a request for 12 bytes allocates only 12 bytes, but when the object is freed, it is placed on the 8-byte bin-list.
     
    157157The principle of locality recognizes that programs tend to reference a small set of data, called a working set, for a certain period of time, where a working set is composed of temporal and spatial accesses~\cite{Denning05}.
    158158Temporal clustering implies a group of objects are accessed repeatedly within a short time period, while spatial clustering implies a group of objects physically close together (nearby addresses) are accessed repeatedly within a short time period.
    159 Temporal locality commonly occurs during an iterative computation with a fix set of disjoint variables, while spatial locality commonly occurs when traversing an array.
     159Temporal locality commonly occurs during an iterative computation with a fixed set of disjoint variables, while spatial locality commonly occurs when traversing an array.
    160160
    161161Hardware takes advantage of temporal and spatial locality through multiple levels of caching, \ie memory hierarchy.
     
    328328For example, multiple heaps are managed in a pool, starting with a single or a fixed number of heaps that increase\-/decrease depending on contention\-/space issues.
    329329At creation, a thread is associated with a heap from the pool.
    330 When the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.
     330In some implementations of this model, when the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.
    331331If an unlocked heap is found, the thread changes its association and uses that heap.
    332332If all heaps are locked, the thread may create a new heap, use it, and then place the new heap into the pool;
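A hedged C sketch of this scanning scheme (the pool size and names are assumptions):

    #include <pthread.h>
    #include <stddef.h>

    #define POOL 16

    typedef struct { pthread_mutex_t lock; /* free lists ... */ } Heap;
    static Heap pool[POOL];                         // locks initialized at startup
    static __thread Heap * my_heap;                 // association set at thread creation

    Heap * acquire_heap( void ) {
        if ( pthread_mutex_trylock( &my_heap->lock ) == 0 )
            return my_heap;                         // associated heap is unlocked
        for ( size_t i = 0; i < POOL; i += 1 )      // contention: scan for any unlocked heap
            if ( pthread_mutex_trylock( &pool[i].lock ) == 0 )
                return my_heap = &pool[i];          // change association
        pthread_mutex_lock( &my_heap->lock );       // all locked: block (or create a new heap)
        return my_heap;
    }
    // caller performs the memory operation, then pthread_mutex_unlock( &my_heap->lock )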
     
    347347The management information in the static zone must be able to locate all heaps in the dynamic zone.
    348348The management information for the heaps must reside in the dynamic-allocation zone if there are a variable number.
    349 Each heap in the dynamic zone is composed of a list of a free objects and a pointer to its reserved memory.
     349Each heap in the dynamic zone is composed of a list of free objects and a pointer to its reserved memory.
    350350An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory.
    351351Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur.
     
    361361Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup.
    362362The external fragmentation experienced by a program with a single heap is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory.
    363 Additionally, objects freed by one heap cannot be reused by other threads, except indirectly by returning free memory to the operating system, which can be expensive.
    364 (Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.)
     363Additionally, objects freed by one heap cannot be reused by other threads without increasing the cost of the memory operations, except indirectly by returning free memory to the operating system, which can be expensive.
     364Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.
    365365In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused.
    366366
     
    384384In contrast, the T:H model spreads each thread's objects over a larger area in different heaps.
    385385Thread heaps can also eliminate allocator-induced active false-sharing, if memory is acquired so it does not overlap at crucial boundaries with memory for another thread's heap.
    386 For example, assume page boundaries coincide with cache line boundaries, then if a thread heap always acquires pages of memory, no two threads share a page or cache line unless pointers are passed among them.
 386For example, assume page boundaries coincide with cache-line boundaries; then if a thread heap always acquires pages of memory, no two threads share a page or cache line unless pointers are passed among them.
    387387Hence, allocator-induced active false-sharing in \VRef[Figure]{f:AllocatorInducedActiveFalseSharing} cannot occur because the memory for thread heaps never overlaps.
    388388
    389 When a thread terminates, there are two options for handling its heap.
    390 First is to free all objects in the heap to the global heap and destroy the thread heap.
     389When a thread terminates, there are two options for handling its thread heap.
     390First is to free all objects in the thread heap to the global heap and destroy the thread heap.
    391391Second is to place the thread heap on a list of available heaps and reuse it for a new thread in the future.
    392392Destroying the thread heap immediately may reduce external fragmentation sooner, since all free objects are freed to the global heap and may be reused by other threads.
    393 Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible..
     393Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible.
    394394
    395395
     
    417417When the user thread continues on the new kernel thread, it may have pointers into the previous kernel-thread's heap and hold locks associated with it.
    418418To get the same kernel-thread safety, time slicing must be disabled/\-enabled around these operations, so the user thread cannot jump to another kernel thread.
    419 However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is rare (10--100 milliseconds).
 419However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is infrequent.
    420420Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically.
    421421Occasionally ignoring a preemption should be benign, but a persistent lack of preemption can result in both short and long term starvation.
    422 
    423 
    424 \begin{figure}
    425 \centering
    426 \subfigure[Ownership]{
    427         \input{MultipleHeapsOwnership}
    428 } % subfigure
    429 \hspace{0.25in}
    430 \subfigure[No Ownership]{
    431         \input{MultipleHeapsNoOwnership}
    432 } % subfigure
    433 \caption{Heap Ownership}
    434 \label{f:HeapsOwnership}
    435 \end{figure}
    436422
    437423
     
    447433For the T:1/T:H models with or without ownership or the 1:1 model with ownership, a thread may free objects to different heaps, which makes each heap publicly accessible to all threads, called a \newterm{public heap}.
    448434
     435\begin{figure}
     436\centering
     437\subfigure[Ownership]{
     438        \input{MultipleHeapsOwnership}
     439} % subfigure
     440\hspace{0.25in}
     441\subfigure[No Ownership]{
     442        \input{MultipleHeapsNoOwnership}
     443} % subfigure
     444\caption{Heap Ownership}
     445\label{f:HeapsOwnership}
     446\end{figure}
     447
    449448\VRef[Figure]{f:MultipleHeapStorageOwnership} shows the effect of ownership on storage layout.
    450 (For simplicity assume the heaps all use the same size of reserves storage.)
 449(For simplicity, assume the heaps all use the same size of reserved storage.)
    451450In contrast to \VRef[Figure]{f:MultipleHeapStorage}, each reserved area used by a heap only contains free storage for that particular heap because threads must return free objects back to the owner heap.
    452451Again, because multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
     
    473472While the returning thread can batch objects, batching across multiple heaps is complex and there is no obvious time when to push back to the owner heap.
    474473It is better for returning threads to immediately return to the receiving thread's batch list as the receiving thread has better knowledge when to incorporate the batch list into its free pool.
    475 Batching leverages the fact that most allocation patterns use the contention-free fast-path so locking on the batch list is rare for both the returning and receiving threads.
    476 
    477 It is possible for heaps to steal objects rather than return them and reallocating these objects when storage runs out on a heap.
     474Batching leverages the fact that most allocation patterns use the contention-free fast-path, so locking on the batch list is rare for both the returning and receiving threads.
     475
 476It is possible for heaps to steal objects rather than return them and then reallocate these objects when storage runs out on a heap.
    478477However, stealing can result in passive false-sharing.
    479478For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, Object$_2$ may be deallocated to Thread$_2$'s heap initially.
     
    485484
    486485Bracketing every allocation with headers/trailers can result in significant internal fragmentation, as shown in \VRef[Figure]{f:ObjectHeaders}.
    487 Especially if the headers contain redundant management information, \eg object size may be the same for many objects because programs only allocate a small set of object sizes.
 486This is especially true if the headers contain redundant management information, where storing that information in every header wastes storage, \eg the object size may be the same for many objects because programs only allocate a small set of object sizes.
    488487As well, it can result in poor cache usage, since only a portion of the cache line is holding useful information from the program's perspective.
    489488Spatial locality can also be negatively affected leading to poor cache locality~\cite{Feng05}:
     
    660659With local free-lists in containers, as in \VRef[Figure]{f:LocalFreeListWithinContainers}, the container is simply removed from one heap's free list and placed on the new heap's free list.
    661660Thus, when using local free-lists, the operation of moving containers is reduced from $O(N)$ to $O(1)$.
    662 The cost is adding information to a header, which increases the header size, and therefore internal fragmentation.
     661However, there is the additional storage cost in the header, which increases the header size, and therefore internal fragmentation.
    663662
    664663\begin{figure}
     
    689688The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup.
    690689In the hybrid approach, a thread first allocates from its private heap and second from its public heap if no free memory exists in the private heap.
    691 Similarly, a thread first deallocates an object its private heap, and second to the public heap.
     690Similarly, a thread first deallocates an object to its private heap, and second to the public heap.
    692691Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps.
    693692Note, deallocation from the private to the public heap (dashed line) is unlikely because there are no obvious advantages unless the public heap provides the only interface to the global heap.
  • doc/theses/mubeen_zulfiqar_MMath/benchmarks.tex

    r015925a re5d9274  
    1212\item[Benchmarks]
    1313are a suite of application programs (SPEC CPU/WEB) that are exercised in a common way (inputs) to find differences among underlying software implementations associated with an application (compiler, memory allocator, web server, \etc).
    14 The applications are suppose to represent common execution patterns that need to perform well with respect to an underlying software implementation.
     14The applications are supposed to represent common execution patterns that need to perform well with respect to an underlying software implementation.
    1515Benchmarks are often criticized for having overlapping patterns, insufficient patterns, or extraneous code that masks patterns.
    1616\item[Micro-Benchmarks]
     
    2626
    2727This thesis designs and examines a new set of micro-benchmarks for memory allocators that test a variety of allocation patterns, each with multiple tuning parameters.
    28 The aim of the micro-benchmark suite is to create a set of programs that can evaluate a memory allocator based on the key performance matrices such as speed, memory overhead, and cache performance.
     28The aim of the micro-benchmark suite is to create a set of programs that can evaluate a memory allocator based on the key performance metrics such as speed, memory overhead, and cache performance.
    2929% These programs can be taken as a standard to benchmark an allocator's basic goals.
    3030These programs give details of an allocator's memory overhead and speed under certain allocation patterns.
    31 The allocation patterns are configurable (adjustment knobs) to observe an allocator's performance across a spectrum of events for a desired allocation pattern, which is seldom possible with benchmark programs.
     31The allocation patterns are configurable (adjustment knobs) to observe an allocator's performance across a spectrum of allocation patterns, which is seldom possible with benchmark programs.
    3232Each micro-benchmark program has multiple control knobs specified by command-line arguments.
    3333
    34 The new micro-benchmark suite measures performance by allocating dynamic objects and measuring specific matrices.
     34The new micro-benchmark suite measures performance by allocating dynamic objects and measuring specific metrics.
    3535An allocator's speed is benchmarked in different ways, as are issues like false sharing.
    3636
     
    4040Modern memory allocators, such as llheap, must handle multi-threaded programs at the KT and UT level.
    4141The following multi-threaded micro-benchmarks are presented to give a sense of prior work~\cite{Berger00} at the KT level.
    42 None of the prior work address multi-threading at the UT level.
     42None of the prior work addresses multi-threading at the UT level.
    4343
    4444
     
    4747This benchmark stresses the ability of the allocator to handle different threads allocating and deallocating independently.
    4848There is no interaction among threads, \ie no object sharing.
    49 Each thread repeatedly allocate 100,000 \emph{8-byte} objects then deallocates them in the order they were allocated.
    50 Runtime of the benchmark evaluates its efficiency.
     49Each thread repeatedly allocates 100,000 \emph{8-byte} objects then deallocates them in the order they were allocated.
     50The execution time of the benchmark evaluates its efficiency.
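A minimal C rendering of this benchmark might look as follows; the repetition and thread counts are arbitrary stand-ins for the benchmark's knobs, and the sketch is illustrative rather than the suite's actual source.

    #include <stdlib.h>
    #include <pthread.h>

    enum { NOBJS = 100000, REPS = 100, NTHREADS = 4 };  /* REPS and NTHREADS are arbitrary knobs */

    static void * worker( void * unused ) {
        void ** objs = malloc( sizeof( void * ) * NOBJS );
        for ( int r = 0; r < REPS; r += 1 ) {           /* "repeatedly" */
            for ( int i = 0; i < NOBJS; i += 1 ) objs[i] = malloc( 8 );  /* 8-byte objects */
            for ( int i = 0; i < NOBJS; i += 1 ) free( objs[i] );        /* allocation order */
        }
        free( objs );
        return NULL;
    }

    int main( void ) {
        pthread_t t[NTHREADS];
        for ( int i = 0; i < NTHREADS; i += 1 ) pthread_create( &t[i], NULL, worker, NULL );
        for ( int i = 0; i < NTHREADS; i += 1 ) pthread_join( t[i], NULL );
        /* the program's wall-clock time is the benchmark metric */
    }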
    5151
    5252
     
    6363Before the thread terminates, it passes its array of 10,000 objects to a new child thread to continue the process.
    6464The number of thread generations varies depending on the thread speed.
    65 It calculates memory operations per second as an indicator of memory allocator's performance.
     65It calculates memory operations per second as an indicator of the memory allocator's performance.
    6666
    6767
     
    7575\label{s:ChurnBenchmark}
    7676
    77 The churn benchmark measures the runtime speed of an allocator in a multi-threaded scenerio, where each thread extensively allocates and frees dynamic memory.
     77The churn benchmark measures the runtime speed of an allocator in a multi-threaded scenario, where each thread extensively allocates and frees dynamic memory.
    7878Only @malloc@ and @free@ are used to eliminate any extra cost, such as @memcpy@ in @calloc@ or @realloc@.
    79 Churn simulates a memory intensive program that can be tuned to create different scenarios.
     79Churn simulates a memory-intensive program and can be tuned to create different scenarios.
    8080
    8181\VRef[Figure]{fig:ChurnBenchFig} shows the pseudo code for the churn micro-benchmark.
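Since the figure itself is not reproduced here, the following hedged C sketch shows one plausible churn worker: each iteration frees a random slot and reallocates it with a random size, using only @malloc@ and @free@. The slot count, operation count, and size bound are illustrative, and POSIX @rand_r@ is assumed.

    #include <stdlib.h>

    enum { SLOTS = 1024, OPS = 1000000, MAXSIZE = 512 };  /* illustrative knob values */

    void churnWorker( unsigned seed ) {
        void * ring[SLOTS] = { 0 };
        for ( long i = 0; i < OPS; i += 1 ) {
            int s = rand_r( &seed ) % SLOTS;
            free( ring[s] );                              /* free( NULL ) is a no-op on the first pass */
            ring[s] = malloc( 1 + rand_r( &seed ) % MAXSIZE );  /* only malloc/free are used */
        }
        for ( int s = 0; s < SLOTS; s += 1 ) free( ring[s] );
    }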
     
    133133When threads share a cache line, frequent reads/writes to their cache-line object cause cache misses, which cause escalating delays as cache distance increases.
    134134
    135 Cache thrash tries to create a scenerio that leads to false sharing, if the underlying memory allocator is allocating dynamic memory to multiple threads on the same cache lines.
     135Cache thrash tries to create a scenario that leads to false sharing, if the underlying memory allocator is allocating dynamic memory to multiple threads on the same cache lines.
    136136Ideally, a memory allocator should distance the dynamic memory region of one thread from another.
    137137Having multiple threads allocating small objects simultaneously can cause a memory allocator to allocate objects on the same cache line, if it is not distancing the memory among different threads.
     
    141141Each worker thread allocates an object and intensively reads/writes it M times to possibly invalidate cache lines that may interfere with other threads sharing the same cache line.
    142142Each thread repeats this for N times.
    143 The main thread measures the total time taken to for all worker threads to complete.
    144 Worker threads sharing cache lines with each other will take longer.
     143The main thread measures the total time taken for all worker threads to complete.
     144Worker threads sharing cache lines with each other are expected to take longer.
    145145
    146146\begin{figure}
     
    156156        signal workers to free
    157157        ...
    158         print addresses from each $thread$
    159158Worker Thread$\(_1\)$
    160         allocate, write, read, free
    161         warmup memory in chunkc of 16 bytes
    162         ...
    163         malloc N objects
    164         ...
    165         free objects
    166         return object address to Main Thread
     159        warm up memory in chunks of 16 bytes
     160        ...
     161        For N
     162                malloc an object
     163                read/write the object M times
     164                free the object
     165        ...
    167166Worker Thread$\(_2\)$
    168167        // same as Worker Thread$\(_1\)$
     
    191190
    192191The cache-scratch micro-benchmark measures allocator-induced passive false-sharing as illustrated in \VRef{s:AllocatorInducedPassiveFalseSharing}.
    193 As for cache thrash, if memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
     192As with cache thrash, if memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
    194193In this scenario, the false sharing is being caused by the memory allocator although it is started by the program sharing an object.
    195194
     
    202201Cache scratch tries to create a scenario that leads to false sharing and should make the memory allocator preserve the program-induced false sharing, if it does not return a freed object to its owner thread and, instead, re-uses it instantly.
    203202An allocator using object ownership, as described in section \VRef{s:Ownership}, is less susceptible to allocator-induced passive false-sharing.
    204 If the object is returned to the thread who owns it, then the thread that gets a new object is less likely to be on the same cache line.
     203If the object is returned to the thread that owns it, then the new object that the thread gets is less likely to be on the same cache line.
    205204
    206205\VRef[Figure]{fig:benchScratchFig} shows the pseudo code for the cache-scratch micro-benchmark.
     
    224223        signal workers to free
    225224        ...
    226         print addresses from each $thread$
    227225Worker Thread$\(_1\)$
    228         allocate, write, read, free
    229         warmup memory in chunkc of 16 bytes
    230         ...
    231         for ( N )
    232                 free an object passed by Main Thread
     226        warm up memory in chunks of 16 bytes
     227        ...
     228        free the object passed by the Main Thread
     229        For N
    233230                malloc new object
    234         ...
    235         free objects
    236         return new object addresses to Main Thread
     231                read/write the object M times
     232                free the object
     233        ...
    237234Worker Thread$\(_2\)$
    238235        // same as Worker Thread$\(_1\)$
     
    248245
    249246Similar to the cache-thrash benchmark in section \VRef{sec:benchThrashSec}, different cache access scenarios can be created using the following command-line arguments.
    250 \begin{description}[itemsep=0pt,parsep=0pt]
     247\begin{description}[topsep=0pt,itemsep=0pt,parsep=0pt]
    251248\item[threads:]
    252249number of threads (K).
     
    262259\subsection{Speed Micro-Benchmark}
    263260\label{s:SpeedMicroBenchmark}
     261\vspace*{-4pt}
    264262
    265263The speed benchmark measures the runtime speed of individual memory allocation routines and sequences of them:
    266 \begin{enumerate}[itemsep=0pt,parsep=0pt]
     264\begin{enumerate}[topsep=-5pt,itemsep=0pt,parsep=0pt]
    267265\item malloc
    268266\item realloc
     
    332330\VRef[Figure]{fig:MemoryBenchFig} shows the pseudo code for the memory micro-benchmark.
    333331It creates a producer-consumer scenario with K producer threads, where each producer has M consumer threads.
    334 A producer has a separate buffer for each consumer and allocates N objects of random sizes following a settable distribution for each consumer.
     332A producer has a separate buffer for each consumer and allocates N objects of random sizes following a configurable distribution for each consumer.
    335333A consumer frees these objects.
    336334Program memory usage is recorded after every memory operation throughout the run.
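A minimal sketch of a single producer/consumer pair follows, assuming one pair and a uniform size distribution; the real benchmark has K producers, M consumers per producer, and a configurable distribution.

    #include <stdlib.h>
    #include <pthread.h>

    enum { N = 100000, MAXSIZE = 1024 };
    static void * buffer[N];                            /* one producer-to-consumer buffer */

    static void * consumer( void * unused ) {
        for ( int i = 0; i < N; i += 1 ) free( buffer[i] );  /* consumer frees */
        return NULL;
    }

    int main( void ) {
        unsigned seed = 42;
        for ( int i = 0; i < N; i += 1 )                /* producer allocates random sizes */
            buffer[i] = malloc( 1 + rand_r( &seed ) % MAXSIZE );
        pthread_t c;
        pthread_create( &c, NULL, consumer, NULL );
        pthread_join( c, NULL );
        /* the real benchmark samples program memory usage after every operation */
    }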
  • doc/theses/mubeen_zulfiqar_MMath/conclusion.tex

    r015925a re5d9274  
    1717% ====================
    1818
    19 The goal of this thesis was to build a low-latency memory allocator for both KT and UT multi-threads systems, which is competitive with the best current memory allocators, while extending the feature set of existing and new allocator routines.
     19The goal of this thesis was to build a low-latency (or high bandwidth) memory allocator for both KT and UT multi-threading systems that is competitive with the best current memory allocators while extending the feature set of existing and new allocator routines.
    2020The new llheap memory-allocator achieves all of these goals, while maintaining and managing sticky allocation information without a performance loss.
    2121Hence, it becomes possible to use @realloc@ frequently as a safe operation, rather than just occasionally.
    2222Furthermore, the ability to query sticky properties and information allows programmers to write safer programs, as it is possible to dynamically match allocation styles from unknown library routines that return allocations.
    2323
    24 Extending the C allocation API with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ means programmers do not make mistakes writing theses useful allocation operations.
     24Extending the C allocation API with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ means programmers do not have to do these useful allocation operations themselves.
    2525The ability to use \CFA's advanced type-system (and possibly \CC's too) to have one allocation routine with completely orthogonal sticky properties shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation.
    2626
    2727Providing comprehensive statistics for all allocation operations is invaluable in understanding and debugging a program's dynamic behaviour.
    28 No other memory allocator provides comprehensive statistics gathering.
     28No other memory allocator provides such comprehensive statistics gathering.
    2929This capability was used extensively during the development of llheap to verify its behaviour.
    3030As well, providing a debugging mode where allocations are checked, along with internal pre/post conditions and invariants, is extremely useful, especially for students.
    31 While not as powerful as the @valgrind@ interpreter, a large number of allocations mistakes are detected.
     31While not as powerful as the @valgrind@ interpreter, a large number of allocation mistakes are detected.
    3232Finally, contention-free statistics gathering and debugging have a low enough cost to be used in production code.
    3333
     
    3636
    3737Starting a micro-benchmark test-suite for comparing allocators, rather than relying on a suite of arbitrary programs, has been an interesting challenge.
    38 The current micro-benchmarks allow some understand of allocator implementation properties without actually looking at the implementation.
     38The current micro-benchmarks allow some understanding of allocator implementation properties without actually looking at the implementation.
    3939For example, the memory micro-benchmark quickly identified how several of the allocators work at the global level.
    4040It was not possible to show how the micro-benchmarks adjustment knobs were used to tune to an interesting test point.
     
    4545
    4646A careful walk-through of the allocator fastpath should yield additional optimizations for a slight performance gain.
    47 In particular, looking at the implementation of rpmalloc, which is often the fastest allocator,
     47In particular, analysing the implementation of rpmalloc, which is often the fastest allocator, may suggest further fastpath optimizations.
    4848
    49 The micro-benchmarks project requires more testing and analysis.
    50 Additional allocations patterns are needed to extract meaningful information about allocators, and within allocation patterns, what are the best tuning knobs.
     49The micro-benchmark project requires more testing and analysis.
     50Additional allocation patterns are needed to extract meaningful information about allocators, and within allocation patterns, what are the most useful tuning knobs.
    5151Also, identifying ways to visualize the results of the micro-benchmarks is a work in progress.
    5252
    53 After llheap is made available on gitHub, interacting with its users to locate problems and improvements, will make llbench a more robust memory allocator.
    54 As well, feedback from the \uC and \CFA projects, which have adopted llheap for their memory allocator, will provide additional feedback.
     53After llheap is made available on GitHub, interacting with its users to locate problems and improvements will make llheap a more robust memory allocator.
     54As well, feedback from the \uC and \CFA projects, which have adopted llheap for their memory allocator, will provide additional information.
  • doc/theses/mubeen_zulfiqar_MMath/figures/Header.fig

    r015925a re5d9274  
    20202 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2
    2121         3300 1500 3300 2400
     222 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     23         4200 1800 6600 1800 6600 2100 4200 2100 4200 1800
    22242 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
    2325        1 1 1.00 45.00 90.00
    24          4050 2625 3750 2625 3750 2400
     26         4200 2775 3750 2775 3750 1725
    25272 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
    2628        1 1 1.00 45.00 90.00
    27          4050 2850 3450 2850 3450 2400
    28 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    29          4200 1800 6600 1800 6600 2100 4200 2100 4200 1800
     29         4200 2550 4050 2550 4050 1725
     302 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3
     31        1 1 1.00 45.00 90.00
     32         4200 3000 3450 3000 3450 2025
    30334 0 0 50 -1 0 12 0.0000 2 180 1185 1875 1725 bucket pointer\001
    31344 0 0 50 -1 0 12 0.0000 2 180 1005 1875 2025 mapped size\001
    32354 0 0 50 -1 0 12 0.0000 2 135 1215 1875 2325 next free block\001
    33364 2 0 50 -1 0 12 0.0000 2 135 480 1725 2025 union\001
    34 4 1 0 50 -1 0 12 0.0000 2 135 270 3775 2325 0/1\001
    35 4 1 0 50 -1 0 12 0.0000 2 135 270 3475 2325 0/1\001
    36374 1 0 50 -1 0 12 0.0000 2 180 945 5400 2025 request size\001
    37384 1 0 50 -1 0 12 0.0000 2 180 765 5400 1425 4/8-bytes\001
    38394 1 0 50 -1 0 12 0.0000 2 180 765 3000 1425 4/8-bytes\001
    39 4 0 0 50 -1 0 12 0.0000 2 135 825 4125 2700 zero filled\001
    40 4 0 0 50 -1 0 12 0.0000 2 180 1515 4125 2925 mapped allocation\001
     404 1 0 50 -1 0 12 0.0000 2 135 270 3475 2025 0/1\001
     414 1 0 50 -1 0 12 0.0000 2 135 270 3775 1725 0/1\001
     424 1 0 50 -1 0 12 0.0000 2 135 270 4075 1725 0/1\001
     434 0 0 50 -1 0 12 0.0000 2 180 1515 4275 3075 mapped allocation\001
     444 0 0 50 -1 0 12 0.0000 2 135 825 4275 2850 zero filled\001
     454 0 0 50 -1 0 12 0.0000 2 180 1920 4275 2625 alignment (fake header)\001
  • doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsNoOwnership.fig

    r015925a re5d9274  
    1 #FIG 3.2  Produced by xfig version 3.2.5
     1#FIG 3.2  Produced by xfig version 3.2.7b
    22Landscape
    33Center
    44Inches
    5 Letter 
     5Letter
    66100.00
    77Single
     
    11112 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    1212         1200 2100 1500 2100 1500 1800 1200 1800 1200 2100
    13 4 1 0 50 -1 0 11 0.0000 2 195 495 1350 2025 H$_1$\001
     134 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001
    1414-6
    15156 1950 1800 2550 2100
    16162 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    1717         2100 2100 2400 2100 2400 1800 2100 1800 2100 2100
    18 4 1 0 50 -1 0 11 0.0000 2 195 495 2250 2025 H$_2$\001
     184 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_2$\001
    1919-6
    20201 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 1350 1350 150 150 1350 1350 1500 1350
    21211 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 2250 1350 150 150 2250 1350 2400 1350
    22222 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    23         0 0 1.00 45.00 90.00
    24         0 0 1.00 45.00 90.00
     23        1 1 1.00 45.00 90.00
     24        1 1 1.00 45.00 90.00
    2525         1275 1800 1275 1500
    26 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    27         0 0 1.00 45.00 90.00
     262 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
     27        1 1 1.00 45.00 90.00
    2828         1425 1500 1425 1800
    29292 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2
    30         0 0 1.00 45.00 90.00
     30        1 1 1.00 45.00 90.00
    3131         1425 1500 2175 1800
    32322 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2
    33         0 0 1.00 45.00 90.00
     33        1 1 1.00 45.00 90.00
    3434         2175 1500 1425 1800
    35352 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    36         0 0 1.00 45.00 90.00
     36        1 1 1.00 45.00 90.00
    3737         2175 1500 2175 1800
    38382 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    39         0 0 1.00 45.00 90.00
    40         0 0 1.00 45.00 90.00
     39        1 1 1.00 45.00 90.00
     40        1 1 1.00 45.00 90.00
    4141         2325 1800 2325 1500
    42 4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001
    43 4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_2$\001
     424 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001
     434 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_2$\001
  • doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsOwnership.fig

    r015925a re5d9274  
    1 #FIG 3.2  Produced by xfig version 3.2.5
     1#FIG 3.2  Produced by xfig version 3.2.7b
    22Landscape
    33Center
    44Inches
    5 Letter 
     5Letter
    66100.00
    77Single
     
    11112 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    1212         1200 2100 1500 2100 1500 1800 1200 1800 1200 2100
    13 4 1 0 50 -1 0 11 0.0000 2 195 495 1350 2025 H$_1$\001
     134 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001
    1414-6
    15156 1950 1800 2550 2100
    16162 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    1717         2100 2100 2400 2100 2400 1800 2100 1800 2100 2100
    18 4 1 0 50 -1 0 11 0.0000 2 195 495 2250 2025 H$_2$\001
     184 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_2$\001
    1919-6
    20201 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 1350 1350 150 150 1350 1350 1500 1350
    21211 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 2250 1350 150 150 2250 1350 2400 1350
    22222 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    23         0 0 1.00 45.00 90.00
    24         0 0 1.00 45.00 90.00
     23        1 1 1.00 45.00 90.00
     24        1 1 1.00 45.00 90.00
    2525         2175 1500 1425 1800
    26262 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    27         0 0 1.00 45.00 90.00
    28         0 0 1.00 45.00 90.00
    29          1425 1500 2175 1800
    30 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    31         0 0 1.00 45.00 90.00
    32         0 0 1.00 45.00 90.00
     27        1 1 1.00 45.00 90.00
     28        1 1 1.00 45.00 90.00
    3329         1275 1800 1275 1500
    34302 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    35         0 0 1.00 45.00 90.00
    36         0 0 1.00 45.00 90.00
     31        1 1 1.00 45.00 90.00
     32        1 1 1.00 45.00 90.00
    3733         2325 1800 2325 1500
    38 4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_2$\001
    39 4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001
     342 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
     35        1 1 1.00 45.00 90.00
     36        1 1 1.00 45.00 90.00
     37         1425 1500 2175 1800
     384 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_2$\001
     394 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001
  • doc/theses/mubeen_zulfiqar_MMath/figures/PerThreadHeap.fig

    r015925a re5d9274  
    1 #FIG 3.2  Produced by xfig version 3.2.5
     1#FIG 3.2  Produced by xfig version 3.2.7b
    22Landscape
    33Center
    44Inches
    5 Letter 
     5Letter
    66100.00
    77Single
     
    11112 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    1212         2700 1800 3000 1800 3000 2100 2700 2100 2700 1800
    13 4 1 0 50 -1 0 11 0.0000 2 135 135 2850 2025 G\001
     134 1 0 50 -1 0 11 0.0000 2 120 135 2850 2025 G\001
    1414-6
    15151 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
     
    17171 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350
    18182 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    19         0 0 1.00 45.00 90.00
    20         0 0 1.00 45.00 90.00
     19        1 1 1.00 45.00 90.00
     20        1 1 1.00 45.00 90.00
    2121         1350 1500 1350 1800
    22222 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     
    2727         2100 1800 2400 1800 2400 2100 2100 2100 2100 1800
    28282 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    29         0 0 1.00 45.00 90.00
    30         0 0 1.00 45.00 90.00
     29        1 1 1.00 45.00 90.00
     30        1 1 1.00 45.00 90.00
    3131         1800 1500 1800 1800
    32322 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    33         0 0 1.00 45.00 90.00
    34         0 0 1.00 45.00 90.00
     33        1 1 1.00 45.00 90.00
     34        1 1 1.00 45.00 90.00
    3535         2250 1500 2250 1800
    36 4 1 0 50 -1 0 11 0.0000 2 195 1320 2550 2025 $\\Leftrightarrow$\001
    37 4 1 0 50 -1 0 11 0.0000 2 195 1320 3150 2025 $\\Leftrightarrow$\001
    38 4 0 0 50 -1 0 11 0.0000 2 135 240 3300 2025 OS\001
    39 4 1 0 50 -1 0 11 0.0000 2 195 495 1350 2025 H$_1$\001
    40 4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001
    41 4 1 0 50 -1 0 11 0.0000 2 195 495 1800 2025 H$_2$\001
    42 4 1 0 50 -1 0 11 0.0000 2 195 465 1800 1425 T$_2$\001
    43 4 1 0 50 -1 0 11 0.0000 2 195 495 2250 2025 H$_3$\001
    44 4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_3$\001
     364 1 0 50 -1 0 11 0.0000 2 180 1260 2550 2025 $\\Leftrightarrow$\001
     374 1 0 50 -1 0 11 0.0000 2 180 1260 3150 2025 $\\Leftrightarrow$\001
     384 0 0 50 -1 0 11 0.0000 2 120 240 3300 2025 OS\001
     394 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001
     404 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001
     414 1 0 50 -1 0 11 0.0000 2 165 495 1800 2025 H$_2$\001
     424 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001
     434 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_3$\001
     444 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001
  • doc/theses/mubeen_zulfiqar_MMath/figures/SharedHeaps.fig

    r015925a re5d9274  
    1 #FIG 3.2  Produced by xfig version 3.2.5
     1#FIG 3.2  Produced by xfig version 3.2.7b
    22Landscape
    33Center
    44Inches
    5 Letter 
     5Letter
    66100.00
    77Single
     
    10106 1500 1200 2100 1500
    11111 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350
    12 4 1 0 50 -1 0 11 0.0000 2 195 465 1800 1425 T$_2$\001
     124 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001
    1313-6
    14146 1050 1200 1650 1500
    15151 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
    16 4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001
     164 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001
    1717-6
    18186 1950 1200 2550 1500
    19191 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350
    20 4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_3$\001
     204 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001
    2121-6
    22226 1275 1800 1875 2100
    23232 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    2424         1425 1800 1725 1800 1725 2100 1425 2100 1425 1800
    25 4 1 0 50 -1 0 11 0.0000 2 195 495 1575 2025 H$_1$\001
     254 1 0 50 -1 0 11 0.0000 2 165 495 1575 2025 H$_1$\001
    2626-6
    27276 1725 1800 2325 2100
    28282 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    2929         1875 1800 2175 1800 2175 2100 1875 2100 1875 1800
    30 4 1 0 50 -1 0 11 0.0000 2 195 495 2025 2025 H$_2$\001
     304 1 0 50 -1 0 11 0.0000 2 165 495 2025 2025 H$_2$\001
    3131-6
    32326 2475 1800 2775 2100
    33332 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    3434         2475 1800 2775 1800 2775 2100 2475 2100 2475 1800
    35 4 1 0 50 -1 0 11 0.0000 2 135 135 2625 2025 G\001
     354 1 0 50 -1 0 11 0.0000 2 120 135 2625 2025 G\001
    3636-6
    37372 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    38         0 0 1.00 45.00 90.00
    39         0 0 1.00 45.00 90.00
     38        1 1 1.00 45.00 90.00
     39        1 1 1.00 45.00 90.00
    4040         1275 1500 1500 1800
    41412 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    42         0 0 1.00 45.00 90.00
    43         0 0 1.00 45.00 90.00
     42        1 1 1.00 45.00 90.00
     43        1 1 1.00 45.00 90.00
    4444         1425 1500 1950 1800
    45452 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    46         0 0 1.00 45.00 90.00
    47         0 0 1.00 45.00 90.00
     46        1 1 1.00 45.00 90.00
     47        1 1 1.00 45.00 90.00
    4848         1725 1500 1650 1800
    49492 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    50         0 0 1.00 45.00 90.00
    51         0 0 1.00 45.00 90.00
     50        1 1 1.00 45.00 90.00
     51        1 1 1.00 45.00 90.00
    5252         1875 1500 2025 1800
    53532 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    54         0 0 1.00 45.00 90.00
    55         0 0 1.00 45.00 90.00
     54        1 1 1.00 45.00 90.00
     55        1 1 1.00 45.00 90.00
    5656         2250 1500 2100 1800
    57 4 0 0 50 -1 0 11 0.0000 2 135 240 3075 2025 OS\001
    58 4 1 0 50 -1 0 11 0.0000 2 195 1320 2325 2025 $\\Leftrightarrow$\001
    59 4 1 0 50 -1 0 11 0.0000 2 195 1320 2925 2025 $\\Leftrightarrow$\001
     574 0 0 50 -1 0 11 0.0000 2 120 240 3075 2025 OS\001
     584 1 0 50 -1 0 11 0.0000 2 180 1260 2325 2025 $\\Leftrightarrow$\001
     594 1 0 50 -1 0 11 0.0000 2 180 1260 2925 2025 $\\Leftrightarrow$\001
  • doc/theses/mubeen_zulfiqar_MMath/figures/SingleHeap.fig

    r015925a re5d9274  
    1 #FIG 3.2  Produced by xfig version 3.2.5
     1#FIG 3.2  Produced by xfig version 3.2.7b
    22Landscape
    33Center
    44Inches
    5 Letter 
     5Letter
    66100.00
    77Single
     
    10106 1500 1200 2100 1500
    11111 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350
    12 4 1 0 50 -1 0 11 0.0000 2 195 465 1800 1425 T$_2$\001
     124 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001
    1313-6
    14146 1050 1200 1650 1500
    15151 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350
    16 4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001
     164 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001
    1717-6
    18186 1950 1200 2550 1500
    19191 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350
    20 4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_3$\001
     204 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001
    2121-6
    22222 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    23         0 0 1.00 45.00 90.00
    24         0 0 1.00 45.00 90.00
     23        1 1 1.00 45.00 90.00
     24        1 1 1.00 45.00 90.00
    2525         1350 1500 1725 1800
    26262 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    27         0 0 1.00 45.00 90.00
    28         0 0 1.00 45.00 90.00
     27        1 1 1.00 45.00 90.00
     28        1 1 1.00 45.00 90.00
    2929         2250 1500 1875 1800
    30302 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    3131         1650 1800 1950 1800 1950 2100 1650 2100 1650 1800
    32322 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    33         0 0 1.00 45.00 90.00
    34         0 0 1.00 45.00 90.00
     33        1 1 1.00 45.00 90.00
     34        1 1 1.00 45.00 90.00
    3535         1800 1500 1800 1800
    36 4 1 0 50 -1 0 11 0.0000 2 195 495 1800 2025 H$_1$\001
    37 4 1 0 50 -1 0 11 0.0000 2 195 1320 2100 2025 $\\Leftrightarrow$\001
    38 4 0 0 50 -1 0 11 0.0000 2 135 240 2250 2025 OS\001
     364 1 0 50 -1 0 11 0.0000 2 165 495 1800 2025 H$_1$\001
     374 1 0 50 -1 0 11 0.0000 2 180 1260 2100 2025 $\\Leftrightarrow$\001
     384 0 0 50 -1 0 11 0.0000 2 120 240 2250 2025 OS\001
  • doc/theses/mubeen_zulfiqar_MMath/figures/UserKernelHeaps.fig

    r015925a re5d9274  
    4545-6
    46462 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    47         0 0 1.00 45.00 90.00
    48         0 0 1.00 45.00 90.00
     47        1 1 1.00 45.00 90.00
     48        1 1 1.00 45.00 90.00
    4949         2025 2100 2025 2400
    50502 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    51         0 0 1.00 45.00 90.00
    52         0 0 1.00 45.00 90.00
     51        1 1 1.00 45.00 90.00
     52        1 1 1.00 45.00 90.00
    5353         2475 2100 2475 2400
    54542 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
    55         0 0 1.00 45.00 90.00
    56         0 0 1.00 45.00 90.00
     55        1 1 1.00 45.00 90.00
     56        1 1 1.00 45.00 90.00
    5757         2925 2100 2925 2400
    58584 1 0 50 -1 0 11 0.0000 2 135 2235 2475 1725 scheduled across kernel threads\001
  • doc/theses/mubeen_zulfiqar_MMath/intro.tex

    r015925a re5d9274  
    5353When this allocator proves inadequate, programmers often write specialize allocators for specific needs.
    5454C and \CC allow easy replacement of the default memory allocator with an alternative specialized or general-purpose memory-allocator.
    55 (Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.)
     55Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.
    5656However, high-performance memory-allocators for kernel and user multi-threaded programs are still being designed and improved.
    5757For this reason, several alternative general-purpose allocators have been written for C/\CC with the goal of scaling in a multi-threaded program~\cite{Berger00,mtmalloc,streamflow,tcmalloc}.
     
    6565\begin{enumerate}[leftmargin=*]
    6666\item
    67 Implementation of a new stand-lone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
    68 
    69 \item
    70 Adopt @nullptr@ return for a zero-sized allocation, rather than an actual memory address, which can be passed to @free@.
     67Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading).
    7168
    7269\item
     
    104101
    105102\item
    106 Provide additional heap wrapper functions in \CFA creating an orthogonal set of allocation operations and properties.
     103Provide additional heap wrapper functions in \CFA creating a more usable set of allocation operations and properties.
    107104
    108105\item
     
    111108\item
    112109@malloc_alignment( addr )@ returns the alignment of the allocation pointed-to by @addr@.
    113 If the allocation is not aligned or @addr@ is the @nulladdr@, the minimal alignment is returned.
     110If the allocation is not aligned or @addr@ is @NULL@, the minimal alignment is returned.
    114111\item
    115112@malloc_zero_fill( addr )@ returns a boolean result indicating if the memory pointed-to by @addr@ is allocated with zero fill, e.g., by @calloc@/@cmemalign@.
     
    119116@malloc_usable_size( addr )@ returns the usable (total) size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@.
    120117\end{itemize}
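A hedged usage sketch of these query routines follows; the prototypes are inferred from the descriptions above rather than copied from a header (@malloc_usable_size@ also exists in glibc), so they should be treated as assumptions.

    #include <stdlib.h>
    #include <stdio.h>

    extern size_t malloc_alignment( void * addr );      /* prototypes inferred from the */
    extern int malloc_zero_fill( void * addr );         /* descriptions above, not copied */
    extern size_t malloc_usable_size( void * addr );    /* from an actual header */

    int main( void ) {
        void * p = calloc( 1, 100 );                    /* zero-filled allocation */
        printf( "alignment %zu, zero filled %d, usable %zu\n",
                malloc_alignment( p ), malloc_zero_fill( p ), malloc_usable_size( p ) );
        free( p );                                      /* usable size >= requested 100 */
    }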
    121 
    122 \item
    123 Provide mostly contention-free allocation and free operations via a heap-per-kernel-thread implementation.
    124118
    125119\item
     
    136130
    137131\item
    138 Provide extensive runtime checks to valid allocation operations and identify the amount of unfreed storage at program termination.
     132Provide extensive runtime checks to validate allocation operations and identify the amount of unfreed storage at program termination.
    139133
    140134\item
  • doc/theses/mubeen_zulfiqar_MMath/performance.tex

    r015925a re5d9274  
    33
    44This chapter uses the micro-benchmarks from \VRef[Chapter]{s:Benchmarks} to test a number of current memory allocators, including llheap.
    5 The goal is to see if llheap is competitive with the current best memory allocators.
     5The goal is to see if llheap is competitive with the currently popular memory allocators.
    66
    77
     
    1111\begin{itemize}
    1212\item
     13\textbf{Algol} Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz, GCC version 9.4.0
     14\item
    1315\textbf{Nasus} AMD EPYC 7662, 64-core socket $\times$ 2, 2.0 GHz, GCC version 9.3.0
    14 \item
    15 \textbf{Algol} Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz, GCC version 9.4.0
    1616\end{itemize}
    1717
     
    3131
    3232\paragraph{glibc (\textsf{glc})}
    33 \cite{glibc} is the default gcc thread-safe allocator.
     33\cite{glibc} is the default glibc thread-safe allocator.
    3434\\
    3535\textbf{Version:} Ubuntu GLIBC 2.31-0ubuntu9.7 2.31\\
     
    4646
    4747\paragraph{hoard (\textsf{hrd})}
    48 \cite{hoard} is a thread-safe allocator that is multi-threaded and using a heap layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap.
     48\cite{hoard} is a thread-safe allocator that is multi-threaded and uses a heap layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap.
    4949\\
    5050\textbf{Version:} 3.13\\
     
    7878
    7979\paragraph{tbb malloc (\textsf{tbb})}
    80 \cite{tbbmalloc} is a thread-safe allocator that is multi-threaded and uses private heap for each thread.
     80\cite{tbbmalloc} is a thread-safe allocator that is multi-threaded and uses a private heap for each thread.
    8181Each private-heap has multiple bins of different sizes. Each bin contains free regions of the same size.
    8282\\
     
    9090\section{Experiments}
    9191
    92 The each micro-benchmark is configured and run with each of the allocators,
    93 The less time an allocator takes to complete a benchmark the better, so lower in the graphs is better.
     92Each micro-benchmark is configured and run with each of the allocators.
     93The less time an allocator takes to complete a benchmark the better, so lower in the graphs is better, except for the Memory micro-benchmark graphs.
    9494All graphs use log scale on the Y-axis, except for the Memory micro-benchmark (see \VRef{s:MemoryMicroBenchmark}).
    9595
     
    231231Second is the low-performer group, which includes the rest of the memory allocators.
    232232These memory allocators have significant program-induced passive false-sharing, where \textsf{hrd} is the worst-performing allocator.
    233 All of the allocator's in this group are sharing heaps among threads at some level.
    234 
    235 Interestingly, allocators such as \textsf{hrd} and \textsf{glc} performed well in micro-benchmark cache thrash (see \VRef{sec:cache-thrash-perf}).
    236 But, these allocators are among the low performers in the cache scratch.
    237 It suggests these allocators do not actively produce false-sharing but preserve program-induced passive false sharing.
     233All of the allocators in this group are sharing heaps among threads at some level.
     234
     235Interestingly, allocators such as \textsf{hrd} and \textsf{glc} performed well in the cache-thrash micro-benchmark (see \VRef{sec:cache-thrash-perf}), but these allocators are among the low performers in cache scratch.
     236This suggests these allocators do not actively produce false-sharing, but preserve program-induced passive false sharing.
    238237
    239238%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  • doc/theses/mubeen_zulfiqar_MMath/uw-ethesis-frontpgs.tex

    r015925a re5d9274  
    1313        \vspace*{1.0cm}
    1414
    15         {\Huge\bf \CFA Memory Allocation}
     15        {\Huge\bf High-Performance Concurrent Memory Allocation}
    1616
    1717        \vspace*{1.0cm}
     
    108108% D E C L A R A T I O N   P A G E
    109109% -------------------------------
    110   % The following is a sample Delaration Page as provided by the GSO
     110  % The following is a sample Declaration Page as provided by the GSO
    111111  % December 13th, 2006.  It is designed for an electronic thesis.
    112112 \begin{center}\textbf{Author's Declaration}\end{center}
     
    136136
    137137The goal of this thesis is to build a low-latency memory allocator for both kernel and user multi-threaded systems, which is competitive with the best current memory allocators, while extending the feature set of existing and new allocator routines.
    138 A new llheap memory-allocator is created that achieves all of these goals, while maintaining and managing sticky allocation properties for zero-fill and alignment allocations without a performance loss.
     138A new llheap memory-allocator is created that achieves all of these goals, while maintaining and managing sticky allocation properties for zero-filled and aligned allocations without a performance loss.
    139139Hence, it becomes possible to use @realloc@ frequently as a safe operation, rather than just occasionally, because it preserves sticky properties when enlarging storage requests.
    140140Furthermore, the ability to query sticky properties and information allows programmers to write safer programs, as it is possible to dynamically match allocation styles from unknown library routines that return allocations.
    141141The C allocation API is also extended with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ so programmers do not make mistakes writing these useful allocation operations.
    142142llheap is embedded into the \uC and \CFA runtime systems, both of which have user-level threading.
    143 The ability to use \CFA's advanced type-system (and possibly \CC's too) to have one allocation routine with completely orthogonal sticky properties shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation.
     143The ability to use \CFA's advanced type-system (and possibly \CC's too) to combine advanced memory operations into one allocation routine using named arguments shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation.
    144144
    145145The llheap allocator also provides comprehensive statistics for all allocation operations, which are invaluable in understanding and debugging a program's dynamic behaviour.
    146 No other memory allocator examined in the thesis provides comprehensive statistics gathering.
    147 As well, llheap provides a debugging mode where allocations are checked, along with internal pre/post conditions and invariants, is extremely useful, especially for students.
     146No other memory allocator examined in the thesis provides such comprehensive statistics gathering.
     147As well, llheap provides a debugging mode where allocations are checked with internal pre/post conditions and invariants. It is extremely useful, especially for students.
    148148While not as powerful as the @valgrind@ interpreter, a large number of allocation mistakes are detected.
    149149Finally, contention-free statistics gathering and debugging have a low enough cost to be used in production code.
    150150
    151 A micro-benchmark test-suite is started for comparing allocators, rather than relying on a suite of arbitrary programs, has been an interesting challenge.
     151A micro-benchmark test-suite has been started for comparing allocators, rather than relying on a suite of arbitrary programs; it has been an interesting challenge.
    152152These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs.
    153 Existing memory allocators, glibc, dlmalloc, hoard, jemalloc, ptmalloc3, rpmalloc, tbmalloc and the new allocator llheap are all compared using the new micro-benchmark test-suite.
     153Existing memory allocators, glibc, dlmalloc, hoard, jemalloc, ptmalloc3, rpmalloc, tbmalloc, and the new allocator llheap are all compared using the new micro-benchmark test-suite.
    154154\cleardoublepage
    155155
     
    162162I would like to thank all the people who made this thesis possible.
    163163
    164 I would like to acknowledge Peter A. Buhr for his assistance and support throughtout the process.
     164I would like to acknowledge Peter A. Buhr for his assistance and support throughout the process.
    165165It would have been impossible without him.
    166166
     167I would like to acknowledge Gregor Richards and Trevor Brown for reading my thesis quickly and giving me great feedback on my work.
     168
    167169Also, I would like to thank my team members at PLG, especially Thierry, Michael, and Andrew, for their input.
     170
     171Finally, a special thank you to Huawei Canada for funding this work.
    168172\end{center}
    169173\cleardoublepage
     
    195199% L I S T   O F   T A B L E S
    196200% ---------------------------
    197 \addcontentsline{toc}{chapter}{List of Tables}
    198 \listoftables
    199 \cleardoublepage
    200 \phantomsection         % allows hyperref to link to the correct page
     201% \addcontentsline{toc}{chapter}{List of Tables}
     202% \listoftables
     203% \cleardoublepage
     204% \phantomsection               % allows hyperref to link to the correct page
    201205
    202206% Change page numbering back to Arabic numerals
  • doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex

    r015925a re5d9274  
    106106    pdffitwindow=false,     % window fit to page when opened
    107107    pdfstartview={FitH},    % fits the width of the page to the window
    108     pdftitle={Cforall Memory Allocation}, % title: CHANGE THIS TEXT!
     108    pdftitle={High-Performance Concurrent Memory Allocation}, % title: CHANGE THIS TEXT!
    109109    pdfauthor={Mubeen Zulfiqar},    % author: CHANGE THIS TEXT! and uncomment this line
    110110    pdfsubject={Cforall},  % subject: CHANGE THIS TEXT! and uncomment this line
  • doc/theses/thierry_delisle_PhD/thesis/Makefile

    r015925a re5d9274  
    33Build = build
    44Figures = img
    5 Macros = ../../../LaTeXmacros
    6 TeXLIB = .:${Macros}:${Build}:../../../bibliography:
     5
     6LaTMac = ../../../LaTeXmacros
     7BibRep = ../../../bibliography
     8
     9Macros = ${LaTMac}
     10TeXLIB = .:${Macros}:${Build}:${BibRep}:
    711LaTeX  = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
    812BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex
     
    3741        emptytree \
    3842        fairness \
     43        idle \
     44        idle1 \
     45        idle2 \
     46        idle_state \
    3947        io_uring \
    4048        pivot_ring \
     
    4250        cycle \
    4351        result.cycle.jax.ops \
     52        result.yield.jax.ops \
     53        result.churn.jax.ops \
     54        result.cycle.jax.ns \
     55        result.yield.jax.ns \
     56        result.churn.jax.ns \
     57        result.cycle.low.jax.ops \
     58        result.yield.low.jax.ops \
     59        result.churn.low.jax.ops \
     60        result.cycle.low.jax.ns \
     61        result.yield.low.jax.ns \
     62        result.churn.low.jax.ns \
     63        result.memcd.updt.qps \
     64        result.memcd.updt.lat \
     65        result.memcd.rate.qps \
     66        result.memcd.rate.99th \
    4467}
    4568
     
    5275## Define the documents that need to be made.
    5376all: thesis.pdf
    54 thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib ../../../LaTeXmacros/common.tex ../../../LaTeXmacros/common.sty
     77thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib ${LaTMac}/common.tex ${LaTMac}/common.sty ${BibRep}/pl.bib
    5578
    5679DOCUMENT = thesis.pdf
     
    116139        python3 $< $@
    117140
    118 build/result.%.ns.svg : data/% | ${Build}
    119         ../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops"
     141cycle_jax_ops_FLAGS = --MaxY=120000000
     142cycle_low_jax_ops_FLAGS = --MaxY=120000000
     143cycle_jax_ns_FLAGS = --MaxY=2000
     144cycle_low_jax_ns_FLAGS = --MaxY=2000
    120145
    121 build/result.%.ops.svg : data/% | ${Build}
    122         ../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second"
     146yield_jax_ops_FLAGS = --MaxY=150000000
     147yield_low_jax_ops_FLAGS = --MaxY=150000000
     148yield_jax_ns_FLAGS = --MaxY=1500
     149yield_low_jax_ns_FLAGS = --MaxY=1500
     150
     151build/result.%.ns.svg : data/% Makefile | ${Build}
     152        ../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops/procs" $($(subst .,_,$*)_ns_FLAGS)
     153
     154build/result.%.ops.svg : data/% Makefile | ${Build}
     155        ../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second" $($(subst .,_,$*)_ops_FLAGS)
     156
     157build/result.memcd.updt.qps.svg : data/memcd.updt Makefile | ${Build}
     158        ../../../../benchmark/plot.py -f $< -o $@ -y "Actual QPS" -x "Update Ratio"
     159
     160build/result.memcd.updt.lat.svg : data/memcd.updt Makefile | ${Build}
     161        ../../../../benchmark/plot.py -f $< -o $@ -y "Average Read Latency" -x "Update Ratio"
     162
     163build/result.memcd.rate.qps.svg : data/memcd.rate Makefile | ${Build}
     164        ../../../../benchmark/plot.py -f $< -o $@ -y "Actual QPS" -x "Target QPS"
     165
     166build/result.memcd.rate.99th.svg : data/memcd.rate Makefile | ${Build}
     167        ../../../../benchmark/plot.py -f $< -o $@ -y "Tail Read Latency" -x "Target QPS"
    123168
    124169## pstex with inverted colors
  • doc/theses/thierry_delisle_PhD/thesis/data/cycle.jax

    r015925a re5d9274  
    1 [["rdq-cycle-go", "./rdq-cycle-go -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 43606897.0, "Ops per second": 8720908.73, "ns per ops": 114.67, "Ops per threads": 2180344.0, "Ops per procs": 10901724.0, "Ops/sec/procs": 2180227.18, "ns per ops/procs": 458.67}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5010.922033, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 93993568.0, "Total blocks": 93993209.0, "Ops per second": 18757739.07, "ns per ops": 53.31, "Ops per threads": 1174919.0, "Ops per procs": 5874598.0, "Ops/sec/procs": 1172358.69, "ns per ops/procs": 852.98}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 136763517.0, "Ops per second": 27351079.35, "ns per ops": 36.56, "Ops per threads": 1709543.0, "Ops per procs": 8547719.0, "Ops/sec/procs": 1709442.46, "ns per ops/procs": 584.99}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 27778961.0, "Ops per second": 5555545.09, "ns per ops": 180.0, "Ops per threads": 5555792.0, "Ops per procs": 27778961.0, "Ops/sec/procs": 5555545.09, "ns per ops/procs": 180.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5009.290878, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 43976310.0, "Total blocks": 43976217.0, "Ops per second": 8778949.17, "ns per ops": 113.91, "Ops per threads": 2198815.0, "Ops per procs": 10994077.0, "Ops/sec/procs": 2194737.29, "ns per ops/procs": 455.64}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5009.151542, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44132300.0, "Total blocks": 44132201.0, "Ops per second": 8810334.37, "ns per ops": 113.5, "Ops per threads": 2206615.0, "Ops per procs": 11033075.0, "Ops/sec/procs": 2202583.59, "ns per ops/procs": 454.01}],["rdq-cycle-go", "./rdq-cycle-go -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 46353896.0, "Ops per second": 9270294.11, "ns per ops": 107.87, "Ops per threads": 2317694.0, "Ops per procs": 11588474.0, "Ops/sec/procs": 2317573.53, "ns per ops/procs": 431.49}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 27894379.0, "Ops per second": 5578591.58, "ns per ops": 179.26, "Ops per threads": 5578875.0, "Ops per procs": 27894379.0, "Ops/sec/procs": 5578591.58, "ns per ops/procs": 179.26}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.743463, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32825528.0, "Total blocks": 32825527.0, "Ops per second": 6553645.29, "ns per ops": 152.59, "Ops per threads": 6565105.0, "Ops per procs": 32825528.0, "Ops/sec/procs": 6553645.29, "ns per ops/procs": 152.59}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 -d 5 -r 5", 
{"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 138213098.0, "Ops per second": 27640977.5, "ns per ops": 36.18, "Ops per threads": 1727663.0, "Ops per procs": 8638318.0, "Ops/sec/procs": 1727561.09, "ns per ops/procs": 578.85}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5007.914168, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44109513.0, "Total blocks": 44109419.0, "Ops per second": 8807961.06, "ns per ops": 113.53, "Ops per threads": 2205475.0, "Ops per procs": 11027378.0, "Ops/sec/procs": 2201990.27, "ns per ops/procs": 454.13}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5012.121876, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 94130673.0, "Total blocks": 94130291.0, "Ops per second": 18780603.37, "ns per ops": 53.25, "Ops per threads": 1176633.0, "Ops per procs": 5883167.0, "Ops/sec/procs": 1173787.71, "ns per ops/procs": 851.94}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 140936367.0, "Ops per second": 28185668.38, "ns per ops": 35.48, "Ops per threads": 1761704.0, "Ops per procs": 8808522.0, "Ops/sec/procs": 1761604.27, "ns per ops/procs": 567.66}],["rdq-cycle-go", "./rdq-cycle-go -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44279585.0, "Ops per second": 8855475.01, "ns per ops": 112.92, "Ops per threads": 2213979.0, "Ops per procs": 11069896.0, "Ops/sec/procs": 2213868.75, "ns per ops/procs": 451.7}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.37392, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32227534.0, "Total blocks": 32227533.0, "Ops per second": 6434730.02, "ns per ops": 155.41, "Ops per threads": 6445506.0, "Ops per procs": 32227534.0, "Ops/sec/procs": 6434730.02, "ns per ops/procs": 155.41}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5011.019789, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 90600569.0, "Total blocks": 90600173.0, "Ops per second": 18080265.66, "ns per ops": 55.31, "Ops per threads": 1132507.0, "Ops per procs": 5662535.0, "Ops/sec/procs": 1130016.6, "ns per ops/procs": 884.94}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.52474, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32861776.0, "Total blocks": 32861775.0, "Ops per second": 6561168.75, "ns per ops": 152.41, "Ops per threads": 6572355.0, "Ops per procs": 32861776.0, "Ops/sec/procs": 6561168.75, "ns per ops/procs": 152.41}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 28097680.0, "Ops per second": 5619274.9, "ns per ops": 177.96, "Ops per threads": 5619536.0, "Ops per procs": 28097680.0, "Ops/sec/procs": 5619274.9, "ns per ops/procs": 177.96}]]
     1[["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1138076440.0, "Ops per second": 113792094.48, "ns per ops": 8.79, "Ops per threads": 94839.0, "Ops per procs": 47419851.0, "Ops/sec/procs": 4741337.27, "ns per ops/procs": 210.91}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 200285.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 17638575791.0, "Ops per second": 88067238.72, "ns per ops": 11.35, "Ops per threads": 2204821.0, "Ops per procs": 1102410986.0, "Ops/sec/procs": 5504202.42, "ns per ops/procs": 181.68}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54856916.0, "Ops per second": 5485691.0, "ns per ops": 184.0, "Ops per threads": 109713.0, "Ops per procs": 54856916.0, "Ops/sec/procs": 5485691.0, "ns per ops/procs": 184.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.449006, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 558836360.0, "Total blocks": 558836360.0, "Ops per second": 55741778.71, "ns per ops": 17.94, "Ops per threads": 69854.0, "Ops per procs": 34927272.0, "Ops/sec/procs": 3483861.17, "ns per ops/procs": 287.04}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10038.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 58647049.0, "Total blocks": 58647049.0, "Ops per second": 5842287.68, "ns per ops": 171.17, "Ops per threads": 7330.0, "Ops per procs": 3665440.0, "Ops/sec/procs": 365142.98, "ns per ops/procs": 2738.65}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10003.489711, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 728096996.0, "Total blocks": 728096996.0, "Ops per second": 72784299.98, "ns per ops": 13.74, "Ops per threads": 60674.0, "Ops per procs": 30337374.0, "Ops/sec/procs": 3032679.17, "ns per ops/procs": 329.74}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10021.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 63157049.0, "Total blocks": 63157049.0, "Ops per second": 6302255.13, "ns per ops": 158.67, "Ops per threads": 15789.0, "Ops per procs": 7894631.0, "Ops/sec/procs": 787781.89, "ns per ops/procs": 1269.39}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10009.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62412200.0, "Total blocks": 62411700.0, "Ops per second": 6235572.31, "ns per ops": 160.37, "Ops per threads": 124824.0, "Ops per procs": 62412200.0, "Ops/sec/procs": 6235572.31, "ns per ops/procs": 160.37}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 464608617.0, "Ops per second": 46457191.42, "ns per ops": 21.53, "Ops per threads": 116152.0, "Ops per procs": 58076077.0, "Ops/sec/procs": 
5807148.93, "ns per ops/procs": 172.2}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 391521066.0, "Ops per second": 39152106.0, "ns per ops": 25.0, "Ops per threads": 97880.0, "Ops per procs": 48940133.0, "Ops/sec/procs": 4894013.0, "ns per ops/procs": 206.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 963549550.0, "Ops per second": 96354955.0, "ns per ops": 10.0, "Ops per threads": 80295.0, "Ops per procs": 40147897.0, "Ops/sec/procs": 4014789.0, "ns per ops/procs": 251.0}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 867718190.0, "Ops per second": 86761170.55, "ns per ops": 11.53, "Ops per threads": 108464.0, "Ops per procs": 54232386.0, "Ops/sec/procs": 5422573.16, "ns per ops/procs": 184.41}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10100.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 962016289.0, "Ops per second": 96201628.0, "ns per ops": 10.0, "Ops per threads": 80168.0, "Ops per procs": 40084012.0, "Ops/sec/procs": 4008401.0, "ns per ops/procs": 251.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.837824, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54738237.0, "Total blocks": 54737741.0, "Ops per second": 5464622.46, "ns per ops": 183.0, "Ops per threads": 109476.0, "Ops per procs": 54738237.0, "Ops/sec/procs": 5464622.46, "ns per ops/procs": 183.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 731309408.0, "Ops per second": 73130940.0, "ns per ops": 13.0, "Ops per threads": 91413.0, "Ops per procs": 45706838.0, "Ops/sec/procs": 4570683.0, "ns per ops/procs": 220.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 739772688.0, "Ops per second": 73977268.0, "ns per ops": 13.0, "Ops per threads": 92471.0, "Ops per procs": 46235793.0, "Ops/sec/procs": 4623579.0, "ns per ops/procs": 218.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 391449785.0, "Ops per second": 39144978.0, "ns per ops": 25.0, "Ops per threads": 97862.0, "Ops per procs": 48931223.0, "Ops/sec/procs": 4893122.0, "ns per ops/procs": 206.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10048.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 57239183.0, "Total blocks": 57239183.0, "Ops per second": 5696211.13, "ns per ops": 175.56, "Ops per threads": 4769.0, "Ops per procs": 2384965.0, "Ops/sec/procs": 237342.13, "ns per ops/procs": 4213.33}],["rdq-cycle-go", 
"./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55248375.0, "Ops per second": 5524562.87, "ns per ops": 181.01, "Ops per threads": 110496.0, "Ops per procs": 55248375.0, "Ops/sec/procs": 5524562.87, "ns per ops/procs": 181.01}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10021.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 61553053.0, "Total blocks": 61553053.0, "Ops per second": 6142186.88, "ns per ops": 162.81, "Ops per threads": 15388.0, "Ops per procs": 7694131.0, "Ops/sec/procs": 767773.36, "ns per ops/procs": 1302.47}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10008.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62811642.0, "Total blocks": 62811142.0, "Ops per second": 6275517.47, "ns per ops": 159.35, "Ops per threads": 125623.0, "Ops per procs": 62811642.0, "Ops/sec/procs": 6275517.47, "ns per ops/procs": 159.35}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10018.820873, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 260866706.0, "Total blocks": 260862710.0, "Ops per second": 26037665.44, "ns per ops": 38.41, "Ops per threads": 65216.0, "Ops per procs": 32608338.0, "Ops/sec/procs": 3254708.18, "ns per ops/procs": 307.25}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 874581175.0, "Ops per second": 87449851.2, "ns per ops": 11.44, "Ops per threads": 109322.0, "Ops per procs": 54661323.0, "Ops/sec/procs": 5465615.7, "ns per ops/procs": 182.96}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55228782.0, "Ops per second": 5522878.0, "ns per ops": 182.0, "Ops per threads": 110457.0, "Ops per procs": 55228782.0, "Ops/sec/procs": 5522878.0, "ns per ops/procs": 182.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10009.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62564955.0, "Total blocks": 62564455.0, "Ops per second": 6250797.96, "ns per ops": 159.98, "Ops per threads": 125129.0, "Ops per procs": 62564955.0, "Ops/sec/procs": 6250797.96, "ns per ops/procs": 159.98}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 738848909.0, "Ops per second": 73884890.0, "ns per ops": 13.0, "Ops per threads": 92356.0, "Ops per procs": 46178056.0, "Ops/sec/procs": 4617805.0, "ns per ops/procs": 218.0}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1131221613.0, "Ops per second": 113108175.94, "ns per ops": 8.84, "Ops per threads": 94268.0, "Ops per procs": 47134233.0, "Ops/sec/procs": 4712840.66, "ns per ops/procs": 212.19}],["rdq-cycle-cfa", 
"./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10008.209159, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 729328104.0, "Total blocks": 729328099.0, "Ops per second": 72872987.81, "ns per ops": 13.72, "Ops per threads": 60777.0, "Ops per procs": 30388671.0, "Ops/sec/procs": 3036374.49, "ns per ops/procs": 329.34}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 961002611.0, "Ops per second": 96100261.0, "ns per ops": 10.0, "Ops per threads": 80083.0, "Ops per procs": 40041775.0, "Ops/sec/procs": 4004177.0, "ns per ops/procs": 252.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 390098231.0, "Ops per second": 39009823.0, "ns per ops": 25.0, "Ops per threads": 97524.0, "Ops per procs": 48762278.0, "Ops/sec/procs": 4876227.0, "ns per ops/procs": 207.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55237591.0, "Ops per second": 5523759.0, "ns per ops": 182.0, "Ops per threads": 110475.0, "Ops per procs": 55237591.0, "Ops/sec/procs": 5523759.0, "ns per ops/procs": 182.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.576699, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54510321.0, "Total blocks": 54509820.0, "Ops per second": 5442011.04, "ns per ops": 183.76, "Ops per threads": 109020.0, "Ops per procs": 54510321.0, "Ops/sec/procs": 5442011.04, "ns per ops/procs": 183.76}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1135730371.0, "Ops per second": 113558509.97, "ns per ops": 8.81, "Ops per threads": 94644.0, "Ops per procs": 47322098.0, "Ops/sec/procs": 4731604.58, "ns per ops/procs": 211.34}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10039.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 61004037.0, "Total blocks": 61004037.0, "Ops per second": 6076255.04, "ns per ops": 164.58, "Ops per threads": 7625.0, "Ops per procs": 3812752.0, "Ops/sec/procs": 379765.94, "ns per ops/procs": 2633.2}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10004.891999, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 747946345.0, "Total blocks": 747934349.0, "Ops per second": 74758062.86, "ns per ops": 13.38, "Ops per threads": 62328.0, "Ops per procs": 31164431.0, "Ops/sec/procs": 3114919.29, "ns per ops/procs": 321.04}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 466424792.0, "Ops per second": 46638931.23, "ns per ops": 21.44, "Ops per threads": 116606.0, "Ops per procs": 58303099.0, "Ops/sec/procs": 5829866.4, "ns per ops/procs": 
171.53}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10086.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 57343570.0, "Total blocks": 57343570.0, "Ops per second": 5685308.81, "ns per ops": 175.89, "Ops per threads": 4778.0, "Ops per procs": 2389315.0, "Ops/sec/procs": 236887.87, "ns per ops/procs": 4221.41}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10020.39533, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 263517289.0, "Total blocks": 263513293.0, "Ops per second": 26298093.07, "ns per ops": 38.03, "Ops per threads": 65879.0, "Ops per procs": 32939661.0, "Ops/sec/procs": 3287261.63, "ns per ops/procs": 304.2}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.357431, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 551670395.0, "Total blocks": 551662399.0, "Ops per second": 55027503.89, "ns per ops": 18.17, "Ops per threads": 68958.0, "Ops per procs": 34479399.0, "Ops/sec/procs": 3439218.99, "ns per ops/procs": 290.76}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10050.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56162695.0, "Total blocks": 56162695.0, "Ops per second": 5588033.65, "ns per ops": 178.95, "Ops per threads": 4680.0, "Ops per procs": 2340112.0, "Ops/sec/procs": 232834.74, "ns per ops/procs": 4294.89}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10019.690183, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 271866976.0, "Total blocks": 271862980.0, "Ops per second": 27133271.69, "ns per ops": 36.86, "Ops per threads": 67966.0, "Ops per procs": 33983372.0, "Ops/sec/procs": 3391658.96, "ns per ops/procs": 294.84}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10057.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62105022.0, "Total blocks": 62105022.0, "Ops per second": 6175186.04, "ns per ops": 161.94, "Ops per threads": 15526.0, "Ops per procs": 7763127.0, "Ops/sec/procs": 771898.25, "ns per ops/procs": 1295.51}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.81217, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 537080117.0, "Total blocks": 537072121.0, "Ops per second": 53569736.59, "ns per ops": 18.67, "Ops per threads": 67135.0, "Ops per procs": 33567507.0, "Ops/sec/procs": 3348108.54, "ns per ops/procs": 298.68}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55967030.0, "Ops per second": 5596438.25, "ns per ops": 178.69, "Ops per threads": 111934.0, "Ops per procs": 55967030.0, "Ops/sec/procs": 5596438.25, "ns per ops/procs": 178.69}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55703320.0, "Ops per second": 5570084.72, "ns per ops": 179.53, "Ops per threads": 
111406.0, "Ops per procs": 55703320.0, "Ops/sec/procs": 5570084.72, "ns per ops/procs": 179.53}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 469211793.0, "Ops per second": 46918327.16, "ns per ops": 21.31, "Ops per threads": 117302.0, "Ops per procs": 58651474.0, "Ops/sec/procs": 5864790.9, "ns per ops/procs": 170.51}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.545208, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54925472.0, "Total blocks": 54924976.0, "Ops per second": 5483474.68, "ns per ops": 182.37, "Ops per threads": 109850.0, "Ops per procs": 54925472.0, "Ops/sec/procs": 5483474.68, "ns per ops/procs": 182.37}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10037.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 60770550.0, "Total blocks": 60770550.0, "Ops per second": 6054474.7, "ns per ops": 165.17, "Ops per threads": 7596.0, "Ops per procs": 3798159.0, "Ops/sec/procs": 378404.67, "ns per ops/procs": 2642.67}]]
  • doc/theses/thierry_delisle_PhD/thesis/local.bib

    r015925a re5d9274  
    701701  note = "[Online; accessed 12-April-2022]"
    702702}
     703
     704% RMR notes :
     705% [05/04, 12:36] Trevor Brown
     706%     i don't know where rmr complexity was first introduced, but there are many many many papers that use the term and define it
     707% ​[05/04, 12:37] Trevor Brown
     708%     here's one paper that uses the term a lot and links to many others that use it... might trace it to something useful there https://drops.dagstuhl.de/opus/volltexte/2021/14832/pdf/LIPIcs-DISC-2021-30.pdf
     709% ​[05/04, 12:37] Trevor Brown
     710%     another option might be to cite a textbook
     711% ​[05/04, 12:42] Trevor Brown
     712%     but i checked two textbooks in the area i'm aware of and i don't see a definition of rmr complexity in either
     713% ​[05/04, 12:42] Trevor Brown
     714%     this one has a nice statement about the prevelance of rmr complexity, as well as some rough definition
     715% ​[05/04, 12:42] Trevor Brown
     716%     https://dl.acm.org/doi/pdf/10.1145/3465084.3467938
     717
     718% Race to idle notes :
     719% [13/04, 16:56] Martin Karsten
     720%       I don't have a citation. Google brings up this one, which might be good:
     721%
     722% https://doi.org/10.1137/1.9781611973099.100
  • doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex

    r015925a re5d9274  
    77Networked ZIPF
    88
     9Nginx : 5Gb still good, 4Gb starts to suffer
     10
     11Cforall : 10Gb too high, 4 Gb too low
     12
    913\section{Memcached}
    1014
    11 In Memory
     15\subsection{Benchmark Environment}
     16These experiments are run on a cluster of homogeneous Supermicro SYS-6017R-TDF compute nodes with the following characteristics:
     17Each node runs Ubuntu 20.04.3 LTS on top of Linux Kernel 5.11.0-34.
     18Each node has 2 Intel(R) Xeon(R) CPU E5-2620 v2 processors running at 2.10GHz.
     19These CPUs have 6 cores per CPU and 2 \glspl{hthrd} per core, for a total of 24 \glspl{hthrd}.
     20The CPUs each have 384 KB, 3 MB and 30 MB of L1, L2 and L3 cache respectively.
     21Each node is connected to the network through a Mellanox 10 Gigabit Ethernet port.
     22All nodes are connected through a single Mellanox SX1012 10/40 Gigabit Ethernet cluster switch.
    1223
    13 Networked
     24
     25
     26\begin{figure}
     27        \centering
     28        \input{result.memcd.updt.qps.pstex_t}
     29        \caption[Memcached Benchmark : Update Throughput on Intel]{Memcached Benchmark : Update Throughput on Intel\smallskip\newline Description}
     30        \label{fig:memcd:updt:qps}
     31\end{figure}
     32
     33\begin{figure}
     34        \centering
     35        \input{result.memcd.updt.lat.pstex_t}
     36        \caption[Memcached Benchmark : Update Latency on Intel]{Memcached Benchmark : Update Latency on Intel\smallskip\newline Description}
     37        \label{fig:memcd:updt:lat}
     38\end{figure}
     39
     40\begin{figure}
     41        \centering
     42        \input{result.memcd.rate.qps.pstex_t}
     43        \caption[Memcached Benchmark : Throughput on Intel]{Memcached Benchmark : Throughput on Intel\smallskip\newline Description}
     44        \label{fig:memcd:rate:qps}
     45\end{figure}
     46
     47\begin{figure}
     48        \centering
     49        \input{result.memcd.rate.99th.pstex_t}
     50        \caption[Memcached Benchmark : 99th Percentile Latency on Intel]{Memcached Benchmark : 99th Percentile Latency on Intel\smallskip\newline Description}
     51        \label{fig:memcd:rate:tail}
     52\end{figure}
  • doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex

    r015925a re5d9274  
    66\section{Benchmark Environment}
    77All of these benchmarks are run on two distinct hardware environments, an AMD and an Intel machine.
     8
      9For all benchmarks, \texttt{taskset} is used to limit the experiment to 1 NUMA node with no hyperthreading.
      10If more \glspl{hthrd} are needed, then 1 NUMA node with hyperthreading is used.
      11If still more \glspl{hthrd} are needed, then the experiment is limited to as few NUMA nodes as possible.
     12
    813
    914\paragraph{AMD} The AMD machine is a server with two AMD EPYC 7662 CPUs and 256GB of DDR4 RAM.
     
    2328
    2429\section{Cycling latency}
     30\begin{figure}
     31        \centering
     32        \input{cycle.pstex_t}
     33        \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.}
     34        \label{fig:cycle}
     35\end{figure}
    2536The most basic evaluation of any ready queue is the latency needed to push and pop one element from it.
    2637Since these two operations also describe a \texttt{yield} operation, many systems use this as the most basic benchmark.
     
    4253Note that this problem is only present on SMP machines and is significantly mitigated by the fact that there are multiple rings in the system.
    4354
    44 \begin{figure}
    45         \centering
    46         \input{cycle.pstex_t}
    47         \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.}
    48         \label{fig:cycle}
    49 \end{figure}
    50 
    5155To prevent this benchmark from being dominated by the idle sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available.
    5256Beyond this point, adding more rings serves to further mitigate the idle sleep handling.
     
    5458
    5559The actual benchmark is more complicated in order to handle termination, but that simply requires using a binary semaphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition.
    56 
    57 \begin{lstlisting}
    58         Thread.main() {
    59                 count := 0
    60                 for {
    61                         wait()
    62                         this.next.wake()
    63                         count ++
    64                         if must_stop() { break }
    65                 }
    66                 global.count += count
    67         }
    68 \end{lstlisting}
    69 
    70 \begin{figure}
    71         \centering
    72         \input{result.cycle.jax.ops.pstex_t}
    73         \vspace*{-10pt}
    74         \label{fig:cycle:ns:jax}
    75 \end{figure}
      60Figure~\ref{fig:cycle:code} shows the pseudo-code for this benchmark.
     61
     62\begin{figure}
     63        \begin{lstlisting}
     64                Thread.main() {
     65                        count := 0
     66                        for {
     67                                wait()
     68                                this.next.wake()
     69                                count ++
     70                                if must_stop() { break }
     71                        }
     72                        global.count += count
     73                }
     74        \end{lstlisting}
     75        \caption[Cycle Benchmark : Pseudo Code]{Cycle Benchmark : Pseudo Code}
     76        \label{fig:cycle:code}
     77\end{figure}
     78
     79
     80
     81\subsection{Results}
     82\begin{figure}
     83        \subfloat[][Throughput, 100 \ats per \proc]{
     84                \resizebox{0.5\linewidth}{!}{
     85                        \input{result.cycle.jax.ops.pstex_t}
     86                }
     87                \label{fig:cycle:jax:ops}
     88        }
     89        \subfloat[][Throughput, 1 \ats per \proc]{
     90                \resizebox{0.5\linewidth}{!}{
     91                        \input{result.cycle.low.jax.ops.pstex_t}
     92                }
     93                \label{fig:cycle:jax:low:ops}
     94        }
     95
     96        \subfloat[][Latency, 100 \ats per \proc]{
     97                \resizebox{0.5\linewidth}{!}{
     98                        \input{result.cycle.jax.ns.pstex_t}
     99                }
      100                \label{fig:cycle:jax:ns}
     101        }
     102        \subfloat[][Latency, 1 \ats per \proc]{
     103                \resizebox{0.5\linewidth}{!}{
     104                        \input{result.cycle.low.jax.ns.pstex_t}
     105                }
     106                \label{fig:cycle:jax:low:ns}
     107        }
      108        \caption[Cycle Benchmark on Intel]{Cycle Benchmark on Intel\smallskip\newline Throughput and latency as a function of \proc count, using 100 cycles per \proc and 5 \ats per cycle.}
     109        \label{fig:cycle:jax}
     110\end{figure}
      111Figure~\ref{fig:cycle:jax} shows the throughput as a function of \proc count, with the following constants:
      112each run uses 100 cycles per \proc and 5 \ats per cycle.
     113
     114\todo{results discussion}
    76115
    77116\section{Yield}
     
    81120Its only interesting variable is the number of \glspl{at} per \glspl{proc}, where ratios close to 1 mean the ready queue(s) could be empty.
    82121This sometimes puts more strain on the idle sleep handling, compared to scenarios where there is clearly plenty of work to be done.
    83 
    84 \todo{code, setup, results}
    85 
    86 \begin{lstlisting}
    87         Thread.main() {
    88                 count := 0
    89                 while !stop {
    90                         yield()
    91                         count ++
    92                 }
    93                 global.count += count
    94         }
    95 \end{lstlisting}
      122Figure~\ref{fig:yield:code} shows the pseudo-code for this benchmark; the ``wait/wake-next'' is simply replaced by a \texttt{yield}.
     123
     124\begin{figure}
     125        \begin{lstlisting}
     126                Thread.main() {
     127                        count := 0
     128                        for {
     129                                yield()
     130                                count ++
     131                                if must_stop() { break }
     132                        }
     133                        global.count += count
     134                }
     135        \end{lstlisting}
     136        \caption[Yield Benchmark : Pseudo Code]{Yield Benchmark : Pseudo Code}
     137        \label{fig:yield:code}
     138\end{figure}
     139
     140\subsection{Results}
     141\begin{figure}
     142        \subfloat[][Throughput, 100 \ats per \proc]{
     143                \resizebox{0.5\linewidth}{!}{
     144                        \input{result.yield.jax.ops.pstex_t}
     145                }
     146                \label{fig:yield:jax:ops}
     147        }
     148        \subfloat[][Throughput, 1 \ats per \proc]{
     149                \resizebox{0.5\linewidth}{!}{
     150                \input{result.yield.low.jax.ops.pstex_t}
     151                }
     152                \label{fig:yield:jax:low:ops}
     153        }
     154
     155        \subfloat[][Latency, 100 \ats per \proc]{
     156                \resizebox{0.5\linewidth}{!}{
     157                \input{result.yield.jax.ns.pstex_t}
     158                }
     159                \label{fig:yield:jax:ns}
     160        }
     161        \subfloat[][Latency, 1 \ats per \proc]{
     162                \resizebox{0.5\linewidth}{!}{
     163                \input{result.yield.low.jax.ns.pstex_t}
     164                }
     165                \label{fig:yield:jax:low:ns}
     166        }
      167        \caption[Yield Benchmark on Intel]{Yield Benchmark on Intel\smallskip\newline Throughput and latency as a function of \proc count.}
     168        \label{fig:yield:jax}
     169\end{figure}
      170Figure~\ref{fig:yield:jax} shows the throughput as a function of \proc count, with the following constants:
      171each run uses 100 \ats per \proc.
     172
     173\todo{results discussion}
    96174
    97175
     
    105183In either case, this benchmark aims to highlight how each scheduler handles these cases, since both cases can lead to performance degradation if they are not handled correctly.
    106184
    107 To achieve this the benchmark uses a fixed size array of \newterm{chair}s, where a chair is a data structure that holds a single blocked \gls{at}.
    108 When a \gls{at} attempts to block on the chair, it must first unblocked the \gls{at} currently blocked on said chair, if any.
    109 This creates a flow where \glspl{at} push each other out of the chairs before being pushed out themselves.
    110 For this benchmark to work however, the number of \glspl{at} must be equal or greater to the number of chairs plus the number of \glspl{proc}.
      185To achieve this, the benchmark uses a fixed-size array of semaphores.
      186Each \gls{at} picks a random semaphore, \texttt{V}s it to unblock any waiting \at and then \texttt{P}s on the same semaphore.
      187This creates a flow where \glspl{at} push each other out of the semaphores before being pushed out themselves.
      188For this benchmark to work however, the number of \glspl{at} must be greater than or equal to the number of semaphores plus the number of \glspl{proc}.
      189Note that the nature of these semaphores means the counter can go beyond 1, which could lead to calls to \texttt{P} not blocking.
    111190
    112191\todo{code, setup, results}
     
    116195                for {
    117196                        r := random() % len(spots)
    118                         next := xchg(spots[r], this)
    119                         if next { next.wake() }
    120                         wait()
     197                        spots[r].V()
     198                        spots[r].P()
    121199                        count ++
    122200                        if must_stop() { break }
     
    125203        }
    126204\end{lstlisting}
     205
     206\begin{figure}
     207        \subfloat[][Throughput, 100 \ats per \proc]{
     208                \resizebox{0.5\linewidth}{!}{
     209                        \input{result.churn.jax.ops.pstex_t}
     210                }
     211                \label{fig:churn:jax:ops}
     212        }
     213        \subfloat[][Throughput, 1 \ats per \proc]{
     214                \resizebox{0.5\linewidth}{!}{
     215                        \input{result.churn.low.jax.ops.pstex_t}
     216                }
     217                \label{fig:churn:jax:low:ops}
     218        }
     219
     220        \subfloat[][Latency, 100 \ats per \proc]{
     221                \resizebox{0.5\linewidth}{!}{
     222                        \input{result.churn.jax.ns.pstex_t}
     223                }
     224
     225        }
     226        \subfloat[][Latency, 1 \ats per \proc]{
     227                \resizebox{0.5\linewidth}{!}{
     228                        \input{result.churn.low.jax.ns.pstex_t}
     229                }
     230                \label{fig:churn:jax:low:ns}
     231        }
      232        \caption[Churn Benchmark on Intel]{\centering Churn Benchmark on Intel\smallskip\newline Throughput and latency of the Churn benchmark on the Intel machine. Throughput is the total operations per second across all cores. Latency is the duration of each operation.}
     233        \label{fig:churn:jax}
     234\end{figure}
    127235
    128236\section{Locality}
  • doc/theses/thierry_delisle_PhD/thesis/text/intro.tex

    r015925a re5d9274  
    22\todo{A proper intro}
    33
    4 The C programming language\cit{C}
     4The C programming language~\cite{C11}
    55
    6 The \CFA programming language\cite{cfa:frontpage,cfa:typesystem} which extends the C programming language to add modern safety and productiviy features while maintaining backwards compatibility. Among it's productiviy features, \CFA introduces support for threading\cit{CFA Concurrency}, to allow programmers to write modern concurrent and parallel programming.
    7 While previous work on the concurrent package of \CFA focused on features and interfaces, this thesis focuses on performance, introducing \glsxtrshort{api} changes only when required by performance considerations. More specifically, this thesis concentrates on scheduling and \glsxtrshort{io}. Prior to this work, the \CFA runtime used a strictly \glsxtrshort{fifo} \gls{rQ}.
     6The \CFA programming language~\cite{cfa:frontpage,cfa:typesystem} extends the C programming language by adding modern safety and productivity features, while maintaining backwards compatibility. Among its productivity features, \CFA supports user-level threading~\cite{Delisle21} allowing programmers to write modern concurrent and parallel programs.
      7My previous master's thesis on concurrency in \CFA focused on features and interfaces.
      8This Ph.D.\ thesis focuses on performance, introducing \glsxtrshort{api} changes only when required by performance considerations. Specifically, this work concentrates on scheduling and \glsxtrshort{io}. Prior to this work, the \CFA runtime used a strict \glsxtrshort{fifo} \gls{rQ} and had no non-blocking I/O capabilities at the user-thread level.
    89
    9 This work exclusively concentrates on Linux as it's operating system since the existing \CFA runtime and compiler does not already support other operating systems. Furthermore, as \CFA is yet to be released, supporting version of Linux older than the latest version is not a goal of this work.
      10As a research project, this work builds exclusively on newer versions of the Linux operating system and the gcc/clang compilers. While \CFA is released, supporting older versions of Linux ($<$~Ubuntu 16.04) and of the gcc/clang compilers ($<$~gcc 6.0) is not a goal of this work.
  • doc/theses/thierry_delisle_PhD/thesis/text/practice.tex

    r015925a re5d9274  
    77More precisely, \CFA supports adding \procs using the RAII object @processor@.
    88These objects can be created at any time and can be destroyed at any time.
    9 They are normally create as automatic stack variables, but this is not a requirement.
     9They are normally created as automatic stack variables, but this is not a requirement.
    1010
    1111The consequence is that the scheduler and \io subsystems must support \procs coming in and out of existence.
    1212
    1313\section{Manual Resizing}
    14 The consequence of dynamically changing the number of \procs is that all internal arrays that are sized based on the number of \procs neede to be \texttt{realloc}ed.
    15 This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing because there is no guarantee the \proc causing the shrink had the highest index. Therefore indexes need to be reassigned to preserve contiguous indexes.}.
    16 
    17 There are no performance requirements, within reason, for resizing since this is usually considered as part of setup and teardown.
     14Manual resizing is expected to be a rare operation.
     15Programmers are mostly expected to resize clusters on startup or teardown.
      16Therefore, dynamically changing the number of \procs is an appropriate moment to allocate or free resources to match the new state.
      17As such, all internal arrays that are sized based on the number of \procs need to be \texttt{realloc}ed.
      18This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing when shrinking because some indexes are expected to refer to dense contiguous resources and there is no guarantee the resource being removed has the highest index.}.
     19
     20There are no performance requirements, within reason, for resizing since it is expected to be rare.
    1821However, this operation has strict correctness requirements since shrinking and idle sleep can easily lead to deadlocks.
    1922It should also avoid, as much as possible, any effect on performance when the number of \procs remains constant.
    20 This later requirement prehibits simple solutions, like simply adding a global lock to these arrays.
      23This latter requirement prohibits naive solutions, like simply adding a global lock to the ready-queue arrays.
    2124
    2225\subsection{Read-Copy-Update}
     
    2427In this pattern, resizing is done by creating a copy of the internal data structures, updating the copy with the desired changes, and then attempting an Indiana Jones switch to replace the original with the copy.
    2528This approach has the potential advantage that the switch itself may not need any synchronization.
    26 The switch definitely implies a race where \procs could still use the previous, original, data structure after the copy was switched in.
    27 The important question then becomes whether or not this race can be recovered from.
    28 If the changes that arrived late can be transferred from the original to the copy then this solution works.
    29 
    30 For linked-lists, dequeing is somewhat of a problem.
     29However, there is a race where \procs could still use the previous, original, data structure after the copy was switched in.
     30This race not only requires some added memory reclamation scheme, it also requires that operations made on the stale original version be eventually moved to the copy.
     31
      32For linked lists, enqueuing is only somewhat problematic: \ats enqueued on the original queues need to be transferred to the copy, which might not preserve ordering.
      33Dequeuing is more challenging.
    3134Dequeuing from the original will not necessarily update the copy, which could lead to multiple \procs dequeuing the same \at.
    32 Fixing this requires making the array contain pointers to subqueues rather than the subqueues themselves.
     35Fixing this requires more synchronization or more indirection on the queues.
    3336
    3437Another challenge is that the original must be kept until all \procs have witnessed the change.
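As an illustration, the core of this pattern might look like the following sketch: hypothetical C code using a copy-and-swap of an array of subqueue pointers, not the actual \CFA implementation.

\begin{lstlisting}
// Hypothetical sketch of copy-and-swap resizing; all names are illustrative.
#include <stdatomic.h>
#include <stdlib.h>

struct subqueue;                     // contents irrelevant for this sketch
typedef struct {
	size_t count;
	struct subqueue * queues[];      // flexible array of subqueue pointers
} queue_array;

_Atomic(queue_array *) current;      // readers load this without a lock

void resize( size_t new_count ) {
	queue_array * old = atomic_load( &current );
	queue_array * new = malloc( sizeof(queue_array) + new_count * sizeof(struct subqueue *) );
	new->count = new_count;
	// ... transfer the subqueue pointers from old to new ...
	atomic_store( &current, new );   // the Indiana Jones switch
	// 'old' cannot be freed here: some procs may still be reading it,
	// hence the need for a memory-reclamation scheme.
}
\end{lstlisting}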
     
    97100In addition to users manually changing the number of \procs, it is desirable to support ``removing'' \procs when there are not enough \ats for all the \procs to be useful.
    98101While manual resizing is expected to be rare, the number of \ats is expected to vary much more, which means \procs may need to be ``removed'' for only short periods of time.
    99 Furthermore, race conditions that spuriously lead to the impression no \ats are ready are actually common in practice.
    100 Therefore \procs should not be actually \emph{removed} but simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready.
     102Furthermore, race conditions that spuriously lead to the impression that no \ats are ready are actually common in practice.
      103Therefore, resources associated with \procs should not be freed; instead, \procs are simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready.
    101104This state is referred to as \newterm{Idle-Sleep}.
    102105
     
   110113The \CFA scheduler simply follows the ``Race-to-Idle''\cit{https://doi.org/10.1137/1.9781611973099.100} approach, where a sleeping \proc is woken any time an \at becomes ready and \procs go to idle sleep anytime they run out of work.
    111114
     115\section{Sleeping}
      116As usual, the cornerstone of any feature related to the kernel is the choice of system call.
      117In terms of blocking a \gls{kthrd} until some event occurs, the Linux kernel has many available options:
     118
     119\paragraph{\texttt{pthread\_mutex}/\texttt{pthread\_cond}}
      120The most classic option is to use some combination of \texttt{pthread\_mutex} and \texttt{pthread\_cond}.
      121These serve as straightforward mutual-exclusion and synchronization tools, and allow a \gls{kthrd} to wait on a \texttt{pthread\_cond} until signalled.
      122While this approach is generally perfectly appropriate for \glspl{kthrd} waiting on each other, \io operations do not signal \texttt{pthread\_cond}s.
      123For \io results to wake a \proc waiting on a \texttt{pthread\_cond}, a different \gls{kthrd} must be woken up first, which can then signal the \proc.
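For reference, this classic pattern might be sketched as follows; plain C, illustrative only:

\begin{lstlisting}
// Illustrative sketch of the pthread_mutex/pthread_cond pattern.
#include <pthread.h>
#include <stdbool.h>

pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
bool            ready = false;

void block( void ) {                 // sleeping kernel thread
	pthread_mutex_lock( &lock );
	while( !ready ) pthread_cond_wait( &cond, &lock );
	ready = false;
	pthread_mutex_unlock( &lock );
}

void notify( void ) {                // must run on some other kernel thread,
	pthread_mutex_lock( &lock );     // which is exactly the problem for io
	ready = true;
	pthread_cond_signal( &cond );
	pthread_mutex_unlock( &lock );
}
\end{lstlisting}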
     124
     125\subsection{\texttt{io\_uring} and Epoll}
     126An alternative is to flip the problem on its head and block waiting for \io, using \texttt{io\_uring} or even \texttt{epoll}.
      127This creates the inverse situation, where \io operations directly wake sleeping \procs, but waking a \proc from a running \gls{kthrd} must use an indirect scheme.
      128This generally takes the form of creating a file descriptor, \eg, a dummy file, a pipe or an event fd, and using that file descriptor when \procs need to wake each other.
     129This leads to additional complexity because there can be a race between these artificial \io operations and genuine \io operations.
     130If not handled correctly, this can lead to the artificial files going out of sync.
     131
     132\subsection{Event FDs}
     133Another interesting approach is to use an event file descriptor\cit{eventfd}.
      134This Linux feature is a file descriptor that behaves like \io, \ie, uses \texttt{read} and \texttt{write}, but also behaves like a semaphore.
      135Indeed, all reads and writes must use 64-bit values\footnote{On 64-bit Linux; a 32-bit Linux would use 32-bit values.}.
      136Writes add their value to the buffer, that is, arithmetic addition and not buffer append, and reads zero out the buffer and return its accumulated value\footnote{This is without the \texttt{EFD\_SEMAPHORE} flag. This flag changes the behavior of \texttt{read} but is not needed for this work.}.
      137If a read is made while the buffer is already 0, the read blocks until a non-0 value is added.
      138What makes this feature particularly interesting is that \texttt{io\_uring} supports the \texttt{IORING\_REGISTER\_EVENTFD} command to register an event fd with a particular instance.
     139Once that instance is registered, any \io completion will result in \texttt{io\_uring} writing to the event FD.
      140This means that a \proc waiting on the event FD can be \emph{directly} woken up by either other \procs or incoming \io.
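The following sketch illustrates these semantics; plain C, with the \texttt{io\_uring} registration shown only as a comment:

\begin{lstlisting}
// Illustrative sketch of event-fd semantics; not the CFA runtime code.
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

int efd;                             // shared event fd, counter starts at 0

void notify( void ) {                // adds 1 to the counter,
	uint64_t one = 1;                // waking any blocked reader
	write( efd, &one, sizeof(one) );
}

void block( void ) {                 // blocks while the counter is 0,
	uint64_t val;                    // then returns and zeroes the counter
	read( efd, &val, sizeof(val) );
}

int main( void ) {
	efd = eventfd( 0, 0 );
	// io_uring_register(ring_fd, IORING_REGISTER_EVENTFD, &efd, 1)
	// additionally makes io completions write to efd.
	return 0;
}
\end{lstlisting}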
     141
     142\begin{figure}
     143        \centering
     144        \input{idle1.pstex_t}
      145        \caption[Basic Idle Sleep Data Structure]{Basic Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock.
     146        Each \proc has a private event FD.}
     147        \label{fig:idle1}
     148\end{figure}
     149
    112150
    113151\section{Tracking Sleepers}
    114152Tracking which \procs are in idle sleep requires a data structure holding all the sleeping \procs, but more importantly it requires a concurrent \emph{handshake} so that no \at is stranded on a ready-queue with no active \proc.
   115153The classic challenge occurs when a \at is made ready while a \proc is going to sleep: there is a race where the new \at may not see the sleeping \proc and the sleeping \proc may not see the ready \at.
    116 
    117 Furthermore, the ``Race-to-Idle'' approach means that there is some
    118 
    119 \section{Sleeping}
    120 
    121 \subsection{Event FDs}
    122 
    123 \subsection{Epoll}
    124 
    125 \subsection{\texttt{io\_uring}}
    126 
    127 \section{Reducing Latency}
      154Since \ats can be made ready by timers, \io operations or other events outside a cluster, this race can occur even if the \proc going to sleep is the only \proc awake.
     155As a result, improper handling of this race can lead to all \procs going to sleep and the system deadlocking.
     156
     157Furthermore, the ``Race-to-Idle'' approach means that there may be contention on the data structure tracking sleepers.
      158Contention slowing down \procs attempting to sleep or wake up can be tolerated.
      159These \procs are not doing useful work and therefore not contributing to overall performance.
      160However, notifying, \ie checking if a \proc must be woken up and doing so if needed, can significantly affect overall performance and must be low cost.
     161
     162\subsection{Sleepers List}
     163Each cluster maintains a list of idle \procs, organized as a stack.
      164This ordering hopefully allows \procs at the tail to stay in idle sleep for extended periods of time.
      165Because of these unbalanced performance requirements, the algorithm tracking sleepers is designed to have idle \procs handle as much of the work as possible.
      166The idle \procs maintain the list of sleepers among themselves, and notifying a sleeping \proc takes as little work as possible.
      167This approach means that maintaining the list is fairly straightforward.
      168The list can simply use a single lock per cluster, and only \procs that are getting in and out of the idle state will contend for that lock.
     169
     170This approach also simplifies notification.
      171Indeed, \procs need to be notified when a new \at is readied, but they also must be notified during resizing, so the \gls{kthrd} can be joined.
     172This means that whichever entity removes idle \procs from the sleeper list must be able to do so in any order.
     173Using a simple lock over this data structure makes the removal much simpler than using a lock-free data structure.
      174The notification process then simply needs to wake up the desired idle \proc, using \texttt{pthread\_cond\_signal}, \texttt{write} on an fd, etc., and the \proc will handle the rest.
     175
     176\subsection{Reducing Latency}
     177As mentioned in this section, \procs going idle for extremely short periods of time is likely in certain common scenarios.
      178Therefore, the latency of doing a system call to read from and write to the event fd can noticeably degrade overall performance.
      179It is important to reduce the latency and contention of the notification as much as possible.
      180Figure~\ref{fig:idle1} shows the basic idle sleep data structure.
      181For the notifiers, this data structure can cause contention on the lock, and the event fd syscall can cause notable latency.
     182
     183\begin{figure}
     184        \centering
     185        \input{idle2.pstex_t}
     186        \caption[Improved Idle Sleep Data Structure]{Improved Idle Sleep Data Structure \smallskip\newline An atomic pointer is added to the list, pointing to the Event FD of the first \proc on the list.}
     187        \label{fig:idle2}
     188\end{figure}
     189
     190The contention is mostly due to the lock on the list needing to be held to get to the head \proc.
     191That lock can be contended by \procs attempting to go to sleep, \procs waking or notification attempts.
      192The contention from the \procs attempting to go to sleep can be mitigated slightly by using \texttt{try\_acquire} instead, so the \procs simply continue searching for \ats if the lock is held.
      193This trick cannot be used for waking \procs since they are not in a state where they can run \ats.
      194However, it is worth noting that notification does not strictly require accessing the list or the head \proc.
      195Therefore, contention can be reduced notably by having notifiers avoid the lock entirely and adding a pointer to the event fd of the first idle \proc, as in Figure~\ref{fig:idle2}.
      196To avoid contention between the notifiers, instead of simply reading the atomic pointer, notifiers atomically exchange it to \texttt{null}, so only one notifier will contend on the system call.
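A minimal sketch of this notifier fast path, assuming an atomic pointer to the head \proc's event fd; illustrative C, not the actual implementation:

\begin{lstlisting}
// Illustrative sketch of the lock-free notification fast path.
#include <stdatomic.h>
#include <stdint.h>
#include <unistd.h>

_Atomic(int *) head_efd;             // event fd of the first idle proc, or NULL

void notify_one( void ) {
	// Claim the pointer: only one concurrent notifier gets a non-NULL
	// value, so only one pays for the system call.
	int * efd = atomic_exchange( &head_efd, NULL );
	if( efd ) {
		uint64_t one = 1;
		write( *efd, &one, sizeof(one) );
	}
	// A NULL result means no proc is idle, or another notifier already
	// woke the head proc.
}
\end{lstlisting}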
     197
     198\begin{figure}
     199        \centering
     200        \input{idle_state.pstex_t}
      201        \caption[Idle-Sleep State Flag]{Idle-Sleep State Flag \smallskip\newline A three-state flag is added beside each \proc's event FD to avoid unnecessary system calls.}
     202        \label{fig:idle:state}
     203\end{figure}
     204
      205The next optimization is to avoid the latency of the event fd when possible.
      206This can be done by adding what is effectively a benaphore\cit{benaphore} in front of the event fd.
      207A simple three-state flag is added beside the event fd to avoid unnecessary system calls, as shown in Figure~\ref{fig:idle:state}.
      208The flag starts in state \texttt{SEARCH}, while the \proc is searching for \ats to run.
      209The \proc then confirms the sleep by atomically swapping the state to \texttt{SLEEP}.
      210If the previous state was still \texttt{SEARCH}, then the \proc proceeds to read the event fd.
      211Meanwhile, notifiers atomically exchange the state to \texttt{AWAKE}.
      212If the previous state was \texttt{SLEEP}, then the notifier must write to the event fd.
     213However, if the notify arrives almost immediately after the \proc marks itself idle, then both reads and writes on the event fd can be omitted, which reduces latency notably.
     214This leads to the final data structure shown in Figure~\ref{fig:idle}.
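The handshake on this flag can be sketched as follows; illustrative C, assuming the \texttt{SEARCH}/\texttt{SLEEP}/\texttt{AWAKE} states described above:

\begin{lstlisting}
// Illustrative sketch of the benaphore-style handshake.
#include <stdatomic.h>
#include <stdint.h>
#include <unistd.h>

enum { SEARCH, SLEEP, AWAKE };
struct idle_proc { _Atomic int state; int efd; };

void confirm_sleep( struct idle_proc * p ) {   // proc side
	// Swap to SLEEP; only block on the fd if no notification raced in.
	if( atomic_exchange( &p->state, SLEEP ) == SEARCH ) {
		uint64_t val;
		read( p->efd, &val, sizeof(val) );     // blocks until notified
	}
	atomic_store( &p->state, SEARCH );         // reset for the next round (detail assumed)
}

void notify( struct idle_proc * p ) {          // notifier side
	// Swap to AWAKE; only pay for the syscall if the proc actually slept.
	if( atomic_exchange( &p->state, AWAKE ) == SLEEP ) {
		uint64_t one = 1;
		write( p->efd, &one, sizeof(one) );
	}
}
\end{lstlisting}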
     215
     216\begin{figure}
     217        \centering
     218        \input{idle.pstex_t}
      219        \caption[Low-latency Idle Sleep Data Structure]{Low-latency Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock.
     220        Each \proc has a private event FD with a benaphore in front of it.
     221        The list also has an atomic pointer to the event fd and benaphore of the first \proc on the list.}
     222        \label{fig:idle}
     223\end{figure}
  • doc/theses/thierry_delisle_PhD/thesis/thesis.tex

    r015925a re5d9274  
    8080%\usepackage{nomencl} % For a nomenclature (optional; available from ctan.org)
    8181\usepackage{amsmath,amssymb,amstext} % Lots of math symbols and environments
    82 \usepackage{xcolor}
     82\usepackage[dvipsnames]{xcolor}
    8383\usepackage{graphicx} % For including graphics
     84\usepackage{subcaption}
    8485
    8586% Hyperlinks make it very easy to navigate an electronic document.
     
    104105        colorlinks=true,        % false: boxed links; true: colored links
    105106        linkcolor=blue,         % color of internal links
    106         citecolor=green,        % color of links to bibliography
     107        citecolor=OliveGreen,   % color of links to bibliography
    107108        filecolor=magenta,      % color of file links
    108109        urlcolor=cyan           % color of external links
     
    204205\newcommand\at{\gls{at}\xspace}%
    205206\newcommand\ats{\glspl{at}\xspace}%
      207\newcommand\Proc{\Gls{proc}\xspace}%
    206208\newcommand\proc{\gls{proc}\xspace}%
    207209\newcommand\procs{\glspl{proc}\xspace}%
  • libcfa/src/Makefile.am

    r015925a re5d9274  
    3333# The built sources must not depend on the installed inst_headers_src
    3434AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr -I$(srcdir)/concurrency $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@
    35 AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@
     35AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions -fvisibility=hidden -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@
    3636AM_CCASFLAGS = -g -Wall -Werror=return-type -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@
    3737CFACC = @CFACC@
     
    194194
    195195prelude.o : prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@
    196         ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -o ${@}
     196        ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -fvisibility=default -o ${@}
    197197
    198198prelude.lo: prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@
    199199        ${AM_V_GEN}$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile \
    200         $(CFACOMPILE) -quiet -XCFA,-l ${<} -c -o ${@}
     200        $(CFACOMPILE) -quiet -XCFA,-l ${<} -c -fvisibility=default -o ${@}
    201201
    202202concurrency/io/call.cfa: $(srcdir)/concurrency/io/call.cfa.in
  • libcfa/src/algorithms/range_iterator.cfa

    r015925a re5d9274  
    2020#include <fstream.hfa>
    2121
    22 void main(RangeIter & this) {
     22#include "bits/defs.hfa"
     23
     24void main(RangeIter & this) libcfa_public {
    2325        for() {
    2426                this._start = -1;
  • libcfa/src/assert.cfa

    r015925a re5d9274  
    1919#include <unistd.h>                                                             // STDERR_FILENO
    2020#include "bits/debug.hfa"
     21#include "bits/defs.hfa"
    2122
    2223extern "C" {
     
    2627
    2728        // called by macro assert in assert.h
    28         void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) {
     29        // would be cool to remove libcfa_public but it's needed for libcfathread
     30        void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) libcfa_public {
    2931                __cfaabi_bits_print_safe( STDERR_FILENO, CFA_ASSERT_FMT ".\n", assertion, __progname, function, line, file );
    3032                abort();
     
    3234
    3335        // called by macro assertf
    34         void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) {
     36        // would be cool to remove libcfa_public but it's needed for libcfathread
     37        void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) libcfa_public {
    3538                __cfaabi_bits_acquire();
    3639                __cfaabi_bits_print_nolock( STDERR_FILENO, CFA_ASSERT_FMT ": ", assertion, __progname, function, line, file );
  • libcfa/src/bits/debug.cfa

    r015925a re5d9274  
    2121#include <unistd.h>
    2222
     23#include "bits/defs.hfa"
     24
    2325enum { buffer_size = 4096 };
    2426static char buffer[ buffer_size ];
    2527
    2628extern "C" {
    27         void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) {
     29        // would be cool to remove libcfa_public but it's needed for libcfathread
     30        void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) libcfa_public {
    2831                // ensure all data is written
    2932                for ( int count = 0, retcode; count < len; count += retcode ) {
     
    4447        void __cfaabi_bits_release() __attribute__((__weak__)) {}
    4548
    46         int __cfaabi_bits_print_safe  ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) {
     49        // would be cool to remove libcfa_public but it's needed for libcfathread
     50        int __cfaabi_bits_print_safe  ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) libcfa_public {
    4751                va_list args;
    4852
  • libcfa/src/bits/defs.hfa

    r015925a re5d9274  
    3636#define __cfa_dlink(x) struct { struct x * next; struct x * back; } __dlink_substitute
    3737#endif
     38
     39#define libcfa_public __attribute__((visibility("default")))
    3840
    3941#ifdef __cforall
  • libcfa/src/bits/weakso_locks.cfa

    r015925a re5d9274  
    1818#include "bits/weakso_locks.hfa"
    1919
     20#pragma GCC visibility push(default)
     21
    2022void  ?{}( blocking_lock &, bool, bool ) {}
    2123void ^?{}( blocking_lock & ) {}
  • libcfa/src/common.cfa

    r015925a re5d9274  
    1818#include <stdlib.h>                                     // div_t, *div
    1919
     20#pragma GCC visibility push(default)
     21
    2022//---------------------------------------
    2123
  • libcfa/src/concurrency/alarm.cfa

    r015925a re5d9274  
    141141//=============================================================================================
    142142
    143 void sleep( Duration duration ) {
     143void sleep( Duration duration ) libcfa_public {
    144144        alarm_node_t node = { active_thread(), duration, 0`s };
    145145
  • libcfa/src/concurrency/clib/cfathread.cfa

    r015925a re5d9274  
    237237
    238238typedef ThreadCancelled(cfathread_object) cfathread_exception;
    239 typedef ThreadCancelled_vtable(cfathread_object) cfathread_vtable;
     239typedef vtable(ThreadCancelled(cfathread_object)) cfathread_vtable;
    240240
    241241void defaultResumptionHandler(ThreadCancelled(cfathread_object) & except) {
     
    283283
    284284typedef ThreadCancelled(__cfainit) __cfainit_exception;
    285 typedef ThreadCancelled_vtable(__cfainit) __cfainit_vtable;
     285typedef vtable(ThreadCancelled(__cfainit)) __cfainit_vtable;
    286286
    287287void defaultResumptionHandler(ThreadCancelled(__cfainit) & except) {
     
    326326}
    327327
     328#pragma GCC visibility push(default)
     329
    328330//================================================================================
    329331// Main Api
    330332extern "C" {
    331         int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) {
     333        int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) libcfa_public {
    332334                *cl = new();
    333335                return 0;
    334336        }
    335337
    336         cfathread_cluster_t cfathread_cluster_self(void) {
     338        cfathread_cluster_t cfathread_cluster_self(void) libcfa_public {
    337339                return active_cluster();
    338340        }
    339341
    340         int cfathread_cluster_print_stats( cfathread_cluster_t cl ) {
     342        int cfathread_cluster_print_stats( cfathread_cluster_t cl ) libcfa_public {
    341343                #if !defined(__CFA_NO_STATISTICS__)
    342344                        print_stats_at_exit( *cl, CFA_STATS_READY_Q | CFA_STATS_IO );
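The cfathread entry points combine libcfa_public with argument attributes such as nonnull(1). A small C sketch of the same pattern; create_handle is a hypothetical name, not part of the changeset:

        #include <stdlib.h>

        // nonnull(1) promises the first argument is never NULL, letting the
        // compiler warn at NULL call sites and omit defensive checks
        int create_handle( void ** out ) __attribute__((nonnull(1)));
        int create_handle( void ** out ) {
                *out = malloc( 16 );                    // out itself is safe to dereference
                return *out ? 0 : -1;                   // 0 on success, matching the API above
        }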
  • libcfa/src/concurrency/coroutine.cfa

    r015925a re5d9274  
    4848//-----------------------------------------------------------------------------
    4949forall(T &)
    50 void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {
     50void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public {
    5151        dst->virtual_table = src->virtual_table;
    5252        dst->the_coroutine = src->the_coroutine;
     
    5555
    5656forall(T &)
    57 const char * msg(CoroutineCancelled(T) *) {
     57const char * msg(CoroutineCancelled(T) *) libcfa_public {
    5858        return "CoroutineCancelled(...)";
    5959}
     
    6262forall(T & | is_coroutine(T))
    6363void __cfaehm_cancelled_coroutine(
    64                 T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {
     64                T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) ) libcfa_public {
    6565        verify( desc->cancellation );
    6666        desc->state = Cancelled;
     
    8989
    9090void __stack_prepare( __stack_info_t * this, size_t create_size );
    91 void __stack_clean  ( __stack_info_t * this );
     91static void __stack_clean  ( __stack_info_t * this );
    9292
    9393//-----------------------------------------------------------------------------
     
    114114}
    115115
    116 void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) {
     116void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) {
    117117        (this.context){0p, 0p};
    118118        (this.stack){storage, storageSize};
     
    124124}
    125125
    126 void ^?{}(coroutine$& this) {
     126void ^?{}(coroutine$& this) libcfa_public {
    127127        if(this.state != Halted && this.state != Start && this.state != Primed) {
    128128                coroutine$ * src = active_coroutine();
     
    146146// Part of the Public API
    147147// Not inline since only ever called once per coroutine
    148 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
    149 void prime(T& cor) {
     148forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
     149void prime(T& cor) libcfa_public {
    150150        coroutine$* this = get_coroutine(cor);
    151151        assert(this->state == Start);
     
    155155}
    156156
    157 [void *, size_t] __stack_alloc( size_t storageSize ) {
     157static [void *, size_t] __stack_alloc( size_t storageSize ) {
    158158        const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
    159159        assert(__page_size != 0l);
     
    193193}
    194194
    195 void __stack_clean  ( __stack_info_t * this ) {
     195static void __stack_clean  ( __stack_info_t * this ) {
    196196        void * storage = this->storage->limit;
    197197
     
    215215}
    216216
    217 void __stack_prepare( __stack_info_t * this, size_t create_size ) {
     217void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public {
    218218        const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
    219219        bool userStack;
  • libcfa/src/concurrency/coroutine.hfa

    r015925a re5d9274  
    2222//-----------------------------------------------------------------------------
    2323// Exception thrown from resume when a coroutine stack is cancelled.
    24 EHM_FORALL_EXCEPTION(CoroutineCancelled, (coroutine_t &), (coroutine_t)) (
     24forall(coroutine_t &)
     25exception CoroutineCancelled {
    2526        coroutine_t * the_coroutine;
    2627        exception_t * the_exception;
    27 );
     28};
    2829
    2930forall(T &)
     
    3738// Anything that implements this trait can be resumed.
    3839// Anything that is resumed is a coroutine.
    39 trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled, (T))) {
     40trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled(T))) {
    4041        void main(T & this);
    4142        coroutine$ * get_coroutine(T & this);
     
    6061//-----------------------------------------------------------------------------
    6162// Public coroutine API
    62 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
     63forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
    6364void prime(T & cor);
    6465
     
    113114
    114115extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
    115 extern void __stack_clean  ( __stack_info_t * this );
    116 
    117116
    118117// Suspend implementation inlined for performance
     
    141140forall(T & | is_coroutine(T))
    142141void __cfaehm_cancelled_coroutine(
    143         T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) );
     142        T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) );
    144143
    145144// Resume implementation inlined for performance
    146 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
     145forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
    147146static inline T & resume(T & cor) {
    148147        // optimization : read TLS once and reuse it
  • libcfa/src/concurrency/exception.cfa

    r015925a re5d9274  
    6464extern "C" {
    6565
    66 struct exception_context_t * this_exception_context(void) {
     66struct exception_context_t * this_exception_context(void) libcfa_public {
    6767        return &__get_stack( active_coroutine() )->exception_context;
    6868}
    6969
    70 _Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) {
     70_Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) libcfa_public {
    7171        _Unwind_Stop_Fn stop_func;
    7272        void * stop_param;
  • libcfa/src/concurrency/invoke.c

    r015925a re5d9274  
    3636extern void enable_interrupts( _Bool poll );
    3737
    38 void __cfactx_invoke_coroutine(
     38libcfa_public void __cfactx_invoke_coroutine(
    3939        void (*main)(void *),
    4040        void *this
     
    7070}
    7171
    72 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
     72libcfa_public void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
    7373void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) {
    7474        _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
     
    7777}
    7878
    79 void __cfactx_invoke_thread(
     79libcfa_public void __cfactx_invoke_thread(
    8080        void (*main)(void *),
    8181        void *this
     
    9898}
    9999
    100 void __cfactx_start(
     100libcfa_public void __cfactx_start(
    101101        void (*main)(void *),
    102102        struct coroutine$ * cor,
  • libcfa/src/concurrency/io.cfa

    r015925a re5d9274  
    221221                        const unsigned long long ctsc = rdtscl();
    222222
    223                         if(proc->io.target == MAX) {
     223                        if(proc->io.target == UINT_MAX) {
    224224                                uint64_t chaos = __tls_rand();
    225225                                unsigned ext = chaos & 0xff;
     
    232232                        else {
    233233                                const unsigned target = proc->io.target;
    234                                 /* paranoid */ verify( io.tscs[target].tv != MAX );
     234                                /* paranoid */ verify( io.tscs[target].tv != ULLONG_MAX );
    235235                                HELP: if(target < ctxs_count) {
    236236                                        const unsigned long long cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io);
     
    246246                                        __STATS__( true, io.calls.helped++; )
    247247                                }
    248                                 proc->io.target = MAX;
     248                                proc->io.target = UINT_MAX;
    249249                        }
    250250                }
     
    340340        // for convenience, return both the index and the pointer to the sqe
    341341        // sqe == &sqes[idx]
    342         struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
     342        struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public {
    343343                // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
    344344
     
    419419        }
    420420
    421         void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
     421        void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {
    422422                // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
    423423
  • libcfa/src/concurrency/io/call.cfa.in

    r015925a re5d9274  
    139139// I/O Interface
    140140//=============================================================================================
     141#pragma GCC visibility push(default)
    141142"""
    142143
  • libcfa/src/concurrency/io/setup.cfa

    r015925a re5d9274  
    2626
    2727#if !defined(CFA_HAVE_LINUX_IO_URING_H)
    28         void ?{}(io_context_params & this) {}
     28        void ?{}(io_context_params & this) libcfa_public {}
    2929
    3030        void  ?{}($io_context & this, struct cluster & cl) {}
     
    6666#pragma GCC diagnostic pop
    6767
    68         void ?{}(io_context_params & this) {
     68        void ?{}(io_context_params & this) libcfa_public {
    6969                this.num_entries = 256;
    7070        }
  • libcfa/src/concurrency/io/types.hfa

    r015925a re5d9274  
    1717#pragma once
    1818
     19#include <limits.h>
     20
    1921extern "C" {
    2022        #include <linux/types.h>
     
    2527#include "iofwd.hfa"
    2628#include "kernel/fwd.hfa"
    27 #include "limits.hfa"
    2829
    2930#if defined(CFA_HAVE_LINUX_IO_URING_H)
     
    140141                const __u32 tail = *this->cq.tail;
    141142
    142                 if(head == tail) return MAX;
     143                if(head == tail) return ULLONG_MAX;
    143144
    144145                return this->cq.ts;
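Across the I/O and ready-queue code, the project-local MAX from limits.hfa is replaced by the standard <limits.h> constants, chosen to match the width of the field they act as a sentinel for: UINT_MAX for the unsigned helping targets, ULLONG_MAX for the 64-bit timestamps. A condensed sketch of the idiom (variable and function names are illustrative):

        #include <limits.h>

        void help_once( unsigned * target, unsigned long long ts ) {
                if ( *target == UINT_MAX ) { /* no helping target chosen yet: pick one */ }
                if ( ts != ULLONG_MAX )    { /* timestamp is meaningful: compare it */ }
                *target = UINT_MAX;        // reset the sentinel once helping is done
        }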
  • libcfa/src/concurrency/kernel.cfa

    r015925a re5d9274  
    389389
    390390// KERNEL_ONLY
    391 void returnToKernel() {
     391static void returnToKernel() {
    392392        /* paranoid */ verify( ! __preemption_enabled() );
    393393        coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
     
    547547}
    548548
    549 void unpark( thread$ * thrd, unpark_hint hint ) {
     549void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
    550550        if( !thrd ) return;
    551551
     
    558558}
    559559
    560 void park( void ) {
     560void park( void ) libcfa_public {
    561561        __disable_interrupts_checked();
    562562                /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
     
    601601
    602602// KERNEL ONLY
    603 bool force_yield( __Preemption_Reason reason ) {
     603bool force_yield( __Preemption_Reason reason ) libcfa_public {
    604604        __disable_interrupts_checked();
    605605                thread$ * thrd = kernelTLS().this_thread;
     
    849849//-----------------------------------------------------------------------------
    850850// Debug
    851 bool threading_enabled(void) __attribute__((const)) {
     851bool threading_enabled(void) __attribute__((const)) libcfa_public {
    852852        return true;
    853853}
     
    856856// Statistics
    857857#if !defined(__CFA_NO_STATISTICS__)
    858         void print_halts( processor & this ) {
     858        void print_halts( processor & this ) libcfa_public {
    859859                this.print_halts = true;
    860860        }
     
    873873        }
    874874
    875         void crawl_cluster_stats( cluster & this ) {
     875        static void crawl_cluster_stats( cluster & this ) {
    876876                // Stop the world, otherwise stats could get really messed-up
    877877                // this doesn't solve all problems but does solve many
     
    889889
    890890
    891         void print_stats_now( cluster & this, int flags ) {
     891        void print_stats_now( cluster & this, int flags ) libcfa_public {
    892892                crawl_cluster_stats( this );
    893893                __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
  • libcfa/src/concurrency/kernel.hfa

    r015925a re5d9274  
    4949
    5050// Coroutine used by processors for the 2-step context switch
    51 coroutine processorCtx_t {
     51
     52struct processorCtx_t {
     53        struct coroutine$ self;
    5254        struct processor * proc;
    5355};
  • libcfa/src/concurrency/kernel/cluster.cfa

    r015925a re5d9274  
    4949
    5050// returns the maximum number of processors the RWLock support
    51 __attribute__((weak)) unsigned __max_processors() {
     51__attribute__((weak)) unsigned __max_processors() libcfa_public {
    5252        const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
    5353        if(!max_cores_s) {
     
    233233                                        if(is_empty(sl)) {
    234234                                                assert( sl.anchor.next == 0p );
    235                                                 assert( sl.anchor.ts   == -1llu );
     235                                                assert( sl.anchor.ts   == MAX );
    236236                                                assert( mock_head(sl)  == sl.prev );
    237237                                        } else {
    238238                                                assert( sl.anchor.next != 0p );
    239                                                 assert( sl.anchor.ts   != -1llu );
     239                                                assert( sl.anchor.ts   != MAX );
    240240                                                assert( mock_head(sl)  != sl.prev );
    241241                                        }
     
    259259                /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
    260260                it->rdq.id = valrq;
    261                 it->rdq.target = MAX;
     261                it->rdq.target = UINT_MAX;
    262262                valrq += __shard_factor.readyq;
    263263                #if defined(CFA_HAVE_LINUX_IO_URING_H)
    264264                        it->io.ctx->cq.id = valio;
    265                         it->io.target = MAX;
     265                        it->io.target = UINT_MAX;
    266266                        valio += __shard_factor.io;
    267267                #endif
     
    472472        this.prev = mock_head(this);
    473473        this.anchor.next = 0p;
    474         this.anchor.ts   = -1llu;
     474        this.anchor.ts   = MAX;
    475475        #if !defined(__CFA_NO_STATISTICS__)
    476476                this.cnt  = 0;
     
    484484        /* paranoid */ verify( &mock_head(this)->link.ts   == &this.anchor.ts   );
    485485        /* paranoid */ verify( mock_head(this)->link.next == 0p );
    486         /* paranoid */ verify( mock_head(this)->link.ts   == -1llu );
     486        /* paranoid */ verify( mock_head(this)->link.ts   == MAX );
    487487        /* paranoid */ verify( mock_head(this) == this.prev );
    488488        /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 );
     
    495495        // Make sure the list is empty
    496496        /* paranoid */ verify( this.anchor.next == 0p );
    497         /* paranoid */ verify( this.anchor.ts   == -1llu );
     497        /* paranoid */ verify( this.anchor.ts   == MAX );
    498498        /* paranoid */ verify( mock_head(this)  == this.prev );
    499499}
  • libcfa/src/concurrency/kernel/cluster.hfa

    r015925a re5d9274  
    1919#include "kernel/private.hfa"
    2020
    21 #include "limits.hfa"
     21#include <limits.h>
    2222
    2323//-----------------------------------------------------------------------
     
    3737
    3838static inline void touch_tsc(__timestamp_t * tscs, size_t idx, unsigned long long ts_prev, unsigned long long ts_next) {
    39         if (ts_next == MAX) return;
     39        if (ts_next == ULLONG_MAX) return;
    4040        unsigned long long now = rdtscl();
    4141        unsigned long long pma = __atomic_load_n(&tscs[ idx ].ma, __ATOMIC_RELAXED);
     
    5959        for(i; shard_factor) {
    6060                unsigned long long ptsc = ts(data[start + i]);
    61                 if(ptsc != -1ull) {
     61                if(ptsc != ULLONG_MAX) {
    6262                        /* paranoid */ verify( start + i < count );
    6363                        unsigned long long tsc = moving_average(ctsc, ptsc, tscs[start + i].ma);
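touch_tsc above skips sentinel timestamps and folds every real sample into a per-lane moving average. The exact weighting lives in moving_average, which this hunk does not show; the sketch below is a generic smoothing of that kind, with the 1/8 factor as an assumption only:

        // exponential moving average update; the smoothing factor is hypothetical
        static unsigned long long ema_update( unsigned long long prev,
                                              unsigned long long sample ) {
                long long delta = (long long)sample - (long long)prev;
                return prev + delta / 8;
        }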
  • libcfa/src/concurrency/kernel/private.hfa

    r015925a re5d9274  
    109109//-----------------------------------------------------------------------------
    110110// Processor
    111 void main(processorCtx_t *);
     111void main(processorCtx_t &);
     112static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; }
    112113
    113114void * __create_pthread( pthread_t *, void * (*)(void *), void * );
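Together with the kernel.hfa hunk above, this replaces the coroutine type generator for processorCtx_t with the equivalent hand-written pattern: embed the coroutine$ descriptor and supply the accessor the is_coroutine trait expects. Condensed from the two hunks:

        struct processorCtx_t {
                struct coroutine$ self;                 // embedded descriptor
                struct processor * proc;
        };

        // satisfies the get_coroutine requirement of the is_coroutine trait
        static inline coroutine$ * get_coroutine( processorCtx_t & this ) {
                return &this.self;
        }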
  • libcfa/src/concurrency/kernel/startup.cfa

    r015925a re5d9274  
    120120#endif
    121121
    122 cluster              * mainCluster;
     122cluster              * mainCluster libcfa_public;
    123123processor            * mainProcessor;
    124124thread$              * mainThread;
     
    169169};
    170170
    171 void ?{}( current_stack_info_t & this ) {
     171static void ?{}( current_stack_info_t & this ) {
    172172        __stack_context_t ctx;
    173173        CtxGet( ctx );
     
    209209        // Construct the processor context of the main processor
    210210        void ?{}(processorCtx_t & this, processor * proc) {
    211                 (this.__cor){ "Processor" };
    212                 this.__cor.starter = 0p;
     211                (this.self){ "Processor" };
     212                this.self.starter = 0p;
    213213                this.proc = proc;
    214214        }
     
    507507        self_mon_p = &self_mon;
    508508        link.next = 0p;
    509         link.ts   = -1llu;
     509        link.ts   = MAX;
    510510        preferred = ready_queue_new_preferred();
    511511        last_proc = 0p;
     
    526526// Construct the processor context of non-main processors
    527527static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
    528         (this.__cor){ info };
     528        (this.self){ info };
    529529        this.proc = proc;
    530530}
     
    578578}
    579579
    580 void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {
     580void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) libcfa_public {
    581581        ( this.terminated ){};
    582582        ( this.runner ){};
     
    591591}
    592592
    593 void ?{}(processor & this, const char name[], cluster & _cltr) {
     593void ?{}(processor & this, const char name[], cluster & _cltr) libcfa_public {
    594594        (this){name, _cltr, 0p};
    595595}
    596596
    597597extern size_t __page_size;
    598 void ^?{}(processor & this) with( this ){
     598void ^?{}(processor & this) libcfa_public with( this ) {
    599599        /* paranoid */ verify( !__atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) );
    600600        __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);
     
    623623}
    624624
    625 void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {
     625void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) libcfa_public with( this ) {
    626626        this.name = name;
    627627        this.preemption_rate = preemption_rate;
     
    658658}
    659659
    660 void ^?{}(cluster & this) {
     660void ^?{}(cluster & this) libcfa_public {
    661661        destroy(this.io.arbiter);
    662662
  • libcfa/src/concurrency/locks.cfa

    r015925a re5d9274  
    2424#include <stdlib.hfa>
    2525
     26#pragma GCC visibility push(default)
     27
    2628//-----------------------------------------------------------------------------
    2729// info_thread
     
    116118}
    117119
    118 void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
     120static void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
    119121        thread$ * t = &try_pop_front( blocked_threads );
    120122        owner = t;
     
    264266        void ^?{}( alarm_node_wrap(L) & this ) { }
    265267
    266         void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
     268        static void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
    267269                // This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin.
    268270                lock( cond->lock __cfaabi_dbg_ctx2 );
     
    288290
    289291        // this casts the alarm node to our wrapped type since we used type erasure
    290         void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
     292        static void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
    291293}
    292294
     
    305307        void ^?{}( condition_variable(L) & this ){ }
    306308
    307         void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
     309        static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
    308310                if(&popped != 0p) {
    309311                        popped.signalled = true;
     
    350352        int counter( condition_variable(L) & this ) with(this) { return count; }
    351353
    352         size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
     354        static size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
    353355                // add info_thread to waiting queue
    354356                insert_last( blocked_threads, *i );
     
    363365
    364366        // helper for wait() variants with no timeout
    365         void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
     367        static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
    366368                lock( lock __cfaabi_dbg_ctx2 );
    367369                size_t recursion_count = queue_and_get_recursion(this, &i);
     
    380382
    381383        // helper for wait() variants with a timeout
    382         void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
     384        static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
    383385                lock( lock __cfaabi_dbg_ctx2 );
    384386                size_t recursion_count = queue_and_get_recursion(this, &info);
     
    415417        // fast_cond_var
    416418        void  ?{}( fast_cond_var(L) & this ){
    417                 this.blocked_threads{}; 
     419                this.blocked_threads{};
    418420                #ifdef __CFA_DEBUG__
    419421                this.lock_used = 0p;
  • libcfa/src/concurrency/monitor.cfa

    r015925a re5d9274  
    4444static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
    4545
     46static inline void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
     47static inline void ?{}(__condition_criterion_t & this );
     48static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
     49
    4650static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    4751static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     
    243247
    244248// Leave single monitor
    245 void __leave( monitor$ * this ) {
     249static void __leave( monitor$ * this ) {
    246250        // Lock the monitor spinlock
    247251        lock( this->lock __cfaabi_dbg_ctx2 );
     
    278282
    279283// Leave single monitor for the last time
    280 void __dtor_leave( monitor$ * this, bool join ) {
     284static void __dtor_leave( monitor$ * this, bool join ) {
    281285        __cfaabi_dbg_debug_do(
    282286                if( active_thread() != this->owner ) {
     
    344348// Ctor for monitor guard
    345349// Sorts monitors before entering
    346 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
     350void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) libcfa_public {
    347351        thread$ * thrd = active_thread();
    348352
     
    369373}
    370374
    371 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) {
     375void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) libcfa_public {
    372376        this{ m, count, 0p };
    373377}
     
    375379
    376380// Dtor for monitor guard
    377 void ^?{}( monitor_guard_t & this ) {
     381void ^?{}( monitor_guard_t & this ) libcfa_public {
    378382        // __cfaabi_dbg_print_safe( "MGUARD : leaving %d\n", this.count);
    379383
     
    389393// Ctor for monitor guard
    390394// Sorts monitors before entering
    391 void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
     395void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) libcfa_public {
    392396        // optimization
    393397        thread$ * thrd = active_thread();
     
    409413
    410414// Dtor for monitor guard
    411 void ^?{}( monitor_dtor_guard_t & this ) {
     415void ^?{}( monitor_dtor_guard_t & this ) libcfa_public {
    412416        // Leave the monitors in order
    413417        __dtor_leave( this.m, this.join );
     
    419423//-----------------------------------------------------------------------------
    420424// Internal scheduling types
    421 void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
     425static void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    422426        this.waiting_thread = waiting_thread;
    423427        this.count = count;
     
    426430}
    427431
    428 void ?{}(__condition_criterion_t & this ) with( this ) {
     432static void ?{}(__condition_criterion_t & this ) with( this ) {
    429433        ready  = false;
    430434        target = 0p;
     
    433437}
    434438
    435 void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
     439static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
    436440        this.ready  = false;
    437441        this.target = target;
     
    442446//-----------------------------------------------------------------------------
    443447// Internal scheduling
    444 void wait( condition & this, uintptr_t user_info = 0 ) {
     448void wait( condition & this, uintptr_t user_info = 0 ) libcfa_public {
    445449        brand_condition( this );
    446450
     
    496500}
    497501
    498 bool signal( condition & this ) {
     502bool signal( condition & this ) libcfa_public {
    499503        if( is_empty( this ) ) { return false; }
    500504
     
    538542}
    539543
    540 bool signal_block( condition & this ) {
     544bool signal_block( condition & this ) libcfa_public {
    541545        if( !this.blocked.head ) { return false; }
    542546
     
    586590
    587591// Access the user_info of the thread waiting at the front of the queue
    588 uintptr_t front( condition & this ) {
     592uintptr_t front( condition & this ) libcfa_public {
    589593        verifyf( !is_empty(this),
    590594                "Attempt to access user data on an empty condition.\n"
     
    608612//              setup mask
    609613//              block
    610 void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
     614void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) libcfa_public {
    611615        // This statement doesn't have a contiguous list of monitors...
    612616        // Create one!
     
    994998// Can't be accepted since a mutex stmt is effectively an anonymous routine
    995999// Thus we do not need a monitor group
    996 void lock( monitor$ * this ) {
     1000void lock( monitor$ * this ) libcfa_public {
    9971001        thread$ * thrd = active_thread();
    9981002
     
    10461050// Leave routine for mutex stmt
    10471051// Is just a wrapper around __leave for the is_lock trait to see
    1048 void unlock( monitor$ * this ) { __leave( this ); }
     1052void unlock( monitor$ * this ) libcfa_public { __leave( this ); }
    10491053
    10501054// Local Variables: //
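Many monitor and lock helpers gain static here, the complement of the visibility work: static gives a symbol internal linkage within its translation unit, whereas the visibility pragma only decides whether an external symbol is exported from the shared object. A two-line illustration with hypothetical names:

        static void helper( void ) {}                   // internal: never leaves this file

        void api_call( void ) { helper(); }             // external and, under the pragma,
                                                        // part of the exported interface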
  • libcfa/src/concurrency/monitor.hfa

    r015925a re5d9274  
    119119}
    120120
    121 void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
    122 void ?{}(__condition_criterion_t & this );
    123 void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
     121// void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
     122// void ?{}(__condition_criterion_t & this );
     123// void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
    124124
    125125struct condition {
  • libcfa/src/concurrency/preemption.cfa

    r015925a re5d9274  
    3838#endif
    3939
    40 __attribute__((weak)) Duration default_preemption() {
     40__attribute__((weak)) Duration default_preemption() libcfa_public {
    4141        const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION");
    4242        if(!preempt_rate_s) {
     
    238238//----------
    239239// special case for preemption since used often
    240 __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() {
     240__attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_public {
    241241        // create an assembler label before
    242242        // marked as clobber all to avoid movement
     
    276276// Get data from the TLS block
    277277// struct asm_region __cfaasm_get;
    278 uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems
     278uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__, visibility("default"))); //no inline to avoid problems
    279279uintptr_t __cfatls_get( unsigned long int offset ) {
    280280        // create an assembler label before
     
    295295extern "C" {
    296296        // Disable interrupts by incrementing the counter
    297         void disable_interrupts() {
     297        __attribute__((__noinline__, visibility("default"))) void disable_interrupts() libcfa_public {
    298298                // create an assembler label before
    299299                // marked as clobber all to avoid movement
     
    326326        // Enable interrupts by decrementing the counter
    327327        // If counter reaches 0, execute any pending __cfactx_switch
    328         void enable_interrupts( bool poll ) {
     328        void enable_interrupts( bool poll ) libcfa_public {
    329329                // Cache the processor now since interrupts can start happening after the atomic store
    330330                processor   * proc = __cfaabi_tls.this_processor;
     
    362362//-----------------------------------------------------------------------------
    363363// Kernel Signal Debug
    364 void __cfaabi_check_preemption() {
     364void __cfaabi_check_preemption() libcfa_public {
    365365        bool ready = __preemption_enabled();
    366366        if(!ready) { abort("Preemption should be ready"); }
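default_preemption keeps its weak, environment-driven definition while gaining libcfa_public. A sketch of that pattern; MY_RATE and my_default_rate are hypothetical stand-ins, not names from the changeset:

        #include <stdlib.h>

        // weak: a strong definition in the application overrides this default at link time
        __attribute__((weak)) long my_default_rate( void ) {
                const char * s = getenv( "MY_RATE" );
                return s ? atol( s ) : 10;              // fall back to a built-in default
        }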
  • libcfa/src/concurrency/ready_queue.cfa

    r015925a re5d9274  
    125125        const unsigned long long ctsc = rdtscl();
    126126
    127         if(proc->rdq.target == MAX) {
     127        if(proc->rdq.target == UINT_MAX) {
    128128                uint64_t chaos = __tls_rand();
    129129                unsigned ext = chaos & 0xff;
     
    137137                const unsigned target = proc->rdq.target;
    138138                __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, readyQ.tscs[target].tv);
    139                 /* paranoid */ verify( readyQ.tscs[target].tv != MAX );
     139                /* paranoid */ verify( readyQ.tscs[target].tv != ULLONG_MAX );
    140140                if(target < lanes_count) {
    141141                        const unsigned long long cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq);
     
    147147                        }
    148148                }
    149                 proc->rdq.target = MAX;
     149                proc->rdq.target = UINT_MAX;
    150150        }
    151151
     
    245245// get preferred ready for new thread
    246246unsigned ready_queue_new_preferred() {
    247         unsigned pref = MAX;
     247        unsigned pref = UINT_MAX;
    248248        if(struct thread$ * thrd = publicTLS_get( this_thread )) {
    249249                pref = thrd->preferred;
  • libcfa/src/concurrency/ready_subqueue.hfa

    r015925a re5d9274  
    3232        /* paranoid */ verify( this.lock );
    3333        /* paranoid */ verify( node->link.next == 0p );
    34         /* paranoid */ verify( node->link.ts  == MAX  );
     34        /* paranoid */ verify( __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED) == MAX  );
    3535        /* paranoid */ verify( this.prev->link.next == 0p );
    36         /* paranoid */ verify( this.prev->link.ts   == MAX  );
     36        /* paranoid */ verify( __atomic_load_n(&this.prev->link.ts, __ATOMIC_RELAXED)   == MAX  );
    3737        if( this.anchor.next == 0p ) {
    3838                /* paranoid */ verify( this.anchor.next == 0p );
    39                 /* paranoid */ verify( this.anchor.ts  == MAX );
    40                 /* paranoid */ verify( this.anchor.ts  != 0  );
     39                /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) == MAX );
     40                /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0  );
    4141                /* paranoid */ verify( this.prev == mock_head( this ) );
    4242        } else {
    4343                /* paranoid */ verify( this.anchor.next != 0p );
    44                 /* paranoid */ verify( this.anchor.ts  != MAX );
    45                 /* paranoid */ verify( this.anchor.ts  != 0  );
     44                /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX );
     45                /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0  );
    4646                /* paranoid */ verify( this.prev != mock_head( this ) );
    4747        }
     
    6262        /* paranoid */ verify( this.lock );
    6363        /* paranoid */ verify( this.anchor.next != 0p );
    64         /* paranoid */ verify( this.anchor.ts  != MAX );
    65         /* paranoid */ verify( this.anchor.ts   != 0  );
     64        /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX );
     65        /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0   );
    6666
    6767        // Get the relevant nodes locally
    6868        thread$ * node = this.anchor.next;
    6969        this.anchor.next = node->link.next;
    70         this.anchor.ts   = node->link.ts;
     70        __atomic_store_n(&this.anchor.ts, __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED), __ATOMIC_RELAXED);
    7171        bool is_empty = this.anchor.next == 0p;
    7272        node->link.next = 0p;
    73         node->link.ts   = MAX;
     73        __atomic_store_n(&node->link.ts, ULLONG_MAX, __ATOMIC_RELAXED);
    7474        #if !defined(__CFA_NO_STATISTICS__)
    7575                this.cnt--;
     
    7979        if(is_empty) this.prev = mock_head( this );
    8080
     81        unsigned long long ats = __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED);
    8182        /* paranoid */ verify( node->link.next == 0p );
    82         /* paranoid */ verify( node->link.ts   == MAX );
    83         /* paranoid */ verify( node->link.ts   != 0  );
    84         /* paranoid */ verify( this.anchor.ts  != 0 );
    85         /* paranoid */ verify( (this.anchor.ts == MAX) == is_empty );
    86         return [node, this.anchor.ts];
     83        /* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) == MAX );
     84        /* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) != 0   );
     85        /* paranoid */ verify( ats != 0 );
     86        /* paranoid */ verify( (ats == MAX) == is_empty );
     87        return [node, ats];
    8788}
    8889
     
    9697        // Cannot verify 'emptiness' here since it may not be locked
    9798        /* paranoid */ verify(this.anchor.ts != 0);
    98         return this.anchor.ts;
     99        /* paranoid */ static_assert(__atomic_always_lock_free(sizeof(this.anchor.ts), &this.anchor.ts));
     100        return __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED);
    99101}
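The subqueue timestamps are now read and written with explicit relaxed atomics: the code needs untorn accesses to link.ts and anchor.ts, not ordering against other memory operations, and the new static_assert on __atomic_always_lock_free guarantees these never degrade to a lock. A minimal sketch of the accessors this hunk inlines:

        #include <limits.h>

        static unsigned long long read_ts( unsigned long long * ts ) {
                return __atomic_load_n( ts, __ATOMIC_RELAXED );         // untorn read
        }

        static void clear_ts( unsigned long long * ts ) {
                __atomic_store_n( ts, ULLONG_MAX, __ATOMIC_RELAXED );   // mark empty
        }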
  • libcfa/src/concurrency/thread.cfa

    r015925a re5d9274  
    1919#include "thread.hfa"
    2020
     21#include "exception.hfa"
    2122#include "kernel/private.hfa"
    22 #include "exception.hfa"
     23#include "limits.hfa"
    2324
    2425#define __CFA_INVOKE_PRIVATE__
     
    2627
    2728extern uint32_t __global_random_seed, __global_random_prime, __global_random_mask;
     29
     30#pragma GCC visibility push(default)
    2831
    2932//-----------------------------------------------------------------------------
     
    4245        curr_cluster = &cl;
    4346        link.next = 0p;
    44         link.ts   = -1llu;
     47        link.ts   = MAX;
    4548        preferred = ready_queue_new_preferred();
    4649        last_proc = 0p;
     
    8790}
    8891
    89 forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T))
    90     | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
     92forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
     93    | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
    9194void ?{}( thread_dtor_guard_t & this,
    9295                T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) {
     
    166169
    167170//-----------------------------------------------------------------------------
    168 forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T))
    169     | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
     171forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
     172        | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
    170173T & join( T & this ) {
    171174        thread_dtor_guard_t guard = { this, defaultResumptionHandler };
  • libcfa/src/concurrency/thread.hfa

    r015925a re5d9274  
    3232};
    3333
    34 EHM_FORALL_EXCEPTION(ThreadCancelled, (thread_t &), (thread_t)) (
     34forall(thread_t &)
     35exception ThreadCancelled {
    3536        thread_t * the_thread;
    3637        exception_t * the_exception;
    37 );
     38};
    3839
    3940forall(T &)
     
    7980};
    8081
    81 forall( T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T))
    82     | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
     82forall( T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
     83        | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
    8384void ?{}( thread_dtor_guard_t & this, T & thrd, void(*)(ThreadCancelled(T) &) );
    8485void ^?{}( thread_dtor_guard_t & this );
     
    126127//----------
    127128// join
    128 forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T))
    129     | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
     129forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
     130        | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
    130131T & join( T & this );
    131132
  • libcfa/src/containers/maybe.cfa

    r015925a re5d9274  
    1717#include <assert.h>
    1818
     19#pragma GCC visibility push(default)
    1920
    2021forall(T)
  • libcfa/src/containers/result.cfa

    r015925a re5d9274  
    1717#include <assert.h>
    1818
     19#pragma GCC visibility push(default)
    1920
    2021forall(T, E)
  • libcfa/src/containers/string.cfa

    r015925a re5d9274  
    1818#include <stdlib.hfa>
    1919
     20#pragma GCC visibility push(default)
    2021
    2122/*
  • libcfa/src/containers/string_sharectx.hfa

    r015925a re5d9274  
    1616#pragma once
    1717
     18#pragma GCC visibility push(default)
     19
    1820//######################### String Sharing Context #########################
    1921
    2022struct VbyteHeap;
    2123
    22 // A string_sharectx 
     24// A string_sharectx
    2325//
    2426// Usage:
  • libcfa/src/containers/vector.cfa

    r015925a re5d9274  
    1818#include <stdlib.hfa>
    1919
     20#pragma GCC visibility push(default)
     21
    2022forall(T, allocator_t | allocator_c(T, allocator_t))
    21 void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other);
     23static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other);
    2224
    2325//------------------------------------------------------------------------------
     
    8385
    8486forall(T, allocator_t | allocator_c(T, allocator_t))
    85 void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other)
     87static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other)
    8688{
    8789        this->size = other->size;
  • libcfa/src/device/cpu.cfa

    r015925a re5d9274  
    3131}
    3232
     33#include "bits/defs.hfa"
    3334#include "algorithms/range_iterator.hfa"
    3435
     
    456457}
    457458
    458 cpu_info_t cpu_info;
     459libcfa_public cpu_info_t cpu_info;
  • libcfa/src/exception.c

    r015925a re5d9274  
    2727#include "stdhdr/assert.h"
    2828#include "virtual.h"
     29
     30#pragma GCC visibility push(default)
     31
    2932#include "lsda.h"
    3033
     
    261264#else // defined( __ARM_ARCH )
    262265        // The return code from _Unwind_RaiseException seems to be corrupt on ARM at end of stack.
    263         // This workaround tries to keep default exception handling working. 
     266        // This workaround tries to keep default exception handling working.
    264267        if ( ret == _URC_FATAL_PHASE1_ERROR || ret == _URC_FATAL_PHASE2_ERROR ) {
    265268#endif
  • libcfa/src/exception.hfa

    r015925a re5d9274  
    1010// Created On       : Thu Apr  7 10:25:00 2020
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Thr Apr  8 15:16:00 2021
    13 // Update Count     : 4
     12// Last Modified On : Wed May 25 17:20:00 2022
     13// Update Count     : 5
    1414//
    1515
     
    1818// -----------------------------------------------------------------------------------------------
    1919
    20 // EHM_EXCEPTION(exception_name)(fields...);
    21 // Create an exception (a virtual structure that inherits from exception_t)
    22 // with the given name and fields.
    23 #define EHM_EXCEPTION(exception_name) \
    24         _EHM_TYPE_ID_STRUCT(exception_name, ); \
    25         _EHM_TYPE_ID_VALUE(exception_name, ); \
    26         _EHM_VIRTUAL_TABLE_STRUCT(exception_name, , ); \
    27         _EHM_EXCEPTION_STRUCT(exception_name, , )
     20// EHM_DEFAULT_VTABLE(exception_type)
     21// Create a declaration for a (possibly polymorphic) default vtable.
     22// Mostly used by and for the concurrency module.
     23#define EHM_DEFAULT_VTABLE(type) vtable(type) & const _default_vtable
    2824
    29 // EHM_EXTERN_VTABLE(exception_name, table_name);
    30 // Forward declare a virtual table called table_name for exception_name type.
    31 #define EHM_EXTERN_VTABLE(exception_name, table_name) \
    32         _EHM_EXTERN_VTABLE(exception_name, , table_name)
    33 
    34 // EHM_VIRTUAL_TABLE(exception_name, table_name);
    35 // Define a virtual table called table_name for exception_name type.
    36 #define EHM_VIRTUAL_TABLE(exception_name, table_name) \
    37         _EHM_DEFINE_COPY(exception_name, ) \
    38         _EHM_DEFINE_MSG(exception_name, ) \
    39         _EHM_VIRTUAL_TABLE(exception_name, , table_name)
    40 
    41 // EHM_FORALL_EXCEPTION(exception_name, (assertions), (parameters))(fields...);
    42 // As EHM_EXCEPTION but for polymorphic types instead of monomorphic ones.
    43 // The assertions list should include all polymorphic parameters and
    44 // assertions inside a parentisized list. Parameters should include all the
    45 // polymorphic parameter names inside a parentisized list (same order).
    46 #define EHM_FORALL_EXCEPTION(exception_name, assertions, parameters) \
    47         _EHM_TYPE_ID_STRUCT(exception_name, forall assertions); \
    48         _EHM_VIRTUAL_TABLE_STRUCT(exception_name, forall assertions, parameters); \
    49         _EHM_EXCEPTION_STRUCT(exception_name, forall assertions, parameters)
    50 
    51 // EHM_FORALL_EXTERN_VTABLE(exception_name, (arguments), table_name);
    52 // As EHM_EXTERN_VTABLE but for polymorphic types instead of monomorphic ones.
    53 // Arguments should be the parentisized list of polymorphic arguments.
    54 #define EHM_FORALL_EXTERN_VTABLE(exception_name, arguments, table_name) \
    55         _EHM_EXTERN_VTABLE(exception_name, arguments, table_name)
    56 
    57 // EHM_FORALL_VIRTUAL_TABLE(exception_name, (arguments), table_name);
    58 // As EHM_VIRTUAL_TABLE but for polymorphic types instead of monomorphic ones.
    59 // Arguments should be the parentisized list of polymorphic arguments.
    60 #define EHM_FORALL_VIRTUAL_TABLE(exception_name, arguments, table_name) \
    61         _EHM_TYPE_ID_VALUE(exception_name, arguments); \
    62         _EHM_DEFINE_COPY(exception_name, arguments) \
    63         _EHM_DEFINE_MSG(exception_name, arguments) \
    64         _EHM_VIRTUAL_TABLE(exception_name, arguments, table_name)
    65 
    66 // EHM_DEFAULT_VTABLE(exception_name, (arguments))
    67 // Create a declaration for a (possibly polymorphic) default vtable.
    68 #define EHM_DEFAULT_VTABLE(exception_name, arguments) \
    69         _EHM_VTABLE_TYPE(exception_name) arguments & const _default_vtable
    70 
    71 // IS_EXCEPTION(exception_name [, (...parameters)])
    72 // IS_RESUMPTION_EXCEPTION(exception_name [, (parameters...)])
    73 // IS_TERMINATION_EXCEPTION(exception_name [, (parameters...)])
    74 // Create an assertion that exception_name, possibly with the qualifing parameters, is the given
    75 // kind of exception with the standard vtable with the same parameters if applicable.
    76 #define IS_EXCEPTION(...) _IS_EXCEPTION(is_exception, __VA_ARGS__, , ~)
    77 #define IS_RESUMPTION_EXCEPTION(...) _IS_EXCEPTION(is_resumption_exception, __VA_ARGS__, , ~)
    78 #define IS_TERMINATION_EXCEPTION(...) _IS_EXCEPTION(is_termination_exception, __VA_ARGS__, , ~)
    79 
    80 // Macros starting with a leading underscore are internal.
    81 
    82 // Create an exception type definition. must be tailing, can be polymorphic.
    83 #define _EHM_EXCEPTION_STRUCT(exception_name, forall_clause, parameters) \
    84         forall_clause struct exception_name { \
    85                 _EHM_VTABLE_TYPE(exception_name) parameters const * virtual_table; \
    86                 _CLOSE
    87 
    88 // Create a (possibly polymorphic) virtual table forward declaration.
    89 #define _EHM_EXTERN_VTABLE(exception_name, arguments, table_name) \
    90         extern const _EHM_VTABLE_TYPE(exception_name) arguments table_name
    91 
    92 // Create a (possibly polymorphic) virtual table definition.
    93 #define _EHM_VIRTUAL_TABLE(exception_type, arguments, table_name) \
    94         const _EHM_VTABLE_TYPE(exception_type) arguments table_name @= { \
    95                 .__cfavir_typeid : &_EHM_TYPE_ID_NAME(exception_type), \
    96                 .size : sizeof(struct exception_type arguments), \
    97                 .copy : copy, \
    98                 .^?{} : ^?{}, \
    99                 .msg : msg, \
    100         }
    101 
    102 // Create a (possibly polymorphic) copy function from an assignment operator.
    103 #define _EHM_DEFINE_FORALL_COPY(exception_name, forall_clause, parameters) \
    104         forall_clause void copy(exception_name parameters * this, \
    105                         exception_name parameters * that) { \
    106                 *this = *that; \
    107         }
    108 
    109 #define _EHM_DEFINE_COPY(exception_name, arguments) \
    110         void copy(exception_name arguments * this, exception_name arguments * that) { \
    111                 *this = *that; \
    112         }
    113 
    114 // Create a (possibly polymorphic) msg function
    115 #define _EHM_DEFINE_FORALL_MSG(exception_name, forall_clause, parameters) \
    116         forall_clause const char * msg(exception_name parameters * this) { \
    117                 return #exception_name #parameters; \
    118         }
    119 
    120 #define _EHM_DEFINE_MSG(exception_name, arguments) \
    121         const char * msg(exception_name arguments * this) { \
    122                 return #exception_name #arguments; \
    123         }
    124 
    125 // Produces the C compatable name of the virtual table type for a virtual type.
    126 #define _EHM_VTABLE_TYPE(type_name) struct _GLUE2(type_name,_vtable)
    127 
    128 // Create the vtable type for exception name.
    129 #define _EHM_VIRTUAL_TABLE_STRUCT(exception_name, forall_clause, parameters) \
    130         forall_clause struct exception_name; \
    131         forall_clause _EHM_VTABLE_TYPE(exception_name) { \
    132                 _EHM_TYPE_ID_TYPE(exception_name) parameters const * __cfavir_typeid; \
    133                 size_t size; \
    134                 void (*copy)(exception_name parameters * this, exception_name parameters * other); \
    135                 void (*^?{})(exception_name parameters & this); \
    136                 const char * (*msg)(exception_name parameters * this); \
    137         }
    138 
    139 // Define the function required to satify the trait for exceptions.
    140 #define _EHM_TRAIT_FUNCTION(exception_name, forall_clause, parameters) \
    141         forall_clause inline void mark_exception( \
    142                 exception_name parameters const &, \
    143                 _EHM_VTABLE_TYPE(exception_name) parameters const &) {} \
    144 
    145 #define __EHM_TRAIT_FUNCTION(exception_name, forall_clause, parameters) \
    146         forall_clause inline _EHM_VTABLE_TYPE(exception_name) parameters const & \
    147                         get_exception_vtable(exception_name parameters const & this) { \
    148                 /* This comes before the structure definition, but we know the offset. */ \
    149                 /* return (_EHM_VTABLE_TYPE(exception_name) parameters const &)this; */ \
    150                 assert(false); \
    151         }
    152 
    153 // Generates a new type-id structure. This is used to mangle the name of the
    154 // type-id instance so it also includes polymorphic information. Must be the
    155 // direct decendent of exception_t.
    156 // The second field is used to recover type information about the exception.
    157 #define _EHM_TYPE_ID_STRUCT(exception_name, forall_clause) \
    158         forall_clause _EHM_TYPE_ID_TYPE(exception_name) { \
    159                 __cfavir_type_info const * parent; \
    160         }
    161 
    162 // Generate a new type-id value.
    163 #define _EHM_TYPE_ID_VALUE(exception_name, arguments) \
    164         __attribute__((cfa_linkonce)) \
    165         _EHM_TYPE_ID_TYPE(exception_name) arguments const \
    166                         _EHM_TYPE_ID_NAME(exception_name) = { \
    167                 &__cfatid_exception_t, \
    168         }
    169 
    170 // _EHM_TYPE_ID_STRUCT and _EHM_TYPE_ID_VALUE are the two that would need to
    171 // be updated to extend the hierarchy if we are still using macros when that
    172 // is added.
    173 
    174 // Produce the C compatable name of the type-id type for an exception type.
    175 #define _EHM_TYPE_ID_TYPE(exception_name) \
    176         struct _GLUE2(__cfatid_struct_, exception_name)
    177 
    178 // Produce the name of the instance of the type-id for an exception type.
    179 #define _EHM_TYPE_ID_NAME(exception_name) _GLUE2(__cfatid_,exception_name)
    180 
    181 #define _IS_EXCEPTION(kind, exception_name, parameters, ...) \
    182         kind(exception_name parameters, _EHM_VTABLE_TYPE(exception_name) parameters)
    183 
    184 // Internal helper macros:
    185 #define _CLOSE(...) __VA_ARGS__ }
    186 #define _GLUE2(left, right) left##right
     25// IS_EXCEPTION(exception_type)
     26// IS_RESUMPTION_EXCEPTION(exception_type)
     27// IS_TERMINATION_EXCEPTION(exception_type)
     28// Create an assertion that exception_type is the given kind of exception.
     29// This is used to mimic associated types so the vtable type need not be mentioned.
     30#define IS_EXCEPTION(type) is_exception(type, vtable(type))
     31#define IS_RESUMPTION_EXCEPTION(type) is_resumption_exception(type, vtable(type))
     32#define IS_TERMINATION_EXCEPTION(type) is_termination_exception(type, vtable(type))
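The removed macro suite is superseded by language support: exceptions are declared with the exception keyword and their vtables named through vtable(...), as the coroutine and thread headers earlier in this changeset already show. A condensed sketch of the new forms; MyCancelled and handle are hypothetical names:

        forall(T &)
        exception MyCancelled {                         // was EHM_FORALL_EXCEPTION(...)
                T * the_object;
        };

        forall(T & | IS_RESUMPTION_EXCEPTION(MyCancelled(T))
                | { EHM_DEFAULT_VTABLE(MyCancelled(T)); })
        void handle( T & );                             // requires the default vtable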
  • libcfa/src/fstream.cfa

    r015925a re5d9274  
    2222#include <assert.h>
    2323#include <errno.h>                                                                              // errno
     24
     25#pragma GCC visibility push(default)
    2426
    2527// *********************************** ofstream ***********************************
     
    118120                // abort | IO_MSG "open output file \"" | name | "\"" | nl | strerror( errno );
    119121        } // if
    120         (os){ file };                                                                           // initialize 
     122        (os){ file };                                                                           // initialize
    121123} // open
    122124
     
    157159        va_list args;
    158160        va_start( args, format );
    159                
     161
    160162        int len;
    161163    for ( cnt; 10 ) {
     
    241243                // abort | IO_MSG "open input file \"" | name | "\"" | nl | strerror( errno );
    242244        } // if
    243         (is){ file };                                                                           // initialize 
     245        (is){ file };                                                                           // initialize
    244246} // open
    245247
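The `#pragma GCC visibility push(default)` additions across these files pair with building libcfa under hidden-by-default symbol visibility: each translation unit pushes default visibility once and lets it run to the end of the file, so its external declarations are exported while the rest of the library stays hidden. A rough sketch of the mechanism, with illustrative declarations:

	// assumed build flag for the library: -fvisibility=hidden
	#pragma GCC visibility push(default)
	void open( ofstream & os, const char name[] );  // external linkage => exported
	static void helper( void );                     // static stays file-local regardless
	// (these files never pop, so the push covers the rest of the translation unit)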
  • libcfa/src/fstream.hfa

    r015925a re5d9274  
    1818#include "bits/weakso_locks.hfa"                                                // mutex_lock
    1919#include "iostream.hfa"
    20 #include <exception.hfa>
    2120
    2221
  • libcfa/src/heap.cfa

    r015925a re5d9274  
    3636static bool traceHeap = false;
    3737
    38 inline bool traceHeap() { return traceHeap; }
    39 
    40 bool traceHeapOn() {
     38inline bool traceHeap() libcfa_public { return traceHeap; }
     39
     40bool traceHeapOn() libcfa_public {
    4141        bool temp = traceHeap;
    4242        traceHeap = true;
     
    4444} // traceHeapOn
    4545
    46 bool traceHeapOff() {
     46bool traceHeapOff() libcfa_public {
    4747        bool temp = traceHeap;
    4848        traceHeap = false;
     
    5050} // traceHeapOff
    5151
    52 bool traceHeapTerm() { return false; }
     52bool traceHeapTerm() libcfa_public { return false; }
    5353
    5454
    5555static bool prtFree = false;
    5656
    57 bool prtFree() {
     57static bool prtFree() {
    5858        return prtFree;
    5959} // prtFree
    6060
    61 bool prtFreeOn() {
     61static bool prtFreeOn() {
    6262        bool temp = prtFree;
    6363        prtFree = true;
     
    6565} // prtFreeOn
    6666
    67 bool prtFreeOff() {
     67static bool prtFreeOff() {
    6868        bool temp = prtFree;
    6969        prtFree = false;
     
    388388static unsigned int maxBucketsUsed;                                             // maximum number of buckets in use
    389389// extern visibility, used by runtime kernel
    390 size_t __page_size;                                                                             // architecture pagesize
    391 int __map_prot;                                                                                 // common mmap/mprotect protection
     390// would be cool to remove libcfa_public but it's needed for libcfathread
     391libcfa_public size_t __page_size;                                                       // architecture pagesize
     392libcfa_public int __map_prot;                                                           // common mmap/mprotect protection
    392393
    393394
     
    727728
    728729
    729 size_t prtFree( Heap & manager ) with( manager ) {
     730static size_t prtFree( Heap & manager ) with( manager ) {
    730731        size_t total = 0;
    731732        #ifdef __STATISTICS__
     
    879880        // Allocates size bytes and returns a pointer to the allocated memory.  The contents are undefined. If size is 0,
    880881        // then malloc() returns a unique pointer value that can later be successfully passed to free().
    881         void * malloc( size_t size ) {
     882        void * malloc( size_t size ) libcfa_public {
    882883                #ifdef __STATISTICS__
    883884                if ( likely( size > 0 ) ) {
     
    894895
    895896        // Same as malloc() except size bytes is an array of dim elements each of elemSize bytes.
    896         void * aalloc( size_t dim, size_t elemSize ) {
     897        void * aalloc( size_t dim, size_t elemSize ) libcfa_public {
    897898                size_t size = dim * elemSize;
    898899                #ifdef __STATISTICS__
     
    910911
    911912        // Same as aalloc() with memory set to zero.
    912         void * calloc( size_t dim, size_t elemSize ) {
     913        void * calloc( size_t dim, size_t elemSize ) libcfa_public {
    913914                size_t size = dim * elemSize;
    914915          if ( unlikely( size ) == 0 ) {                        // 0 BYTE ALLOCATION RETURNS NULL POINTER
     
    951952        // not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier
    952953        // call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done.
    953         void * resize( void * oaddr, size_t size ) {
     954        void * resize( void * oaddr, size_t size ) libcfa_public {
    954955                // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
    955956          if ( unlikely( size == 0 ) ) {                                        // special cases
     
    996997        // Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of
    997998        // the old and new sizes.
    998         void * realloc( void * oaddr, size_t size ) {
     999        void * realloc( void * oaddr, size_t size ) libcfa_public {
    9991000                // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
    10001001          if ( unlikely( size == 0 ) ) {                                        // special cases
     
    10601061
    10611062        // Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize.
    1062         void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) {
     1063        void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) libcfa_public {
    10631064                return realloc( oaddr, dim * elemSize );
    10641065        } // reallocarray
     
    10661067
    10671068        // Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete)
    1068         void * memalign( size_t alignment, size_t size ) {
     1069        void * memalign( size_t alignment, size_t size ) libcfa_public {
    10691070                #ifdef __STATISTICS__
    10701071                if ( likely( size > 0 ) ) {
     
    10811082
    10821083        // Same as aalloc() with memory alignment.
    1083         void * amemalign( size_t alignment, size_t dim, size_t elemSize ) {
     1084        void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
    10841085                size_t size = dim * elemSize;
    10851086                #ifdef __STATISTICS__
     
    10971098
    10981099        // Same as calloc() with memory alignment.
    1099         void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) {
     1100        void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
    11001101                size_t size = dim * elemSize;
    11011102          if ( unlikely( size ) == 0 ) {                                        // 0 BYTE ALLOCATION RETURNS NULL POINTER
     
    11361137        // Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
    11371138        // of alignment. This requirement is universally ignored.
    1138         void * aligned_alloc( size_t alignment, size_t size ) {
     1139        void * aligned_alloc( size_t alignment, size_t size ) libcfa_public {
    11391140                return memalign( alignment, size );
    11401141        } // aligned_alloc
     
    11451146        // is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to
    11461147        // free(3).
    1147         int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
     1148        int posix_memalign( void ** memptr, size_t alignment, size_t size ) libcfa_public {
    11481149          if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment
    11491150                *memptr = memalign( alignment, size );
     
    11541155        // Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
    11551156        // page size.  It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
    1156         void * valloc( size_t size ) {
     1157        void * valloc( size_t size ) libcfa_public {
    11571158                return memalign( __page_size, size );
    11581159        } // valloc
     
    11601161
    11611162        // Same as valloc but rounds size to multiple of page size.
    1162         void * pvalloc( size_t size ) {
     1163        void * pvalloc( size_t size ) libcfa_public {
    11631164                return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size
    11641165        } // pvalloc
     
    11681169        // or realloc().  Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is
    11691170        // 0p, no operation is performed.
    1170         void free( void * addr ) {
     1171        void free( void * addr ) libcfa_public {
    11711172          if ( unlikely( addr == 0p ) ) {                                       // special case
    11721173                        #ifdef __STATISTICS__
     
    11891190
    11901191        // Returns the alignment of an allocation.
    1191         size_t malloc_alignment( void * addr ) {
     1192        size_t malloc_alignment( void * addr ) libcfa_public {
    11921193          if ( unlikely( addr == 0p ) ) return libAlign();      // minimum alignment
    11931194                Heap.Storage.Header * header = HeaderAddr( addr );
     
    12011202
    12021203        // Returns true if the allocation is zero filled, e.g., allocated by calloc().
    1203         bool malloc_zero_fill( void * addr ) {
     1204        bool malloc_zero_fill( void * addr ) libcfa_public {
    12041205          if ( unlikely( addr == 0p ) ) return false;           // null allocation is not zero fill
    12051206                Heap.Storage.Header * header = HeaderAddr( addr );
     
    12121213
    12131214        // Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T).
    1214         size_t malloc_size( void * addr ) {
     1215        size_t malloc_size( void * addr ) libcfa_public {
    12151216          if ( unlikely( addr == 0p ) ) return 0;                       // null allocation has zero size
    12161217                Heap.Storage.Header * header = HeaderAddr( addr );
     
    12241225        // Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by
    12251226        // malloc or a related function.
    1226         size_t malloc_usable_size( void * addr ) {
     1227        size_t malloc_usable_size( void * addr ) libcfa_public {
    12271228          if ( unlikely( addr == 0p ) ) return 0;                       // null allocation has 0 size
    12281229                Heap.Storage.Header * header;
     
    12361237
    12371238        // Prints (on default standard error) statistics about memory allocated by malloc and related functions.
    1238         void malloc_stats( void ) {
     1239        void malloc_stats( void ) libcfa_public {
    12391240                #ifdef __STATISTICS__
    12401241                printStats();
     
    12451246
    12461247        // Changes the file descriptor where malloc_stats() writes statistics.
    1247         int malloc_stats_fd( int fd __attribute__(( unused )) ) {
     1248        int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public {
    12481249                #ifdef __STATISTICS__
    12491250                int temp = stats_fd;
     
    12591260        // The string is printed on the file stream stream.  The exported string includes information about all arenas (see
    12601261        // malloc).
    1261         int malloc_info( int options, FILE * stream __attribute__(( unused )) ) {
     1262        int malloc_info( int options, FILE * stream __attribute__(( unused )) ) libcfa_public {
    12621263          if ( options != 0 ) { errno = EINVAL; return -1; }
    12631264                #ifdef __STATISTICS__
     
    12711272        // Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The param argument
    12721273        // specifies the parameter to be modified, and value specifies the new value for that parameter.
    1273         int mallopt( int option, int value ) {
     1274        int mallopt( int option, int value ) libcfa_public {
    12741275          if ( value < 0 ) return 0;
    12751276                choose( option ) {
     
    12851286
    12861287        // Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument).
    1287         int malloc_trim( size_t ) {
     1288        int malloc_trim( size_t ) libcfa_public {
    12881289                return 0;                                                                               // => impossible to release memory
    12891290        } // malloc_trim
     
    12941295        // structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function
    12951296        // result.  (The caller must free this memory.)
    1296         void * malloc_get_state( void ) {
     1297        void * malloc_get_state( void ) libcfa_public {
    12971298                return 0p;                                                                              // unsupported
    12981299        } // malloc_get_state
     
    13011302        // Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data
    13021303        // structure pointed to by state.
    1303         int malloc_set_state( void * ) {
     1304        int malloc_set_state( void * ) libcfa_public {
    13041305                return 0;                                                                               // unsupported
    13051306        } // malloc_set_state
     
    13071308
    13081309        // Sets the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation.
    1309         __attribute__((weak)) size_t malloc_expansion() { return __CFA_DEFAULT_HEAP_EXPANSION__; }
     1310        __attribute__((weak)) size_t malloc_expansion() libcfa_public { return __CFA_DEFAULT_HEAP_EXPANSION__; }
    13101311
    13111312        // Sets the crossover point between allocations occurring in the sbrk area or separately mmapped.
    1312         __attribute__((weak)) size_t malloc_mmap_start() { return __CFA_DEFAULT_MMAP_START__; }
     1313        __attribute__((weak)) size_t malloc_mmap_start() libcfa_public { return __CFA_DEFAULT_MMAP_START__; }
    13131314
    13141315        // Amount subtracted to adjust for unfreed program storage (debug only).
    1315         __attribute__((weak)) size_t malloc_unfreed() { return __CFA_DEFAULT_HEAP_UNFREED__; }
     1316        __attribute__((weak)) size_t malloc_unfreed() libcfa_public { return __CFA_DEFAULT_HEAP_UNFREED__; }
    13161317} // extern "C"
    13171318
    13181319
    13191320// Must have CFA linkage to overload with C linkage realloc.
    1320 void * resize( void * oaddr, size_t nalign, size_t size ) {
     1321void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public {
    13211322        // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
    13221323  if ( unlikely( size == 0 ) ) {                                                // special cases
     
    13801381
    13811382
    1382 void * realloc( void * oaddr, size_t nalign, size_t size ) {
     1383void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public {
    13831384        // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
    13841385  if ( unlikely( size == 0 ) ) {                                                // special cases
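heap.cfa takes the finer-grained approach: rather than a file-wide pragma, each public entry point carries an explicit `libcfa_public` annotation, and everything left unmarked stays hidden. A plausible reading of the annotation (assumed definition; the real macro lives in the libcfa internal headers):

	// assumed definition of the marker used throughout this file
	#define libcfa_public __attribute__(( visibility("default") ))

	void * malloc( size_t size ) libcfa_public;  // exported C entry point
	static size_t prtFree( Heap & manager );     // helper, never exported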
  • libcfa/src/interpose.cfa

    r015925a re5d9274  
    3636//=============================================================================================
    3737
    38 void preload_libgcc(void) {
     38static void preload_libgcc(void) {
    3939        dlopen( "libgcc_s.so.1", RTLD_NOW );
    4040        if ( const char * error = dlerror() ) abort( "interpose_symbol : internal error pre-loading libgcc, %s\n", error );
     
    4242
    4343typedef void (* generic_fptr_t)(void);
    44 generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) {
     44static generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) {
    4545        const char * error;
    4646
     
    8383//=============================================================================================
    8484
    85 void sigHandler_segv( __CFA_SIGPARMS__ );
    86 void sigHandler_ill ( __CFA_SIGPARMS__ );
    87 void sigHandler_fpe ( __CFA_SIGPARMS__ );
    88 void sigHandler_abrt( __CFA_SIGPARMS__ );
    89 void sigHandler_term( __CFA_SIGPARMS__ );
    90 
    91 struct {
     85static void sigHandler_segv( __CFA_SIGPARMS__ );
     86static void sigHandler_ill ( __CFA_SIGPARMS__ );
     87static void sigHandler_fpe ( __CFA_SIGPARMS__ );
     88static void sigHandler_abrt( __CFA_SIGPARMS__ );
     89static void sigHandler_term( __CFA_SIGPARMS__ );
     90
     91static struct {
    9292        void (* exit)( int ) __attribute__(( __noreturn__ ));
    9393        void (* abort)( void ) __attribute__(( __noreturn__ ));
    9494} __cabi_libc;
    9595
    96 int cfa_main_returned;
     96libcfa_public int cfa_main_returned;
    9797
    9898extern "C" {
     
    148148
    149149// Forward declare abort after the __typeof__ call to avoid ambiguities
    150 void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
    151 void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ ));
    152 void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
    153 void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ ));
     150libcfa_public void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
     151libcfa_public void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ ));
     152libcfa_public void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
     153libcfa_public void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ ));
    154154
    155155extern "C" {
    156         void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
     156        libcfa_public void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
    157157                abort( false, "%s", "" );
    158158        }
    159159
    160         void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) {
     160        libcfa_public void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) {
    161161                va_list argp;
    162162                va_start( argp, fmt );
     
    165165        }
    166166
    167         void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
     167        libcfa_public void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
    168168                __cabi_libc.exit( status );
    169169        }
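Making `interpose_symbol` and the signal handlers static keeps the interposition machinery internal now that hidden symbols are the norm. The lookup itself is the standard dlfcn pattern; a simplified sketch under assumed details (the real version in this file also handles more error cases than shown):

	#include <dlfcn.h>  // dlsym, dlvsym, dlerror, RTLD_NEXT (needs _GNU_SOURCE)

	typedef void (* generic_fptr_t)(void);

	static generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) {
		dlerror();  // clear any stale error state before the lookup
		generic_fptr_t fptr = version
			? (generic_fptr_t)dlvsym( RTLD_NEXT, symbol, version )
			: (generic_fptr_t)dlsym( RTLD_NEXT, symbol );
		if ( const char * error = dlerror() ) abort( "interpose_symbol : %s\n", error );
		return fptr;
	}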
  • libcfa/src/iostream.cfa

    r015925a re5d9274  
    3232#include "bitmanip.hfa"                                                                 // high1
    3333
     34#pragma GCC visibility push(default)
    3435
    3536// *********************************** ostream ***********************************
  • libcfa/src/limits.cfa

    r015925a re5d9274  
    2020#include <complex.h>
    2121#include "limits.hfa"
     22
     23#pragma GCC visibility push(default)
    2224
    2325// Integral Constants
  • libcfa/src/memory.cfa

    r015925a re5d9274  
    1616#include "memory.hfa"
    1717#include "stdlib.hfa"
     18
     19#pragma GCC visibility push(default)
    1820
    1921// Internal data object.
  • libcfa/src/parseargs.cfa

    r015925a re5d9274  
    2424#include "common.hfa"
    2525#include "limits.hfa"
     26
     27#pragma GCC visibility push(default)
    2628
    2729extern int cfa_args_argc __attribute__((weak));
  • libcfa/src/parseconfig.cfa

    r015925a re5d9274  
    1414
    1515
     16#pragma GCC visibility push(default)
     17
    1618// *********************************** exceptions ***********************************
    1719
    1820
    1921// TODO: Add names of missing config entries to exception (see further below)
    20 static vtable(Missing_Config_Entries) Missing_Config_Entries_vt;
     22vtable(Missing_Config_Entries) Missing_Config_Entries_vt;
    2123
    2224[ void ] ?{}( & Missing_Config_Entries this, unsigned int num_missing ) {
     
    3133
    3234
    33 static vtable(Parse_Failure) Parse_Failure_vt;
     35vtable(Parse_Failure) Parse_Failure_vt;
    3436
    3537[ void ] ?{}( & Parse_Failure this, [] char failed_key, [] char failed_value ) {
     
    5355
    5456
    55 static vtable(Validation_Failure) Validation_Failure_vt;
     57vtable(Validation_Failure) Validation_Failure_vt;
    5658
    5759[ void ] ?{}( & Validation_Failure this, [] char failed_key, [] char failed_value ) {
     
    110112
    111113
    112 [ bool ] comments( & ifstream in, [] char name ) {
     114static [ bool ] comments( & ifstream in, [] char name ) {
    113115        while () {
    114116                in | name;
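Dropping `static` from the three vtable instances, under the new default-visibility push, turns them into exported symbols, so exceptions raised inside parseconfig can be matched from user translation units. A hypothetical catch site (the call and its signature are assumed; see parseconfig.hfa for the real entry points):

	try {
		parse_config( "app.conf", entries, num_entries );  // illustrative call
	} catch ( Missing_Config_Entries * ex ) {
		// matching here links against the now-exported Missing_Config_Entries_vt
	}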
  • libcfa/src/rational.cfa

    r015925a re5d9274  
    1717#include "fstream.hfa"
    1818#include "stdlib.hfa"
     19
     20#pragma GCC visibility push(default)
    1921
    2022forall( T | Arithmetic( T ) ) {
  • libcfa/src/startup.cfa

    r015925a re5d9274  
    4141        } // __cfaabi_appready_shutdown
    4242
    43         void disable_interrupts() __attribute__(( weak )) {}
    44         void enable_interrupts() __attribute__(( weak )) {}
     43        void disable_interrupts() __attribute__(( weak )) libcfa_public {}
     44        void enable_interrupts() __attribute__(( weak )) libcfa_public {}
    4545
    4646
     
    6464struct __spinlock_t;
    6565extern "C" {
    66         void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) {}
     66        void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) libcfa_public {}
    6767}
    6868
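The weak attribute on these stubs is what allows the threading library to supplant them: sequential libcfa ships exported no-op defaults, and a strong definition in libcfathread wins at link time. A sketch of the pattern (the strong override shown only as a comment, since it lives in a separate translation unit):

	// in libcfa (sequential runtime): weak, exported no-op default
	__attribute__(( weak )) void disable_interrupts() {}

	// in libcfathread: an ordinary (strong) definition of the same symbol
	// replaces the weak one when both are linked:
	//   void disable_interrupts() { /* really mask preemption */ }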
  • libcfa/src/stdlib.cfa

    r015925a re5d9274  
    2525#include <complex.h>                                                                    // _Complex_I
    2626#include <assert.h>
     27
     28#pragma GCC visibility push(default)
    2729
    2830//---------------------------------------
     
    225227#define GENERATOR LCG
    226228
    227 uint32_t __global_random_seed;                                                  // sequential/concurrent
    228 uint32_t __global_random_state;                                                 // sequential only
     229// would be cool to make hidden but it's needed for libcfathread
     230__attribute__((visibility("default"))) uint32_t __global_random_seed;                                                   // sequential/concurrent
     231__attribute__((visibility("hidden"))) uint32_t __global_random_state;                                                   // sequential only
    229232
    230233void set_seed( PRNG & prng, uint32_t seed_ ) with( prng ) { state = seed = seed_; GENERATOR( state ); } // set seed
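Even under the file-wide `push(default)` added above, an explicit per-symbol attribute takes precedence, which is how the two PRNG globals end up with different visibility: the seed is exported for libcfathread's use while the sequential-only state stays internal. A sketch of the precedence rule with illustrative names:

	#pragma GCC visibility push(default)
	__attribute__((visibility("default"))) uint32_t shared_seed;  // exported (matches the pragma)
	__attribute__((visibility("hidden")))  uint32_t local_state;  // attribute overrides the pragma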
  • libcfa/src/strstream.cfa

    r015925a re5d9274  
    1 // 
     1//
    22// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
    3 // 
     3//
    44// The contents of this file are covered under the licence agreement in the
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // strstream.cfa -- 
    8 // 
     7// strstream.cfa --
     8//
    99// Author           : Peter A. Buhr
    1010// Created On       : Thu Apr 22 22:24:35 2021
     
    1212// Last Modified On : Sun Oct 10 16:13:20 2021
    1313// Update Count     : 101
    14 // 
     14//
    1515
    1616#include "strstream.hfa"
     
    2424#include <unistd.h>                                                                             // sbrk, sysconf
    2525
     26#pragma GCC visibility push(default)
    2627
    2728// *********************************** strstream ***********************************
  • libcfa/src/time.cfa

    r015925a re5d9274  
    1818#include <stdio.h>                                                                              // snprintf
    1919#include <assert.h>
     20
     21#pragma GCC visibility push(default)
    2022
    2123static char * nanomsd( long int ns, char * buf ) {              // most significant digits
  • libcfa/src/virtual.c

    r015925a re5d9274  
    1616#include "virtual.h"
    1717#include "assert.h"
     18
     19#pragma GCC visibility push(default)
    1820
    1921int __cfavir_is_parent(
  • src/AST/Expr.cpp

    r015925a re5d9274  
    1010// Created On       : Wed May 15 17:00:00 2019
    1111// Last Modified By : Andrew Beach
    12 // Created On       : Tue Nov 30 14:23:00 2021
    13 // Update Count     : 7
     12// Last Modified On : Wed May 18 13:56:00 2022
     13// Update Count     : 8
    1414//
    1515
     
    2121
    2222#include "Copy.hpp"                // for shallowCopy
    23 #include "Eval.hpp"                // for call
    2423#include "GenericSubstitution.hpp"
    2524#include "LinkageSpec.hpp"
     
    6766// --- UntypedExpr
    6867
     68bool UntypedExpr::get_lvalue() const {
     69        std::string fname = InitTweak::getFunctionName( this );
     70        return lvalueFunctionNames.count( fname );
     71}
     72
    6973UntypedExpr * UntypedExpr::createDeref( const CodeLocation & loc, const Expr * arg ) {
    7074        assert( arg );
    7175
    72         UntypedExpr * ret = call( loc, "*?", arg );
     76        UntypedExpr * ret = createCall( loc, "*?", { arg } );
    7377        if ( const Type * ty = arg->result ) {
    7478                const Type * base = InitTweak::getPointerBase( ty );
     
    8791}
    8892
    89 bool UntypedExpr::get_lvalue() const {
    90         std::string fname = InitTweak::getFunctionName( this );
    91         return lvalueFunctionNames.count( fname );
    92 }
    93 
    9493UntypedExpr * UntypedExpr::createAssign( const CodeLocation & loc, const Expr * lhs, const Expr * rhs ) {
    9594        assert( lhs && rhs );
    9695
    97         UntypedExpr * ret = call( loc, "?=?", lhs, rhs );
     96        UntypedExpr * ret = createCall( loc, "?=?", { lhs, rhs } );
    9897        if ( lhs->result && rhs->result ) {
    9998                // if both expressions are typed, assumes that this assignment is a C bitwise assignment,
     
    102101        }
    103102        return ret;
     103}
     104
     105UntypedExpr * UntypedExpr::createCall( const CodeLocation & loc,
     106                const std::string & name, std::vector<ptr<Expr>> && args ) {
     107        return new UntypedExpr( loc,
     108                        new NameExpr( loc, name ), std::move( args ) );
    104109}
    105110
  • src/AST/Expr.hpp

    r015925a re5d9274  
    230230        /// Creates a new assignment expression
    231231        static UntypedExpr * createAssign( const CodeLocation & loc, const Expr * lhs, const Expr * rhs );
     232        /// Creates a new call of a variable.
     233        static UntypedExpr * createCall( const CodeLocation & loc,
     234                const std::string & name, std::vector<ptr<Expr>> && args );
    232235
    233236        const Expr * accept( Visitor & v ) const override { return v.visit( this ); }
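With AST/Eval.hpp's `call` helper removed from the build, passes construct untyped calls through the new `createCall` factory, handing over the argument list as a vector. A hypothetical call site in C++ (surrounding declarations assumed):

	// builds ?=?( lhs, rhs ) without hand-assembling the NameExpr wrapper:
	// equivalent to new UntypedExpr( loc, new NameExpr( loc, "?=?" ), { lhs, rhs } )
	ast::UntypedExpr * assign =
		ast::UntypedExpr::createCall( loc, "?=?", { lhs, rhs } );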
  • src/AST/module.mk

    r015925a re5d9274  
    2929        AST/DeclReplacer.cpp \
    3030        AST/DeclReplacer.hpp \
    31         AST/Eval.hpp \
    3231        AST/Expr.cpp \
    3332        AST/Expr.hpp \
  • src/CodeGen/CodeGenerator.cc

    r015925a re5d9274  
    12381238} // namespace CodeGen
    12391239
    1240 
    1241 unsigned Indenter::tabsize = 2;
    1242 
    1243 std::ostream & operator<<( std::ostream & out, const BaseSyntaxNode * node ) {
    1244         if ( node ) {
    1245                 node->print( out );
    1246         } else {
    1247                 out << "nullptr";
    1248         }
    1249         return out;
    1250 }
    1251 
    12521240// Local Variables: //
    12531241// tab-width: 4 //
  • src/CodeGen/FixMain.cc

    r015925a re5d9274  
    4949
    5050}
    51 
    52         bool FixMain::replace_main = false;
    5351
    5452        template<typename container>
  • src/CodeGen/GenType.cc

    r015925a re5d9274  
    1010// Created On       : Mon May 18 07:44:20 2015
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Wed May  1 15:24:00 2019