# Changeset e5d9274

Timestamp: Jun 2, 2022, 3:11:21 PM (8 months ago)
Branches:
Children: ced5e2a
Parents: 015925a (diff), fc134a48 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Files: 1 deleted, 133 edited

• ## benchmark/plot.py

r015925a

```diff
 class Field:
-	def __init__(self, unit, _min, _log):
+	def __init__(self, unit, _min, _log, _name=None):
 		self.unit = unit
 		self.min  = _min
 		self.log  = _log
+		self.name = _name

 field_names = {
 	"Ops per procs"         : Field('Ops'   , 0, False),
 	"Ops per threads"       : Field('Ops'   , 0, False),
-	"ns per ops/procs"      : Field('ns'    , 0, False),
+	"ns per ops/procs"      : Field(''      , 0, False, _name = "Latency (ns $/$ (Processor $\\times$ Operation))" ),
 	"Number of threads"     : Field(''      , 1, False),
 	"Total Operations(ops)" : Field('Ops'   , 0, False),
 	"Ops/sec/procs"         : Field('Ops'   , 0, False),
 	"Total blocks"          : Field('Blocks', 0, False),
-	"Ops per second"        : Field('Ops'   , 0, False),
+	"Ops per second"        : Field(''      , 0, False),
 	"Cycle size (# thrds)"  : Field('thrd'  , 1, False),
 	"Duration (ms)"         : Field('ms'    , 0, False),
 }

-def plot(in_data, x, y, out):
+def plot(in_data, x, y, options):
 	fig, ax = plt.subplots()
 	colors = itertools.cycle(['#0095e3','#006cb4','#69df00','#0aa000','#fb0300','#e30002','#fd8f00','#ff7f00','#8f00d6','#4b009a','#ffff00','#b13f00'])
 	print("Finishing Plots")
-	plt.ylabel(y)
+	plt.ylabel(field_names[y].name if field_names[y].name else y)
 	# plt.xticks(range(1, math.ceil(mx) + 1))
-	plt.xlabel(x)
+	plt.xlabel(field_names[x].name if field_names[x].name else x)
 	plt.grid(b = True)
 	ax.xaxis.set_major_formatter( EngFormatter(unit=field_names[x].unit) )
-	if field_names[x].log:
+	if options.logx:
+		ax.set_xscale('log')
+	elif field_names[x].log:
 		ax.set_xscale('log')
 	else:
 	ax.yaxis.set_major_formatter( EngFormatter(unit=field_names[y].unit) )
-	if field_names[y].log:
+	if options.logy:
+		ax.set_yscale('log')
+	elif field_names[y].log:
 		ax.set_yscale('log')
 	else:
-		plt.ylim(field_names[y].min, my*1.2)
+		plt.ylim(field_names[y].min, options.MaxY if options.MaxY else my*1.2)
 	plt.legend(loc='upper left')
 	print("Results Ready")
-	if out:
-		plt.savefig(out)
+	if options.out:
+		plt.savefig(options.out, bbox_inches='tight')
 	else:
 		plt.show()

 	parser.add_argument('-y', nargs='?', type=str, default="", help="Which field to use as the Y axis")
 	parser.add_argument('-x', nargs='?', type=str, default="", help="Which field to use as the X axis")
+	parser.add_argument('--logx', action='store_true', help="if set, makes the x-axis logscale")
+	parser.add_argument('--logy', action='store_true', help="if set, makes the y-axis logscale")
+	parser.add_argument('--MaxY', nargs='?', type=int, help="maximum value of the y-axis")
 	options =  parser.parse_args()

-	plot(data, wantx, wanty, options.out)
+	plot(data, wantx, wanty, options)
```
• ## doc/theses/mubeen_zulfiqar_MMath/allocator.tex

r015925a

```diff
 llheap's design was reviewed and changed multiple times throughout the thesis.
 Some of the rejected designs are discussed because they show the path to the final design (see discussion in \VRef{s:MultipleHeaps}).
-Note, a few simples tests for a design choice were compared with the current best allocators to determine the viability of a design.
+Note, a few simple tests for a design choice were compared with the current best allocators to determine the viability of a design.
 These designs look at the allocation/free \newterm{fastpath}, \ie when an allocation can immediately return free storage or returned storage is not coalesced.

 \paragraph{T:1 model}
-\VRef[Figure]{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are N-shared across KTs.
-This design leverages the fact that 95\% of allocation requests are less than 1024 bytes and there are only 3--5 different request sizes.
+\VRef[Figure]{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are shared among N KTs.
+This design leverages the fact that usually the allocation requests are less than 1024 bytes and there are only a few different request sizes.
 When KTs $\le$ N, the common bucket sizes are uncontented;
 when KTs $>$ N, the free buckets are contented and latency increases significantly.

 \paragraph{T:H model}
-\VRef[Figure]{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded across the KTs.
+\VRef[Figure]{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded (distributed) across the KTs.
 A KT can point directly to its assigned heap or indirectly through the corresponding heap bucket.
-When KT $\le$ N, the heaps are uncontented;
+When KT $\le$ N, the heaps might be uncontented;
 when KTs $>$ N, the heaps are contented.
 In all cases, a KT must acquire/release a lock, contented or uncontented along the fast allocation path because a heap is shared.
-By adjusting N upwards, this approach reduces contention but increases storage (time versus space);
+By increasing N, this approach reduces contention but increases storage (time versus space);
 however, picking N is workload specific.
 Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}.
 \begin{quote}
-A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}
+A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable}
 \end{quote}
 If a KT is preempted during an allocation operation, the operating system can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness.
 (See \VRef[Figure]{f:THSharedHeaps} but with a heap bucket per KT and no bucket or local-pool lock.)
 Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted.
-Heaps are uncontended for a KT's memory operations to its heap (modulo operations on the global pool and ownership).
+Heaps are uncontended for a KT's memory operations as every KT has its own thread-local heap, modulo operations on the global pool and ownership.
```
```diff
 Problems:
 \begin{itemize}
 \item
-Need to know when a KT is starts/terminates to create/delete its heap.
+Need to know when a KT starts/terminates to create/delete its heap.
 \noindent
 In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs.
-Since the number of CPUs is relatively small, >~1024, and a heap relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
+Since the number of CPUs is relatively small, and a heap is also relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.
 \item
 There is the same serially-reusable problem with UTs migrating across KTs.
 \noindent
 The conclusion from this design exercise is: any atomic fence, atomic instruction (lock free), or lock along the allocation fastpath produces significant slowdown.
-For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps maybe shared by multiple threads, even when KTs $\le$ N.
+For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps might be shared by multiple threads, even when KTs $\le$ N.
 For the T:H=CPU and 1:1 models, locking is eliminated along the allocation fastpath.
 However, T:H=CPU has poor operating-system support to determine the CPU id (heap id) and prevent the serially-reusable problem for KTs.
 More operating system support is required to make this model viable, but there is still the serially-reusable problem with user-level threading.
-Leaving the 1:1 model with no atomic actions along the fastpath and no special operating-system support required.
+So the 1:1 model had no atomic actions along the fastpath and no special operating-system support requirements.
 The 1:1 model still has the serially-reusable problem with user-level threading, which is addressed in \VRef{s:UserlevelThreadingSupport}, and the greatest potential for heap blowup for certain allocation patterns.
 Ideally latency is $O(1)$ with a small constant.
-To obtain $O(1)$ internal latency means no searching on the allocation fastpath, largely prohibits coalescing, which leads to external fragmentation.
+To obtain $O(1)$ internal latency means no searching on the allocation fastpath and largely prohibits coalescing, which leads to external fragmentation.
 The mitigating factor is that most programs have well behaved allocation patterns, where the majority of allocation operations can be $O(1)$, and heap blowup does not occur without coalescing (although the allocation footprint may be slightly larger).
 llheap starts by creating an array of $N$ global heaps from storage obtained using @mmap@, where $N$ is the number of computer cores, that persists for program duration.
 There is a global bump-pointer to the next free heap in the array.
-When this array is exhausted, another array is allocated.
-There is a global top pointer for a heap intrusive link to chain free heaps from terminated threads.
-When statistics are turned on, there is a global top pointer for a heap intrusive link to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.
+When this array is exhausted, another array of heaps is allocated.
+There is a global top pointer for an intrusive linked-list to chain free heaps from terminated threads.
+When statistics are turned on, there is a global top pointer for an intrusive linked-list to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.
 When a KT starts, a heap is allocated from the current array for exclusive use by the KT.
-When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of heaps.
-The free heaps is a stack so hot storage is reused first.
-Preserving all heaps created during the program lifetime, solves the storage lifetime problem, when ownership is used.
+When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of the number of heaps.
+The free heaps are stored on a stack so hot storage is reused first.
+Preserving all heaps, created during the program lifetime, solves the storage lifetime problem when ownership is used.
 This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially.
 llheap can be configured with object ownership, where an object is freed to the heap from which it is allocated, or object no-ownership, where an object is freed to the KT's current heap.
 Each heap uses segregated free-buckets that have free objects distributed across 91 different sizes from 16 to 4M.
 All objects in a bucket are of the same size.
 The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the operating system.
 Each free bucket of a specific size has the following two lists:
 Quantizing is performed using a binary search over the ordered bucket array.
 An optional optimization is fast lookup $O(1)$ for sizes < 64K from a 64K array of type @char@, where each element has an index to the corresponding bucket.
-(Type @char@ restricts the number of bucket sizes to 256.)
+The @char@ type restricts the number of bucket sizes to 256.
 For $S$ > 64K, a binary search is used.
 Then, the allocation storage is obtained from the following locations (in order), with increasing latency.
 Then the corresponding bucket of the owner thread is computed for the deallocating thread, and the allocation is pushed onto the deallocating thread's bucket.
-Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through routines @malloc@/@free@, which are the only routines to directly access and manage the internal data structures of the heap.
+Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through the @malloc@ and @free@ routines, which are the only routines to directly access and manage the internal data structures of the heap.
 Other allocation operations, \eg @calloc@, @memalign@, and @realloc@, are composed of calls to @malloc@ and possibly @free@, and may manipulate header information after storage is allocated.
 This design simplifies heap-management code during development and maintenance.

 \subsection{Alignment}
-All dynamic memory allocations must have a minimum storage alignment for the contained object(s).
+Most dynamic memory allocations have a minimum storage alignment for the contained object(s).
```
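For context, the size-to-bucket quantization in the hunk above is easy to sketch in C. This is an illustrative reconstruction, not llheap's source: the geometric-ish size series, `buildBuckets`, `buildLookup`, and `sizeToBucket` are hypothetical, although the 91 buckets, the 64K `char` lookup array (hence at most 256 buckets), and the binary search above 64K come from the text.

```c
#include <stdio.h>
#include <stddef.h>

#define NBUCKETS 91                        // llheap: 91 bucket sizes, 16 bytes to 4M
static size_t bucketSizes[NBUCKETS];       // ordered, ascending
static unsigned char lookup[64 * 1024];    // O(1) size -> bucket index for sizes < 64K

static void buildBuckets(void) {           // hypothetical size series, llheap's differs
	size_t s = 16;
	for (unsigned int b = 0; b < NBUCKETS; b += 1) {
		bucketSizes[b] = s;
		s = (s + s / 8 + 8 + 15) & ~(size_t)15;   // grow ~14%, keep 16-byte multiples
	}
}

static void buildLookup(void) {            // lookup[s] = smallest bucket holding s bytes
	unsigned int b = 0;
	for (size_t s = 1; s < sizeof(lookup); s += 1) {
		while (bucketSizes[b] < s) b += 1;
		lookup[s] = (unsigned char)b;      // char element => at most 256 bucket sizes
	}
}

static unsigned int sizeToBucket(size_t size) {
	if (size < sizeof(lookup)) return lookup[size];   // O(1) fast lookup for sizes < 64K
	unsigned int lo = 0, hi = NBUCKETS - 1;           // binary search for larger sizes;
	while (lo < hi) {                                 // beyond the largest bucket, llheap mmaps (not shown)
		unsigned int mid = (lo + hi) / 2;
		if (bucketSizes[mid] < size) lo = mid + 1; else hi = mid;
	}
	return lo;
}

int main(void) {
	buildBuckets();  buildLookup();
	printf("42 -> bucket %u (%zu bytes)\n", sizeToBucket(42), bucketSizes[sizeToBucket(42)]);
}
```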
```diff
 Often the minimum memory alignment, M, is the bus width (32 or 64-bit) or the largest register (double, long double) or largest atomic instruction (DCAS) or vector data (MMMX).
 In general, the minimum storage alignment is 8/16-byte boundary on 32/64-bit computers.
 For consistency, the object header is normally aligned at this same boundary.
-Larger alignments must be a power of 2, such page alignment (4/8K).
+Larger alignments must be a power of 2, such as page alignment (4/8K).
 Any alignment request, N, $\le$ the minimum alignment is handled as a normal allocation with minimal alignment.
 \end{center}
 The storage between @E@ and @H@ is chained onto the appropriate free list for future allocations.
-This approach is also valid within any sufficiently large free block, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage.
+The same approach is used for sufficiently large free blocks, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage.
 In this approach, the aligned address @A@ is the same as the allocated storage address @P@, \ie @P@ $=$ @A@ for all allocation routines, which simplifies deallocation.
 However, if there are a large number of aligned requests, this approach leads to memory fragmentation from the small free areas around the aligned object.
 Finally, this approach is incompatible with allocator designs that funnel allocation requests through @malloc@ as it directly manipulates management information within the allocator to optimize the space/time of a request.
-Instead, llheap alignment is accomplished by making a \emph{pessimistically} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg:
+Instead, llheap alignment is accomplished by making a \emph{pessimistic} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg:
 \begin{center}
-\input{Alignment2}
+\input{Alignment2Impl}
 \end{center}
-Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments of @M@ or greater.
+Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments greater than @M@.
 When @P@ $\neq$ @A@, the minimum distance between @P@ and @A@ is @M@ bytes, due to the pessimistic storage-allocation.
 Therefore, there is always room for an @M@-byte fake header before @A@.

 \label{s:ReallocStickyProperties}
-Allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually.
+The allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually.
 \begin{flushleft}
 \begin{tabular}{ll}
 The realloc pattern leverages available storage at the end of an allocation due to bucket sizes, possibly eliminating a new allocation and copying.
 This pattern is not used enough to reduce storage management costs.
-In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the pattern.
+In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the allocation pattern.
```
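A minimal C sketch of the pessimistic alignment scheme in this hunk, assuming a hypothetical `FakeHeader` layout; llheap's actual header and deallocation path differ:

```c
#include <stdint.h>
#include <stdlib.h>

enum { M = 16 };                           // assumed minimum allocation alignment

typedef struct { uint64_t offset; } FakeHeader;   // hypothetical fake header: distance A - P

void * alignedMalloc(size_t align, size_t size) {
	if (align <= M) return malloc(size);   // requests <= M are normal allocations
	// pessimistic request: enough room to slide forward to an aligned address A
	char * P = malloc(size + align - 1 + sizeof(FakeHeader));
	if (P == NULL) return NULL;
	uintptr_t A = ((uintptr_t)P + sizeof(FakeHeader) + align - 1) & ~((uintptr_t)align - 1);
	// since malloc is M-aligned and align > M, A - P >= M, so an M-byte
	// fake header always fits immediately before A
	FakeHeader * fh = (FakeHeader *)A - 1;
	fh->offset = A - (uintptr_t)P;         // free() would use this to recover P
	return (void *)A;
}
```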
```diff
 The hidden problem for this pattern is the effect of zero fill and alignment with respect to reallocation.
 Are these properties transient or persistent (``sticky'')?
-For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size.
-That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are initial allocation properties preserve?
+For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size?
+That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are initial allocation properties preserved?
 Currently, allocation properties are not preserved, so subsequent use of @realloc@ storage may cause inefficient execution or errors due to lack of zero fill or alignment.
 This silent problem is unintuitive to programmers and difficult to locate because it is transient.
 To preserve allocation properties requires storing additional information with an allocation,
-The only available location is the header, where \VRef[Figure]{f:llheapNormalHeader} shows the llheap storage layout.
+The best available option is the header, where \VRef[Figure]{f:llheapNormalHeader} shows the llheap storage layout.
 The header has two data fields sized appropriately for 32/64-bit alignment requirements.
 The first field is a union of three values:
 \end{description}
 The second field remembers the request size versus the allocation (bucket) size, \eg request 42 bytes which is rounded up to 64 bytes.
-Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors.
+Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors and also helps in memory management.
 \begin{figure}
 \end{figure}
-The low-order 3-bits of the first field are \emph{unused} for any stored values, whereas the second field may use all of its bits.
+The low-order 3-bits of the first field are \emph{unused} for any stored values as these values are 16-byte aligned by default, whereas the second field may use all of its bits.
 The 3 unused bits are used to represent mapped allocation, zero filled, and alignment, respectively.
 Note, the alignment bit is not used in the normal header and the zero-filled/mapped bits are not used in the fake header.
 If no bits are on, it implies a basic allocation, which is handled quickly;
 otherwise, the bits are analysed and appropriate actions are taken for the complex cases.
-Since most allocations are basic, this implementation results in a significant performance gain along the allocation and free fastpath.
+Since most allocations are basic, they will take significantly less time as the memory operations will be done along the allocation and free fastpath.
 To locate all statistic counters, heaps are linked together in statistics mode, and this list is locked and traversed to sum all counters across heaps.
 Note, the list is locked to prevent errors traversing an active list;
-the statistics counters are not locked and can flicker during accumulation, which is not an issue with atomic read/write.
+the statistics counters are not locked and can flicker during accumulation.
```
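The bit encoding described in this hunk can be sketched as follows. The names and struct layout are illustrative assumptions, not llheap's definitions; only the idea is from the text: the union values are 16-byte aligned, so the low-order 3 bits mark zero fill, alignment, and mapped allocation, and an all-zero mask selects the fastpath.

```c
#include <stdint.h>
#include <stdbool.h>

enum {                                     // hypothetical flag positions
	ZERO_FILL_BIT = 0x1,                   // storage was zero filled (calloc)
	ALIGN_BIT     = 0x2,                   // fake header / alignment (memalign)
	MMAP_BIT      = 0x4,                   // storage is a mapped (mmap) allocation
	FLAG_MASK     = 0x7,
};

typedef struct {
	uint64_t unionField;                   // bucket pointer, mapped size, or next free link + flags
	uint64_t requestSize;                  // request size vs. bucket size, e.g. 42 vs 64
} Header;

static bool isBasic(const Header * h) {    // fastpath test: no bits on => basic allocation
	return (h->unionField & FLAG_MASK) == 0;
}

static uint64_t unionValue(const Header * h) {   // strip flags to recover the stored value
	return h->unionField & ~(uint64_t)FLAG_MASK;
}
```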
```diff
 \VRef[Figure]{f:StatiticsOutput} shows an example of statistics output, which covers all allocation operations and information about deallocating storage not owned by a thread.
 No other memory allocator studied provides as comprehensive statistical information.
-Finally, these statistics were invaluable during the development of this thesis for debugging and verifying correctness, and hence, should be equally valuable to application developers.
+Finally, these statistics were invaluable during the development of this thesis for debugging and verifying correctness and should be equally valuable to application developers.
 \begin{figure}
 Nevertheless, the checks detect many allocation problems.
 There is an unfortunate problem in detecting unfreed storage because some library routines assume their allocations have life-time duration, and hence, do not free their storage.
-For example, @printf@ allocates a 1024 buffer on first call and never deletes this buffer.
+For example, @printf@ allocates a 1024-byte buffer on the first call and never deletes this buffer.
 To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ \VPageref{p:malloc_unfreed}), and it is subtracted from the total allocate/free difference.
 Determining the amount of never-freed storage is annoying, but once done, any warnings of unfreed storage are application related.
-Tests indicate only a 30\% performance increase when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistics is mitigated by limited calls, often only one at the end of the program.
+Tests indicate only a 30\% performance decrease when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistics is mitigated by limited calls, often only one at the end of the program.

 \label{s:UserlevelThreadingSupport}
-The serially-reusable problem (see \VRef{s:AllocationFastpath}) occurs for kernel threads in the ``T:H model, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
-The solution is to prevent interrupts that can result in CPU or KT change during operations that are logically critical sections.
+The serially-reusable problem (see \VPageref{p:SeriallyReusable}) occurs for kernel threads in the ``T:H model, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.
+The solution is to prevent interrupts that can result in a CPU or KT change during operations that are logically critical sections such as starting a memory operation on one KT and completing it on another.
 Locking these critical sections negates any attempt for a quick fastpath and results in high contention.
 For user-level threading, the serially-reusable problem appears with time slicing for preemptable scheduling, as the signal handler context switches to another user-level thread.
-Without time slicing, a user thread performing a long computation can prevent execution (starve) other threads.
-To prevent starvation for an allocation-active thread, \ie the time slice always triggers in an allocation critical-section for one thread, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
+Without time slicing, a user thread performing a long computation can prevent the execution of (starve) other threads.
+To prevent starvation for a memory-allocation-intensive thread, \ie the time slice always triggers in an allocation critical-section for one thread so the thread never gets time sliced, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.
 The rollforward flag is tested at the end of each allocation funnel routine (see \VPageref{p:FunnelRoutine}), and if set, it is reset and a volunteer yield (context switch) is performed to allow other threads to execute.
-llheap uses two techniques to detect when execution is in a allocation operation or routine called from allocation operation, to abort any time slice during this period.
-On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting thread-local flags so the signal handler aborts immediately.
-On the fastpath, disabling/enabling interrupts is too expensive as accessing thread-local storage can be expensive and not thread-safe.
+llheap uses two techniques to detect when execution is in an allocation operation or a routine called from an allocation operation, to abort any time slice during this period.
+On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting kernel-thread-local flags so the signal handler aborts immediately.
+On the fastpath, disabling/enabling interrupts is too expensive as accessing kernel-thread-local storage can be expensive and not user-thread-safe.
 For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing.
-Hence, there is a window between loading the thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.
-The fast technique defines a special code section and places all non-interruptible routines in this section.
+Hence, there is a window between loading the kernel-thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.
+The fast technique (with lower run time cost) is to define a special code section and place all non-interruptible routines in this section.
 The linker places all code in this section into a contiguous block of memory, but the order of routines within the block is unspecified.
 Then, the signal handler compares the program counter at the point of interrupt with the start and end address of the non-interruptible section, and aborts if executing within this section and sets the rollforward flag.
 Hence, for correctness, this approach requires inspection of generated assembler code for routines placed in the non-interruptible section.
 This issue is mitigated by the llheap funnel design so only funnel routines and a few statistics routines are placed in the non-interruptible section and their assembler code examined.
-These techniques are used in both the \uC and \CFA versions of llheap, where both of these systems have user-level threading.
+These techniques are used in both the \uC and \CFA versions of llheap as both of these systems have user-level threading.
 Programs can be statically or dynamically linked.
 \item
-The order the linker schedules startup code is poorly supported.
+The order in which the linker schedules startup code is poorly supported so it cannot be controlled entirely.
 \item
 Knowing a KT's start and end independently from the KT code is difficult.
```
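The rollforward mechanism in this hunk reduces to a small amount of code. A hedged sketch, assuming hypothetical `doMalloc` and `yield` runtime hooks; the signal handler and non-interruptible section checks are elided:

```c
#include <stddef.h>
#include <stdbool.h>

// GCC/Clang __thread: one rollforward flag per thread.  The signal handler,
// seeing the PC inside the non-interruptible section, sets the flag and
// aborts the time slice instead of context switching (not shown).
static __thread volatile bool rollforward = false;

extern void * doMalloc(size_t size);       // non-interruptible fastpath (assumed)
extern void yield(void);                   // voluntary context switch (assumed)

void * malloc(size_t size) {               // funnel routine
	void * addr = doMalloc(size);
	if (rollforward) {                     // deferred preemption pending?
		rollforward = false;
		yield();                           // repay the aborted time slice
	}
	return addr;
}
```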
```diff
 Hence, some part of the @sbrk@ area may be used by the default allocator and statistics about allocation operations cannot be correct.
 Furthermore, dynamic linking goes through trampolines, so there is an additional cost along the allocator fastpath for all allocation operations.
-Testing showed up to a 5\% performance increase for dynamic linking over static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.
+Testing showed up to a 5\% performance decrease with dynamic linking as compared to static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.
 All allocator libraries need to perform startup code to initialize data structures, such as the heap array for llheap.
-The problem is getting initialized done before the first allocator call.
+The problem is getting initialization done before the first allocator call.
-However, there does not seem to be mechanism to tell either the static or dynamic loader to first perform initialization code before any calls to a loaded library.
+Also, initialization code of other libraries and the run-time environment may call memory allocation routines such as \lstinline{malloc}.
+This compounds the situation as there is no mechanism to tell either the static or dynamic loader to first perform the initialization code of the memory allocator before any other initialization that may involve a dynamic memory allocation call.
 As a result, calls to allocation routines occur without initialization.
 To deal with this problem, it is necessary to put a conditional initialization check along the allocation fastpath to trigger initialization (singleton pattern).
 Therefore, the constructor is useless for knowing when a KT starts because the KT must reference it, and the allocator does not control the application KT.
 Fortunately, the singleton pattern needed for initializing the program KT also triggers KT allocator initialization, which can then reference @pgm_thread@ to call @threadManager@'s constructor, otherwise its destructor is not called.
-Now when a KT terminates, @~ThreadManager@ is called to chained it onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
+Now when a KT terminates, @~ThreadManager@ is called to chain it onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.
 The conditional destructor call prevents closing down the program heap, which must remain available because epilogue code may free more storage.
 bool traceHeapOff();                    $\C{// stop printing allocation/free calls}$
 \end{lstlisting}
-This kind of API is necessary to allow concurrent runtime systems to interact with difference memory allocators in a consistent way.
+This kind of API is necessary to allow concurrent runtime systems to interact with different memory allocators in a consistent way.
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory;
 hence the need to return an alternate value for a zero-sized allocation.
-A different approach allowed by the C API is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.
+A different approach allowed by @C API@ is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.
```
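The singleton-pattern check in this hunk amounts to one conditional on the allocation fastpath. A sketch under assumed names (`heapManagerInitialized`, `heapManagerCtor` are hypothetical, not llheap's identifiers):

```c
#include <stdbool.h>

static bool heapManagerInitialized = false;

extern void heapManagerCtor(void);         // builds the heap array, buckets, ... (assumed)

// Conditional initialization along the fastpath, because the static/dynamic
// loaders give no way to run allocator startup code before the first call.
static inline void checkInit(void) {
	if (__builtin_expect(!heapManagerInitialized, 0)) {
		heapManagerInitialized = true;     // set first: the ctor may itself allocate
		heapManagerCtor();
	}
}
```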
```diff
 In theory, notifying the programmer of memory failure allows recovery;
 in practice, it is almost impossible to gracefully recover when out of memory.

 \paragraph{\lstinline{void * aalloc( size_t dim, size_t elemSize )}}
 extends @calloc@ for allocating a dynamic array of objects without calculating the total size of array explicitly but \emph{without} zero-filling the memory.
-@aalloc@ is significantly faster than @calloc@, which is the only alternative.
+@aalloc@ is significantly faster than @calloc@, which is the only alternative given by the standard memory-allocation routines.

 \noindent\textbf{Usage}
 \begin{itemize}
 \item
-@fd@: files description.
+@fd@: file descriptor.
 \end{itemize}
 It returns the previous file descriptor.

 \label{p:malloc_expansion}
 set the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation request.
-It returns the heap extension size used throughout a program, \ie called once at heap initialization.
+It returns the heap extension size used throughout a program when requesting more memory from the system using the @sbrk@ system-call, \ie called once at heap initialization.

 \paragraph{\lstinline{size_t malloc_mmap_start()}}

 \begin{itemize}
 \item
-naming: \CFA regular and @ttype@ polymorphism is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
+naming: \CFA regular and @ttype@ polymorphism (@ttype@ polymorphism in \CFA is similar to \CC variadic templates) is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.
 \item
-named arguments: individual allocation properties are specified using postfix function call, so programmers do have to remember parameter positions in allocation calls.
+named arguments: individual allocation properties are specified using postfix function call, so programmers do not have to remember parameter positions in allocation calls.
 \item
-object size: like the \CFA C-style interface, programmers do not have to specify object size or cast allocation results.
+object size: like \CFA's C-interface, programmers do not have to specify object size or cast allocation results.
 \end{itemize}
 Note, postfix function call is an alternative call syntax, using backtick @`@, where the argument appears before the function name, \eg
 \begin{cfa}
 duration dur = 3@`@h + 42@`@m + 17@`@s;
 \end{cfa}
-@ttype@ polymorphism is similar to \CC variadic templates.

 \paragraph{\lstinline{T * alloc( ... )} or \lstinline{T * alloc( size_t dim, ... )}}
-is overloaded with a variable number of specific allocation routines, or an integer dimension parameter followed by a variable number specific allocation routines.
+is overloaded with a variable number of specific allocation operations, or an integer dimension parameter followed by a variable number of specific allocation operations.
+These allocation operations can be passed as named arguments when calling the \lstinline{alloc} routine.
 A call without parameters returns a dynamically allocated object of type @T@ (@malloc@).
 A call with only the dimension (dim) parameter returns a dynamically allocated array of objects of type @T@ (@aalloc@).
 5 5 5 -555819298 -555819298  // two undefined values
 \end{lstlisting}
-Examples 1 to 3, fill an object with a value or characters.
-Examples 4 to 7, fill an array of objects with values, another array, or part of an array.
+Examples 1 to 3 fill an object with a value or characters.
+Examples 4 to 7 fill an array of objects with values, another array, or part of an array.

 \subparagraph{\lstinline{S_resize(T) ?resize( void * oaddr )}}
 \subparagraph{\lstinline{S_realloc(T) ?realloc( T * a ))}}
 used to resize, realign, and fill, where the old object data is copied to the new object.
-The old object type must be the same as the new object type, since the values used.
+The old object type must be the same as the new object type, since the value is used.
 Note, for @fill@, only the extra space after copying the data from the old object is filled with the given parameter.
 For example:
 \end{lstlisting}
 Examples 2 to 3 change the alignment for the initial storage of @i@.
-The @13fill@ for example 3 does nothing because no extra space is added.
+The @13fill@ in example 3 does nothing because no extra space is added.
 \begin{cfa}[numbers=left]
 \end{lstlisting}
 Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@.
-The @13fill@ for example 3 does nothing because no extra space is added.
+The @13fill@ in example 3 does nothing because no extra space is added.
 These \CFA allocation features are used extensively in the development of the \CFA runtime.
```
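As a concrete reading of the @aalloc@ semantics described in this file's changes, here is a hedged C sketch (not llheap's implementation, which goes through its own funnel routine): the array size is computed with an overflow check, but the storage is not zero filled, which is where the speedup over @calloc@ comes from.

```c
#include <stddef.h>
#include <stdlib.h>

// Sketch only: allocate a dim-element array without calloc's zero-fill pass.
void * aalloc_sketch(size_t dim, size_t elemSize) {
	size_t total;
	if (__builtin_mul_overflow(dim, elemSize, &total))   // dim * elemSize overflow?
		return NULL;
	return malloc(total);                  // no memset, unlike calloc
}
```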
• ## doc/theses/mubeen_zulfiqar_MMath/background.tex

r015925a

```diff
 The management data starts with fixed-sized information in the static-data memory that references components in the dynamic-allocation memory.
 The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}.
-Allocated objects (light grey) are variable sized, and allocated and maintained by the program;
-\ie only the program knows the location of allocated storage, not the memory allocator.
+Allocated objects (light grey) are variable sized, and are allocated and maintained by the program;
+\ie only the memory allocator knows the location of allocated storage, not the program.
 \begin{figure}[h]
 \centering
 if there are multiple reserved blocks, they are also chained together, usually internally.
-Allocated and freed objects typically have additional management data embedded within them.
+In some allocator designs, allocated and freed objects have additional management data embedded within them.
 \VRef[Figure]{f:AllocatedObject} shows an allocated object with a header, trailer, and alignment padding and spacing around the object.
 The header contains information about the object, \eg size, type, etc.
 \VRef[Figure]{f:MemoryFragmentation} shows an example of how a small block of memory fragments as objects are allocated and deallocated over time.
 Blocks of free memory become smaller and non-contiguous making them less useful in serving allocation requests.
-Memory is highly fragmented when the sizes of most free blocks are unusable.
+Memory is highly fragmented when most free blocks are unusable because of their sizes.
 For example, \VRef[Figure]{f:Contiguous} and \VRef[Figure]{f:HighlyFragmented} have the same quantity of external fragmentation, but \VRef[Figure]{f:HighlyFragmented} is highly fragmented.
 If there is a request to allocate a large object, \VRef[Figure]{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while \VRef[Figure]{f:HighlyFragmented} likely has to request more memory from the operating system.
 The fewer bin-sizes, the fewer lists need to be searched and maintained;
 however, the bin sizes are less likely to closely fit the requested object size, leading to more internal fragmentation.
-The more bin-sizes, the longer the search and the less likely free objects are to be reused, leading to more external fragmentation and potentially heap blowup.
+The more bin sizes, the longer the search and the less likely free objects are to be reused, leading to more external fragmentation and potentially heap blowup.
 A variation of the binning algorithm allows objects to be allocated to the requested size, but when an object is freed, it is placed on the free list of the next smallest or equal bin-size.
 For example, with bin sizes of 8 and 16 bytes, a request for 12 bytes allocates only 12 bytes, but when the object is freed, it is placed on the 8-byte bin-list.
 The principle of locality recognizes that programs tend to reference a small set of data, called a working set, for a certain period of time, where a working set is composed of temporal and spatial accesses~\cite{Denning05}.
 Temporal clustering implies a group of objects are accessed repeatedly within a short time period, while spatial clustering implies a group of objects physically close together (nearby addresses) are accessed repeatedly within a short time period.
-Temporal locality commonly occurs during an iterative computation with a fix set of disjoint variables, while spatial locality commonly occurs when traversing an array.
+Temporal locality commonly occurs during an iterative computation with a fixed set of disjoint variables, while spatial locality commonly occurs when traversing an array.
 Hardware takes advantage of temporal and spatial locality through multiple levels of caching, \ie memory hierarchy.
 For example, multiple heaps are managed in a pool, starting with a single or a fixed number of heaps that increase\-/decrease depending on contention\-/space issues.
 At creation, a thread is associated with a heap from the pool.
-When the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.
+In some implementations of this model, when the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.
 If an unlocked heap is found, the thread changes its association and uses that heap.
 If all heaps are locked, the thread may create a new heap, use it, and then place the new heap into the pool;
 The management information in the static zone must be able to locate all heaps in the dynamic zone.
 The management information for the heaps must reside in the dynamic-allocation zone if there are a variable number.
-Each heap in the dynamic zone is composed of a list of a free objects and a pointer to its reserved memory.
+Each heap in the dynamic zone is composed of a list of free objects and a pointer to its reserved memory.
 An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory.
 Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur.
 Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup.
 The external fragmentation experienced by a program with a single heap is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory.
-Additionally, objects freed by one heap cannot be reused by other threads, except indirectly by returning free memory to the operating system, which can be expensive.
-(Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.)
+Additionally, objects freed by one heap cannot be reused by other threads without increasing the cost of the memory operations, except indirectly by returning free memory to the operating system, which can be expensive.
+Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.
 In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused.
 In contrast, the T:H model spreads each thread's objects over a larger area in different heaps.
 Thread heaps can also eliminate allocator-induced active false-sharing, if memory is acquired so it does not overlap at crucial boundaries with memory for another thread's heap.
-For example, assume page boundaries coincide with cache line boundaries, then if a thread heap always acquires pages of memory, no two threads share a page or cache line unless pointers are passed among them.
+For example, assume page boundaries coincide with cache line boundaries, if a thread heap always acquires pages of memory then no two threads share a page or cache line unless pointers are passed among them.
 Hence, allocator-induced active false-sharing in \VRef[Figure]{f:AllocatorInducedActiveFalseSharing} cannot occur because the memory for thread heaps never overlaps.
-When a thread terminates, there are two options for handling its heap.
-First is to free all objects in the heap to the global heap and destroy the thread heap.
+When a thread terminates, there are two options for handling its thread heap.
+First is to free all objects in the thread heap to the global heap and destroy the thread heap.
 Second is to place the thread heap on a list of available heaps and reuse it for a new thread in the future.
 Destroying the thread heap immediately may reduce external fragmentation sooner, since all free objects are freed to the global heap and may be reused by other threads.
-Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible..
+Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible.
 When the user thread continues on the new kernel thread, it may have pointers into the previous kernel-thread's heap and hold locks associated with it.
 To get the same kernel-thread safety, time slicing must be disabled/\-enabled around these operations, so the user thread cannot jump to another kernel thread.
-However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is rare (10--100 milliseconds).
+However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption does not happen that frequently.
 Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically.
 Occasionally ignoring a preemption should be benign, but a persistent lack of preemption can result in both short and long term starvation.
-\begin{figure}
-\centering
-\subfigure[Ownership]{ \input{MultipleHeapsOwnership} } % subfigure
-\hspace{0.25in}
-\subfigure[No Ownership]{ \input{MultipleHeapsNoOwnership} } % subfigure
-\caption{Heap Ownership}
-\label{f:HeapsOwnership}
-\end{figure}
 For the T:1/T:H models with or without ownership or the 1:1 model with ownership, a thread may free objects to different heaps, which makes each heap publicly accessible to all threads, called a \newterm{public heap}.
+\begin{figure}
+\centering
+\subfigure[Ownership]{ \input{MultipleHeapsOwnership} } % subfigure
+\hspace{0.25in}
+\subfigure[No Ownership]{ \input{MultipleHeapsNoOwnership} } % subfigure
+\caption{Heap Ownership}
+\label{f:HeapsOwnership}
+\end{figure}
 \VRef[Figure]{f:MultipleHeapStorageOwnership} shows the effect of ownership on storage layout.
-(For simplicity assume the heaps all use the same size of reserves storage.)
+(For simplicity, assume the heaps all use the same size of reserved storage.)
 In contrast to \VRef[Figure]{f:MultipleHeapStorage}, each reserved area used by a heap only contains free storage for that particular heap because threads must return free objects back to the owner heap.
```
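The page-granularity idea in this hunk can be sketched directly: if each thread heap acquires storage only in whole pages from @mmap@, heaps never straddle a page (hence cache-line) boundary shared with another heap. A sketch assuming Linux and a 4K page; not code from the thesis:

```c
#include <stddef.h>
#include <sys/mman.h>

enum { PAGE = 4096 };                      // assumed page size

// Acquire reserved memory for a thread heap in whole pages, so no two thread
// heaps share a page or cache line (illustrative, not a real allocator).
static void * acquirePages(size_t size) {
	size_t len = (size + PAGE - 1) & ~(size_t)(PAGE - 1);   // round up to pages
	void * mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
	                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return mem == MAP_FAILED ? NULL : mem;  // page aligned by construction
}
```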
```diff
 Again, because multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur.
 While the returning thread can batch objects, batching across multiple heaps is complex and there is no obvious time when to push back to the owner heap.
 It is better for returning threads to immediately return to the receiving thread's batch list as the receiving thread has better knowledge when to incorporate the batch list into its free pool.
-Batching leverages the fact that most allocation patterns use the contention-free fast-path so locking on the batch list is rare for both the returning and receiving threads.
-It is possible for heaps to steal objects rather than return them and reallocating these objects when storage runs out on a heap.
+Batching leverages the fact that most allocation patterns use the contention-free fast-path, so locking on the batch list is rare for both the returning and receiving threads.
+It is possible for heaps to steal objects rather than return them and then reallocate these objects again when storage runs out on a heap.
 However, stealing can result in passive false-sharing.
 For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, Object$_2$ may be deallocated to Thread$_2$'s heap initially.
 Bracketing every allocation with headers/trailers can result in significant internal fragmentation, as shown in \VRef[Figure]{f:ObjectHeaders}.
-Especially if the headers contain redundant management information, \eg object size may be the same for many objects because programs only allocate a small set of object sizes.
+Especially if the headers contain redundant management information, then storing that information is a waste of storage, \eg object size may be the same for many objects because programs only allocate a small set of object sizes.
 As well, it can result in poor cache usage, since only a portion of the cache line is holding useful information from the program's perspective.
 Spatial locality can also be negatively affected leading to poor cache locality~\cite{Feng05}:
 With local free-lists in containers, as in \VRef[Figure]{f:LocalFreeListWithinContainers}, the container is simply removed from one heap's free list and placed on the new heap's free list.
 Thus, when using local free-lists, the operation of moving containers is reduced from $O(N)$ to $O(1)$.
-The cost is adding information to a header, which increases the header size, and therefore internal fragmentation.
+However, there is the additional storage cost in the header, which increases the header size, and therefore internal fragmentation.
 \begin{figure}
 The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup.
 In the hybrid approach, a thread first allocates from its private heap and second from its public heap if no free memory exists in the private heap.
-Similarly, a thread first deallocates an object its private heap, and second to the public heap.
+Similarly, a thread first deallocates an object to its private heap, and second to the public heap.
 Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps.
 Note, deallocation from the private to the public (dashed line) is unlikely because there is no obvious advantage unless the public heap provides the only interface to the global heap.
```
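The batching idea in this hunk can be sketched as a per-heap remote-free list. This is an illustrative design under assumed names, not the thesis's code: remote frees go to the owner heap's batch list, and the owner splices the batch into its free pool at a time of its choosing, keeping locking off the owner's fastpath.

```c
#include <pthread.h>

typedef struct FreeObj { struct FreeObj * next; } FreeObj;

typedef struct {
	FreeObj * freePool;                    // owner-only, no locking
	FreeObj * batch;                       // remote frees land here
	pthread_mutex_t batchLock;             // init with pthread_mutex_init; rarely contended
} Heap;

void remoteFree(Heap * owner, FreeObj * obj) {
	pthread_mutex_lock(&owner->batchLock);
	obj->next = owner->batch;              // push onto the owner's batch list
	owner->batch = obj;
	pthread_mutex_unlock(&owner->batchLock);
}

void incorporateBatch(Heap * h) {          // owner decides when to splice the batch in
	pthread_mutex_lock(&h->batchLock);
	FreeObj * list = h->batch;
	h->batch = NULL;
	pthread_mutex_unlock(&h->batchLock);
	while (list) {                         // splice into the private free pool
		FreeObj * next = list->next;
		list->next = h->freePool;
		h->freePool = list;
		list = next;
	}
}
```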
• ## doc/theses/mubeen_zulfiqar_MMath/benchmarks.tex

r015925a

```diff
 \item[Benchmarks]
 are a suite of application programs (SPEC CPU/WEB) that are exercised in a common way (inputs) to find differences among underlying software implementations associated with an application (compiler, memory allocator, web server, \etc).
-The applications are suppose to represent common execution patterns that need to perform well with respect to an underlying software implementation.
+The applications are supposed to represent common execution patterns that need to perform well with respect to an underlying software implementation.
 Benchmarks are often criticized for having overlapping patterns, insufficient patterns, or extraneous code that masks patterns.
 \item[Micro-Benchmarks]
 This thesis designs and examines a new set of micro-benchmarks for memory allocators that test a variety of allocation patterns, each with multiple tuning parameters.
-The aim of the micro-benchmark suite is to create a set of programs that can evaluate a memory allocator based on the key performance matrices such as speed, memory overhead, and cache performance.
+The aim of the micro-benchmark suite is to create a set of programs that can evaluate a memory allocator based on the key performance metrics such as speed, memory overhead, and cache performance.
 % These programs can be taken as a standard to benchmark an allocator's basic goals.
 These programs give details of an allocator's memory overhead and speed under certain allocation patterns.
-The allocation patterns are configurable (adjustment knobs) to observe an allocator's performance across a spectrum of events for a desired allocation pattern, which is seldom possible with benchmark programs.
+The allocation patterns are configurable (adjustment knobs) to observe an allocator's performance across a spectrum of allocation patterns, which is seldom possible with benchmark programs.
 Each micro-benchmark program has multiple control knobs specified by command-line arguments.
-The new micro-benchmark suite measures performance by allocating dynamic objects and measuring specific matrices.
+The new micro-benchmark suite measures performance by allocating dynamic objects and measuring specific metrics.
 An allocator's speed is benchmarked in different ways, as are issues like false sharing.
 Modern memory allocators, such as llheap, must handle multi-threaded programs at the KT and UT level.
 The following multi-threaded micro-benchmarks are presented to give a sense of prior work~\cite{Berger00} at the KT level.
-None of the prior work address multi-threading at the UT level.
+None of the prior work addresses multi-threading at the UT level.
 This benchmark stresses the ability of the allocator to handle different threads allocating and deallocating independently.
 There is no interaction among threads, \ie no object sharing.
-Each thread repeatedly allocate 100,000 \emph{8-byte} objects then deallocates them in the order they were allocated.
-Runtime of the benchmark evaluates its efficiency.
+Each thread repeatedly allocates 100,000 \emph{8-byte} objects then deallocates them in the order they were allocated.
+The execution time of the benchmark evaluates its efficiency.
 Before the thread terminates, it passes its array of 10,000 objects to a new child thread to continue the process.
 The number of thread generations varies depending on the thread speed.
-It calculates memory operations per second as an indicator of memory allocator's performance.
+It calculates memory operations per second as an indicator of the memory allocator's performance.
```
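The independent-thread benchmark described in this hunk reduces to a simple worker loop. A hedged C sketch (loop bounds from the text, function name hypothetical; the thread spawning and timing harness are elided):

```c
#include <stdlib.h>

enum { NOBJS = 100000 };

// Each thread allocates 100,000 8-byte objects, then frees them in
// allocation order; threads share no objects.
void * benchWorker(void * arg) {
	(void)arg;
	void ** objs = malloc(NOBJS * sizeof(void *));
	if (objs == NULL) return NULL;
	for (int i = 0; i < NOBJS; i += 1) objs[i] = malloc(8);
	for (int i = 0; i < NOBJS; i += 1) free(objs[i]);   // free in allocation order
	free(objs);
	return NULL;
}
```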
```diff
 \label{s:ChurnBenchmark}
-The churn benchmark measures the runtime speed of an allocator in a multi-threaded scenerio, where each thread extensively allocates and frees dynamic memory.
+The churn benchmark measures the runtime speed of an allocator in a multi-threaded scenario, where each thread extensively allocates and frees dynamic memory.
 Only @malloc@ and @free@ are used to eliminate any extra cost, such as @memcpy@ in @calloc@ or @realloc@.
-Churn simulates a memory intensive program that can be tuned to create different scenarios.
+Churn simulates a memory intensive program and can be tuned to create different scenarios.
 \VRef[Figure]{fig:ChurnBenchFig} shows the pseudo code for the churn micro-benchmark.
 When threads share a cache line, frequent reads/writes to their cache-line object causes cache misses, which cause escalating delays as cache distance increases.
-Cache thrash tries to create a scenerio that leads to false sharing, if the underlying memory allocator is allocating dynamic memory to multiple threads on the same cache lines.
+Cache thrash tries to create a scenario that leads to false sharing, if the underlying memory allocator is allocating dynamic memory to multiple threads on the same cache lines.
 Ideally, a memory allocator should distance the dynamic memory region of one thread from another.
 Having multiple threads allocating small objects simultaneously can cause a memory allocator to allocate objects on the same cache line, if it is not distancing the memory among different threads.
 Each worker thread allocates an object and intensively reads/writes it for M times to possibly invalidate cache lines that may interfere with other threads sharing the same cache line.
 Each thread repeats this for N times.
-The main thread measures the total time taken to for all worker threads to complete.
-Worker threads sharing cache lines with each other will take longer.
+The main thread measures the total time taken for all worker threads to complete.
+Worker threads sharing cache lines with each other are expected to take longer.
 \begin{figure}
 	signal workers to free
 	...
 	print addresses from each $thread$
 Worker Thread$_1$
-	allocate, write, read, free
-	warmup memory in chunkc of 16 bytes
-	...
-	malloc N objects
-	...
-	free objects
-	return object address to Main Thread
+	warm up memory in chunks of 16 bytes
+	...
+	For N
+		malloc an object
+		read/write the object M times
+		free the object
+	...
 Worker Thread$_2$
 	// same as Worker Thread$_1$
 The cache-scratch micro-benchmark measures allocator-induced passive false-sharing as illustrated in \VRef{s:AllocatorInducedPassiveFalseSharing}.
-As for cache thrash, if memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
+As with cache thrash, if memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.
 In this scenario, the false sharing is being caused by the memory allocator although it is started by the program sharing an object.
 Cache scratch tries to create a scenario that leads to false sharing and should make the memory allocator preserve the program-induced false sharing, if it does not return a freed object to its owner thread and, instead, re-uses it instantly.
 An allocator using object ownership, as described in section \VRef{s:Ownership}, is less susceptible to allocator-induced passive false-sharing.
```
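The revised cache-thrash worker pseudocode maps onto C roughly as follows; a hedged sketch with hypothetical N, M, and object-size parameters (the warm-up phase, thread harness, and optimization-defeating tricks a real benchmark needs are omitted):

```c
#include <stdlib.h>

// Repeatedly allocate a small object and read/write it M times; with a bad
// allocator, concurrent workers land on the same cache line and slow each
// other down through false sharing.
void thrashWorker(int N, int M, size_t objSize) {
	for (int n = 0; n < N; n += 1) {
		char * obj = malloc(objSize);      // objSize assumed > 0
		if (obj == NULL) return;
		for (int m = 0; m < M; m += 1)
			obj[m % objSize] += 1;         // intensive reads/writes
		free(obj);
	}
}
```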
If the object is returned to the thread who owns it, then the thread that gets a new object is less likely to be on the same cache line. If the object is returned to the thread that owns it, then the new object that the thread gets is less likely to be on the same cache line. \VRef[Figure]{fig:benchScratchFig} shows the pseudo code for the cache-scratch micro-benchmark. signal workers to free ... print addresses from each $thread$ Worker Thread$$$_1$$$ allocate, write, read, free warmup memory in chunkc of 16 bytes ... for ( N ) free an object passed by Main Thread warmup memory in chunks of 16 bytes ... free the object passed by the Main Thread For N malloc new object ... free objects return new object addresses to Main Thread read/write the object M times free the object ... Worker Thread$$$_2$$$ // same as Worker Thread$$$_1$$$ Similar to the cache-thrash benchmark in \VRef{sec:benchThrashSec}, different cache access scenarios can be created using the following command-line arguments. \begin{description}[itemsep=0pt,parsep=0pt] \begin{description}[topsep=0pt,itemsep=0pt,parsep=0pt] \item[threads:] number of threads (K). \subsection{Speed Micro-Benchmark} \label{s:SpeedMicroBenchmark} \vspace*{-4pt} The speed benchmark measures the runtime speed of individual and sequences of memory allocation routines: \begin{enumerate}[itemsep=0pt,parsep=0pt] \begin{enumerate}[topsep=-5pt,itemsep=0pt,parsep=0pt] \item malloc \item realloc \VRef[Figure]{fig:MemoryBenchFig} shows the pseudo code for the memory micro-benchmark. It creates a producer-consumer scenario with K producer threads and each producer has M consumer threads. A producer has a separate buffer for each consumer and allocates N objects of random sizes following a settable distribution for each consumer. A producer has a separate buffer for each consumer and allocates N objects of random sizes following a configurable distribution for each consumer. A consumer frees these objects. After every memory operation, program memory usage is recorded throughout the runtime.
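Both cache benchmarks above share the same worker shape; a sketch of the cache-thrash worker loop in plain C (M, N and the object size stand in for the command-line knobs; names are illustrative):

	#include <stdlib.h>

	// For N rounds: allocate a small object, read/write it M times, free it.
	// If the allocator hands objects for different threads out of the same
	// cache line, the read/write phase slows down measurably.
	static void worker( int N, int M ) {
		for ( int n = 0; n < N; n += 1 ) {
			volatile char * obj = malloc( 16 );
			for ( int m = 0; m < M; m += 1 ) obj[0] = obj[0] + 1;
			free( (void *)obj );
		}
	}

The cache-scratch variant differs only in that the first object freed is one passed in from the main thread, so what is measured is how the allocator reuses that foreign object.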
• ## doc/theses/mubeen_zulfiqar_MMath/conclusion.tex

 r015925a 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2 3300 1500 3300 2400 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 4200 1800 6600 1800 6600 2100 4200 2100 4200 1800 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3 1 1 1.00 45.00 90.00 4050 2625 3750 2625 3750 2400 4200 2775 3750 2775 3750 1725 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3 1 1 1.00 45.00 90.00 4050 2850 3450 2850 3450 2400 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 4200 1800 6600 1800 6600 2100 4200 2100 4200 1800 4200 2550 4050 2550 4050 1725 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3 1 1 1.00 45.00 90.00 4200 3000 3450 3000 3450 2025 4 0 0 50 -1 0 12 0.0000 2 180 1185 1875 1725 bucket pointer\001 4 0 0 50 -1 0 12 0.0000 2 180 1005 1875 2025 mapped size\001 4 0 0 50 -1 0 12 0.0000 2 135 1215 1875 2325 next free block\001 4 2 0 50 -1 0 12 0.0000 2 135 480 1725 2025 union\001 4 1 0 50 -1 0 12 0.0000 2 135 270 3775 2325 0/1\001 4 1 0 50 -1 0 12 0.0000 2 135 270 3475 2325 0/1\001 4 1 0 50 -1 0 12 0.0000 2 180 945 5400 2025 request size\001 4 1 0 50 -1 0 12 0.0000 2 180 765 5400 1425 4/8-bytes\001 4 1 0 50 -1 0 12 0.0000 2 180 765 3000 1425 4/8-bytes\001 4 0 0 50 -1 0 12 0.0000 2 135 825 4125 2700 zero filled\001 4 0 0 50 -1 0 12 0.0000 2 180 1515 4125 2925 mapped allocation\001 4 1 0 50 -1 0 12 0.0000 2 135 270 3475 2025 0/1\001 4 1 0 50 -1 0 12 0.0000 2 135 270 3775 1725 0/1\001 4 1 0 50 -1 0 12 0.0000 2 135 270 4075 1725 0/1\001 4 0 0 50 -1 0 12 0.0000 2 180 1515 4275 3075 mapped allocation\001 4 0 0 50 -1 0 12 0.0000 2 135 825 4275 2850 zero filled\001 4 0 0 50 -1 0 12 0.0000 2 180 1920 4275 2625 alignment (fake header)\001
• ## doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsNoOwnership.fig

 r015925a #FIG 3.2  Produced by xfig version 3.2.5 #FIG 3.2  Produced by xfig version 3.2.7b Landscape Center Inches Letter Letter 100.00 Single 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 1200 2100 1500 2100 1500 1800 1200 1800 1200 2100 4 1 0 50 -1 0 11 0.0000 2 195 495 1350 2025 H$_1$\001 4 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001 -6 6 1950 1800 2550 2100 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 2100 2100 2400 2100 2400 1800 2100 1800 2100 2100 4 1 0 50 -1 0 11 0.0000 2 195 495 2250 2025 H$_2$\001 4 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_2$\001 -6 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 1350 1350 150 150 1350 1350 1500 1350 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 2250 1350 150 150 2250 1350 2400 1350 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1275 1800 1275 1500 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 0 0 1.00 45.00 90.00 2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 45.00 90.00 1425 1500 1425 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1425 1500 2175 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2175 1500 1425 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2175 1500 2175 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2325 1800 2325 1500 4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001 4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_2$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_2$\001
• ## doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsOwnership.fig

 r015925a #FIG 3.2  Produced by xfig version 3.2.5 #FIG 3.2  Produced by xfig version 3.2.7b Landscape Center Inches Letter Letter 100.00 Single 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 1200 2100 1500 2100 1500 1800 1200 1800 1200 2100 4 1 0 50 -1 0 11 0.0000 2 195 495 1350 2025 H$_1$\001 4 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001 -6 6 1950 1800 2550 2100 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 2100 2100 2400 2100 2400 1800 2100 1800 2100 2100 4 1 0 50 -1 0 11 0.0000 2 195 495 2250 2025 H$_2$\001 4 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_2$\001 -6 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 1350 1350 150 150 1350 1350 1500 1350 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 2250 1350 150 150 2250 1350 2400 1350 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2175 1500 1425 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1425 1500 2175 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1275 1800 1275 1500 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2325 1800 2325 1500 4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_2$\001 4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1425 1500 2175 1800 4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_2$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001

 r015925a #FIG 3.2  Produced by xfig version 3.2.5 #FIG 3.2  Produced by xfig version 3.2.7b Landscape Center Inches Letter Letter 100.00 Single 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 2700 1800 3000 1800 3000 2100 2700 2100 2700 1800 4 1 0 50 -1 0 11 0.0000 2 135 135 2850 2025 G\001 4 1 0 50 -1 0 11 0.0000 2 120 135 2850 2025 G\001 -6 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1350 1500 1350 1800 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 2100 1800 2400 1800 2400 2100 2100 2100 2100 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1800 1500 1800 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2250 1500 2250 1800 4 1 0 50 -1 0 11 0.0000 2 195 1320 2550 2025 $\\Leftrightarrow$\001 4 1 0 50 -1 0 11 0.0000 2 195 1320 3150 2025 $\\Leftrightarrow$\001 4 0 0 50 -1 0 11 0.0000 2 135 240 3300 2025 OS\001 4 1 0 50 -1 0 11 0.0000 2 195 495 1350 2025 H$_1$\001 4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001 4 1 0 50 -1 0 11 0.0000 2 195 495 1800 2025 H$_2$\001 4 1 0 50 -1 0 11 0.0000 2 195 465 1800 1425 T$_2$\001 4 1 0 50 -1 0 11 0.0000 2 195 495 2250 2025 H$_3$\001 4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_3$\001 4 1 0 50 -1 0 11 0.0000 2 180 1260 2550 2025 $\\Leftrightarrow$\001 4 1 0 50 -1 0 11 0.0000 2 180 1260 3150 2025 $\\Leftrightarrow$\001 4 0 0 50 -1 0 11 0.0000 2 120 240 3300 2025 OS\001 4 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001 4 1 0 50 -1 0 11 0.0000 2 165 495 1800 2025 H$_2$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001 4 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_3$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001
• ## doc/theses/mubeen_zulfiqar_MMath/figures/SharedHeaps.fig

 r015925a #FIG 3.2  Produced by xfig version 3.2.5 #FIG 3.2  Produced by xfig version 3.2.7b Landscape Center Inches Letter Letter 100.00 Single 6 1500 1200 2100 1500 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350 4 1 0 50 -1 0 11 0.0000 2 195 465 1800 1425 T$_2$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001 -6 6 1050 1200 1650 1500 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350 4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001 -6 6 1950 1200 2550 1500 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350 4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_3$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001 -6 6 1275 1800 1875 2100 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 1425 1800 1725 1800 1725 2100 1425 2100 1425 1800 4 1 0 50 -1 0 11 0.0000 2 195 495 1575 2025 H$_1$\001 4 1 0 50 -1 0 11 0.0000 2 165 495 1575 2025 H$_1$\001 -6 6 1725 1800 2325 2100 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 1875 1800 2175 1800 2175 2100 1875 2100 1875 1800 4 1 0 50 -1 0 11 0.0000 2 195 495 2025 2025 H$_2$\001 4 1 0 50 -1 0 11 0.0000 2 165 495 2025 2025 H$_2$\001 -6 6 2475 1800 2775 2100 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 2475 1800 2775 1800 2775 2100 2475 2100 2475 1800 4 1 0 50 -1 0 11 0.0000 2 135 135 2625 2025 G\001 4 1 0 50 -1 0 11 0.0000 2 120 135 2625 2025 G\001 -6 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1275 1500 1500 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1425 1500 1950 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1725 1500 1650 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1875 1500 2025 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2250 1500 2100 1800 4 0 0 50 -1 0 11 0.0000 2 135 240 3075 2025 OS\001 4 1 0 50 -1 0 11 0.0000 2 195 1320 2325 2025 $\\Leftrightarrow$\001 4 1 0 50 -1 0 11 0.0000 2 195 1320 2925 2025 $\\Leftrightarrow$\001 4 0 0 50 -1 0 11 0.0000 2 120 240 3075 2025 OS\001 4 1 0 50 -1 0 11 0.0000 2 180 1260 2325 2025 $\\Leftrightarrow$\001 4 1 0 50 -1 0 11 0.0000 2 180 1260 2925 2025 $\\Leftrightarrow$\001
• ## doc/theses/mubeen_zulfiqar_MMath/figures/SingleHeap.fig

 r015925a #FIG 3.2  Produced by xfig version 3.2.5 #FIG 3.2  Produced by xfig version 3.2.7b Landscape Center Inches Letter Letter 100.00 Single 6 1500 1200 2100 1500 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350 4 1 0 50 -1 0 11 0.0000 2 195 465 1800 1425 T$_2$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001 -6 6 1050 1200 1650 1500 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350 4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001 -6 6 1950 1200 2550 1500 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350 4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_3$\001 4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001 -6 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1350 1500 1725 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2250 1500 1875 1800 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 1650 1800 1950 1800 1950 2100 1650 2100 1650 1800 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1800 1500 1800 1800 4 1 0 50 -1 0 11 0.0000 2 195 495 1800 2025 H$_1$\001 4 1 0 50 -1 0 11 0.0000 2 195 1320 2100 2025 $\\Leftrightarrow$\001 4 0 0 50 -1 0 11 0.0000 2 135 240 2250 2025 OS\001 4 1 0 50 -1 0 11 0.0000 2 165 495 1800 2025 H$_1$\001 4 1 0 50 -1 0 11 0.0000 2 180 1260 2100 2025 $\\Leftrightarrow$\001 4 0 0 50 -1 0 11 0.0000 2 120 240 2250 2025 OS\001
• ## doc/theses/mubeen_zulfiqar_MMath/figures/UserKernelHeaps.fig

 r015925a -6 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2025 2100 2025 2400 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2475 2100 2475 2400 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 0 0 1.00 45.00 90.00 0 0 1.00 45.00 90.00 1 1 1.00 45.00 90.00 1 1 1.00 45.00 90.00 2925 2100 2925 2400 4 1 0 50 -1 0 11 0.0000 2 135 2235 2475 1725 scheduled across kernel threads\001
• ## doc/theses/mubeen_zulfiqar_MMath/intro.tex

 r015925a When this allocator proves inadequate, programmers often write specialized allocators for specific needs. C and \CC allow easy replacement of the default memory allocator with an alternative specialized or general-purpose memory-allocator. (Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.) Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine. However, high-performance memory-allocators for kernel and user multi-threaded programs are still being designed and improved. For this reason, several alternative general-purpose allocators have been written for C/\CC with the goal of scaling in a multi-threaded program~\cite{Berger00,mtmalloc,streamflow,tcmalloc}. \begin{enumerate}[leftmargin=*] \item Implementation of a new stand-lone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading). \item Adopt @nullptr@ return for a zero-sized allocation, rather than an actual memory address, which can be passed to @free@. Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading). \item \item Provide additional heap wrapper functions in \CFA creating an orthogonal set of allocation operations and properties. Provide additional heap wrapper functions in \CFA creating a more usable set of allocation operations and properties. \item \item @malloc_alignment( addr )@ returns the alignment of the allocation pointed-to by @addr@. If the allocation is not aligned or @addr@ is the @nulladdr@, the minimal alignment is returned. If the allocation is not aligned or @addr@ is @NULL@, the minimal alignment is returned. \item @malloc_zero_fill( addr )@ returns a boolean result indicating if the memory pointed-to by @addr@ is allocated with zero fill, e.g., by @calloc@/@cmemalign@. @malloc_usable_size( addr )@ returns the usable (total) size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@. \end{itemize} \item Provide mostly contention-free allocation and free operations via a heap-per-kernel-thread implementation. \item \item Provide extensive runtime checks to valid allocation operations and identify the amount of unfreed storage at program termination. Provide extensive runtime checks to validate allocation operations and identify the amount of unfreed storage at program termination. \item
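A small usage sketch of the introspection routines listed above, assuming llheap's prototypes (size_t and boolean returns; the printed values are illustrative):

	#include <malloc.h>	// malloc_usable_size (glibc); llheap adds the other routines
	#include <stdio.h>
	#include <stdlib.h>

	int main( void ) {
		void * addr = calloc( 1, 100 );	// zero-filled allocation
		printf( "alignment %zu\n", malloc_alignment( addr ) );	// minimal alignment unless aligned allocation
		printf( "zero filled %d\n", malloc_zero_fill( addr ) );	// true for calloc storage
		printf( "usable size %zu\n", malloc_usable_size( addr ) );	// bin size, >= the 100-byte request
		free( addr );
	}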
• ## doc/theses/mubeen_zulfiqar_MMath/performance.tex

 r015925a This chapter uses the micro-benchmarks from \VRef[Chapter]{s:Benchmarks} to test a number of current memory allocators, including llheap. The goal is to see if llheap is competitive with the current best memory allocators. The goal is to see if llheap is competitive with the currently popular memory allocators. \begin{itemize} \item \textbf{Algol} Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz, GCC version 9.4.0 \item \textbf{Nasus} AMD EPYC 7662, 64-core socket $\times$ 2, 2.0 GHz, GCC version 9.3.0 \item \textbf{Algol} Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz, GCC version 9.4.0 \end{itemize} \paragraph{glibc (\textsf{glc})} \cite{glibc} is the default gcc thread-safe allocator. \cite{glibc} is the default glibc thread-safe allocator. \\ \textbf{Version:} Ubuntu GLIBC 2.31-0ubuntu9.7 2.31\\ \paragraph{hoard (\textsf{hrd})} \cite{hoard} is a thread-safe allocator that is multi-threaded and using a heap layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap. \cite{hoard} is a thread-safe allocator that is multi-threaded and uses a heap layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap. \\ \textbf{Version:} 3.13\\ \paragraph{tbb malloc (\textsf{tbb})} \cite{tbbmalloc} is a thread-safe allocator that is multi-threaded and uses private heap for each thread. \cite{tbbmalloc} is a thread-safe allocator that is multi-threaded and uses a private heap for each thread. Each private-heap has multiple bins of different sizes. Each bin contains free regions of the same size. \\ \section{Experiments} The each micro-benchmark is configured and run with each of the allocators, The less time an allocator takes to complete a benchmark the better, so lower in the graphs is better. Each micro-benchmark is configured and run with each of the allocators. The less time an allocator takes to complete a benchmark the better, so lower in the graphs is better, except for the Memory micro-benchmark graphs. All graphs use log scale on the Y-axis, except for the Memory micro-benchmark (see \VRef{s:MemoryMicroBenchmark}). Second is the low-performer group, which includes the rest of the memory allocators. These memory allocators have significant program-induced passive false-sharing, where \textsf{hrd} is the worst-performing allocator. All of the allocator's in this group are sharing heaps among threads at some level. Interestingly, allocators such as \textsf{hrd} and \textsf{glc} performed well in micro-benchmark cache thrash (see \VRef{sec:cache-thrash-perf}). But, these allocators are among the low performers in the cache scratch. It suggests these allocators do not actively produce false-sharing but preserve program-induced passive false sharing. All of the allocators in this group are sharing heaps among threads at some level. Interestingly, allocators such as \textsf{hrd} and \textsf{glc} performed well in micro-benchmark cache thrash (see \VRef{sec:cache-thrash-perf}), but these allocators are among the low performers in the cache scratch. It suggests these allocators do not actively produce false-sharing, but preserve program-induced passive false sharing. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

• ## doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex

 r015925a pdffitwindow=false,     % window fit to page when opened pdfstartview={FitH},    % fits the width of the page to the window pdftitle={Cforall Memory Allocation}, % title: CHANGE THIS TEXT! pdftitle={High-Performance Concurrent Memory Allocation}, % title: CHANGE THIS TEXT! pdfauthor={Mubeen Zulfiqar},    % author: CHANGE THIS TEXT! and uncomment this line pdfsubject={Cforall},  % subject: CHANGE THIS TEXT! and uncomment this line
• ## doc/theses/thierry_delisle_PhD/thesis/Makefile

 r015925a Build = build Figures = img Macros = ../../../LaTeXmacros TeXLIB = .:${Macros}:${Build}:../../../bibliography: LaTMac = ../../../LaTeXmacros BibRep = ../../../bibliography Macros = ${LaTMac} TeXLIB = .:${Macros}:${Build}:${BibRep}: LaTeX  = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build} BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex emptytree \ fairness \ idle \ idle1 \ idle2 \ idle_state \ io_uring \ pivot_ring \ cycle \ result.cycle.jax.ops \ result.yield.jax.ops \ result.churn.jax.ops \ result.cycle.jax.ns \ result.yield.jax.ns \ result.churn.jax.ns \ result.cycle.low.jax.ops \ result.yield.low.jax.ops \ result.churn.low.jax.ops \ result.cycle.low.jax.ns \ result.yield.low.jax.ns \ result.churn.low.jax.ns \ result.memcd.updt.qps \ result.memcd.updt.lat \ result.memcd.rate.qps \ result.memcd.rate.99th \ } ## Define the documents that need to be made. all: thesis.pdf thesis.pdf:${TEXTS} ${FIGURES}${PICTURES} thesis.tex glossary.tex local.bib ../../../LaTeXmacros/common.tex ../../../LaTeXmacros/common.sty thesis.pdf: ${TEXTS}${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib${LaTMac}/common.tex ${LaTMac}/common.sty${BibRep}/pl.bib DOCUMENT = thesis.pdf python3 $<$@ build/result.%.ns.svg : data/% | ${Build} ../../../../benchmark/plot.py -f$< -o $@ -y "ns per ops" cycle_jax_ops_FLAGS = --MaxY=120000000 cycle_low_jax_ops_FLAGS = --MaxY=120000000 cycle_jax_ns_FLAGS = --MaxY=2000 cycle_low_jax_ns_FLAGS = --MaxY=2000 build/result.%.ops.svg : data/% |${Build} ../../../../benchmark/plot.py -f $< -o$@ -y "Ops per second" yield_jax_ops_FLAGS = --MaxY=150000000 yield_low_jax_ops_FLAGS = --MaxY=150000000 yield_jax_ns_FLAGS = --MaxY=1500 yield_low_jax_ns_FLAGS = --MaxY=1500 build/result.%.ns.svg : data/% Makefile | ${Build} ../../../../benchmark/plot.py -f$< -o $@ -y "ns per ops/procs"$($(subst .,_,$*)_ns_FLAGS) build/result.%.ops.svg : data/% Makefile | ${Build} ../../../../benchmark/plot.py -f$< -o $@ -y "Ops per second"$($(subst .,_,$*)_ops_FLAGS) build/result.memcd.updt.qps.svg : data/memcd.updt Makefile | ${Build} ../../../../benchmark/plot.py -f$< -o $@ -y "Actual QPS" -x "Update Ratio" build/result.memcd.updt.lat.svg : data/memcd.updt Makefile |${Build} ../../../../benchmark/plot.py -f $< -o$@ -y "Average Read Latency" -x "Update Ratio" build/result.memcd.rate.qps.svg : data/memcd.rate Makefile | ${Build} ../../../../benchmark/plot.py -f$< -o $@ -y "Actual QPS" -x "Target QPS" build/result.memcd.rate.99th.svg : data/memcd.rate Makefile |${Build} ../../../../benchmark/plot.py -f $< -o$@ -y "Tail Read Latency" -x "Target QPS" ## pstex with inverted colors

• ## doc/theses/thierry_delisle_PhD/thesis/local.bib

 r015925a note = "[Online; accessed 12-April-2022]" } % RMR notes : % [05/04, 12:36] Trevor Brown %     i don't know where rmr complexity was first introduced, but there are many many many papers that use the term and define it % ​[05/04, 12:37] Trevor Brown %     here's one paper that uses the term a lot and links to many others that use it... might trace it to something useful there https://drops.dagstuhl.de/opus/volltexte/2021/14832/pdf/LIPIcs-DISC-2021-30.pdf % ​[05/04, 12:37] Trevor Brown %     another option might be to cite a textbook % ​[05/04, 12:42] Trevor Brown %     but i checked two textbooks in the area i'm aware of and i don't see a definition of rmr complexity in either % ​[05/04, 12:42] Trevor Brown %     this one has a nice statement about the prevelance of rmr complexity, as well as some rough definition % ​[05/04, 12:42] Trevor Brown %     https://dl.acm.org/doi/pdf/10.1145/3465084.3467938 % Race to idle notes : % [13/04, 16:56] Martin Karsten %       I don't have a citation. Google brings up this one, which might be good: % % https://doi.org/10.1137/1.9781611973099.100
• ## doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex

 r015925a Networked ZIPF Nginx : 5Gb still good, 4Gb starts to suffer Cforall : 10Gb too high, 4 Gb too low \section{Memcached} In Memory \subsection{Benchmark Environment} These experiments are run on a cluster of homogeneous Supermicro SYS-6017R-TDF compute nodes with the following characteristics: The server runs Ubuntu 20.04.3 LTS on top of Linux Kernel 5.11.0-34. Each node has 2 Intel(R) Xeon(R) CPU E5-2620 v2 running at 2.10GHz. These CPUs have 6 cores per CPU and 2 \glspl{hthrd} per core, for a total of 24 \glspl{hthrd}. The CPUs each have 384 KB, 3 MB and 30 MB of L1, L2 and L3 caches respectively. Each node is connected to the network through a Mellanox 10 Gigabit Ethernet port. The network route uses 1 Mellanox SX1012 10/40 Gigabit Ethernet cluster switch. Networked \begin{figure} \centering \input{result.memcd.updt.qps.pstex_t} \caption[Memcached Benchmark : Throughput on Intel]{Memcached Benchmark : Throughput on Intel\smallskip\newline Description} \label{fig:memcd:updt:qps} \end{figure} \begin{figure} \centering \input{result.memcd.updt.lat.pstex_t} \caption[Memcached Benchmark : Read Latency on Intel]{Memcached Benchmark : Read Latency on Intel\smallskip\newline Description} \label{fig:memcd:updt:lat} \end{figure} \begin{figure} \centering \input{result.memcd.rate.qps.pstex_t} \caption[Memcached Benchmark : Throughput on Intel]{Memcached Benchmark : Throughput on Intel\smallskip\newline Description} \label{fig:memcd:rate:qps} \end{figure} \begin{figure} \centering \input{result.memcd.rate.99th.pstex_t} \caption[Memcached Benchmark : Tail Read Latency on Intel]{Memcached Benchmark : Tail Read Latency on Intel\smallskip\newline Description} \label{fig:memcd:rate:tail} \end{figure}
• ## doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex

 r015925a \section{Benchmark Environment} All of these benchmarks are run on two distinct hardware environments, an AMD and an INTEL machine. For all benchmarks, \texttt{taskset} is used to limit the experiment to 1 NUMA Node with no hyperthreading. If more \glspl{hthrd} are needed, then 1 NUMA Node with hyperthreading is used. If still more \glspl{hthrd} are needed then the experiment is limited to as few NUMA Nodes as needed. \paragraph{AMD} The AMD machine is a server with two AMD EPYC 7662 CPUs and 256GB of DDR4 RAM. \section{Cycling latency} \begin{figure} \centering \input{cycle.pstex_t} \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.} \label{fig:cycle} \end{figure} The most basic evaluation of any ready queue is the latency needed to push and pop one element from the ready-queue. Since these two operations also describe a \texttt{yield} operation, many systems use this as the most basic benchmark. Note that this problem is only present on SMP machines and is significantly mitigated by the fact that there are multiple rings in the system. \begin{figure} \centering \input{cycle.pstex_t} \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.} \label{fig:cycle} \end{figure} To prevent this benchmark from being dominated by the idle-sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available. Beyond this point, adding more rings serves to further mitigate the idle-sleep handling. The actual benchmark is more complicated to handle termination, but that simply requires using a binary semaphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition. \begin{lstlisting} Thread.main() { count := 0 for { wait() this.next.wake() count ++ if must_stop() { break } } global.count += count } \end{lstlisting} \begin{figure} \centering \input{result.cycle.jax.ops.pstex_t} \vspace*{-10pt} \label{fig:cycle:ns:jax} \end{figure} Figure~\ref{fig:cycle:code} shows pseudo code for this benchmark. \begin{figure} \begin{lstlisting} Thread.main() { count := 0 for { wait() this.next.wake() count ++ if must_stop() { break } } global.count += count } \end{lstlisting} \caption[Cycle Benchmark : Pseudo Code]{Cycle Benchmark : Pseudo Code} \label{fig:cycle:code} \end{figure} \subsection{Results} \begin{figure} \subfloat[][Throughput, 100 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.cycle.jax.ops.pstex_t} } \label{fig:cycle:jax:ops} } \subfloat[][Throughput, 1 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.cycle.low.jax.ops.pstex_t} } \label{fig:cycle:jax:low:ops} } \subfloat[][Latency, 100 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.cycle.jax.ns.pstex_t} } } \subfloat[][Latency, 1 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.cycle.low.jax.ns.pstex_t} } \label{fig:cycle:jax:low:ns} } \caption[Cycle Benchmark on Intel]{Cycle Benchmark on Intel\smallskip\newline Throughput as a function of \proc count, using 100 cycles per \proc, 5 \ats per cycle.} \label{fig:cycle:jax} \end{figure} Figure~\ref{fig:cycle:jax} shows the throughput as a function of \proc count, with the following constants: each run uses 100 cycles per \proc, 5 \ats per cycle. 
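The termination handling mentioned above can be sketched with POSIX semaphores standing in for park/unpark (illustrative only; the runtime's actual primitives differ, and must_stop/global_count are assumed harness names):

	#include <semaphore.h>
	#include <stdbool.h>

	typedef struct Thread {
		sem_t sem;	            // binary semaphore replacing raw park/unpark; sem_init'ed by the harness
		struct Thread * next;	// next thread in the cycle
	} Thread;

	extern bool must_stop( void );	   // assumed harness function
	extern unsigned long global_count; // assumed harness counter

	void thread_main( Thread * this ) {
		unsigned long count = 0;
		for ( ;; ) {
			sem_wait( &this->sem );	      // wait()
			sem_post( &this->next->sem ); // this.next.wake()
			count += 1;
			if ( must_stop() ) break;     // stop check after the V, per the ordering noted above
		}
		__atomic_fetch_add( &global_count, count, __ATOMIC_RELAXED );
	}

Posting before checking the stop condition guarantees the next thread in the cycle is always unblocked, so no V is lost when a thread exits.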
\todo{results discussion} \section{Yield} Its only interesting variable is the number of \glspl{at} per \glspl{proc}, where ratios close to 1 mean the ready queue(s) could be empty. This sometimes puts more strain on the idle sleep handling, compared to scenarios where there is clearly plenty of work to be done. \todo{code, setup, results} \begin{lstlisting} Thread.main() { count := 0 while !stop { yield() count ++ } global.count += count } \end{lstlisting} Figure~\ref{fig:yield:code} shows pseudo code for this benchmark; the ``wait/wake-next'' is simply replaced by a yield. \begin{figure} \begin{lstlisting} Thread.main() { count := 0 for { yield() count ++ if must_stop() { break } } global.count += count } \end{lstlisting} \caption[Yield Benchmark : Pseudo Code]{Yield Benchmark : Pseudo Code} \label{fig:yield:code} \end{figure} \subsection{Results} \begin{figure} \subfloat[][Throughput, 100 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.yield.jax.ops.pstex_t} } \label{fig:yield:jax:ops} } \subfloat[][Throughput, 1 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.yield.low.jax.ops.pstex_t} } \label{fig:yield:jax:low:ops} } \subfloat[][Latency, 100 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.yield.jax.ns.pstex_t} } \label{fig:yield:jax:ns} } \subfloat[][Latency, 1 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.yield.low.jax.ns.pstex_t} } \label{fig:yield:jax:low:ns} } \caption[Yield Benchmark on Intel]{Yield Benchmark on Intel\smallskip\newline Throughput as a function of \proc count, using 1 \ats per \proc.} \label{fig:yield:jax} \end{figure} Figure~\ref{fig:yield:ops:jax} shows the throughput as a function of \proc count, with the following constants: each run uses 100 \ats per \proc. \todo{results discussion} In either case, this benchmark aims to highlight how each scheduler handles these cases, since both cases can lead to performance degradation if they are not handled correctly. To achieve this the benchmark uses a fixed size array of \newterm{chair}s, where a chair is a data structure that holds a single blocked \gls{at}. When a \gls{at} attempts to block on the chair, it must first unblocked the \gls{at} currently blocked on said chair, if any. This creates a flow where \glspl{at} push each other out of the chairs before being pushed out themselves. For this benchmark to work however, the number of \glspl{at} must be equal or greater to the number of chairs plus the number of \glspl{proc}. To achieve this the benchmark uses a fixed size array of semaphores. Each \gls{at} picks a random semaphore, \texttt{V}s it to unblock a waiting \at and then \texttt{P}s on the semaphore. This creates a flow where \glspl{at} push each other out of the semaphores before being pushed out themselves. For this benchmark to work, however, the number of \glspl{at} must be equal to or greater than the number of semaphores plus the number of \glspl{proc}. Note that the nature of these semaphores means the counter can go beyond 1, which could lead to calls to \texttt{P} not blocking. 
\todo{code, setup, results} for { r := random() % len(spots) next := xchg(spots[r], this) if next { next.wake() } wait() spots[r].V() spots[r].P() count ++ if must_stop() { break } } \end{lstlisting} \begin{figure} \subfloat[][Throughput, 100 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.churn.jax.ops.pstex_t} } \label{fig:churn:jax:ops} } \subfloat[][Throughput, 1 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.churn.low.jax.ops.pstex_t} } \label{fig:churn:jax:low:ops} } \subfloat[][Latency, 100 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.churn.jax.ns.pstex_t} } } \subfloat[][Latency, 1 \ats per \proc]{ \resizebox{0.5\linewidth}{!}{ \input{result.churn.low.jax.ns.pstex_t} } \label{fig:churn:jax:low:ns} } \caption[Churn Benchmark on Intel]{\centering Churn Benchmark on Intel\smallskip\newline Throughput and latency of the Churn benchmark on the Intel machine. Throughput is the total operations per second across all cores. Latency is the duration of each operation.} \label{fig:churn:jax} \end{figure} \section{Locality}
• ## doc/theses/thierry_delisle_PhD/thesis/text/intro.tex

 r015925a \todo{A proper intro} The C programming language\cit{C} The C programming language~\cite{C11} The \CFA programming language\cite{cfa:frontpage,cfa:typesystem} which extends the C programming language to add modern safety and productiviy features while maintaining backwards compatibility. Among it's productiviy features, \CFA introduces support for threading\cit{CFA Concurrency}, to allow programmers to write modern concurrent and parallel programming. While previous work on the concurrent package of \CFA focused on features and interfaces, this thesis focuses on performance, introducing \glsxtrshort{api} changes only when required by performance considerations. More specifically, this thesis concentrates on scheduling and \glsxtrshort{io}. Prior to this work, the \CFA runtime used a strictly \glsxtrshort{fifo} \gls{rQ}. The \CFA programming language~\cite{cfa:frontpage,cfa:typesystem} extends the C programming language by adding modern safety and productivity features, while maintaining backwards compatibility. Among its productivity features, \CFA supports user-level threading~\cite{Delisle21} allowing programmers to write modern concurrent and parallel programs. My previous master's thesis on concurrency in \CFA focused on features and interfaces. This Ph.D.\ thesis focuses on performance, introducing \glsxtrshort{api} changes only when required by performance considerations. Specifically, this work concentrates on scheduling and \glsxtrshort{io}. Prior to this work, the \CFA runtime used a strict \glsxtrshort{fifo} \gls{rQ} and had no non-blocking I/O capabilities at the user-thread level. This work exclusively concentrates on Linux as it's operating system since the existing \CFA runtime and compiler does not already support other operating systems. Furthermore, as \CFA is yet to be released, supporting version of Linux older than the latest version is not a goal of this work. As a research project, this work builds exclusively on newer versions of the Linux operating-system and gcc/clang compilers. While \CFA is released, supporting older versions of Linux ($<$~Ubuntu 16.04) and gcc/clang compilers ($<$~gcc 6.0) is not a goal of this work.
• ## doc/theses/thierry_delisle_PhD/thesis/text/practice.tex

 r015925a More precisely, \CFA supports adding \procs using the RAII object @processor@. These objects can be created at any time and can be destroyed at any time. They are normally create as automatic stack variables, but this is not a requirement. They are normally created as automatic stack variables, but this is not a requirement. The consequence is that the scheduler and \io subsystems must support \procs coming in and out of existence. \section{Manual Resizing} The consequence of dynamically changing the number of \procs is that all internal arrays that are sized based on the number of \procs neede to be \texttt{realloc}ed. This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing because there is no guarantee the \proc causing the shrink had the highest index. Therefore indexes need to be reassigned to preserve contiguous indexes.}. There are no performance requirements, within reason, for resizing since this is usually considered as part of setup and teardown. Manual resizing is expected to be a rare operation. Programmers are mostly expected to resize clusters on startup or teardown. Therefore dynamically changing the number of \procs is an appropriate moment to allocate or free resources to match the new state. As such all internal arrays that are sized based on the number of \procs need to be \texttt{realloc}ed. This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing when shrinking because some indexes are expected to refer to dense contiguous resources and there is no guarantee the resource being removed has the highest index.}. There are no performance requirements, within reason, for resizing since it is expected to be rare. However, this operation has strict correctness requirements since shrinking and idle sleep can easily lead to deadlocks. It should also avoid as much as possible any effect on performance when the number of \procs remains constant. This later requirement prehibits simple solutions, like simply adding a global lock to these arrays. This latter requirement prohibits naive solutions, like simply adding a global lock to the ready-queue arrays. \subsection{Read-Copy-Update} In this pattern, resizing is done by creating a copy of the internal data structures, updating the copy with the desired changes, and then attempting an Indiana Jones Switch to replace the original with the copy. This approach potentially has the advantage that it may not need any synchronization to do the switch. The switch definitely implies a race where \procs could still use the previous, original, data structure after the copy was switched in. The important question then becomes whether or not this race can be recovered from. If the changes that arrived late can be transferred from the original to the copy then this solution works. For linked-lists, dequeing is somewhat of a problem. However, there is a race where \procs could still use the previous, original, data structure after the copy was switched in. This race not only requires some added memory reclamation scheme, it also requires that operations made on the stale original version be eventually moved to the copy. For linked-lists, enqueuing is only somewhat problematic: \ats enqueued to the original queues need to be transferred to the new, which might not preserve ordering. Dequeuing is more challenging. 
Dequeuing from the original will not necessarily update the copy, which could lead to multiple \procs dequeuing the same \at. Fixing this requires making the array contain pointers to subqueues rather than the subqueues themselves. Fixing this requires more synchronization or more indirection on the queues. Another challenge is that the original must be kept until all \procs have witnessed the change. In addition to users manually changing the number of \procs, it is desirable to support ``removing'' \procs when there are not enough \ats for all the \procs to be useful. While manual resizing is expected to be rare, the number of \ats is expected to vary much more which means \procs may need to be ``removed'' for only short periods of time. Furthermore, race conditions that spuriously lead to the impression no \ats are ready are actually common in practice. Therefore \procs should not be actually \emph{removed} but simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready. Furthermore, race conditions that spuriously lead to the impression that no \ats are ready are actually common in practice. Therefore resources associated with \procs should not be freed but \procs simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready. This state is referred to as \newterm{Idle-Sleep}. The \CFA scheduler simply follows the ``Race-to-Idle''\cit{https://doi.org/10.1137/1.9781611973099.100} approach where a sleeping \proc is woken any time an \at becomes ready and \procs go to idle sleep anytime they run out of work. \section{Sleeping} As usual, the cornerstone of any feature related to the kernel is the choice of system call. In terms of blocking a \gls{kthrd} until some event occurs, the Linux kernel has many available options: \paragraph{\texttt{pthread\_mutex}/\texttt{pthread\_cond}} The most classic option is to use some combination of \texttt{pthread\_mutex} and \texttt{pthread\_cond}. These serve as straightforward mutual exclusion and synchronization tools and allow a \gls{kthrd} to wait on a \texttt{pthread\_cond} until signalled. While this approach is generally perfectly appropriate for \glspl{kthrd} waiting on each other, \io operations do not signal \texttt{pthread\_cond}s. For \io results to wake a \proc waiting on a \texttt{pthread\_cond}, a different \gls{kthrd} must be woken up first, and then the \proc can be signalled. \subsection{\texttt{io\_uring} and Epoll} An alternative is to flip the problem on its head and block waiting for \io, using \texttt{io\_uring} or even \texttt{epoll}. This creates the inverse situation, where \io operations directly wake sleeping \procs but waking a \proc from a running \gls{kthrd} must use an indirect scheme. This generally takes the form of creating a file descriptor, \eg, a dummy file, a pipe or an event fd, and using that file descriptor when \procs need to wake each other. This leads to additional complexity because there can be a race between these artificial \io operations and genuine \io operations. If not handled correctly, this can lead to the artificial files going out of sync. \subsection{Event FDs} Another interesting approach is to use an event file descriptor\cit{eventfd}. This Linux feature is a file descriptor that behaves like \io, \ie, uses \texttt{read} and \texttt{write}, but also behaves like a semaphore. Indeed, all reads and writes must use 64-bit values\footnote{On 64-bit Linux; a 32-bit Linux would use 32-bit values.}. 
Writes add their values to the buffer, that is arithmetic addition and not buffer append, and reads zero out the buffer and return the buffer values so far\footnote{This is without the \texttt{EFD\_SEMAPHORE} flag. This flag changes the behavior of \texttt{read} but is not needed for this work.}. If a read is made while the buffer is already 0, the read blocks until a non-0 value is added. What makes this feature particularly interesting is that \texttt{io\_uring} supports the \texttt{IORING\_REGISTER\_EVENTFD} command, to register an event fd to a particular instance. Once that instance is registered, any \io completion will result in \texttt{io\_uring} writing to the event FD. This means that a \proc waiting on the event FD can be \emph{directly} woken up by either other \procs or incoming \io. \begin{figure} \centering \input{idle1.pstex_t} \caption[Basic Idle Sleep Data Structure]{Basic Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock. Each \proc has a private event FD.} \label{fig:idle1} \end{figure} \section{Tracking Sleepers} Tracking which \procs are in idle sleep requires a data structure holding all the sleeping \procs, but more importantly it requires a concurrent \emph{handshake} so that no \at is stranded on a ready-queue with no active \proc. The classic challenge occurs when a \at is made ready while a \proc is going to sleep: there is a race where the new \at may not see the sleeping \proc and the sleeping \proc may not see the ready \at. Furthermore, the Race-to-Idle'' approach means that there is some \section{Sleeping} \subsection{Event FDs} \subsection{Epoll} \subsection{\texttt{io\_uring}} \section{Reducing Latency} Since \ats can be made ready by timers, \io operations or other events outside a cluster, this race can occur even if the \proc going to sleep is the only \proc awake. As a result, improper handling of this race can lead to all \procs going to sleep and the system deadlocking. Furthermore, the ``Race-to-Idle'' approach means that there may be contention on the data structure tracking sleepers. Contention slowing down \procs attempting to sleep or wake-up can be tolerated. These \procs are not doing useful work and therefore not contributing to overall performance. However, notifying, checking if a \proc must be woken-up and doing so if needed, can significantly affect overall performance and must be low cost. \subsection{Sleepers List} Each cluster maintains a list of idle \procs, organized as a stack. This ordering hopefully allows \procs at the tail to stay in idle sleep for extended periods of time. Because of these unbalanced performance requirements, the algorithm tracking sleepers is designed to have idle \procs handle as much of the work as possible. The idle \procs maintain the list of sleepers among themselves and notifying a sleeping \proc takes as little work as possible. This approach means that maintaining the list is fairly straightforward. The list can simply use a single lock per cluster and only \procs that are getting in and out of idle state will contend for that lock. This approach also simplifies notification. Indeed, \procs need to be notified when a new \at is readied, but they also must be notified during resizing, so the \gls{kthrd} can be joined. This means that whichever entity removes idle \procs from the sleeper list must be able to do so in any order. Using a simple lock over this data structure makes the removal much simpler than using a lock-free data structure. 
The notification process then simply needs to wake up the desired idle \proc, using \texttt{pthread\_cond\_signal}, \texttt{write} on an fd, etc., and the \proc will handle the rest. \subsection{Reducing Latency} As mentioned in this section, \procs going idle for extremely short periods of time is likely in certain common scenarios. Therefore, the latency of the system calls to read from and write to the event fd can actually negatively affect overall performance in a notable way. It is important to reduce the latency and contention of the notification as much as possible. Figure~\ref{fig:idle1} shows the basic idle sleep data structure. For the notifiers, this data structure can cause contention on the lock and the event fd syscall can cause notable latency. \begin{figure} \centering \input{idle2.pstex_t} \caption[Improved Idle Sleep Data Structure]{Improved Idle Sleep Data Structure \smallskip\newline An atomic pointer is added to the list, pointing to the Event FD of the first \proc on the list.} \label{fig:idle2} \end{figure} The contention is mostly due to the lock on the list needing to be held to get to the head \proc. That lock can be contended by \procs attempting to go to sleep, \procs waking or notification attempts. The contention from the \procs attempting to go to sleep can be mitigated slightly by using \texttt{try\_acquire} instead, so the \procs simply continue searching for \ats if the lock is held. This trick cannot be used for waking \procs since they are not in a state where they can run \ats. However, it is worth noting that notification does not strictly require accessing the list or the head \proc. Therefore, contention can be reduced notably by having notifiers avoid the lock entirely and adding a pointer to the event fd of the first idle \proc, as in Figure~\ref{fig:idle2}. To avoid contention between the notifiers, instead of simply reading the atomic pointer, notifiers atomically exchange it to \texttt{null} so only one notifier will contend on the system call. \begin{figure} \centering \input{idle_state.pstex_t} \caption[Improved Idle Sleep Data Structure]{Improved Idle Sleep Data Structure \smallskip\newline An atomic pointer is added to the list, pointing to the Event FD of the first \proc on the list.} \label{fig:idle:state} \end{figure} The next optimization that can be done is to avoid the latency of the event fd when possible. This can be done by adding what is effectively a benaphore\cit{benaphore} in front of the event fd. A simple three-state flag is added beside the event fd to avoid unnecessary system calls, as shown in Figure~\ref{fig:idle:state}. The flag starts in state \texttt{SEARCH}, while the \proc is searching for \ats to run. The \proc then confirms the sleep by atomically swapping the state to \texttt{SLEEP}. If the previous state was still \texttt{SEARCH}, then the \proc does read the event fd. Meanwhile, notifiers atomically exchange the state to \texttt{AWAKE}. If the previous state was \texttt{SLEEP}, then the notifier must write to the event fd. However, if the notify arrives almost immediately after the \proc marks itself idle, then both reads and writes on the event fd can be omitted, which reduces latency notably. This leads to the final data structure shown in Figure~\ref{fig:idle}. \begin{figure} \centering \input{idle.pstex_t} \caption[Low-latency Idle Sleep Data Structure]{Low-latency Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock. 
Each \proc has a private event FD with a benaphore in front of it. The list also has an atomic pointer to the event fd and benaphore of the first \proc on the list.} \label{fig:idle} \end{figure}
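A sketch of the three-state benaphore described above, using GCC atomics and a Linux eventfd (illustrative only; the runtime's actual code differs, and the reset to SEARCH is a simplification):

	#include <stdint.h>
	#include <unistd.h>
	#include <sys/eventfd.h>

	enum { SEARCH, SLEEP, AWAKE };

	struct idle_fd {
		int state;	// SEARCH/SLEEP/AWAKE, accessed atomically
		int efd;	// created once with eventfd( 0, 0 )
	};

	// Called by a proc that found no work: confirm the sleep, then block.
	void idle_sleep( struct idle_fd * this ) {
		int prev = __atomic_exchange_n( &this->state, SLEEP, __ATOMIC_SEQ_CST );
		if ( prev == SEARCH ) {	// no notify raced in: really block
			uint64_t val;
			read( this->efd, &val, sizeof(val) );	// blocks until a write
		}	// else a notifier already marked AWAKE: the read is skipped
		__atomic_store_n( &this->state, SEARCH, __ATOMIC_SEQ_CST );
	}

	// Called by a notifier that readied a thread.
	void idle_notify( struct idle_fd * this ) {
		int prev = __atomic_exchange_n( &this->state, AWAKE, __ATOMIC_SEQ_CST );
		if ( prev == SLEEP ) {	// proc already committed to sleeping
			uint64_t one = 1;
			write( this->efd, &one, sizeof(one) );	// wake it through the eventfd
		}	// else the proc is still searching: both system calls are elided
	}

Only the SEARCH/AWAKE crossing avoids both system calls; once the proc has committed to SLEEP, the notifier's write is still required, matching the latency optimization described above.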
• ## doc/theses/thierry_delisle_PhD/thesis/thesis.tex

• ## libcfa/src/Makefile.am

 r015925a # The built sources must not depend on the installed inst_headers_src AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr -I$(srcdir)/concurrency $(if$(findstring ${gdbwaittarget},${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@ AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@ AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions -fvisibility=hidden -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@ AM_CCASFLAGS = -g -Wall -Werror=return-type -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@ CFACC = @CFACC@ prelude.o : prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@ ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -o${@} ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -fvisibility=default -o${@} prelude.lo: prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@ ${AM_V_GEN}$(LIBTOOL) $(AM_V_lt) --tag=CC$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile \$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -o${@} $(CFACOMPILE) -quiet -XCFA,-l${<} -c -fvisibility=default -o ${@} concurrency/io/call.cfa:$(srcdir)/concurrency/io/call.cfa.in
• ## libcfa/src/algorithms/range_iterator.cfa

 r015925a #include void main(RangeIter & this) { #include "bits/defs.hfa" void main(RangeIter & this) libcfa_public { for() { this._start = -1;
• ## libcfa/src/assert.cfa

 r015925a #include <unistd.h>                                                            // STDERR_FILENO #include "bits/debug.hfa" #include "bits/defs.hfa" extern "C" { // called by macro assert in assert.h void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) { // would be cool to remove libcfa_public but it's needed for libcfathread void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) libcfa_public { __cfaabi_bits_print_safe( STDERR_FILENO, CFA_ASSERT_FMT ".\n", assertion, __progname, function, line, file ); abort(); // called by macro assertf void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) { // would be cool to remove libcfa_public but it's needed for libcfathread void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) libcfa_public { __cfaabi_bits_acquire(); __cfaabi_bits_print_nolock( STDERR_FILENO, CFA_ASSERT_FMT ": ", assertion, __progname, function, line, file );
• ## libcfa/src/bits/debug.cfa

 r015925a #include #include "bits/defs.hfa" enum { buffer_size = 4096 }; static char buffer[ buffer_size ]; extern "C" { void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) { // would be cool to remove libcfa_public but it's needed for libcfathread void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) libcfa_public { // ensure all data is written for ( int count = 0, retcode; count < len; count += retcode ) { void __cfaabi_bits_release() __attribute__((__weak__)) {} int __cfaabi_bits_print_safe  ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) { // would be cool to remove libcfa_public but it's needed for libcfathread int __cfaabi_bits_print_safe  ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) libcfa_public { va_list args;
• ## libcfa/src/bits/defs.hfa

 r015925a #define __cfa_dlink(x) struct { struct x * next; struct x * back; } __dlink_substitute #endif #define libcfa_public __attribute__((visibility("default"))) #ifdef __cforall
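The libcfa_public macro added above pairs with the -fvisibility=hidden flag now passed to the library build: every symbol defaults to hidden and only annotated ones are exported. A minimal stand-alone illustration in plain C (not libcfa code; note plain C places the attribute before the declarator, whereas the \CFA diffs above place it after):

	// lib.c -- build with: gcc -fPIC -fvisibility=hidden -shared lib.c -o lib.so
	#define libcfa_public __attribute__((visibility("default")))

	// Hidden by default: callable inside lib.so, absent from its dynamic symbol table.
	int internal_helper( int x ) {
		return x * 2;
	}

	// Explicitly exported: remains visible to programs linking against lib.so.
	libcfa_public int api_entry( int x ) {
		return internal_helper( x ) + 1;
	}

Hiding internal symbols shrinks the dynamic symbol table and prevents applications from accidentally binding to library internals.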
• ## libcfa/src/bits/weakso_locks.cfa

 r015925a #include "bits/weakso_locks.hfa" #pragma GCC visibility push(default) void  ?{}( blocking_lock &, bool, bool ) {} void ^?{}( blocking_lock & ) {}
• ## libcfa/src/common.cfa

 r015925a #include <stdlib.h>                                     // div_t, *div #pragma GCC visibility push(default) //--------------------------------------- 
• ## libcfa/src/concurrency/alarm.cfa

 r015925a //============================================================================================= void sleep( Duration duration ) { void sleep( Duration duration ) libcfa_public { alarm_node_t node = { active_thread(), duration, 0`s };

• ## libcfa/src/concurrency/coroutine.cfa

 r015925a //----------------------------------------------------------------------------- forall(T &) void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) { void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public { dst->virtual_table = src->virtual_table; dst->the_coroutine = src->the_coroutine; forall(T &) const char * msg(CoroutineCancelled(T) *) { const char * msg(CoroutineCancelled(T) *) libcfa_public { return "CoroutineCancelled(...)"; } forall(T & | is_coroutine(T)) void __cfaehm_cancelled_coroutine( T & cor, coroutine$* desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) { T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) ) libcfa_public { verify( desc->cancellation ); desc->state = Cancelled; void __stack_prepare( __stack_info_t * this, size_t create_size ); void __stack_clean  ( __stack_info_t * this ); static void __stack_clean  ( __stack_info_t * this ); //----------------------------------------------------------------------------- } void ?{}( coroutine$& this, const char name[], void * storage, size_t storageSize ) with( this ) { void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) { (this.context){0p, 0p}; (this.stack){storage, storageSize}; } void ^?{}(coroutine$& this) { void ^?{}(coroutine$& this) libcfa_public { if(this.state != Halted && this.state != Start && this.state != Primed) { coroutine$* src = active_coroutine(); // Part of the Public API // Not inline since only ever called once per coroutine forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); }) void prime(T& cor) { forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); }) void prime(T& cor) libcfa_public { coroutine$* this = get_coroutine(cor); assert(this->state == Start); } [void *, size_t] __stack_alloc( size_t storageSize ) { static [void *, size_t] __stack_alloc( size_t storageSize ) { const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment assert(__page_size != 0l); } void __stack_clean  ( __stack_info_t * this ) { static void __stack_clean  ( __stack_info_t * this ) { void * storage = this->storage->limit; } void __stack_prepare( __stack_info_t * this, size_t create_size ) { void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public { const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment bool userStack;

• ## libcfa/src/concurrency/io/types.hfa

 r015925a #pragma once #include extern "C" { #include #include "iofwd.hfa" #include "kernel/fwd.hfa" #include "limits.hfa" #if defined(CFA_HAVE_LINUX_IO_URING_H) const __u32 tail = *this->cq.tail; if(head == tail) return MAX; if(head == tail) return ULLONG_MAX; return this->cq.ts;
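The head == tail comparison above is the standard emptiness test for an io_uring-style completion ring, and the change returns ULLONG_MAX (an impossible timestamp) rather than the previous MAX. A small sketch of the sentinel pattern, with hypothetical field names:

    #include <limits.h>   // ULLONG_MAX

    struct cq_view { unsigned head, tail; unsigned long long ts; };

    // Return the pending completion's timestamp, or ULLONG_MAX if the ring is empty.
    static unsigned long long newest_ready( const struct cq_view * q ) {
        if ( q->head == q->tail ) return ULLONG_MAX;   // empty ring: sentinel
        return q->ts;
    }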
• ## libcfa/src/concurrency/kernel.cfa

 r015925a // KERNEL_ONLY void returnToKernel() { static void returnToKernel() { /* paranoid */ verify( ! __preemption_enabled() ); coroutine$* proc_cor = get_coroutine(kernelTLS().this_processor->runner); } void unpark( thread$ * thrd, unpark_hint hint ) { void unpark( thread$* thrd, unpark_hint hint ) libcfa_public { if( !thrd ) return; } void park( void ) { void park( void ) libcfa_public { __disable_interrupts_checked(); /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION ); // KERNEL ONLY bool force_yield( __Preemption_Reason reason ) { bool force_yield( __Preemption_Reason reason ) libcfa_public { __disable_interrupts_checked(); thread$ * thrd = kernelTLS().this_thread; //----------------------------------------------------------------------------- // Debug bool threading_enabled(void) __attribute__((const)) { bool threading_enabled(void) __attribute__((const)) libcfa_public { return true; } // Statistics #if !defined(__CFA_NO_STATISTICS__) void print_halts( processor & this ) { void print_halts( processor & this ) libcfa_public { this.print_halts = true; } } void crawl_cluster_stats( cluster & this ) { static void crawl_cluster_stats( cluster & this ) { // Stop the world, otherwise stats could get really messed-up // this doesn't solve all problems but does solve many void print_stats_now( cluster & this, int flags ) { void print_stats_now( cluster & this, int flags ) libcfa_public { crawl_cluster_stats( this ); __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );

• ## libcfa/src/concurrency/monitor.cfa

 r015925a static inline void restore( monitor$* ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] ); static inline void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ); static inline void ?{}(__condition_criterion_t & this ); static inline void ?{}(__condition_criterion_t & this, monitor$* target, __condition_node_t * owner ); static inline void init ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); static inline void init_push( __lock_size_t count, monitor$* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); // Leave single monitor void __leave( monitor$ * this ) { static void __leave( monitor$* this ) { // Lock the monitor spinlock lock( this->lock __cfaabi_dbg_ctx2 ); // Leave single monitor for the last time void __dtor_leave( monitor$ * this, bool join ) { static void __dtor_leave( monitor$* this, bool join ) { __cfaabi_dbg_debug_do( if( active_thread() != this->owner ) { // Ctor for monitor guard // Sorts monitors before entering void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) { void ?{}( monitor_guard_t & this, monitor$* m [], __lock_size_t count, fptr_t func ) libcfa_public { thread$ * thrd = active_thread(); } void ?{}( monitor_guard_t & this, monitor$* m [], __lock_size_t count ) { void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) libcfa_public { this{ m, count, 0p }; } // Dtor for monitor guard void ^?{}( monitor_guard_t & this ) { void ^?{}( monitor_guard_t & this ) libcfa_public { // __cfaabi_dbg_print_safe( "MGUARD : leaving %d\n", this.count); // Ctor for monitor guard // Sorts monitors before entering void ?{}( monitor_dtor_guard_t & this, monitor$* m [], fptr_t func, bool join ) { void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) libcfa_public { // optimization thread$* thrd = active_thread(); // Dtor for monitor guard void ^?{}( monitor_dtor_guard_t & this ) { void ^?{}( monitor_dtor_guard_t & this ) libcfa_public { // Leave the monitors in order __dtor_leave( this.m, this.join ); //----------------------------------------------------------------------------- // Internal scheduling types void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) { static void ?{}(__condition_node_t & this, thread$* waiting_thread, __lock_size_t count, uintptr_t user_info ) { this.waiting_thread = waiting_thread; this.count = count; } void ?{}(__condition_criterion_t & this ) with( this ) { static void ?{}(__condition_criterion_t & this ) with( this ) { ready = false; target = 0p; } void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) { static void ?{}(__condition_criterion_t & this, monitor$* target, __condition_node_t & owner ) { this.ready = false; this.target = target; //----------------------------------------------------------------------------- // Internal scheduling void wait( condition & this, uintptr_t user_info = 0 ) { void wait( condition & this, uintptr_t user_info = 0 ) libcfa_public { brand_condition( this ); } bool signal( condition & this ) { bool signal( condition & this ) libcfa_public { if( is_empty( this ) ) { return false; } } bool signal_block( condition & this ) { bool signal_block( condition & this ) libcfa_public { if( !this.blocked.head ) { return false; } // Access the 
user_info of the thread waiting at the front of the queue uintptr_t front( condition & this ) { uintptr_t front( condition & this ) libcfa_public { verifyf( !is_empty(this), "Attempt to access user data on an empty condition.\n" // setup mask // block void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) { void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) libcfa_public { // This statment doesn't have a contiguous list of monitors... // Create one! // Can't be accepted since a mutex stmt is effectively an anonymous routine // Thus we do not need a monitor group void lock( monitor$ * this ) { void lock( monitor$* this ) libcfa_public { thread$ * thrd = active_thread(); // Leave routine for mutex stmt // Is just a wrapper around __leave for the is_lock trait to see void unlock( monitor$* this ) { __leave( this ); } void unlock( monitor$ * this ) libcfa_public { __leave( this ); } // Local Variables: //
• ## libcfa/src/concurrency/monitor.hfa

 r015925a } void ?{}(__condition_node_t & this, thread$* waiting_thread, __lock_size_t count, uintptr_t user_info ); void ?{}(__condition_criterion_t & this ); void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner ); // void ?{}(__condition_node_t & this, thread$* waiting_thread, __lock_size_t count, uintptr_t user_info ); // void ?{}(__condition_criterion_t & this ); // void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner ); struct condition {
• ## libcfa/src/concurrency/preemption.cfa

 r015925a #endif __attribute__((weak)) Duration default_preemption() { __attribute__((weak)) Duration default_preemption() libcfa_public { const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION"); if(!preempt_rate_s) { //---------- // special case for preemption since used often __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() { __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_public { // create a assembler label before // marked as clobber all to avoid movement // Get data from the TLS block // struct asm_region __cfaasm_get; uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__, visibility("default"))); //no inline to avoid problems uintptr_t __cfatls_get( unsigned long int offset ) { // create a assembler label before extern "C" { // Disable interrupts by incrementing the counter void disable_interrupts() { __attribute__((__noinline__, visibility("default"))) void disable_interrupts() libcfa_public { // create a assembler label before // marked as clobber all to avoid movement // Enable interrupts by decrementing the counter // If counter reaches 0, execute any pending __cfactx_switch void enable_interrupts( bool poll ) { void enable_interrupts( bool poll ) libcfa_public { // Cache the processor now since interrupts can start happening after the atomic store processor   * proc = __cfaabi_tls.this_processor; //----------------------------------------------------------------------------- // Kernel Signal Debug void __cfaabi_check_preemption() { void __cfaabi_check_preemption() libcfa_public { bool ready = __preemption_enabled(); if(!ready) { abort("Preemption should be ready"); }
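As the comments above say, disable_interrupts/enable_interrupts maintain a nesting counter: each disable increments it, each enable decrements it, and the pending __cfactx_switch only runs when the outermost enable brings the count back to zero. A stripped-down sketch of that protocol, assuming balanced calls on one thread (all names hypothetical):

    #include <stdbool.h>

    static _Thread_local unsigned disable_count = 0;
    static _Thread_local bool deferred_pending = false;

    static void my_disable( void ) {
        disable_count += 1;                    // nested disables just count up
    }

    static void my_enable( void ) {
        if ( --disable_count == 0 && deferred_pending ) {
            deferred_pending = false;
            // only here, at the outermost enable, run the deferred work
        }
    }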

• ## libcfa/src/containers/maybe.cfa

 r015925a #include #pragma GCC visibility push(default) forall(T)
• ## libcfa/src/containers/result.cfa

 r015925a #include #pragma GCC visibility push(default) forall(T, E)
• ## libcfa/src/containers/string.cfa

 r015925a #include #pragma GCC visibility push(default) /*
• ## libcfa/src/containers/string_sharectx.hfa

 r015925a #pragma once #pragma GCC visibility push(default) //######################### String Sharing Context ######################### struct VbyteHeap; // A string_sharectx // A string_sharectx // // Usage:
• ## libcfa/src/containers/vector.cfa

 r015925a #include #pragma GCC visibility push(default) forall(T, allocator_t | allocator_c(T, allocator_t)) void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other); static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other); //------------------------------------------------------------------------------ forall(T, allocator_t | allocator_c(T, allocator_t)) void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other) static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other) { this->size = other->size;
• ## libcfa/src/device/cpu.cfa

 r015925a } #include "bits/defs.hfa" #include "algorithms/range_iterator.hfa" } cpu_info_t cpu_info; libcfa_public cpu_info_t cpu_info;
• ## libcfa/src/exception.c

 r015925a #include "stdhdr/assert.h" #include "virtual.h" #pragma GCC visibility push(default) #include "lsda.h" #else // defined( __ARM_ARCH ) // The return code from _Unwind_RaiseException seems to be corrupt on ARM at end of stack. // This workaround tries to keep default exception handling working. // This workaround tries to keep default exception handling working. if ( ret == _URC_FATAL_PHASE1_ERROR || ret == _URC_FATAL_PHASE2_ERROR ) { #endif

• ## libcfa/src/fstream.cfa

r015925a #include #include <errno.h>                                                                               // errno #pragma GCC visibility push(default) // *********************************** ofstream *********************************** // abort | IO_MSG "open output file \"" | name | "\"" | nl | strerror( errno ); } // if (os){ file };                                                                           // initialize (os){ file };                                                                           // initialize } // open va_list args; va_start( args, format ); int len; for ( cnt; 10 ) { // abort | IO_MSG "open input file \"" | name | "\"" | nl | strerror( errno ); } // if (is){ file };                                                                           // initialize (is){ file };                                                                           // initialize } // open
• ## libcfa/src/fstream.hfa

 r015925a #include "bits/weakso_locks.hfa"                                                // mutex_lock #include "iostream.hfa" #include

• ## libcfa/src/interpose.cfa

 r015925a //============================================================================================= void preload_libgcc(void) { static void preload_libgcc(void) { dlopen( "libgcc_s.so.1", RTLD_NOW ); if ( const char * error = dlerror() ) abort( "interpose_symbol : internal error pre-loading libgcc, %s\n", error ); typedef void (* generic_fptr_t)(void); generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) { static generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) { const char * error; //============================================================================================= void sigHandler_segv( __CFA_SIGPARMS__ ); void sigHandler_ill ( __CFA_SIGPARMS__ ); void sigHandler_fpe ( __CFA_SIGPARMS__ ); void sigHandler_abrt( __CFA_SIGPARMS__ ); void sigHandler_term( __CFA_SIGPARMS__ ); struct { static void sigHandler_segv( __CFA_SIGPARMS__ ); static void sigHandler_ill ( __CFA_SIGPARMS__ ); static void sigHandler_fpe ( __CFA_SIGPARMS__ ); static void sigHandler_abrt( __CFA_SIGPARMS__ ); static void sigHandler_term( __CFA_SIGPARMS__ ); static struct { void (* exit)( int ) __attribute__(( __noreturn__ )); void (* abort)( void ) __attribute__(( __noreturn__ )); } __cabi_libc; int cfa_main_returned; libcfa_public int cfa_main_returned; extern "C" { // Forward declare abort after the __typeof__ call to avoid ambiguities void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )); void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )); libcfa_public void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); libcfa_public void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )); libcfa_public void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); libcfa_public void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )); extern "C" { void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) { libcfa_public void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) { abort( false, "%s", "" ); } void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) { libcfa_public void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) { va_list argp; va_start( argp, fmt ); } void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) { libcfa_public void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) { __cabi_libc.exit( status ); }
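interpose_symbol above is the lookup half of libcfa's interposition of libc routines such as abort and exit. The underlying technique is a dlsym/dlvsym query against RTLD_NEXT, sketched below under a hypothetical name (glibc-specific; link with -ldl):

    #define _GNU_SOURCE   // for RTLD_NEXT and dlvsym
    #include <dlfcn.h>
    #include <stdio.h>

    typedef void (* generic_fptr_t)( void );

    // Find the next (i.e. the real libc) definition of symbol, optionally versioned.
    static generic_fptr_t find_next( const char symbol[], const char version[] ) {
        generic_fptr_t fptr = version
            ? (generic_fptr_t)dlvsym( RTLD_NEXT, symbol, version )
            : (generic_fptr_t)dlsym ( RTLD_NEXT, symbol );
        if ( ! fptr ) fprintf( stderr, "interpose failed for %s: %s\n", symbol, dlerror() );
        return fptr;
    }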
• ## libcfa/src/iostream.cfa

 r015925a #include "bitmanip.hfa"                                                                 // high1 #pragma GCC visibility push(default) // *********************************** ostream ***********************************
• ## libcfa/src/limits.cfa

 r015925a #include #include "limits.hfa" #pragma GCC visibility push(default) // Integral Constants
• ## libcfa/src/memory.cfa

 r015925a #include "memory.hfa" #include "stdlib.hfa" #pragma GCC visibility push(default) // Internal data object.
• ## libcfa/src/parseargs.cfa

 r015925a #include "common.hfa" #include "limits.hfa" #pragma GCC visibility push(default) extern int cfa_args_argc __attribute__((weak));
• ## libcfa/src/parseconfig.cfa

 r015925a #pragma GCC visibility push(default) // *********************************** exceptions *********************************** // TODO: Add names of missing config entries to exception (see further below) static vtable(Missing_Config_Entries) Missing_Config_Entries_vt; vtable(Missing_Config_Entries) Missing_Config_Entries_vt; [ void ] ?{}( & Missing_Config_Entries this, unsigned int num_missing ) { static vtable(Parse_Failure) Parse_Failure_vt; vtable(Parse_Failure) Parse_Failure_vt; [ void ] ?{}( & Parse_Failure this, [] char failed_key, [] char failed_value ) { static vtable(Validation_Failure) Validation_Failure_vt; vtable(Validation_Failure) Validation_Failure_vt; [ void ] ?{}( & Validation_Failure this, [] char failed_key, [] char failed_value ) { [ bool ] comments( & ifstream in, [] char name ) { static [ bool ] comments( & ifstream in, [] char name ) { while () { in | name;
• ## libcfa/src/rational.cfa

 r015925a #include "fstream.hfa" #include "stdlib.hfa" #pragma GCC visibility push(default) forall( T | Arithmetic( T ) ) {
• ## libcfa/src/startup.cfa

 r015925a } // __cfaabi_appready_shutdown void disable_interrupts() __attribute__(( weak )) {} void enable_interrupts() __attribute__(( weak )) {} void disable_interrupts() __attribute__(( weak )) libcfa_public {} void enable_interrupts() __attribute__(( weak )) libcfa_public {} struct __spinlock_t; extern "C" { void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) {} void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) libcfa_public {} }
• ## libcfa/src/stdlib.cfa

r015925a #include <complex.h>                                                                     // _Complex_I #include #pragma GCC visibility push(default) //--------------------------------------- #define GENERATOR LCG uint32_t __global_random_seed;                                                  // sequential/concurrent uint32_t __global_random_state;                                                 // sequential only // would be cool to make hidden but it's needed for libcfathread __attribute__((visibility("default"))) uint32_t __global_random_seed;                                                   // sequential/concurrent __attribute__((visibility("hidden"))) uint32_t __global_random_state;                                                   // sequential only void set_seed( PRNG & prng, uint32_t seed_ ) with( prng ) { state = seed = seed_; GENERATOR( state ); } // set seed
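set_seed above records the seed and immediately advances the generator once via GENERATOR( state ). With GENERATOR defined as LCG, the core is a one-line recurrence; this sketch uses the classic Numerical Recipes constants, which are an assumption and may differ from libcfa's actual ones:

    #include <stdint.h>

    static uint32_t lcg_state;

    // 32-bit linear congruential generator: state = state * a + c (mod 2^32).
    static uint32_t lcg_next( void ) {
        lcg_state = lcg_state * 1664525u + 1013904223u;
        return lcg_state;
    }

    static void lcg_set_seed( uint32_t seed ) {
        lcg_state = seed;
        lcg_next();   // advance once, mirroring set_seed's GENERATOR( state )
    }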
• ## libcfa/src/strstream.cfa

r015925a // // // Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo // // // The contents of this file are covered under the licence agreement in the // file "LICENCE" distributed with Cforall. // // strstream.cfa -- // // strstream.cfa -- // // Author           : Peter A. Buhr // Created On       : Thu Apr 22 22:24:35 2021 // Last Modified On : Sun Oct 10 16:13:20 2021 // Update Count     : 101 // // #include "strstream.hfa" #include <unistd.h>                                                                              // sbrk, sysconf #pragma GCC visibility push(default) // *********************************** strstream ***********************************
• ## libcfa/src/time.cfa

r015925a #include <stdio.h>                                                                               // snprintf #include #pragma GCC visibility push(default) static char * nanomsd( long int ns, char * buf ) {              // most significant digits
• ## libcfa/src/virtual.c

 r015925a #include "virtual.h" #include "assert.h" #pragma GCC visibility push(default) int __cfavir_is_parent(
• ## src/AST/Expr.cpp

r015925a // Created On       : Wed May 15 17:00:00 2019 // Last Modified By : Andrew Beach // Last Modified On  : Tue Nov 30 14:23:00 2021 // Update Count     : 7 // Last Modified On  : Wed May 18 13:56:00 2022 // Update Count     : 8 // #include "Copy.hpp"                // for shallowCopy #include "Eval.hpp"                // for call #include "GenericSubstitution.hpp" #include "LinkageSpec.hpp" // --- UntypedExpr bool UntypedExpr::get_lvalue() const { std::string fname = InitTweak::getFunctionName( this ); return lvalueFunctionNames.count( fname ); } UntypedExpr * UntypedExpr::createDeref( const CodeLocation & loc, const Expr * arg ) { assert( arg ); UntypedExpr * ret = call( loc, "*?", arg ); UntypedExpr * ret = createCall( loc, "*?", { arg } ); if ( const Type * ty = arg->result ) { const Type * base = InitTweak::getPointerBase( ty ); } bool UntypedExpr::get_lvalue() const { std::string fname = InitTweak::getFunctionName( this ); return lvalueFunctionNames.count( fname ); } UntypedExpr * UntypedExpr::createAssign( const CodeLocation & loc, const Expr * lhs, const Expr * rhs ) { assert( lhs && rhs ); UntypedExpr * ret = call( loc, "?=?", lhs, rhs ); UntypedExpr * ret = createCall( loc, "?=?", { lhs, rhs } ); if ( lhs->result && rhs->result ) { // if both expressions are typed, assumes that this assignment is a C bitwise assignment, } return ret; } UntypedExpr * UntypedExpr::createCall( const CodeLocation & loc, const std::string & name, std::vector<ptr<Expr>> && args ) { return new UntypedExpr( loc, new NameExpr( loc, name ), std::move( args ) ); }
• ## src/AST/Expr.hpp

r015925a /// Creates a new assignment expression static UntypedExpr * createAssign( const CodeLocation & loc, const Expr * lhs, const Expr * rhs ); /// Creates a new call of a variable. static UntypedExpr * createCall( const CodeLocation & loc, const std::string & name, std::vector<ptr<Expr>> && args ); const Expr * accept( Visitor & v ) const override { return v.visit( this ); }
• ## src/AST/module.mk

 r015925a AST/DeclReplacer.cpp \ AST/DeclReplacer.hpp \ AST/Eval.hpp \ AST/Expr.cpp \ AST/Expr.hpp \
• ## src/CodeGen/CodeGenerator.cc

 r015925a } // namespace CodeGen unsigned Indenter::tabsize = 2; std::ostream & operator<<( std::ostream & out, const BaseSyntaxNode * node ) { if ( node ) { node->print( out ); } else { out << "nullptr"; } return out; } // Local Variables: // // tab-width: 4 //
• ## src/CodeGen/FixMain.cc

 r015925a } bool FixMain::replace_main = false; template
• ## src/CodeGen/GenType.cc

 r015925a // Created On       : Mon May 18 07:44:20 2015 // Last Modified By : Andrew Beach // Last Modified On : Wed May  1 15:24:00 2019