Changeset e5d9274
- Timestamp: Jun 2, 2022, 3:11:21 PM (8 months ago)
- Branches: master, pthread-emulation, qualifiedEnum
- Children: ced5e2a
- Parents: 015925a (diff), fc134a48 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Files:
- 25 added
- 1 deleted
- 133 edited
-
benchmark/plot.py
r015925a re5d9274 22 22 23 23 class Field: 24 def __init__(self, unit, _min, _log ):24 def __init__(self, unit, _min, _log, _name=None): 25 25 self.unit = unit 26 26 self.min = _min 27 27 self.log = _log 28 self.name = _name 28 29 29 30 field_names = { … … 32 33 "Ops per procs" : Field('Ops' , 0, False), 33 34 "Ops per threads" : Field('Ops' , 0, False), 34 "ns per ops/procs" : Field(' ns' , 0, False),35 "ns per ops/procs" : Field('' , 0, False, _name = "Latency (ns $/$ (Processor $\\times$ Operation))" ), 35 36 "Number of threads" : Field('' , 1, False), 36 37 "Total Operations(ops)" : Field('Ops' , 0, False), 37 38 "Ops/sec/procs" : Field('Ops' , 0, False), 38 39 "Total blocks" : Field('Blocks', 0, False), 39 "Ops per second" : Field(' Ops' , 0, False),40 "Ops per second" : Field('' , 0, False), 40 41 "Cycle size (# thrds)" : Field('thrd' , 1, False), 41 42 "Duration (ms)" : Field('ms' , 0, False), … … 51 52 } 52 53 53 def plot(in_data, x, y, o ut):54 def plot(in_data, x, y, options): 54 55 fig, ax = plt.subplots() 55 56 colors = itertools.cycle(['#0095e3','#006cb4','#69df00','#0aa000','#fb0300','#e30002','#fd8f00','#ff7f00','#8f00d6','#4b009a','#ffff00','#b13f00']) … … 109 110 print("Finishing Plots") 110 111 111 plt.ylabel( y)112 plt.ylabel(field_names[y].name if field_names[y].name else y) 112 113 # plt.xticks(range(1, math.ceil(mx) + 1)) 113 plt.xlabel( x)114 plt.xlabel(field_names[x].name if field_names[x].name else x) 114 115 plt.grid(b = True) 115 116 ax.xaxis.set_major_formatter( EngFormatter(unit=field_names[x].unit) ) 116 if field_names[x].log: 117 if options.logx: 118 ax.set_xscale('log') 119 elif field_names[x].log: 117 120 ax.set_xscale('log') 118 121 else: … … 120 123 121 124 ax.yaxis.set_major_formatter( EngFormatter(unit=field_names[y].unit) ) 122 if field_names[y].log: 125 if options.logy: 126 ax.set_yscale('log') 127 elif field_names[y].log: 123 128 ax.set_yscale('log') 124 129 else: 125 plt.ylim(field_names[y].min, my*1.2)130 plt.ylim(field_names[y].min, options.MaxY if options.MaxY else my*1.2) 126 131 127 132 plt.legend(loc='upper left') 128 133 129 134 print("Results Ready") 130 if o ut:131 plt.savefig(o ut)135 if options.out: 136 plt.savefig(options.out, bbox_inches='tight') 132 137 else: 133 138 plt.show() … … 142 147 parser.add_argument('-y', nargs='?', type=str, default="", help="Which field to use as the Y axis") 143 148 parser.add_argument('-x', nargs='?', type=str, default="", help="Which field to use as the X axis") 149 parser.add_argument('--logx', action='store_true', help="if set, makes the x-axis logscale") 150 parser.add_argument('--logy', action='store_true', help="if set, makes the y-axis logscale") 151 parser.add_argument('--MaxY', nargs='?', type=int, help="maximum value of the y-axis") 144 152 145 153 options = parser.parse_args() … … 185 193 186 194 187 plot(data, wantx, wanty, options .out)195 plot(data, wantx, wanty, options) -
doc/theses/mubeen_zulfiqar_MMath/allocator.tex
r015925a re5d9274 29 29 llheap's design was reviewed and changed multiple times throughout the thesis. 30 30 Some of the rejected designs are discussed because they show the path to the final design (see discussion in \VRef{s:MultipleHeaps}). 31 Note, a few simple stests for a design choice were compared with the current best allocators to determine the viability of a design.31 Note, a few simple tests for a design choice were compared with the current best allocators to determine the viability of a design. 32 32 33 33 … … 37 37 These designs look at the allocation/free \newterm{fastpath}, \ie when an allocation can immediately return free storage or returned storage is not coalesced. 38 38 \paragraph{T:1 model} 39 \VRef[Figure]{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are N-shared acrossKTs.40 This design leverages the fact that 95\% of allocation requests are less than 1024 bytes and there are only 3--5different request sizes.39 \VRef[Figure]{f:T1SharedBuckets} shows one heap accessed by multiple kernel threads (KTs) using a bucket array, where smaller bucket sizes are shared among N KTs. 40 This design leverages the fact that usually the allocation requests are less than 1024 bytes and there are only a few different request sizes. 41 41 When KTs $\le$ N, the common bucket sizes are uncontented; 42 42 when KTs $>$ N, the free buckets are contented and latency increases significantly. … … 64 64 65 65 \paragraph{T:H model} 66 \VRef[Figure]{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded across the KTs.66 \VRef[Figure]{f:THSharedHeaps} shows a fixed number of heaps (N), each a local free pool, where the heaps are sharded (distributed) across the KTs. 67 67 A KT can point directly to its assigned heap or indirectly through the corresponding heap bucket. 68 When KT $\le$ N, the heaps are uncontented;68 When KT $\le$ N, the heaps might be uncontented; 69 69 when KTs $>$ N, the heaps are contented. 70 70 In all cases, a KT must acquire/release a lock, contented or uncontented along the fast allocation path because a heap is shared. 71 By adjusting N upwards, this approach reduces contention but increases storage (time versus space);71 By increasing N, this approach reduces contention but increases storage (time versus space); 72 72 however, picking N is workload specific. 73 73 … … 109 109 Need to prevent preemption during a dynamic memory operation because of the \newterm{serially-reusable problem}. 110 110 \begin{quote} 111 A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable} 111 A sequence of code that is guaranteed to run to completion before being invoked to accept another input is called serially-reusable code.~\cite{SeriallyReusable}\label{p:SeriallyReusable} 112 112 \end{quote} 113 113 If a KT is preempted during an allocation operation, the operating system can schedule another KT on the same CPU, which can begin an allocation operation before the previous operation associated with this CPU has completed, invalidating heap correctness. … … 138 138 (See \VRef[Figure]{f:THSharedHeaps} but with a heap bucket per KT and no bucket or local-pool lock.) 139 139 Hence, immediately after a KT starts, its heap is created and just before a KT terminates, its heap is (logically) deleted. 
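The 1:1 model's fastpath can be pictured with a small sketch: each kernel thread holds a thread-local pointer to its heap, assigned on the thread's first allocation from a global array of heaps. This is only an illustration of the idea; the @Heap@ type and initialization shown here are simplified stand-ins, not llheap's actual structures.
\begin{lstlisting}
#include <stdatomic.h>
#include <stddef.h>

// Simplified stand-in for a per-kernel-thread heap descriptor.
typedef struct Heap { struct Heap * next; /* buckets, statistics, ... */ } Heap;

#define MAXHEAPS 64
static Heap heapArray[MAXHEAPS];            // stand-in for the mmap'd array of heaps
static _Atomic unsigned int nextHeap = 0;   // global bump pointer into the array

static __thread Heap * myHeap = NULL;       // one heap pointer per kernel thread (1:1 model)

static Heap * get_heap( void ) {
    if ( myHeap == NULL ) {                 // first allocation by this kernel thread?
        myHeap = &heapArray[nextHeap++];    // claim the next unused heap (bounds check omitted)
    }
    return myHeap;                          // thereafter, no locks or atomics on this fastpath
}
\end{lstlisting}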
140 Heaps are uncontended for a KTs memory operations to its heap (modulo operations on the global pool and ownership).140 Heaps are uncontended for a KTs memory operations as every KT has its own thread-local heap, modulo operations on the global pool and ownership. 141 141 142 142 Problems: 143 143 \begin{itemize} 144 144 \item 145 Need to know when a KT isstarts/terminates to create/delete its heap.145 Need to know when a KT starts/terminates to create/delete its heap. 146 146 147 147 \noindent … … 161 161 \noindent 162 162 In many concurrent applications, good performance is achieved with the number of KTs proportional to the number of CPUs. 163 Since the number of CPUs is relatively small, >~1024, and a heaprelatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs.163 Since the number of CPUs is relatively small, and a heap is also relatively small, $\approx$10K bytes (not including any associated freed storage), the worst-case external fragmentation is still small compared to the RAM available on large servers with many CPUs. 164 164 \item 165 165 There is the same serially-reusable problem with UTs migrating across KTs. … … 171 171 \noindent 172 172 The conclusion from this design exercise is: any atomic fence, atomic instruction (lock free), or lock along the allocation fastpath produces significant slowdown. 173 For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps m aybe shared by multiple threads, even when KTs $\le$ N.173 For the T:1 and T:H models, locking must exist along the allocation fastpath because the buckets or heaps might be shared by multiple threads, even when KTs $\le$ N. 174 174 For the T:H=CPU and 1:1 models, locking is eliminated along the allocation fastpath. 175 175 However, T:H=CPU has poor operating-system support to determine the CPU id (heap id) and prevent the serially-reusable problem for KTs. 176 176 More operating system support is required to make this model viable, but there is still the serially-reusable problem with user-level threading. 177 Leaving the 1:1 model with no atomic actions along the fastpath and no special operating-system support required.177 So the 1:1 model had no atomic actions along the fastpath and no special operating-system support requirements. 178 178 The 1:1 model still has the serially-reusable problem with user-level threading, which is addressed in \VRef{s:UserlevelThreadingSupport}, and the greatest potential for heap blowup for certain allocation patterns. 179 179 … … 212 212 Ideally latency is $O(1)$ with a small constant. 213 213 214 To obtain $O(1)$ internal latency means no searching on the allocation fastpath ,largely prohibits coalescing, which leads to external fragmentation.214 To obtain $O(1)$ internal latency means no searching on the allocation fastpath and largely prohibits coalescing, which leads to external fragmentation. 215 215 The mitigating factor is that most programs have well behaved allocation patterns, where the majority of allocation operations can be $O(1)$, and heap blowup does not occur without coalescing (although the allocation footprint may be slightly larger). 216 216 … … 257 257 llheap starts by creating an array of $N$ global heaps from storage obtained using @mmap@, where $N$ is the number of computer cores, that persists for program duration. 
258 258 There is a global bump-pointer to the next free heap in the array. 259 When this array is exhausted, another array is allocated.260 There is a global top pointer for a heap intrusive linkto chain free heaps from terminated threads.261 When statistics are turned on, there is a global top pointer for a heap intrusive linkto chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@.259 When this array is exhausted, another array of heaps is allocated. 260 There is a global top pointer for a intrusive linked-list to chain free heaps from terminated threads. 261 When statistics are turned on, there is a global top pointer for a intrusive linked-list to chain \emph{all} the heaps, which is traversed to accumulate statistics counters across heaps using @malloc_stats@. 262 262 263 263 When a KT starts, a heap is allocated from the current array for exclusive use by the KT. 264 When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of heaps.265 The free heaps is astack so hot storage is reused first.266 Preserving all heaps created during the program lifetime, solves the storage lifetime problem,when ownership is used.264 When a KT terminates, its heap is chained onto the heap free-list for reuse by a new KT, which prevents unbounded growth of number of heaps. 265 The free heaps are stored on stack so hot storage is reused first. 266 Preserving all heaps, created during the program lifetime, solves the storage lifetime problem when ownership is used. 267 267 This approach wastes storage if a large number of KTs are created/terminated at program start and then the program continues sequentially. 268 268 llheap can be configured with object ownership, where an object is freed to the heap from which it is allocated, or object no-ownership, where an object is freed to the KT's current heap. 269 269 270 270 Each heap uses segregated free-buckets that have free objects distributed across 91 different sizes from 16 to 4M. 271 All objects in a bucket are of the same size. 271 272 The number of buckets used is determined dynamically depending on the crossover point from @sbrk@ to @mmap@ allocation using @mallopt( M_MMAP_THRESHOLD )@, \ie small objects managed by the program and large objects managed by the operating system. 272 273 Each free bucket of a specific size has the following two lists: … … 286 287 Quantizing is performed using a binary search over the ordered bucket array. 287 288 An optional optimization is fast lookup $O(1)$ for sizes < 64K from a 64K array of type @char@, where each element has an index to the corresponding bucket. 288 (Type @char@ restricts the number of bucket sizes to 256.) 289 The @char@ type restricts the number of bucket sizes to 256. 289 290 For $S$ > 64K, a binary search is used. 290 291 Then, the allocation storage is obtained from the following locations (in order), with increasing latency. … … 381 382 Then the corresponding bucket of the owner thread is computed for the deallocating thread, and the allocation is pushed onto the deallocating thread's bucket. 
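The size-to-bucket quantization described above can be sketched as follows, assuming an illustrative bucket table rather than llheap's actual 91 sizes: requests below 64K index a byte-sized lookup table built once at startup, and larger requests fall back to a binary search over the ordered bucket sizes.
\begin{lstlisting}
#include <stddef.h>

// Illustrative ordered bucket sizes (llheap actually uses 91 sizes from 16 bytes to 4M).
static const size_t bucketSizes[] = { 16, 32, 48, 64, 96, 128, 256, 512, 1024, 65536 };
enum { NBUCKETS = sizeof(bucketSizes) / sizeof(bucketSizes[0]) };

#define LOOKUP_LIMIT 65536
static unsigned char sizeToBucket[LOOKUP_LIMIT];   // char-sized indices restrict buckets to 256

// Binary search for the smallest bucket that can hold the request.
static unsigned int bucket_search( size_t size ) {
    unsigned int lo = 0, hi = NBUCKETS - 1;
    while ( lo < hi ) {
        unsigned int mid = (lo + hi) / 2;
        if ( bucketSizes[mid] < size ) lo = mid + 1; else hi = mid;
    }
    return lo;                              // requests above the largest bucket are mmap'd instead
}

static void build_lookup( void ) {          // called once during heap initialization
    for ( size_t s = 1; s < LOOKUP_LIMIT; s += 1 ) sizeToBucket[s] = (unsigned char)bucket_search( s );
}

// Fastpath quantization: O(1) table for small requests, binary search otherwise.
static unsigned int size_to_bucket( size_t size ) {
    return size < LOOKUP_LIMIT ? sizeToBucket[size] : bucket_search( size );
}
\end{lstlisting}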
382 383 383 Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through routines @malloc@/@free@, which are the only routines to directly access and manage the internal data structures of the heap.384 Finally, the llheap design funnels \label{p:FunnelRoutine} all allocation/deallocation operations through the @malloc@ and @free@ routines, which are the only routines to directly access and manage the internal data structures of the heap. 384 385 Other allocation operations, \eg @calloc@, @memalign@, and @realloc@, are composed of calls to @malloc@ and possibly @free@, and may manipulate header information after storage is allocated. 385 386 This design simplifies heap-management code during development and maintenance. … … 388 389 \subsection{Alignment} 389 390 390 All dynamic memory allocations musthave a minimum storage alignment for the contained object(s).391 Most dynamic memory allocations have a minimum storage alignment for the contained object(s). 391 392 Often the minimum memory alignment, M, is the bus width (32 or 64-bit) or the largest register (double, long double) or largest atomic instruction (DCAS) or vector data (MMMX). 392 393 In general, the minimum storage alignment is 8/16-byte boundary on 32/64-bit computers. 393 394 For consistency, the object header is normally aligned at this same boundary. 394 Larger alignments must be a power of 2, such page alignment (4/8K).395 Larger alignments must be a power of 2, such as page alignment (4/8K). 395 396 Any alignment request, N, $\le$ the minimum alignment is handled as a normal allocation with minimal alignment. 396 397 … … 400 401 \end{center} 401 402 The storage between @E@ and @H@ is chained onto the appropriate free list for future allocations. 402 Th is approach is also valid within any sufficiently large free block, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage.403 The same approach is used for sufficiently large free blocks, where @E@ is the start of the free block, and any unused storage before @H@ or after the allocated object becomes free storage. 403 404 In this approach, the aligned address @A@ is the same as the allocated storage address @P@, \ie @P@ $=$ @A@ for all allocation routines, which simplifies deallocation. 404 405 However, if there are a large number of aligned requests, this approach leads to memory fragmentation from the small free areas around the aligned object. … … 407 408 Finally, this approach is incompatible with allocator designs that funnel allocation requests through @malloc@ as it directly manipulates management information within the allocator to optimize the space/time of a request. 408 409 409 Instead, llheap alignment is accomplished by making a \emph{pessimistic ally} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg:410 Instead, llheap alignment is accomplished by making a \emph{pessimistic} allocation request for sufficient storage to ensure that \emph{both} the alignment and size request are satisfied, \eg: 410 411 \begin{center} 411 412 \input{Alignment2} … … 424 425 \input{Alignment2Impl} 425 426 \end{center} 426 Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments of @M@ or greater.427 Since @malloc@ has a minimum alignment of @M@, @P@ $\neq$ @A@ only holds for alignments greater than @M@. 
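The pessimistic approach can be illustrated with a generic sketch built on top of @malloc@/@free@; llheap's real fake header carries more information and is recognized on the free path by a flag bit, whereas this sketch simply records the offset back to the true allocation address and assumes @my_free@ is only called on pointers returned by @my_memalign@.
\begin{lstlisting}
#include <stdlib.h>
#include <stdint.h>

// Alignment by pessimistic over-allocation: request enough extra storage that an aligned
// address A with room for a fake header always exists inside the block returned by malloc.
// Assumes align is a power of 2 no smaller than malloc's minimum alignment.
void * my_memalign( size_t align, size_t size ) {
    char * P = malloc( size + align - 1 + sizeof(size_t) );    // pessimistic request
    if ( P == NULL ) return NULL;
    uintptr_t raw = (uintptr_t)P + sizeof(size_t);             // leave room for the fake header
    uintptr_t A = (raw + align - 1) & ~(uintptr_t)(align - 1); // round up to the alignment
    ((size_t *)A)[-1] = A - (uintptr_t)P;                      // fake header: offset back to P
    return (void *)A;
}

void my_free( void * A ) {                  // only valid for pointers from my_memalign
    if ( A == NULL ) return;
    size_t offset = ((size_t *)A)[-1];      // read the fake header
    free( (char *)A - offset );             // recover P and free the real allocation
}
\end{lstlisting}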
427 428 When @P@ $\neq$ @A@, the minimum distance between @P@ and @A@ is @M@ bytes, due to the pessimistic storage-allocation. 428 429 Therefore, there is always room for an @M@-byte fake header before @A@. … … 439 440 \label{s:ReallocStickyProperties} 440 441 441 Allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually.442 The allocation routine @realloc@ provides a memory-management pattern for shrinking/enlarging an existing allocation, while maintaining some or all of the object data, rather than performing the following steps manually. 442 443 \begin{flushleft} 443 444 \begin{tabular}{ll} … … 460 461 The realloc pattern leverages available storage at the end of an allocation due to bucket sizes, possibly eliminating a new allocation and copying. 461 462 This pattern is not used enough to reduce storage management costs. 462 In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the pattern.463 In fact, if @oaddr@ is @nullptr@, @realloc@ does a @malloc@, so even the initial @malloc@ can be a @realloc@ for consistency in the allocation pattern. 463 464 464 465 The hidden problem for this pattern is the effect of zero fill and alignment with respect to reallocation. 465 466 Are these properties transient or persistent (``sticky'')? 466 For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size .467 That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are initial allocation properties preserve ?467 For example, when memory is initially allocated by @calloc@ or @memalign@ with zero fill or alignment properties, respectively, what happens when those allocations are given to @realloc@ to change size? 468 That is, if @realloc@ logically extends storage into unused bucket space or allocates new storage to satisfy a size change, are initial allocation properties preserved? 468 469 Currently, allocation properties are not preserved, so subsequent use of @realloc@ storage may cause inefficient execution or errors due to lack of zero fill or alignment. 469 470 This silent problem is unintuitive to programmers and difficult to locate because it is transient. … … 475 476 476 477 To preserve allocation properties requires storing additional information with an allocation, 477 The only available location is the header, where \VRef[Figure]{f:llheapNormalHeader} shows the llheap storage layout.478 The best available option is the header, where \VRef[Figure]{f:llheapNormalHeader} shows the llheap storage layout. 478 479 The header has two data field sized appropriately for 32/64-bit alignment requirements. 479 480 The first field is a union of three values: … … 487 488 \end{description} 488 489 The second field remembers the request size versus the allocation (bucket) size, \eg request 42 bytes which is rounded up to 64 bytes. 489 Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors .490 Since programmers think in request sizes rather than allocation sizes, the request size allows better generation of statistics or errors and also helps in memory management. 
490 491 491 492 \begin{figure} … … 496 497 \end{figure} 497 498 498 The low-order 3-bits of the first field are \emph{unused} for any stored values , whereas the second field may use all of its bits.499 The low-order 3-bits of the first field are \emph{unused} for any stored values as these values are 16-byte aligned by default, whereas the second field may use all of its bits. 499 500 The 3 unused bits are used to represent mapped allocation, zero filled, and alignment, respectively. 500 501 Note, the alignment bit is not used in the normal header and the zero-filled/mapped bits are not used in the fake header. … … 502 503 If no bits are on, it implies a basic allocation, which is handled quickly; 503 504 otherwise, the bits are analysed and appropriate actions are taken for the complex cases. 504 Since most allocations are basic, th is implementation results in a significant performance gainalong the allocation and free fastpath.505 Since most allocations are basic, they will take significantly less time as the memory operations will be done along the allocation and free fastpath. 505 506 506 507 … … 514 515 To locate all statistic counters, heaps are linked together in statistics mode, and this list is locked and traversed to sum all counters across heaps. 515 516 Note, the list is locked to prevent errors traversing an active list; 516 the statistics counters are not locked and can flicker during accumulation , which is not an issue with atomic read/write.517 the statistics counters are not locked and can flicker during accumulation. 517 518 \VRef[Figure]{f:StatiticsOutput} shows an example of statistics output, which covers all allocation operations and information about deallocating storage not owned by a thread. 518 519 No other memory allocator studied provides as comprehensive statistical information. 519 Finally, these statistics were invaluable during the development of this thesis for debugging and verifying correctness , and hence,should be equally valuable to application developers.520 Finally, these statistics were invaluable during the development of this thesis for debugging and verifying correctness and should be equally valuable to application developers. 520 521 521 522 \begin{figure} … … 547 548 Nevertheless, the checks detect many allocation problems. 548 549 There is an unfortunate problem in detecting unfreed storage because some library routines assume their allocations have life-time duration, and hence, do not free their storage. 549 For example, @printf@ allocates a 1024 buffer onfirst call and never deletes this buffer.550 For example, @printf@ allocates a 1024-byte buffer on the first call and never deletes this buffer. 550 551 To prevent a false positive for unfreed storage, it is possible to specify an amount of storage that is never freed (see @malloc_unfreed@ \VPageref{p:malloc_unfreed}), and it is subtracted from the total allocate/free difference. 551 552 Determining the amount of never-freed storage is annoying, but once done, any warnings of unfreed storage are application related. 552 553 553 Tests indicate only a 30\% performance increase when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistic is mitigated by limited calls, often only one at the end of the program.554 Tests indicate only a 30\% performance decrease when statistics \emph{and} debugging are enabled, and the latency cost for accumulating statistic is mitigated by limited calls, often only one at the end of the program. 
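A simplified sketch of such a header and its fastpath test follows; the field and flag names are illustrative, not llheap's exact layout, but they show how a single masked comparison distinguishes a basic allocation from the complex cases.
\begin{lstlisting}
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

enum { MAPPED_BIT = 1, ZEROED_BIT = 2, ALIGNED_BIT = 4, FLAG_MASK = 7 };

typedef struct Header {
    uintptr_t info;         // management word; its 3 low-order bits are free for flags
    size_t requestSize;     // original request, e.g. 42, even when the bucket is 64 bytes
} Header;

static bool is_basic( const Header * h ) {
    return ( h->info & FLAG_MASK ) == 0;    // no flag bits set => basic allocation
}

static void free_object( Header * h ) {
    if ( is_basic( h ) ) {
        /* fastpath: push onto the owning bucket's free list */
    } else if ( h->info & MAPPED_BIT ) {
        /* slowpath: munmap the object */
    } else {
        /* handle the zero-filled and aligned (fake header) cases */
    }
}
\end{lstlisting}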
554 555 555 556 … … 557 558 \label{s:UserlevelThreadingSupport} 558 559 559 The serially-reusable problem (see \V Ref{s:AllocationFastpath}) occurs for kernel threads in the ``T:H model, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model.560 The solution is to prevent interrupts that can result in CPU or KT change during operations that are logically critical sections.560 The serially-reusable problem (see \VPageref{p:SeriallyReusable}) occurs for kernel threads in the ``T:H model, H = number of CPUs'' model and for user threads in the ``1:1'' model, where llheap uses the ``1:1'' model. 561 The solution is to prevent interrupts that can result in a CPU or KT change during operations that are logically critical sections such as starting a memory operation on one KT and completing it on another. 561 562 Locking these critical sections negates any attempt for a quick fastpath and results in high contention. 562 563 For user-level threading, the serially-reusable problem appears with time slicing for preemptable scheduling, as the signal handler context switches to another user-level thread. 563 Without time slicing, a user thread performing a long computation can prevent execution(starve) other threads.564 To prevent starvation for a n allocation-active thread, \ie the time slice always triggers in an allocation critical-section for one thread, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice.564 Without time slicing, a user thread performing a long computation can prevent the execution of (starve) other threads. 565 To prevent starvation for a memory-allocation-intensive thread, \ie the time slice always triggers in an allocation critical-section for one thread so the thread never gets time sliced, a thread-local \newterm{rollforward} flag is set in the signal handler when it aborts a time slice. 565 566 The rollforward flag is tested at the end of each allocation funnel routine (see \VPageref{p:FunnelRoutine}), and if set, it is reset and a volunteer yield (context switch) is performed to allow other threads to execute. 566 567 567 llheap uses two techniques to detect when execution is in a allocation operation or routine called from allocation operation, to abort any time slice during this period.568 On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting thread-local flags so the signal handler aborts immediately.569 On the fastpath, disabling/enabling interrupts is too expensive as accessing thread-local storage can be expensive and notthread-safe.568 llheap uses two techniques to detect when execution is in an allocation operation or routine called from allocation operation, to abort any time slice during this period. 569 On the slowpath when executing expensive operations, like @sbrk@ or @mmap@, interrupts are disabled/enabled by setting kernel-thread-local flags so the signal handler aborts immediately. 570 On the fastpath, disabling/enabling interrupts is too expensive as accessing kernel-thread-local storage can be expensive and not user-thread-safe. 570 571 For example, the ARM processor stores the thread-local pointer in a coprocessor register that cannot perform atomic base-displacement addressing. 
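The rollforward pattern described above can be sketched as follows, where @yield@ stands in for the user-level scheduler's context-switch routine (an assumed name, not llheap's actual interface): the funnel routine finishes its critical allocation work first and only then performs the postponed context switch.
\begin{lstlisting}
#include <stdbool.h>
#include <stddef.h>

extern void yield( void );                          // user-level scheduler yield (assumed)
static __thread volatile bool rollforward = false;  // set by the signal handler when it
                                                    // aborts a time slice inside the allocator

static void * malloc_funnel( size_t size ) {
    void * addr = NULL;
    /* ... non-interruptible fastpath work that produces addr ... */
    if ( rollforward ) {                    // did a time slice arrive while we worked?
        rollforward = false;
        yield();                            // volunteer the postponed context switch
    }
    return addr;
}
\end{lstlisting}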
571 Hence, there is a window between loading the thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread.572 573 The fast technique definesa special code section and places all non-interruptible routines in this section.572 Hence, there is a window between loading the kernel-thread-local pointer from the coprocessor register into a normal register and adding the displacement when a time slice can move a thread. 573 574 The fast technique (with lower run time cost) is to define a special code section and places all non-interruptible routines in this section. 574 575 The linker places all code in this section into a contiguous block of memory, but the order of routines within the block is unspecified. 575 576 Then, the signal handler compares the program counter at the point of interrupt with the the start and end address of the non-interruptible section, and aborts if executing within this section and sets the rollforward flag. … … 577 578 Hence, for correctness, this approach requires inspection of generated assembler code for routines placed in the non-interruptible section. 578 579 This issue is mitigated by the llheap funnel design so only funnel routines and a few statistics routines are placed in the non-interruptible section and their assembler code examined. 579 These techniques are used in both the \uC and \CFA versions of llheap , whereboth of these systems have user-level threading.580 These techniques are used in both the \uC and \CFA versions of llheap as both of these systems have user-level threading. 580 581 581 582 … … 587 588 Programs can be statically or dynamically linked. 588 589 \item 589 The order the linker schedules startup code is poorly supported.590 The order in which the linker schedules startup code is poorly supported so it cannot be controlled entirely. 590 591 \item 591 592 Knowing a KT's start and end independently from the KT code is difficult. … … 600 601 Hence, some part of the @sbrk@ area may be used by the default allocator and statistics about allocation operations cannot be correct. 601 602 Furthermore, dynamic linking goes through trampolines, so there is an additional cost along the allocator fastpath for all allocation operations. 602 Testing showed up to a 5\% performance increase for dynamic linking overstatic linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding.603 Testing showed up to a 5\% performance decrease with dynamic linking as compared to static linking, even when using @tls_model("initial-exec")@ so the dynamic loader can obtain tighter binding. 603 604 604 605 All allocator libraries need to perform startup code to initialize data structures, such as the heap array for llheap. 605 The problem is getting initializ eddone before the first allocator call.606 The problem is getting initialization done before the first allocator call. 606 607 However, there does not seem to be mechanism to tell either the static or dynamic loader to first perform initialization code before any calls to a loaded library. 608 Also, initialization code of other libraries and the run-time environment may call memory allocation routines such as \lstinline{malloc}. 609 This compounds the situation as there is no mechanism to tell either the static or dynamic loader to first perform the initialization code of the memory allocator before any other initialization that may involve a dynamic memory allocation call. 
607 610 As a result, calls to allocation routines occur without initialization. 608 611 To deal with this problem, it is necessary to put a conditional initialization check along the allocation fastpath to trigger initialization (singleton pattern). … … 641 644 Therefore, the constructor is useless for knowing when a KT starts because the KT must reference it, and the allocator does not control the application KT. 642 645 Fortunately, the singleton pattern needed for initializing the program KT also triggers KT allocator initialization, which can then reference @pgm_thread@ to call @threadManager@'s constructor, otherwise its destructor is not called. 643 Now when a KT terminates, @~ThreadManager@ is called to chain edit onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT.646 Now when a KT terminates, @~ThreadManager@ is called to chain it onto the global-heap free-stack, where @pgm_thread@ is set to true only for the program KT. 644 647 The conditional destructor call prevents closing down the program heap, which must remain available because epilogue code may free more storage. 645 648 … … 660 663 bool traceHeapOff(); $\C{// stop printing allocation/free calls}$ 661 664 \end{lstlisting} 662 This kind of API is necessary to allow concurrent runtime systems to interact with differen cememory allocators in a consistent way.665 This kind of API is necessary to allow concurrent runtime systems to interact with different memory allocators in a consistent way. 663 666 664 667 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% … … 712 715 Most allocators use @nullptr@ to indicate an allocation failure, specifically out of memory; 713 716 hence the need to return an alternate value for a zero-sized allocation. 714 A different approach allowed by the C APIis to abort a program when out of memory and return @nullptr@ for a zero-sized allocation.717 A different approach allowed by @C API@ is to abort a program when out of memory and return @nullptr@ for a zero-sized allocation. 715 718 In theory, notifying the programmer of memory failure allows recovery; 716 719 in practice, it is almost impossible to gracefully recover when out of memory. … … 736 739 \paragraph{\lstinline{void * aalloc( size_t dim, size_t elemSize )}} 737 740 extends @calloc@ for allocating a dynamic array of objects without calculating the total size of array explicitly but \emph{without} zero-filling the memory. 738 @aalloc@ is significantly faster than @calloc@, which is the only alternative .741 @aalloc@ is significantly faster than @calloc@, which is the only alternative given by the standard memory-allocation routines. 739 742 740 743 \noindent\textbf{Usage} … … 825 828 \begin{itemize} 826 829 \item 827 @fd@: file s description.830 @fd@: file descriptor. 828 831 \end{itemize} 829 832 It returns the previous file descriptor. … … 832 835 \label{p:malloc_expansion} 833 836 set the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation request. 834 It returns the heap extension size used throughout a program , \ie called once at heap initialization.837 It returns the heap extension size used throughout a program when requesting more memory from the system using @sbrk@ system-call, \ie called once at heap initialization. 
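As a usage sketch of these extensions (assuming the llheap prototypes are visible from its header, whose name is not shown here), an array can be allocated without @calloc@'s zero-fill cost and the statistics and extension knobs queried directly:
\begin{lstlisting}
#include <stdlib.h>
#include <stdio.h>

extern void * aalloc( size_t dim, size_t elemSize );  // llheap extension prototypes,
extern size_t malloc_expansion( void );               // normally from the allocator's header
extern int malloc_stats_fd( int fd );

int main( void ) {
    int * values = aalloc( 1000, sizeof(int) );       // array allocation, no zero fill
    for ( int i = 0; i < 1000; i += 1 ) values[i] = i;
    free( values );

    printf( "heap extension size: %zu bytes\n", malloc_expansion() );
    malloc_stats_fd( 2 );                             // direct future statistics output to stderr
    return 0;
}
\end{lstlisting}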
835 838 836 839 \paragraph{\lstinline{size_t malloc_mmap_start()}} … … 915 918 \begin{itemize} 916 919 \item 917 naming: \CFA regular and @ttype@ polymorphism is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations.918 \item 919 named arguments: individual allocation properties are specified using postfix function call, so programmers dohave to remember parameter positions in allocation calls.920 \item 921 object size: like the \CFA C-styleinterface, programmers do not have to specify object size or cast allocation results.920 naming: \CFA regular and @ttype@ polymorphism (@ttype@ polymorphism in \CFA is similar to \CC variadic templates) is used to encapsulate a wide range of allocation functionality into a single routine name, so programmers do not have to remember multiple routine names for different kinds of dynamic allocations. 921 \item 922 named arguments: individual allocation properties are specified using postfix function call, so the programmers do not have to remember parameter positions in allocation calls. 923 \item 924 object size: like the \CFA's C-interface, programmers do not have to specify object size or cast allocation results. 922 925 \end{itemize} 923 926 Note, postfix function call is an alternative call syntax, using backtick @`@, where the argument appears before the function name, \eg … … 928 931 duration dur = 3@`@h + 42@`@m + 17@`@s; 929 932 \end{cfa} 930 @ttype@ polymorphism is similar to \CC variadic templates.931 933 932 934 \paragraph{\lstinline{T * alloc( ... )} or \lstinline{T * alloc( size_t dim, ... )}} 933 is overloaded with a variable number of specific allocation routines, or an integer dimension parameter followed by a variable number specific allocation routines. 935 is overloaded with a variable number of specific allocation operations, or an integer dimension parameter followed by a variable number of specific allocation operations. 936 These allocation operations can be passed as named arguments when calling the \lstinline{alloc} routine. 934 937 A call without parameters returns a dynamically allocated object of type @T@ (@malloc@). 935 938 A call with only the dimension (dim) parameter returns a dynamically allocated array of objects of type @T@ (@aalloc@). … … 980 983 5 5 5 -555819298 -555819298 // two undefined values 981 984 \end{lstlisting} 982 Examples 1 to 3 ,fill an object with a value or characters.983 Examples 4 to 7 ,fill an array of objects with values, another array, or part of an array.985 Examples 1 to 3 fill an object with a value or characters. 986 Examples 4 to 7 fill an array of objects with values, another array, or part of an array. 984 987 985 988 \subparagraph{\lstinline{S_resize(T) ?`resize( void * oaddr )}} … … 1015 1018 \subparagraph{\lstinline{S_realloc(T) ?`realloc( T * a ))}} 1016 1019 used to resize, realign, and fill, where the old object data is copied to the new object. 1017 The old object type must be the same as the new object type, since the value s used.1020 The old object type must be the same as the new object type, since the value is used. 1018 1021 Note, for @fill@, only the extra space after copying the data from the old object is filled with the given parameter. 1019 1022 For example: … … 1029 1032 \end{lstlisting} 1030 1033 Examples 2 to 3 change the alignment for the initial storage of @i@. 
1031 The @13`fill@ forexample 3 does nothing because no extra space is added.1034 The @13`fill@ in example 3 does nothing because no extra space is added. 1032 1035 1033 1036 \begin{cfa}[numbers=left] … … 1044 1047 \end{lstlisting} 1045 1048 Examples 2 to 4 change the array size, alignment and fill for the initial storage of @ia@. 1046 The @13`fill@ forexample 3 does nothing because no extra space is added.1049 The @13`fill@ in example 3 does nothing because no extra space is added. 1047 1050 1048 1051 These \CFA allocation features are used extensively in the development of the \CFA runtime. -
doc/theses/mubeen_zulfiqar_MMath/background.tex
r015925a re5d9274 36 36 The management data starts with fixed-sized information in the static-data memory that references components in the dynamic-allocation memory. 37 37 The \newterm{storage data} is composed of allocated and freed objects, and \newterm{reserved memory}. 38 Allocated objects (light grey) are variable sized, and a llocated and maintained by the program;39 \ie only the program knows the location of allocated storage, not the memory allocator.38 Allocated objects (light grey) are variable sized, and are allocated and maintained by the program; 39 \ie only the memory allocator knows the location of allocated storage, not the program. 40 40 \begin{figure}[h] 41 41 \centering … … 49 49 if there are multiple reserved blocks, they are also chained together, usually internally. 50 50 51 Allocated and freed objects typicallyhave additional management data embedded within them.51 In some allocator designs, allocated and freed objects have additional management data embedded within them. 52 52 \VRef[Figure]{f:AllocatedObject} shows an allocated object with a header, trailer, and alignment padding and spacing around the object. 53 53 The header contains information about the object, \eg size, type, etc. … … 104 104 \VRef[Figure]{f:MemoryFragmentation} shows an example of how a small block of memory fragments as objects are allocated and deallocated over time. 105 105 Blocks of free memory become smaller and non-contiguous making them less useful in serving allocation requests. 106 Memory is highly fragmented when the sizes of most free blocks are unusable.106 Memory is highly fragmented when most free blocks are unusable because of their sizes. 107 107 For example, \VRef[Figure]{f:Contiguous} and \VRef[Figure]{f:HighlyFragmented} have the same quantity of external fragmentation, but \VRef[Figure]{f:HighlyFragmented} is highly fragmented. 108 108 If there is a request to allocate a large object, \VRef[Figure]{f:Contiguous} is more likely to be able to satisfy it with existing free memory, while \VRef[Figure]{f:HighlyFragmented} likely has to request more memory from the operating system. … … 137 137 The fewer bin-sizes, the fewer lists need to be searched and maintained; 138 138 however, the bin sizes are less likely to closely fit the requested object size, leading to more internal fragmentation. 139 The more bin -sizes, the longer the search and the less likely free objects are to be reused, leading to more external fragmentation and potentially heap blowup.139 The more bin sizes, the longer the search and the less likely free objects are to be reused, leading to more external fragmentation and potentially heap blowup. 140 140 A variation of the binning algorithm allows objects to be allocated to the requested size, but when an object is freed, it is placed on the free list of the next smallest or equal bin-size. 141 141 For example, with bin sizes of 8 and 16 bytes, a request for 12 bytes allocates only 12 bytes, but when the object is freed, it is placed on the 8-byte bin-list. … … 157 157 The principle of locality recognizes that programs tend to reference a small set of data, called a working set, for a certain period of time, where a working set is composed of temporal and spatial accesses~\cite{Denning05}. 158 158 Temporal clustering implies a group of objects are accessed repeatedly within a short time period, while spatial clustering implies a group of objects physically close together (nearby addresses) are accessed repeatedly within a short time period. 
159 Temporal locality commonly occurs during an iterative computation with a fix set of disjoint variables, while spatial locality commonly occurs when traversing an array.159 Temporal locality commonly occurs during an iterative computation with a fixed set of disjoint variables, while spatial locality commonly occurs when traversing an array. 160 160 161 161 Hardware takes advantage of temporal and spatial locality through multiple levels of caching, \ie memory hierarchy. … … 328 328 For example, multiple heaps are managed in a pool, starting with a single or a fixed number of heaps that increase\-/decrease depending on contention\-/space issues. 329 329 At creation, a thread is associated with a heap from the pool. 330 When the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool.330 In some implementations of this model, when the thread attempts an allocation and its associated heap is locked (contention), it scans for an unlocked heap in the pool. 331 331 If an unlocked heap is found, the thread changes its association and uses that heap. 332 332 If all heaps are locked, the thread may create a new heap, use it, and then place the new heap into the pool; … … 347 347 The management information in the static zone must be able to locate all heaps in the dynamic zone. 348 348 The management information for the heaps must reside in the dynamic-allocation zone if there are a variable number. 349 Each heap in the dynamic zone is composed of a list of afree objects and a pointer to its reserved memory.349 Each heap in the dynamic zone is composed of a list of free objects and a pointer to its reserved memory. 350 350 An alternative implementation is for all heaps to share one reserved memory, which requires a separate lock for the reserved storage to ensure mutual exclusion when acquiring new memory. 351 351 Because multiple threads can allocate/free/reallocate adjacent storage, all forms of false sharing may occur. … … 361 361 Multiple heaps increase external fragmentation as the ratio of heaps to threads increases, which can lead to heap blowup. 362 362 The external fragmentation experienced by a program with a single heap is now multiplied by the number of heaps, since each heap manages its own free storage and allocates its own reserved memory. 363 Additionally, objects freed by one heap cannot be reused by other threads , except indirectly by returning free memory to the operating system, which can be expensive.364 (Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix.) 363 Additionally, objects freed by one heap cannot be reused by other threads without increasing the cost of the memory operations, except indirectly by returning free memory to the operating system, which can be expensive. 364 Depending on how the operating system provides dynamic storage to an application, returning storage may be difficult or impossible, \eg the contiguous @sbrk@ area in Unix. 365 365 In the worst case, a program in which objects are allocated from one heap but deallocated to another heap means these freed objects are never reused. 366 366 … … 384 384 In contrast, the T:H model spreads each thread's objects over a larger area in different heaps. 
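One way to realize the heap-pool variant described above (scan for an unlocked heap on contention) is with try-locks; this is a generic illustration, not taken from any particular allocator, and it simply blocks on the preferred heap when every heap in the pool is busy rather than growing the pool.
\begin{lstlisting}
#include <pthread.h>
#include <stddef.h>

typedef struct Heap { pthread_mutex_t lock; /* free lists, reserved memory, ... */ } Heap;

#define NPOOL 8
static Heap pool[NPOOL];                    // locks initialized at startup with pthread_mutex_init

static Heap * acquire_heap( unsigned preferred ) {
    for ( unsigned probe = 0; probe < NPOOL; probe += 1 ) {    // scan for an unlocked heap
        Heap * h = &pool[ (preferred + probe) % NPOOL ];
        if ( pthread_mutex_trylock( &h->lock ) == 0 ) return h;
    }
    Heap * h = &pool[ preferred % NPOOL ];  // all heaps busy: wait on the preferred one
    pthread_mutex_lock( &h->lock );
    return h;
}

static void release_heap( Heap * h ) { pthread_mutex_unlock( &h->lock ); }
\end{lstlisting}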
385 385 Thread heaps can also eliminate allocator-induced active false-sharing, if memory is acquired so it does not overlap at crucial boundaries with memory for another thread's heap. 386 For example, assume page boundaries coincide with cache line boundaries, then if a thread heap always acquires pages of memory,no two threads share a page or cache line unless pointers are passed among them.386 For example, assume page boundaries coincide with cache line boundaries, if a thread heap always acquires pages of memory then no two threads share a page or cache line unless pointers are passed among them. 387 387 Hence, allocator-induced active false-sharing in \VRef[Figure]{f:AllocatorInducedActiveFalseSharing} cannot occur because the memory for thread heaps never overlaps. 388 388 389 When a thread terminates, there are two options for handling its heap.390 First is to free all objects in the heap to the global heap and destroy the thread heap.389 When a thread terminates, there are two options for handling its thread heap. 390 First is to free all objects in the thread heap to the global heap and destroy the thread heap. 391 391 Second is to place the thread heap on a list of available heaps and reuse it for a new thread in the future. 392 392 Destroying the thread heap immediately may reduce external fragmentation sooner, since all free objects are freed to the global heap and may be reused by other threads. 393 Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible. .393 Alternatively, reusing thread heaps may improve performance if the inheriting thread makes similar allocation requests as the thread that previously held the thread heap because any unfreed storage is immediately accessible. 394 394 395 395 … … 417 417 When the user thread continues on the new kernel thread, it may have pointers into the previous kernel-thread's heap and hold locks associated with it. 418 418 To get the same kernel-thread safety, time slicing must be disabled/\-enabled around these operations, so the user thread cannot jump to another kernel thread. 419 However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption is rare (10--100 milliseconds).419 However, eagerly disabling/enabling time-slicing on the allocation/deallocation fast path is expensive, because preemption does not happen that frequently. 420 420 Instead, techniques exist to lazily detect this case in the interrupt handler, abort the preemption, and return to the operation so it can complete atomically. 421 421 Occasionally ignoring a preemption should be benign, but a persistent lack of preemption can result in both short and long term starvation. 422 423 424 \begin{figure}425 \centering426 \subfigure[Ownership]{427 \input{MultipleHeapsOwnership}428 } % subfigure429 \hspace{0.25in}430 \subfigure[No Ownership]{431 \input{MultipleHeapsNoOwnership}432 } % subfigure433 \caption{Heap Ownership}434 \label{f:HeapsOwnership}435 \end{figure}436 422 437 423 … … 447 433 For the T:1/T:H models with or without ownership or the 1:1 model with ownership, a thread may free objects to different heaps, which makes each heap publicly accessible to all threads, called a \newterm{public heap}. 
448 434 435 \begin{figure} 436 \centering 437 \subfigure[Ownership]{ 438 \input{MultipleHeapsOwnership} 439 } % subfigure 440 \hspace{0.25in} 441 \subfigure[No Ownership]{ 442 \input{MultipleHeapsNoOwnership} 443 } % subfigure 444 \caption{Heap Ownership} 445 \label{f:HeapsOwnership} 446 \end{figure} 447 449 448 \VRef[Figure]{f:MultipleHeapStorageOwnership} shows the effect of ownership on storage layout. 450 (For simplicity assume the heaps all use the same size of reserves storage.)449 (For simplicity, assume the heaps all use the same size of reserves storage.) 451 450 In contrast to \VRef[Figure]{f:MultipleHeapStorage}, each reserved area used by a heap only contains free storage for that particular heap because threads must return free objects back to the owner heap. 452 451 Again, because multiple threads can allocate/free/reallocate adjacent storage in the same heap, all forms of false sharing may occur. … … 473 472 While the returning thread can batch objects, batching across multiple heaps is complex and there is no obvious time when to push back to the owner heap. 474 473 It is better for returning threads to immediately return to the receiving thread's batch list as the receiving thread has better knowledge when to incorporate the batch list into its free pool. 475 Batching leverages the fact that most allocation patterns use the contention-free fast-path so locking on the batch list is rare for both the returning and receiving threads.476 477 It is possible for heaps to steal objects rather than return them and reallocating these objectswhen storage runs out on a heap.474 Batching leverages the fact that most allocation patterns use the contention-free fast-path, so locking on the batch list is rare for both the returning and receiving threads. 475 476 It is possible for heaps to steal objects rather than return them and then reallocate these objects again when storage runs out on a heap. 478 477 However, stealing can result in passive false-sharing. 479 478 For example, in \VRef[Figure]{f:AllocatorInducedPassiveFalseSharing}, Object$_2$ may be deallocated to Thread$_2$'s heap initially. … … 485 484 486 485 Bracketing every allocation with headers/trailers can result in significant internal fragmentation, as shown in \VRef[Figure]{f:ObjectHeaders}. 487 Especially if the headers contain redundant management information, \eg object size may be the same for many objects because programs only allocate a small set of object sizes.486 Especially if the headers contain redundant management information, then storing that information is a waste of storage, \eg object size may be the same for many objects because programs only allocate a small set of object sizes. 488 487 As well, it can result in poor cache usage, since only a portion of the cache line is holding useful information from the program's perspective. 489 488 Spatial locality can also be negatively affected leading to poor cache locality~\cite{Feng05}: … … 660 659 With local free-lists in containers, as in \VRef[Figure]{f:LocalFreeListWithinContainers}, the container is simply removed from one heap's free list and placed on the new heap's free list. 661 660 Thus, when using local free-lists, the operation of moving containers is reduced from $O(N)$ to $O(1)$. 662 The cost is adding information to aheader, which increases the header size, and therefore internal fragmentation.661 However, there is the additional storage cost in the header, which increases the header size, and therefore internal fragmentation. 
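The batching of returned objects mentioned above can be realized, for example, with one intrusive lock-free stack per heap: non-owner threads push freed objects onto it, and the owner occasionally swings the whole batch into its local free pool with a single exchange. This is a sketch of that idea under the stated assumption, not any allocator's actual code.
\begin{lstlisting}
#include <stdatomic.h>
#include <stddef.h>

typedef struct FreeObject { struct FreeObject * next; } FreeObject; // link reuses the freed storage

typedef struct { _Atomic(FreeObject *) top; } RemoteList;           // one per (heap, size class)

// Non-owner thread: push a freed object onto the owner heap's remote list.
static void remote_free( RemoteList * list, void * storage ) {
    FreeObject * obj = storage;
    FreeObject * old = atomic_load_explicit( &list->top, memory_order_relaxed );
    do {
        obj->next = old;
    } while ( ! atomic_compare_exchange_weak_explicit( &list->top, &old, obj,
                                                       memory_order_release, memory_order_relaxed ) );
}

// Owner thread: take the whole batch at once; popping everything avoids the ABA problem.
static FreeObject * reclaim_remote( RemoteList * list ) {
    return atomic_exchange_explicit( &list->top, (FreeObject *)NULL, memory_order_acquire );
}
\end{lstlisting}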
663 662 664 663 \begin{figure} … … 689 688 The main goal of the hybrid approach is to eliminate locking on thread-local allocation/deallocation, while providing ownership to prevent heap blowup. 690 689 In the hybrid approach, a thread first allocates from its private heap and second from its public heap if no free memory exists in the private heap. 691 Similarly, a thread first deallocates an object its private heap, and second to the public heap.690 Similarly, a thread first deallocates an object to its private heap, and second to the public heap. 692 691 Both private and public heaps can allocate/deallocate to/from the global heap if there is no free memory or excess free memory, although an implementation may choose to funnel all interaction with the global heap through one of the heaps. 693 692 Note, deallocation from the private to the public (dashed line) is unlikely because there is no obvious advantages unless the public heap provides the only interface to the global heap. -
doc/theses/mubeen_zulfiqar_MMath/benchmarks.tex
r015925a re5d9274 12 12 \item[Benchmarks] 13 13 are a suite of application programs (SPEC CPU/WEB) that are exercised in a common way (inputs) to find differences among underlying software implementations associated with an application (compiler, memory allocator, web server, \etc). 14 The applications are suppose to represent common execution patterns that need to perform well with respect to an underlying software implementation.14 The applications are supposed to represent common execution patterns that need to perform well with respect to an underlying software implementation. 15 15 Benchmarks are often criticized for having overlapping patterns, insufficient patterns, or extraneous code that masks patterns. 16 16 \item[Micro-Benchmarks] … … 26 26 27 27 This thesis designs and examines a new set of micro-benchmarks for memory allocators that test a variety of allocation patterns, each with multiple tuning parameters. 28 The aim of the micro-benchmark suite is to create a set of programs that can evaluate a memory allocator based on the key performance m atrices such as speed, memory overhead, and cache performance.28 The aim of the micro-benchmark suite is to create a set of programs that can evaluate a memory allocator based on the key performance metrics such as speed, memory overhead, and cache performance. 29 29 % These programs can be taken as a standard to benchmark an allocator's basic goals. 30 30 These programs give details of an allocator's memory overhead and speed under certain allocation patterns. 31 The allocation patterns are configurable (adjustment knobs) to observe an allocator's performance across a spectrum of events for a desired allocation pattern, which is seldom possible with benchmark programs.31 The allocation patterns are configurable (adjustment knobs) to observe an allocator's performance across a spectrum allocation patterns, which is seldom possible with benchmark programs. 32 32 Each micro-benchmark program has multiple control knobs specified by command-line arguments. 33 33 34 The new micro-benchmark suite measures performance by allocating dynamic objects and measuring specific m atrices.34 The new micro-benchmark suite measures performance by allocating dynamic objects and measuring specific metrics. 35 35 An allocator's speed is benchmarked in different ways, as are issues like false sharing. 36 36 … … 40 40 Modern memory allocators, such as llheap, must handle multi-threaded programs at the KT and UT level. 41 41 The following multi-threaded micro-benchmarks are presented to give a sense of prior work~\cite{Berger00} at the KT level. 42 None of the prior work address multi-threading at the UT level.42 None of the prior work addresses multi-threading at the UT level. 43 43 44 44 … … 47 47 This benchmark stresses the ability of the allocator to handle different threads allocating and deallocating independently. 48 48 There is no interaction among threads, \ie no object sharing. 49 Each thread repeatedly allocate 100,000 \emph{8-byte} objects then deallocates them in the order they were allocated.50 Runtime of the benchmark evaluates its efficiency.49 Each thread repeatedly allocates 100,000 \emph{8-byte} objects then deallocates them in the order they were allocated. 50 The execution time of the benchmark evaluates its efficiency. 51 51 52 52 … … 63 63 Before the thread terminates, it passes its array of 10,000 objects to a new child thread to continue the process. 64 64 The number of thread generations varies depending on the thread speed. 
65 It calculates memory operations per second as an indicator of memory allocator's performance.65 It calculates memory operations per second as an indicator of the memory allocator's performance. 66 66 67 67 … … 75 75 \label{s:ChurnBenchmark} 76 76 77 The churn benchmark measures the runtime speed of an allocator in a multi-threaded scen erio, where each thread extensively allocates and frees dynamic memory.77 The churn benchmark measures the runtime speed of an allocator in a multi-threaded scenario, where each thread extensively allocates and frees dynamic memory. 78 78 Only @malloc@ and @free@ are used to eliminate any extra cost, such as @memcpy@ in @calloc@ or @realloc@. 79 Churn simulates a memory intensive program thatcan be tuned to create different scenarios.79 Churn simulates a memory intensive program and can be tuned to create different scenarios. 80 80 81 81 \VRef[Figure]{fig:ChurnBenchFig} shows the pseudo code for the churn micro-benchmark. … … 133 133 When threads share a cache line, frequent reads/writes to their cache-line object causes cache misses, which cause escalating delays as cache distance increases. 134 134 135 Cache thrash tries to create a scen erio that leads to false sharing, if the underlying memory allocator is allocating dynamic memory to multiple threads on the same cache lines.135 Cache thrash tries to create a scenario that leads to false sharing, if the underlying memory allocator is allocating dynamic memory to multiple threads on the same cache lines. 136 136 Ideally, a memory allocator should distance the dynamic memory region of one thread from another. 137 137 Having multiple threads allocating small objects simultaneously can cause a memory allocator to allocate objects on the same cache line, if its not distancing the memory among different threads. … … 141 141 Each worker thread allocates an object and intensively reads/writes it for M times to possible invalidate cache lines that may interfere with other threads sharing the same cache line. 142 142 Each thread repeats this for N times. 143 The main thread measures the total time taken tofor all worker threads to complete.144 Worker threads sharing cache lines with each other willtake longer.143 The main thread measures the total time taken for all worker threads to complete. 144 Worker threads sharing cache lines with each other are expected to take longer. 145 145 146 146 \begin{figure} … … 156 156 signal workers to free 157 157 ... 158 print addresses from each $thread$159 158 Worker Thread$\(_1\)$ 160 allocate, write, read, free161 warmup memory in chunkc of 16 bytes162 ...163 malloc N objects164 ...165 free objects166 return object address to Main Thread159 warm up memory in chunks of 16 bytes 160 ... 161 For N 162 malloc an object 163 read/write the object M times 164 free the object 165 ... 167 166 Worker Thread$\(_2\)$ 168 167 // same as Worker Thread$\(_1\)$ … … 191 190 192 191 The cache-scratch micro-benchmark measures allocator-induced passive false-sharing as illustrated in \VRef{s:AllocatorInducedPassiveFalseSharing}. 193 As forcache thrash, if memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance.192 As with cache thrash, if memory is allocated for multiple threads on the same cache line, this can significantly slow down program performance. 194 193 In this scenario, the false sharing is being caused by the memory allocator although it is started by the program sharing an object. 
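A minimal C sketch of the worker loop shared by the cache-thrash pseudo-code above and the cache-scratch variant that follows: each worker performs N rounds of allocating an object, reading/writing it M times, and freeing it, and whether this induces false sharing depends entirely on where the allocator places each worker's objects. The names and knob values below are illustrative assumptions; the main-thread start/free signalling and the warm-up phase of the real benchmarks are omitted.

    #include <stdlib.h>

    // One worker of the cache-thrash style loop: N rounds of
    // (malloc an object, touch it M times, free it).  False sharing occurs
    // only if the allocator places objects belonging to different workers
    // on the same cache line.
    static void worker( size_t N, size_t M, size_t objsize ) {
        for ( size_t n = 0; n < N; n += 1 ) {
            volatile char * obj = malloc( objsize );        // placement decided by the allocator
            for ( size_t m = 0; m < M; m += 1 ) {
                obj[m % objsize] += 1;                      // intensive read/write of the object
            }
            free( (void *)obj );
        }
    }

    int main() {                                            // single worker shown; the benchmark runs K threads
        worker( 1000, 10000, 16 );
        return 0;
    }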
195 194 … … 202 201 Cache scratch tries to create a scenario that leads to false sharing and should make the memory allocator preserve the program-induced false sharing, if it does not return a freed object to its owner thread and, instead, re-uses it instantly. 203 202 An allocator using object ownership, as described in section \VRef{s:Ownership}, is less susceptible to allocator-induced passive false-sharing. 204 If the object is returned to the thread who owns it, then the thread that gets a new objectis less likely to be on the same cache line.203 If the object is returned to the thread that owns it, then the new object that the thread gets is less likely to be on the same cache line. 205 204 206 205 \VRef[Figure]{fig:benchScratchFig} shows the pseudo code for the cache-scratch micro-benchmark. … … 224 223 signal workers to free 225 224 ... 226 print addresses from each $thread$227 225 Worker Thread$\(_1\)$ 228 allocate, write, read, free 229 warmup memory in chunkc of 16 bytes 230 ... 231 for ( N ) 232 free an object passed by Main Thread 226 warmup memory in chunks of 16 bytes 227 ... 228 free the object passed by the Main Thread 229 For N 233 230 malloc new object 234 ...235 free objects236 return new object addresses to Main Thread231 read/write the object M times 232 free the object 233 ... 237 234 Worker Thread$\(_2\)$ 238 235 // same as Worker Thread$\(_1\)$ … … 248 245 249 246 Similar to benchmark cache thrash in section \VRef{sec:benchThrashSec}, different cache access scenarios can be created using the following command-line arguments. 250 \begin{description}[ itemsep=0pt,parsep=0pt]247 \begin{description}[topsep=0pt,itemsep=0pt,parsep=0pt] 251 248 \item[threads:] 252 249 number of threads (K). … … 262 259 \subsection{Speed Micro-Benchmark} 263 260 \label{s:SpeedMicroBenchmark} 261 \vspace*{-4pt} 264 262 265 263 The speed benchmark measures the runtime speed of individual and sequences of memory allocation routines: 266 \begin{enumerate}[ itemsep=0pt,parsep=0pt]264 \begin{enumerate}[topsep=-5pt,itemsep=0pt,parsep=0pt] 267 265 \item malloc 268 266 \item realloc … … 332 330 \VRef[Figure]{fig:MemoryBenchFig} shows the pseudo code for the memory micro-benchmark. 333 331 It creates a producer-consumer scenario with K producer threads and each producer has M consumer threads. 334 A producer has a separate buffer for each consumer and allocates N objects of random sizes following a settable distribution for each consumer.332 A producer has a separate buffer for each consumer and allocates N objects of random sizes following a configurable distribution for each consumer. 335 333 A consumer frees these objects. 336 334 After every memory operation, program memory usage is recorded throughout the runtime. -
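For the speed benchmark described above, the basic measurement is the time per call (or per short chain of calls) to the allocation routines. A hedged C sketch of one such timing loop, for the simplest malloc/free chain, is shown below; the iteration count, request size, and use of clock_gettime are illustrative assumptions, and the real benchmark times several different routine chains per thread.

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main() {
        enum { N = 1000000, SIZE = 64 };                    // illustrative knob values
        struct timespec start, end;
        clock_gettime( CLOCK_MONOTONIC, &start );
        for ( int i = 0; i < N; i += 1 ) {
            char * p = malloc( SIZE );
            p[0] = (char)i;                                 // touch the object so the pair is not optimized away
            free( p );
        }
        clock_gettime( CLOCK_MONOTONIC, &end );
        double ns = ( end.tv_sec - start.tv_sec ) * 1e9 + ( end.tv_nsec - start.tv_nsec );
        printf( "%.1f ns per malloc/free pair\n", ns / N );
        return 0;
    }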
doc/theses/mubeen_zulfiqar_MMath/conclusion.tex
r015925a re5d9274 17 17 % ==================== 18 18 19 The goal of this thesis was to build a low-latency memory allocator for both KT and UT multi-threads systems, which is competitive with the best current memory allocators,while extending the feature set of existing and new allocator routines.19 The goal of this thesis was to build a low-latency (or high bandwidth) memory allocator for both KT and UT multi-threading systems that is competitive with the best current memory allocators while extending the feature set of existing and new allocator routines. 20 20 The new llheap memory-allocator achieves all of these goals, while maintaining and managing sticky allocation information without a performance loss. 21 21 Hence, it becomes possible to use @realloc@ frequently as a safe operation, rather than just occasionally. 22 22 Furthermore, the ability to query sticky properties and information allows programmers to write safer programs, as it is possible to dynamically match allocation styles from unknown library routines that return allocations. 23 23 24 Extending the C allocation API with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ means programmers do not make mistakes writing theses useful allocation operations.24 Extending the C allocation API with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ means programmers do not have to do these useful allocation operations themselves. 25 25 The ability to use \CFA's advanced type-system (and possibly \CC's too) to have one allocation routine with completely orthogonal sticky properties shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation. 26 26 27 27 Providing comprehensive statistics for all allocation operations is invaluable in understanding and debugging a program's dynamic behaviour. 28 No other memory allocator provides comprehensive statistics gathering.28 No other memory allocator provides such comprehensive statistics gathering. 29 29 This capability was used extensively during the development of llheap to verify its behaviour. 30 30 As well, providing a debugging mode where allocations are checked, along with internal pre/post conditions and invariants, is extremely useful, especially for students. 31 While not as powerful as the @valgrind@ interpreter, a large number of allocation smistakes are detected.31 While not as powerful as the @valgrind@ interpreter, a large number of allocation mistakes are detected. 32 32 Finally, contention-free statistics gathering and debugging have a low enough cost to be used in production code. 33 33 … … 36 36 37 37 Starting a micro-benchmark test-suite for comparing allocators, rather than relying on a suite of arbitrary programs, has been an interesting challenge. 38 The current micro-benchmarks allow some understand of allocator implementation properties without actually looking at the implementation.38 The current micro-benchmarks allow some understanding of allocator implementation properties without actually looking at the implementation. 39 39 For example, the memory micro-benchmark quickly identified how several of the allocators work at the global level. 40 40 It was not possible to show how the micro-benchmarks adjustment knobs were used to tune to an interesting test point. … … 45 45 46 46 A careful walk-though of the allocator fastpath should yield additional optimizations for a slight performance gain. 
47 In particular, looking atthe implementation of rpmalloc, which is often the fastest allocator,47 In particular, analysing the implementation of rpmalloc, which is often the fastest allocator, 48 48 49 The micro-benchmark sproject requires more testing and analysis.50 Additional allocation s patterns are needed to extract meaningful information about allocators, and within allocation patterns, what are the besttuning knobs.49 The micro-benchmark project requires more testing and analysis. 50 Additional allocation patterns are needed to extract meaningful information about allocators, and within allocation patterns, what are the most useful tuning knobs. 51 51 Also, identifying ways to visualize the results of the micro-benchmarks is a work in progress. 52 52 53 After llheap is made available on gitHub, interacting with its users to locate problems and improvements,will make llbench a more robust memory allocator.54 As well, feedback from the \uC and \CFA projects, which have adopted llheap for their memory allocator, will provide additional feedback.53 After llheap is made available on GitHub, interacting with its users to locate problems and improvements will make llbench a more robust memory allocator. 54 As well, feedback from the \uC and \CFA projects, which have adopted llheap for their memory allocator, will provide additional information. -
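The point above about using realloc frequently as a safe operation corresponds to a usage pattern like the following hedged C sketch: request an aligned, zero-filled allocation, then grow it and rely on llheap preserving those sticky properties across the enlargement. The cmemalign prototype is written out here as an assumption (modeled on calloc plus memalign) since its exact signature is not shown in this changeset, and the sketch is only meaningful when linked against llheap.

    #include <stdlib.h>

    // Assumed prototype for llheap's aligned, zero-filled allocation routine.
    extern void * cmemalign( size_t alignment, size_t dim, size_t elemSize );

    int main() {
        // 64-byte aligned and zero filled: both properties are "sticky"
        char * buf = cmemalign( 64, 100, sizeof( char ) );
        // enlarging through realloc is intended to keep the alignment and
        // zero-fill properties, so growing the buffer repeatedly stays safe
        buf = realloc( buf, 1000 );
        buf[999] = 'x';                                     // extension is usable
        free( buf );
        return 0;
    }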
doc/theses/mubeen_zulfiqar_MMath/figures/Header.fig
r015925a re5d9274 20 20 2 1 1 1 0 7 50 -1 -1 4.000 0 0 -1 0 0 2 21 21 3300 1500 3300 2400 22 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 23 4200 1800 6600 1800 6600 2100 4200 2100 4200 1800 22 24 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3 23 25 1 1 1.00 45.00 90.00 24 4 050 2625 3750 2625 3750 240026 4200 2775 3750 2775 3750 1725 25 27 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3 26 28 1 1 1.00 45.00 90.00 27 4050 2850 3450 2850 3450 2400 28 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 29 4200 1800 6600 1800 6600 2100 4200 2100 4200 1800 29 4200 2550 4050 2550 4050 1725 30 2 1 0 1 0 7 50 -1 -1 4.000 0 0 -1 1 0 3 31 1 1 1.00 45.00 90.00 32 4200 3000 3450 3000 3450 2025 30 33 4 0 0 50 -1 0 12 0.0000 2 180 1185 1875 1725 bucket pointer\001 31 34 4 0 0 50 -1 0 12 0.0000 2 180 1005 1875 2025 mapped size\001 32 35 4 0 0 50 -1 0 12 0.0000 2 135 1215 1875 2325 next free block\001 33 36 4 2 0 50 -1 0 12 0.0000 2 135 480 1725 2025 union\001 34 4 1 0 50 -1 0 12 0.0000 2 135 270 3775 2325 0/1\00135 4 1 0 50 -1 0 12 0.0000 2 135 270 3475 2325 0/1\00136 37 4 1 0 50 -1 0 12 0.0000 2 180 945 5400 2025 request size\001 37 38 4 1 0 50 -1 0 12 0.0000 2 180 765 5400 1425 4/8-bytes\001 38 39 4 1 0 50 -1 0 12 0.0000 2 180 765 3000 1425 4/8-bytes\001 39 4 0 0 50 -1 0 12 0.0000 2 135 825 4125 2700 zero filled\001 40 4 0 0 50 -1 0 12 0.0000 2 180 1515 4125 2925 mapped allocation\001 40 4 1 0 50 -1 0 12 0.0000 2 135 270 3475 2025 0/1\001 41 4 1 0 50 -1 0 12 0.0000 2 135 270 3775 1725 0/1\001 42 4 1 0 50 -1 0 12 0.0000 2 135 270 4075 1725 0/1\001 43 4 0 0 50 -1 0 12 0.0000 2 180 1515 4275 3075 mapped allocation\001 44 4 0 0 50 -1 0 12 0.0000 2 135 825 4275 2850 zero filled\001 45 4 0 0 50 -1 0 12 0.0000 2 180 1920 4275 2625 alignment (fake header)\001 -
doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsNoOwnership.fig
r015925a re5d9274 1 #FIG 3.2 Produced by xfig version 3.2. 51 #FIG 3.2 Produced by xfig version 3.2.7b 2 2 Landscape 3 3 Center 4 4 Inches 5 Letter 5 Letter 6 6 100.00 7 7 Single … … 11 11 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 12 12 1200 2100 1500 2100 1500 1800 1200 1800 1200 2100 13 4 1 0 50 -1 0 11 0.0000 2 1 95 495 1350 2025 H$_1$\00113 4 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001 14 14 -6 15 15 6 1950 1800 2550 2100 16 16 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 17 17 2100 2100 2400 2100 2400 1800 2100 1800 2100 2100 18 4 1 0 50 -1 0 11 0.0000 2 1 95 495 2250 2025 H$_2$\00118 4 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_2$\001 19 19 -6 20 20 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 1350 1350 150 150 1350 1350 1500 1350 21 21 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 2250 1350 150 150 2250 1350 2400 1350 22 22 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 23 0 01.00 45.00 90.0024 0 01.00 45.00 90.0023 1 1 1.00 45.00 90.00 24 1 1 1.00 45.00 90.00 25 25 1275 1800 1275 1500 26 2 1 0 1 0 750 -1 -1 0.000 0 0 -1 1 0 227 0 01.00 45.00 90.0026 2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2 27 1 1 1.00 45.00 90.00 28 28 1425 1500 1425 1800 29 29 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2 30 0 01.00 45.00 90.0030 1 1 1.00 45.00 90.00 31 31 1425 1500 2175 1800 32 32 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2 33 0 01.00 45.00 90.0033 1 1 1.00 45.00 90.00 34 34 2175 1500 1425 1800 35 35 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 36 0 01.00 45.00 90.0036 1 1 1.00 45.00 90.00 37 37 2175 1500 2175 1800 38 38 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 39 0 01.00 45.00 90.0040 0 01.00 45.00 90.0039 1 1 1.00 45.00 90.00 40 1 1 1.00 45.00 90.00 41 41 2325 1800 2325 1500 42 4 1 0 50 -1 0 11 0.0000 2 1 95 465 1350 1425 T$_1$\00143 4 1 0 50 -1 0 11 0.0000 2 1 95 465 2250 1425 T$_2$\00142 4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001 43 4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_2$\001 -
doc/theses/mubeen_zulfiqar_MMath/figures/MultipleHeapsOwnership.fig
r015925a re5d9274 1 #FIG 3.2 Produced by xfig version 3.2. 51 #FIG 3.2 Produced by xfig version 3.2.7b 2 2 Landscape 3 3 Center 4 4 Inches 5 Letter 5 Letter 6 6 100.00 7 7 Single … … 11 11 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 12 12 1200 2100 1500 2100 1500 1800 1200 1800 1200 2100 13 4 1 0 50 -1 0 11 0.0000 2 1 95 495 1350 2025 H$_1$\00113 4 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001 14 14 -6 15 15 6 1950 1800 2550 2100 16 16 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 17 17 2100 2100 2400 2100 2400 1800 2100 1800 2100 2100 18 4 1 0 50 -1 0 11 0.0000 2 1 95 495 2250 2025 H$_2$\00118 4 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_2$\001 19 19 -6 20 20 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 1350 1350 150 150 1350 1350 1500 1350 21 21 1 3 0 1 0 7 50 -1 -1 0.000 0 -0.0000 2250 1350 150 150 2250 1350 2400 1350 22 22 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 23 0 01.00 45.00 90.0024 0 01.00 45.00 90.0023 1 1 1.00 45.00 90.00 24 1 1 1.00 45.00 90.00 25 25 2175 1500 1425 1800 26 26 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 27 0 0 1.00 45.00 90.00 28 0 0 1.00 45.00 90.00 29 1425 1500 2175 1800 30 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 31 0 0 1.00 45.00 90.00 32 0 0 1.00 45.00 90.00 27 1 1 1.00 45.00 90.00 28 1 1 1.00 45.00 90.00 33 29 1275 1800 1275 1500 34 30 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 35 0 01.00 45.00 90.0036 0 01.00 45.00 90.0031 1 1 1.00 45.00 90.00 32 1 1 1.00 45.00 90.00 37 33 2325 1800 2325 1500 38 4 1 0 50 -1 0 11 0.0000 2 195 465 2250 1425 T$_2$\001 39 4 1 0 50 -1 0 11 0.0000 2 195 465 1350 1425 T$_1$\001 34 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 35 1 1 1.00 45.00 90.00 36 1 1 1.00 45.00 90.00 37 1425 1500 2175 1800 38 4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_2$\001 39 4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001 -
doc/theses/mubeen_zulfiqar_MMath/figures/PerThreadHeap.fig
r015925a re5d9274 1 #FIG 3.2 Produced by xfig version 3.2. 51 #FIG 3.2 Produced by xfig version 3.2.7b 2 2 Landscape 3 3 Center 4 4 Inches 5 Letter 5 Letter 6 6 100.00 7 7 Single … … 11 11 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 12 12 2700 1800 3000 1800 3000 2100 2700 2100 2700 1800 13 4 1 0 50 -1 0 11 0.0000 2 1 35135 2850 2025 G\00113 4 1 0 50 -1 0 11 0.0000 2 120 135 2850 2025 G\001 14 14 -6 15 15 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350 … … 17 17 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350 18 18 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 19 0 01.00 45.00 90.0020 0 01.00 45.00 90.0019 1 1 1.00 45.00 90.00 20 1 1 1.00 45.00 90.00 21 21 1350 1500 1350 1800 22 22 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 … … 27 27 2100 1800 2400 1800 2400 2100 2100 2100 2100 1800 28 28 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 29 0 01.00 45.00 90.0030 0 01.00 45.00 90.0029 1 1 1.00 45.00 90.00 30 1 1 1.00 45.00 90.00 31 31 1800 1500 1800 1800 32 32 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 33 0 01.00 45.00 90.0034 0 01.00 45.00 90.0033 1 1 1.00 45.00 90.00 34 1 1 1.00 45.00 90.00 35 35 2250 1500 2250 1800 36 4 1 0 50 -1 0 11 0.0000 2 1 95 1320 2550 2025 $\\Leftrightarrow$\00137 4 1 0 50 -1 0 11 0.0000 2 1 95 1320 3150 2025 $\\Leftrightarrow$\00138 4 0 0 50 -1 0 11 0.0000 2 1 35240 3300 2025 OS\00139 4 1 0 50 -1 0 11 0.0000 2 1 95 495 1350 2025 H$_1$\00140 4 1 0 50 -1 0 11 0.0000 2 1 95 465 1350 1425 T$_1$\00141 4 1 0 50 -1 0 11 0.0000 2 1 95 495 1800 2025 H$_2$\00142 4 1 0 50 -1 0 11 0.0000 2 1 95 465 1800 1425 T$_2$\00143 4 1 0 50 -1 0 11 0.0000 2 1 95 495 2250 2025 H$_3$\00144 4 1 0 50 -1 0 11 0.0000 2 1 95 465 2250 1425 T$_3$\00136 4 1 0 50 -1 0 11 0.0000 2 180 1260 2550 2025 $\\Leftrightarrow$\001 37 4 1 0 50 -1 0 11 0.0000 2 180 1260 3150 2025 $\\Leftrightarrow$\001 38 4 0 0 50 -1 0 11 0.0000 2 120 240 3300 2025 OS\001 39 4 1 0 50 -1 0 11 0.0000 2 165 495 1350 2025 H$_1$\001 40 4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001 41 4 1 0 50 -1 0 11 0.0000 2 165 495 1800 2025 H$_2$\001 42 4 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001 43 4 1 0 50 -1 0 11 0.0000 2 165 495 2250 2025 H$_3$\001 44 4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001 -
doc/theses/mubeen_zulfiqar_MMath/figures/SharedHeaps.fig
r015925a re5d9274 1 #FIG 3.2 Produced by xfig version 3.2. 51 #FIG 3.2 Produced by xfig version 3.2.7b 2 2 Landscape 3 3 Center 4 4 Inches 5 Letter 5 Letter 6 6 100.00 7 7 Single … … 10 10 6 1500 1200 2100 1500 11 11 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350 12 4 1 0 50 -1 0 11 0.0000 2 1 95 465 1800 1425 T$_2$\00112 4 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001 13 13 -6 14 14 6 1050 1200 1650 1500 15 15 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350 16 4 1 0 50 -1 0 11 0.0000 2 1 95 465 1350 1425 T$_1$\00116 4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001 17 17 -6 18 18 6 1950 1200 2550 1500 19 19 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350 20 4 1 0 50 -1 0 11 0.0000 2 1 95 465 2250 1425 T$_3$\00120 4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001 21 21 -6 22 22 6 1275 1800 1875 2100 23 23 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 24 24 1425 1800 1725 1800 1725 2100 1425 2100 1425 1800 25 4 1 0 50 -1 0 11 0.0000 2 1 95 495 1575 2025 H$_1$\00125 4 1 0 50 -1 0 11 0.0000 2 165 495 1575 2025 H$_1$\001 26 26 -6 27 27 6 1725 1800 2325 2100 28 28 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 29 29 1875 1800 2175 1800 2175 2100 1875 2100 1875 1800 30 4 1 0 50 -1 0 11 0.0000 2 1 95 495 2025 2025 H$_2$\00130 4 1 0 50 -1 0 11 0.0000 2 165 495 2025 2025 H$_2$\001 31 31 -6 32 32 6 2475 1800 2775 2100 33 33 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 34 34 2475 1800 2775 1800 2775 2100 2475 2100 2475 1800 35 4 1 0 50 -1 0 11 0.0000 2 1 35135 2625 2025 G\00135 4 1 0 50 -1 0 11 0.0000 2 120 135 2625 2025 G\001 36 36 -6 37 37 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 38 0 01.00 45.00 90.0039 0 01.00 45.00 90.0038 1 1 1.00 45.00 90.00 39 1 1 1.00 45.00 90.00 40 40 1275 1500 1500 1800 41 41 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 42 0 01.00 45.00 90.0043 0 01.00 45.00 90.0042 1 1 1.00 45.00 90.00 43 1 1 1.00 45.00 90.00 44 44 1425 1500 1950 1800 45 45 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 46 0 01.00 45.00 90.0047 0 01.00 45.00 90.0046 1 1 1.00 45.00 90.00 47 1 1 1.00 45.00 90.00 48 48 1725 1500 1650 1800 49 49 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 50 0 01.00 45.00 90.0051 0 01.00 45.00 90.0050 1 1 1.00 45.00 90.00 51 1 1 1.00 45.00 90.00 52 52 1875 1500 2025 1800 53 53 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 54 0 01.00 45.00 90.0055 0 01.00 45.00 90.0054 1 1 1.00 45.00 90.00 55 1 1 1.00 45.00 90.00 56 56 2250 1500 2100 1800 57 4 0 0 50 -1 0 11 0.0000 2 1 35240 3075 2025 OS\00158 4 1 0 50 -1 0 11 0.0000 2 1 95 1320 2325 2025 $\\Leftrightarrow$\00159 4 1 0 50 -1 0 11 0.0000 2 1 95 1320 2925 2025 $\\Leftrightarrow$\00157 4 0 0 50 -1 0 11 0.0000 2 120 240 3075 2025 OS\001 58 4 1 0 50 -1 0 11 0.0000 2 180 1260 2325 2025 $\\Leftrightarrow$\001 59 4 1 0 50 -1 0 11 0.0000 2 180 1260 2925 2025 $\\Leftrightarrow$\001 -
doc/theses/mubeen_zulfiqar_MMath/figures/SingleHeap.fig
r015925a re5d9274 1 #FIG 3.2 Produced by xfig version 3.2. 51 #FIG 3.2 Produced by xfig version 3.2.7b 2 2 Landscape 3 3 Center 4 4 Inches 5 Letter 5 Letter 6 6 100.00 7 7 Single … … 10 10 6 1500 1200 2100 1500 11 11 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1350 150 150 1800 1350 1950 1350 12 4 1 0 50 -1 0 11 0.0000 2 1 95 465 1800 1425 T$_2$\00112 4 1 0 50 -1 0 11 0.0000 2 165 465 1800 1425 T$_2$\001 13 13 -6 14 14 6 1050 1200 1650 1500 15 15 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1350 1350 150 150 1350 1350 1500 1350 16 4 1 0 50 -1 0 11 0.0000 2 1 95 465 1350 1425 T$_1$\00116 4 1 0 50 -1 0 11 0.0000 2 165 465 1350 1425 T$_1$\001 17 17 -6 18 18 6 1950 1200 2550 1500 19 19 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2250 1350 150 150 2250 1350 2400 1350 20 4 1 0 50 -1 0 11 0.0000 2 1 95 465 2250 1425 T$_3$\00120 4 1 0 50 -1 0 11 0.0000 2 165 465 2250 1425 T$_3$\001 21 21 -6 22 22 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 23 0 01.00 45.00 90.0024 0 01.00 45.00 90.0023 1 1 1.00 45.00 90.00 24 1 1 1.00 45.00 90.00 25 25 1350 1500 1725 1800 26 26 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 27 0 01.00 45.00 90.0028 0 01.00 45.00 90.0027 1 1 1.00 45.00 90.00 28 1 1 1.00 45.00 90.00 29 29 2250 1500 1875 1800 30 30 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5 31 31 1650 1800 1950 1800 1950 2100 1650 2100 1650 1800 32 32 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 33 0 01.00 45.00 90.0034 0 01.00 45.00 90.0033 1 1 1.00 45.00 90.00 34 1 1 1.00 45.00 90.00 35 35 1800 1500 1800 1800 36 4 1 0 50 -1 0 11 0.0000 2 1 95 495 1800 2025 H$_1$\00137 4 1 0 50 -1 0 11 0.0000 2 1 95 1320 2100 2025 $\\Leftrightarrow$\00138 4 0 0 50 -1 0 11 0.0000 2 1 35240 2250 2025 OS\00136 4 1 0 50 -1 0 11 0.0000 2 165 495 1800 2025 H$_1$\001 37 4 1 0 50 -1 0 11 0.0000 2 180 1260 2100 2025 $\\Leftrightarrow$\001 38 4 0 0 50 -1 0 11 0.0000 2 120 240 2250 2025 OS\001 -
doc/theses/mubeen_zulfiqar_MMath/figures/UserKernelHeaps.fig
r015925a re5d9274 45 45 -6 46 46 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 47 0 01.00 45.00 90.0048 0 01.00 45.00 90.0047 1 1 1.00 45.00 90.00 48 1 1 1.00 45.00 90.00 49 49 2025 2100 2025 2400 50 50 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 51 0 01.00 45.00 90.0052 0 01.00 45.00 90.0051 1 1 1.00 45.00 90.00 52 1 1 1.00 45.00 90.00 53 53 2475 2100 2475 2400 54 54 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 55 0 01.00 45.00 90.0056 0 01.00 45.00 90.0055 1 1 1.00 45.00 90.00 56 1 1 1.00 45.00 90.00 57 57 2925 2100 2925 2400 58 58 4 1 0 50 -1 0 11 0.0000 2 135 2235 2475 1725 scheduled across kernel threads\001 -
doc/theses/mubeen_zulfiqar_MMath/intro.tex
r015925a re5d9274 53 53 When this allocator proves inadequate, programmers often write specialize allocators for specific needs. 54 54 C and \CC allow easy replacement of the default memory allocator with an alternative specialized or general-purpose memory-allocator. 55 (Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine.) 55 Jikes RVM MMTk~\cite{MMTk} provides a similar generalization for the Java virtual machine. 56 56 However, high-performance memory-allocators for kernel and user multi-threaded programs are still being designed and improved. 57 57 For this reason, several alternative general-purpose allocators have been written for C/\CC with the goal of scaling in a multi-threaded program~\cite{Berger00,mtmalloc,streamflow,tcmalloc}. … … 65 65 \begin{enumerate}[leftmargin=*] 66 66 \item 67 Implementation of a new stand-lone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading). 68 69 \item 70 Adopt @nullptr@ return for a zero-sized allocation, rather than an actual memory address, which can be passed to @free@. 67 Implementation of a new stand-alone concurrent low-latency memory-allocator ($\approx$1,200 lines of code) for C/\CC programs using kernel threads (1:1 threading), and specialized versions of the allocator for the programming languages \uC and \CFA using user-level threads running over multiple kernel threads (M:N threading). 71 68 72 69 \item … … 104 101 105 102 \item 106 Provide additional heap wrapper functions in \CFA creating a n orthogonalset of allocation operations and properties.103 Provide additional heap wrapper functions in \CFA creating a more usable set of allocation operations and properties. 107 104 108 105 \item … … 111 108 \item 112 109 @malloc_alignment( addr )@ returns the alignment of the allocation pointed-to by @addr@. 113 If the allocation is not aligned or @addr@ is the @ nulladdr@, the minimal alignment is returned.110 If the allocation is not aligned or @addr@ is the @NULL@, the minimal alignment is returned. 114 111 \item 115 112 @malloc_zero_fill( addr )@ returns a boolean result indicating if the memory pointed-to by @addr@ is allocated with zero fill, e.g., by @calloc@/@cmemalign@. … … 119 116 @malloc_usable_size( addr )@ returns the usable (total) size of the memory pointed-to by @addr@, i.e., the bin size containing the allocation, where @malloc_size( addr )@ $\le$ @malloc_usable_size( addr )@. 120 117 \end{itemize} 121 122 \item123 Provide mostly contention-free allocation and free operations via a heap-per-kernel-thread implementation.124 118 125 119 \item … … 136 130 137 131 \item 138 Provide extensive runtime checks to valid allocation operations and identify the amount of unfreed storage at program termination.132 Provide extensive runtime checks to validate allocation operations and identify the amount of unfreed storage at program termination. 139 133 140 134 \item -
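The query routines listed above (malloc_alignment, malloc_zero_fill, malloc_size, malloc_usable_size) let a program inspect the properties of an arbitrary allocation, including one returned by an unknown library routine. A small C sketch of their use follows; the prototypes are inferred from the descriptions above (the declaring header and exact return types are assumptions), and the program links only when built against llheap.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    // Prototypes inferred from the descriptions above; exact types/header are assumptions.
    extern size_t malloc_alignment( void * addr );
    extern bool   malloc_zero_fill( void * addr );
    extern size_t malloc_size( void * addr );
    extern size_t malloc_usable_size( void * addr );

    int main() {
        void * p = calloc( 10, 64 );                        // zero-filled allocation
        printf( "alignment %zu, zero filled %d\n",
                malloc_alignment( p ), (int)malloc_zero_fill( p ) );
        printf( "requested %zu bytes, usable %zu bytes\n",
                malloc_size( p ), malloc_usable_size( p ) );
        free( p );
        return 0;
    }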
doc/theses/mubeen_zulfiqar_MMath/performance.tex
r015925a re5d9274 3 3 4 4 This chapter uses the micro-benchmarks from \VRef[Chapter]{s:Benchmarks} to test a number of current memory allocators, including llheap. 5 The goal is to see if llheap is competitive with the current bestmemory allocators.5 The goal is to see if llheap is competitive with the currently popular memory allocators. 6 6 7 7 … … 11 11 \begin{itemize} 12 12 \item 13 \textbf{Algol} Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz, GCC version 9.4.0 14 \item 13 15 \textbf{Nasus} AMD EPYC 7662, 64-core socket $\times$ 2, 2.0 GHz, GCC version 9.3.0 14 \item15 \textbf{Algol} Huawei ARM TaiShan 2280 V2 Kunpeng 920, 24-core socket $\times$ 4, 2.6 GHz, GCC version 9.4.016 16 \end{itemize} 17 17 … … 31 31 32 32 \paragraph{glibc (\textsf{glc})} 33 \cite{glibc} is the default g cc thread-safe allocator.33 \cite{glibc} is the default glibc thread-safe allocator. 34 34 \\ 35 35 \textbf{Version:} Ubuntu GLIBC 2.31-0ubuntu9.7 2.31\\ … … 46 46 47 47 \paragraph{hoard (\textsf{hrd})} 48 \cite{hoard} is a thread-safe allocator that is multi-threaded and us inga heap layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap.48 \cite{hoard} is a thread-safe allocator that is multi-threaded and uses a heap layer framework. It has per-thread heaps that have thread-local free-lists, and a global shared heap. 49 49 \\ 50 50 \textbf{Version:} 3.13\\ … … 78 78 79 79 \paragraph{tbb malloc (\textsf{tbb})} 80 \cite{tbbmalloc} is a thread-safe allocator that is multi-threaded and uses private heap for each thread.80 \cite{tbbmalloc} is a thread-safe allocator that is multi-threaded and uses a private heap for each thread. 81 81 Each private-heap has multiple bins of different sizes. Each bin contains free regions of the same size. 82 82 \\ … … 90 90 \section{Experiments} 91 91 92 The each micro-benchmark is configured and run with each of the allocators,93 The less time an allocator takes to complete a benchmark the better , so lower in the graphs is better.92 Each micro-benchmark is configured and run with each of the allocators, 93 The less time an allocator takes to complete a benchmark the better so lower in the graphs is better, except for the Memory micro-benchmark graphs. 94 94 All graphs use log scale on the Y-axis, except for the Memory micro-benchmark (see \VRef{s:MemoryMicroBenchmark}). 95 95 … … 231 231 Second is the low-performer group, which includes the rest of the memory allocators. 232 232 These memory allocators have significant program-induced passive false-sharing, where \textsf{hrd}'s is the worst performing allocator. 233 All of the allocator's in this group are sharing heaps among threads at some level. 234 235 Interestingly, allocators such as \textsf{hrd} and \textsf{glc} performed well in micro-benchmark cache thrash (see \VRef{sec:cache-thrash-perf}). 236 But, these allocators are among the low performers in the cache scratch. 237 It suggests these allocators do not actively produce false-sharing but preserve program-induced passive false sharing. 233 All of the allocators in this group are sharing heaps among threads at some level. 234 235 Interestingly, allocators such as \textsf{hrd} and \textsf{glc} performed well in micro-benchmark cache thrash (see \VRef{sec:cache-thrash-perf}), but, these allocators are among the low performers in the cache scratch. 236 It suggests these allocators do not actively produce false-sharing, but preserve program-induced passive false sharing. 
238 237 239 238 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -
doc/theses/mubeen_zulfiqar_MMath/uw-ethesis-frontpgs.tex
r015925a re5d9274 13 13 \vspace*{1.0cm} 14 14 15 {\Huge\bf \CFAMemory Allocation}15 {\Huge\bf High-Performance Concurrent Memory Allocation} 16 16 17 17 \vspace*{1.0cm} … … 108 108 % D E C L A R A T I O N P A G E 109 109 % ------------------------------- 110 % The following is a sample De laration Page as provided by the GSO110 % The following is a sample Declaration Page as provided by the GSO 111 111 % December 13th, 2006. It is designed for an electronic thesis. 112 112 \begin{center}\textbf{Author's Declaration}\end{center} … … 136 136 137 137 The goal of this thesis is to build a low-latency memory allocator for both kernel and user multi-threaded systems, which is competitive with the best current memory allocators, while extending the feature set of existing and new allocator routines. 138 A new llheap memory-allocator is created that achieves all of these goals, while maintaining and managing sticky allocation properties for zero-fill and alignmentallocations without a performance loss.138 A new llheap memory-allocator is created that achieves all of these goals, while maintaining and managing sticky allocation properties for zero-filled and aligned allocations without a performance loss. 139 139 Hence, it becomes possible to use @realloc@ frequently as a safe operation, rather than just occasionally, because it preserves sticky properties when enlarging storage requests. 140 140 Furthermore, the ability to query sticky properties and information allows programmers to write safer programs, as it is possible to dynamically match allocation styles from unknown library routines that return allocations. 141 141 The C allocation API is also extended with @resize@, advanced @realloc@, @aalloc@, @amemalign@, and @cmemalign@ so programmers do not make mistakes writing theses useful allocation operations. 142 142 llheap is embedded into the \uC and \CFA runtime systems, both of which have user-level threading. 143 The ability to use \CFA's advanced type-system (and possibly \CC's too) to have one allocation routine with completely orthogonal sticky properties shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation.143 The ability to use \CFA's advanced type-system (and possibly \CC's too) to combine advanced memory operations into one allocation routine using named arguments shows how far the allocation API can be pushed, which increases safety and greatly simplifies programmer's use of dynamic allocation. 144 144 145 145 The llheap allocator also provides comprehensive statistics for all allocation operations, which are invaluable in understanding and debugging a program's dynamic behaviour. 146 No other memory allocator examined in the thesis provides comprehensive statistics gathering.147 As well, llheap provides a debugging mode where allocations are checked , along with internal pre/post conditions and invariants,is extremely useful, especially for students.146 No other memory allocator examined in the thesis provides such comprehensive statistics gathering. 147 As well, llheap provides a debugging mode where allocations are checked with internal pre/post conditions and invariants. It is extremely useful, especially for students. 148 148 While not as powerful as the @valgrind@ interpreter, a large number of allocations mistakes are detected. 149 149 Finally, contention-free statistics gathering and debugging have a low enough cost to be used in production code. 
150 150 151 A micro-benchmark test-suite is started for comparing allocators, rather than relying on a suite of arbitrary programs ,has been an interesting challenge.151 A micro-benchmark test-suite is started for comparing allocators, rather than relying on a suite of arbitrary programs. It has been an interesting challenge. 152 152 These micro-benchmarks have adjustment knobs to simulate allocation patterns hard-coded into arbitrary test programs. 153 Existing memory allocators, glibc, dlmalloc, hoard, jemalloc, ptmalloc3, rpmalloc, tbmalloc and the new allocator llheap are all compared using the new micro-benchmark test-suite.153 Existing memory allocators, glibc, dlmalloc, hoard, jemalloc, ptmalloc3, rpmalloc, tbmalloc, and the new allocator llheap are all compared using the new micro-benchmark test-suite. 154 154 \cleardoublepage 155 155 … … 162 162 I would like to thank all the people who made this thesis possible. 163 163 164 I would like to acknowledge Peter A. Buhr for his assistance and support through tout the process.164 I would like to acknowledge Peter A. Buhr for his assistance and support throughout the process. 165 165 It would have been impossible without him. 166 166 167 I would like to acknowledge Gregor Richards and Trevor Brown for reading my thesis quickly and giving me great feedback on my work. 168 167 169 Also, I would say thanks to my team members at PLG especially Thierry, Michael, and Andrew for their input. 170 171 Finally, a special thank you to Huawei Canada for funding this work. 168 172 \end{center} 169 173 \cleardoublepage … … 195 199 % L I S T O F T A B L E S 196 200 % --------------------------- 197 \addcontentsline{toc}{chapter}{List of Tables}198 \listoftables199 \cleardoublepage200 \phantomsection % allows hyperref to link to the correct page201 % \addcontentsline{toc}{chapter}{List of Tables} 202 % \listoftables 203 % \cleardoublepage 204 % \phantomsection % allows hyperref to link to the correct page 201 205 202 206 % Change page numbering back to Arabic numerals -
doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex
r015925a re5d9274 106 106 pdffitwindow=false, % window fit to page when opened 107 107 pdfstartview={FitH}, % fits the width of the page to the window 108 pdftitle={ CforallMemory Allocation}, % title: CHANGE THIS TEXT!108 pdftitle={High-Performance Concurrent Memory Allocation}, % title: CHANGE THIS TEXT! 109 109 pdfauthor={Mubeen Zulfiqar}, % author: CHANGE THIS TEXT! and uncomment this line 110 110 pdfsubject={Cforall}, % subject: CHANGE THIS TEXT! and uncomment this line -
doc/theses/thierry_delisle_PhD/thesis/Makefile
r015925a re5d9274 3 3 Build = build 4 4 Figures = img 5 Macros = ../../../LaTeXmacros 6 TeXLIB = .:${Macros}:${Build}:../../../bibliography: 5 6 LaTMac = ../../../LaTeXmacros 7 BibRep = ../../../bibliography 8 9 Macros = ${LaTMac} 10 TeXLIB = .:${Macros}:${Build}:${BibRep}: 7 11 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build} 8 12 BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex … … 37 41 emptytree \ 38 42 fairness \ 43 idle \ 44 idle1 \ 45 idle2 \ 46 idle_state \ 39 47 io_uring \ 40 48 pivot_ring \ … … 42 50 cycle \ 43 51 result.cycle.jax.ops \ 52 result.yield.jax.ops \ 53 result.churn.jax.ops \ 54 result.cycle.jax.ns \ 55 result.yield.jax.ns \ 56 result.churn.jax.ns \ 57 result.cycle.low.jax.ops \ 58 result.yield.low.jax.ops \ 59 result.churn.low.jax.ops \ 60 result.cycle.low.jax.ns \ 61 result.yield.low.jax.ns \ 62 result.churn.low.jax.ns \ 63 result.memcd.updt.qps \ 64 result.memcd.updt.lat \ 65 result.memcd.rate.qps \ 66 result.memcd.rate.99th \ 44 67 } 45 68 … … 52 75 ## Define the documents that need to be made. 53 76 all: thesis.pdf 54 thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib ../../../LaTeXmacros/common.tex ../../../LaTeXmacros/common.sty77 thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib ${LaTMac}/common.tex ${LaTMac}/common.sty ${BibRep}/pl.bib 55 78 56 79 DOCUMENT = thesis.pdf … … 116 139 python3 $< $@ 117 140 118 build/result.%.ns.svg : data/% | ${Build} 119 ../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops" 141 cycle_jax_ops_FLAGS = --MaxY=120000000 142 cycle_low_jax_ops_FLAGS = --MaxY=120000000 143 cycle_jax_ns_FLAGS = --MaxY=2000 144 cycle_low_jax_ns_FLAGS = --MaxY=2000 120 145 121 build/result.%.ops.svg : data/% | ${Build} 122 ../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second" 146 yield_jax_ops_FLAGS = --MaxY=150000000 147 yield_low_jax_ops_FLAGS = --MaxY=150000000 148 yield_jax_ns_FLAGS = --MaxY=1500 149 yield_low_jax_ns_FLAGS = --MaxY=1500 150 151 build/result.%.ns.svg : data/% Makefile | ${Build} 152 ../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops/procs" $($(subst .,_,$*)_ns_FLAGS) 153 154 build/result.%.ops.svg : data/% Makefile | ${Build} 155 ../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second" $($(subst .,_,$*)_ops_FLAGS) 156 157 build/result.memcd.updt.qps.svg : data/memcd.updt Makefile | ${Build} 158 ../../../../benchmark/plot.py -f $< -o $@ -y "Actual QPS" -x "Update Ratio" 159 160 build/result.memcd.updt.lat.svg : data/memcd.updt Makefile | ${Build} 161 ../../../../benchmark/plot.py -f $< -o $@ -y "Average Read Latency" -x "Update Ratio" 162 163 build/result.memcd.rate.qps.svg : data/memcd.rate Makefile | ${Build} 164 ../../../../benchmark/plot.py -f $< -o $@ -y "Actual QPS" -x "Target QPS" 165 166 build/result.memcd.rate.99th.svg : data/memcd.rate Makefile | ${Build} 167 ../../../../benchmark/plot.py -f $< -o $@ -y "Tail Read Latency" -x "Target QPS" 123 168 124 169 ## pstex with inverted colors -
doc/theses/thierry_delisle_PhD/thesis/data/cycle.jax
r015925a re5d9274 1 [["rdq-cycle-go", "./rdq-cycle-go - t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 43606897.0, "Ops per second": 8720908.73, "ns per ops": 114.67, "Ops per threads": 2180344.0, "Ops per procs": 10901724.0, "Ops/sec/procs": 2180227.18, "ns per ops/procs": 458.67}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5010.922033, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 93993568.0, "Total blocks": 93993209.0, "Ops per second": 18757739.07, "ns per ops": 53.31, "Ops per threads": 1174919.0, "Ops per procs": 5874598.0, "Ops/sec/procs": 1172358.69, "ns per ops/procs": 852.98}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 136763517.0, "Ops per second": 27351079.35, "ns per ops": 36.56, "Ops per threads": 1709543.0, "Ops per procs": 8547719.0, "Ops/sec/procs": 1709442.46, "ns per ops/procs": 584.99}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 27778961.0, "Ops per second": 5555545.09, "ns per ops": 180.0, "Ops per threads": 5555792.0, "Ops per procs": 27778961.0, "Ops/sec/procs": 5555545.09, "ns per ops/procs": 180.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5009.290878, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 43976310.0, "Total blocks": 43976217.0, "Ops per second": 8778949.17, "ns per ops": 113.91, "Ops per threads": 2198815.0, "Ops per procs": 10994077.0, "Ops/sec/procs": 2194737.29, "ns per ops/procs": 455.64}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5009.151542, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44132300.0, "Total blocks": 44132201.0, "Ops per second": 8810334.37, "ns per ops": 113.5, "Ops per threads": 2206615.0, "Ops per procs": 11033075.0, "Ops/sec/procs": 2202583.59, "ns per ops/procs": 454.01}],["rdq-cycle-go", "./rdq-cycle-go -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 46353896.0, "Ops per second": 9270294.11, "ns per ops": 107.87, "Ops per threads": 2317694.0, "Ops per procs": 11588474.0, "Ops/sec/procs": 2317573.53, "ns per ops/procs": 431.49}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 27894379.0, "Ops per second": 5578591.58, "ns per ops": 179.26, "Ops per threads": 5578875.0, "Ops per procs": 27894379.0, "Ops/sec/procs": 5578591.58, "ns per ops/procs": 179.26}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.743463, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32825528.0, "Total blocks": 32825527.0, "Ops per second": 6553645.29, "ns per ops": 152.59, "Ops per threads": 6565105.0, "Ops per procs": 32825528.0, "Ops/sec/procs": 6553645.29, "ns per ops/procs": 152.59}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 
-d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 138213098.0, "Ops per second": 27640977.5, "ns per ops": 36.18, "Ops per threads": 1727663.0, "Ops per procs": 8638318.0, "Ops/sec/procs": 1727561.09, "ns per ops/procs": 578.85}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5007.914168, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44109513.0, "Total blocks": 44109419.0, "Ops per second": 8807961.06, "ns per ops": 113.53, "Ops per threads": 2205475.0, "Ops per procs": 11027378.0, "Ops/sec/procs": 2201990.27, "ns per ops/procs": 454.13}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5012.121876, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 94130673.0, "Total blocks": 94130291.0, "Ops per second": 18780603.37, "ns per ops": 53.25, "Ops per threads": 1176633.0, "Ops per procs": 5883167.0, "Ops/sec/procs": 1173787.71, "ns per ops/procs": 851.94}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 140936367.0, "Ops per second": 28185668.38, "ns per ops": 35.48, "Ops per threads": 1761704.0, "Ops per procs": 8808522.0, "Ops/sec/procs": 1761604.27, "ns per ops/procs": 567.66}],["rdq-cycle-go", "./rdq-cycle-go -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44279585.0, "Ops per second": 8855475.01, "ns per ops": 112.92, "Ops per threads": 2213979.0, "Ops per procs": 11069896.0, "Ops/sec/procs": 2213868.75, "ns per ops/procs": 451.7}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.37392, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32227534.0, "Total blocks": 32227533.0, "Ops per second": 6434730.02, "ns per ops": 155.41, "Ops per threads": 6445506.0, "Ops per procs": 32227534.0, "Ops/sec/procs": 6434730.02, "ns per ops/procs": 155.41}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5011.019789, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 90600569.0, "Total blocks": 90600173.0, "Ops per second": 18080265.66, "ns per ops": 55.31, "Ops per threads": 1132507.0, "Ops per procs": 5662535.0, "Ops/sec/procs": 1130016.6, "ns per ops/procs": 884.94}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.52474, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32861776.0, "Total blocks": 32861775.0, "Ops per second": 6561168.75, "ns per ops": 152.41, "Ops per threads": 6572355.0, "Ops per procs": 32861776.0, "Ops/sec/procs": 6561168.75, "ns per ops/procs": 152.41}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 28097680.0, "Ops per second": 5619274.9, "ns per ops": 177.96, "Ops per threads": 5619536.0, "Ops per procs": 28097680.0, "Ops/sec/procs": 5619274.9, "ns per ops/procs": 177.96}]]1 [["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration 
(ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1138076440.0, "Ops per second": 113792094.48, "ns per ops": 8.79, "Ops per threads": 94839.0, "Ops per procs": 47419851.0, "Ops/sec/procs": 4741337.27, "ns per ops/procs": 210.91}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 200285.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 17638575791.0, "Ops per second": 88067238.72, "ns per ops": 11.35, "Ops per threads": 2204821.0, "Ops per procs": 1102410986.0, "Ops/sec/procs": 5504202.42, "ns per ops/procs": 181.68}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54856916.0, "Ops per second": 5485691.0, "ns per ops": 184.0, "Ops per threads": 109713.0, "Ops per procs": 54856916.0, "Ops/sec/procs": 5485691.0, "ns per ops/procs": 184.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.449006, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 558836360.0, "Total blocks": 558836360.0, "Ops per second": 55741778.71, "ns per ops": 17.94, "Ops per threads": 69854.0, "Ops per procs": 34927272.0, "Ops/sec/procs": 3483861.17, "ns per ops/procs": 287.04}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10038.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 58647049.0, "Total blocks": 58647049.0, "Ops per second": 5842287.68, "ns per ops": 171.17, "Ops per threads": 7330.0, "Ops per procs": 3665440.0, "Ops/sec/procs": 365142.98, "ns per ops/procs": 2738.65}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10003.489711, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 728096996.0, "Total blocks": 728096996.0, "Ops per second": 72784299.98, "ns per ops": 13.74, "Ops per threads": 60674.0, "Ops per procs": 30337374.0, "Ops/sec/procs": 3032679.17, "ns per ops/procs": 329.74}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10021.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 63157049.0, "Total blocks": 63157049.0, "Ops per second": 6302255.13, "ns per ops": 158.67, "Ops per threads": 15789.0, "Ops per procs": 7894631.0, "Ops/sec/procs": 787781.89, "ns per ops/procs": 1269.39}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10009.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62412200.0, "Total blocks": 62411700.0, "Ops per second": 6235572.31, "ns per ops": 160.37, "Ops per threads": 124824.0, "Ops per procs": 62412200.0, "Ops/sec/procs": 6235572.31, "ns per ops/procs": 160.37}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 464608617.0, "Ops per second": 46457191.42, "ns per ops": 21.53, "Ops per threads": 116152.0, "Ops per procs": 58076077.0, "Ops/sec/procs": 5807148.93, "ns per ops/procs": 172.2}],["rdq-cycle-tokio", "./rdq-cycle-tokio 
-p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 391521066.0, "Ops per second": 39152106.0, "ns per ops": 25.0, "Ops per threads": 97880.0, "Ops per procs": 48940133.0, "Ops/sec/procs": 4894013.0, "ns per ops/procs": 206.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 963549550.0, "Ops per second": 96354955.0, "ns per ops": 10.0, "Ops per threads": 80295.0, "Ops per procs": 40147897.0, "Ops/sec/procs": 4014789.0, "ns per ops/procs": 251.0}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 867718190.0, "Ops per second": 86761170.55, "ns per ops": 11.53, "Ops per threads": 108464.0, "Ops per procs": 54232386.0, "Ops/sec/procs": 5422573.16, "ns per ops/procs": 184.41}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10100.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 962016289.0, "Ops per second": 96201628.0, "ns per ops": 10.0, "Ops per threads": 80168.0, "Ops per procs": 40084012.0, "Ops/sec/procs": 4008401.0, "ns per ops/procs": 251.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.837824, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54738237.0, "Total blocks": 54737741.0, "Ops per second": 5464622.46, "ns per ops": 183.0, "Ops per threads": 109476.0, "Ops per procs": 54738237.0, "Ops/sec/procs": 5464622.46, "ns per ops/procs": 183.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 731309408.0, "Ops per second": 73130940.0, "ns per ops": 13.0, "Ops per threads": 91413.0, "Ops per procs": 45706838.0, "Ops/sec/procs": 4570683.0, "ns per ops/procs": 220.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 739772688.0, "Ops per second": 73977268.0, "ns per ops": 13.0, "Ops per threads": 92471.0, "Ops per procs": 46235793.0, "Ops/sec/procs": 4623579.0, "ns per ops/procs": 218.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 391449785.0, "Ops per second": 39144978.0, "ns per ops": 25.0, "Ops per threads": 97862.0, "Ops per procs": 48931223.0, "Ops/sec/procs": 4893122.0, "ns per ops/procs": 206.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10048.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 57239183.0, "Total blocks": 57239183.0, "Ops per second": 5696211.13, "ns per ops": 175.56, "Ops per threads": 4769.0, "Ops per procs": 2384965.0, "Ops/sec/procs": 237342.13, "ns per ops/procs": 4213.33}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of 
processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55248375.0, "Ops per second": 5524562.87, "ns per ops": 181.01, "Ops per threads": 110496.0, "Ops per procs": 55248375.0, "Ops/sec/procs": 5524562.87, "ns per ops/procs": 181.01}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10021.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 61553053.0, "Total blocks": 61553053.0, "Ops per second": 6142186.88, "ns per ops": 162.81, "Ops per threads": 15388.0, "Ops per procs": 7694131.0, "Ops/sec/procs": 767773.36, "ns per ops/procs": 1302.47}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10008.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62811642.0, "Total blocks": 62811142.0, "Ops per second": 6275517.47, "ns per ops": 159.35, "Ops per threads": 125623.0, "Ops per procs": 62811642.0, "Ops/sec/procs": 6275517.47, "ns per ops/procs": 159.35}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10018.820873, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 260866706.0, "Total blocks": 260862710.0, "Ops per second": 26037665.44, "ns per ops": 38.41, "Ops per threads": 65216.0, "Ops per procs": 32608338.0, "Ops/sec/procs": 3254708.18, "ns per ops/procs": 307.25}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 874581175.0, "Ops per second": 87449851.2, "ns per ops": 11.44, "Ops per threads": 109322.0, "Ops per procs": 54661323.0, "Ops/sec/procs": 5465615.7, "ns per ops/procs": 182.96}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55228782.0, "Ops per second": 5522878.0, "ns per ops": 182.0, "Ops per threads": 110457.0, "Ops per procs": 55228782.0, "Ops/sec/procs": 5522878.0, "ns per ops/procs": 182.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10009.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62564955.0, "Total blocks": 62564455.0, "Ops per second": 6250797.96, "ns per ops": 159.98, "Ops per threads": 125129.0, "Ops per procs": 62564955.0, "Ops/sec/procs": 6250797.96, "ns per ops/procs": 159.98}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 738848909.0, "Ops per second": 73884890.0, "ns per ops": 13.0, "Ops per threads": 92356.0, "Ops per procs": 46178056.0, "Ops/sec/procs": 4617805.0, "ns per ops/procs": 218.0}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1131221613.0, "Ops per second": 113108175.94, "ns per ops": 8.84, "Ops per threads": 94268.0, "Ops per procs": 47134233.0, "Ops/sec/procs": 4712840.66, "ns per ops/procs": 212.19}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10008.209159, "Number of 
processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 729328104.0, "Total blocks": 729328099.0, "Ops per second": 72872987.81, "ns per ops": 13.72, "Ops per threads": 60777.0, "Ops per procs": 30388671.0, "Ops/sec/procs": 3036374.49, "ns per ops/procs": 329.34}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 961002611.0, "Ops per second": 96100261.0, "ns per ops": 10.0, "Ops per threads": 80083.0, "Ops per procs": 40041775.0, "Ops/sec/procs": 4004177.0, "ns per ops/procs": 252.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 390098231.0, "Ops per second": 39009823.0, "ns per ops": 25.0, "Ops per threads": 97524.0, "Ops per procs": 48762278.0, "Ops/sec/procs": 4876227.0, "ns per ops/procs": 207.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55237591.0, "Ops per second": 5523759.0, "ns per ops": 182.0, "Ops per threads": 110475.0, "Ops per procs": 55237591.0, "Ops/sec/procs": 5523759.0, "ns per ops/procs": 182.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.576699, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54510321.0, "Total blocks": 54509820.0, "Ops per second": 5442011.04, "ns per ops": 183.76, "Ops per threads": 109020.0, "Ops per procs": 54510321.0, "Ops/sec/procs": 5442011.04, "ns per ops/procs": 183.76}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1135730371.0, "Ops per second": 113558509.97, "ns per ops": 8.81, "Ops per threads": 94644.0, "Ops per procs": 47322098.0, "Ops/sec/procs": 4731604.58, "ns per ops/procs": 211.34}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10039.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 61004037.0, "Total blocks": 61004037.0, "Ops per second": 6076255.04, "ns per ops": 164.58, "Ops per threads": 7625.0, "Ops per procs": 3812752.0, "Ops/sec/procs": 379765.94, "ns per ops/procs": 2633.2}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10004.891999, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 747946345.0, "Total blocks": 747934349.0, "Ops per second": 74758062.86, "ns per ops": 13.38, "Ops per threads": 62328.0, "Ops per procs": 31164431.0, "Ops/sec/procs": 3114919.29, "ns per ops/procs": 321.04}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 466424792.0, "Ops per second": 46638931.23, "ns per ops": 21.44, "Ops per threads": 116606.0, "Ops per procs": 58303099.0, "Ops/sec/procs": 5829866.4, "ns per ops/procs": 171.53}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10086.0, "Number of 
processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 57343570.0, "Total blocks": 57343570.0, "Ops per second": 5685308.81, "ns per ops": 175.89, "Ops per threads": 4778.0, "Ops per procs": 2389315.0, "Ops/sec/procs": 236887.87, "ns per ops/procs": 4221.41}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10020.39533, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 263517289.0, "Total blocks": 263513293.0, "Ops per second": 26298093.07, "ns per ops": 38.03, "Ops per threads": 65879.0, "Ops per procs": 32939661.0, "Ops/sec/procs": 3287261.63, "ns per ops/procs": 304.2}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.357431, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 551670395.0, "Total blocks": 551662399.0, "Ops per second": 55027503.89, "ns per ops": 18.17, "Ops per threads": 68958.0, "Ops per procs": 34479399.0, "Ops/sec/procs": 3439218.99, "ns per ops/procs": 290.76}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10050.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56162695.0, "Total blocks": 56162695.0, "Ops per second": 5588033.65, "ns per ops": 178.95, "Ops per threads": 4680.0, "Ops per procs": 2340112.0, "Ops/sec/procs": 232834.74, "ns per ops/procs": 4294.89}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10019.690183, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 271866976.0, "Total blocks": 271862980.0, "Ops per second": 27133271.69, "ns per ops": 36.86, "Ops per threads": 67966.0, "Ops per procs": 33983372.0, "Ops/sec/procs": 3391658.96, "ns per ops/procs": 294.84}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10057.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62105022.0, "Total blocks": 62105022.0, "Ops per second": 6175186.04, "ns per ops": 161.94, "Ops per threads": 15526.0, "Ops per procs": 7763127.0, "Ops/sec/procs": 771898.25, "ns per ops/procs": 1295.51}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.81217, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 537080117.0, "Total blocks": 537072121.0, "Ops per second": 53569736.59, "ns per ops": 18.67, "Ops per threads": 67135.0, "Ops per procs": 33567507.0, "Ops/sec/procs": 3348108.54, "ns per ops/procs": 298.68}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55967030.0, "Ops per second": 5596438.25, "ns per ops": 178.69, "Ops per threads": 111934.0, "Ops per procs": 55967030.0, "Ops/sec/procs": 5596438.25, "ns per ops/procs": 178.69}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55703320.0, "Ops per second": 5570084.72, "ns per ops": 179.53, "Ops per threads": 111406.0, "Ops per procs": 55703320.0, "Ops/sec/procs": 5570084.72, "ns per ops/procs": 179.53}],["rdq-cycle-go", 
"./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 469211793.0, "Ops per second": 46918327.16, "ns per ops": 21.31, "Ops per threads": 117302.0, "Ops per procs": 58651474.0, "Ops/sec/procs": 5864790.9, "ns per ops/procs": 170.51}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.545208, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54925472.0, "Total blocks": 54924976.0, "Ops per second": 5483474.68, "ns per ops": 182.37, "Ops per threads": 109850.0, "Ops per procs": 54925472.0, "Ops/sec/procs": 5483474.68, "ns per ops/procs": 182.37}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10037.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 60770550.0, "Total blocks": 60770550.0, "Ops per second": 6054474.7, "ns per ops": 165.17, "Ops per threads": 7596.0, "Ops per procs": 3798159.0, "Ops/sec/procs": 378404.67, "ns per ops/procs": 2642.67}]] -
doc/theses/thierry_delisle_PhD/thesis/local.bib
r015925a re5d9274 701 701 note = "[Online; accessed 12-April-2022]" 702 702 } 703 704 % RMR notes : 705 % [05/04, 12:36] Trevor Brown 706 % i don't know where rmr complexity was first introduced, but there are many many many papers that use the term and define it 707 % [05/04, 12:37] Trevor Brown 708 % here's one paper that uses the term a lot and links to many others that use it... might trace it to something useful there https://drops.dagstuhl.de/opus/volltexte/2021/14832/pdf/LIPIcs-DISC-2021-30.pdf 709 % [05/04, 12:37] Trevor Brown 710 % another option might be to cite a textbook 711 % [05/04, 12:42] Trevor Brown 712 % but i checked two textbooks in the area i'm aware of and i don't see a definition of rmr complexity in either 713 % [05/04, 12:42] Trevor Brown 714 % this one has a nice statement about the prevelance of rmr complexity, as well as some rough definition 715 % [05/04, 12:42] Trevor Brown 716 % https://dl.acm.org/doi/pdf/10.1145/3465084.3467938 717 718 % Race to idle notes : 719 % [13/04, 16:56] Martin Karsten 720 % I don't have a citation. Google brings up this one, which might be good: 721 % 722 % https://doi.org/10.1137/1.9781611973099.100 -
doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex
r015925a re5d9274 7 7 Networked ZIPF 8 8 9 Nginx : 5Gb still good, 4Gb starts to suffer 10 11 Cforall : 10Gb too high, 4 Gb too low 12 9 13 \section{Memcached} 10 14 11 In Memory 15 \subsection{Benchmark Environment} 16 These experiments are run on a cluster of homogeneous Supermicro SYS-6017R-TDF compute nodes with the following characteristics: 17 The server runs Ubuntu 20.04.3 LTS on top of Linux Kernel 5.11.0-34. 18 Each node has 2 Intel(R) Xeon(R) CPU E5-2620 v2 running at 2.10GHz. 19 These CPUs have 6 cores per CPU and 2 \glspl{hthrd} per core, for a total of 24 \glspl{hthrd}. 20 Each CPU has 384 KB, 3 MB and 30 MB of L1, L2 and L3 cache, respectively. 21 Each node is connected to the network through a Mellanox 10 Gigabit Ethernet port. 22 The network route uses 1 Mellanox SX1012 10/40 Gigabit Ethernet cluster switch. 12 23 13 Networked 24 25 26 \begin{figure} 27 \centering 28 \input{result.memcd.updt.qps.pstex_t} 29 \caption[Memcached Benchmark : Throughput on Intel]{Memcached Benchmark : Throughput on Intel\smallskip\newline Description} 30 \label{fig:memcd:updt:qps} 31 \end{figure} 32 33 \begin{figure} 34 \centering 35 \input{result.memcd.updt.lat.pstex_t} 36 \caption[Memcached Benchmark : Latency on Intel]{Memcached Benchmark : Latency on Intel\smallskip\newline Description} 37 \label{fig:memcd:updt:lat} 38 \end{figure} 39 40 \begin{figure} 41 \centering 42 \input{result.memcd.rate.qps.pstex_t} 43 \caption[Memcached Benchmark : Throughput on Intel]{Memcached Benchmark : Throughput on Intel\smallskip\newline Description} 44 \label{fig:memcd:rate:qps} 45 \end{figure} 46 47 \begin{figure} 48 \centering 49 \input{result.memcd.rate.99th.pstex_t} 50 \caption[Memcached Benchmark : Tail Latency on Intel]{Memcached Benchmark : Tail Latency on Intel\smallskip\newline Description} 51 \label{fig:memcd:rate:tail} 52 \end{figure} -
doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex
r015925a re5d9274 6 6 \section{Benchmark Environment} 7 7 All of these benchmarks are run on two distinct hardware environments, an AMD and an INTEL machine. 8 9 For all benchmarks, \texttt{taskset} is used to limit the experiment to 1 NUMA Node with no hyperthreading. 10 If more \glspl{hthrd} are needed, then 1 NUMA Node with hyperthreading is used. 11 If still more \glspl{hthrd} are needed then the experiment is limited to as few NUMA Nodes as needed. 12 8 13 9 14 \paragraph{AMD} The AMD machine is a server with two AMD EPYC 7662 CPUs and 256GB of DDR4 RAM. … … 23 28 24 29 \section{Cycling latency} 30 \begin{figure} 31 \centering 32 \input{cycle.pstex_t} 33 \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.} 34 \label{fig:cycle} 35 \end{figure} 25 36 The most basic evaluation of any ready queue is to evaluate the latency needed to push and pop one element from the ready-queue. 26 37 Since these two operations also describe a \texttt{yield} operation, many systems use this as the most basic benchmark. … … 42 53 Note that this problem is only present on SMP machines and is significantly mitigated by the fact that there are multiple rings in the system. 43 54 44 \begin{figure} 45 \centering 46 \input{cycle.pstex_t} 47 \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.} 48 \label{fig:cycle} 49 \end{figure} 50 51 55 To prevent this benchmark from being dominated by the idle sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available. 52 56 Beyond this point, adding more rings serves to further mitigate the idle sleep handling. … … 54 58 55 59 The actual benchmark is more complicated to handle termination, but that simply requires using a binary semaphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition. 56 57 \begin{lstlisting} 58 Thread.main() { 59 count := 0 60 for { 61 wait() 62 this.next.wake() 63 count ++ 64 if must_stop() { break } 65 } 66 global.count += count 67 } 68 \end{lstlisting} 69 70 \begin{figure} 71 \centering 72 \input{result.cycle.jax.ops.pstex_t} 73 \vspace*{-10pt} 74 \label{fig:cycle:ns:jax} 75 \end{figure} 60 Figure~\ref{fig:cycle:code} shows pseudo code for this benchmark.
61 62 \begin{figure} 63 \begin{lstlisting} 64 Thread.main() { 65 count := 0 66 for { 67 wait() 68 this.next.wake() 69 count ++ 70 if must_stop() { break } 71 } 72 global.count += count 73 } 74 \end{lstlisting} 75 \caption[Cycle Benchmark : Pseudo Code]{Cycle Benchmark : Pseudo Code} 76 \label{fig:cycle:code} 77 \end{figure} 78 79 80 81 \subsection{Results} 82 \begin{figure} 83 \subfloat[][Throughput, 100 \ats per \proc]{ 84 \resizebox{0.5\linewidth}{!}{ 85 \input{result.cycle.jax.ops.pstex_t} 86 } 87 \label{fig:cycle:jax:ops} 88 } 89 \subfloat[][Throughput, 1 \ats per \proc]{ 90 \resizebox{0.5\linewidth}{!}{ 91 \input{result.cycle.low.jax.ops.pstex_t} 92 } 93 \label{fig:cycle:jax:low:ops} 94 } 95 96 \subfloat[][Latency, 100 \ats per \proc]{ 97 \resizebox{0.5\linewidth}{!}{ 98 \input{result.cycle.jax.ns.pstex_t} 99 } 100 101 } 102 \subfloat[][Latency, 1 \ats per \proc]{ 103 \resizebox{0.5\linewidth}{!}{ 104 \input{result.cycle.low.jax.ns.pstex_t} 105 } 106 \label{fig:cycle:jax:low:ns} 107 } 108 \caption[Cycle Benchmark on Intel]{Cycle Benchmark on Intel\smallskip\newline Throughput as a function of \proc count, using 100 cycles per \proc, 5 \ats per cycle.} 109 \label{fig:cycle:jax} 110 \end{figure} 111 Figure~\ref{fig:cycle:jax} shows the throughput as a function of \proc count, with the following constants: 112 Each run uses 100 cycles per \proc, 5 \ats per cycle. 113 114 \todo{results discussion} 76 115 77 116 \section{Yield} … … 81 120 Its only interesting variable is the number of \glspl{at} per \glspl{proc}, where ratios close to 1 mean the ready queue(s) could be empty. 82 121 This sometimes puts more strain on the idle sleep handling, compared to scenarios where there is clearly plenty of work to be done. 83 84 \todo{code, setup, results} 85 86 \begin{lstlisting} 87 Thread.main() { 88 count := 0 89 while !stop { 90 yield() 91 count ++ 92 } 93 global.count += count 94 } 95 \end{lstlisting} 122 Figure~\ref{fig:yield:code} shows pseudo code for this benchmark; the ``wait/wake-next'' is simply replaced by a yield. 123 124 \begin{figure} 125 \begin{lstlisting} 126 Thread.main() { 127 count := 0 128 for { 129 yield() 130 count ++ 131 if must_stop() { break } 132 } 133 global.count += count 134 } 135 \end{lstlisting} 136 \caption[Yield Benchmark : Pseudo Code]{Yield Benchmark : Pseudo Code} 137 \label{fig:yield:code} 138 \end{figure} 139 140 \subsection{Results} 141 \begin{figure} 142 \subfloat[][Throughput, 100 \ats per \proc]{ 143 \resizebox{0.5\linewidth}{!}{ 144 \input{result.yield.jax.ops.pstex_t} 145 } 146 \label{fig:yield:jax:ops} 147 } 148 \subfloat[][Throughput, 1 \ats per \proc]{ 149 \resizebox{0.5\linewidth}{!}{ 150 \input{result.yield.low.jax.ops.pstex_t} 151 } 152 \label{fig:yield:jax:low:ops} 153 } 154 155 \subfloat[][Latency, 100 \ats per \proc]{ 156 \resizebox{0.5\linewidth}{!}{ 157 \input{result.yield.jax.ns.pstex_t} 158 } 159 \label{fig:yield:jax:ns} 160 } 161 \subfloat[][Latency, 1 \ats per \proc]{ 162 \resizebox{0.5\linewidth}{!}{ 163 \input{result.yield.low.jax.ns.pstex_t} 164 } 165 \label{fig:yield:jax:low:ns} 166 } 167 \caption[Yield Benchmark on Intel]{Yield Benchmark on Intel\smallskip\newline Throughput as a function of \proc count, using 1 \ats per \proc.} 168 \label{fig:yield:jax} 169 \end{figure} 170 Figure~\ref{fig:yield:jax} shows the throughput as a function of \proc count, with the following constants: 171 Each run uses 100 \ats per \proc.
172 173 \todo{results discussion} 96 174 97 175 … … 105 183 In either case, this benchmark aims to highlight how each scheduler handles these cases, since both cases can lead to performance degradation if they are not handled correctly. 106 184 107 To achieve this the benchmark uses a fixed size array of \newterm{chair}s, where a chair is a data structure that holds a single blocked \gls{at}. 108 When a \gls{at} attempts to block on the chair, it must first unblocked the \gls{at} currently blocked on said chair, if any. 109 This creates a flow where \glspl{at} push each other out of the chairs before being pushed out themselves. 110 For this benchmark to work however, the number of \glspl{at} must be equal or greater to the number of chairs plus the number of \glspl{proc}. 185 To achieve this the benchmark uses a fixed size array of semaphores. 186 Each \gls{at} picks a random semaphore, \texttt{V}s it to unblock a waiting \at, and then \texttt{P}s on the semaphore. 187 This creates a flow where \glspl{at} push each other out of the semaphores before being pushed out themselves. 188 For this benchmark to work, however, the number of \glspl{at} must be equal to or greater than the number of semaphores plus the number of \glspl{proc}. 189 Note that the nature of these semaphores means the counter can go beyond 1, which could lead to calls to \texttt{P} not blocking. 111 190 112 191 \todo{code, setup, results} … … 116 195 for { 117 196 r := random() % len(spots) 118 next := xchg(spots[r], this) 119 if next { next.wake() } 120 wait() 197 spots[r].V() 198 spots[r].P() 121 199 count ++ 122 200 if must_stop() { break } … … 125 203 } 126 204 \end{lstlisting} 205 206 \begin{figure} 207 \subfloat[][Throughput, 100 \ats per \proc]{ 208 \resizebox{0.5\linewidth}{!}{ 209 \input{result.churn.jax.ops.pstex_t} 210 } 211 \label{fig:churn:jax:ops} 212 } 213 \subfloat[][Throughput, 1 \ats per \proc]{ 214 \resizebox{0.5\linewidth}{!}{ 215 \input{result.churn.low.jax.ops.pstex_t} 216 } 217 \label{fig:churn:jax:low:ops} 218 } 219 220 \subfloat[][Latency, 100 \ats per \proc]{ 221 \resizebox{0.5\linewidth}{!}{ 222 \input{result.churn.jax.ns.pstex_t} 223 } 224 225 } 226 \subfloat[][Latency, 1 \ats per \proc]{ 227 \resizebox{0.5\linewidth}{!}{ 228 \input{result.churn.low.jax.ns.pstex_t} 229 } 230 \label{fig:churn:jax:low:ns} 231 } 232 \caption[Churn Benchmark on Intel]{\centering Churn Benchmark on Intel\smallskip\newline Throughput and latency of the Churn benchmark on the Intel machine. Throughput is the total operations per second across all cores. Latency is the duration of each operation.} 233 \label{fig:churn:jax} 234 \end{figure} 127 235 128 236 \section{Locality} -
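The churn benchmark above is given only as pseudo code. Purely as an illustration of the counting-semaphore behaviour it describes (a V that can push the counter above 1, so a later P may not block), the following self-contained C sketch re-creates the worker loop using kernel threads and POSIX semaphores as stand-ins for the runtime's user-level threads and V/P operations. The spot and thread counts are invented and this is not the thesis harness.

/* churn_sketch.c : illustrative only, not the CFA benchmark; compile with: cc -pthread churn_sketch.c */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define NSPOTS   16      /* hypothetical number of semaphores ("spots") */
#define NTHREADS 32      /* must be >= NSPOTS + number of processors */

static sem_t spots[NSPOTS];
static atomic_bool stop;
static atomic_ullong global_count;

/* tiny xorshift generator so the sketch needs no library RNG */
static unsigned next_rand( unsigned * s ) {
	unsigned x = *s;
	x ^= x << 13; x ^= x >> 17; x ^= x << 5;
	return *s = x;
}

static void * worker( void * arg ) {
	unsigned seed = (unsigned)(uintptr_t)arg;            /* non-zero per-thread seed */
	unsigned long long count = 0;
	while( ! atomic_load( &stop ) ) {
		unsigned r = next_rand( &seed ) % NSPOTS;
		sem_post( &spots[r] );   /* V: wake a waiter, or push the counter above 1 */
		sem_wait( &spots[r] );   /* P: may not block if the counter is already positive */
		count += 1;
	}
	atomic_fetch_add( &global_count, count );
	return NULL;
}

int main( void ) {
	pthread_t tid[NTHREADS];
	for( int i = 0; i < NSPOTS; i += 1 ) sem_init( &spots[i], 0, 0 );
	for( int i = 0; i < NTHREADS; i += 1 ) pthread_create( &tid[i], NULL, worker, (void *)(uintptr_t)(i + 1) );
	sleep( 5 );                                           /* fixed experiment duration */
	atomic_store( &stop, true );
	for( int i = 0; i < NSPOTS; i += 1 )                  /* make sure no worker stays blocked in P */
		for( int j = 0; j < NTHREADS; j += 1 ) sem_post( &spots[i] );
	for( int i = 0; i < NTHREADS; i += 1 ) pthread_join( tid[i], NULL );
	printf( "total operations: %llu\n", (unsigned long long)atomic_load( &global_count ) );
	return 0;
}

Because sem_post simply increments the counter, repeated V operations on the same spot accumulate and a later P returns immediately, which is exactly the "P not blocking" behaviour noted in the text.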
doc/theses/thierry_delisle_PhD/thesis/text/intro.tex
r015925a re5d9274 2 2 \todo{A proper intro} 3 3 4 The C programming language \cit{C} 4 The C programming language~\cite{C11} 5 5 6 The \CFA programming language\cite{cfa:frontpage,cfa:typesystem} which extends the C programming language to add modern safety and productiviy features while maintaining backwards compatibility. Among it's productiviy features, \CFA introduces support for threading\cit{CFA Concurrency}, to allow programmers to write modern concurrent and parallel programming. 7 While previous work on the concurrent package of \CFA focused on features and interfaces, this thesis focuses on performance, introducing \glsxtrshort{api} changes only when required by performance considerations. More specifically, this thesis concentrates on scheduling and \glsxtrshort{io}. Prior to this work, the \CFA runtime used a strictly \glsxtrshort{fifo} \gls{rQ}. 6 The \CFA programming language~\cite{cfa:frontpage,cfa:typesystem} extends the C programming language by adding modern safety and productivity features, while maintaining backwards compatibility. Among its productivity features, \CFA supports user-level threading~\cite{Delisle21}, allowing programmers to write modern concurrent and parallel programs. 7 My previous master's thesis on concurrency in \CFA focused on features and interfaces. 8 This Ph.D.\ thesis focuses on performance, introducing \glsxtrshort{api} changes only when required by performance considerations. Specifically, this work concentrates on scheduling and \glsxtrshort{io}. Prior to this work, the \CFA runtime used a strict \glsxtrshort{fifo} \gls{rQ} and had no non-blocking I/O capabilities at the user-thread level. 8 9 9 This work exclusively concentrates on Linux as it's operating system since the existing \CFA runtime and compiler does not already support other operating systems. Furthermore, as \CFA is yet to be released, supporting version of Linux older than the latest versionis not a goal of this work. 10 As a research project, this work builds exclusively on newer versions of the Linux operating-system and gcc/clang compilers. While \CFA is released, supporting older versions of Linux ($<$~Ubuntu 16.04) and gcc/clang compilers ($<$~gcc 6.0) is not a goal of this work. -
doc/theses/thierry_delisle_PhD/thesis/text/practice.tex
r015925a re5d9274 7 7 More precisely, \CFA supports adding \procs using the RAII object @processor@. 8 8 These objects can be created at any time and can be destroyed at any time. 9 They are normally create as automatic stack variables, but this is not a requirement. 9 They are normally created as automatic stack variables, but this is not a requirement. 10 10 11 11 The consequence is that the scheduler and \io subsystems must support \procs coming in and out of existence. 12 12 13 13 \section{Manual Resizing} 14 The consequence of dynamically changing the number of \procs is that all internal arrays that are sized based on the number of \procs neede to be \texttt{realloc}ed. 15 This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing because there is no guarantee the \proc causing the shrink had the highest index. Therefore indexes need to be reassigned to preserve contiguous indexes.}. 16 17 There are no performance requirements, within reason, for resizing since this is usually considered as part of setup and teardown. 14 Manual resizing is expected to be a rare operation. 15 Programmers are mostly expected to resize clusters on startup or teardown. 16 Therefore, dynamically changing the number of \procs is an appropriate moment to allocate or free resources to match the new state. 17 As such, all internal arrays that are sized based on the number of \procs need to be \texttt{realloc}ed. 18 This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing when shrinking because some indexes are expected to refer to dense contiguous resources and there is no guarantee the resource being removed has the highest index.}. 19 20 There are no performance requirements, within reason, for resizing since it is expected to be rare. 18 21 However, this operation has strict correctness requirements since shrinking and idle sleep can easily lead to deadlocks. 19 22 It should also avoid as much as possible any effect on performance when the number of \procs remains constant. 20 This later requirement prehibits simple solutions, like simply adding a global lock to these arrays. 23 This latter requirement prohibits naive solutions, like simply adding a global lock to the ready-queue arrays. 21 24 22 25 23 26 \subsection{Read-Copy-Update} … … 24 27 In this pattern, resizing is done by creating a copy of the internal data structures, updating the copy with the desired changes, and then attempting an Indiana Jones Switch to replace the original with the copy. 25 28 This approach potentially has the advantage that it may not need any synchronization to do the switch. 26 The switch definitely implies a race where \procs could still use the previous, original, data structure after the copy was switched in. 27 The important question then becomes whether or not this race can be recovered from. 28 If the changes that arrived late can be transferred from the original to the copy then this solution works. 29 30 For linked-lists, dequeing is somewhat of a problem. 29 However, there is a race where \procs could still use the previous, original, data structure after the copy was switched in. 30 This race not only requires some added memory reclamation scheme, it also requires that operations made on the stale original version be eventually moved to the copy.
31 32 For linked-lists, enqueuing is only somewhat problematic: \ats enqueued to the original queues need to be transferred to the new ones, which might not preserve ordering. 33 Dequeuing is more challenging. 31 34 Dequeuing from the original will not necessarily update the copy, which could lead to multiple \procs dequeuing the same \at. 32 35 Fixing this requires making the array contain pointers to subqueues rather than the subqueues themselves. 33 36 Fixing this requires more synchronization or more indirection on the queues. 34 37 35 38 Another challenge is that the original must be kept until all \procs have witnessed the change. … … 97 100 In addition to users manually changing the number of \procs, it is desirable to support ``removing'' \procs when there is not enough \ats for all the \procs to be useful. 98 101 While manual resizing is expected to be rare, the number of \ats is expected to vary much more, which means \procs may need to be ``removed'' for only short periods of time. 99 Furthermore, race conditions that spuriously lead to the impression no \ats are ready are actually common in practice. 100 Therefore \procs should not be actually \emph{removed} but simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready. 102 Furthermore, race conditions that spuriously lead to the impression that no \ats are ready are actually common in practice. 103 Therefore resources associated with \procs should not be freed; instead, \procs are simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready. 101 104 This state is referred to as \newterm{Idle-Sleep}. 102 105 103 106 … … 110 113 The \CFA scheduler simply follows the ``Race-to-Idle''\cit{https://doi.org/10.1137/1.9781611973099.100} approach where a sleeping \proc is woken any time an \at becomes ready and \procs go to idle sleep anytime they run out of work. 111 114 115 \section{Sleeping} 116 As usual, the cornerstone of any feature related to the kernel is the choice of system call. 117 In terms of blocking a \gls{kthrd} until some event occurs, the Linux kernel has many available options: 118 119 \paragraph{\texttt{pthread\_mutex}/\texttt{pthread\_cond}} 120 The most classic option is to use some combination of \texttt{pthread\_mutex} and \texttt{pthread\_cond}. 121 These serve as straightforward mutual exclusion and synchronization tools and allow a \gls{kthrd} to wait on a \texttt{pthread\_cond} until signalled. 122 While this approach is generally perfectly appropriate for \glspl{kthrd} waiting on one another, \io operations do not signal \texttt{pthread\_cond}s. 123 For \io results to wake a \proc waiting on a \texttt{pthread\_cond}, a different \gls{kthrd} must be woken up first, which can then signal the \proc. 124 125 \subsection{\texttt{io\_uring} and Epoll} 126 An alternative is to flip the problem on its head and block waiting for \io, using \texttt{io\_uring} or even \texttt{epoll}. 127 This creates the inverse situation, where \io operations directly wake sleeping \procs but waking a \proc from a running \gls{kthrd} must use an indirect scheme. 128 This generally takes the form of creating a file descriptor, \eg, a dummy file, a pipe or an event fd, and using that file descriptor when \procs need to wake each other. 129 This leads to additional complexity because there can be a race between these artificial \io operations and genuine \io operations. 130 If not handled correctly, this can lead to the artificial files going out of sync.
131 132 \subsection{Event FDs} 133 Another interesting approach is to use an event file descriptor\cit{eventfd}. 134 This Linux feature is a file descriptor that behaves like \io, \ie, it uses \texttt{read} and \texttt{write}, but it also behaves like a semaphore. 135 Indeed, all reads and writes must use 64-bit values\footnote{On 64-bit Linux; a 32-bit Linux would use 32-bit values.}. 136 Writes add their values to the buffer, that is, arithmetic addition and not buffer append, and reads zero out the buffer and return the value accumulated so far\footnote{This is without the \texttt{EFD\_SEMAPHORE} flag. This flag changes the behavior of \texttt{read} but is not needed for this work.}. 137 If a read is made while the buffer is already 0, the read blocks until a non-0 value is added. 138 What makes this feature particularly interesting is that \texttt{io\_uring} supports the \texttt{IORING\_REGISTER\_EVENTFD} command to register an event fd with a particular instance. 139 Once the event fd is registered, any \io completion will result in \texttt{io\_uring} writing to the event FD. 140 This means that a \proc waiting on the event FD can be \emph{directly} woken up by either other \procs or incoming \io. 141 142 \begin{figure} 143 \centering 144 \input{idle1.pstex_t} 145 \caption[Basic Idle Sleep Data Structure]{Basic Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock. 146 Each \proc has a private event FD.} 147 \label{fig:idle1} 148 \end{figure} 149 112 150 113 151 \section{Tracking Sleepers} 114 152 Tracking which \procs are in idle sleep requires a data structure holding all the sleeping \procs, but, more importantly, it requires a concurrent \emph{handshake} so that no \at is stranded on a ready-queue with no active \proc. 115 153 The classic challenge occurs when a \at is made ready while a \proc is going to sleep: there is a race where the new \at may not see the sleeping \proc and the sleeping \proc may not see the ready \at. 116 117 Furthermore, the ``Race-to-Idle'' approach means that there is some 118 119 \section{Sleeping} 120 121 \subsection{Event FDs} 122 123 \subsection{Epoll} 124 125 \subsection{\texttt{io\_uring}} 126 127 \section{Reducing Latency} 154 Since \ats can be made ready by timers, \io operations or other events outside a cluster, this race can occur even if the \proc going to sleep is the only \proc awake. 155 As a result, improper handling of this race can lead to all \procs going to sleep and the system deadlocking. 156 157 Furthermore, the ``Race-to-Idle'' approach means that there may be contention on the data structure tracking sleepers. 158 Contention slowing down \procs attempting to sleep or wake up can be tolerated. 159 These \procs are not doing useful work and therefore not contributing to overall performance. 160 However, notifying, that is, checking if a \proc must be woken up and doing so if needed, can significantly affect overall performance and must be low cost. 161 162 \subsection{Sleepers List} 163 Each cluster maintains a list of idle \procs, organized as a stack. 164 This ordering hopefully allows \procs at the tail to stay in idle sleep for extended periods of time. 165 Because of these unbalanced performance requirements, the algorithm tracking sleepers is designed to have idle \procs handle as much of the work as possible. 166 The idle \procs maintain the list of sleepers among themselves, and notifying a sleeping \proc takes as little work as possible.
167 This approach means that maintaining the list is fairly straightforward. 168 The list can simply use a single lock per cluster and only \procs that are getting in and out of idle state will contend for that lock. 169 170 This approach also simplifies notification. 171 Indeed, \procs need to be notified when a new \at is readied, but they also must be notified during resizing, so the \gls{kthrd} can be joined. 172 This means that whichever entity removes idle \procs from the sleeper list must be able to do so in any order. 173 Using a simple lock over this data structure makes the removal much simpler than using a lock-free data structure. 174 The notification process then simply needs to wake up the desired idle \proc, using \texttt{pthread\_cond\_signal}, \texttt{write} on an fd, etc., and the \proc will handle the rest. 175 176 \subsection{Reducing Latency} 177 As mentioned in this section, \procs going idle for extremely short periods of time is likely in certain common scenarios. 178 Therefore, the latency of doing a system call to read from and write to the event fd can actually negatively affect overall performance in a notable way. 179 It is important to reduce the latency and contention of the notification as much as possible. 180 Figure~\ref{fig:idle1} shows the basic idle sleep data structure. 181 For the notifiers, this data structure can cause contention on the lock and the event fd syscall can cause notable latency. 182 183 \begin{figure} 184 \centering 185 \input{idle2.pstex_t} 186 \caption[Improved Idle Sleep Data Structure]{Improved Idle Sleep Data Structure \smallskip\newline An atomic pointer is added to the list, pointing to the Event FD of the first \proc on the list.} 187 \label{fig:idle2} 188 \end{figure} 189 190 The contention is mostly due to the lock on the list needing to be held to get to the head \proc. 191 That lock can be contended by \procs attempting to go to sleep, \procs waking or notification attempts. 192 The contention from the \procs attempting to go to sleep can be mitigated slightly by using \texttt{try\_acquire} instead, so the \procs simply continue searching for \ats if the lock is held. 193 This trick cannot be used for waking \procs since they are not in a state where they can run \ats. 194 However, it is worth noting that notification does not strictly require accessing the list or the head \proc. 195 Therefore, contention can be reduced notably by having notifiers avoid the lock entirely and adding a pointer to the event fd of the first idle \proc, as in Figure~\ref{fig:idle2}. 196 To avoid contention between the notifiers, instead of simply reading the atomic pointer, notifiers atomically exchange it to \texttt{null} so only one notifier will contend on the system call. 197 198 \begin{figure} 199 \centering 200 \input{idle_state.pstex_t} 201 \caption[Idle Sleep State Flag]{Idle Sleep State Flag \smallskip\newline A three-state flag is added beside each \proc's event FD to avoid unnecessary system calls.} 202 \label{fig:idle:state} 203 \end{figure} 204 205 The next optimization that can be done is to avoid the latency of the event fd when possible. 206 This can be done by adding what is effectively a benaphore\cit{benaphore} in front of the event fd. 207 A simple three-state flag is added beside the event fd to avoid unnecessary system calls, as shown in Figure~\ref{fig:idle:state}. 208 The flag starts in state \texttt{SEARCH}, while the \proc is searching for \ats to run.
209 The \proc then confirms the sleep by atomically swapping the state to \texttt{SLEEP}. 210 If the previous state was still \texttt{SEARCH}, then the \proc proceeds to read the event fd. 211 Meanwhile, notifiers atomically exchange the state to \texttt{AWAKE}. 212 If the previous state was \texttt{SLEEP}, then the notifier must write to the event fd. 213 However, if the notification arrives almost immediately after the \proc marks itself idle, then both reads and writes on the event fd can be omitted, which reduces latency notably. 214 This leads to the final data structure shown in Figure~\ref{fig:idle}. 215 216 \begin{figure} 217 \centering 218 \input{idle.pstex_t} 219 \caption[Low-latency Idle Sleep Data Structure]{Low-latency Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock. 220 Each \proc has a private event FD with a benaphore in front of it. 221 The list also has an atomic pointer to the event fd and benaphore of the first \proc on the list.} 222 \label{fig:idle} 223 \end{figure} -
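Assuming Linux's eventfd and C11 atomics, and borrowing the SEARCH/SLEEP/AWAKE names from the text above, a minimal stand-alone sketch of this handshake could look as follows. It is an illustration only, not the CFA runtime, and error handling is omitted; a compare-and-swap is used where the text says "swap", which gives the same previous-state check.

/* idle_sleep_sketch.c : illustrative only; compile with: cc -pthread idle_sleep_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

enum { SEARCH, SLEEP, AWAKE };          /* names taken from the text above */

struct idle_fd {
	atomic_int state;                   /* SEARCH, SLEEP or AWAKE */
	int        evfd;                    /* event fd used to actually block */
};

static void idle_init( struct idle_fd * self ) {
	atomic_init( &self->state, SEARCH );
	self->evfd = eventfd( 0, 0 );
}

/* called by a processor that found no ready work */
static void go_to_sleep( struct idle_fd * self ) {
	int expected = SEARCH;
	uint64_t buf;
	/* confirm the sleep; skip the read entirely if a notification already arrived */
	if( atomic_compare_exchange_strong( &self->state, &expected, SLEEP ) )
		read( self->evfd, &buf, sizeof(buf) );          /* blocks until a notifier writes */
	atomic_store( &self->state, SEARCH );               /* back to searching for work */
}

/* called when new work becomes ready */
static void notify( struct idle_fd * target ) {
	uint64_t one = 1;
	int prev = atomic_exchange( &target->state, AWAKE );
	if( prev == SLEEP )                                  /* only pay the syscall if the proc already slept */
		write( target->evfd, &one, sizeof(one) );
	/* if prev was SEARCH, the proc sees AWAKE, skips its read, and no syscall happens at all */
}

static struct idle_fd idle;

static void * notifier_thread( void * unused ) {
	(void)unused;
	sleep( 1 );                                          /* pretend work becomes ready later */
	notify( &idle );
	return NULL;
}

int main( void ) {
	pthread_t tid;
	idle_init( &idle );
	pthread_create( &tid, NULL, notifier_thread, NULL );
	go_to_sleep( &idle );                                /* blocks on the event fd until notified */
	pthread_join( tid, NULL );
	return 0;
}

When the notification races with the processor marking itself idle, the compare-and-swap fails, both the read and the write system calls are skipped, and only the cheap atomic operations are paid, which is the latency saving described above.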
doc/theses/thierry_delisle_PhD/thesis/thesis.tex
r015925a re5d9274 80 80 %\usepackage{nomencl} % For a nomenclature (optional; available from ctan.org) 81 81 \usepackage{amsmath,amssymb,amstext} % Lots of math symbols and environments 82 \usepackage {xcolor}82 \usepackage[dvipsnames]{xcolor} 83 83 \usepackage{graphicx} % For including graphics 84 \usepackage{subcaption} 84 85 85 86 % Hyperlinks make it very easy to navigate an electronic document. … … 104 105 colorlinks=true, % false: boxed links; true: colored links 105 106 linkcolor=blue, % color of internal links 106 citecolor= green,% color of links to bibliography107 citecolor=OliveGreen, % color of links to bibliography 107 108 filecolor=magenta, % color of file links 108 109 urlcolor=cyan % color of external links … … 204 205 \newcommand\at{\gls{at}\xspace}% 205 206 \newcommand\ats{\glspl{at}\xspace}% 207 \newcommand\Proc{\Pls{proc}\xspace}% 206 208 \newcommand\proc{\gls{proc}\xspace}% 207 209 \newcommand\procs{\glspl{proc}\xspace}% -
libcfa/src/Makefile.am
r015925a re5d9274 33 33 # The built sources must not depend on the installed inst_headers_src 34 34 AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr -I$(srcdir)/concurrency $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@ 35 AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions - pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@35 AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions -fvisibility=hidden -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@ 36 36 AM_CCASFLAGS = -g -Wall -Werror=return-type -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@ 37 37 CFACC = @CFACC@ … … 194 194 195 195 prelude.o : prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@ 196 ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c - o ${@}196 ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -fvisibility=default -o ${@} 197 197 198 198 prelude.lo: prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@ 199 199 ${AM_V_GEN}$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile \ 200 $(CFACOMPILE) -quiet -XCFA,-l ${<} -c - o ${@}200 $(CFACOMPILE) -quiet -XCFA,-l ${<} -c -fvisibility=default -o ${@} 201 201 202 202 concurrency/io/call.cfa: $(srcdir)/concurrency/io/call.cfa.in -
libcfa/src/algorithms/range_iterator.cfa
r015925a re5d9274 20 20 #include <fstream.hfa> 21 21 22 void main(RangeIter & this) { 22 #include "bits/defs.hfa" 23 24 void main(RangeIter & this) libcfa_public { 23 25 for() { 24 26 this._start = -1; -
libcfa/src/assert.cfa
r015925a re5d9274 19 19 #include <unistd.h> // STDERR_FILENO 20 20 #include "bits/debug.hfa" 21 #include "bits/defs.hfa" 21 22 22 23 extern "C" { … … 26 27 27 28 // called by macro assert in assert.h 28 void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) { 29 // would be cool to remove libcfa_public but it's needed for libcfathread 30 void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) libcfa_public { 29 31 __cfaabi_bits_print_safe( STDERR_FILENO, CFA_ASSERT_FMT ".\n", assertion, __progname, function, line, file ); 30 32 abort(); … … 32 34 33 35 // called by macro assertf 34 void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) { 36 // would be cool to remove libcfa_public but it's needed for libcfathread 37 void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) libcfa_public { 35 38 __cfaabi_bits_acquire(); 36 39 __cfaabi_bits_print_nolock( STDERR_FILENO, CFA_ASSERT_FMT ": ", assertion, __progname, function, line, file ); -
libcfa/src/bits/debug.cfa
r015925a re5d9274 21 21 #include <unistd.h> 22 22 23 #include "bits/defs.hfa" 24 23 25 enum { buffer_size = 4096 }; 24 26 static char buffer[ buffer_size ]; 25 27 26 28 extern "C" { 27 void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) { 29 // would be cool to remove libcfa_public but it's needed for libcfathread 30 void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) libcfa_public { 28 31 // ensure all data is written 29 32 for ( int count = 0, retcode; count < len; count += retcode ) { … … 44 47 void __cfaabi_bits_release() __attribute__((__weak__)) {} 45 48 46 int __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) { 49 // would be cool to remove libcfa_public but it's needed for libcfathread 50 int __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) libcfa_public { 47 51 va_list args; 48 52 -
libcfa/src/bits/defs.hfa
r015925a re5d9274 36 36 #define __cfa_dlink(x) struct { struct x * next; struct x * back; } __dlink_substitute 37 37 #endif 38 39 #define libcfa_public __attribute__((visibility("default"))) 38 40 39 41 #ifdef __cforall -
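Several of the libcfa hunks in this changeset follow one pattern: the library is now compiled with -fvisibility=hidden (see the Makefile.am hunk earlier) and only the intended entry points are re-exported, either with the libcfa_public macro defined here or with #pragma GCC visibility push(default). As a stand-alone illustration of that pattern, using hypothetical names rather than libcfa itself:

/* demo.c : build as a shared library with
 *    gcc -fvisibility=hidden -fPIC -shared demo.c -o libdemo.so
 */
#define demo_public __attribute__((visibility("default")))

/* hidden: internal helper, never part of the library's binary interface */
static int internal_helper( int x ) {
	return x * 2;
}

/* also hidden, despite external linkage, because of -fvisibility=hidden */
int internal_entry( int x ) {
	return internal_helper( x ) + 1;
}

/* explicitly re-exported: the only symbol visible to users of libdemo.so */
demo_public int demo_api( int x ) {
	return internal_entry( x );
}

With this setup, demo_api should be the only one of the three functions to appear in the shared object's dynamic symbol table (for example in nm -D output), which keeps the exported interface small and can reduce dynamic-linking overhead.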
libcfa/src/bits/weakso_locks.cfa
r015925a re5d9274 18 18 #include "bits/weakso_locks.hfa" 19 19 20 #pragma GCC visibility push(default) 21 20 22 void ?{}( blocking_lock &, bool, bool ) {} 21 23 void ^?{}( blocking_lock & ) {} -
libcfa/src/common.cfa
r015925a re5d9274 18 18 #include <stdlib.h> // div_t, *div 19 19 20 #pragma GCC visibility push(default) 21 20 22 //--------------------------------------- 21 23 -
libcfa/src/concurrency/alarm.cfa
r015925a re5d9274 141 141 //============================================================================================= 142 142 143 void sleep( Duration duration ) {143 void sleep( Duration duration ) libcfa_public { 144 144 alarm_node_t node = { active_thread(), duration, 0`s }; 145 145 -
libcfa/src/concurrency/clib/cfathread.cfa
r015925a re5d9274 237 237 238 238 typedef ThreadCancelled(cfathread_object) cfathread_exception; 239 typedef ThreadCancelled_vtable(cfathread_object) cfathread_vtable;239 typedef vtable(ThreadCancelled(cfathread_object)) cfathread_vtable; 240 240 241 241 void defaultResumptionHandler(ThreadCancelled(cfathread_object) & except) { … … 283 283 284 284 typedef ThreadCancelled(__cfainit) __cfainit_exception; 285 typedef ThreadCancelled_vtable(__cfainit) __cfainit_vtable;285 typedef vtable(ThreadCancelled(__cfainit)) __cfainit_vtable; 286 286 287 287 void defaultResumptionHandler(ThreadCancelled(__cfainit) & except) { … … 326 326 } 327 327 328 #pragma GCC visibility push(default) 329 328 330 //================================================================================ 329 331 // Main Api 330 332 extern "C" { 331 int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) {333 int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) libcfa_public { 332 334 *cl = new(); 333 335 return 0; 334 336 } 335 337 336 cfathread_cluster_t cfathread_cluster_self(void) {338 cfathread_cluster_t cfathread_cluster_self(void) libcfa_public { 337 339 return active_cluster(); 338 340 } 339 341 340 int cfathread_cluster_print_stats( cfathread_cluster_t cl ) {342 int cfathread_cluster_print_stats( cfathread_cluster_t cl ) libcfa_public { 341 343 #if !defined(__CFA_NO_STATISTICS__) 342 344 print_stats_at_exit( *cl, CFA_STATS_READY_Q | CFA_STATS_IO ); -
libcfa/src/concurrency/coroutine.cfa
r015925a re5d9274 48 48 //----------------------------------------------------------------------------- 49 49 forall(T &) 50 void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {50 void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public { 51 51 dst->virtual_table = src->virtual_table; 52 52 dst->the_coroutine = src->the_coroutine; … … 55 55 56 56 forall(T &) 57 const char * msg(CoroutineCancelled(T) *) {57 const char * msg(CoroutineCancelled(T) *) libcfa_public { 58 58 return "CoroutineCancelled(...)"; 59 59 } … … 62 62 forall(T & | is_coroutine(T)) 63 63 void __cfaehm_cancelled_coroutine( 64 T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled , (T)) ){64 T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) ) libcfa_public { 65 65 verify( desc->cancellation ); 66 66 desc->state = Cancelled; … … 89 89 90 90 void __stack_prepare( __stack_info_t * this, size_t create_size ); 91 void __stack_clean ( __stack_info_t * this );91 static void __stack_clean ( __stack_info_t * this ); 92 92 93 93 //----------------------------------------------------------------------------- … … 114 114 } 115 115 116 void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) {116 void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) { 117 117 (this.context){0p, 0p}; 118 118 (this.stack){storage, storageSize}; … … 124 124 } 125 125 126 void ^?{}(coroutine$& this) {126 void ^?{}(coroutine$& this) libcfa_public { 127 127 if(this.state != Halted && this.state != Start && this.state != Primed) { 128 128 coroutine$ * src = active_coroutine(); … … 146 146 // Part of the Public API 147 147 // Not inline since only ever called once per coroutine 148 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled ,(T)); })149 void prime(T& cor) {148 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); }) 149 void prime(T& cor) libcfa_public { 150 150 coroutine$* this = get_coroutine(cor); 151 151 assert(this->state == Start); … … 155 155 } 156 156 157 [void *, size_t] __stack_alloc( size_t storageSize ) {157 static [void *, size_t] __stack_alloc( size_t storageSize ) { 158 158 const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment 159 159 assert(__page_size != 0l); … … 193 193 } 194 194 195 void __stack_clean ( __stack_info_t * this ) {195 static void __stack_clean ( __stack_info_t * this ) { 196 196 void * storage = this->storage->limit; 197 197 … … 215 215 } 216 216 217 void __stack_prepare( __stack_info_t * this, size_t create_size ) {217 void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public { 218 218 const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment 219 219 bool userStack; -
libcfa/src/concurrency/coroutine.hfa
r015925a re5d9274 22 22 //----------------------------------------------------------------------------- 23 23 // Exception thrown from resume when a coroutine stack is cancelled. 24 EHM_FORALL_EXCEPTION(CoroutineCancelled, (coroutine_t &), (coroutine_t)) ( 24 forall(coroutine_t &) 25 exception CoroutineCancelled { 25 26 coroutine_t * the_coroutine; 26 27 exception_t * the_exception; 27 );28 }; 28 29 29 30 forall(T &) … … 37 38 // Anything that implements this trait can be resumed. 38 39 // Anything that is resumed is a coroutine. 39 trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled ,(T))) {40 trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled(T))) { 40 41 void main(T & this); 41 42 coroutine$ * get_coroutine(T & this); … … 60 61 //----------------------------------------------------------------------------- 61 62 // Public coroutine API 62 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled ,(T)); })63 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); }) 63 64 void prime(T & cor); 64 65 … … 113 114 114 115 extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */); 115 extern void __stack_clean ( __stack_info_t * this );116 117 116 118 117 // Suspend implementation inlined for performance … … 141 140 forall(T & | is_coroutine(T)) 142 141 void __cfaehm_cancelled_coroutine( 143 T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled ,(T)) );142 T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) ); 144 143 145 144 // Resume implementation inlined for performance 146 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled ,(T)); })145 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); }) 147 146 static inline T & resume(T & cor) { 148 147 // optimization : read TLS once and reuse it -
libcfa/src/concurrency/exception.cfa
r015925a re5d9274 64 64 extern "C" { 65 65 66 struct exception_context_t * this_exception_context(void) {66 struct exception_context_t * this_exception_context(void) libcfa_public { 67 67 return &__get_stack( active_coroutine() )->exception_context; 68 68 } 69 69 70 _Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) {70 _Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) libcfa_public { 71 71 _Unwind_Stop_Fn stop_func; 72 72 void * stop_param; -
libcfa/src/concurrency/invoke.c
r015925a re5d9274 36 36 extern void enable_interrupts( _Bool poll ); 37 37 38 void __cfactx_invoke_coroutine(38 libcfa_public void __cfactx_invoke_coroutine( 39 39 void (*main)(void *), 40 40 void *this … … 70 70 } 71 71 72 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));72 libcfa_public void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__)); 73 73 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) { 74 74 _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor ); … … 77 77 } 78 78 79 void __cfactx_invoke_thread(79 libcfa_public void __cfactx_invoke_thread( 80 80 void (*main)(void *), 81 81 void *this … … 98 98 } 99 99 100 void __cfactx_start(100 libcfa_public void __cfactx_start( 101 101 void (*main)(void *), 102 102 struct coroutine$ * cor, -
libcfa/src/concurrency/io.cfa
r015925a re5d9274 221 221 const unsigned long long ctsc = rdtscl(); 222 222 223 if(proc->io.target == MAX) {223 if(proc->io.target == UINT_MAX) { 224 224 uint64_t chaos = __tls_rand(); 225 225 unsigned ext = chaos & 0xff; … … 232 232 else { 233 233 const unsigned target = proc->io.target; 234 /* paranoid */ verify( io.tscs[target].tv != MAX );234 /* paranoid */ verify( io.tscs[target].tv != ULLONG_MAX ); 235 235 HELP: if(target < ctxs_count) { 236 236 const unsigned long long cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io); … … 246 246 __STATS__( true, io.calls.helped++; ) 247 247 } 248 proc->io.target = MAX;248 proc->io.target = UINT_MAX; 249 249 } 250 250 } … … 340 340 // for convenience, return both the index and the pointer to the sqe 341 341 // sqe == &sqes[idx] 342 struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {342 struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public { 343 343 // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want); 344 344 … … 419 419 } 420 420 421 void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {421 void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public { 422 422 // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager"); 423 423 -
libcfa/src/concurrency/io/call.cfa.in
r015925a re5d9274 139 139 // I/O Interface 140 140 //============================================================================================= 141 #pragma GCC visibility push(default) 141 142 """ 142 143 -
libcfa/src/concurrency/io/setup.cfa
r015925a re5d9274 26 26 27 27 #if !defined(CFA_HAVE_LINUX_IO_URING_H) 28 void ?{}(io_context_params & this) {}28 void ?{}(io_context_params & this) libcfa_public {} 29 29 30 30 void ?{}($io_context & this, struct cluster & cl) {} … … 66 66 #pragma GCC diagnostic pop 67 67 68 void ?{}(io_context_params & this) {68 void ?{}(io_context_params & this) libcfa_public { 69 69 this.num_entries = 256; 70 70 } -
libcfa/src/concurrency/io/types.hfa
r015925a re5d9274 17 17 #pragma once 18 18 19 #include <limits.h> 20 19 21 extern "C" { 20 22 #include <linux/types.h> … … 25 27 #include "iofwd.hfa" 26 28 #include "kernel/fwd.hfa" 27 #include "limits.hfa"28 29 29 30 #if defined(CFA_HAVE_LINUX_IO_URING_H) … … 140 141 const __u32 tail = *this->cq.tail; 141 142 142 if(head == tail) return MAX;143 if(head == tail) return ULLONG_MAX; 143 144 144 145 return this->cq.ts; -
libcfa/src/concurrency/kernel.cfa
r015925a re5d9274 389 389 390 390 // KERNEL_ONLY 391 void returnToKernel() {391 static void returnToKernel() { 392 392 /* paranoid */ verify( ! __preemption_enabled() ); 393 393 coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner); … … 547 547 } 548 548 549 void unpark( thread$ * thrd, unpark_hint hint ) {549 void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public { 550 550 if( !thrd ) return; 551 551 … … 558 558 } 559 559 560 void park( void ) {560 void park( void ) libcfa_public { 561 561 __disable_interrupts_checked(); 562 562 /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION ); … … 601 601 602 602 // KERNEL ONLY 603 bool force_yield( __Preemption_Reason reason ) {603 bool force_yield( __Preemption_Reason reason ) libcfa_public { 604 604 __disable_interrupts_checked(); 605 605 thread$ * thrd = kernelTLS().this_thread; … … 849 849 //----------------------------------------------------------------------------- 850 850 // Debug 851 bool threading_enabled(void) __attribute__((const)) {851 bool threading_enabled(void) __attribute__((const)) libcfa_public { 852 852 return true; 853 853 } … … 856 856 // Statistics 857 857 #if !defined(__CFA_NO_STATISTICS__) 858 void print_halts( processor & this ) {858 void print_halts( processor & this ) libcfa_public { 859 859 this.print_halts = true; 860 860 } … … 873 873 } 874 874 875 void crawl_cluster_stats( cluster & this ) {875 static void crawl_cluster_stats( cluster & this ) { 876 876 // Stop the world, otherwise stats could get really messed-up 877 877 // this doesn't solve all problems but does solve many … … 889 889 890 890 891 void print_stats_now( cluster & this, int flags ) {891 void print_stats_now( cluster & this, int flags ) libcfa_public { 892 892 crawl_cluster_stats( this ); 893 893 __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this ); -
libcfa/src/concurrency/kernel.hfa
r015925a re5d9274 49 49 50 50 // Coroutine used py processors for the 2-step context switch 51 coroutine processorCtx_t { 51 52 struct processorCtx_t { 53 struct coroutine$ self; 52 54 struct processor * proc; 53 55 }; -
libcfa/src/concurrency/kernel/cluster.cfa
r015925a re5d9274 49 49 50 50 // returns the maximum number of processors the RWLock support 51 __attribute__((weak)) unsigned __max_processors() {51 __attribute__((weak)) unsigned __max_processors() libcfa_public { 52 52 const char * max_cores_s = getenv("CFA_MAX_PROCESSORS"); 53 53 if(!max_cores_s) { … … 233 233 if(is_empty(sl)) { 234 234 assert( sl.anchor.next == 0p ); 235 assert( sl.anchor.ts == -1llu);235 assert( sl.anchor.ts == MAX ); 236 236 assert( mock_head(sl) == sl.prev ); 237 237 } else { 238 238 assert( sl.anchor.next != 0p ); 239 assert( sl.anchor.ts != -1llu);239 assert( sl.anchor.ts != MAX ); 240 240 assert( mock_head(sl) != sl.prev ); 241 241 } … … 259 259 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count); 260 260 it->rdq.id = valrq; 261 it->rdq.target = MAX;261 it->rdq.target = UINT_MAX; 262 262 valrq += __shard_factor.readyq; 263 263 #if defined(CFA_HAVE_LINUX_IO_URING_H) 264 264 it->io.ctx->cq.id = valio; 265 it->io.target = MAX;265 it->io.target = UINT_MAX; 266 266 valio += __shard_factor.io; 267 267 #endif … … 472 472 this.prev = mock_head(this); 473 473 this.anchor.next = 0p; 474 this.anchor.ts = -1llu;474 this.anchor.ts = MAX; 475 475 #if !defined(__CFA_NO_STATISTICS__) 476 476 this.cnt = 0; … … 484 484 /* paranoid */ verify( &mock_head(this)->link.ts == &this.anchor.ts ); 485 485 /* paranoid */ verify( mock_head(this)->link.next == 0p ); 486 /* paranoid */ verify( mock_head(this)->link.ts == -1llu);486 /* paranoid */ verify( mock_head(this)->link.ts == MAX ); 487 487 /* paranoid */ verify( mock_head(this) == this.prev ); 488 488 /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 ); … … 495 495 // Make sure the list is empty 496 496 /* paranoid */ verify( this.anchor.next == 0p ); 497 /* paranoid */ verify( this.anchor.ts == -1llu);497 /* paranoid */ verify( this.anchor.ts == MAX ); 498 498 /* paranoid */ verify( mock_head(this) == this.prev ); 499 499 } -
libcfa/src/concurrency/kernel/cluster.hfa
r015925a re5d9274 19 19 #include "kernel/private.hfa" 20 20 21 #include "limits.hfa"21 #include <limits.h> 22 22 23 23 //----------------------------------------------------------------------- … … 37 37 38 38 static inline void touch_tsc(__timestamp_t * tscs, size_t idx, unsigned long long ts_prev, unsigned long long ts_next) { 39 if (ts_next == MAX) return;39 if (ts_next == ULLONG_MAX) return; 40 40 unsigned long long now = rdtscl(); 41 41 unsigned long long pma = __atomic_load_n(&tscs[ idx ].ma, __ATOMIC_RELAXED); … … 59 59 for(i; shard_factor) { 60 60 unsigned long long ptsc = ts(data[start + i]); 61 if(ptsc != -1ull) {61 if(ptsc != ULLONG_MAX) { 62 62 /* paranoid */ verify( start + i < count ); 63 63 unsigned long long tsc = moving_average(ctsc, ptsc, tscs[start + i].ma); -
libcfa/src/concurrency/kernel/private.hfa
r015925a re5d9274 109 109 //----------------------------------------------------------------------------- 110 110 // Processor 111 void main(processorCtx_t *); 111 void main(processorCtx_t &); 112 static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; } 112 113 113 114 void * __create_pthread( pthread_t *, void * (*)(void *), void * ); -
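The kernel.hfa and private.hfa hunks above replace the coroutine processorCtx_t declaration with a plain struct that embeds a coroutine$ descriptor, plus hand-written main and get_coroutine routines. This spells out, by hand, roughly what the coroutine keyword would otherwise generate. A simplified sketch (my_ctx is a hypothetical name, not the library's code):

// Hand-rolled coroutine shape, mirroring the processorCtx_t change above.
struct my_ctx {
    struct coroutine$ self;       // embedded coroutine descriptor from the CFA runtime
    int user_state;               // whatever per-coroutine data is needed
};
void main( my_ctx & );            // the coroutine body, defined elsewhere
static inline coroutine$ * get_coroutine( my_ctx & this ) {
    return &this.self;            // lets generic coroutine code reach the descriptor
}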
libcfa/src/concurrency/kernel/startup.cfa
r015925a re5d9274 120 120 #endif 121 121 122 cluster * mainCluster ;122 cluster * mainCluster libcfa_public; 123 123 processor * mainProcessor; 124 124 thread$ * mainThread; … … 169 169 }; 170 170 171 void ?{}( current_stack_info_t & this ) {171 static void ?{}( current_stack_info_t & this ) { 172 172 __stack_context_t ctx; 173 173 CtxGet( ctx ); … … 209 209 // Construct the processor context of the main processor 210 210 void ?{}(processorCtx_t & this, processor * proc) { 211 (this. __cor){ "Processor" };212 this. __cor.starter = 0p;211 (this.self){ "Processor" }; 212 this.self.starter = 0p; 213 213 this.proc = proc; 214 214 } … … 507 507 self_mon_p = &self_mon; 508 508 link.next = 0p; 509 link.ts = -1llu;509 link.ts = MAX; 510 510 preferred = ready_queue_new_preferred(); 511 511 last_proc = 0p; … … 526 526 // Construct the processor context of non-main processors 527 527 static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) { 528 (this. __cor){ info };528 (this.self){ info }; 529 529 this.proc = proc; 530 530 } … … 578 578 } 579 579 580 void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {580 void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) libcfa_public { 581 581 ( this.terminated ){}; 582 582 ( this.runner ){}; … … 591 591 } 592 592 593 void ?{}(processor & this, const char name[], cluster & _cltr) {593 void ?{}(processor & this, const char name[], cluster & _cltr) libcfa_public { 594 594 (this){name, _cltr, 0p}; 595 595 } 596 596 597 597 extern size_t __page_size; 598 void ^?{}(processor & this) with( this ){598 void ^?{}(processor & this) libcfa_public with( this ) { 599 599 /* paranoid */ verify( !__atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ); 600 600 __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this); … … 623 623 } 624 624 625 void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {625 void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) libcfa_public with( this ) { 626 626 this.name = name; 627 627 this.preemption_rate = preemption_rate; … … 658 658 } 659 659 660 void ^?{}(cluster & this) {660 void ^?{}(cluster & this) libcfa_public { 661 661 destroy(this.io.arbiter); 662 662 -
libcfa/src/concurrency/locks.cfa
r015925a re5d9274 24 24 #include <stdlib.hfa> 25 25 26 #pragma GCC visibility push(default) 27 26 28 //----------------------------------------------------------------------------- 27 29 // info_thread … … 116 118 } 117 119 118 void pop_and_set_new_owner( blocking_lock & this ) with( this ) {120 static void pop_and_set_new_owner( blocking_lock & this ) with( this ) { 119 121 thread$ * t = &try_pop_front( blocked_threads ); 120 122 owner = t; … … 264 266 void ^?{}( alarm_node_wrap(L) & this ) { } 265 267 266 void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {268 static void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) { 267 269 // This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin. 268 270 lock( cond->lock __cfaabi_dbg_ctx2 ); … … 288 290 289 291 // this casts the alarm node to our wrapped type since we used type erasure 290 void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }292 static void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); } 291 293 } 292 294 … … 305 307 void ^?{}( condition_variable(L) & this ){ } 306 308 307 void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {309 static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) { 308 310 if(&popped != 0p) { 309 311 popped.signalled = true; … … 350 352 int counter( condition_variable(L) & this ) with(this) { return count; } 351 353 352 s ize_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {354 static size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) { 353 355 // add info_thread to waiting queue 354 356 insert_last( blocked_threads, *i ); … … 363 365 364 366 // helper for wait()'s' with no timeout 365 void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {367 static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) { 366 368 lock( lock __cfaabi_dbg_ctx2 ); 367 369 size_t recursion_count = queue_and_get_recursion(this, &i); … … 380 382 381 383 // helper for wait()'s' with a timeout 382 void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {384 static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) { 383 385 lock( lock __cfaabi_dbg_ctx2 ); 384 386 size_t recursion_count = queue_and_get_recursion(this, &info); … … 415 417 // fast_cond_var 416 418 void ?{}( fast_cond_var(L) & this ){ 417 this.blocked_threads{}; 419 this.blocked_threads{}; 418 420 #ifdef __CFA_DEBUG__ 419 421 this.lock_used = 0p; -
libcfa/src/concurrency/monitor.cfa
r015925a re5d9274 44 44 static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] ); 45 45 46 static inline void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ); 47 static inline void ?{}(__condition_criterion_t & this ); 48 static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner ); 49 46 50 static inline void init ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); 47 51 static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); … … 243 247 244 248 // Leave single monitor 245 void __leave( monitor$ * this ) {249 static void __leave( monitor$ * this ) { 246 250 // Lock the monitor spinlock 247 251 lock( this->lock __cfaabi_dbg_ctx2 ); … … 278 282 279 283 // Leave single monitor for the last time 280 void __dtor_leave( monitor$ * this, bool join ) {284 static void __dtor_leave( monitor$ * this, bool join ) { 281 285 __cfaabi_dbg_debug_do( 282 286 if( active_thread() != this->owner ) { … … 344 348 // Ctor for monitor guard 345 349 // Sorts monitors before entering 346 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {350 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) libcfa_public { 347 351 thread$ * thrd = active_thread(); 348 352 … … 369 373 } 370 374 371 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) {375 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) libcfa_public { 372 376 this{ m, count, 0p }; 373 377 } … … 375 379 376 380 // Dtor for monitor guard 377 void ^?{}( monitor_guard_t & this ) {381 void ^?{}( monitor_guard_t & this ) libcfa_public { 378 382 // __cfaabi_dbg_print_safe( "MGUARD : leaving %d\n", this.count); 379 383 … … 389 393 // Ctor for monitor guard 390 394 // Sorts monitors before entering 391 void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {395 void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) libcfa_public { 392 396 // optimization 393 397 thread$ * thrd = active_thread(); … … 409 413 410 414 // Dtor for monitor guard 411 void ^?{}( monitor_dtor_guard_t & this ) {415 void ^?{}( monitor_dtor_guard_t & this ) libcfa_public { 412 416 // Leave the monitors in order 413 417 __dtor_leave( this.m, this.join ); … … 419 423 //----------------------------------------------------------------------------- 420 424 // Internal scheduling types 421 void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {425 static void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) { 422 426 this.waiting_thread = waiting_thread; 423 427 this.count = count; … … 426 430 } 427 431 428 void ?{}(__condition_criterion_t & this ) with( this ) {432 static void ?{}(__condition_criterion_t & this ) with( this ) { 429 433 ready = false; 430 434 target = 0p; … … 433 437 } 434 438 435 void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {439 static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) { 436 440 this.ready = false; 437 441 this.target = target; … … 442 446 
//----------------------------------------------------------------------------- 443 447 // Internal scheduling 444 void wait( condition & this, uintptr_t user_info = 0 ) {448 void wait( condition & this, uintptr_t user_info = 0 ) libcfa_public { 445 449 brand_condition( this ); 446 450 … … 496 500 } 497 501 498 bool signal( condition & this ) {502 bool signal( condition & this ) libcfa_public { 499 503 if( is_empty( this ) ) { return false; } 500 504 … … 538 542 } 539 543 540 bool signal_block( condition & this ) {544 bool signal_block( condition & this ) libcfa_public { 541 545 if( !this.blocked.head ) { return false; } 542 546 … … 586 590 587 591 // Access the user_info of the thread waiting at the front of the queue 588 uintptr_t front( condition & this ) {592 uintptr_t front( condition & this ) libcfa_public { 589 593 verifyf( !is_empty(this), 590 594 "Attempt to access user data on an empty condition.\n" … … 608 612 // setup mask 609 613 // block 610 void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {614 void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) libcfa_public { 611 615 // This statment doesn't have a contiguous list of monitors... 612 616 // Create one! … … 994 998 // Can't be accepted since a mutex stmt is effectively an anonymous routine 995 999 // Thus we do not need a monitor group 996 void lock( monitor$ * this ) {1000 void lock( monitor$ * this ) libcfa_public { 997 1001 thread$ * thrd = active_thread(); 998 1002 … … 1046 1050 // Leave routine for mutex stmt 1047 1051 // Is just a wrapper around __leave for the is_lock trait to see 1048 void unlock( monitor$ * this ) { __leave( this ); }1052 void unlock( monitor$ * this ) libcfa_public { __leave( this ); } 1049 1053 1050 1054 // Local Variables: // -
libcfa/src/concurrency/monitor.hfa
r015925a re5d9274 119 119 } 120 120 121 void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );122 void ?{}(__condition_criterion_t & this );123 void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );121 // void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ); 122 // void ?{}(__condition_criterion_t & this ); 123 // void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner ); 124 124 125 125 struct condition { -
libcfa/src/concurrency/preemption.cfa
r015925a re5d9274 38 38 #endif 39 39 40 __attribute__((weak)) Duration default_preemption() {40 __attribute__((weak)) Duration default_preemption() libcfa_public { 41 41 const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION"); 42 42 if(!preempt_rate_s) { … … 238 238 //---------- 239 239 // special case for preemption since used often 240 __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() {240 __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_public { 241 241 // create a assembler label before 242 242 // marked as clobber all to avoid movement … … 276 276 // Get data from the TLS block 277 277 // struct asm_region __cfaasm_get; 278 uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__ )); //no inline to avoid problems278 uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__, visibility("default"))); //no inline to avoid problems 279 279 uintptr_t __cfatls_get( unsigned long int offset ) { 280 280 // create a assembler label before … … 295 295 extern "C" { 296 296 // Disable interrupts by incrementing the counter 297 void disable_interrupts(){297 __attribute__((__noinline__, visibility("default"))) void disable_interrupts() libcfa_public { 298 298 // create a assembler label before 299 299 // marked as clobber all to avoid movement … … 326 326 // Enable interrupts by decrementing the counter 327 327 // If counter reaches 0, execute any pending __cfactx_switch 328 void enable_interrupts( bool poll ) {328 void enable_interrupts( bool poll ) libcfa_public { 329 329 // Cache the processor now since interrupts can start happening after the atomic store 330 330 processor * proc = __cfaabi_tls.this_processor; … … 362 362 //----------------------------------------------------------------------------- 363 363 // Kernel Signal Debug 364 void __cfaabi_check_preemption() {364 void __cfaabi_check_preemption() libcfa_public { 365 365 bool ready = __preemption_enabled(); 366 366 if(!ready) { abort("Preemption should be ready"); } -
libcfa/src/concurrency/ready_queue.cfa
r015925a re5d9274 125 125 const unsigned long long ctsc = rdtscl(); 126 126 127 if(proc->rdq.target == MAX) {127 if(proc->rdq.target == UINT_MAX) { 128 128 uint64_t chaos = __tls_rand(); 129 129 unsigned ext = chaos & 0xff; … … 137 137 const unsigned target = proc->rdq.target; 138 138 __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, readyQ.tscs[target].tv); 139 /* paranoid */ verify( readyQ.tscs[target].tv != MAX );139 /* paranoid */ verify( readyQ.tscs[target].tv != ULLONG_MAX ); 140 140 if(target < lanes_count) { 141 141 const unsigned long long cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq); … … 147 147 } 148 148 } 149 proc->rdq.target = MAX;149 proc->rdq.target = UINT_MAX; 150 150 } 151 151 … … 245 245 // get preferred ready for new thread 246 246 unsigned ready_queue_new_preferred() { 247 unsigned pref = MAX;247 unsigned pref = UINT_MAX; 248 248 if(struct thread$ * thrd = publicTLS_get( this_thread )) { 249 249 pref = thrd->preferred; -
libcfa/src/concurrency/ready_subqueue.hfa
r015925a re5d9274 32 32 /* paranoid */ verify( this.lock ); 33 33 /* paranoid */ verify( node->link.next == 0p ); 34 /* paranoid */ verify( node->link.ts== MAX );34 /* paranoid */ verify( __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED) == MAX ); 35 35 /* paranoid */ verify( this.prev->link.next == 0p ); 36 /* paranoid */ verify( this.prev->link.ts== MAX );36 /* paranoid */ verify( __atomic_load_n(&this.prev->link.ts, __ATOMIC_RELAXED) == MAX ); 37 37 if( this.anchor.next == 0p ) { 38 38 /* paranoid */ verify( this.anchor.next == 0p ); 39 /* paranoid */ verify( this.anchor.ts== MAX );40 /* paranoid */ verify( this.anchor.ts!= 0 );39 /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) == MAX ); 40 /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0 ); 41 41 /* paranoid */ verify( this.prev == mock_head( this ) ); 42 42 } else { 43 43 /* paranoid */ verify( this.anchor.next != 0p ); 44 /* paranoid */ verify( this.anchor.ts!= MAX );45 /* paranoid */ verify( this.anchor.ts!= 0 );44 /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX ); 45 /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0 ); 46 46 /* paranoid */ verify( this.prev != mock_head( this ) ); 47 47 } … … 62 62 /* paranoid */ verify( this.lock ); 63 63 /* paranoid */ verify( this.anchor.next != 0p ); 64 /* paranoid */ verify( this.anchor.ts!= MAX );65 /* paranoid */ verify( this.anchor.ts != 0);64 /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX ); 65 /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0 ); 66 66 67 67 // Get the relevant nodes locally 68 68 thread$ * node = this.anchor.next; 69 69 this.anchor.next = node->link.next; 70 this.anchor.ts = node->link.ts;70 __atomic_store_n(&this.anchor.ts, __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED), __ATOMIC_RELAXED); 71 71 bool is_empty = this.anchor.next == 0p; 72 72 node->link.next = 0p; 73 node->link.ts = MAX;73 __atomic_store_n(&node->link.ts, ULLONG_MAX, __ATOMIC_RELAXED); 74 74 #if !defined(__CFA_NO_STATISTICS__) 75 75 this.cnt--; … … 79 79 if(is_empty) this.prev = mock_head( this ); 80 80 81 unsigned long long ats = __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED); 81 82 /* paranoid */ verify( node->link.next == 0p ); 82 /* paranoid */ verify( node->link.ts == MAX);83 /* paranoid */ verify( node->link.ts != 0);84 /* paranoid */ verify( this.anchor.ts != 0);85 /* paranoid */ verify( ( this.anchor.ts== MAX) == is_empty );86 return [node, this.anchor.ts];83 /* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) == MAX ); 84 /* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) != 0 ); 85 /* paranoid */ verify( ats != 0 ); 86 /* paranoid */ verify( (ats == MAX) == is_empty ); 87 return [node, ats]; 87 88 } 88 89 … … 96 97 // Cannot verify 'emptiness' here since it may not be locked 97 98 /* paranoid */ verify(this.anchor.ts != 0); 98 return this.anchor.ts; 99 /* paranoid */ static_assert(__atomic_always_lock_free(sizeof(this.anchor.ts), &this.anchor.ts)); 100 return __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED); 99 101 } -
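The ready_subqueue.hfa hunk stops reading and writing the anchor/link timestamps directly and instead goes through __atomic_load_n/__atomic_store_n with relaxed ordering, using ULLONG_MAX as the "empty" sentinel. A minimal illustration of those GCC builtins (not the queue code itself; the anchor struct is a stand-in):

#include <limits.h>

struct anchor { unsigned long long ts; };        // hypothetical stand-in for the queue anchor

static inline unsigned long long read_ts( struct anchor * a ) {
    // relaxed atomic load: no ordering guarantees, but never a torn read
    return __atomic_load_n( &a->ts, __ATOMIC_RELAXED );
}

static inline void mark_empty( struct anchor * a ) {
    // relaxed atomic store of the sentinel value used in the hunk above
    __atomic_store_n( &a->ts, ULLONG_MAX, __ATOMIC_RELAXED );
}

Relaxed ordering adds no fencing; it only guarantees the 64-bit value is read and written whole, which fits a timestamp that is consulted as a scheduling heuristic rather than for synchronization.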
libcfa/src/concurrency/thread.cfa
r015925a re5d9274 19 19 #include "thread.hfa" 20 20 21 #include "exception.hfa" 21 22 #include "kernel/private.hfa" 22 #include " exception.hfa"23 #include "limits.hfa" 23 24 24 25 #define __CFA_INVOKE_PRIVATE__ … … 26 27 27 28 extern uint32_t __global_random_seed, __global_random_prime, __global_random_mask; 29 30 #pragma GCC visibility push(default) 28 31 29 32 //----------------------------------------------------------------------------- … … 42 45 curr_cluster = &cl; 43 46 link.next = 0p; 44 link.ts = -1llu;47 link.ts = MAX; 45 48 preferred = ready_queue_new_preferred(); 46 49 last_proc = 0p; … … 87 90 } 88 91 89 forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled ,(T))90 | { EHM_DEFAULT_VTABLE(ThreadCancelled ,(T)); })92 forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T)) 93 | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); }) 91 94 void ?{}( thread_dtor_guard_t & this, 92 95 T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) { … … 166 169 167 170 //----------------------------------------------------------------------------- 168 forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled ,(T))169 | { EHM_DEFAULT_VTABLE(ThreadCancelled,(T)); })171 forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T)) 172 | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); }) 170 173 T & join( T & this ) { 171 174 thread_dtor_guard_t guard = { this, defaultResumptionHandler }; -
libcfa/src/concurrency/thread.hfa
r015925a re5d9274 32 32 }; 33 33 34 EHM_FORALL_EXCEPTION(ThreadCancelled, (thread_t &), (thread_t)) ( 34 forall(thread_t &) 35 exception ThreadCancelled { 35 36 thread_t * the_thread; 36 37 exception_t * the_exception; 37 );38 }; 38 39 39 40 forall(T &) … … 79 80 }; 80 81 81 forall( T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled ,(T))82 | { EHM_DEFAULT_VTABLE(ThreadCancelled,(T)); })82 forall( T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T)) 83 | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); }) 83 84 void ?{}( thread_dtor_guard_t & this, T & thrd, void(*)(ThreadCancelled(T) &) ); 84 85 void ^?{}( thread_dtor_guard_t & this ); … … 126 127 //---------- 127 128 // join 128 forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled ,(T))129 | { EHM_DEFAULT_VTABLE(ThreadCancelled,(T)); })129 forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T)) 130 | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); }) 130 131 T & join( T & this ); 131 132 -
libcfa/src/containers/maybe.cfa
r015925a re5d9274 17 17 #include <assert.h> 18 18 19 #pragma GCC visibility push(default) 19 20 20 21 forall(T) -
libcfa/src/containers/result.cfa
r015925a re5d9274 17 17 #include <assert.h> 18 18 19 #pragma GCC visibility push(default) 19 20 20 21 forall(T, E) -
libcfa/src/containers/string.cfa
r015925a re5d9274 18 18 #include <stdlib.hfa> 19 19 20 #pragma GCC visibility push(default) 20 21 21 22 /* -
libcfa/src/containers/string_sharectx.hfa
r015925a re5d9274 16 16 #pragma once 17 17 18 #pragma GCC visibility push(default) 19 18 20 //######################### String Sharing Context ######################### 19 21 20 22 struct VbyteHeap; 21 23 22 // A string_sharectx 24 // A string_sharectx 23 25 // 24 26 // Usage: -
libcfa/src/containers/vector.cfa
r015925a re5d9274 18 18 #include <stdlib.hfa> 19 19 20 #pragma GCC visibility push(default) 21 20 22 forall(T, allocator_t | allocator_c(T, allocator_t)) 21 void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other);23 static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other); 22 24 23 25 //------------------------------------------------------------------------------ … … 83 85 84 86 forall(T, allocator_t | allocator_c(T, allocator_t)) 85 void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other)87 static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other) 86 88 { 87 89 this->size = other->size; -
libcfa/src/device/cpu.cfa
r015925a re5d9274 31 31 } 32 32 33 #include "bits/defs.hfa" 33 34 #include "algorithms/range_iterator.hfa" 34 35 … … 456 457 } 457 458 458 cpu_info_t cpu_info;459 libcfa_public cpu_info_t cpu_info; -
libcfa/src/exception.c
r015925a re5d9274 27 27 #include "stdhdr/assert.h" 28 28 #include "virtual.h" 29 30 #pragma GCC visibility push(default) 31 29 32 #include "lsda.h" 30 33 … … 261 264 #else // defined( __ARM_ARCH ) 262 265 // The return code from _Unwind_RaiseException seems to be corrupt on ARM at end of stack. 263 // This workaround tries to keep default exception handling working. 266 // This workaround tries to keep default exception handling working. 264 267 if ( ret == _URC_FATAL_PHASE1_ERROR || ret == _URC_FATAL_PHASE2_ERROR ) { 265 268 #endif -
libcfa/src/exception.hfa
r015925a re5d9274 10 10 // Created On : Thu Apr 7 10:25:00 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Thr Apr 8 15:16:00 202113 // Update Count : 412 // Last Modified On : Wed May 25 17:20:00 2022 13 // Update Count : 5 14 14 // 15 15 … … 18 18 // ----------------------------------------------------------------------------------------------- 19 19 20 // EHM_EXCEPTION(exception_name)(fields...); 21 // Create an exception (a virtual structure that inherits from exception_t) 22 // with the given name and fields. 23 #define EHM_EXCEPTION(exception_name) \ 24 _EHM_TYPE_ID_STRUCT(exception_name, ); \ 25 _EHM_TYPE_ID_VALUE(exception_name, ); \ 26 _EHM_VIRTUAL_TABLE_STRUCT(exception_name, , ); \ 27 _EHM_EXCEPTION_STRUCT(exception_name, , ) 20 // EHM_DEFAULT_VTABLE(exception_type) 21 // Create a declaration for a (possibly polymorphic) default vtable. 22 // Mostly used by and for the currency module. 23 #define EHM_DEFAULT_VTABLE(type) vtable(type) & const _default_vtable 28 24 29 // EHM_EXTERN_VTABLE(exception_name, table_name); 30 // Forward declare a virtual table called table_name for exception_name type. 31 #define EHM_EXTERN_VTABLE(exception_name, table_name) \ 32 _EHM_EXTERN_VTABLE(exception_name, , table_name) 33 34 // EHM_VIRTUAL_TABLE(exception_name, table_name); 35 // Define a virtual table called table_name for exception_name type. 36 #define EHM_VIRTUAL_TABLE(exception_name, table_name) \ 37 _EHM_DEFINE_COPY(exception_name, ) \ 38 _EHM_DEFINE_MSG(exception_name, ) \ 39 _EHM_VIRTUAL_TABLE(exception_name, , table_name) 40 41 // EHM_FORALL_EXCEPTION(exception_name, (assertions), (parameters))(fields...); 42 // As EHM_EXCEPTION but for polymorphic types instead of monomorphic ones. 43 // The assertions list should include all polymorphic parameters and 44 // assertions inside a parentisized list. Parameters should include all the 45 // polymorphic parameter names inside a parentisized list (same order). 46 #define EHM_FORALL_EXCEPTION(exception_name, assertions, parameters) \ 47 _EHM_TYPE_ID_STRUCT(exception_name, forall assertions); \ 48 _EHM_VIRTUAL_TABLE_STRUCT(exception_name, forall assertions, parameters); \ 49 _EHM_EXCEPTION_STRUCT(exception_name, forall assertions, parameters) 50 51 // EHM_FORALL_EXTERN_VTABLE(exception_name, (arguments), table_name); 52 // As EHM_EXTERN_VTABLE but for polymorphic types instead of monomorphic ones. 53 // Arguments should be the parentisized list of polymorphic arguments. 54 #define EHM_FORALL_EXTERN_VTABLE(exception_name, arguments, table_name) \ 55 _EHM_EXTERN_VTABLE(exception_name, arguments, table_name) 56 57 // EHM_FORALL_VIRTUAL_TABLE(exception_name, (arguments), table_name); 58 // As EHM_VIRTUAL_TABLE but for polymorphic types instead of monomorphic ones. 59 // Arguments should be the parentisized list of polymorphic arguments. 60 #define EHM_FORALL_VIRTUAL_TABLE(exception_name, arguments, table_name) \ 61 _EHM_TYPE_ID_VALUE(exception_name, arguments); \ 62 _EHM_DEFINE_COPY(exception_name, arguments) \ 63 _EHM_DEFINE_MSG(exception_name, arguments) \ 64 _EHM_VIRTUAL_TABLE(exception_name, arguments, table_name) 65 66 // EHM_DEFAULT_VTABLE(exception_name, (arguments)) 67 // Create a declaration for a (possibly polymorphic) default vtable. 
68 #define EHM_DEFAULT_VTABLE(exception_name, arguments) \ 69 _EHM_VTABLE_TYPE(exception_name) arguments & const _default_vtable 70 71 // IS_EXCEPTION(exception_name [, (...parameters)]) 72 // IS_RESUMPTION_EXCEPTION(exception_name [, (parameters...)]) 73 // IS_TERMINATION_EXCEPTION(exception_name [, (parameters...)]) 74 // Create an assertion that exception_name, possibly with the qualifing parameters, is the given 75 // kind of exception with the standard vtable with the same parameters if applicable. 76 #define IS_EXCEPTION(...) _IS_EXCEPTION(is_exception, __VA_ARGS__, , ~) 77 #define IS_RESUMPTION_EXCEPTION(...) _IS_EXCEPTION(is_resumption_exception, __VA_ARGS__, , ~) 78 #define IS_TERMINATION_EXCEPTION(...) _IS_EXCEPTION(is_termination_exception, __VA_ARGS__, , ~) 79 80 // Macros starting with a leading underscore are internal. 81 82 // Create an exception type definition. must be tailing, can be polymorphic. 83 #define _EHM_EXCEPTION_STRUCT(exception_name, forall_clause, parameters) \ 84 forall_clause struct exception_name { \ 85 _EHM_VTABLE_TYPE(exception_name) parameters const * virtual_table; \ 86 _CLOSE 87 88 // Create a (possibly polymorphic) virtual table forward declaration. 89 #define _EHM_EXTERN_VTABLE(exception_name, arguments, table_name) \ 90 extern const _EHM_VTABLE_TYPE(exception_name) arguments table_name 91 92 // Create a (possibly polymorphic) virtual table definition. 93 #define _EHM_VIRTUAL_TABLE(exception_type, arguments, table_name) \ 94 const _EHM_VTABLE_TYPE(exception_type) arguments table_name @= { \ 95 .__cfavir_typeid : &_EHM_TYPE_ID_NAME(exception_type), \ 96 .size : sizeof(struct exception_type arguments), \ 97 .copy : copy, \ 98 .^?{} : ^?{}, \ 99 .msg : msg, \ 100 } 101 102 // Create a (possibly polymorphic) copy function from an assignment operator. 103 #define _EHM_DEFINE_FORALL_COPY(exception_name, forall_clause, parameters) \ 104 forall_clause void copy(exception_name parameters * this, \ 105 exception_name parameters * that) { \ 106 *this = *that; \ 107 } 108 109 #define _EHM_DEFINE_COPY(exception_name, arguments) \ 110 void copy(exception_name arguments * this, exception_name arguments * that) { \ 111 *this = *that; \ 112 } 113 114 // Create a (possibly polymorphic) msg function 115 #define _EHM_DEFINE_FORALL_MSG(exception_name, forall_clause, parameters) \ 116 forall_clause const char * msg(exception_name parameters * this) { \ 117 return #exception_name #parameters; \ 118 } 119 120 #define _EHM_DEFINE_MSG(exception_name, arguments) \ 121 const char * msg(exception_name arguments * this) { \ 122 return #exception_name #arguments; \ 123 } 124 125 // Produces the C compatable name of the virtual table type for a virtual type. 126 #define _EHM_VTABLE_TYPE(type_name) struct _GLUE2(type_name,_vtable) 127 128 // Create the vtable type for exception name. 129 #define _EHM_VIRTUAL_TABLE_STRUCT(exception_name, forall_clause, parameters) \ 130 forall_clause struct exception_name; \ 131 forall_clause _EHM_VTABLE_TYPE(exception_name) { \ 132 _EHM_TYPE_ID_TYPE(exception_name) parameters const * __cfavir_typeid; \ 133 size_t size; \ 134 void (*copy)(exception_name parameters * this, exception_name parameters * other); \ 135 void (*^?{})(exception_name parameters & this); \ 136 const char * (*msg)(exception_name parameters * this); \ 137 } 138 139 // Define the function required to satify the trait for exceptions. 
140 #define _EHM_TRAIT_FUNCTION(exception_name, forall_clause, parameters) \ 141 forall_clause inline void mark_exception( \ 142 exception_name parameters const &, \ 143 _EHM_VTABLE_TYPE(exception_name) parameters const &) {} \ 144 145 #define __EHM_TRAIT_FUNCTION(exception_name, forall_clause, parameters) \ 146 forall_clause inline _EHM_VTABLE_TYPE(exception_name) parameters const & \ 147 get_exception_vtable(exception_name parameters const & this) { \ 148 /* This comes before the structure definition, but we know the offset. */ \ 149 /* return (_EHM_VTABLE_TYPE(exception_name) parameters const &)this; */ \ 150 assert(false); \ 151 } 152 153 // Generates a new type-id structure. This is used to mangle the name of the 154 // type-id instance so it also includes polymorphic information. Must be the 155 // direct decendent of exception_t. 156 // The second field is used to recover type information about the exception. 157 #define _EHM_TYPE_ID_STRUCT(exception_name, forall_clause) \ 158 forall_clause _EHM_TYPE_ID_TYPE(exception_name) { \ 159 __cfavir_type_info const * parent; \ 160 } 161 162 // Generate a new type-id value. 163 #define _EHM_TYPE_ID_VALUE(exception_name, arguments) \ 164 __attribute__((cfa_linkonce)) \ 165 _EHM_TYPE_ID_TYPE(exception_name) arguments const \ 166 _EHM_TYPE_ID_NAME(exception_name) = { \ 167 &__cfatid_exception_t, \ 168 } 169 170 // _EHM_TYPE_ID_STRUCT and _EHM_TYPE_ID_VALUE are the two that would need to 171 // be updated to extend the hierarchy if we are still using macros when that 172 // is added. 173 174 // Produce the C compatable name of the type-id type for an exception type. 175 #define _EHM_TYPE_ID_TYPE(exception_name) \ 176 struct _GLUE2(__cfatid_struct_, exception_name) 177 178 // Produce the name of the instance of the type-id for an exception type. 179 #define _EHM_TYPE_ID_NAME(exception_name) _GLUE2(__cfatid_,exception_name) 180 181 #define _IS_EXCEPTION(kind, exception_name, parameters, ...) \ 182 kind(exception_name parameters, _EHM_VTABLE_TYPE(exception_name) parameters) 183 184 // Internal helper macros: 185 #define _CLOSE(...) __VA_ARGS__ } 186 #define _GLUE2(left, right) left##right 25 // IS_EXCEPTION(exception_type) 26 // IS_RESUMPTION_EXCEPTION(exception_type) 27 // IS_TERMINATION_EXCEPTION(exception_type) 28 // Create an assertion that exception_type is the given kind of exception. 29 // This is used to mimic associated types so the vtable type is unmentioned. 30 #define IS_EXCEPTION(type) is_exception(type, vtable(type)) 31 #define IS_RESUMPTION_EXCEPTION(type) is_resumption_exception(type, vtable(type)) 32 #define IS_TERMINATION_EXCEPTION(type) is_termination_exception(type, vtable(type)) -
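The exception.hfa rewrite removes the EHM_EXCEPTION / EHM_VIRTUAL_TABLE / EHM_FORALL_* macro family: exceptions are now declared with the first-class exception construct (as thread.hfa above does for ThreadCancelled), vtable instances are declared directly (as parseconfig.cfa below does), and only thin EHM_DEFAULT_VTABLE and IS_*_EXCEPTION wrappers over vtable(type) remain. A hypothetical monomorphic example in the new style, assuming the non-polymorphic form mirrors the hunks shown (io_failure is an invented name):

exception io_failure {                  // replaces EHM_EXCEPTION(io_failure)( ... );
    int error_code;
};
vtable(io_failure) io_failure_vt;       // replaces EHM_VIRTUAL_TABLE(io_failure, io_failure_vt);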
libcfa/src/fstream.cfa
r015925a re5d9274 22 22 #include <assert.h> 23 23 #include <errno.h> // errno 24 25 #pragma GCC visibility push(default) 24 26 25 27 // *********************************** ofstream *********************************** … … 118 120 // abort | IO_MSG "open output file \"" | name | "\"" | nl | strerror( errno ); 119 121 } // if 120 (os){ file }; // initialize 122 (os){ file }; // initialize 121 123 } // open 122 124 … … 157 159 va_list args; 158 160 va_start( args, format ); 159 161 160 162 int len; 161 163 for ( cnt; 10 ) { … … 241 243 // abort | IO_MSG "open input file \"" | name | "\"" | nl | strerror( errno ); 242 244 } // if 243 (is){ file }; // initialize 245 (is){ file }; // initialize 244 246 } // open 245 247 -
libcfa/src/fstream.hfa
r015925a re5d9274 18 18 #include "bits/weakso_locks.hfa" // mutex_lock 19 19 #include "iostream.hfa" 20 #include <exception.hfa>21 20 22 21 -
libcfa/src/heap.cfa
r015925a re5d9274 36 36 static bool traceHeap = false; 37 37 38 inline bool traceHeap() { return traceHeap; }39 40 bool traceHeapOn() {38 inline bool traceHeap() libcfa_public { return traceHeap; } 39 40 bool traceHeapOn() libcfa_public { 41 41 bool temp = traceHeap; 42 42 traceHeap = true; … … 44 44 } // traceHeapOn 45 45 46 bool traceHeapOff() {46 bool traceHeapOff() libcfa_public { 47 47 bool temp = traceHeap; 48 48 traceHeap = false; … … 50 50 } // traceHeapOff 51 51 52 bool traceHeapTerm() { return false; }52 bool traceHeapTerm() libcfa_public { return false; } 53 53 54 54 55 55 static bool prtFree = false; 56 56 57 bool prtFree() {57 static bool prtFree() { 58 58 return prtFree; 59 59 } // prtFree 60 60 61 bool prtFreeOn() {61 static bool prtFreeOn() { 62 62 bool temp = prtFree; 63 63 prtFree = true; … … 65 65 } // prtFreeOn 66 66 67 bool prtFreeOff() {67 static bool prtFreeOff() { 68 68 bool temp = prtFree; 69 69 prtFree = false; … … 388 388 static unsigned int maxBucketsUsed; // maximum number of buckets in use 389 389 // extern visibility, used by runtime kernel 390 size_t __page_size; // architecture pagesize 391 int __map_prot; // common mmap/mprotect protection 390 // would be cool to remove libcfa_public but it's needed for libcfathread 391 libcfa_public size_t __page_size; // architecture pagesize 392 libcfa_public int __map_prot; // common mmap/mprotect protection 392 393 393 394 … … 727 728 728 729 729 s ize_t prtFree( Heap & manager ) with( manager ) {730 static size_t prtFree( Heap & manager ) with( manager ) { 730 731 size_t total = 0; 731 732 #ifdef __STATISTICS__ … … 879 880 // Allocates size bytes and returns a pointer to the allocated memory. The contents are undefined. If size is 0, 880 881 // then malloc() returns a unique pointer value that can later be successfully passed to free(). 881 void * malloc( size_t size ) {882 void * malloc( size_t size ) libcfa_public { 882 883 #ifdef __STATISTICS__ 883 884 if ( likely( size > 0 ) ) { … … 894 895 895 896 // Same as malloc() except size bytes is an array of dim elements each of elemSize bytes. 896 void * aalloc( size_t dim, size_t elemSize ) {897 void * aalloc( size_t dim, size_t elemSize ) libcfa_public { 897 898 size_t size = dim * elemSize; 898 899 #ifdef __STATISTICS__ … … 910 911 911 912 // Same as aalloc() with memory set to zero. 912 void * calloc( size_t dim, size_t elemSize ) {913 void * calloc( size_t dim, size_t elemSize ) libcfa_public { 913 914 size_t size = dim * elemSize; 914 915 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER … … 951 952 // not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier 952 953 // call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done. 953 void * resize( void * oaddr, size_t size ) {954 void * resize( void * oaddr, size_t size ) libcfa_public { 954 955 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 955 956 if ( unlikely( size == 0 ) ) { // special cases … … 996 997 // Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of 997 998 // the old and new sizes. 998 void * realloc( void * oaddr, size_t size ) {999 void * realloc( void * oaddr, size_t size ) libcfa_public { 999 1000 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
1000 1001 if ( unlikely( size == 0 ) ) { // special cases … … 1060 1061 1061 1062 // Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize. 1062 void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) {1063 void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) libcfa_public { 1063 1064 return realloc( oaddr, dim * elemSize ); 1064 1065 } // reallocarray … … 1066 1067 1067 1068 // Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete) 1068 void * memalign( size_t alignment, size_t size ) {1069 void * memalign( size_t alignment, size_t size ) libcfa_public { 1069 1070 #ifdef __STATISTICS__ 1070 1071 if ( likely( size > 0 ) ) { … … 1081 1082 1082 1083 // Same as aalloc() with memory alignment. 1083 void * amemalign( size_t alignment, size_t dim, size_t elemSize ) {1084 void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public { 1084 1085 size_t size = dim * elemSize; 1085 1086 #ifdef __STATISTICS__ … … 1097 1098 1098 1099 // Same as calloc() with memory alignment. 1099 void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) {1100 void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public { 1100 1101 size_t size = dim * elemSize; 1101 1102 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER … … 1136 1137 // Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple 1137 1138 // of alignment. This requirement is universally ignored. 1138 void * aligned_alloc( size_t alignment, size_t size ) {1139 void * aligned_alloc( size_t alignment, size_t size ) libcfa_public { 1139 1140 return memalign( alignment, size ); 1140 1141 } // aligned_alloc … … 1145 1146 // is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to 1146 1147 // free(3). 1147 int posix_memalign( void ** memptr, size_t alignment, size_t size ) {1148 int posix_memalign( void ** memptr, size_t alignment, size_t size ) libcfa_public { 1148 1149 if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment 1149 1150 *memptr = memalign( alignment, size ); … … 1154 1155 // Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the 1155 1156 // page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size). 1156 void * valloc( size_t size ) {1157 void * valloc( size_t size ) libcfa_public { 1157 1158 return memalign( __page_size, size ); 1158 1159 } // valloc … … 1160 1161 1161 1162 // Same as valloc but rounds size to multiple of page size. 1162 void * pvalloc( size_t size ) {1163 void * pvalloc( size_t size ) libcfa_public { 1163 1164 return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size 1164 1165 } // pvalloc … … 1168 1169 // or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is 1169 1170 // 0p, no operation is performed. 1170 void free( void * addr ) {1171 void free( void * addr ) libcfa_public { 1171 1172 if ( unlikely( addr == 0p ) ) { // special case 1172 1173 #ifdef __STATISTICS__ … … 1189 1190 1190 1191 // Returns the alignment of an allocation. 
1191 size_t malloc_alignment( void * addr ) {1192 size_t malloc_alignment( void * addr ) libcfa_public { 1192 1193 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment 1193 1194 Heap.Storage.Header * header = HeaderAddr( addr ); … … 1201 1202 1202 1203 // Returns true if the allocation is zero filled, e.g., allocated by calloc(). 1203 bool malloc_zero_fill( void * addr ) {1204 bool malloc_zero_fill( void * addr ) libcfa_public { 1204 1205 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 1205 1206 Heap.Storage.Header * header = HeaderAddr( addr ); … … 1212 1213 1213 1214 // Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T). 1214 size_t malloc_size( void * addr ) {1215 size_t malloc_size( void * addr ) libcfa_public { 1215 1216 if ( unlikely( addr == 0p ) ) return 0; // null allocation has zero size 1216 1217 Heap.Storage.Header * header = HeaderAddr( addr ); … … 1224 1225 // Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by 1225 1226 // malloc or a related function. 1226 size_t malloc_usable_size( void * addr ) {1227 size_t malloc_usable_size( void * addr ) libcfa_public { 1227 1228 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size 1228 1229 Heap.Storage.Header * header; … … 1236 1237 1237 1238 // Prints (on default standard error) statistics about memory allocated by malloc and related functions. 1238 void malloc_stats( void ) {1239 void malloc_stats( void ) libcfa_public { 1239 1240 #ifdef __STATISTICS__ 1240 1241 printStats(); … … 1245 1246 1246 1247 // Changes the file descriptor where malloc_stats() writes statistics. 1247 int malloc_stats_fd( int fd __attribute__(( unused )) ) {1248 int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public { 1248 1249 #ifdef __STATISTICS__ 1249 1250 int temp = stats_fd; … … 1259 1260 // The string is printed on the file stream stream. The exported string includes information about all arenas (see 1260 1261 // malloc). 1261 int malloc_info( int options, FILE * stream __attribute__(( unused )) ) {1262 int malloc_info( int options, FILE * stream __attribute__(( unused )) ) libcfa_public { 1262 1263 if ( options != 0 ) { errno = EINVAL; return -1; } 1263 1264 #ifdef __STATISTICS__ … … 1271 1272 // Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The param argument 1272 1273 // specifies the parameter to be modified, and value specifies the new value for that parameter. 1273 int mallopt( int option, int value ) {1274 int mallopt( int option, int value ) libcfa_public { 1274 1275 if ( value < 0 ) return 0; 1275 1276 choose( option ) { … … 1285 1286 1286 1287 // Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument). 1287 int malloc_trim( size_t ) {1288 int malloc_trim( size_t ) libcfa_public { 1288 1289 return 0; // => impossible to release memory 1289 1290 } // malloc_trim … … 1294 1295 // structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function 1295 1296 // result. (The caller must free this memory.) 1296 void * malloc_get_state( void ) {1297 void * malloc_get_state( void ) libcfa_public { 1297 1298 return 0p; // unsupported 1298 1299 } // malloc_get_state … … 1301 1302 // Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data 1302 1303 // structure pointed to by state. 
1303 int malloc_set_state( void * ) {1304 int malloc_set_state( void * ) libcfa_public { 1304 1305 return 0; // unsupported 1305 1306 } // malloc_set_state … … 1307 1308 1308 1309 // Sets the amount (bytes) to extend the heap when there is insufficent free storage to service an allocation. 1309 __attribute__((weak)) size_t malloc_expansion() { return __CFA_DEFAULT_HEAP_EXPANSION__; }1310 __attribute__((weak)) size_t malloc_expansion() libcfa_public { return __CFA_DEFAULT_HEAP_EXPANSION__; } 1310 1311 1311 1312 // Sets the crossover point between allocations occuring in the sbrk area or separately mmapped. 1312 __attribute__((weak)) size_t malloc_mmap_start() { return __CFA_DEFAULT_MMAP_START__; }1313 __attribute__((weak)) size_t malloc_mmap_start() libcfa_public { return __CFA_DEFAULT_MMAP_START__; } 1313 1314 1314 1315 // Amount subtracted to adjust for unfreed program storage (debug only). 1315 __attribute__((weak)) size_t malloc_unfreed() { return __CFA_DEFAULT_HEAP_UNFREED__; }1316 __attribute__((weak)) size_t malloc_unfreed() libcfa_public { return __CFA_DEFAULT_HEAP_UNFREED__; } 1316 1317 } // extern "C" 1317 1318 1318 1319 1319 1320 // Must have CFA linkage to overload with C linkage realloc. 1320 void * resize( void * oaddr, size_t nalign, size_t size ) {1321 void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public { 1321 1322 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 1322 1323 if ( unlikely( size == 0 ) ) { // special cases … … 1380 1381 1381 1382 1382 void * realloc( void * oaddr, size_t nalign, size_t size ) {1383 void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public { 1383 1384 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 1384 1385 if ( unlikely( size == 0 ) ) { // special cases -
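Besides the visibility annotations, the heap.cfa hunks restate the documented semantics of the CFA heap extensions: aalloc/amemalign size an array allocation, and resize, unlike realloc, may discard the old contents when it moves the block. A small usage sketch, assuming these C-level declarations are pulled in through the usual CFA stdlib headers:

#include <stdlib.hfa>                    // assumption: declares aalloc, amemalign, resize

int main() {
    void * a = aalloc( 10, sizeof(int) );           // 10 * sizeof(int) bytes, contents undefined
    void * b = amemalign( 64, 10, sizeof(int) );    // same size, but 64-byte aligned
    a = resize( a, 20 * sizeof(int) );              // may move; old contents need not be preserved
    free( a );
    free( b );
    return 0;
}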
libcfa/src/interpose.cfa
r015925a re5d9274 36 36 //============================================================================================= 37 37 38 void preload_libgcc(void) {38 static void preload_libgcc(void) { 39 39 dlopen( "libgcc_s.so.1", RTLD_NOW ); 40 40 if ( const char * error = dlerror() ) abort( "interpose_symbol : internal error pre-loading libgcc, %s\n", error ); … … 42 42 43 43 typedef void (* generic_fptr_t)(void); 44 generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) {44 static generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) { 45 45 const char * error; 46 46 … … 83 83 //============================================================================================= 84 84 85 void sigHandler_segv( __CFA_SIGPARMS__ );86 void sigHandler_ill ( __CFA_SIGPARMS__ );87 void sigHandler_fpe ( __CFA_SIGPARMS__ );88 void sigHandler_abrt( __CFA_SIGPARMS__ );89 void sigHandler_term( __CFA_SIGPARMS__ );90 91 st ruct {85 static void sigHandler_segv( __CFA_SIGPARMS__ ); 86 static void sigHandler_ill ( __CFA_SIGPARMS__ ); 87 static void sigHandler_fpe ( __CFA_SIGPARMS__ ); 88 static void sigHandler_abrt( __CFA_SIGPARMS__ ); 89 static void sigHandler_term( __CFA_SIGPARMS__ ); 90 91 static struct { 92 92 void (* exit)( int ) __attribute__(( __noreturn__ )); 93 93 void (* abort)( void ) __attribute__(( __noreturn__ )); 94 94 } __cabi_libc; 95 95 96 int cfa_main_returned;96 libcfa_public int cfa_main_returned; 97 97 98 98 extern "C" { … … 148 148 149 149 // Forward declare abort after the __typeof__ call to avoid ambiguities 150 void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));151 void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ ));152 void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));153 void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ ));150 libcfa_public void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); 151 libcfa_public void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )); 152 libcfa_public void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); 153 libcfa_public void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )); 154 154 155 155 extern "C" { 156 void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {156 libcfa_public void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) { 157 157 abort( false, "%s", "" ); 158 158 } 159 159 160 void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) {160 libcfa_public void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) { 161 161 va_list argp; 162 162 va_start( argp, fmt ); … … 165 165 } 166 166 167 void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {167 libcfa_public void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) { 168 168 __cabi_libc.exit( status ); 169 169 } -
libcfa/src/iostream.cfa
r015925a re5d9274 32 32 #include "bitmanip.hfa" // high1 33 33 34 #pragma GCC visibility push(default) 34 35 35 36 // *********************************** ostream *********************************** -
libcfa/src/limits.cfa
r015925a re5d9274 20 20 #include <complex.h> 21 21 #include "limits.hfa" 22 23 #pragma GCC visibility push(default) 22 24 23 25 // Integral Constants -
libcfa/src/memory.cfa
r015925a re5d9274 16 16 #include "memory.hfa" 17 17 #include "stdlib.hfa" 18 19 #pragma GCC visibility push(default) 18 20 19 21 // Internal data object. -
libcfa/src/parseargs.cfa
r015925a re5d9274 24 24 #include "common.hfa" 25 25 #include "limits.hfa" 26 27 #pragma GCC visibility push(default) 26 28 27 29 extern int cfa_args_argc __attribute__((weak)); -
libcfa/src/parseconfig.cfa
r015925a re5d9274 14 14 15 15 16 #pragma GCC visibility push(default) 17 16 18 // *********************************** exceptions *********************************** 17 19 18 20 19 21 // TODO: Add names of missing config entries to exception (see further below) 20 staticvtable(Missing_Config_Entries) Missing_Config_Entries_vt;22 vtable(Missing_Config_Entries) Missing_Config_Entries_vt; 21 23 22 24 [ void ] ?{}( & Missing_Config_Entries this, unsigned int num_missing ) { … … 31 33 32 34 33 staticvtable(Parse_Failure) Parse_Failure_vt;35 vtable(Parse_Failure) Parse_Failure_vt; 34 36 35 37 [ void ] ?{}( & Parse_Failure this, [] char failed_key, [] char failed_value ) { … … 53 55 54 56 55 staticvtable(Validation_Failure) Validation_Failure_vt;57 vtable(Validation_Failure) Validation_Failure_vt; 56 58 57 59 [ void ] ?{}( & Validation_Failure this, [] char failed_key, [] char failed_value ) { … … 110 112 111 113 112 [ bool ] comments( & ifstream in, [] char name ) {114 static [ bool ] comments( & ifstream in, [] char name ) { 113 115 while () { 114 116 in | name; -
libcfa/src/rational.cfa
r015925a re5d9274 17 17 #include "fstream.hfa" 18 18 #include "stdlib.hfa" 19 20 #pragma GCC visibility push(default) 19 21 20 22 forall( T | Arithmetic( T ) ) { -
libcfa/src/startup.cfa
r015925a re5d9274 41 41 } // __cfaabi_appready_shutdown 42 42 43 void disable_interrupts() __attribute__(( weak )) {}44 void enable_interrupts() __attribute__(( weak )) {}43 void disable_interrupts() __attribute__(( weak )) libcfa_public {} 44 void enable_interrupts() __attribute__(( weak )) libcfa_public {} 45 45 46 46 … … 64 64 struct __spinlock_t; 65 65 extern "C" { 66 void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) {}66 void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) libcfa_public {} 67 67 } 68 68 -
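startup.cfa keeps providing weak, do-nothing versions of disable_interrupts, enable_interrupts and the debug lock hook, now also tagged libcfa_public; the threading library supplies strong definitions that override them at link time. The pattern, sketched outside the library (the two definitions live in separate translation units):

// In the base library: a weak default that is safe when no threading library is linked.
__attribute__((weak)) void disable_interrupts( void ) {}

// In the threading library (separate translation unit): an ordinary, strong
// definition, which the linker prefers over the weak one above.
void disable_interrupts( void ) {
    /* actually disable preemption here */
}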
libcfa/src/stdlib.cfa
r015925a re5d9274 25 25 #include <complex.h> // _Complex_I 26 26 #include <assert.h> 27 28 #pragma GCC visibility push(default) 27 29 28 30 //--------------------------------------- … … 225 227 #define GENERATOR LCG 226 228 227 uint32_t __global_random_seed; // sequential/concurrent 228 uint32_t __global_random_state; // sequential only 229 // would be cool to make hidden but it's needed for libcfathread 230 __attribute__((visibility("default"))) uint32_t __global_random_seed; // sequential/concurrent 231 __attribute__((visibility("hidden"))) uint32_t __global_random_state; // sequential only 229 232 230 233 void set_seed( PRNG & prng, uint32_t seed_ ) with( prng ) { state = seed = seed_; GENERATOR( state ); } // set seed -
libcfa/src/strstream.cfa
r015925a re5d9274 1 // 1 // 2 2 // Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo 3 // 3 // 4 4 // The contents of this file are covered under the licence agreement in the 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // strstream.cfa -- 8 // 7 // strstream.cfa -- 8 // 9 9 // Author : Peter A. Buhr 10 10 // Created On : Thu Apr 22 22:24:35 2021 … … 12 12 // Last Modified On : Sun Oct 10 16:13:20 2021 13 13 // Update Count : 101 14 // 14 // 15 15 16 16 #include "strstream.hfa" … … 24 24 #include <unistd.h> // sbrk, sysconf 25 25 26 #pragma GCC visibility push(default) 26 27 27 28 // *********************************** strstream *********************************** -
libcfa/src/time.cfa
r015925a re5d9274 18 18 #include <stdio.h> // snprintf 19 19 #include <assert.h> 20 21 #pragma GCC visibility push(default) 20 22 21 23 static char * nanomsd( long int ns, char * buf ) { // most significant digits -
libcfa/src/virtual.c
r015925a re5d9274 16 16 #include "virtual.h" 17 17 #include "assert.h" 18 19 #pragma GCC visibility push(default) 18 20 19 21 int __cfavir_is_parent( -
src/AST/Expr.cpp
r015925a re5d9274 10 10 // Created On : Wed May 15 17:00:00 2019 11 11 // Last Modified By : Andrew Beach 12 // Created On : Tue Nov 30 14:23:00 202113 // Update Count : 712 // Created On : Wed May 18 13:56:00 2022 13 // Update Count : 8 14 14 // 15 15 … … 21 21 22 22 #include "Copy.hpp" // for shallowCopy 23 #include "Eval.hpp" // for call24 23 #include "GenericSubstitution.hpp" 25 24 #include "LinkageSpec.hpp" … … 67 66 // --- UntypedExpr 68 67 68 bool UntypedExpr::get_lvalue() const { 69 std::string fname = InitTweak::getFunctionName( this ); 70 return lvalueFunctionNames.count( fname ); 71 } 72 69 73 UntypedExpr * UntypedExpr::createDeref( const CodeLocation & loc, const Expr * arg ) { 70 74 assert( arg ); 71 75 72 UntypedExpr * ret = c all( loc, "*?", arg);76 UntypedExpr * ret = createCall( loc, "*?", { arg } ); 73 77 if ( const Type * ty = arg->result ) { 74 78 const Type * base = InitTweak::getPointerBase( ty ); … … 87 91 } 88 92 89 bool UntypedExpr::get_lvalue() const {90 std::string fname = InitTweak::getFunctionName( this );91 return lvalueFunctionNames.count( fname );92 }93 94 93 UntypedExpr * UntypedExpr::createAssign( const CodeLocation & loc, const Expr * lhs, const Expr * rhs ) { 95 94 assert( lhs && rhs ); 96 95 97 UntypedExpr * ret = c all( loc, "?=?", lhs, rhs);96 UntypedExpr * ret = createCall( loc, "?=?", { lhs, rhs } ); 98 97 if ( lhs->result && rhs->result ) { 99 98 // if both expressions are typed, assumes that this assignment is a C bitwise assignment, … … 102 101 } 103 102 return ret; 103 } 104 105 UntypedExpr * UntypedExpr::createCall( const CodeLocation & loc, 106 const std::string & name, std::vector<ptr<Expr>> && args ) { 107 return new UntypedExpr( loc, 108 new NameExpr( loc, name ), std::move( args ) ); 104 109 } 105 110 -
src/AST/Expr.hpp
r015925a re5d9274 230 230 /// Creates a new assignment expression 231 231 static UntypedExpr * createAssign( const CodeLocation & loc, const Expr * lhs, const Expr * rhs ); 232 /// Creates a new call of a variable. 233 static UntypedExpr * createCall( const CodeLocation & loc, 234 const std::string & name, std::vector<ptr<Expr>> && args ); 232 235 233 236 const Expr * accept( Visitor & v ) const override { return v.visit( this ); } -
src/AST/module.mk
r015925a re5d9274 29 29 AST/DeclReplacer.cpp \ 30 30 AST/DeclReplacer.hpp \ 31 AST/Eval.hpp \32 31 AST/Expr.cpp \ 33 32 AST/Expr.hpp \ -
src/CodeGen/CodeGenerator.cc
r015925a re5d9274 1238 1238 } // namespace CodeGen 1239 1239 1240 1241 unsigned Indenter::tabsize = 2;1242 1243 std::ostream & operator<<( std::ostream & out, const BaseSyntaxNode * node ) {1244 if ( node ) {1245 node->print( out );1246 } else {1247 out << "nullptr";1248 }1249 return out;1250 }1251 1252 1240 // Local Variables: // 1253 1241 // tab-width: 4 // -
src/CodeGen/FixMain.cc
r015925a re5d9274 49 49 50 50 } 51 52 bool FixMain::replace_main = false;53 51 54 52 template<typename container> -
src/CodeGen/GenType.cc
r015925a re5d9274 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed May 1 15:24:00 2019