Index: doc/bibliography/pl.bib
===================================================================
--- doc/bibliography/pl.bib	(revision d489da80335483f120776a4061b19c68ca8e988b)
+++ doc/bibliography/pl.bib	(revision ef1da0e2df5a222ca4e50f7e06a2b61f8ba6efcd)
@@ -188,9 +188,9 @@
 	Unstructured {\it sends\/} and {\it accepts\/} are forbidden.  To
 	this the mechanisms of {\it delegation\/} and {\it delay queues\/}
-	are added to enable switching and triggering of activities. 
-	Concurrent subactivities and atomic actions are provided for 
+	are added to enable switching and triggering of activities.
+	Concurrent subactivities and atomic actions are provided for
 	compactness and simplicity.  We show how solutions to many important
 	concurrent problems [sic], such as pipelining, constraint management
-	and ``administration'' can be compactly expressed using these 
+	and ``administration'' can be compactly expressed using these
 	mechanisms.
    },
@@ -529,5 +529,5 @@
 	like ``c is a collection with element type e'', but how such things
 	are used isn't explained.
-	
+
 	For each descriptive class used in a parameter list, an implicit
 	parameter is created that is passed a vector of procedures.
@@ -1172,5 +1172,5 @@
 @techreport{Prokopec11,
     keywords	= {ctrie, concurrent map},
-    contributer = {a3moss@uwaterloo.ca}, 
+    contributer = {a3moss@uwaterloo.ca},
     title	= {Cache-aware lock-free concurrent hash tries},
     author	= {Prokopec, Aleksandar and Bagwell, Phil and Odersky, Martin},
@@ -1500,5 +1500,5 @@
     year	= 2001,
     url		= {http://citeseer.ist.psu.edu/berger01composing.html}
-} 
+}
 
 @article{Andrews83,
@@ -1545,5 +1545,5 @@
 	We give a rationale for our decisions and compare Concurrent C
 	extensions with the concurrent programming facilities in Ada.
-	Concurrent C has been implemented on the UNIX system running on a 
+	Concurrent C has been implemented on the UNIX system running on a
 	single processor.  A distributed version of Concurrent C is being
 	implemented.
@@ -1814,5 +1814,5 @@
     keywords	= {objects, concurrency},
     contributer	= {gjditchfield@plg},
-    author	= {P. A. Buhr and G. J. Ditchfield and B. M. Younger and C. R. Zarnke}, 
+    author	= {P. A. Buhr and G. J. Ditchfield and B. M. Younger and C. R. Zarnke},
     title	= {Concurrency in {C}{\kern-.1em\hbox{\large\texttt{+\kern-.25em+}}}},
     institution	= {Department of Computer Science, University of Waterloo},
@@ -2044,5 +2044,5 @@
     series	= {Lecture Notes in Computer Science, Ed. by G. Goos and J. Hartmanis}
 }
- 
+
 @article{Wang71,
     keywords	= {coroutines},
@@ -2056,5 +2056,5 @@
     pages	= {425-449},
 }
- 
+
 @article{Castagna95,
     keywords	= {type-systems, covariance, contravariance},
@@ -2390,5 +2390,5 @@
     year	= 1996,
 }
- 
+
 @article{Richardson93,
     keywords	= {C++, persistence, database},
@@ -2473,5 +2473,5 @@
     publisher	= {ACM},
     address	= {New York, NY, USA},
-} 
+}
 
 @article{design,
@@ -2700,5 +2700,5 @@
     publisher	= {ACM},
     address	= {New York, NY, USA},
-} 
+}
 
 @book{Eiffel,
@@ -3357,5 +3357,5 @@
     publisher	= {ACM},
     address	= {New York, NY, USA},
-} 
+}
 
 @manual{Fortran95,
@@ -3740,5 +3740,5 @@
     keywords	= {processes, distributed computing},
     contributer	= {pabuhr@plg},
-    author	= {Robert E. Strom and David F. Bacon and Arthur P. Goldberg and Andy Lowry and Daniel M. Yellin and Shaula Alexander Yemini}, 
+    author	= {Robert E. Strom and David F. Bacon and Arthur P. Goldberg and Andy Lowry and Daniel M. Yellin and Shaula Alexander Yemini},
     title	= {Hermes: A Language for Distributed Computing},
     institution	= {IBM T. J. Watson Research Center},
@@ -3751,5 +3751,5 @@
     keywords	= {processes, distributed computing},
     contributer	= {pabuhr@plg},
-    author	= {Robert E. Strom and David F. Bacon and Arthur P. Goldberg and Andy Lowry and Daniel M. Yellin and Shaula Alexander Yemini}, 
+    author	= {Robert E. Strom and David F. Bacon and Arthur P. Goldberg and Andy Lowry and Daniel M. Yellin and Shaula Alexander Yemini},
     title	= {Hermes: A Language for Distributed Computing},
     publisher	= {Prentice-Hall},
@@ -4302,5 +4302,5 @@
     pages	= {85-103}
 }
-   
+
 @article{Murer96,
     keywords	= {iterators, generators, cursors},
@@ -4330,5 +4330,5 @@
 
 % J
-		  
+
 @book{Java,
     keywords	= {Java},
@@ -4627,5 +4627,5 @@
     publisher	= {ACM},
     address	= {New York, NY, USA},
-} 
+}
 
 @article{Dice15,
@@ -4978,5 +4978,5 @@
     number	= 31
 }
- 
+
 @article{Dueck90,
     keywords	= {attribute grammars},
@@ -5107,5 +5107,5 @@
     keywords	= {multiple inheritance},
     contributer	= {pabuhr@plg},
-    author	= {Harry Bretthauer and Thomas Christaller and J\"{u}rgen Kopp}, 
+    author	= {Harry Bretthauer and Thomas Christaller and J\"{u}rgen Kopp},
     title	= {Multiple vs. Single Inheritance in Object-oriented Programming Languages. What do we really want?},
     institution	= {Gesellschaft F\"{u}r Mathematik und Datenverarbeitung mbH},
@@ -5650,5 +5650,5 @@
     publisher	= {ACM},
     address	= {New York, NY, USA},
-} 
+}
 
 @book{Deitel04,
@@ -5827,5 +5827,5 @@
     year	= 1980,
     month	= nov, volume = 15, number = 11, pages = {47-56},
-    note	= {Proceedings of the ACM-SIGPLAN Symposium on the {Ada} Programming Language}, 
+    note	= {Proceedings of the ACM-SIGPLAN Symposium on the {Ada} Programming Language},
     comment	= {
         The two-pass (bottom-up, then top-down) algorithm, with a proof
@@ -5957,5 +5957,5 @@
         Given a base typed lambda calculus with function types, type
 	abstractions, and a recursive expression \(\mbox{fix } x:t.e\),
-	then type inference for the partially typed language 
+	then type inference for the partially typed language
 	\begin{eqnarray}
 	\lambda x:\tau.e	&\Rightarrow& \lambda x.e	\\
@@ -6603,5 +6603,5 @@
 	manner.  The paper also discusses efficient composition of
 	sequences of asynchronous calls to different locations in a
-	network. 
+	network.
     }
 }
@@ -6616,5 +6616,5 @@
     volume	= 32, number = 4, pages = {305-311},
     abstract	= {
-        
+
     }
 }
@@ -6934,5 +6934,5 @@
 	partitioning switch statements into dense tables.  It also
 	implements target-independent function tracing and expression-level
-	profiling. 
+	profiling.
     }
 }
@@ -7150,5 +7150,5 @@
     publisher	= {ACM},
     address	= {New York, NY, USA},
-} 
+}
 
 @inproceedings{Leissa14,
@@ -7268,5 +7268,5 @@
     keywords	= {Smalltalk, abstract class, protocol},
     contributer	= {gjditchfield@plg},
-    author	= {A. Goldberg and D. Robson}, 
+    author	= {A. Goldberg and D. Robson},
     title	= {Smalltalk-80: The Language and its Implementation},
     publisher	= {Addison-Wesley},
@@ -7845,5 +7845,5 @@
     title	= {Thread (computing)},
     author	= {{Threading Model}},
-    howpublished= {\href{https://en.wikipedia.org/wiki/Thread_(computing)}{https://\-en.wikipedia.org/\-wiki/\-Thread\_(computing)}},
+    howpublished= {\href{https://en.wikipedia.org/wiki/Thread_(computing)}{https://\-en.wikipedia.org/\-wiki/\-Thread\_\-(computing)}},
 }
 
@@ -7889,5 +7889,5 @@
     note	= {Lecture Notes in Computer Science, v. 19},
     abstract	= {
-        
+
     }
 }
@@ -8008,5 +8008,5 @@
     publisher	= {USENIX Association},
     address	= {Berkeley, CA, USA},
-} 
+}
 
 @article{Leroy00,
@@ -8354,5 +8354,5 @@
     author	= {Bjarne Stroustrup},
     title	= {What is ``Object-Oriented Programming''?},
-    booktitle	= {Proceedings of the First European Conference on Object Oriented Programming}, 
+    booktitle	= {Proceedings of the First European Conference on Object Oriented Programming},
     month	= jun,
     year	= 1987
@@ -8396,5 +8396,5 @@
     publisher	= {ACM},
     address	= {New York, NY, USA},
-} 
+}
 
 % X
Index: doc/theses/thierry_delisle_PhD/thesis/local.bib
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/local.bib	(revision d489da80335483f120776a4061b19c68ca8e988b)
+++ doc/theses/thierry_delisle_PhD/thesis/local.bib	(revision ef1da0e2df5a222ca4e50f7e06a2b61f8ba6efcd)
@@ -583,5 +583,5 @@
   title={Per-entity load tracking},
   author={Corbet, Jonathan},
-  journal={LWN article, available at: https://lwn.net/Articles/531853},
+  journal={LWN article, available at: {\href{https://lwn.net/Articles/531853}{https://\-lwn.net/\-Articles/\-531853}}},
   year={2013}
 }
@@ -717,5 +717,5 @@
   title = {Scheduling Benchmarks},
   author = {Thierry Delisle},
-  howpublished = {\href{https://github.com/cforall/SchedulingBenchmarks_PhD22}{https://\-github.com/\-cforall/\-SchedulingBenchmarks\_\-PhD22}},
+  howpublished = {\href{https://github.com/cforall/SchedulingBenchmarks_PhD22}{https://\-github.com/\-cforall/\-Scheduling\-Benchmarks\_\-PhD22}},
 }
 
@@ -832,5 +832,5 @@
   title      = "eventfd(2) Linux User's Manual",
   year       = "2019",
-  month      = "MArch",
+  month      = "March",
 }
 
@@ -1060,5 +1060,5 @@
   year = "2020",
   month = "June",
-  howpublished = "\href{https://xkcd.com/2318/}",
+  howpublished = "\href{https://xkcd.com/2318/}{https://\-xkcd.com/\-2318/}",
   note = "[Online; accessed 10-June-2020]"
 }
@@ -1069,5 +1069,5 @@
   year = "2011",
   month = "June",
-  howpublished = "\href{https://xkcd.com/908/}",
+  howpublished = "\href{https://xkcd.com/908/}{https://\-xkcd.com/\-908/}",
   note = "[Online; accessed 25-August-2022]"
 }
Index: doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex	(revision d489da80335483f120776a4061b19c68ca8e988b)
+++ doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex	(revision ef1da0e2df5a222ca4e50f7e06a2b61f8ba6efcd)
@@ -187,5 +187,5 @@
 Looking at the left column on AMD, Figures~\ref{fig:cycle:nasus:ops} and \ref{fig:cycle:nasus:ns}, all 4 runtimes achieve very similar throughput and scalability.
 However, as the number of \procs grows higher, the results on AMD show notably more variability than on Intel.
-The different performance improvements and plateaus are due to cache topology and appear at the expected: \proc counts of 64, 128 and 192, for the same reasons as on Intel.
+The different performance improvements and plateaus are due to cache topology and appear at the expected \proc counts of 64, 128 and 192, for the same reasons as on Intel.
 Looking next at the right column on AMD, Figures~\ref{fig:cycle:nasus:low:ops} and \ref{fig:cycle:nasus:low:ns}, Tokio and Go have the same throughput performance, while \CFA is slightly slower.
 This result is different than on Intel, where Tokio behaved like \CFA rather than behaving like Go.
@@ -491,6 +491,6 @@
 \section{Locality}
 
-As mentioned in the churn benchmark, when \glslink{atsched}{unparking} a \at, it is possible to either \unpark to the local or remote ready-queue.\footnote{
-It is also possible to \unpark to a third unrelated ready-queue, but without additional knowledge about the situation, it is likely to degrade performance.}
+As mentioned in the churn benchmark, when \glslink{atsched}{unparking} a \at, it is possible to \unpark to either the local or a remote sub-queue.\footnote{
+It is also possible to \unpark to a third unrelated sub-queue, but without additional knowledge about the situation, it is likely to degrade performance.}
 The locality experiment includes two variations of the churn benchmark, where a data array is added.
 In both variations, before @V@ing the semaphore, each \at calls a @work@ function which increments random cells inside the data array.
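A minimal sketch of such a work function, with assumed names and types (the thesis's actual benchmark code may differ, and per-\at random seeding is omitted):

    #include <stddef.h>   // size_t
    #include <stdlib.h>   // rand

    // Hypothetical sketch: each \at touches `count` random cells of the shared
    // array before @V@ing its semaphore, so cache lines migrate with the \at.
    void work( unsigned int * data, size_t size, size_t count ) {
        for ( size_t i = 0; i < count; i += 1 ) {
            data[ rand() % size ] += 1;        // increment a random cell
        }
    }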
@@ -720,6 +720,6 @@
 
 In both variations, the experiment effectively measures how long it takes for all \ats to run once after a given synchronization point.
-In an ideal scenario where the scheduler is strictly FIFO, every thread would run once after the synchronization and therefore the delay between leaders would be given by, $(CSL + SL) / (NP - 1)$,
-where $CSL$ is the context-switch latency, $SL$ is the cost for enqueueing and dequeuing a \at, and $NP$ is the number of \procs.
+In an ideal scenario where the scheduler is strictly FIFO, every thread would run once after the synchronization and therefore the delay between leaders would be given by $NT(CSL + SL) / (NP - 1)$,
+where $CSL$ is the context-switch latency, $SL$ is the cost for enqueueing and dequeuing a \at, $NT$ is the number of \ats, and $NP$ is the number of \procs.
 However, if the scheduler allows \ats to run many times before other \ats can run once, this delay increases.
 The semaphore version is an approximation of strictly FIFO scheduling, where none of the \ats \emph{attempt} to run more than once.
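For concreteness, plugging illustrative values (assumptions for this sketch, not measurements from the experiment) into the ideal-delay formula:

    % Illustrative values only: NT = 100 \ats, NP = 4 \procs,
    % CSL = 1 microsecond, SL = 0.5 microseconds
    \[ \frac{NT (CSL + SL)}{NP - 1} = \frac{100 \times (1 + 0.5)\,\mu s}{4 - 1} = 50\,\mu s \]

so under strict FIFO scheduling a leader change would be observed roughly every 50 microseconds.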
@@ -734,7 +734,4 @@
 
 \begin{table}
-\caption[Transfer Benchmark on Intel and AMD]{Transfer Benchmark on Intel and AMD\smallskip\newline Average measurement of how long it takes for all \ats to acknowledge the leader \at.
-DNC stands for ``did not complete'', meaning that after 5 seconds of a new leader being decided, some \ats still had not acknowledged the new leader.}
-\label{fig:transfer:res}
 \setlength{\extrarowheight}{2pt}
 \setlength{\tabcolsep}{5pt}
@@ -753,4 +750,7 @@
 \end{tabular}
 \end{centering}
+\caption[Transfer Benchmark on Intel and AMD]{Transfer Benchmark on Intel and AMD\smallskip\newline Average measurement of how long it takes for all \ats to acknowledge the leader \at.
+DNC stands for ``did not complete'', meaning that 5 seconds after a new leader was decided, some \ats still had not acknowledged the new leader.}
+\label{fig:transfer:res}
 \end{table}
 
@@ -768,9 +768,8 @@
 
 Looking at the next two columns, the results for the yield variation on Intel, the story is very different.
-\CFA achieves better latencies, presumably due to no synchronization with the yield.
+\CFA achieves better latencies, presumably due to the lack of synchronization with the yield.
 Go does complete the experiment, but with drastically higher latency:
 latency at 2 \procs is $350\times$ higher than \CFA and $70\times$ higher at 192 \procs.
-This difference is because Go has a classic work-stealing scheduler, but it adds coarse-grain preemption
-, which interrupts the spinning leader after a period.
+This difference is because Go has a classic work-stealing scheduler, but it adds coarse-grain preemption, which interrupts the spinning leader after a period.
 Neither Libfibre nor Tokio complete the experiment.
 Both runtimes also use classical work-stealing scheduling without preemption, and therefore, none of the work queues are ever emptied so no load balancing occurs.
Index: doc/theses/thierry_delisle_PhD/thesis/text/io.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/io.tex	(revision d489da80335483f120776a4061b19c68ca8e988b)
+++ doc/theses/thierry_delisle_PhD/thesis/text/io.tex	(revision ef1da0e2df5a222ca4e50f7e06a2b61f8ba6efcd)
@@ -25,5 +25,5 @@
 \paragraph{\lstinline{select}} is the oldest of these options, and takes as input a contiguous array of bits, where each bit represents a file descriptor of interest.
 Hence, the array length must be as long as the largest FD currently of interest.
-On return, it outputs the set in place to identify which of the file descriptors changed state.
+On return, it outputs the set modified in place to identify which of the file descriptors changed state.
 This destructive change means selecting in a loop requires re-initializing the array for each iteration.
 Another limit of @select@ is that calls from different \glspl{kthrd} sharing FDs are independent.
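A minimal C sketch of the re-initialization burden just described (the loop structure and handling logic are illustrative):

    #include <sys/select.h>

    // select() overwrites the interest set in place, so it must be rebuilt
    // from scratch on every loop iteration.
    void select_loop( const int fds[], int nfds ) {
        for ( ;; ) {
            fd_set rfds;
            FD_ZERO( &rfds );                          // re-initialize each iteration
            int maxfd = -1;
            for ( int i = 0; i < nfds; i += 1 ) {
                FD_SET( fds[i], &rfds );
                if ( fds[i] > maxfd ) maxfd = fds[i];
            }
            if ( select( maxfd + 1, &rfds, NULL, NULL, NULL ) < 0 ) break;
            for ( int i = 0; i < nfds; i += 1 )
                if ( FD_ISSET( fds[i], &rfds ) ) { /* fds[i] changed state */ }
        }
    }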
@@ -35,5 +35,5 @@
 \paragraph{\lstinline{poll}} is the next oldest option, and takes as input an array of structures containing the FD numbers rather than their position in an array of bits, allowing a more compact input for interest sets that contain widely spaced FDs.
 For small interest sets with densely packed FDs, the @select@ bit mask can take less storage, and hence, copy less information into the kernel.
-Furthermore, @poll@ is non-destructive, so the array of structures does not have to be re-initialized on every call.
+However, @poll@ is non-destructive, so the array of structures does not have to be re-initialized on every call.
 Like @select@, @poll@ suffers from the limitation that the interest set cannot be changed by other \glspl{kthrd}, while a manager thread is blocked in @poll@.
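The contrast with @poll@'s non-destructive interface, again as an illustrative sketch:

    #include <poll.h>

    // The pollfd array is initialized once; poll() writes results into the
    // separate revents fields, leaving the interest set intact across calls.
    void poll_loop( const int fds[], int nfds ) {
        struct pollfd pfds[ nfds ];
        for ( int i = 0; i < nfds; i += 1 ) {
            pfds[i].fd = fds[i];
            pfds[i].events = POLLIN;                   // set up once
        }
        for ( ;; ) {
            if ( poll( pfds, nfds, -1 ) < 0 ) break;
            for ( int i = 0; i < nfds; i += 1 )
                if ( pfds[i].revents & POLLIN ) { /* fds[i] is readable */ }
        }
    }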
 
@@ -314,5 +314,5 @@
 A simple approach to polling is to allocate a \at per @io_uring@ instance and simply let the poller \ats poll their respective instances when scheduled.
 
-With the pool of SEQ instances approach, the big advantage is that it is fairly flexible.
+With the pool of SQE instances approach, the big advantage is that it is fairly flexible.
 It does not impose restrictions on what \ats submitting \io operations can and cannot do between allocations and submissions.
 It also can gracefully handle running out of resources, SQEs or the kernel returning @EBUSY@.
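The two failure points this flexibility must absorb are visible even through the liburing helper API (a sketch only; the thesis manipulates the rings directly, so this is not its actual code):

    #include <errno.h>
    #include <liburing.h>

    // Returns the number of submitted entries, or -1 if the caller must retry:
    // either no SQE is free, or the kernel reports EBUSY on submission.
    int submit_read( struct io_uring * ring, int fd, char * buf, unsigned len ) {
        struct io_uring_sqe * sqe = io_uring_get_sqe( ring );
        if ( sqe == NULL ) return -1;                  // ring full: no SQE available
        io_uring_prep_read( sqe, fd, buf, len, 0 );
        int ret = io_uring_submit( ring );
        if ( ret == -EBUSY ) return -1;                // kernel busy: drain completions first
        return ret;
    }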
@@ -320,5 +320,5 @@
 The routing and allocation algorithm needs to keep track of which ring instances have available SQEs, block incoming requests if no instance is available, prevent barging if \ats are already queued up waiting for SQEs and handle SQEs being freed.
 The submission side needs to safely append SQEs to the ring buffer, correctly handle chains, make sure no SQE is dropped or left pending forever, notify the allocation side when SQEs can be reused, and handle the kernel returning @EBUSY@.
-Compared to the private-instance approach, all this synchronization has a significant cost this synchronization is entirely overhead.
+Compared to the private-instance approach, all this synchronization has a significant cost, and it is entirely overhead.
 
 \subsubsection{Instance borrowing}
Index: doc/theses/thierry_delisle_PhD/thesis/text/practice.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/practice.tex	(revision d489da80335483f120776a4061b19c68ca8e988b)
+++ doc/theses/thierry_delisle_PhD/thesis/text/practice.tex	(revision ef1da0e2df5a222ca4e50f7e06a2b61f8ba6efcd)
@@ -101,5 +101,5 @@
 This leaves too many \procs when there are not enough \ats for all the \procs to be useful.
 These idle \procs cannot be removed because their lifetime is controlled by the application, and only the application knows when the number of \ats may increase or decrease.
-While idle \procs can spin until work appears, this approach wastes energy, unnecessarily produces heat and prevents other applications from using the processor.
+While idle \procs can spin until work appears, this approach wastes energy, unnecessarily produces heat and prevents other applications from using the \gls{hthrd}.
 Therefore, idle \procs are put into an idle state, called \newterm{Idle-Sleep}, where the \gls{kthrd} is blocked until the scheduler deems it is needed.
 
@@ -107,5 +107,5 @@
 First, a data structure needs to keep track of all \procs that are in idle sleep.
 Because idle sleep is spurious, this data structure has strict performance requirements, in addition to strict correctness requirements.
-Next, some mechanism is needed to block \glspl{kthrd}, \eg @pthread_cond_wait@ on a pthread semaphore.
+Next, some mechanism is needed to block \glspl{kthrd}, \eg @pthread_cond_wait@ or a pthread semaphore.
 The complexity here is to support \at \glslink{atblock}{parking} and \glslink{atsched}{unparking}, user-level locking, timers, \io operations, and all other \CFA features with minimal complexity.
 Finally, the scheduler needs a heuristic to determine when to block and unblock an appropriate number of \procs.
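A minimal pthreads sketch of the blocking mechanism just described (all names are assumed; \CFA's real implementation must also integrate timers and \io, which this omits):

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t  wake;
        bool            needed;
    } idle_slot;

    // An idle processor blocks here until the scheduler deems it needed.
    void idle_sleep( idle_slot * s ) {
        pthread_mutex_lock( &s->lock );
        while ( ! s->needed )                          // tolerate spurious wake-ups
            pthread_cond_wait( &s->wake, &s->lock );
        s->needed = false;
        pthread_mutex_unlock( &s->lock );
    }

    // Called by the scheduler when enough work appears for another processor.
    void idle_wake( idle_slot * s ) {
        pthread_mutex_lock( &s->lock );
        s->needed = true;
        pthread_cond_signal( &s->wake );
        pthread_mutex_unlock( &s->lock );
    }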
