Index: doc/theses/thierry_delisle_PhD/thesis/local.bib
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/local.bib	(revision 511a936813b37ad7d2002f0d594c479aed4dd644)
+++ doc/theses/thierry_delisle_PhD/thesis/local.bib	(revision 80402869270f5295e4f74c38b827512b732bcf72)
@@ -429,4 +429,35 @@
 }
 
+@inproceedings{Albers12,
+    author	= {Susanne Albers and Antonios Antoniadis},
+    title	= {Race to Idle: New Algorithms for Speed Scaling with a Sleep State},
+    booktitle	= {Proceedings of the 2012  Annual ACM-SIAM Symposium on Discrete Algorithms (SODA)},
+    doi		= {10.1137/1.9781611973099.100},
+    URL		= {https://epubs.siam.org/doi/abs/10.1137/1.9781611973099.100},
+    eprint	= {https://epubs.siam.org/doi/pdf/10.1137/1.9781611973099.100},
+    year	= 2012,
+    month	= jan,
+    pages	= {1266--1285},
+}
+
+@inproceedings{atikoglu2012workload,
+  title={Workload analysis of a large-scale key-value store},
+  author={Atikoglu, Berk and Xu, Yuehai and Frachtenberg, Eitan and Jiang, Song and Paleczny, Mike},
+  booktitle={Proceedings of the 12th ACM SIGMETRICS/PERFORMANCE joint international conference on Measurement and Modeling of Computer Systems},
+  pages={53--64},
+  year={2012}
+}
+
+@article{schillings1996engineering,
+  title={Be engineering insights: Benaphores},
+  author={Schillings, Benoit},
+  journal={Be Newsletters},
+  volume={1},
+  number={26},
+  year={1996}
+}
+
+
+
 % --------------------------------------------------
 % ULE FreeBSD scheduler
@@ -582,4 +613,98 @@
 }
 
+@misc{apache,
+  key = {Apache Software Foundation},
+  title = {{T}he {A}pache Web Server},
+  howpublished = {\href{http://httpd.apache.org}{http://\-httpd.apache.org}},
+  note = "[Online; accessed 6-June-2022]"
+}
+
+@misc{memcached,
+  author = {Fitzpatrick, Brad},
+  title = {{M}emcached},
+  year = {2003},
+  howpublished = {\href{http://memcached.org}{http://\-memcached.org}},
+  note = "[Online; accessed 6-June-2022]"
+}
+
+@misc{libuv,
+  author = {libuv team},
+  title = {libuv: Asynchronous I/O made simple.},
+  howpublished = {\href{https://libuv.org/}{https://\-libuv.org/}},
+  note = "[Online; accessed 5-August-2022]"
+}
+
+@misc{SeriallyReusable,
+    author	= {IBM},
+    title	= {Serially reusable programs},
+    month	= mar,
+    howpublished= {\href{https://www.ibm.com/docs/en/ztpf/1.1.0.15?topic=structures-serially-reusable-programs}{https://www.ibm.com/\-docs/\-en/\-ztpf/\-1.1.0.15?\-topic=structures\--serially\--reusable-programs}},
+    year	= 2021,
+}
+
+@misc{GITHUB:mutilate,
+  title = {Mutilate: high-performance memcached load generator},
+  author = {Leverich, Jacob},
+  howpublished = {\href{https://github.com/leverich/mutilate}{https://\-github.com/\-leverich/\-mutilate}},
+  version = {Change-Id: d65c6ef7c2f78ae05a9db3e37d7f6ddff1c0af64}
+}
+
+% --------------------------------------------------
+% Tech documents
+@techreport{rfc:tcp,
+  title={Transmission control protocol},
+  author={Postel, Jon},
+  number={RFC 793}, institution={Internet Engineering Task Force}, year={1981}
+}
+
+@manual{win:priority,
+  key = {TaskSettings Priority},
+  title = {TaskSettings.Priority property},
+  year = "2020",
+  month = "September",
+  howpublished = {\href{https://docs.microsoft.com/en-us/windows/win32/taskschd/tasksettings-priority}{https://\-docs.microsoft.com/\-en-us/\-windows/\-win32/\-taskschd/\-tasksettings-priority}},
+  note = "[Online; accessed 5-August-2022]"
+}
+
+@manual{win:overlap,
+  key = {Synchronous and Asynchronous IO},
+  title = {Synchronous and Asynchronous {I/O}},
+  year = "2021",
+  month = "March",
+  howpublished = {\href{https://docs.microsoft.com/en-us/windows/win32/fileio/synchronous-and-asynchronous-i-o}{https://\-docs.microsoft.com/\-en-us/\-windows/\-win32/\-fileio/\-synchronous-and-asynchronous-i-o}},
+  note = "[Online; accessed 5-August-2022]"
+}
+
+@book{russinovich2009windows,
+  title={Windows Internals},
+  author={Russinovich, M.E. and Solomon, D.A. and Ionescu, A.},
+  isbn={9780735625303},
+  lccn={2009927697},
+  series={Developer Reference Series},
+  url={https://books.google.ca/books?id=SfglSQAACAAJ},
+  year={2009},
+  publisher={Microsoft Press}
+}
+
+@manual{apple:gcd,
+  key = {Grand Central Dispatch},
+  title = {Grand Central Dispatch},
+  year = "2022",
+  author = {Apple Inc.},
+  howpublished = {https://developer.apple.com/documentation/DISPATCH},
+  note = "[Online; accessed 5-August-2022]"
+}
+
+@techreport{apple:gcd2,
+  key = {Grand Central Dispatch},
+  title = {Grand Central Dispatch, A better way to do multicore.},
+  year = "2009",
+  month = "August",
+  author = {Apple Inc.},
+  howpublished = {\href{http://web.archive.org/web/20090920043909/http://images.apple.com/macosx/technology/docs/GrandCentral_TB_brief_20090903.pdf}{http://web.archive.org/web/20090920043909/http://\-images.apple.com/\-macosx/\-technology/\-docs/\-GrandCentral\_TB\_brief\_20090903.pdf}},
+  note = "[Online; accessed 5-August-2022]"
+}
+
+
 % --------------------------------------------------
 % Man Pages
@@ -617,4 +742,18 @@
   year       = "2019",
   month      = "March",
+}
+
+@manual{MAN:sendfile,
+  key        = "sendfile",
+  title      = "sendfile(2) Linux User's Manual",
+  year       = "2017",
+  month      = "September",
+}
+
+@manual{MAN:splice,
+  key        = "splice",
+  title      = "splice(2) Linux User's Manual",
+  year       = "2019",
+  month      = "May",
 }
 
@@ -709,25 +848,22 @@
 }
 
-% RMR notes :
-% [05/04, 12:36] Trevor Brown
-%     i don't know where rmr complexity was first introduced, but there are many many many papers that use the term and define it
-% [05/04, 12:37] Trevor Brown
-%     here's one paper that uses the term a lot and links to many others that use it... might trace it to something useful there https://drops.dagstuhl.de/opus/volltexte/2021/14832/pdf/LIPIcs-DISC-2021-30.pdf
-% [05/04, 12:37] Trevor Brown
-%     another option might be to cite a textbook
-% [05/04, 12:42] Trevor Brown
-%     but i checked two textbooks in the area i'm aware of and i don't see a definition of rmr complexity in either
-% [05/04, 12:42] Trevor Brown
-%     this one has a nice statement about the prevelance of rmr complexity, as well as some rough definition
-% [05/04, 12:42] Trevor Brown
-%     https://dl.acm.org/doi/pdf/10.1145/3465084.3467938
-
-% Race to idle notes :
-% [13/04, 16:56] Martin Karsten
-%       I don't have a citation. Google brings up this one, which might be good:
-%
-% https://doi.org/10.1137/1.9781611973099.100
-
-
+@misc{wiki:ma,
+  author = "{Wikipedia contributors}",
+  title = "Moving average --- {W}ikipedia{,} The Free Encyclopedia",
+  year = "2022",
+  howpublished = "\href{https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average}{https://\-en.wikipedia.org/\-wiki/\-Moving\_average\#Exponential\_moving\_average}",
+  note = "[Online; accessed 5-August-2022]"
+}
+
+@misc{wiki:jni,
+  author = "{Wikipedia contributors}",
+  title = "Java Native Interface --- {W}ikipedia{,} The Free Encyclopedia",
+  year = "2021",
+  howpublished = "\href{https://en.wikipedia.org/wiki/Java_Native_Interface}{https://\-en.wikipedia.org/\-wiki/\-Java\_Native\_Interface}",
+  note = "[Online; accessed 5-August-2022]"
+}
+
+% --------------------------------------------------
+% True Misc
 @misc{AIORant,
   author = "Linus Torvalds",
@@ -739,28 +875,24 @@
 }
 
-@misc{apache,
-  key = {Apache Software Foundation},
-  title = {{T}he {A}pache Web Server},
-  howpublished = {\href{http://httpd.apache.org}{http://\-httpd.apache.org}},
-  note = "[Online; accessed 6-June-2022]"
-}
-
-@misc{SeriallyReusable,
-    author	= {IBM},
-    title	= {Serially reusable programs},
-    month	= mar,
-    howpublished= {\href{https://www.ibm.com/docs/en/ztpf/1.1.0.15?topic=structures-serially-reusable-programs}{https://www.ibm.com/\-docs/\-en/\-ztpf/\-1.1.0.15?\-topic=structures\--serially\--reusable-programs}},
-    year	= 2021,
-}
-
-@inproceedings{Albers12,
-    author	= {Susanne Albers and Antonios Antoniadis},
-    title	= {Race to Idle: New Algorithms for Speed Scaling with a Sleep State},
-    booktitle	= {Proceedings of the 2012  Annual ACM-SIAM Symposium on Discrete Algorithms (SODA)},
-    doi		= {10.1137/1.9781611973099.100},
-    URL		= {https://epubs.siam.org/doi/abs/10.1137/1.9781611973099.100},
-    eprint	= {https://epubs.siam.org/doi/pdf/10.1137/1.9781611973099.100},
-    year	= 2012,
-    month	= jan,
-    pages	= {1266-1285},
-}
+@misc{xkcd:dynamicentropy,
+  author = "Randall Munroe",
+  title = "2318: Dynamic Entropy",
+  year = "2020",
+  month = "June",
+  howpublished = "\href{https://xkcd.com/2318/}{https://\-xkcd.com/\-2318/}",
+  note = "[Online; accessed 10-June-2020]"
+}
+
+@misc{go:safepoints,
+  author = "{The Go Programming Language}",
+  title = "src/runtime/preempt.go",
+  howpublished = {\href{https://go.dev/src/runtime/preempt.go}{https://\-go.dev/\-src/\-runtime/\-preempt.go}},
+  note = "[Online; accessed 5-August-2022]"
+}
+
+@misc{go:cgo,
+  author = "{The Go Programming Language}",
+  title = "cgo",
+  howpublished = {\href{https://pkg.go.dev/cmd/cgo}{https://\-pkg.go.dev/\-cmd/\-cgo}},
+  note = "[Online; accessed 5-August-2022]"
+}
Index: doc/theses/thierry_delisle_PhD/thesis/text/core.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/core.tex	(revision 511a936813b37ad7d2002f0d594c479aed4dd644)
+++ doc/theses/thierry_delisle_PhD/thesis/text/core.tex	(revision 80402869270f5295e4f74c38b827512b732bcf72)
@@ -15,5 +15,5 @@
 For threading, a simple and common execution mental-model is the ``Ideal multi-tasking CPU'' :
 
-\begin{displayquote}[Linux CFS\cit{https://www.kernel.org/doc/Documentation/scheduler/sched-design-CFS.txt}]
+\begin{displayquote}[Linux CFS\cite{MAN:linux/cfs}]
 	{[The]} ``Ideal multi-tasking CPU'' is a (non-existent  :-)) CPU that has 100\% physical power and which can run each task at precise equal speed, in parallel, each at [an equal fraction of the] speed.  For example: if there are 2 tasks running, then it runs each at 50\% physical power --- i.e., actually in parallel.
 	\label{q:LinuxCFS}
@@ -183,9 +183,9 @@
 This suggests to the following approach:
 
-\subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/}
+\subsection{Dynamic Entropy}\cite{xkcd:dynamicentropy}
 The Relaxed-FIFO approach can be made to handle the case of mostly empty subqueues by tweaking the \glsxtrlong{prng}.
 The \glsxtrshort{prng} state can be seen as containing a list of all the future subqueues that will be accessed.
 While this concept is not particularly useful on its own, the consequence is that if the \glsxtrshort{prng} algorithm can be run \emph{backwards}, then the state also contains a list of all the subqueues that were accessed.
-Luckily, bidirectional \glsxtrshort{prng} algorithms do exist, \eg some Linear Congruential Generators\cit{https://en.wikipedia.org/wiki/Linear\_congruential\_generator} support running the algorithm backwards while offering good quality and performance.
+Luckily, bidirectional \glsxtrshort{prng} algorithms do exist, \eg some Linear Congruential Generators\cite{wiki:lcg} support running the algorithm backwards while offering good quality and performance.
 This particular \glsxtrshort{prng} can be used as follows:
 \begin{itemize}
@@ -220,5 +220,5 @@
 	\input{base.pstex_t}
 	\caption[Base \CFA design]{Base \CFA design \smallskip\newline A pool of subqueues offers the sharding, two per \glspl{proc}.
-	Each \gls{proc} can access all of the subqueues. 
+	Each \gls{proc} can access all of the subqueues.
 	Each \at is timestamped when enqueued.}
 	\label{fig:base}
@@ -245,5 +245,5 @@
 \end{figure}
 
-A simple solution to this problem is to use an exponential moving average\cit{https://en.wikipedia.org/wiki/Moving\_average\#Exponential\_moving\_average} (MA) instead of a raw timestamps, shown in Figure~\ref{fig:base-ma}.
+A simple solution to this problem is to use an exponential moving average\cite{wiki:ma} (MA) instead of a raw timestamps, shown in Figure~\ref{fig:base-ma}.
 Note, this is more complex because the \at at the head of a subqueue is still waiting, so its wait time has not ended.
 Therefore, the exponential moving average is actually an exponential moving average of how long each dequeued \at has waited.
Index: doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex	(revision 511a936813b37ad7d2002f0d594c479aed4dd644)
+++ doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex	(revision 80402869270f5295e4f74c38b827512b732bcf72)
@@ -10,7 +10,6 @@
 
 \section{Memcached}
-Memcached~\cit{memcached} is an in memory key-value store that is used in many production environments, \eg \cit{Berk Atikoglu et al., Workload Analysis of a Large-Scale Key-Value Store,
-SIGMETRICS 2012}.
-This also server also has the notable added benefit that there exists a full-featured front-end for performance testing called @mutilate@~\cit{mutilate}.
+Memcached~\cite{memcached} is an in-memory key-value store that is used in many production environments, \eg \cite{atikoglu2012workload}.
+This server also has the notable benefit that there exists a full-featured front-end for performance testing called @mutilate@~\cite{GITHUB:mutilate}.
 Experimenting on memcached allows for a simple test of the \CFA runtime as a whole, it will exercise the scheduler, the idle-sleep mechanism, as well the \io subsystem for sockets.
 This experiment does not exercise the \io subsytem with regards to disk operations.
@@ -98,6 +97,6 @@
 Most of the implementation is fairly straight forward however the inclusion of file \io introduces a new challenge that had to be hacked around.
 
-Normally, webservers use @sendfile@\cit{sendfile} to send files over the socket.
-@io_uring@ does not support @sendfile@, it supports @splice@\cit{splice} instead, which is strictly more powerful.
+Normally, webservers use @sendfile@\cite{MAN:sendfile} to send files over the socket.
+@io_uring@ does not support @sendfile@, it supports @splice@\cite{MAN:splice} instead, which is strictly more powerful.
 However, because of how linux implements file \io, see Subsection~\ref{ononblock}, @io_uring@'s implementation must delegate calls to splice to worker threads inside the kernel.
 As of Linux 5.13, @io_uring@ caps the numer of these worker threads to @RLIMIT_NPROC@ and therefore, when tens of thousands of splice requests are made, it can create tens of thousands of \glspl{kthrd}.
@@ -108,5 +107,5 @@
 When the saturation point of the server is attained, latency will increase and inevitably some client connections will timeout.
 As these clients close there connections, the server must close these sockets without delay so the OS can reclaim the resources used by these connections.
-Indeed, until they are closed on the server end, the connection will linger in the CLOSE-WAIT tcp state~\cit{RFC793} and the tcp buffers will be preserved.
+Indeed, until they are closed on the server end, the connection will linger in the CLOSE-WAIT TCP state~\cite{rfc:tcp} and the TCP buffers will be preserved.
 However, this poses a problem using blocking @sendfile@ calls.
 The calls can block if they do not have suffcient memory, which can be caused by having too many connections in the CLOSE-WAIT state.
Index: doc/theses/thierry_delisle_PhD/thesis/text/existing.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/existing.tex	(revision 511a936813b37ad7d2002f0d594c479aed4dd644)
+++ doc/theses/thierry_delisle_PhD/thesis/text/existing.tex	(revision 80402869270f5295e4f74c38b827512b732bcf72)
@@ -14,6 +14,6 @@
 
 \section{Naming Convention}
-Scheduling has been studied by various communities concentrating on different incarnation of the same problems. 
-As a result, there are no standard naming conventions for scheduling that is respected across these communities. 
+Scheduling has been studied by various communities concentrating on different incarnation of the same problems.
+As a result, there are no standard naming conventions for scheduling that is respected across these communities.
 This document uses the term \newterm{\Gls{at}} to refer to the abstract objects being scheduled and the term \newterm{\Gls{proc}} to refer to the concrete objects executing these \ats.
 
@@ -28,5 +28,5 @@
 \section{Dynamic Scheduling}
 \newterm{Dynamic schedulers} determine \ats dependencies and costs during scheduling, if at all.
-Hence, unlike static scheduling, \ats dependencies are conditional and detected at runtime. 
+Hence, unlike static scheduling, \ats dependencies are conditional and detected at runtime.
 This detection takes the form of observing new \ats(s) in the system and determining dependencies from their behaviour, including suspending or halting a \ats that dynamically detects unfulfilled dependencies.
 Furthermore, each \ats has the responsibility of adding dependent \ats back into the system once dependencies are fulfilled.
@@ -51,5 +51,5 @@
 Most common operating systems use some variant on priorities with overlaps and dynamic priority adjustments.
 For example, Microsoft Windows uses a pair of priorities
-\cit{https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities,https://docs.microsoft.com/en-us/windows/win32/taskschd/taskschedulerschema-priority-settingstype-element}, one specified by users out of ten possible options and one adjusted by the system.
+\cite{win:priority}, one specified by users out of ten possible options and one adjusted by the system.
 
 \subsection{Uninformed and Self-Informed Dynamic Schedulers}
@@ -137,5 +137,9 @@
 The scheduler may also temporarily adjust priorities after certain effects like the completion of I/O requests.
 
-\todo{load balancing}
+In~\cite{russinovich2009windows}, Chapter 1 section ``Processes, Threads, and Jobs'' discusses the scheduling policy more in depth.
+Multicore scheduling is based on a combination of priorities and preferred \procs.
+Each \at is assigned an \newterm{ideal} \proc using a round-robin policy.
+\Ats are distributed among the \procs according to their priority, preferring to match \ats to their ideal \proc and then to the last \proc they ran on.
+This is similar to a variation of work stealing, where the stealing \proc restores the \at to its original \proc after running it, but with priorities added into the mix.
 
 \paragraph{Apple OS X}
@@ -156,5 +160,5 @@
 \paragraph{Go}\label{GoSafePoint}
 Go's scheduler uses a randomized work-stealing algorithm that has a global run-queue (\emph{GRQ}) and each processor (\emph{P}) has both a fixed-size run-queue (\emph{LRQ}) and a high-priority next ``chair'' holding a single element~\cite{GITHUB:go,YTUBE:go}.
-Preemption is present, but only at safe-points,~\cit{https://go.dev/src/runtime/preempt.go} which are inserted detection code at various frequent access boundaries.
+Preemption is present, but only at safe-points~\cite{go:safepoints}, which are detection code inserted at various frequent access boundaries.
 
 The algorithm is as follows :
@@ -199,13 +203,12 @@
 
 \paragraph{Grand Central Dispatch}
-An Apple\cit{Official GCD source} API that offers task parallelism~\cite{wiki:taskparallel}.
+An Apple\cite{apple:gcd} API that offers task parallelism~\cite{wiki:taskparallel}.
 Its distinctive aspect is multiple ``Dispatch Queues'', some of which are created by programmers.
 Each queue has its own local ordering guarantees, \eg \ats on queue $A$ are executed in \emph{FIFO} order.
 
-\todo{load balancing and scheduling}
-
-% http://web.archive.org/web/20090920043909/http://images.apple.com/macosx/technology/docs/GrandCentral_TB_brief_20090903.pdf
-
-In terms of semantics, the Dispatch Queues seem to be very similar to Intel\textregistered ~TBB @execute()@ and predecessor semantics.
+While the documentation only gives limited insight into the scheduling and load balancing approach, \cite{apple:gcd2} suggests a fairly classic approach;
+where each \proc has a queue of \newterm{blocks} to run, effectively \ats, and they drain their respective queues in \glsxtrshort{fifo} order.
+They seem to add the concept of dependent queues with clear ordering, where executing a block ends up scheduling more blocks.
+In terms of semantics, these Dispatch Queues seem to be very similar to Intel\textregistered ~TBB @execute()@ and predecessor semantics.
 
 \paragraph{LibFibre}
Index: doc/theses/thierry_delisle_PhD/thesis/text/io.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/io.tex	(revision 511a936813b37ad7d2002f0d594c479aed4dd644)
+++ doc/theses/thierry_delisle_PhD/thesis/text/io.tex	(revision 80402869270f5295e4f74c38b827512b732bcf72)
@@ -141,5 +141,5 @@
 In the worst case, where all \glspl{thrd} are consistently blocking on \io, it devolves into 1-to-1 threading.
 However, regardless of the frequency of \io operations, it achieves the fundamental goal of not blocking \glspl{proc} when \glspl{thrd} are ready to run.
-This approach is used by languages like Go\cit{Go}, frameworks like libuv\cit{libuv}, and web servers like Apache~\cite{apache} and Nginx~\cite{nginx}, since it has the advantage that it can easily be used across multiple operating systems.
+This approach is used by languages like Go\cite{GITHUB:go}, frameworks like libuv\cite{libuv}, and web servers like Apache~\cite{apache} and Nginx~\cite{nginx}, since it has the advantage that it can easily be used across multiple operating systems.
 This advantage is especially relevant for languages like Go, which offer a homogeneous \glsxtrshort{api} across all platforms.
 As opposed to C, which has a very limited standard api for \io, \eg, the C standard library has no networking.
@@ -148,5 +148,5 @@
 These options effectively fall into two broad camps: waiting for \io to be ready versus waiting for \io to complete.
 All operating systems that support asynchronous \io must offer an interface along one of these lines, but the details vary drastically.
-For example, Free BSD offers @kqueue@~\cite{MAN:bsd/kqueue}, which behaves similarly to @epoll@, but with some small quality of use improvements, while Windows (Win32)~\cit{https://docs.microsoft.com/en-us/windows/win32/fileio/synchronous-and-asynchronous-i-o} offers ``overlapped I/O'', which handles submissions similarly to @O_NONBLOCK@ with extra flags on the synchronous system call, but waits for completion events, similarly to @io_uring@.
+For example, Free BSD offers @kqueue@~\cite{MAN:bsd/kqueue}, which behaves similarly to @epoll@, but with some small quality of use improvements, while Windows (Win32)~\cite{win:overlap} offers ``overlapped I/O'', which handles submissions similarly to @O_NONBLOCK@ with extra flags on the synchronous system call, but waits for completion events, similarly to @io_uring@.
 
 For this project, I selected @io_uring@, in large parts because of its generality.
Index: doc/theses/thierry_delisle_PhD/thesis/text/practice.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/practice.tex	(revision 511a936813b37ad7d2002f0d594c479aed4dd644)
+++ doc/theses/thierry_delisle_PhD/thesis/text/practice.tex	(revision 80402869270f5295e4f74c38b827512b732bcf72)
@@ -60,10 +60,9 @@
 To achieve this goal requires each reader to have its own memory to mark as locked and unlocked.
 The read acquire possibly waits for a writer to finish the critical section and then acquires a reader's local spinlock.
-The write acquire acquires the global lock, guaranteeing mutual exclusion among writers, and then acquires each of the local reader locks.
+The write acquires the global lock, guaranteeing mutual exclusion among writers, and then acquires each of the local reader locks.
 Acquiring all the local read locks guarantees mutual exclusion among the readers and the writer, while the wait on the read side prevents readers from continuously starving the writer.
-
 Figure~\ref{f:SpecializedReadersWriterLock} shows the outline for this specialized readers-writer lock.
 The lock in nonblocking, so both readers and writers spin while the lock is held.
-\todo{finish explanation}
+This very wide sharding strategy means that readers have very good locality, since they only ever need to access two memory locations.
 
 \begin{figure}
@@ -138,5 +137,5 @@
 
 \subsection{Event FDs}
-Another interesting approach is to use an event file descriptor\cit{eventfd}.
+Another interesting approach is to use an event file descriptor\cite{eventfd}.
 This Linux feature is a file descriptor that behaves like \io, \ie, uses @read@ and @write@, but also behaves like a semaphore.
 Indeed, all reads and writes must use a word-sized values, \ie 64 or 32 bits.
@@ -218,5 +217,5 @@
 \end{figure}
 
-The next optimization is to avoid the latency of the event @fd@, which can be done by adding what is effectively a binary benaphore\cit{benaphore} in front of the event @fd@.
+The next optimization is to avoid the latency of the event @fd@, which can be done by adding what is effectively a binary benaphore\cite{schillings1996engineering} in front of the event @fd@.
 The benaphore over the event @fd@ logically provides a three state flag to avoid unnecessary system calls, where the states are expressed explicit in Figure~\ref{fig:idle:state}.
 A \proc begins its idle sleep by adding itself to the idle list before searching for an \at.
Index: doc/theses/thierry_delisle_PhD/thesis/text/runtime.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/runtime.tex	(revision 511a936813b37ad7d2002f0d594c479aed4dd644)
+++ doc/theses/thierry_delisle_PhD/thesis/text/runtime.tex	(revision 80402869270f5295e4f74c38b827512b732bcf72)
@@ -62,5 +62,5 @@
 Only UNIX @man@ pages identify whether or not a library function is thread safe, and hence, may block on a pthreads lock or system call; hence interoperability with UNIX library functions is a challenge for an M:N threading model.
 
-Languages like Go and Java, which have strict interoperability with C\cit{JNI, GoLang with C}, can control operations in C by ``sandboxing'' them, \eg a blocking function may be delegated to a \gls{kthrd}. Sandboxing may help towards guaranteeing that the kind of deadlock mentioned above does not occur.
+Languages like Go and Java, which have strict interoperability with C\cite{wiki:jni,go:cgo}, can control operations in C by ``sandboxing'' them, \eg a blocking function may be delegated to a \gls{kthrd}. Sandboxing may help towards guaranteeing that the kind of deadlock mentioned above does not occur.
 
 As mentioned in Section~\ref{intro}, \CFA is binary compatible with C and, as such, must support all C library functions. Furthermore, interoperability can happen at the function-call level, inline code, or C and \CFA translation units linked together. This fine-grained interoperability between C and \CFA has two consequences:
