Index: doc/LaTeXmacros/common.tex
===================================================================
--- doc/LaTeXmacros/common.tex	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/LaTeXmacros/common.tex	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -11,6 +11,6 @@
 %% Created On       : Sat Apr  9 10:06:17 2016
 %% Last Modified By : Peter A. Buhr
-%% Last Modified On : Fri Sep  4 13:56:52 2020
-%% Update Count     : 383
+%% Last Modified On : Mon Oct  5 09:34:46 2020
+%% Update Count     : 464
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
@@ -55,19 +55,4 @@
 \newlength{\parindentlnth}
 \setlength{\parindentlnth}{\parindent}
-
-\newcommand{\LstBasicStyle}[1]{{\lst@basicstyle{#1}}}
-\newcommand{\LstKeywordStyle}[1]{{\lst@basicstyle{\lst@keywordstyle{#1}}}}
-\newcommand{\LstCommentStyle}[1]{{\lst@basicstyle{\lst@commentstyle{#1}}}}
-
-\newlength{\gcolumnposn}				% temporary hack because lstlisting does not handle tabs correctly
-\newlength{\columnposn}
-\setlength{\gcolumnposn}{2.5in}
-\setlength{\columnposn}{\gcolumnposn}
-\newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}}
-\newcommand{\CRT}{\global\columnposn=\gcolumnposn}
-
-% allow escape sequence in lstinline
-%\usepackage{etoolbox}
-%\patchcmd{\lsthk@TextStyle}{\let\lst@DefEsc\@empty}{}{}{\errmessage{failed to patch}}
 
 \usepackage{pslatex}					% reduce size of san serif font
@@ -244,8 +229,28 @@
 \usepackage{listings}									% format program code
 \usepackage{lstlang}
-
-\newcommand{\CFADefaults}{%
+\makeatletter
+
+\newcommand{\LstBasicStyle}[1]{{\lst@basicstyle{#1}}}
+\newcommand{\LstKeywordStyle}[1]{{\lst@basicstyle{\lst@keywordstyle{#1}}}}
+\newcommand{\LstCommentStyle}[1]{{\lst@basicstyle{\lst@commentstyle{#1}}}}
+
+\newlength{\gcolumnposn}				% temporary hack because lstlisting does not handle tabs correctly
+\newlength{\columnposn}
+\setlength{\gcolumnposn}{2.75in}
+\setlength{\columnposn}{\gcolumnposn}
+\newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}}
+\newcommand{\CRT}{\global\columnposn=\gcolumnposn}
+
+% allow escape sequence in lstinline
+%\usepackage{etoolbox}
+%\patchcmd{\lsthk@TextStyle}{\let\lst@DefEsc\@empty}{}{}{\errmessage{failed to patch}}
+
+% allow adding to lst literate
+\def\addToLiterate#1{\protect\edef\lst@literate{\unexpanded\expandafter{\lst@literate}\unexpanded{#1}}}
+\lst@Key{add to literate}{}{\addToLiterate{#1}}
+\makeatother
+
+\newcommand{\CFAStyle}{%
 \lstset{
-language=CFA,
 columns=fullflexible,
 basicstyle=\linespread{0.9}\sf,			% reduce line spacing and use sanserif font
@@ -262,22 +267,37 @@
 belowskip=3pt,
 % replace/adjust listing characters that look bad in sanserif
-literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptscriptstyle\land\,$}}1
+literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.75ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptscriptstyle\land\,$}}1
 	{~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
 	{<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex\textgreater}2,
-moredelim=**[is][\color{red}]{?}{?},	% red highlighting ?...? (registered trademark symbol) emacs: C-q M-.
+}% lstset
+}% CFAStyle
+
+\ifdefined\CFALatin% extra Latin-1 escape characters
+\lstnewenvironment{cfa}[1][]{
+\lstset{
+language=CFA,
+moredelim=**[is][\color{red}]{®}{®},	% red highlighting ®...® (registered trademark symbol) emacs: C-q M-.
 moredelim=**[is][\color{blue}]{ß}{ß},	% blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_
 moredelim=**[is][\color{OliveGreen}]{¢}{¢}, % green highlighting ¢...¢ (cent symbol) emacs: C-q M-"
 moredelim=[is][\lstset{keywords={}}]{¶}{¶}, % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^
+% replace/adjust listing characters that look bad in sanserif
+add to literate={`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
 }% lstset
-}% CFADefaults
-\newcommand{\CFAStyle}{%
-\CFADefaults
+\lstset{#1}
+}{}
 % inline code ©...© (copyright symbol) emacs: C-q M-)
 \lstMakeShortInline©					% single-character for \lstinline
-}% CFAStyle
-
-\lstnewenvironment{cfa}[1][]
-{\CFADefaults\lstset{#1}}
-{}
+\else% regular ASCII characters
+\lstnewenvironment{cfa}[1][]{
+\lstset{
+language=CFA,
+escapechar=\$,							% LaTeX escape in CFA code
+moredelim=**[is][\color{red}]{@}{@},	% red highlighting @...@
+}% lstset
+\lstset{#1}
+}{}
+% inline code @...@ (at symbol)
+\lstMakeShortInline@					% single-character for \lstinline
+\fi%
 
 % Local Variables: %
Index: doc/LaTeXmacros/lstlang.sty
===================================================================
--- doc/LaTeXmacros/lstlang.sty	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/LaTeXmacros/lstlang.sty	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -8,6 +8,6 @@
 %% Created On       : Sat May 13 16:34:42 2017
 %% Last Modified By : Peter A. Buhr
-%% Last Modified On : Tue Jan  8 14:40:33 2019
-%% Update Count     : 21
+%% Last Modified On : Wed Sep 23 22:40:04 2020
+%% Update Count     : 24
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
@@ -115,7 +115,7 @@
 		auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, __const, __const__,
 		coroutine, disable, dtype, enable, exception, __extension__, fallthrough, fallthru, finally,
-		__float80, float80, __float128, float128, forall, ftype, _Generic, _Imaginary, __imag, __imag__,
+		__float80, float80, __float128, float128, forall, ftype, generator, _Generic, _Imaginary, __imag, __imag__,
 		inline, __inline, __inline__, __int128, int128, __label__, monitor, mutex, _Noreturn, one_t, or,
-		otype, restrict, __restrict, __restrict__, __signed, __signed__, _Static_assert, thread,
+		otype, restrict, __restrict, __restrict__, __signed, __signed__, _Static_assert, suspend, thread,
 		_Thread_local, throw, throwResume, timeout, trait, try, ttype, typeof, __typeof, __typeof__,
 		virtual, __volatile, __volatile__, waitfor, when, with, zero_t,
@@ -125,5 +125,7 @@
 
 % C++ programming language
-\lstdefinelanguage{C++}[ANSI]{C++}{}
+\lstdefinelanguage{C++}[ANSI]{C++}{
+	morekeywords={nullptr,}
+}
 
 % uC++ programming language, based on ANSI C++
Index: doc/bibliography/pl.bib
===================================================================
--- doc/bibliography/pl.bib	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/bibliography/pl.bib	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -1005,5 +1005,5 @@
     key		= {Cforall Benchmarks},
     author	= {{\textsf{C}{$\mathbf{\forall}$} Benchmarks}},
-    howpublished= {\href{https://plg.uwaterloo.ca/~cforall/doc/CforallConcurrentBenchmarks.tar}{https://\-plg.uwaterloo.ca/\-$\sim$cforall/\-doc/\-CforallConcurrentBenchmarks.tar}},
+    howpublished= {\href{https://github.com/cforall/ConcurrentBenchmarks_SPE20}{https://\-github.com/\-cforall/\-ConcurrentBenchmarks\_SPE20}},
 }
 
@@ -1973,5 +1973,5 @@
     title	= {Cooperating Sequential Processes},
     institution	= {Technological University},
-    address	= {Eindhoven, Netherlands},
+    address	= {Eindhoven, Neth.},
     year	= 1965,
     note	= {Reprinted in \cite{Genuys68} pp. 43--112.}
Index: doc/papers/concurrency/Paper.tex
===================================================================
--- doc/papers/concurrency/Paper.tex	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/papers/concurrency/Paper.tex	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -224,17 +224,17 @@
 {}
 \lstnewenvironment{C++}[1][]                            % use C++ style
-{\lstset{language=C++,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}}
+{\lstset{language=C++,moredelim=**[is][\protect\color{red}]{`}{`}}\lstset{#1}}
 {}
 \lstnewenvironment{uC++}[1][]
-{\lstset{language=uC++,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}}
+{\lstset{language=uC++,moredelim=**[is][\protect\color{red}]{`}{`}}\lstset{#1}}
 {}
 \lstnewenvironment{Go}[1][]
-{\lstset{language=Golang,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}}
+{\lstset{language=Golang,moredelim=**[is][\protect\color{red}]{`}{`}}\lstset{#1}}
 {}
 \lstnewenvironment{python}[1][]
-{\lstset{language=python,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}}
+{\lstset{language=python,moredelim=**[is][\protect\color{red}]{`}{`}}\lstset{#1}}
 {}
 \lstnewenvironment{java}[1][]
-{\lstset{language=java,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}}
+{\lstset{language=java,moredelim=**[is][\protect\color{red}]{`}{`}}\lstset{#1}}
 {}
 
@@ -284,5 +284,5 @@
 
 \begin{document}
-\linenumbers				% comment out to turn off line numbering
+%\linenumbers				% comment out to turn off line numbering
 
 \maketitle
@@ -450,16 +450,16 @@
 \hline
 stateful			& thread	& \multicolumn{1}{c|}{No} & \multicolumn{1}{c}{Yes} \\
-\hline    
-\hline    
+\hline
+\hline
 No					& No		& \textbf{1}\ \ \ @struct@				& \textbf{2}\ \ \ @mutex@ @struct@		\\
-\hline    
+\hline
 Yes (stackless)		& No		& \textbf{3}\ \ \ @generator@			& \textbf{4}\ \ \ @mutex@ @generator@	\\
-\hline    
+\hline
 Yes (stackful)		& No		& \textbf{5}\ \ \ @coroutine@			& \textbf{6}\ \ \ @mutex@ @coroutine@	\\
-\hline    
+\hline
 No					& Yes		& \textbf{7}\ \ \ {\color{red}rejected}	& \textbf{8}\ \ \ {\color{red}rejected}	\\
-\hline    
+\hline
 Yes (stackless)		& Yes		& \textbf{9}\ \ \ {\color{red}rejected}	& \textbf{10}\ \ \ {\color{red}rejected} \\
-\hline    
+\hline
 Yes (stackful)		& Yes		& \textbf{11}\ \ \ @thread@				& \textbf{12}\ \ @mutex@ @thread@		\\
 \end{tabular}
@@ -2896,5 +2896,5 @@
 \label{s:RuntimeStructureCluster}
 
-A \newterm{cluster} is a collection of user and kernel threads, where the kernel threads run the user threads from the cluster's ready queue, and the operating system runs the kernel threads on the processors from its ready queue.
+A \newterm{cluster} is a collection of user and kernel threads, where the kernel threads run the user threads from the cluster's ready queue, and the operating system runs the kernel threads on the processors from its ready queue~\cite{Buhr90a}.
 The term \newterm{virtual processor} is introduced as a synonym for kernel thread to disambiguate between user and kernel thread.
 From the language perspective, a virtual processor is an actual processor (core).
@@ -2992,10 +2992,11 @@
 \end{cfa}
 where CPU time in nanoseconds is from the appropriate language clock.
-Each benchmark is performed @N@ times, where @N@ is selected so the benchmark runs in the range of 2--20 seconds for the specific programming language.
+Each benchmark is performed @N@ times, where @N@ is selected so the benchmark runs in the range of 2--20 seconds for the specific programming language;
+each @N@ appears after the experiment name in the following tables.
 The total time is divided by @N@ to obtain the average time for a benchmark.
 Each benchmark experiment is run 13 times and the average appears in the table.
+For languages with a runtime JIT (Java, Node.js, Python), a single half-hour long experiment is run to check stability;
+all long-experiment results are statistically equivalent, \ie median/average/standard-deviation correlate with the short-experiment results, indicating the short experiments reached a steady state.
 All omitted tests for other languages are functionally identical to the \CFA tests and available online~\cite{CforallConcurrentBenchmarks}.
-% tar --exclude-ignore=exclude -cvhf benchmark.tar benchmark
-% cp -p benchmark.tar /u/cforall/public_html/doc/concurrent_benchmark.tar
 
 \paragraph{Creation}
@@ -3006,7 +3007,6 @@
 
 \begin{multicols}{2}
-\lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}}
-\begin{cfa}
-@coroutine@ MyCoroutine {};
+\begin{cfa}[xleftmargin=0pt]
+`coroutine` MyCoroutine {};
 void ?{}( MyCoroutine & this ) {
 #ifdef EAGER
@@ -3016,5 +3016,5 @@
 void main( MyCoroutine & ) {}
 int main() {
-	BENCH( for ( N ) { @MyCoroutine c;@ } )
+	BENCH( for ( N ) { `MyCoroutine c;` } )
 	sout | result;
 }
@@ -3030,18 +3030,19 @@
 
 \begin{tabular}[t]{@{}r*{3}{D{.}{.}{5.2}}@{}}
-\multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
-\CFA generator			& 0.6		& 0.6		& 0.0		\\
-\CFA coroutine lazy		& 13.4		& 13.1		& 0.5		\\
-\CFA coroutine eager	& 144.7		& 143.9		& 1.5		\\
-\CFA thread				& 466.4		& 468.0		& 11.3		\\
-\uC coroutine			& 155.6		& 155.7		& 1.7		\\
-\uC thread				& 523.4		& 523.9		& 7.7		\\
-Python generator		& 123.2		& 124.3		& 4.1		\\
-Node.js generator		& 33.4		& 33.5		& 0.3		\\
-Goroutine thread		& 751.0		& 750.5		& 3.1		\\
-Rust tokio thread		& 1860.0	& 1881.1	& 37.6		\\
-Rust thread				& 53801.0	& 53896.8	& 274.9		\\
-Java thread				& 120274.0	& 120722.9	& 2356.7	\\
-Pthreads thread			& 31465.5	& 31419.5	& 140.4
+\multicolumn{1}{@{}r}{N\hspace*{10pt}} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
+\CFA generator (1B)			& 0.6		& 0.6		& 0.0		\\
+\CFA coroutine lazy	(100M)	& 13.4		& 13.1		& 0.5		\\
+\CFA coroutine eager (10M)	& 144.7		& 143.9		& 1.5		\\
+\CFA thread (10M)			& 466.4		& 468.0		& 11.3		\\
+\uC coroutine (10M)			& 155.6		& 155.7		& 1.7		\\
+\uC thread (10M)			& 523.4		& 523.9		& 7.7		\\
+Python generator (10M)		& 123.2		& 124.3		& 4.1		\\
+Node.js generator (10M)		& 33.4		& 33.5		& 0.3		\\
+Goroutine thread (10M)		& 751.0		& 750.5		& 3.1		\\
+Rust tokio thread (10M)		& 1860.0	& 1881.1	& 37.6		\\
+Rust thread	(250K)			& 53801.0	& 53896.8	& 274.9		\\
+Java thread (250K)			& 119256.0	& 119679.2	& 2244.0	\\
+% Java thread (1 000 000)		& 123100.0	& 123052.5	& 751.6 	\\
+Pthreads thread	(250K)		& 31465.5	& 31419.5	& 140.4
 \end{tabular}
 \end{multicols}
@@ -3052,19 +3053,20 @@
 Internal scheduling is measured using a cycle of two threads signalling and waiting.
 Figure~\ref{f:schedint} shows the code for \CFA, with results in Table~\ref{t:schedint}.
-Note, the incremental cost of bulk acquire for \CFA, which is largely a fixed cost for small numbers of mutex objects.
-Java scheduling is significantly greater because the benchmark explicitly creates multiple threads in order to prevent the JIT from making the program sequential, \ie removing all locking.
+Note, the \CFA incremental cost for bulk acquire is a fixed cost for small numbers of mutex objects.
+User-level threading has one kernel thread, eliminating contention between the threads (direct handoff of the kernel thread).
+Kernel-level threading has two kernel threads allowing some contention.
 
 \begin{multicols}{2}
-\lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}}
-\begin{cfa}
+\setlength{\tabcolsep}{3pt}
+\begin{cfa}[xleftmargin=0pt]
 volatile int go = 0;
-@condition c;@
-@monitor@ M {} m1/*, m2, m3, m4*/;
-void call( M & @mutex p1/*, p2, p3, p4*/@ ) {
-	@signal( c );@
-}
-void wait( M & @mutex p1/*, p2, p3, p4*/@ ) {
+`condition c;`
+`monitor` M {} m1/*, m2, m3, m4*/;
+void call( M & `mutex p1/*, p2, p3, p4*/` ) {
+	`signal( c );`
+}
+void wait( M & `mutex p1/*, p2, p3, p4*/` ) {
 	go = 1;	// continue other thread
-	for ( N ) { @wait( c );@ } );
+	for ( N ) { `wait( c );` } );
 }
 thread T {};
@@ -3091,12 +3093,13 @@
 
 \begin{tabular}{@{}r*{3}{D{.}{.}{5.2}}@{}}
-\multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
-\CFA @signal@, 1 monitor	& 364.4		& 364.2		& 4.4		\\
-\CFA @signal@, 2 monitor	& 484.4		& 483.9		& 8.8		\\
-\CFA @signal@, 4 monitor	& 709.1		& 707.7		& 15.0		\\
-\uC @signal@ monitor		& 328.3		& 327.4		& 2.4		\\
-Rust cond. variable			& 7514.0	& 7437.4	& 397.2		\\
-Java @notify@ monitor		& 9623.0	& 9654.6	& 236.2		\\
-Pthreads cond. variable		& 5553.7	& 5576.1	& 345.6
+\multicolumn{1}{@{}r}{N\hspace*{10pt}} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
+\CFA @signal@, 1 monitor (10M)	& 364.4		& 364.2		& 4.4		\\
+\CFA @signal@, 2 monitor (10M)	& 484.4		& 483.9		& 8.8		\\
+\CFA @signal@, 4 monitor (10M)	& 709.1		& 707.7		& 15.0		\\
+\uC @signal@ monitor (10M)		& 328.3		& 327.4		& 2.4		\\
+Rust cond. variable	(1M)		& 7514.0	& 7437.4	& 397.2		\\
+Java @notify@ monitor (1M)		& 8717.0	& 8774.1	& 471.8		\\
+% Java @notify@ monitor (100 000 000)		& 8634.0	& 8683.5	& 330.5		\\
+Pthreads cond. variable (1M)	& 5553.7	& 5576.1	& 345.6
 \end{tabular}
 \end{multicols}
@@ -3107,14 +3110,14 @@
 External scheduling is measured using a cycle of two threads calling and accepting the call using the @waitfor@ statement.
 Figure~\ref{f:schedext} shows the code for \CFA with results in Table~\ref{t:schedext}.
-Note, the incremental cost of bulk acquire for \CFA, which is largely a fixed cost for small numbers of mutex objects.
+Note, the \CFA incremental cost for bulk acquire is a fixed cost for small numbers of mutex objects.
 
 \begin{multicols}{2}
-\lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}}
+\setlength{\tabcolsep}{5pt}
 \vspace*{-16pt}
-\begin{cfa}
-@monitor@ M {} m1/*, m2, m3, m4*/;
-void call( M & @mutex p1/*, p2, p3, p4*/@ ) {}
-void wait( M & @mutex p1/*, p2, p3, p4*/@ ) {
-	for ( N ) { @waitfor( call : p1/*, p2, p3, p4*/ );@ }
+\begin{cfa}[xleftmargin=0pt]
+`monitor` M {} m1/*, m2, m3, m4*/;
+void call( M & `mutex p1/*, p2, p3, p4*/` ) {}
+void wait( M & `mutex p1/*, p2, p3, p4*/` ) {
+	for ( N ) { `waitfor( call : p1/*, p2, p3, p4*/ );` }
 }
 thread T {};
@@ -3133,14 +3136,14 @@
 \columnbreak
 
-\vspace*{-16pt}
+\vspace*{-18pt}
 \captionof{table}{External-scheduling comparison (nanoseconds)}
 \label{t:schedext}
 \begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}}
-\multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
-\CFA @waitfor@, 1 monitor	& 367.1	& 365.3	& 5.0	\\
-\CFA @waitfor@, 2 monitor	& 463.0	& 464.6	& 7.1	\\
-\CFA @waitfor@, 4 monitor	& 689.6	& 696.2	& 21.5	\\
-\uC \lstinline[language=uC++]|_Accept| monitor	& 328.2	& 329.1	& 3.4	\\
-Go \lstinline[language=Golang]|select| channel	& 365.0	& 365.5	& 1.2
+\multicolumn{1}{@{}r}{N\hspace*{10pt}} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
+\CFA @waitfor@, 1 monitor (10M)	& 367.1	& 365.3	& 5.0	\\
+\CFA @waitfor@, 2 monitor (10M)	& 463.0	& 464.6	& 7.1	\\
+\CFA @waitfor@, 4 monitor (10M)	& 689.6	& 696.2	& 21.5	\\
+\uC \lstinline[language=uC++]|_Accept| monitor (10M)	& 328.2	& 329.1	& 3.4	\\
+Go \lstinline[language=Golang]|select| channel (10M)	& 365.0	& 365.5	& 1.2
 \end{tabular}
 \end{multicols}
@@ -3155,8 +3158,8 @@
 
 \begin{multicols}{2}
-\lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}}
-\begin{cfa}
-@monitor@ M {} m1/*, m2, m3, m4*/;
-call( M & @mutex p1/*, p2, p3, p4*/@ ) {}
+\setlength{\tabcolsep}{3pt}
+\begin{cfa}[xleftmargin=0pt]
+`monitor` M {} m1/*, m2, m3, m4*/;
+call( M & `mutex p1/*, p2, p3, p4*/` ) {}
 int main() {
 	BENCH( for( N ) call( m1/*, m2, m3, m4*/ ); )
@@ -3173,14 +3176,15 @@
 \label{t:mutex}
 \begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}}
-\multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
-test-and-test-set lock			& 19.1	& 18.9	& 0.4	\\
-\CFA @mutex@ function, 1 arg.	& 48.3	& 47.8	& 0.9	\\
-\CFA @mutex@ function, 2 arg.	& 86.7	& 87.6	& 1.9	\\
-\CFA @mutex@ function, 4 arg.	& 173.4	& 169.4	& 5.9	\\
-\uC @monitor@ member rtn.		& 54.8	& 54.8	& 0.1	\\
-Goroutine mutex lock			& 34.0	& 34.0	& 0.0	\\
-Rust mutex lock					& 33.0	& 33.2	& 0.8	\\
-Java synchronized method		& 31.0	& 31.0	& 0.0	\\
-Pthreads mutex Lock				& 31.0	& 31.1	& 0.4
+\multicolumn{1}{@{}r}{N\hspace*{10pt}} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
+test-and-test-set lock (50M)		& 19.1	& 18.9	& 0.4	\\
+\CFA @mutex@ function, 1 arg. (50M)	& 48.3	& 47.8	& 0.9	\\
+\CFA @mutex@ function, 2 arg. (50M)	& 86.7	& 87.6	& 1.9	\\
+\CFA @mutex@ function, 4 arg. (50M)	& 173.4	& 169.4	& 5.9	\\
+\uC @monitor@ member rtn. (50M)		& 54.8	& 54.8	& 0.1	\\
+Goroutine mutex lock (50M)			& 34.0	& 34.0	& 0.0	\\
+Rust mutex lock (50M)				& 33.0	& 33.2	& 0.8	\\
+Java synchronized method (50M)		& 31.0	& 30.9	& 0.5	\\
+% Java synchronized method (10 000 000 000)		& 31.0 & 30.2 & 0.9 \\
+Pthreads mutex Lock (50M)			& 31.0	& 31.1	& 0.4
 \end{tabular}
 \end{multicols}
@@ -3201,5 +3205,5 @@
 % To: "Peter A. Buhr" <pabuhr@plg2.cs.uwaterloo.ca>
 % Date: Fri, 24 Jan 2020 13:49:18 -0500
-% 
+%
 % I can also verify that the previous version, which just tied a bunch of promises together, *does not* go back to the
 % event loop at all in the current version of Node. Presumably they're taking advantage of the fact that the ordering of
@@ -3211,15 +3215,14 @@
 
 \begin{multicols}{2}
-\lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}}
-\begin{cfa}[aboveskip=0pt,belowskip=0pt]
-@coroutine@ C {};
-void main( C & ) { for () { @suspend;@ } }
+\begin{cfa}[xleftmargin=0pt]
+`coroutine` C {};
+void main( C & ) { for () { `suspend;` } }
 int main() { // coroutine test
 	C c;
-	BENCH( for ( N ) { @resume( c );@ } )
+	BENCH( for ( N ) { `resume( c );` } )
 	sout | result;
 }
 int main() { // thread test
-	BENCH( for ( N ) { @yield();@ } )
+	BENCH( for ( N ) { `yield();` } )
 	sout | result;
 }
@@ -3234,20 +3237,22 @@
 \label{t:ctx-switch}
 \begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}}
-\multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
-C function			& 1.8		& 1.8		& 0.0	\\
-\CFA generator		& 1.8		& 2.0		& 0.3	\\
-\CFA coroutine		& 32.5		& 32.9		& 0.8	\\
-\CFA thread			& 93.8		& 93.6		& 2.2	\\
-\uC coroutine		& 50.3		& 50.3		& 0.2	\\
-\uC thread			& 97.3		& 97.4		& 1.0	\\
-Python generator	& 40.9		& 41.3		& 1.5	\\
-Node.js await		& 1852.2	& 1854.7	& 16.4	\\
-Node.js generator	& 33.3		& 33.4		& 0.3	\\
-Goroutine thread	& 143.0		& 143.3		& 1.1	\\
-Rust async await	& 32.0		& 32.0		& 0.0	\\
-Rust tokio thread	& 143.0		& 143.0		& 1.7	\\
-Rust thread			& 332.0		& 331.4		& 2.4	\\
-Java thread			& 405.0		& 415.0		& 17.6	\\
-Pthreads thread		& 334.3		& 335.2		& 3.9
+\multicolumn{1}{@{}r}{N\hspace*{10pt}} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
+C function (10B)			& 1.8		& 1.8		& 0.0	\\
+\CFA generator (5B)			& 1.8		& 2.0		& 0.3	\\
+\CFA coroutine (100M)		& 32.5		& 32.9		& 0.8	\\
+\CFA thread (100M)			& 93.8		& 93.6		& 2.2	\\
+\uC coroutine (100M)		& 50.3		& 50.3		& 0.2	\\
+\uC thread (100M)			& 97.3		& 97.4		& 1.0	\\
+Python generator (100M)		& 40.9		& 41.3		& 1.5	\\
+Node.js await (5M)			& 1852.2	& 1854.7	& 16.4	\\
+Node.js generator (100M)	& 33.3		& 33.4		& 0.3	\\
+Goroutine thread (100M)		& 143.0		& 143.3		& 1.1	\\
+Rust async await (100M)		& 32.0		& 32.0		& 0.0	\\
+Rust tokio thread (100M)	& 143.0		& 143.0		& 1.7	\\
+Rust thread (25M)			& 332.0		& 331.4		& 2.4	\\
+Java thread (100M)			& 405.0		& 415.0		& 17.6	\\
+% Java thread (  100 000 000)			& 413.0 & 414.2 & 6.2 \\
+% Java thread (5 000 000 000)			& 415.0 & 415.2 & 6.1 \\
+Pthreads thread (25M)		& 334.3		& 335.2		& 3.9
 \end{tabular}
 \end{multicols}
@@ -3258,8 +3263,11 @@
 Languages using 1:1 threading based on pthreads can at best meet or exceed, due to language overhead, the pthread results.
 Note, pthreads has a fast zero-contention mutex lock checked in user space.
-Languages with M:N threading have better performance than 1:1 because there is no operating-system interactions.
+Languages with M:N threading have better performance than 1:1 because there are no operating-system interactions (context-switching or locking).
+As well, for locking experiments, M:N threading has less contention if only one kernel thread is used.
 Languages with stackful coroutines have higher cost than stackless coroutines because of stack allocation and context switching;
 however, stackful \uC and \CFA coroutines have approximately the same performance as stackless Python and Node.js generators.
 The \CFA stackless generator is approximately 25 times faster for suspend/resume and 200 times faster for creation than stackless Python and Node.js generators.
+The Node.js context-switch is costly when asynchronous await must enter the event engine because a promise is not fulfilled.
+Finally, the benchmark results correlate across programming languages with and without JIT, indicating the JIT has completed any runtime optimizations.
 
 
@@ -3319,5 +3327,5 @@
 
 The authors recognize the design assistance of Aaron Moss, Rob Schluntz, Andrew Beach, and Michael Brooks; David Dice for commenting and helping with the Java benchmarks; and Gregor Richards for helping with the Node.js benchmarks.
-This research is funded by a grant from Waterloo-Huawei (\url{http://www.huawei.com}) Joint Innovation Lab. %, and Peter Buhr is partially funded by the Natural Sciences and Engineering Research Council of Canada.
+This research is funded by the NSERC/Waterloo-Huawei (\url{http://www.huawei.com}) Joint Innovation Lab. %, and Peter Buhr is partially funded by the Natural Sciences and Engineering Research Council of Canada.
 
 {%
Index: doc/papers/concurrency/annex/local.bib
===================================================================
--- doc/papers/concurrency/annex/local.bib	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/papers/concurrency/annex/local.bib	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -59,5 +59,5 @@
 @manual{Cpp-Transactions,
 	keywords	= {C++, Transactional Memory},
-	title		= {Technical Specification for C++ Extensions for Transactional Memory},
+	title		= {Tech. Spec. for C++ Extensions for Transactional Memory},
 	organization= {International Standard ISO/IEC TS 19841:2015 },
 	publisher   = {American National Standards Institute},
Index: doc/papers/concurrency/mail2
===================================================================
--- doc/papers/concurrency/mail2	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/papers/concurrency/mail2	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -959,2 +959,117 @@
 Software: Practice and Experience Editorial Office
 
+
+
+Date: Wed, 2 Sep 2020 20:55:34 +0000
+From: Richard Jones <onbehalfof@manuscriptcentral.com>
+Reply-To: R.E.Jones@kent.ac.uk
+To: tdelisle@uwaterloo.ca, pabuhr@uwaterloo.ca
+Subject: Software: Practice and Experience - Decision on Manuscript ID
+ SPE-19-0219.R2
+
+02-Sep-2020
+
+Dear Dr Buhr,
+
+Many thanks for submitting SPE-19-0219.R2 entitled "Advanced Control-flow and Concurrency in Cforall" to Software: Practice and Experience. The paper has now been reviewed and the comments of the referees are included at the bottom of this letter. I apologise for the length of time it has taken to get these.
+
+Both reviewers consider this paper to be close to acceptance. However, before I can accept this paper, I would like you address the comments of Reviewer 2, particularly with regard to the description of the adaptation Java harness to deal with warmup. I would expect to see a convincing argument that the computation has reached a steady state. I would also like you to provide the values for N for each benchmark run. This should be very straightforward for you to do. There are a couple of papers on steady state that you may wish to consult (though I am certainly not pushing my own work).
+
+1) Barrett, Edd; Bolz-Tereick, Carl Friedrich; Killick, Rebecca; Mount, Sarah and Tratt, Laurence. Virtual Machine Warmup Blows Hot and Cold. OOPSLA 2017. https://doi.org/10.1145/3133876
+Virtual Machines (VMs) with Just-In-Time (JIT) compilers are traditionally thought to execute programs in two phases: the initial warmup phase determines which parts of a program would most benefit from dynamic compilation, before JIT compiling those parts into machine code; subsequently the program is said to be at a steady state of peak performance. Measurement methodologies almost always discard data collected during the warmup phase such that reported measurements focus entirely on peak performance. We introduce a fully automated statistical approach, based on changepoint analysis, which allows us to determine if a program has reached a steady state and, if so, whether that represents peak performance or not. Using this, we show that even when run in the most controlled of circumstances, small, deterministic, widely studied microbenchmarks often fail to reach a steady state of peak performance on a variety of common VMs. Repeating our experiment on 3 different machines, we found that at most 43.5% of pairs consistently reach a steady state of peak performance.
+
+2) Kalibera, Tomas and Jones, Richard. Rigorous Benchmarking in Reasonable Time. ISMM  2013. https://doi.org/10.1145/2555670.2464160
+Experimental evaluation is key to systems research. Because modern systems are complex and non-deterministic, good experimental methodology demands that researchers account for uncertainty. To obtain valid results, they are expected to run many iterations of benchmarks, invoke virtual machines (VMs) several times, or even rebuild VM or benchmark binaries more than once. All this repetition costs time to complete experiments. Currently, many evaluations give up on sufficient repetition or rigorous statistical methods, or even run benchmarks only in training sizes. The results reported often lack proper variation estimates and, when a small difference between two systems is reported, some are simply unreliable.In contrast, we provide a statistically rigorous methodology for repetition and summarising results that makes efficient use of experimentation time. Time efficiency comes from two key observations. First, a given benchmark on a given platform is typically prone to much less non-determinism than the common worst-case of published corner-case studies. Second, repetition is most needed where most uncertainty arises (whether between builds, between executions or between iterations). We capture experimentation cost with a novel mathematical model, which we use to identify the number of repetitions at each level of an experiment necessary and sufficient to obtain a given level of precision.We present our methodology as a cookbook that guides researchers on the number of repetitions they should run to obtain reliable results. We also show how to present results with an effect size confidence interval. As an example, we show how to use our methodology to conduct throughput experiments with the DaCapo and SPEC CPU benchmarks on three recent platforms.
+
+You have 42 days from the date of this email to submit your revision. If you are unable to complete the revision within this time, please contact me to request a short extension.
+
+You can upload your revised manuscript and submit it through your Author Center. Log into https://mc.manuscriptcentral.com/spe and enter your Author Center, where you will find your manuscript title listed under "Manuscripts with Decisions".
+
+When submitting your revised manuscript, you will be able to respond to the comments made by the referee(s) in the space provided.  You can use this space to document any changes you make to the original manuscript.
+
+If you would like help with English language editing, or other article preparation support, Wiley Editing Services offers expert help with English Language Editing, as well as translation, manuscript formatting, and figure formatting at www.wileyauthors.com/eeo/preparation. You can also check out our resources for Preparing Your Article for general guidance about writing and preparing your manuscript at www.wileyauthors.com/eeo/prepresources.
+ 
+Once again, thank you for submitting your manuscript to Software: Practice and Experience. I look forward to receiving your revision.
+
+Sincerely,
+Richard
+
+Prof. Richard Jones
+Editor, Software: Practice and Experience
+R.E.Jones@kent.ac.uk
+
+Referee(s)' Comments to Author:
+
+Reviewing: 1
+
+Comments to the Author
+Overall, I felt that this draft was an improvement on previous drafts and I don't have further changes to request. 
+
+I appreciated the new language to clarify the relationship of external and internal scheduling, for example, as well as the new measurements of Rust tokio. Also, while I still believe that the choice between thread/generator/coroutine and so forth could be made crisper and clearer, the current draft of Section 2 did seem adequate to me in terms of specifying the considerations that users would have to take into account to make the choice.
+
+
+Reviewing: 2
+
+Comments to the Author
+First: let me apologise for the delay on this review. I'll blame the global pandemic combined with my institution's senior management's counterproductive decisions for taking up most of my time and all of my energy.
+
+At this point, reading the responses, I think we've been around the course enough times that further iteration is unlikely to really improve the paper any further, so I'm happy to recommend acceptance.    My main comments are that there were some good points in the responses to *all* the reviews and I strongly encourage the authors to incorporate those discursive responses into the final paper so they may benefit readers as well as reviewers.   I agree with the recommendations of reviewer #2 that the paper could usefully be split in to two, which I think I made to a previous revision, but I'm happy to leave that decision to the Editor. 
+
+Finally, the paper needs to describe how the Java harness was adapted to deal with warmup; why the computation has warmed up and reached a steady state - similarly for js and Python. The tables should also give the "N" chosen for each benchmark run.
+ 
+minor points
+* don't start sentences with "However"
+* most downloaded isn't an "Award"
+
+
+
+Date: Thu, 1 Oct 2020 05:34:29 +0000
+From: Richard Jones <onbehalfof@manuscriptcentral.com>
+Reply-To: R.E.Jones@kent.ac.uk
+To: pabuhr@uwaterloo.ca
+Subject: Revision reminder - SPE-19-0219.R2
+
+01-Oct-2020
+
+Dear Dr Buhr
+
+SPE-19-0219.R2
+
+This is a reminder that your opportunity to revise and re-submit your manuscript will expire 14 days from now. If you require more time please contact me directly and I may grant an extension to this deadline, otherwise the option to submit a revision online, will not be available.
+
+If your article is of potential interest to the general public, (which means it must be timely, groundbreaking, interesting and impact on everyday society) then please e-mail ejp@wiley.co.uk explaining the public interest side of the research. Wiley will then investigate the potential for undertaking a global press campaign on the article.
+
+I look forward to receiving your revision.
+
+Sincerely,
+
+Prof. Richard Jones
+Editor, Software: Practice and Experience
+
+https://mc.manuscriptcentral.com/spe
+
+
+
+Date: Tue, 6 Oct 2020 15:29:41 +0000
+From: Mayank Roy Chowdhury <onbehalfof@manuscriptcentral.com>
+Reply-To: speoffice@wiley.com
+To: tdelisle@uwaterloo.ca, pabuhr@uwaterloo.ca
+Subject: SPE-19-0219.R3 successfully submitted
+
+06-Oct-2020
+
+Dear Dr Buhr,
+
+Your manuscript entitled "Advanced Control-flow and Concurrency in Cforall" has been successfully submitted online and is presently being given full consideration for publication in Software: Practice and Experience.
+
+Your manuscript number is SPE-19-0219.R3.  Please mention this number in all future correspondence regarding this submission.
+
+You can view the status of your manuscript at any time by checking your Author Center after logging into https://mc.manuscriptcentral.com/spe.  If you have difficulty using this site, please click the 'Get Help Now' link at the top right corner of the site.
+
+
+Thank you for submitting your manuscript to Software: Practice and Experience.
+
+Sincerely,
+
+Software: Practice and Experience Editorial Office
+
Index: doc/papers/concurrency/response3
===================================================================
--- doc/papers/concurrency/response3	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/papers/concurrency/response3	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,27 @@
+        I would like you address the comments of Reviewer 2, particularly with regard to the description of the adaptation Java harness to deal with warmup. I would expect to see a convincing argument that the computation has reached a steady state.
+
+We understand referee2 and your concern about the JIT experiments, which is why we verified our experiments with two experts in JIT development for both Java and Node.js before submitting the paper. We also read the supplied papers, but most of the information is not applicable to our work for the following reasons.
+
+1. SPEC benchmarks are medium to large. In contrast, our benchmarks are 5-15 lines in length for each programming language (see code for the Cforall tests in the paper). Hence, there are no significant computations, complex control flow, or use of memory. They test one specific language feature (context switch, mutex call, etc.) in isolation over and over again. These language features are fixed (e.g., acquiring and releasing a lock is a fixed cost). Therefore, unless the feature can be removed there is nothing to optimize at runtime. But these features cannot be removed without changing the meaning of the benchmark. If the feature is removed, the timing result would be 0. In fact, it was difficult to prevent the JIT from completely eliding some benchmarks because there are no side-effects.
+
+2. All of our benchmark results correlate across programming languages with and without JIT, indicating the JIT has completed any runtime optimizations (added this sentence to Section 8.1). Any large differences are explained by how a language implements a feature, not by how the compiler/JIT processes that feature. Section 8.1 discusses these points in detail.
+
+3. We also added a sentence about running all JIT-based programming language experiments for 30 minutes and there was no statistical difference, med/avg/std correlated with the short-run experiments, which seems a convincing argument that the benchmark has reached a steady state. If the JIT takes longer than 30 minutes to achieve its optimization goals, it is unlikely to be useful.
+
+4. The purpose of the performance section is not to draw conclusions about improvements. It is to contrast program-language implementation approaches. Section 8.1 talks about ramifications of certain design and implementation decisions with respect to overall performance. The only conclusion we draw about performance is:
+
+   Performance comparisons with other concurrent systems and languages show the Cforall approach is competitive across all basic operations, which translates directly into good performance in well-written applications with advanced control-flow.
+
+
+       I would also like you to provide the values for N for each benchmark run.
+
+Done.
+
+
+Referee 2 suggested
+
+   * don't start sentences with "However"
+
+However, there are numerous grammar sites on the web indicating "however" (a conjunction) at the start of a sentence is acceptable, e.g.:
+
+https://www.merriam-webster.com/words-at-play/can-you-start-a-sentence-with-however This is a stylistic choice, more than anything else, as we have a considerable body of evidence of writers using however to begin sentences, frequently with the meaning of "nevertheless."
Index: doc/proposals/ZeroCostPreemption.md
===================================================================
--- doc/proposals/ZeroCostPreemption.md	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/proposals/ZeroCostPreemption.md	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,16 @@
+## "Zero Cost" Preemption for Cforall ##
+
+Similar to "Zero Cost" exceptions, this is a proposal to support preemption with little to no runtime cost for the book-keeping. (Other than having exceptions).
+
+Preemption stops user threads at random locations and forces a context switch using a signal handler. Since this is not safe and/or does not make sense in many contexts, the runtime needs a system to disable interrupts for certain regions of code.
+
+Currently, Cforall uses _[kernel] thread-local storage_(TLS) to handle this, setting a flag to false when preemption should be disabled. This works on x86/x64 but only with a specific TLS model, and does not work with ARM. The problem is that if the loading of the TLS variable is not done in a single instruction, it allows a race condition, where user-threads could disable preemption for the wrong processor, i.e., be moved to a different processor and update the previous processor.
+
+The fix being worked on is to protect the specific TLS variable with a special function.
+
+## The Proposal ##
+A better approach, would be to re-use the Exception Handling Data structure to identify regions of code that do not allow preemption. These regions of code would be marked using the same mechanism which marks stack unwinding requirements.
+
+When the signal handler is called, it would search the stack similarly to how the stack is searched when an exception is thrown and do the context switch or not based on the result.
+
+This is an optimization, since signal handlers for preemption are already rare and costly but enabling/disabling interrupts is very common (1000x more common). Using the "Zero-Cost" exception mechanism, enabling/disabling interrupts should be free at runtime and the rare signal handler becomes more expensive.
Index: doc/proposals/function_type_change.md
===================================================================
--- doc/proposals/function_type_change.md	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/proposals/function_type_change.md	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,34 @@
+## Eliminate Variable Declarations in Function Type ##
+
+The parameters of a function had been living in the wrong place.
+
+As the function type has no relation with the actual declarations of the variables, but only the types of them, putting declarations in FunctionType is unnecessary.
+Meanwhile, in new-ast data model, the declaration nodes should be kept as unique as possible, since they semantically denote unique objects in the source code. Shared declarations often lead to undesirable behaviors, especially when weak references exist (reminder: currently weak references only point to declarations). They also pose difficulty for implementing correct _functional_ algorithms, as copying a declaration node _should_ always mean creating a new entity.
+In the programming language and type theory model, declarations are also never a part of function type; the functions `int f(int a)` and `int f(int b)` have the exact same type (int)->(int), and representing the type as (int a)->(int b) is misleading.
+
+
+## Summary of Changes ##
+
+- `ast::FunctionDecl`
+Now owns its parameter and return variables directly.
+
+- `ast::FunctionType`
+Parameter and return types are now pure types (no more decls)
+Forall clause is part of type information so it is still kept.
+
+- Unify.cc
+Renamed some functions to reflect the changes (decl -> type)
+
+- Convert.cpp
+Drop decls in function type, unless it is directly in function decl (move them to `FunctionDecl` params and returns)
+Add dummy variable decls while converting back.
+
+## Relevant Clean-up Work ##
+
+- CurrentObject.cpp
+No longer has weak references to type nodes and replaced by raw pointers. Using weak pointers does not accomplish anything since a non in-place mutation outside invalidates the current iterator anyway and an in-place mutation outside is still seen by the iterator with just a raw pointer.
+
+- Validate.cc
+`EnumAndPointerDecay` is redundant in `resolveTypeof` and therefore dropped.
+Note: this pass needs some structural change to accommodate the new function type representation.
+
Index: doc/refrat/refrat.tex
===================================================================
--- doc/refrat/refrat.tex	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/refrat/refrat.tex	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -11,6 +11,6 @@
 %% Created On       : Wed Apr  6 14:52:25 2016
 %% Last Modified By : Peter A. Buhr
-%% Last Modified On : Wed Jan 31 17:30:23 2018
-%% Update Count     : 108
+%% Last Modified On : Mon Oct  5 09:02:53 2020
+%% Update Count     : 110
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
@@ -30,36 +30,10 @@
 \usepackage{upquote}									% switch curled `'" to straight
 \usepackage{calc}
-\usepackage{xspace}
 \usepackage{varioref}									% extended references
-\usepackage{listings}									% format program code
 \usepackage[flushmargin]{footmisc}						% support label/reference in footnote
 \usepackage{latexsym}                                   % \Box glyph
 \usepackage{mathptmx}                                   % better math font with "times"
 \usepackage[usenames]{color}
-\input{common}                                          % common CFA document macros
-\usepackage[dvips,plainpages=false,pdfpagelabels,pdfpagemode=UseNone,colorlinks=true,pagebackref=true,linkcolor=blue,citecolor=blue,urlcolor=blue,pagebackref=true,breaklinks=true]{hyperref}
-\usepackage{breakurl}
-\renewcommand{\UrlFont}{\small\sf}
-
-\usepackage[pagewise]{lineno}
-\renewcommand{\linenumberfont}{\scriptsize\sffamily}
-\usepackage[firstpage]{draftwatermark}
-\SetWatermarkLightness{0.9}
-
-% Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore
-% removes it as a variable-name character so keywords in variables are highlighted. MUST APPEAR
-% AFTER HYPERREF.
-\renewcommand{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.075ex}}}
-
-\setlength{\topmargin}{-0.45in}							% move running title into header
-\setlength{\headsep}{0.25in}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\CFAStyle												% use default CFA format-style
-\lstnewenvironment{C++}[1][]                            % use C++ style
-{\lstset{language=C++,moredelim=**[is][\protect\color{red}]{®}{®}#1}}
-{}
-
+\newcommand{\CFALatin}{}
 % inline code ©...© (copyright symbol) emacs: C-q M-)
 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-.
@@ -69,9 +43,33 @@
 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^
 % math escape $...$ (dollar symbol)
+\input{common}                                          % common CFA document macros
+\usepackage[dvips,plainpages=false,pdfpagelabels,pdfpagemode=UseNone,colorlinks=true,pagebackref=true,linkcolor=blue,citecolor=blue,urlcolor=blue,pagebackref=true,breaklinks=true]{hyperref}
+\usepackage{breakurl}
+\renewcommand{\UrlFont}{\small\sf}
+
+\usepackage[pagewise]{lineno}
+\renewcommand{\linenumberfont}{\scriptsize\sffamily}
+\usepackage[firstpage]{draftwatermark}
+\SetWatermarkLightness{0.9}
+
+% Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore
+% removes it as a variable-name character so keywords in variables are highlighted. MUST APPEAR
+% AFTER HYPERREF.
+\renewcommand{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.075ex}}}
+
+\setlength{\topmargin}{-0.45in}							% move running title into header
+\setlength{\headsep}{0.25in}
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
+\CFAStyle												% use default CFA format-style
+\lstnewenvironment{C++}[1][]                            % use C++ style
+{\lstset{language=C++,moredelim=**[is][\protect\color{red}]{®}{®},#1}}
+{}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
 % Names used in the document.
-\newcommand{\Version}{\input{../../version}}
+\newcommand{\Version}{\input{build/version}}
 \newcommand{\Textbf}[2][red]{{\color{#1}{\textbf{#2}}}}
 \newcommand{\Emph}[2][red]{{\color{#1}\textbf{\emph{#2}}}}
Index: doc/theses/andrew_beach_MMath/glossaries.tex
===================================================================
--- doc/theses/andrew_beach_MMath/glossaries.tex	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/andrew_beach_MMath/glossaries.tex	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,36 @@
+% Various Glossary Definitions
+
+% Main glossary entries -- definitions of relevant terminology
+\newglossaryentry{computer}
+{
+name=computer,
+description={A programmable machine that receives input data,
+               stores and manipulates the data, and provides
+               formatted output}
+}
+
+% Nomenclature glossary entries -- New definitions, or unusual terminology
+\newglossary*{nomenclature}{Nomenclature}
+\newglossaryentry{dingledorf}
+{
+type=nomenclature,
+name=dingledorf,
+description={A person of supposed average intelligence who makes
+               incredibly brainless misjudgments}
+}
+
+% List of Abbreviations (abbreviations are from the glossaries-extra package)
+\newabbreviation{aaaaz}{AAAAZ}{American Association of Amateur Astronomers
+                               and Zoologists}
+
+% List of Symbols
+\newglossary*{symbols}{List of Symbols}
+\newglossaryentry{rvec}
+{
+name={$\mathbf{v}$},
+sort={label},
+type=symbols,
+description={Random vector: a location in n-dimensional Cartesian space,
+             where each dimensional component is determined by a random
+             process}
+}
Index: doc/theses/andrew_beach_MMath/thesis.tex
===================================================================
--- doc/theses/andrew_beach_MMath/thesis.tex	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/theses/andrew_beach_MMath/thesis.tex	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -34,37 +34,6 @@
 \usepackage[toc,abbreviations]{glossaries-extra}
 
-% Main glossary entries -- definitions of relevant terminology
-\newglossaryentry{computer}
-{
-name=computer,
-description={A programmable machine that receives input data,
-               stores and manipulates the data, and provides
-               formatted output}
-}
-
-% Nomenclature glossary entries -- New definitions, or unusual terminology
-\newglossary*{nomenclature}{Nomenclature}
-\newglossaryentry{dingledorf}
-{
-type=nomenclature,
-name=dingledorf,
-description={A person of supposed average intelligence who makes incredibly
-               brainless misjudgments}
-}
-
-% List of Abbreviations (abbreviations are from the glossaries-extra package)
-\newabbreviation{aaaaz}{AAAAZ}{American Association of Amature Astronomers
-               and Zoologists}
-
-% List of Symbols
-\newglossary*{symbols}{List of Symbols}
-\newglossaryentry{rvec}
-{
-name={$\mathbf{v}$},
-sort={label},
-type=symbols,
-description={Random vector: a location in n-dimensional Cartesian space, where
-               each dimensional component is determined by a random process}
-}
+% Define all the glossaries.
+\input{glossaries}
 
 % Generate the glossaries defined above.
Index: doc/theses/fangren_yu_COOP_S20/Makefile
===================================================================
--- doc/theses/fangren_yu_COOP_S20/Makefile	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/theses/fangren_yu_COOP_S20/Makefile	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -46,5 +46,4 @@
 # File Dependencies #
 
-
 ${DOCUMENT} : ${BASE}.ps
 	ps2pdf $<
Index: doc/theses/fangren_yu_COOP_S20/Report.tex
===================================================================
--- doc/theses/fangren_yu_COOP_S20/Report.tex	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/theses/fangren_yu_COOP_S20/Report.tex	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -1,3 +1,3 @@
-\documentclass[twoside,12pt]{article}
+\documentclass[twoside,11pt]{article}
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -11,10 +11,17 @@
 \usepackage[labelformat=simple,aboveskip=0pt,farskip=0pt]{subfig}
 \renewcommand{\thesubfigure}{\alph{subfigure})}
+\usepackage[flushmargin]{footmisc}						% support label/reference in footnote
 \usepackage{latexsym}                                   % \Box glyph
 \usepackage{mathptmx}                                   % better math font with "times"
+\usepackage[toc]{appendix}								% article does not have appendix
 \usepackage[usenames]{color}
 \input{common}                                          % common CFA document macros
 \usepackage[dvips,plainpages=false,pdfpagelabels,pdfpagemode=UseNone,colorlinks=true,pagebackref=true,linkcolor=blue,citecolor=blue,urlcolor=blue,pagebackref=true,breaklinks=true]{hyperref}
 \usepackage{breakurl}
+\urlstyle{sf}
+
+% reduce spacing
+\setlist[itemize]{topsep=5pt,parsep=0pt}% global
+\setlist[enumerate]{topsep=5pt,parsep=0pt}% global
 
 \usepackage[pagewise]{lineno}
@@ -26,4 +33,5 @@
 \renewcommand{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.075ex}}}
 \newcommand{\NOTE}{\textbf{NOTE}}
+\newcommand{\TODO}[1]{{\color{Purple}#1}}
 
 \setlength{\topmargin}{-0.45in}							% move running title into header
@@ -32,14 +40,10 @@
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
-\CFADefaults
+\CFAStyle												% CFA code-style for all languages
 \lstset{
-language=C++,											% make C++ the default language
-escapechar=\$,											% LaTeX escape in CFA code
-moredelim=**[is][\color{red}]{`}{`},
+language=C++,moredelim=**[is][\color{red}]{@}{@}		% make C++ the default language
 }% lstset
-\lstMakeShortInline@%
 \lstnewenvironment{C++}[1][]                            % use C++ style
-{\lstset{language=C++,moredelim=**[is][\protect\color{red}]{`}{`},#1}}
-{}
+{\lstset{language=C++,moredelim=**[is][\color{red}]{@}{@}}\lstset{#1}}{}
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -84,178 +88,158 @@
 \section{Overview}
 
-cfa-cc is the reference compiler for the \CFA programming language, which is a non-
-object-oriented extension to C.
-\CFA attempts to introduce productive modern programming language features to C
-while maintaining as much backward-compatibility as possible, so that most existing C
-programs can seamlessly work with \CFA.
-
-Since the \CFA project was dated back to the early 2000s, and only restarted in the past
-few years, there is a significant amount of legacy code in the current compiler codebase,
-with little proper documentation available. This becomes a difficulty while developing new
-features based on the previous implementations, and especially while diagnosing
-problems.
-
-Currently, the \CFA team is also facing another problem: bad compiler performance. For
-the development of a new programming language, writing a standard library is an
-important part. The incompetence of the compiler causes building the library files to take
-tens of minutes, making iterative development and testing almost impossible. There is
-ongoing effort to rewrite the core data structure of the compiler to overcome the
-performance issue, but many bugs may appear during the work, and lack of documentation
-makes debugging extremely difficult.
-
-This developer's reference will be continuously improved and eventually cover the
-compiler codebase. For now, the focus is mainly on the parts being rewritten, and also the
-performance bottleneck, namely the resolution algorithm. It is aimed to provide new
-developers to the project enough guidance and clarify the purposes and behavior of certain
-functions which are not mentioned in the previous \CFA research papers.
+@cfa-cc@ is the reference compiler for the \CFA programming language, which is a non-object-oriented extension to C.
+\CFA attempts to introduce productive modern programming language features to C while maintaining as much backward-compatibility as possible, so that most existing C programs can seamlessly work with \CFA.
+
+Since the \CFA project dates back to the early 2000s, and only restarted in the past few years, there is a significant amount of legacy code in the current compiler codebase with little documentation.
+The lack of documentation makes it difficult to develop new features from the current implementation and diagnose problems.
+
+Currently, the \CFA team is also facing poor compiler performance.
+For the development of a new programming language, writing standard libraries is an important component.
+The slow compiler causes building of the library files to take tens of minutes, making iterative development and testing almost impossible.
+There is an ongoing effort to rewrite the core data-structure of the compiler to overcome the performance issue, but many bugs have appeared during this work, and lack of documentation is hampering debugging.
+
+This developer's reference manual begins the documentation and should be continuously im\-proved until it eventually covers the entire compiler codebase.
+For now, the focus is mainly on the parts being rewritten, and also the primary performance bottleneck, namely the resolution algorithm.
+Its aim is to provide new project developers with guidance in understanding the codebase, and clarify the purpose and behaviour of certain functions that are not mentioned in the previous \CFA research papers~\cite{Bilson03,Ditchfield92,Moss19}.
 
 
 \section{Compiler Framework}
 
+\CFA source code is first transformed into an abstract syntax tree (AST) by the parser before being analyzed by the compiler.
+
+
 \subsection{AST Representation}
 
-Source code input is first transformed into abstract syntax tree (AST) representation by the
-parser before analyzed by the compiler.
-
-There are 4 major categories of AST nodes used by the compiler, along with some derived
-structures.
-
-\subsubsection{Declaration nodes}
+
+There are 4 major categories of AST nodes used by the compiler, along with some derived structures.
+
+\subsubsection{Declaration Nodes}
 
 A declaration node represents either of:
 \begin{itemize}
 \item
-Type declaration: struct, union, typedef or type parameter (see Appendix A.3)
-\item
-Variable declaration
-\item
-Function declaration
+type declaration: @struct@, @union@, @typedef@ or type parameter (see \VRef[Appendix]{s:KindsTypeParameters})
+\item
+variable declaration
+\item
+function declaration
 \end{itemize}
 Declarations are introduced by standard C declarations, with the usual scoping rules.
-In addition, declarations can also be introduced by the forall clause (which is the origin
-of \CFA's name):
+In addition, declarations can also be qualified by the \lstinline[language=CFA]@forall@ clause (which is the origin of \CFA's name):
 \begin{cfa}
-forall (<$\emph{TypeParameterList}$> | <$\emph{AssertionList}$>)
+forall ( <$\emph{TypeParameterList}$> | <$\emph{AssertionList}$> )
 	$\emph{declaration}$
 \end{cfa}
-Type parameters in \CFA are similar to \CC template type parameters. The \CFA
-declaration
+Type parameters in \CFA are similar to \CC template type parameters.
+The \CFA declaration
 \begin{cfa}
 forall (dtype T) ...
 \end{cfa}
-behaves similarly as the \CC template declaration
+behaves similarly to the \CC template declaration
 \begin{C++}
 template <typename T> ...
 \end{C++}
 
-Assertions are a distinctive feature of \CFA: contrary to the \CC template where
-arbitrary functions and operators can be used in a template definition, in a \CFA
-parametric function, operations on parameterized types must be declared in assertions.
-
+Assertions are a distinctive feature of \CFA, similar to \emph{interfaces} in D and Go, and \emph{traits} in Rust.
+Contrary to the \CC template where arbitrary functions and operators can be used in a template definition, in a \CFA parametric function, operations on parameterized types must be declared in assertions.
 Consider the following \CC template:
 \begin{C++}
-template <typename T> int foo(T t) {
-	return bar(t) + baz(t);
+@template@ <typename T> T foo( T t ) {
+	return t + t * t;
 }
 \end{C++}
-Unless bar and baz are also parametric functions taking any argument type, they must be
-declared in the assertions, or otherwise the code will not compile:
+where there are no explicit requirements on the type @T@.
+Therefore, the \CC compiler must deduce what operators are required during textual (macro) expansion of the template at each usage.
+As a result, templates cannot be compiled.
+\CFA assertions specify restrictions on type parameters:
 \begin{cfa}
-forall (dtype T | { int bar(T); int baz(t); }) int foo (T t) {
-	return bar(t) + baz(t);
+forall( dtype T | @{ T ?+?( T, T ); T ?*?( T, T ) }@ ) T foo ( T t ) {
+	return t + t * t;
 }
 \end{cfa}
-Assertions are written using the usual function declaration syntax. The scope of type
-parameters and assertions is the following declaration.
-
-\subsubsection{Type nodes}
-
-A type node represents the type of an object or expression.
-Named types reference the corresponding type declarations. The type of a function is its
-function pointer type (same as standard C).
-With the addition of type parameters, named types may contain a list of parameter values
-(actual parameter types).
-
-\subsubsection{Statement nodes}
-
-Statement nodes represent the statements in the program, including basic expression
-statements, control flows and blocks.
+Assertions are written using the usual \CFA function declaration syntax.
+Only types with operators ``@+@'' and ``@*@'' work with this function, and the function prototype is sufficient to allow separate compilation.
+
+Type parameters and assertions are used in the following compiler data-structures.
+
+
+\subsubsection{Type Nodes}
+
+Type nodes represent the type of an object or expression.
+Named types reference the corresponding type declarations.
+The type of a function is its function pointer type (same as standard C).
+With the addition of type parameters, named types may contain a list of parameter values (actual parameter types).
+
+
+\subsubsection{Statement Nodes}
+
+Statement nodes represent the executable statements in the program, including basic expression statements, control flows and blocks.
 Local declarations (within a block statement) are represented as declaration statements.
 
-\subsubsection{Expression nodes}
-
-Some expressions are represented differently in the compiler before and after resolution
-stage:
+
+\subsubsection{Expression Nodes}
+
+Some expressions are represented differently before and after the resolution stage:
 \begin{itemize}
 \item
-Name expressions: NameExpr pre-resolution, VariableExpr post-resolution
-\item
-Member expressions: UntypedMemberExpr pre-resolution, MemberExpr post-resolution
-\item
-Function call expressions (including overloadable operators): UntypedExpr pre-resolution, ApplicationExpr post-resolution
+Name expressions: @NameExpr@ pre-resolution, @VariableExpr@ post-resolution
+\item
+Member expressions: @UntypedMemberExpr@ pre-resolution, @MemberExpr@ post-resolution
+\item
+\begin{sloppypar}
+Function call expressions (including overloadable operators): @UntypedExpr@ pre-resolution, @ApplicationExpr@ post-resolution
+\end{sloppypar}
 \end{itemize}
-The pre-resolution representations contain only the symbols. Post-resolution results link
-them to the actual variable and function declarations.
+The pre-resolution representation contains only the symbols.
+Post-resolution links them to the actual variable and function declarations.
 
 
 \subsection{Compilation Passes}
 
-Compilation steps are implemented as passes, which follows a general structural recursion
-pattern on the syntax tree.
-
-The basic work flow of compilation passes follows preorder and postorder traversal on
-tree data structure, implemented with visitor pattern, and can be loosely described with
-the following pseudocode:
-\begin{C++}
-Pass::visit (node_t node) {
-	previsit(node);
-	if (visit_children)
+Compilation steps are implemented as passes, which follows a general structural recursion pattern on the syntax tree.
+
+The basic workflow of compilation passes follows preorder and postorder traversal on the AST data-structure, implemented with the visitor pattern, and can be loosely described with the following pseudocode:
+\begin{C++}
+Pass::visit( node_t node ) {
+	previsit( node );
+	if ( visit_children )
 		for each child of node:
-			child.accept(this);
-	postvisit(node);
+			child.accept( this );
+	postvisit( node );
 }
 \end{C++}
-Operations in previsit() happen in preorder (top to bottom) and operations in
-postvisit() happen in postorder (bottom to top). The precise order of recursive
-operations on child nodes can be found in @Common/PassVisitor.impl.h@ (old) and
-@AST/Pass.impl.hpp@ (new).
-Implementations of compilation passes need to follow certain conventions:
+Operations in @previsit@ happen in preorder (top to bottom) and operations in @postvisit@ happen in postorder (bottom to top).
+The precise order of recursive operations on child nodes can be found in @Common/PassVisitor.impl.h@ (old) and @AST/Pass.impl.hpp@ (new).
+
+Implementations of compilation passes follow certain conventions:
 \begin{itemize}
 \item
-Passes \textbf{should not} directly override the visit method (Non-virtual Interface
-principle); if a pass desires different recursion behavior, it should set
-@visit_children@ to false and perform recursive calls manually within previsit or
-postvisit procedures. To enable this option, inherit from @WithShortCircuiting@ mixin.
-\item
-previsit may mutate the node but \textbf{must not} change the node type or return null.
-\item
-postvisit may mutate the node, reconstruct it to a different node type, or delete it by
-returning null.
+Passes \textbf{should not} directly override the visit method (Non-virtual Interface principle);
+if a pass desires different recursion behaviour, it should set @visit_children@ to false and perform recursive calls manually within previsit or postvisit procedures.
+To enable this option, inherit from the @WithShortCircuiting@ mixin.
+\item
+previsit may mutate the node but \textbf{must not} change the node type or return @nullptr@.
+\item
+postvisit may mutate the node, reconstruct it to a different node type, or delete it by returning @nullptr@.
 \item
 If the previsit or postvisit method is not defined for a node type, the step is skipped.
-If the return type is declared as void, the original node is returned by default. These
-behaviors are controlled by template specialization rules; see
-@Common/PassVisitor.proto.h@ (old) and @AST/Pass.proto.hpp@ (new) for details.
+If the return type is declared as @void@, the original node is returned by default.
+These behaviours are controlled by template specialization rules;
+see @Common/PassVisitor.proto.h@ (old) and @AST/@ @Pass.proto.hpp@ (new) for details.
 \end{itemize}
 Other useful mixin classes for compilation passes include:
 \begin{itemize}
 \item
-WithGuards allows saving values of variables and restore automatically upon exiting
-the current node.
-\item
-WithVisitorRef creates a wrapped entity of current pass (the actual argument
-passed to recursive calls internally) for explicit recursion, usually used together
-with WithShortCircuiting.
-\item
-WithSymbolTable gives a managed symbol table with built-in scoping rule handling
-(\eg on entering and exiting a block statement)
+@WithGuards@ allows saving and restoring variable values automatically upon entering/exiting the current node.
+\item
+@WithVisitorRef@ creates a wrapped entity for the current pass (the actual argument passed to recursive calls internally) for explicit recursion, usually used together with @WithShortCircuiting@.
+\item
+@WithSymbolTable@ gives a managed symbol table with built-in scoping-rule handling (\eg on entering and exiting a block statement)
 \end{itemize}
-\NOTE: If a pass extends the functionality of another existing pass, due to \CC overloading
-resolution rules, it \textbf{must} explicitly introduce the inherited previsit and postvisit procedures
-to its own scope, or otherwise they will not be picked up by template resolution:
+\NOTE: If a pass extends the functionality of another existing pass, due to \CC overloading resolution rules, it \textbf{must} explicitly introduce the inherited previsit and postvisit procedures to its own scope, or otherwise they are not picked up by template resolution:
 \begin{C++}
 class Pass2: public Pass1 {
-	using Pass1::previsit;
-	using Pass1::postvisit;
+	@using Pass1::previsit;@
+	@using Pass1::postvisit;@
 	// new procedures
 }
@@ -263,76 +247,74 @@
 
 
-\subsection{Data Structure Change WIP (new-ast)}
-
-It has been observed that excessive copying of syntax tree structures accounts for a
-majority of computation cost and significantly slows down the compiler. In the previous
-implementation of the syntax tree, every internal node has a unique parent; therefore all
-copies are required to duplicate everything down to the bottom. A new, experimental
-re-implementation of the syntax tree (source under directory AST/ hereby referred to as
-``new-ast'') attempts to overcome this issue with a functional approach that allows sharing
-of common sub-structures and only makes copies when necessary.
-
-The core of new-ast is a customized implementation of smart pointers, similar to
-@std::shared_ptr@ and @std::weak_ptr@ in \CC standard library. Reference counting is
-used to detect sharing and allows optimization. For a purely functional (a.k.a. immutable)
-data structure, all mutations are modelled by shallow copies along the path of mutation.
+\subsection{Data Structure Change (new-ast)}
+
+It has been observed that excessive copying of syntax tree structures accounts for a majority of computation cost and significantly slows down the compiler.
+In the previous implementation of the syntax tree, every internal node has a unique parent;
+therefore all copies are required to duplicate the entire subtree.
+A new, experimental re-implementation of the syntax tree (source under directory @AST/@ hereby referred to as ``new-ast'') attempts to overcome this issue with a functional approach that allows sharing of common sub-structures and only makes copies when necessary.
+
+The core of new-ast is a customized implementation of smart pointers, similar to @std::shared_ptr@ and @std::weak_ptr@ in the \CC standard library.
+Reference counting is used to detect sharing and allow certain optimizations.
+For a purely functional (immutable) data-structure, all mutations are modelled by shallow copies along the path of mutation.
 With reference counting optimization, unique nodes are allowed to be mutated in place.
-This however, may potentially introduce some complications and bugs; a few issues are
-discussed near the end of this section.
-
-\subsubsection{Source: AST/Node.hpp}
-
-class @ast::Node@ is the base class of all new-ast node classes, which implements
-reference counting mechanism. Two different counters are recorded: ``strong'' reference
-count for number of nodes semantically owning it; ``weak'' reference count for number of
-nodes holding a mere reference and only need to observe changes.
-class @ast::ptr_base@ is the smart pointer implementation and also takes care of
-resource management.
-
-Direct access through the smart pointer is read-only. A mutable access should be obtained
-by calling shallowCopy or mutate as below.
-
-Currently, the weak pointers are only used to reference declaration nodes from a named
-type, or a variable expression. Since declaration nodes are intended to denote unique
-entities in the program, weak pointers always point to unique (unshared) nodes. This may
-change in the future, and weak references to shared nodes may introduce some problems;
+This, however, may potentially introduce some complications and bugs;
+a few issues are discussed near the end of this section.
+
+
+\subsubsection{Source: \lstinline{AST/Node.hpp}}
+
+Class @ast::Node@ is the base class of all new-ast node classes, which implements the reference-counting mechanism.
+Two different counters are recorded: ``strong'' reference count for number of nodes semantically owning it;
+``weak'' reference count for number of nodes holding a mere reference and only need to observe changes.
+Class @ast::ptr_base@ is the smart pointer implementation and also takes care of resource management.
+
+Direct access through the smart pointer is read-only.
+A mutable access should be obtained by calling @shallowCopy@ or @mutate@ as described below.
+
+Currently, the weak pointers are only used to reference declaration nodes from a named type, or a variable expression.
+Since declaration nodes are intended to denote unique entities in the program, weak pointers always point to unique (unshared) nodes.
+This property may change in the future, and weak references to shared nodes may introduce some problems;
 see mutate function below.
 
-All node classes should always use smart pointers in the structure and should not use raw
-pointers.
-
+All node classes should always use smart pointers in structure definitions versus raw pointers.
+Function
 \begin{C++}
 void ast::Node::increment(ref_type ref)
 \end{C++}
-Increments this node's strong or weak reference count.
+increments this node's strong or weak reference count.
+Function
 \begin{C++}
 void ast::Node::decrement(ref_type ref, bool do_delete = true)
 \end{C++}
-Decrements this node's strong or weak reference count. If strong reference count reaches
-zero, the node is deleted by default.
-\NOTE: Setting @do_delete@ to false may result in a detached node. Subsequent code should
-manually delete the node or assign it to a strong pointer to prevent memory leak.
+decrements this node's strong or weak reference count.
+If strong reference count reaches zero, the node is deleted.
+\NOTE: Setting @do_delete@ to false may result in a detached node.
+Subsequent code should manually delete the node or assign it to a strong pointer to prevent memory leak.
+
 Reference counting functions are internally called by @ast::ptr_base@.
+Function
 \begin{C++}
 template<typename node_t>
 node_t * shallowCopy(const node_t * node)
 \end{C++}
-Returns a mutable, shallow copy of node: all child pointers are pointing to the same child
-nodes.
+returns a mutable, shallow copy of node: all child pointers are pointing to the same child nodes.
+Function
 \begin{C++}
 template<typename node_t>
 node_t * mutate(const node_t * node)
 \end{C++}
-If node is unique (strong reference count is 1), returns a mutable pointer to the same node.
-Otherwise, returns shallowCopy(node).
-It is an error to mutate a shared node that is weak-referenced. Currently this does not
-happen. The problem may appear once weak pointers to shared nodes (\eg expression
-nodes) are used; special care will be needed.
-
-\NOTE: This naive uniqueness check may not be sufficient in some cases. A discussion of the
-issue is presented at the end of this section.
+returns a mutable pointer to the same node, if the node is unique (strong reference count is 1);
+otherwise, it returns @shallowCopy(node)@.
+It is an error to mutate a shared node that is weak-referenced.
+Currently this does not happen.
+A problem may appear once weak pointers to shared nodes (\eg expression nodes) are used;
+special care is needed.
+
+\NOTE: This naive uniqueness check may not be sufficient in some cases.
+A discussion of the issue is presented at the end of this section.
+Functions
 \begin{C++}
 template<typename node_t, typename parent_t, typename field_t, typename assn_t>
-const node_t * mutate_field(const node_t * node, field_t parent_t::*field, assn_t && val)
+const node_t * mutate_field(const node_t * node, field_t parent_t::* field, assn_t && val)
 \end{C++}
 \begin{C++}
@@ -342,10 +324,10 @@
 		field_t && val)
 \end{C++}
-Helpers for mutating a field on a node using pointer to member (creates shallow copy
-when necessary).
-
-\subsubsection{Issue: Undetected sharing}
-
-The @mutate@ behavior described above has a problem: deeper shared nodes may be
+are helpers for mutating a field on a node using a pointer to data member (creates shallow copy when necessary).
+
+
+\subsubsection{Issue: Undetected Sharing}
+
+The @mutate@ behaviour described above has a problem: deeper shared nodes may be
 mistakenly considered as unique. \VRef[Figure]{f:DeepNodeSharing} shows how the problem could arise:
 \begin{figure}
@@ -355,71 +337,59 @@
 \label{f:DeepNodeSharing}
 \end{figure}
-Suppose that we are working on the tree rooted at P1, which
-is logically the chain P1-A-B and P2 is irrelevant, and then
-mutate(B) is called. The algorithm considers B as unique since
-it is only directly owned by A. However, the other tree P2-A-B
-indirectly shares the node B and is therefore wrongly mutated.
-
-To partly address this problem, if the mutation is called higher up the tree, a chain
-mutation helper can be used:
-
-\subsubsection{Source: AST/Chain.hpp}
-
+Given the tree rooted at P1, which is logically the chain P1-A-B, and P2 is irrelevant, assume @mutate(B)@ is called.
+The algorithm considers B as unique since it is only directly owned by A.
+However, the other tree P2-A-B indirectly shares the node B and is therefore wrongly mutated.
+
+To partly address this problem, if the mutation is called higher up the tree, a chain mutation helper can be used.
+
+\subsubsection{Source: \lstinline{AST/Chain.hpp}}
+
+Function
 \begin{C++}
 template<typename node_t, Node::ref_type ref_t>
 auto chain_mutate(ptr_base<node_t, ref_t> & base)
 \end{C++}
-This function returns a chain mutator handle which takes pointer-to-member to go down
-the tree while creating shallow copies as necessary; see @struct _chain_mutator@ in the
-source code for details.
-
-For example, in the above diagram, if mutation of B is wanted while at P1, the call using
-@chain_mutate@ looks like the following:
+returns a chain mutator handle that takes pointer-to-member to go down the tree, while creating shallow copies as necessary;
+see @struct _chain_mutator@ in the source code for details.
+
+For example, in the above diagram, if mutation of B is wanted while at P1, the call using @chain_mutate@ looks like the following:
 \begin{C++}
 chain_mutate(P1.a)(&A.b) = new_value_of_b;
 \end{C++}
-Note that if some node in chain mutate is shared (therefore shallow copied), it implies that
-every node further down will also be copied, thus correctly executing the functional
-mutation algorithm. This example code creates copies of both A and B and performs
-mutation on the new nodes, so that the other tree P2-A-B is untouched.
-However, if a pass traverses down to node B and performs mutation, for example, in
-@postvisit(B)@, information on sharing higher up is lost. Since the new-ast structure is only in
-experimental use with the resolver algorithm, which mostly rebuilds the tree bottom-up,
-this issue does not actually happen. It should be addressed in the future when other
-compilation passes are migrated to new-ast and many of them contain procedural
-mutations, where it might cause accidental mutations to other logically independent trees
-(\eg common sub-expression) and become a bug.
-
-
-\vspace*{20pt} % FIX ME, spacing problem with this heading ???
+\NOTE: if some node in chain mutate is shared (therefore shallow copied), it implies that every node further down is also copied, thus correctly executing the functional mutation algorithm.
+This example code creates copies of both A and B and performs mutation on the new nodes, so that the other tree P2-A-B is untouched.
+However, if a pass traverses down to node B and performs mutation, for example, in @postvisit(B)@, information on sharing higher up is lost.
+Since the new-ast structure is only in experimental use with the resolver algorithm, which mostly rebuilds the tree bottom-up, this issue does not actually happen.
+It should be addressed in the future when other compilation passes are migrated to new-ast and many of them contain procedural mutations, where it might cause accidental mutations to other logically independent trees (\eg common sub-expression) and become a bug.
+
+
 \section{Compiler Algorithm Documentation}
 
-This documentation currently covers most of the resolver, data structures used in variable
-and expression resolution, and a few directly related passes. Later passes involving code
-generation is not included yet; documentation for those will be done afterwards.
+This compiler algorithm documentation covers most of the resolver, data structures used in variable and expression resolution, and a few directly related passes.
+Later passes involving code generation are not included yet;
+documentation for those will be done later.
+
 
 \subsection{Symbol Table}
 
-\NOTE: For historical reasons, the symbol table data structure was called ``indexer'' in the
-old implementation. Hereby we will be using the name SymbolTable everywhere.
-The symbol table stores a mapping from names to declarations and implements a similar
-name space separation rule, and the same scoping rules in standard C.\footnote{ISO/IEC 9899:1999, Sections 6.2.1 and 6.2.3} The difference in
-name space rule is that typedef aliases are no longer considered ordinary identifiers.
-In addition to C tag types (struct, union, enum), \CFA introduces another tag type, trait,
-which is a named collection of assertions.
-
-\subsubsection{Source: AST/SymbolTable.hpp}
-
-\subsubsection{Source: SymTab/Indexer.h}
-
+\NOTE: For historical reasons, the symbol-table data-structure is called @indexer@ in the old implementation.
+Hereby, the name is changed to @SymbolTable@.
+The symbol table stores a mapping from names to declarations, implements a similar name-space separation rule, and provides the same scoping rules as standard C.\footnote{ISO/IEC 9899:1999, Sections 6.2.1 and 6.2.3.}
+The difference in name-space rule is that @typedef@ aliases are no longer considered ordinary identifiers.
+In addition to C tag-types (@struct@, @union@, @enum@), \CFA introduces another tag type, @trait@, which is a named collection of assertions.
+
+
+\subsubsection{Source: \lstinline{AST/SymbolTable.hpp}}
+
+Function
 \begin{C++}
 SymbolTable::addId(const DeclWithType * decl)
 \end{C++}
-Since \CFA allows overloading of variables and functions, ordinary identifier names need
-to be mangled. The mangling scheme is closely based on the Itanium \CC ABI,\footnote{\url{https://itanium-cxx-abi.github.io/cxx-abi/abi.html}, Section 5.1} while
-making adaptations to \CFA specific features, mainly assertions and overloaded variables
-by type. Naming conflicts are handled by mangled names; lookup by name returns a list of
-declarations with the same literal identifier name.
-
+provides name mangling of identifiers, since \CFA allows overloading of variables and functions.
+The mangling scheme is closely based on the Itanium \CC ABI,\footnote{\url{https://itanium-cxx-abi.github.io/cxx-abi/abi.html}, Section 5.1} while making adaptations to \CFA specific features, mainly assertions and overloaded variables by type.
+
+Naming conflicts are handled by mangled names;
+lookup by name returns a list of declarations with the same identifier name.
+Functions
 \begin{C++}
 SymbolTable::addStruct(const StructDecl * decl)
@@ -428,176 +398,175 @@
 SymbolTable::addTrait(const TraitDecl * decl)
 \end{C++}
-Adds a tag type declaration to the symbol table.
+add a tag-type declaration to the symbol table.
+Function
 \begin{C++}
 SymbolTable::addType(const NamedTypeDecl * decl)
 \end{C++}
-Adds a typedef alias to the symbol table.
-
-\textbf{C Incompatibility Note}: Since Cforall allows using struct, union and enum type names
-without the keywords, typedef names and tag type names cannot be disambiguated by
-syntax rules. Currently the compiler puts them together and disallows collision. The
-following program is valid C but not valid Cforall:
+adds a @typedef@ alias to the symbol table.
+
+\textbf{C Incompatibility Note}: Since \CFA allows using @struct@, @union@ and @enum@ type-names without a prefix keyword, as in \CC, @typedef@ names and tag-type names cannot be disambiguated by syntax rules.
+Currently the compiler puts them together and disallows collision.
+The following program is valid C but invalid \CFA (and \CC):
 \begin{C++}
 struct A {};
+typedef int A; // gcc: ok, cfa: Cannot redefine typedef A
+struct A sa; // C disambiguates via struct prefix
+A ia;
+\end{C++}
+In practice, such usage is extremely rare, and hence, this change (as in \CC) has minimal impact on existing C programs.
+The declaration
+\begin{C++}
+struct A {};
+typedef struct A A; // A is an alias for struct A
+A a;
+struct A b;
+\end{C++}
+is not an error because the alias name is identical to the original.
+Finally, the following program is allowed in \CFA:
+\begin{C++}
 typedef int A;
-// gcc: ok, cfa: Cannot redefine typedef A
-\end{C++}
-In actual practices however, such usage is extremely rare, and typedef struct A A; is
-not considered an error, but silently discarded. Therefore, we expect this change to have
-minimal impact on existing C programs.
-Meanwhile, the following program is allowed in Cforall:
-\begin{C++}
-typedef int A;
-void A();
+void A(); // name mangled
 // gcc: A redeclared as different kind of symbol, cfa: ok
 \end{C++}
+because the function name is mangled.
+
 
 \subsection{Type Environment and Unification}
 
-The core of parametric type resolution algorithm.
-Type Environment organizes type parameters in \textbf{equivalent classes} and maps them to
-actual types. Unification is the algorithm that takes two (possibly parametric) types and
-parameter mappings and attempts to produce a common type by matching the type
-environments.
+The following core ideas underlie the parametric type-resolution algorithm.
+A type environment organizes type parameters into \textbf{equivalence classes} and maps them to actual types.
+Unification is the algorithm that takes two (possibly parametric) types and parameter mappings, and attempts to produce a common type by matching information in the type environments.
 
 The unification algorithm is recursive in nature and runs in two different modes internally:
 \begin{itemize}
 \item
-\textbf{Exact} unification mode requires equivalent parameters to match perfectly;
-\item
-\textbf{Inexact} unification mode allows equivalent parameters to be converted to a
-common type.
+Exact unification mode requires equivalent parameters to match perfectly.
+\item
+Inexact unification mode allows equivalent parameters to be converted to a common type.
 \end{itemize}
-For a pair of matching parameters (actually, their equivalent classes), if either side is open
-(not bound to a concrete type yet), they are simply combined.
-
-Within inexact mode, types are allowed to differ on their cv-qualifiers; additionally, if a
-type never appear either in parameter list or as the base type of a pointer, it may also be
-widened (i.e. safely converted). As Cforall currently does not implement subclassing similar
-to object-oriented languages, widening conversions are on primitive types only, for
-example the conversion from int to long.
-
-The need for two unification modes come from the fact that parametric types are
-considered compatible only if all parameters are exactly the same (not just compatible).
-Pointer types also behaves similarly; in fact, they may be viewed as a primitive kind of
-parametric types. @int*@ and @long*@ are different types, just like @vector(int)@ and
-@vector(long)@ are, for the parametric type @vector(T)@.
-
-The resolver should use the following ``@public@'' functions:\footnote{
-Actual code also tracks assertions on type parameters; those extra arguments are omitted here for
-conciseness.}
-
-
-\subsubsection{Source: ResolvExpr/Unify.cc}
-
-\begin{C++}
-bool unify(const Type *type1, const Type *type2, TypeEnvironment &env,
-OpenVarSet &openVars, const SymbolTable &symtab, Type *&commonType)
-\end{C++}
-Attempts to unify @type1@ and @type2@ with current type environment.
-
-If operation succeeds, @env@ is modified by combining the equivalence classes of matching
-parameters in @type1@ and @type2@, and their common type is written to commonType.
-
-If operation fails, returns false.
-\begin{C++}
-bool typesCompatible(const Type * type1, const Type * type2, const
-SymbolTable &symtab, const TypeEnvironment &env)
-bool typesCompatibleIgnoreQualifiers(const Type * type1, const Type *
-type2, const SymbolTable &symtab, const TypeEnvironment &env)
-\end{C++}
-
-Determines if type1 and type2 can possibly be the same type. The second version ignores
-the outermost cv-qualifiers if present.\footnote{
-In const \lstinline@int * const@, only the second \lstinline@const@ is ignored.}
-
-The call has no side effect.
-
-\NOTE: No attempts are made to widen the types (exact unification is used), although the
-function names may suggest otherwise. E.g. @typesCompatible(int, long)@ returns false.
+For a pair of matching parameters (actually, their equivalent classes), if either side is open (not bound to a concrete type yet), they are combined.
+
+Within the inexact mode, types are allowed to differ on their cv-qualifiers (\eg @const@, @volatile@, \etc);
+additionally, if a type never appears either in a parameter list or as the base type of a pointer, it may also be widened (\ie safely converted).
+As \CFA currently does not implement subclassing as in object-oriented languages, widening conversions are only on the primitive types, \eg conversion from @int@ to @long int@.
+
+The need for two unification modes comes from the fact that parametric types are considered compatible only if all parameters are exactly the same (not just compatible).
+Pointer types also behave similarly;
+in fact, they may be viewed as a primitive kind of parametric types.
+@int *@ and @long *@ are different types, just like @vector(int)@ and @vector(long)@ are, for the parametric type @*(T)@ / @vector(T)@, respectively.
+
+The resolver uses the following @public@ functions:\footnote{
+Actual code also tracks assertions on type parameters; those extra arguments are omitted here for conciseness.}
+
+
+\subsubsection{Source: \lstinline{ResolvExpr/Unify.cc}}
+
+Function
+\begin{C++}
+bool unify(const Type * type1, const Type * type2, TypeEnvironment & env,
+	OpenVarSet & openVars, const SymbolTable & symtab, Type *& commonType)
+\end{C++}
+returns a boolean indicating if the unification succeeds or fails after attempting to unify @type1@ and @type2@ within current type environment.
+If the unify succeeds, @env@ is modified by combining the equivalence classes of matching parameters in @type1@ and @type2@, and their common type is written to @commonType@.
+If the unify fails, nothing changes.
+Functions
+\begin{C++}
+bool typesCompatible(const Type * type1, const Type * type2, const SymbolTable & symtab,
+	const TypeEnvironment & env)
+bool typesCompatibleIgnoreQualifiers(const Type * type1, const Type * type2,
+	const SymbolTable & symtab, const TypeEnvironment & env)
+\end{C++}
+return a boolean indicating if types @type1@ and @type2@ can possibly be the same type.
+The second version ignores the outermost cv-qualifiers if present.\footnote{
+In \lstinline@const int * const@, only the second \lstinline@const@ is ignored.}
+These functions have no side effects.
+
+\NOTE: No attempt is made to widen the types (exact unification is used), although the function names may suggest otherwise, \eg @typesCompatible(int, long)@ returns false.
 
 
 \subsection{Expression Resolution}
 
-The design of the current version of expression resolver is outlined in the Ph.D. Thesis from
-Aaron Moss~\cite{Moss19}.
-
+The design of the current version of expression resolver is outlined in the Ph.D.\ thesis by Aaron Moss~\cite{Moss19}.
 A summary of the resolver algorithm for each expression type is presented below.
 
-All overloadable operators are modelled as function calls. For a function call,
-interpretations of the function and arguments are found recursively. Then the following
-steps produce a filtered list of valid interpretations:
+All overloadable operators are modelled as function calls.
+For a function call, interpretations of the function and arguments are found recursively.
+Then the following steps produce a filtered list of valid interpretations:
 \begin{enumerate}
 \item
-From all possible combinations of interpretations of the function and arguments,
-those where argument types may be converted to function parameter types are
-considered valid.
+From all possible combinations of interpretations of the function and arguments, those where argument types may be converted to function parameter types are considered valid.
 \item
 Valid interpretations with the minimum sum of argument costs are kept.
 \item
-Argument costs are then discarded; the actual cost for the function call expression is
-the sum of conversion costs from the argument types to parameter types.
-\item
-For each return type, the interpretations with satisfiable assertions are then sorted
-by actual cost computed in step 3. If for a given type, the minimum cost
-interpretations are not unique, it is said that for that return type the interpretation
-is ambiguous. If the minimum cost interpretation is unique but contains an
-ambiguous argument, it is also considered ambiguous.
+\label{p:argcost}
+Argument costs are then discarded; the actual cost for the function call expression is the sum of conversion costs from the argument types to parameter types.
+\item
+\label{p:returntype}
+For each return type, the interpretations with satisfiable assertions are then sorted by actual cost computed in step~\ref{p:argcost}.
+If for a given type, the minimum cost interpretations are not unique, that return type is ambiguous.
+If the minimum cost interpretation is unique but contains an ambiguous argument, it is also ambiguous.
 \end{enumerate}
-Therefore, for each return type, the resolver produces either of:
+Therefore, for each return type, the resolver produces:
 \begin{itemize}
 \item
-No alternatives
-\item
-A single valid alternative
-\item
-An ambiguous alternative
+no alternatives
+\item
+a single valid alternative
+\item
+an ambiguous alternative
 \end{itemize}
-Note that an ambiguous alternative may be discarded at the parent expressions because a
-different return type matches better for the parent expressions.
-
-The non-overloadable expressions in Cforall are: cast expressions, address-of (unary @&@)
-expressions, short-circuiting logical expressions (@&&@, @||@) and ternary conditional
-expression (@?:@).
-
-For a cast expression, the convertible argument types are kept. Then the result is selected
-by lowest argument cost, and further by lowest conversion cost to target type. If the lowest
-cost is still not unique, or an ambiguous argument interpretation is selected, the cast
-expression is ambiguous. In an expression statement, the top level expression is implicitly
-cast to void.
+\NOTE: an ambiguous alternative may be discarded at the parent expressions because a different return type matches better for the parent expressions.
+
+The \emph{non}-overloadable expressions in \CFA are: cast expressions, address-of (unary @&@) expressions, short-circuiting logical expressions (@&&@, @||@) and ternary conditional expression (@?:@).
+
+For a cast expression, the convertible argument types are kept.
+Then the result is selected by lowest argument cost, and further by lowest conversion cost to target type.
+If the lowest cost is still not unique or an ambiguous argument interpretation is selected, the cast expression is ambiguous.
+In an expression statement, the top level expression is implicitly cast to @void@.
 
 For an address-of expression, only lvalue results are kept and the minimum cost is selected.
 
-For logical expressions @&&@ and @||@, arguments are implicitly cast to bool, and follow the rule
-of cast expression as above.
-
-For the ternary conditional expression, the condition is implicitly cast to bool, and the
-branch expressions must have compatible types. Each pair of compatible branch
-expression types produce a possible interpretation, and the cost is defined as the sum of
-expression costs plus the sum of conversion costs to the common type.
-
-TODO: Write a specification for expression costs.
+For logical expressions @&&@ and @||@, arguments are implicitly cast to @bool@, and follow the rules for cast expressions above.
+
+For the ternary conditional expression, the condition is implicitly cast to @bool@, and the branch expressions must have compatible types.
+Each pair of compatible branch expression types produce a possible interpretation, and the cost is defined as the sum of the expression costs plus the sum of conversion costs to the common type.
+
+
+\subsection{Conversion and Application Cost}
+
+There were some unclear parts in the previous documentation in the cost system, as described in the Moss thesis~\cite{Moss19}, section 4.1.2.
+Some clarification are presented in this section.
+
+\begin{enumerate}
+\item
+Conversion to a type denoted by parameter may incur additional cost if the match is not exact.
+For example, if a function is declared to accept @(T, T)@ and receives @(int, long)@, @T@ is deduced as @long@ and an additional widening conversion cost is added for @int@ to @T@.
+
+\item
+The specialization level of a function is the sum of the least depth of an appearance of a type parameter (counting pointers, references and parameterized types), plus the number of assertions.
+A higher specialization level is favoured if argument conversion costs are equal.
+
+\item
+Coercion of pointer types is only allowed in explicit cast expressions;
+the only allowed implicit pointer casts are adding qualifiers to the base type and casting to @void *@, and these count as safe conversions.
+Note that an implicit cast from @void *@ to other pointer types is no longer valid, as opposed to standard C.
+\end{enumerate}
 
 
 \subsection{Assertion Satisfaction}
 
-The resolver tries to satisfy assertions on expressions only when it is needed: either while
-selecting from multiple alternatives of a same result type for a function call (step 4 of
-resolving function calls), or upon reaching the top level of an expression statement.
-
-Unsatisfiable alternatives are discarded. Satisfiable alternatives receive \textbf{implicit
-parameters}: in Cforall, parametric functions are designed such that they can be compiled
-separately, as opposed to \CC templates which are only compiled at instantiation. Given a
-parametric function definition:
+The resolver tries to satisfy assertions on expressions only when it is needed: either while selecting from multiple alternatives of a same result type for a function call (step \ref{p:returntype} of resolving function calls) or upon reaching the top level of an expression statement.
+
+Unsatisfiable alternatives are discarded.
+Satisfiable alternatives receive \textbf{implicit parameters}: in \CFA, parametric functions may be separately compiled, as opposed to \CC templates which are only compiled at instantiation.
+Given the parametric function-definition:
 \begin{C++}
 forall (otype T | {void foo(T);})
 void bar (T t) { foo(t); }
 \end{C++}
-The function bar does not know which @foo@ to call when compiled without knowing the call
-site, so it requests a function pointer to be passed as an extra argument. At the call site,
-implicit parameters are automatically inserted by the compiler.
-
-\textbf{TODO}: Explain how recursive assertion satisfaction and polymorphic recursion work.
-
+the function @bar@ does not know which @foo@ to call when compiled without knowing the call site, so it requests a function pointer to be passed as an extra argument.
+At the call site, implicit parameters are automatically inserted by the compiler.
+
+Implementation of implicit parameters is discussed in \VRef[Appendix]{s:ImplementationParametricFunctions}.
 
 \section{Tests}
@@ -605,27 +574,26 @@
 \subsection{Test Suites}
 
-Automatic test suites are located under the @tests/@ directory. A test case consists of an
-input CFA source file (name ending with @.cfa@), and an expected output file located
-in @.expect/@ directory relative to the source file, with the same file name ending with @.txt@.
-So a test named @tuple/tupleCast@ has the following files, for example:
+Automatic test suites are located under the @tests/@ directory.
+A test case consists of an input CFA source file (suffix @.cfa@), and an expected output file located in the @tests/.expect/@ directory, with the same file name ending with suffix @.txt@.
+For example, the test named @tests/tuple/tupleCast.cfa@ has the following files:
 \begin{C++}
 tests/
-..     tuple/
-......     .expect/
-..........       tupleCast.txt
-......     tupleCast.cfa
-\end{C++}
-If compilation fails, the error output is compared to the expect file. If compilation succeeds,
-the built program is run and its output compared to the expect file.
-To run the tests, execute the test script @test.py@ under the @tests/@ directory, with a list of
-test names to be run, or @--all@ to run all tests. The test script reports test cases
-fail/success, compilation time and program run time.
+	tuple/
+		.expect/
+			tupleCast.txt
+		tupleCast.cfa
+\end{C++}
+If compilation fails, the error output is compared to the expect file.
+If the compilation succeeds but does not generate an executable, the compilation output is compared to the expect file.
+If the compilation succeeds and generates an executable, the executable is run and its output is compared to the expect file.
+To run the tests, execute the test script @test.py@ under the @tests/@ directory, with a list of test names to be run, or @--all@ (or @make all-tests@) to run all tests.
+The test script reports test cases fail/success, compilation time and program run time.
+To see all the options available for @test.py@, use the @--help@ option.
 
 
 \subsection{Performance Reports}
 
-To turn on performance reports, pass @-S@ flag to the compiler.
-
-3 kinds of performance reports are available:
+To turn on performance reports, pass the @-XCFA -S@ flag to the compiler.
+Three kinds of performance reports are available:
 \begin{enumerate}
 \item
@@ -639,6 +607,124 @@
 @Common/Stats/Counter.h@.
 \end{enumerate}
-It is suggested to run performance tests with optimized build (@g++@ flag @-O3@)
-
+It is suggested to run performance tests with optimization (@g++@ flag @-O3@).
+
+
+\appendix
+\section{Appendix}
+
+\subsection{Kinds of Type Parameters}
+\label{s:KindsTypeParameters}
+
+A type parameter in a @forall@ clause has 3 kinds:
+\begin{enumerate}[listparindent=0pt]
+\item
+@dtype@: any data type (built-in or user defined) that is not a concrete type.
+
+A non-concrete type is an incomplete type such as an opaque type or pointer/reference with an implicit (pointer) size and implicitly generated reference and dereference operations.
+\item
+@otype@: any data type (built-in or user defined) that is a concrete type.
+
+A concrete type is a complete type, \ie types that can be used to create a variable, which also implicitly asserts the existence of default and copy constructors, assignment, and destructor\footnote{\CFA implements the same automatic resource management (RAII) semantics as \CC.}.
+% \item
+% @ftype@: any function type.
+% 
+% @ftype@ provides two purposes:
+% \begin{itemize}
+% \item
+% Differentiate function pointer from data pointer because (in theory) some systems have different sizes for these pointers.
+% \item
+% Disallow a function pointer to match an overloaded data pointer, since variables and functions can have the same names.
+% \end{itemize}
+
+\item
+@ttype@: tuple (variadic) type.
+
+Restricted to the type for the last parameter in a function, it provides a type-safe way to implement variadic functions.
+Note however, that it has certain restrictions, as described in the implementation section below.
+\end{enumerate}
+
+
+\subsection{GNU C Nested Functions}
+
+\CFA is designed to be mostly compatible with GNU C, an extension to ISO C99 and C11 standards. The \CFA compiler also implements some language features by GCC extensions, most notably nested functions.
+
+In ISO C, function definitions are not allowed to be nested. GCC allows nested functions with full lexical scoping. The following example is taken from GCC documentation\footnote{\url{https://gcc.gnu.org/onlinedocs/gcc/Nested-Functions.html}}:
+\begin{C++}
+void bar( int * array, int offset, int size ) {
+	int access( int * array, int index ) { return array[index + offset]; }
+	int i;
+	/* ... */
+	for ( i = 0; i < size; i++ )
+		/* ... */ access (array, i) /* ... */
+}
+\end{C++}
+GCC nested functions behave identically to \CC lambda functions with default by-reference capture (stack-allocated, lifetime ends upon exiting the declared block), while also being passable as arguments with standard function-pointer types.
+
+
+\subsection{Implementation of Parametric Functions}
+\label{s:ImplementationParametricFunctions}
+
+\CFA implements parametric functions using the implicit parameter approach: required assertions are passed to the callee by function pointers;
+size of a parametric type must also be known if referenced directly (\ie not as a pointer). 
+
+The implementation is similar to the one from Scala\footnote{\url{https://www.scala-lang.org/files/archive/spec/2.13/07-implicits.html}}, with some notable differences in resolution:
+\begin{enumerate}
+\item
+All types, variables, and functions are candidates for implicit parameters.
+\item
+The parameter (assertion) name must match the actual declarations.
+\end{enumerate}
+
+For example, the \CFA function declaration
+\begin{cfa}
+forall( otype T | { int foo( T, int ); } )
+int bar(T);
+\end{cfa}
+after implicit parameter expansion, has the actual signature\footnote{\textbf{otype} also requires the type to have constructor and destructor, which are the first two function pointers preceding the one for \textbf{foo}.}
+\begin{C++}
+int bar( T, size_t, void (*)(T&), void (*)(T&), int (*)(T, int) );
+\end{C++}
+The implicit parameter approach has an apparent issue: when the satisfying declaration is also parametric, it may require its own implicit parameters too.
+That also causes the supplied implicit parameter to have a different \textbf{actual} type than the \textbf{nominal} type, so it cannot be passed directly.
+Therefore, a wrapper with matching actual type must be created, and it is here where GCC nested functions are used internally by the compiler.
+
+Consider the following program:
+\begin{cfa}
+int assertion(int);
+
+forall( otype T | { int assertion(T); } )
+void foo(T);
+
+forall(otype T | { void foo(T); } )
+void bar(T t) {
+	foo(t);
+}
+\end{cfa}
+The \CFA compiler translates the program to non-parametric form\footnote{In the final code output, \lstinline@T@ needs to be replaced by an opaque type, and arguments must be accessed by a frame pointer offset table, due to the unknown sizes. The presented code here is simplified for better understanding.}
+\begin{C++}
+// ctor, dtor and size arguments are omitted
+void foo(T, int (*)(T));
+
+void bar(T t, void (*foo)(T)) {
+	foo(t);
+}
+\end{C++}
+However, when @bar(1)@ is called, @foo@ cannot be directly provided as an argument:
+\begin{C++}
+bar(1, foo); // WRONG: foo has different actual type
+\end{C++}
+and an additional step is required:
+\begin{C++}
+{
+	void _foo_wrapper(int t) {
+		foo( t, assertion );
+	}
+	bar( 1, _foo_wrapper );
+}
+\end{C++}
+Nested assertions and implicit parameter creation may continue indefinitely.
+This issue is a limitation of implicit parameter implementation.
+In particular, polymorphic variadic recursion must be structural (\ie the number of arguments decreases in any possible recursive calls), otherwise code generation gets into an infinite loop.
+The \CFA compiler sets a limit on assertion depth and reports an error if assertion resolution does not terminate within the limit (as for \lstinline[language=C++]@templates@ in \CC).
 
 \bibliographystyle{plain}
Index: doc/theses/thierry_delisle_PhD/code/Makefile
===================================================================
--- doc/theses/thierry_delisle_PhD/code/Makefile	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,22 +1,0 @@
-
-
-CXXFLAGS = -O3 -g -Wall -Wextra -std=c++17
-LDFLAGS = -pthread -latomic
-
-push:
-	clang++ relaxed_list.cpp -g -Wall -Wextra -std=c++17 -fsyntax-only &&  rsync -av relaxed_list.cpp relaxed_list.hpp utils.hpp assert.hpp scale.sh plg7b:~/workspace/sched/.
-
-relaxed_list: $(firstword $(MAKEFILE_LIST)) | build
-	clang++ relaxed_list.cpp $(CXXFLAGS) $(LDFLAGS) -lpng -MMD -MF build/$(@).d -o $(@)
-
--include build/relaxed_list.d
-
-layout.ast: $(firstword $(MAKEFILE_LIST)) | build
-	clang++ relaxed_list_layout.cpp $(CXXFLAGS) -MMD -MF build/$(@).d -MT $(@) -E -o build/$(@).ii
-	clang++ -Xclang -fdump-record-layouts -fsyntax-only $(CXXFLAGS) build/$(@).ii > build/layout.ast.raw
-	cat build/$(@).raw > $(@)
-
--include build/layout.ast.d
-
-build:
-	mkdir -p build
Index: doc/theses/thierry_delisle_PhD/code/assert.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/assert.hpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,22 +1,0 @@
-#pragma once
-
-#ifndef NDEBUG
-#include <cassert>
-#include <cstdlib>
-
-#define sstr(s) #s
-#define xstr(s) sstr(s)
-
-extern const char * __my_progname;
-
-#define assertf(cond, ...) ({             \
-	if(!(cond)) {                       \
-		fprintf(stderr, "%s: " __FILE__ ":" xstr(__LINE__) ": %s: Assertion '" xstr(cond) "' failed.\n", __my_progname, __PRETTY_FUNCTION__); \
-		fprintf(stderr, __VA_ARGS__); \
-		fprintf(stderr, "\n"); \
-		std::abort();                 \
-	}                                   \
-})
-#else
-#define assertf(cond, ...)
-#endif
Index: doc/theses/thierry_delisle_PhD/code/bitbench/select.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/bitbench/select.cpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,186 +1,0 @@
-
-#include "../utils.hpp"
-
-void consume(int i, int j) __attribute__((noinline));
-void consume(int i, int j) {
-	asm volatile("":: "rm" (i), "rm" (i) );
-}
-
-static inline unsigned rand_bit_sw(unsigned rnum, size_t mask) {
-	unsigned bit = mask ? rnum % __builtin_popcountl(mask) : 0;
-	uint64_t v = mask;   // Input value to find position with rank r.
-	unsigned int r = bit + 1;// Input: bit's desired rank [1-64].
-	unsigned int s;      // Output: Resulting position of bit with rank r [1-64]
-	uint64_t a, b, c, d; // Intermediate temporaries for bit count.
-	unsigned int t;      // Bit count temporary.
-
-	// Do a normal parallel bit count for a 64-bit integer,
-	// but store all intermediate steps.
-	a =  v - ((v >> 1) & ~0UL/3);
-	b = (a & ~0UL/5) + ((a >> 2) & ~0UL/5);
-	c = (b + (b >> 4)) & ~0UL/0x11;
-	d = (c + (c >> 8)) & ~0UL/0x101;
-
-
-	t = (d >> 32) + (d >> 48);
-	// Now do branchless select!
-	s  = 64;
-	s -= ((t - r) & 256) >> 3; r -= (t & ((t - r) >> 8));
-	t  = (d >> (s - 16)) & 0xff;
-	s -= ((t - r) & 256) >> 4; r -= (t & ((t - r) >> 8));
-	t  = (c >> (s - 8)) & 0xf;
-	s -= ((t - r) & 256) >> 5; r -= (t & ((t - r) >> 8));
-	t  = (b >> (s - 4)) & 0x7;
-	s -= ((t - r) & 256) >> 6; r -= (t & ((t - r) >> 8));
-	t  = (a >> (s - 2)) & 0x3;
-	s -= ((t - r) & 256) >> 7; r -= (t & ((t - r) >> 8));
-	t  = (v >> (s - 1)) & 0x1;
-	s -= ((t - r) & 256) >> 8;
-	return s - 1;
-}
-
-static inline unsigned rand_bit_hw(unsigned rnum, size_t mask) {
-	unsigned bit = mask ? rnum % __builtin_popcountl(mask) : 0;
-	uint64_t picked = _pdep_u64(1ul << bit, mask);
-	return picked ? __builtin_ctzl(picked) : 0;
-}
-
-struct TLS {
-	Random rng = { 6 };
-} tls;
-
-const unsigned numLists = 64;
-
-static inline void blind() {
-	int i = tls.rng.next() % numLists;
-	int j = tls.rng.next() % numLists;
-
-	consume(i, j);
-}
-
-std::atomic_size_t list_mask[7];
-static inline void bitmask_sw() {
-	unsigned i, j;
-	{
-		// Pick two lists at random
-		unsigned num = ((numLists - 1) >> 6) + 1;
-
-		unsigned ri = tls.rng.next();
-		unsigned rj = tls.rng.next();
-
-		unsigned wdxi = (ri >> 6u) % num;
-		unsigned wdxj = (rj >> 6u) % num;
-
-		size_t maski = list_mask[wdxi].load(std::memory_order_relaxed);
-		size_t maskj = list_mask[wdxj].load(std::memory_order_relaxed);
-
-		unsigned bi = rand_bit_sw(ri, maski);
-		unsigned bj = rand_bit_sw(rj, maskj);
-
-		i = bi | (wdxi << 6);
-		j = bj | (wdxj << 6);
-	}
-
-	consume(i, j);
-}
-
-static inline void bitmask_hw() {
-	#if !defined(__BMI2__)
-		#warning NO bmi2 for pdep rand_bit
-		return;
-	#endif
-	unsigned i, j;
-	{
-		// Pick two lists at random
-		unsigned num = ((numLists - 1) >> 6) + 1;
-
-		unsigned ri = tls.rng.next();
-		unsigned rj = tls.rng.next();
-
-		unsigned wdxi = (ri >> 6u) % num;
-		unsigned wdxj = (rj >> 6u) % num;
-
-		size_t maski = list_mask[wdxi].load(std::memory_order_relaxed);
-		size_t maskj = list_mask[wdxj].load(std::memory_order_relaxed);
-
-		unsigned bi = rand_bit_hw(ri, maski);
-		unsigned bj = rand_bit_hw(rj, maskj);
-
-		i = bi | (wdxi << 6);
-		j = bj | (wdxj << 6);
-	}
-
-	consume(i, j);
-}
-
-struct {
-	const unsigned mask = 7;
-	const unsigned depth = 3;
-	const uint64_t indexes = 0x0706050403020100;
-	uint64_t masks( unsigned node ) {
-		return 0xff00ffff00ff;
-	}
-} snzm;
-static inline void sparsemask() {
-	#if !defined(__BMI2__)
-		#warning NO bmi2 for sparse mask
-		return;
-	#endif
-	unsigned i, j;
-	{
-		// Pick two random number
-		unsigned ri = tls.rng.next();
-		unsigned rj = tls.rng.next();
-
-		// Pick two nodes from it
-		unsigned wdxi = ri & snzm.mask;
-		unsigned wdxj = rj & snzm.mask;
-
-		// Get the masks from the nodes
-		size_t maski = snzm.masks(wdxi);
-		size_t maskj = snzm.masks(wdxj);
-
-		uint64_t idxsi = _pext_u64(snzm.indexes, maski);
-		uint64_t idxsj = _pext_u64(snzm.indexes, maskj);
-
-		auto pi = __builtin_popcountll(maski);
-		auto pj = __builtin_popcountll(maskj);
-
-		ri = pi ? ri & ((pi >> 3) - 1) : 0;
-		rj = pj ? rj & ((pj >> 3) - 1) : 0;
-
-		unsigned bi = (idxsi >> (ri << 3)) & 0xff;
-		unsigned bj = (idxsj >> (rj << 3)) & 0xff;
-
-		i = (bi << snzm.depth) | wdxi;
-		j = (bj << snzm.depth) | wdxj;
-	}
-
-	consume(i, j);
-}
-
-template<typename T>
-void benchmark( T func, const std::string & name ) {
-	std::cout << "Starting " << name << std::endl;
-	auto before = Clock::now();
-	const int N = 250'000'000;
-	for(int i = 0; i < N; i++) {
-		func();
-	}
-	auto after = Clock::now();
-	duration_t durr = after - before;
-	double duration = durr.count();
-	std::cout << "Duration(s) : " << duration << std::endl;
-	std::cout << "Ops/sec     : " << uint64_t(N / duration) << std::endl;
-	std::cout << "ns/Op       : " << double(duration * 1'000'000'000.0 / N) << std::endl;
-	std::cout << std::endl;
-}
-
-int main() {
-	std::cout.imbue(std::locale(""));
-
-	benchmark(blind, "Blind guess");
-	benchmark(bitmask_sw, "Dense bitmask");
-	benchmark(bitmask_hw, "Dense bitmask with Parallel Deposit");
-	benchmark(sparsemask, "Parallel Extract bitmask");
-}
Index: doc/theses/thierry_delisle_PhD/code/bts.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/bts.cpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,279 +1,0 @@
-#include <array>
-#include <iomanip>
-#include <iostream>
-#include <locale>
-#include <string>
-#include <thread>
-#include <vector>
-
-#include <getopt.h>
-#include <unistd.h>
-#include <sys/sysinfo.h>
-
-#include "utils.hpp"
-
-// ================================================================================================
-//                        UTILS
-// ================================================================================================
-
-struct local_stat_t {
-	size_t cnt = 0;
-};
-
-struct global_stat_t {
-	std::atomic_size_t cnt = { 0 };
-};
-
-void atomic_max(std::atomic_size_t & target, size_t value) {
-	for(;;) {
-		size_t expect = target.load(std::memory_order_relaxed);
-		if(value <= expect) return;
-		bool success = target.compare_exchange_strong(expect, value);
-		if(success) return;
-	}
-}
-
-void atomic_min(std::atomic_size_t & target, size_t value) {
-	for(;;) {
-		size_t expect = target.load(std::memory_order_relaxed);
-		if(value >= expect) return;
-		bool success = target.compare_exchange_strong(expect, value);
-		if(success) return;
-	}
-}
-
-void tally_stats(global_stat_t & global, local_stat_t & local) {
-	global.cnt   += local.cnt;
-}
-
-void waitfor(double & duration, barrier_t & barrier, std::atomic_bool & done) {
-	std::cout << "Starting" << std::endl;
-	auto before = Clock::now();
-	barrier.wait(0);
-
-	while(true) {
-		usleep(100000);
-		auto now = Clock::now();
-		duration_t durr = now - before;
-		if( durr.count() > duration ) {
-			done = true;
-			break;
-		}
-		std::cout << "\r" << std::setprecision(4) << durr.count();
-		std::cout.flush();
-	}
-
-	barrier.wait(0);
-	auto after = Clock::now();
-	duration_t durr = after - before;
-	duration = durr.count();
-	std::cout << "\rClosing down" << std::endl;
-}
-
-void waitfor(double & duration, barrier_t & barrier, const std::atomic_size_t & count) {
-	std::cout << "Starting" << std::endl;
-	auto before = Clock::now();
-	barrier.wait(0);
-
-	while(true) {
-		usleep(100000);
-		size_t c = count.load();
-		if( c == 0 ) {
-			break;
-		}
-		std::cout << "\r" << c;
-		std::cout.flush();
-	}
-
-	barrier.wait(0);
-	auto after = Clock::now();
-	duration_t durr = after - before;
-	duration = durr.count();
-	std::cout << "\rClosing down" << std::endl;
-}
-
-void print_stats(double duration, unsigned nthread, global_stat_t & global) {
-	std::cout << "Done" << std::endl;
-
-	size_t ops = global.cnt;
-	size_t ops_sec = size_t(double(ops) / duration);
-	size_t ops_thread = ops_sec / nthread;
-	auto dur_nano = duration_cast<std::nano>(1.0);
-
-	std::cout << "Duration      : " << duration << "s\n";
-	std::cout << "ns/Op         : " << ( dur_nano / ops_thread )<< "\n";
-	std::cout << "Ops/sec/thread: " << ops_thread << "\n";
-	std::cout << "Ops/sec       : " << ops_sec << "\n";
-	std::cout << "Total ops     : " << ops << "\n";
-}
-
-static inline bool bts(std::atomic_size_t & target, size_t bit ) {
-	/*
-	int result = 0;
-	asm volatile(
-		"LOCK btsq %[bit], %[target]\n\t"
-		:"=@ccc" (result)
-		: [target] "m" (target), [bit] "r" (bit)
-	);
- 	return result != 0;
-	/*/
-	size_t mask = 1ul << bit;
-	size_t ret = target.fetch_or(mask, std::memory_order_relaxed);
-	return (ret & mask) != 0;
-	//*/
-}
-
-static inline bool btr(std::atomic_size_t & target, size_t bit ) {
-	/*
-	int result = 0;
-	asm volatile(
-		"LOCK btrq %[bit], %[target]\n\t"
-		:"=@ccc" (result)
-		: [target] "m" (target), [bit] "r" (bit)
-	);
- 	return result != 0;
-	/*/
-	size_t mask = 1ul << bit;
-	size_t ret = target.fetch_and(~mask, std::memory_order_relaxed);
-	return (ret & mask) != 0;
-	//*/
-}
-
-// ================================================================================================
-//                        EXPERIMENTS
-// ================================================================================================
-
-// ================================================================================================
-__attribute__((noinline)) void runPingPong_body(
-	std::atomic<bool>& done,
-	local_stat_t & local,
-	std::atomic_size_t & target,
-	size_t id
-) {
-	while(__builtin_expect(!done.load(std::memory_order_relaxed), true)) {
-
-		bool ret;
-		ret = bts(target, id);
-		assert(!ret);
-
-		// -----
-
-		ret = btr(target, id);
-		assert(ret);
-		local.cnt++;
-	}
-}
-
-void run(unsigned nthread, double duration) {
-	// Barrier for synchronization
-	barrier_t barrier(nthread + 1);
-
-	// Data to check everything is OK
-	global_stat_t global;
-
-	// Flag to signal termination
-	std::atomic_bool done  = { false };
-
-	std::cout << "Initializing ";
-	// List being tested
-	std::atomic_size_t word = { 0 };
-	{
-		std::thread * threads[nthread];
-		unsigned i = 1;
-		for(auto & t : threads) {
-			t = new std::thread([&done, &word, &barrier, &global](unsigned tid) {
-				local_stat_t local;
-
-				// affinity(tid);
-
-				barrier.wait(tid);
-
-				// EXPERIMENT START
-
-				runPingPong_body(done, local, word, tid - 1);
-
-				// EXPERIMENT END
-
-				barrier.wait(tid);
-
-				tally_stats(global, local);
-			}, i++);
-		}
-
-		waitfor(duration, barrier, done);
-
-		for(auto t : threads) {
-			t->join();
-			delete t;
-		}
-	}
-
-	print_stats(duration, nthread, global);
-}
-
-// ================================================================================================
-
-int main(int argc, char * argv[]) {
-
-	double duration   = 5.0;
-	unsigned nthreads = 2;
-
-	std::cout.imbue(std::locale(""));
-
-	for(;;) {
-		static struct option options[] = {
-			{"duration",  required_argument, 0, 'd'},
-			{"nthreads",  required_argument, 0, 't'},
-			{0, 0, 0, 0}
-		};
-
-		int idx = 0;
-		int opt = getopt_long(argc, argv, "d:t:", options, &idx);
-
-		std::string arg = optarg ? optarg : "";
-		size_t len = 0;
-		switch(opt) {
-			case -1:
-				if(optind != argc) {
-					std::cerr << "Too many arguments " << argc << " " << idx << std::endl;
-					goto usage;
-				}
-				goto run;
-			// Numeric Arguments
-			case 'd':
-				try {
-					duration = std::stod(optarg, &len);
-					if(len != arg.size()) { throw std::invalid_argument(""); }
-				} catch(std::invalid_argument &) {
-					std::cerr << "Duration must be a valid double, was " << arg << std::endl;
-					goto usage;
-				}
-				break;
-			case 't':
-				try {
-					nthreads = std::stoul(optarg, &len);
-					if(len != arg.size() || nthreads > (8 * sizeof(size_t))) { throw std::invalid_argument(""); }
-				} catch(std::invalid_argument &) {
-					std::cerr << "Number of threads must be a positive integer less than or equal to " << sizeof(size_t) * 8 << ", was " << arg << std::endl;
-					goto usage;
-				}
-				break;
-			// Other cases
-			default: /* ? */
-				std::cerr << opt << std::endl;
-			usage:
-				std::cerr << "Usage: " << argv[0] << ": [options]" << std::endl;
-				std::cerr << std::endl;
-				std::cerr << "  -d, --duration=DURATION  Duration of the experiment, in seconds" << std::endl;
-				std::cerr << "  -t, --nthreads=NTHREADS  Number of kernel threads" << std::endl;
-				std::exit(1);
-		}
-	}
-	run:
-
-	check_cache_line_size();
-
-	std::cout << "Running " << nthreads << " threads for " << duration << " seconds" << std::endl;
-	run(nthreads, duration);
-	return 0;
-}
Index: doc/theses/thierry_delisle_PhD/code/bts_test.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/bts_test.cpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,32 +1,0 @@
-#include <cassert>
-#include <iostream>
-
-bool bts(volatile size_t & target, size_t bit ) {
-	bool result = false;
-	asm volatile(
-		"LOCK btsq %[bit], %[target]\n\t"
-		:"=c" (result)
-		: [target] "m" (target), [bit] "r" (bit)
-	);
- 	return result;
-}
-
-bool btr(volatile size_t & target, size_t bit ) {
-	bool result = false;
-	asm volatile(
-		"LOCK btrq %[bit], %[target]\n\t"
-		:"=c" (result)
-		: [target] "m" (target), [bit] "r" (bit)
-	);
- 	return result;
-}
-
-int main() {
-	volatile size_t i = 0;
-	std::cout << std::hex << i << std::endl;
-	assert(bts(i, 31));
-	std::cout << std::hex << i << std::endl;
-	assert(btr(i, 31));
-	std::cout << std::hex << i << std::endl;
-	return 0;
-}
Index: doc/theses/thierry_delisle_PhD/code/links.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/links.hpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,122 +1,0 @@
-#pragma once
-
-#include "assert.hpp"
-#include "utils.hpp"
-
-template<typename node_t>
-struct _LinksFields_t {
-	node_t * prev = nullptr;
-	node_t * next = nullptr;
-	volatile unsigned long long ts = 0;
-	unsigned hint = (unsigned)-1;
-};
-
-template<typename node_t>
-class __attribute__((aligned(128))) intrusive_queue_t {
-public:
-	typedef spinlock_t lock_t;
-
-	struct stat {
-		ssize_t diff = 0;
-		size_t  push = 0;
-		size_t  pop  = 0;
-	};
-
-private:
-	struct sentinel_t {
-		_LinksFields_t<node_t> _links;
-	};
-
-public:
-	lock_t lock;
-
-private:
-	sentinel_t before;
-	sentinel_t after;
-
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Winvalid-offsetof"
-	static constexpr auto fields_offset = offsetof( node_t, _links );
-#pragma GCC diagnostic pop
-public:
-	intrusive_queue_t()
-		: before{{ nullptr, tail() }}
-		, after {{ head(), nullptr }}
-	{
-		/* paranoid */ assert((reinterpret_cast<uintptr_t>( head() ) + fields_offset) == reinterpret_cast<uintptr_t>(&before));
-		/* paranoid */ assert((reinterpret_cast<uintptr_t>( tail() ) + fields_offset) == reinterpret_cast<uintptr_t>(&after ));
-		/* paranoid */ assert(head()->_links.prev == nullptr);
-		/* paranoid */ assert(head()->_links.next == tail() );
-		/* paranoid */ assert(tail()->_links.next == nullptr);
-		/* paranoid */ assert(tail()->_links.prev == head() );
-		/* paranoid */ assert(sizeof(*this) == 128);
-		/* paranoid */ assert((intptr_t(this) % 128) == 0);
-	}
-
-	~intrusive_queue_t() = default;
-
-	inline node_t * head() const {
-		node_t * rhead = reinterpret_cast<node_t *>(
-			reinterpret_cast<uintptr_t>( &before ) - fields_offset
-		);
-		assert(rhead);
-		return rhead;
-	}
-
-	inline node_t * tail() const {
-		node_t * rtail = reinterpret_cast<node_t *>(
-			reinterpret_cast<uintptr_t>( &after ) - fields_offset
-		);
-		assert(rtail);
-		return rtail;
-	}
-
-	inline bool push(node_t * node) {
-		assert(lock);
-		assert(node->_links.ts != 0);
-		node_t * tail = this->tail();
-
-		node_t * prev = tail->_links.prev;
-		// assertf(node->_links.ts >= prev->_links.ts,
-		// 	"New node has smaller timestamp: %llu < %llu", node->_links.ts, prev->_links.ts);
-		node->_links.next = tail;
-		node->_links.prev = prev;
-		prev->_links.next = node;
-		tail->_links.prev = node;
-
-		if(before._links.ts == 0l) {
-			before._links.ts = node->_links.ts;
-			assert(node->_links.prev == this->head());
-			return true;
-		}
-		return false;
-	}
-
-	inline std::pair<node_t *, bool> pop() {
-		assert(lock);
-		node_t * head = this->head();
-		node_t * tail = this->tail();
-
-		node_t * node = head->_links.next;
-		node_t * next = node->_links.next;
-		if(node == tail) return {nullptr, false};
-
-		head->_links.next = next;
-		next->_links.prev = head;
-
-		if(next == tail) {
-			before._links.ts = 0l;
-			return {node, true};
-		}
-		else {
-			assert(next->_links.ts != 0);
-			before._links.ts = next->_links.ts;
-			assert(before._links.ts != 0);
-			return {node, false};
-		}
-	}
-
-	long long ts() const {
-		return before._links.ts;
-	}
-};
Index: doc/theses/thierry_delisle_PhD/code/prefetch.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/prefetch.cpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,106 +1,0 @@
-#include <algorithm>
-#include <array>
-#include <chrono>
-#include <iostream>
-#include <locale>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include <cassert>
-
-struct __attribute__((aligned(64))) element {
-	size_t value;
-};
-
-using block = std::array<element, 100>;
-
-block * create() {
-	block * b = new block();
-	for(auto & e : *b) {
-		e.value = rand();
-	}
-	b->back().value = b->size();
-
-	return b;
-}
-
-static inline size_t find(const block & b) {
-	size_t r = 0;
-	for(; r < b.size(); r++) {
-		if(__builtin_expect(b[r].value == b.size(), false)) break;
-	}
-
-	return r;
-}
-
-void usage(char * argv[]) {
-	std::cerr << argv[0] << ": [DURATION (FLOAT:SEC)] [NBLOCKS]" << std::endl;;
-	std::exit(1);
-}
-
-int main(int argc, char * argv[]) {
-	size_t nblocks = 1000;
-	double duration = 5;
-
-	std::cout.imbue(std::locale(""));
-
-	switch (argc)
-	{
-	case 3:
-		nblocks = std::stoul(argv[2]);
-		[[fallthrough]];
-	case 2:
-		duration = std::stod(argv[1]);
-		if( duration <= 0.0 ) {
-			std::cerr << "Duration must be positive, was " << argv[1] << "(" << duration << ")" << std::endl;
-			usage(argv);
-		}
-		[[fallthrough]];
-	case 1:
-		break;
-	default:
-		usage(argv);
-		break;
-	}
-
-	std::vector<std::unique_ptr<block>> blocks;
-	for(size_t i = 0; i < nblocks; i++) {
-		blocks.emplace_back( create() );
-	}
-	std::random_shuffle(blocks.begin(), blocks.end());
-
-	size_t CRC = 0;
-	size_t count = 0;
-
-	using clock = std::chrono::high_resolution_clock;
-	auto before = clock::now();
-
-	while(true) {
-		for(const auto & b : blocks) {
-			CRC += find(*b);
-			count++;
-		}
-		auto now = clock::now();
-		std::chrono::duration<double> durr = now - before;
-		if( durr.count() > duration ) {
-			break;
-		}
-	}
-
-	auto after = clock::now();
-	std::chrono::duration<double> durr = after - before;
-	duration = durr.count();
-
-	using std::chrono::duration_cast;
-	using std::chrono::nanoseconds;
-
-	size_t ops_sec = size_t(double(count) / duration);
-	auto dur_nano = duration_cast<nanoseconds>(std::chrono::duration<double>(1.0)).count();
-
-	std::cout << "CRC           : " << CRC << "\n";
-	std::cout << "Duration      : " << duration << "s\n";
-	std::cout << "Total ops     : " << count << "\n";
-	std::cout << "Ops/sec       : " << ops_sec << "\n";
-	std::cout << "ns/Op         : " << ( dur_nano / ops_sec )<< "\n";
-}
Index: doc/theses/thierry_delisle_PhD/code/process.sh
===================================================================
--- doc/theses/thierry_delisle_PhD/code/process.sh	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,42 +1,0 @@
-#!/bin/bash
-
-NAME=$1
-
-if [ ! -f "raw/${NAME}.out" ]; then
-    echo "Not output for ${NAME}"
-    exit 1
-fi
-
-if [ ! -f "raw/${NAME}.data" ]; then
-    echo "Not perf record for ${NAME}"
-    exit 1
-fi
-
-echo "Processing perf data for ${NAME}"
-
-OPS=$(grep -e 'Total ops' raw/${NAME}.out)
-CPOP=$( echo "Hello $OPS" | \grep -oP ", \K[0-9,]+(?=o)" --color | tr -d ',')
-CPUSH=$(echo "Hello $OPS" | \grep -oP "\(\K[0-9,]+(?=i)" --color | tr -d ',')
-
-REPORT=''
-perf report -n --percent-limit 5 --stdio --no-children -i raw/${NAME}.data > raw/.temp
-EVENT=$(cat raw/.temp | grep -e '^# Samples'| cut -d ' ' -f 6)
-SPOP=$( cat raw/.temp | grep -e '] relaxed_list<Node>::pop'  | tr -s ' ' | cut -d ' ' -f 3)
-SPUSH=$(cat raw/.temp | grep -e '] relaxed_list<Node>::push' | tr -s ' ' | cut -d ' ' -f 3)
-SARR=$( cat raw/.temp | grep -e '] snz[i|m]_t::node::arrive_h'   | tr -s ' ' | cut -d ' ' -f 3)
-
-echo "$OPS"
-echo "Push count: $CPUSH"
-echo "Pop  count: $CPOP"
-
-echo "Pop    samples: $SPOP"
-echo "Push   samples: $SPUSH"
-echo "Arrive samples: $SARR"
-
-SpPUSH=$(bc -l <<< "scale=9; $SPUSH / $CPUSH")
-SpPOP=$( bc -l <<< "scale=9; $SPOP  / $CPOP" )
-SpARR=$( bc -l <<< "scale=9; $SARR  / $CPUSH")
-
-printf "%s per push()  : %.9f\n" $EVENT $SpPUSH | sed ':a;s/\B[0-9]\{3\}\>/,&/;ta'
-printf "%s per pop()   : %.9f\n" $EVENT $SpPOP  | sed ':a;s/\B[0-9]\{3\}\>/,&/;ta'
-printf "%s per arrive(): %.9f\n" $EVENT $SpARR  | sed ':a;s/\B[0-9]\{3\}\>/,&/;ta'
Index: doc/theses/thierry_delisle_PhD/code/processor.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/processor.hpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,53 +1,0 @@
-#include <atomic>
-
-struct thread {};
-
-struct cluster {
-	void add();
-	void remove();
-	thread * next();
-};
-
-struct processor {
-
-	cluster cluster;
-	std::atomic<bool> stop;
-	volatile bool idle;
-};
-
-
-void run(thread * ) {
-	// verify preemption
-
-	// run Thread
-
-	// verify preemption
-
-	// finish Running
-}
-
-void main(processor & self) {
-
-	self.cluster.add();
-
-	while(!self.stop) {
-		if(thread * t = self.cluster.next()) {
-			run(t);
-			continue;
-		}
-
-		self.set_idle();
-		std::atomic_thread_fence();
-
-		if(thread * t = self.cluster.next()) {
-			self.idle = false;
-			run(t);
-			continue;
-		}
-
-		halt();
-	}
-
-	self.cluster.remove();
-
-}
Index: doc/theses/thierry_delisle_PhD/code/processor_list.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/processor_list.hpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,215 +1,0 @@
-#include <cassert>
-
-#include <atomic>
-#include <new>
-#include <type_traits>
-
-struct processor;
-
-struct __attribute__((aligned(64))) processor_id {
-	std::atomic<processor *> handle;
-	std::atomic<bool> lock;
-
-	processor_id() = default;
-	processor_id(processor * proc) : handle(proc), lock() {
-		/*paranoid*/ assert(std::atomic_is_lock_free(&lock));
-	}
-};
-
-extern unsigned num();
-
-#define ERROR throw 1
-
-class processor_list {
-private:
-
-	static const constexpr std::size_t cache_line_size = 64;
-
-	static_assert(sizeof (processor_id) <= cache_line_size, "ERROR: Instances must fit in one cache line" );
-	static_assert(alignof(processor_id) == cache_line_size, "ERROR: Instances must aligned to one cache line" );
-
-	const unsigned max;     // total cachelines allocated
-	std::atomic_uint alloc; // cachelines currently in use
-	std::atomic_uint ready; // cachelines ready to iterate over (!= to alloc when thread is in second half of doregister)
-	std::atomic<bool> lock; // writerlock
-	processor_id * data;    // data pointer
-
-private:
-	inline void acquire(std::atomic<bool> & ll) {
-		while( __builtin_expect(ll.exchange(true),false) ) {
-			while(ll.load(std::memory_order_relaxed))
-				asm volatile("pause");
-		}
-		/* paranoid */ assert(ll);
-	}
-
-public:
-	processor_list()
-		: max(num())
-		, alloc(0)
-		, ready(0)
-		, lock{false}
-		, data( new processor_id[max] )
-	{
-		/*paranoid*/ assert(num() == max);
-		/*paranoid*/ assert(std::atomic_is_lock_free(&alloc));
-		/*paranoid*/ assert(std::atomic_is_lock_free(&ready));
-	}
-
-	~processor_list() {
-		delete[] data;
-	}
-
-	//=======================================================================
-	// Lock-Free registering/unregistering of threads
-	unsigned doregister(processor * proc) {
-		// Step - 1 : check if there is already space in the data
-		uint_fast32_t s = ready;
-
-		// Check among all the ready
-		for(uint_fast32_t i = 0; i < s; i++) {
-			processor * null = nullptr; // Re-write every loop since compare thrashes it
-			if( data[i].handle.load(std::memory_order_relaxed) == null
-			 && data[i].handle.compare_exchange_strong(null, proc)) {
-				/*paranoid*/ assert(i < ready);
-				/*paranoid*/ assert(alignof(decltype(data[i])) == cache_line_size);
-				/*paranoid*/ assert((uintptr_t(&data[i]) % cache_line_size) == 0);
-				return i;
-			}
-		}
-
-		if(max <= alloc) ERROR;
-
-		// Step - 2 : F&A to get a new spot in the array.
-		uint_fast32_t n = alloc++;
-		if(max <= n) ERROR;
-
-		// Step - 3 : Mark space as used and then publish it.
-		void * storage = &data[n];
-		new (storage) processor_id( proc );
-		while(true) {
-			unsigned copy = n;
-			if( ready.load(std::memory_order_relaxed) == n
-			 && ready.compare_exchange_weak(copy, n + 1) )
-			 	break;
-			asm volatile("pause");
-		}
-
-		// Return new spot.
-		/*paranoid*/ assert(n < ready);
-		/*paranoid*/ assert(alignof(decltype(data[n])) == cache_line_size);
-		/*paranoid*/ assert((uintptr_t(&data[n]) % cache_line_size) == 0);
-		return n;
-	}
-
-	processor * unregister(unsigned iproc) {
-		/*paranoid*/ assert(iproc < ready);
-		auto ret = data[iproc].handle.load(std::memory_order_relaxed);
-		data[iproc].handle = nullptr;
-		return ret;
-	}
-
-	// Reset all registration
-	// Unsafe in most cases, use for testing only.
-	void reset() {
-		alloc = 0;
-		ready = 0;
-	}
-
-	processor * get(unsigned iproc) {
-		return data[iproc].handle.load(std::memory_order_relaxed);
-	}
-
-	//=======================================================================
-	// Reader-writer lock implementation
-	// Concurrent with doregister/unregister,
-	//    i.e., threads can be added at any point during or between the entry/exit
-
-	//-----------------------------------------------------------------------
-	// Reader side
-	void read_lock(unsigned iproc) {
-		/*paranoid*/ assert(iproc < ready);
-
-		// Step 1 : make sure no writer are in the middle of the critical section
-		while(lock.load(std::memory_order_relaxed))
-			asm volatile("pause");
-
-		// Fence needed because we don't want to start trying to acquire the lock
-		// before we read a false.
-		// Not needed on x86
-		// std::atomic_thread_fence(std::memory_order_seq_cst);
-
-		// Step 2 : acquire our local lock
-		acquire( data[iproc].lock );
-		/*paranoid*/ assert(data[iproc].lock);
-	}
-
-	void read_unlock(unsigned iproc) {
-		/*paranoid*/ assert(iproc < ready);
-		/*paranoid*/ assert(data[iproc].lock);
-		data[iproc].lock.store(false, std::memory_order_release);
-	}
-
-	//-----------------------------------------------------------------------
-	// Writer side
-	uint_fast32_t write_lock() {
-		// Step 1 : lock global lock
-		// It is needed to avoid processors that register mid Critical-Section
-		//   to simply lock their own lock and enter.
-		acquire(lock);
-
-		// Step 2 : lock per-proc lock
-		// Processors that are currently being registered aren't counted
-		//   but can't be in read_lock or in the critical section.
-		// All other processors are counted
-		uint_fast32_t s = ready;
-		for(uint_fast32_t i = 0; i < s; i++) {
-			acquire( data[i].lock );
-		}
-
-		return s;
-	}
-
-	void write_unlock(uint_fast32_t last_s) {
-		// Step 1 : release local locks
-		// This must be done while the global lock is held to avoid
-		//   threads that where created mid critical section
-		//   to race to lock their local locks and have the writer
-		//   immidiately unlock them
-		// Alternative solution : return s in write_lock and pass it to write_unlock
-		for(uint_fast32_t i = 0; i < last_s; i++) {
-			assert(data[i].lock);
-			data[i].lock.store(false, std::memory_order_release);
-		}
-
-		// Step 2 : release global lock
-		/*paranoid*/ assert(true == lock);
-		lock.store(false, std::memory_order_release);
-	}
-
-	//-----------------------------------------------------------------------
-	// Checking support
-	uint_fast32_t epoch_check() {
-		// Step 1 : lock global lock
-		// It is needed to avoid processors that register mid Critical-Section
-		//   to simply lock their own lock and enter.
-		while(lock.load(std::memory_order_relaxed))
-			asm volatile("pause");
-
-		// Step 2 : lock per-proc lock
-		// Processors that are currently being registered aren't counted
-		//   but can't be in read_lock or in the critical section.
-		// All other processors are counted
-		uint_fast32_t s = ready;
-		for(uint_fast32_t i = 0; i < s; i++) {
-			while(data[i].lock.load(std::memory_order_relaxed))
-				asm volatile("pause");
-		}
-
-		return s;
-	}
-
-public:
-};
-
-#undef ERROR
Index: doc/theses/thierry_delisle_PhD/code/processor_list_fast.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/processor_list_fast.cpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,173 +1,0 @@
-#include "processor_list.hpp"
-
-#include <array>
-#include <iomanip>
-#include <iostream>
-#include <locale>
-#include <string>
-#include <thread>
-
-#include "utils.hpp"
-
-unsigned num() {
-	return 0x1000000;
-}
-
-//-------------------
-
-struct processor {
-	unsigned id;
-};
-void run(unsigned nthread, double duration, unsigned writes, unsigned epochs) {
-	assert(writes < 100);
-
-	// List being tested
-	processor_list list = {};
-
-	// Barrier for synchronization
-	barrier_t barrier(nthread + 1);
-
-	// Data to check everything is OK
-	size_t write_committed = 0ul;
-	struct {
-		std::atomic_size_t write = { 0ul };
-		std::atomic_size_t read  = { 0ul };
-		std::atomic_size_t epoch = { 0ul };
-	} lock_cnt;
-
-	// Flag to signal termination
-	std::atomic_bool done = { false };
-
-	std::thread * threads[nthread];
-	unsigned i = 1;
-	for(auto & t : threads) {
-		t = new std::thread([&done, &list, &barrier, &write_committed, &lock_cnt, writes, epochs](unsigned tid) {
-			Random rand(tid + rdtscl());
-			processor proc;
-			proc.id = list.doregister(&proc);
-			size_t writes_cnt = 0;
-			size_t reads_cnt = 0;
-			size_t epoch_cnt = 0;
-
-			affinity(tid);
-
-			barrier.wait(tid);
-
-			while(__builtin_expect(!done, true)) {
-				auto r = rand.next() % 100;
-				if (r < writes) {
-					auto n = list.write_lock();
-					write_committed++;
-					writes_cnt++;
-					assert(writes_cnt < -2ul);
-					list.write_unlock(n);
-				}
-				else if(r < epochs) {
-					list.epoch_check();
-					epoch_cnt++;
-				}
-				else {
-					list.read_lock(proc.id);
-					reads_cnt++;
-					assert(reads_cnt < -2ul);
-					list.read_unlock(proc.id);
-				}
-			}
-
-			barrier.wait(tid);
-
-			auto p = list.unregister(proc.id);
-			assert(&proc == p);
-			lock_cnt.write += writes_cnt;
-			lock_cnt.read  += reads_cnt;
-			lock_cnt.epoch += epoch_cnt;
-		}, i++);
-	}
-
-	auto before = Clock::now();
-	barrier.wait(0);
-
-	while(true) {
-		usleep(1000);
-		auto now = Clock::now();
-		duration_t durr = now - before;
-		if( durr.count() > duration ) {
-			done = true;
-			break;
-		}
-	}
-
-	barrier.wait(0);
-	auto after = Clock::now();
-	duration_t durr = after - before;
-	duration = durr.count();
-
-	for(auto t : threads) {
-		t->join();
-		delete t;
-	}
-
-	assert(write_committed == lock_cnt.write);
-
-	size_t totalop = lock_cnt.read + lock_cnt.write + lock_cnt.epoch;
-	size_t ops_sec = size_t(double(totalop) / duration);
-	size_t ops_thread = ops_sec / nthread;
-	double dur_nano = duration_cast<std::nano>(1.0);
-
-	std::cout << "Duration      : " << duration << "s\n";
-	std::cout << "Total ops     : " << totalop << "(" << lock_cnt.read << "r, " << lock_cnt.write << "w, " << lock_cnt.epoch << "e)\n";
-	std::cout << "Ops/sec       : " << ops_sec << "\n";
-	std::cout << "Ops/sec/thread: " << ops_thread << "\n";
-	std::cout << "ns/Op         : " << ( dur_nano / ops_thread )<< "\n";
-}
-
-void usage(char * argv[]) {
-	std::cerr << argv[0] << ": [DURATION (FLOAT:SEC)] [NTHREADS] [%WRITES]" << std::endl;;
-	std::exit(1);
-}
-
-int main(int argc, char * argv[]) {
-
-	double duration   = 5.0;
-	unsigned nthreads = 2;
-	unsigned writes   = 0;
-	unsigned epochs   = 0;
-
-	std::cout.imbue(std::locale(""));
-
-	switch (argc)
-	{
-	case 5:
-		epochs = std::stoul(argv[4]);
-		[[fallthrough]];
-	case 4:
-		writes = std::stoul(argv[3]);
-		if( (writes + epochs) > 100 ) {
-			std::cerr << "Writes + Epochs must be valid percentage, was " << argv[3] << " + " << argv[4] << "(" << writes << " + " << epochs << ")" << std::endl;
-			usage(argv);
-		}
-		[[fallthrough]];
-	case 3:
-		nthreads = std::stoul(argv[2]);
-		[[fallthrough]];
-	case 2:
-		duration = std::stod(argv[1]);
-		if( duration <= 0.0 ) {
-			std::cerr << "Duration must be positive, was " << argv[1] << "(" << duration << ")" << std::endl;
-			usage(argv);
-		}
-		[[fallthrough]];
-	case 1:
-		break;
-	default:
-		usage(argv);
-		break;
-	}
-
-	check_cache_line_size();
-
-	std::cout << "Running " << nthreads << " threads for " << duration << " seconds with " << writes << "% writes and " << epochs << "% epochs" << std::endl;
-	run(nthreads, duration, writes, epochs + writes);
-
-	return 0;
-}
Index: doc/theses/thierry_delisle_PhD/code/processor_list_good.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/processor_list_good.cpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,269 +1,0 @@
-#include "processor_list.hpp"
-
-#include <iostream>
-#include <string>
-#include <thread>
-
-unsigned num() {
-	return 0x1000000;
-}
-
-// Barrier from
-class barrier_t {
-public:
-	barrier_t(size_t total)
-		: waiting(0)
-		, total(total)
-	{}
-
-	void wait(unsigned) {
-		size_t target = waiting++;
-		target = (target - (target % total)) + total;
-		while(waiting < target)
-			asm volatile("pause");
-
-		assert(waiting < (1ul << 60));
-    	}
-
-private:
-	std::atomic<size_t> waiting;
-	size_t total;
-};
-
-class Random {
-private:
-	unsigned int seed;
-public:
-	Random(int seed) {
-		this->seed = seed;
-	}
-
-	/** returns pseudorandom x satisfying 0 <= x < n. **/
-	unsigned int next() {
-		seed ^= seed << 6;
-		seed ^= seed >> 21;
-		seed ^= seed << 7;
-		return seed;
-    	}
-};
-
-//-------------------
-
-struct processor {
-	unsigned id;
-};
-
-// Stage 1
-// Make sure that the early registration works correctly
-// Registration uses a different process if the act of
-// registering the processor makes it the highest processor count
-// seen yet.
-void stage1(unsigned nthread, unsigned repeats) {
-	const int n = repeats;
-	const int nproc = 10;
-
-	// List being tested
-	processor_list list;
-
-	// Barrier for synchronization
-	barrier_t barrier(nthread + 1);
-
-	// Seen values to detect duplicattion
-	std::atomic<processor *> ids[nthread * nproc];
-	for(auto & i : ids) {
-		i = nullptr;
-	}
-
-	// Can't pass VLA to lambda
-	std::atomic<processor *> * idsp = ids;
-
-	// Threads which will run the code
-	std::thread * threads[nthread];
-	unsigned i = 1;
-	for(auto & t : threads) {
-		// Each thread will try to register a processor then add it to the
-		// list of registerd processor
-		t = new std::thread([&list, &barrier, idsp, n](unsigned tid){
-			processor proc[nproc];
-			for(int i = 0; i < n; i++) {
-				for(auto & p : proc) {
-					// Register the thread
-					p.id = list.doregister(&p);
-				}
-
-				for(auto & p : proc) {
-					// Make sure no one got this id before
-					processor * prev = idsp[p.id].exchange(&p);
-					assert(nullptr == prev);
-
-					// Make sure id is still consistend
-					assert(&p == list.get(p.id));
-				}
-
-				// wait for round to finish
-				barrier.wait(tid);
-
-				// wait for reset
-				barrier.wait(tid);
-			}
-		}, i++);
-	}
-
-	for(int i = 0; i < n; i++) {
-		//Wait for round to finish
-		barrier.wait(0);
-
-		// Reset list
-		list.reset();
-
-		std::cout << i << "\r";
-
-		// Reset seen values
-		for(auto & i : ids) {
-			i = nullptr;
-		}
-
-		// Start next round
-		barrier.wait(0);
-	}
-
-	for(auto t : threads) {
-		t->join();
-		delete t;
-	}
-}
-
-// Stage 2
-// Check that once churning starts, registration is still consistent.
-void stage2(unsigned nthread, unsigned repeats) {
-	// List being tested
-	processor_list list;
-
-	// Threads which will run the code
-	std::thread * threads[nthread];
-	unsigned i = 1;
-	for(auto & t : threads) {
-		// Each thread will try to register a few processors and
-		// unregister them, making sure that the registration is
-		// consistent
-		t = new std::thread([&list, repeats](unsigned tid){
-			processor procs[10];
-			for(unsigned i = 0; i < repeats; i++) {
-				// register the procs and note the id
-				for(auto & p : procs) {
-					p.id = list.doregister(&p);
-				}
-
-				if(1 == tid) std::cout << i << "\r";
-
-				// check the id is still consistent
-				for(const auto & p : procs) {
-					assert(&p == list.get(p.id));
-				}
-
-				// unregister and check the id is consistent
-				for(const auto & p : procs) {
-					assert(&p == list.unregister(p.id));
-				}
-			}
-		}, i++);
-	}
-
-	for(auto t : threads) {
-		t->join();
-		delete t;
-	}
-}
-
-bool is_writer();
-
-// Stage 3
-// Check that the reader writer lock works.
-void stage3(unsigned nthread, unsigned repeats) {
-	// List being tested
-	processor_list list;
-
-	size_t before = 0;
-
-	std::unique_ptr<size_t> after( new size_t(0) );
-
-	std::atomic<bool> done ( false );
-
-	// Threads which will run the code
-	std::thread * threads[nthread];
-	unsigned i = 1;
-	for(auto & t : threads) {
-		// Each thread will try to register a few processors and
-		// unregister them, making sure that the registration is
-		// consistent
-		t = new std::thread([&list, repeats, &before, &after, &done](unsigned tid){
-			Random rng(tid);
-			processor proc;
-			proc.id = list.doregister(&proc);
-			while(!done) {
-
-				if( (rng.next() % 100) == 0 ) {
-					auto r = list.write_lock();
-
-					auto b = before++;
-
-					std::cout << b << "\r";
-
-					(*after)++;
-
-					if(b >= repeats) done = true;
-
-					list.write_unlock(r);
-				}
-				else {
-					list.read_lock(proc.id);
-					assert(before == *after);
-					list.read_unlock(proc.id);
-				}
-
-			}
-
-			list.unregister(proc.id);
-		}, i++);
-	}
-
-	for(auto t : threads) {
-		t->join();
-		delete t;
-	}
-}
-
-int main(int argc, char * argv[]) {
-
-	unsigned nthreads = 1;
-	if( argc >= 3 ) {
-		size_t idx;
-		nthreads = std::stoul(argv[2], &idx);
-		assert('\0' == argv[2][idx]);
-	}
-
-	unsigned repeats = 100;
-	if( argc >= 2 ) {
-		size_t idx;
-		repeats = std::stoul(argv[1], &idx);
-		assert('\0' == argv[1][idx]);
-	}
-
-	processor_list::check_cache_line_size();
-
-	std::cout << "Running " << repeats << " repetitions on " << nthreads << " threads" << std::endl;
-	std::cout << "Checking registration - early" << std::endl;
-	stage1(nthreads, repeats);
-	std::cout << "Done                         " << std::endl;
-
-	std::cout << "Checking registration - churn" << std::endl;
-	stage2(nthreads, repeats);
-	std::cout << "Done                         " << std::endl;
-
-	std::cout << "Checking RW lock             " << std::endl;
-	stage3(nthreads, repeats);
-	std::cout << "Done                         " << std::endl;
-
-
-	return 0;
-}
Index: doc/theses/thierry_delisle_PhD/code/randbit.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/randbit.cpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,236 +1,0 @@
-#include <cstddef>
-#include <cstdint>
-#include <x86intrin.h>
-
-__attribute__((noinline)) unsigned nthSetBit(size_t mask, unsigned bit) {
-	uint64_t v = mask;   // Input value to find position with rank r.
-	unsigned int r = bit;// Input: bit's desired rank [1-64].
-	unsigned int s;      // Output: Resulting position of bit with rank r [1-64]
-	uint64_t a, b, c, d; // Intermediate temporaries for bit count.
-	unsigned int t;      // Bit count temporary.
-
-	// Do a normal parallel bit count for a 64-bit integer,
-	// but store all intermediate steps.
-	// a = (v & 0x5555...) + ((v >> 1) & 0x5555...);
-	a =  v - ((v >> 1) & ~0UL/3);
-	// b = (a & 0x3333...) + ((a >> 2) & 0x3333...);
-	b = (a & ~0UL/5) + ((a >> 2) & ~0UL/5);
-	// c = (b & 0x0f0f...) + ((b >> 4) & 0x0f0f...);
-	c = (b + (b >> 4)) & ~0UL/0x11;
-	// d = (c & 0x00ff...) + ((c >> 8) & 0x00ff...);
-	d = (c + (c >> 8)) & ~0UL/0x101;
-
-
-	t = (d >> 32) + (d >> 48);
-	// Now do branchless select!
-	s  = 64;
-	// if (r > t) {s -= 32; r -= t;}
-	s -= ((t - r) & 256) >> 3; r -= (t & ((t - r) >> 8));
-	t  = (d >> (s - 16)) & 0xff;
-	// if (r > t) {s -= 16; r -= t;}
-	s -= ((t - r) & 256) >> 4; r -= (t & ((t - r) >> 8));
-	t  = (c >> (s - 8)) & 0xf;
-	// if (r > t) {s -= 8; r -= t;}
-	s -= ((t - r) & 256) >> 5; r -= (t & ((t - r) >> 8));
-	t  = (b >> (s - 4)) & 0x7;
-	// if (r > t) {s -= 4; r -= t;}
-	s -= ((t - r) & 256) >> 6; r -= (t & ((t - r) >> 8));
-	t  = (a >> (s - 2)) & 0x3;
-	// if (r > t) {s -= 2; r -= t;}
-	s -= ((t - r) & 256) >> 7; r -= (t & ((t - r) >> 8));
-	t  = (v >> (s - 1)) & 0x1;
-	// if (r > t) s--;
-	s -= ((t - r) & 256) >> 8;
-	// s = 65 - s;
-	return s;
-}
-
-unsigned rand_bit(unsigned rnum, uint64_t mask) {
-	unsigned bit = mask ? rnum % __builtin_popcountl(mask) : 0;
-#if defined(BRANCHLESS)
-	uint64_t v = mask;   // Input value to find position with rank r.
-	unsigned int r = bit + 1;// Input: bit's desired rank [1-64].
-	unsigned int s;      // Output: Resulting position of bit with rank r [1-64]
-	uint64_t a, b, c, d; // Intermediate temporaries for bit count.
-	unsigned int t;      // Bit count temporary.
-
-	// Do a normal parallel bit count for a 64-bit integer,
-	// but store all intermediate steps.
-	// a = (v & 0x5555...) + ((v >> 1) & 0x5555...);
-	a =  v - ((v >> 1) & ~0UL/3);
-	// b = (a & 0x3333...) + ((a >> 2) & 0x3333...);
-	b = (a & ~0UL/5) + ((a >> 2) & ~0UL/5);
-	// c = (b & 0x0f0f...) + ((b >> 4) & 0x0f0f...);
-	c = (b + (b >> 4)) & ~0UL/0x11;
-	// d = (c & 0x00ff...) + ((c >> 8) & 0x00ff...);
-	d = (c + (c >> 8)) & ~0UL/0x101;
-
-
-	t = (d >> 32) + (d >> 48);
-	// Now do branchless select!
-	s  = 64;
-	// if (r > t) {s -= 32; r -= t;}
-	s -= ((t - r) & 256) >> 3; r -= (t & ((t - r) >> 8));
-	t  = (d >> (s - 16)) & 0xff;
-	// if (r > t) {s -= 16; r -= t;}
-	s -= ((t - r) & 256) >> 4; r -= (t & ((t - r) >> 8));
-	t  = (c >> (s - 8)) & 0xf;
-	// if (r > t) {s -= 8; r -= t;}
-	s -= ((t - r) & 256) >> 5; r -= (t & ((t - r) >> 8));
-	t  = (b >> (s - 4)) & 0x7;
-	// if (r > t) {s -= 4; r -= t;}
-	s -= ((t - r) & 256) >> 6; r -= (t & ((t - r) >> 8));
-	t  = (a >> (s - 2)) & 0x3;
-	// if (r > t) {s -= 2; r -= t;}
-	s -= ((t - r) & 256) >> 7; r -= (t & ((t - r) >> 8));
-	t  = (v >> (s - 1)) & 0x1;
-	// if (r > t) s--;
-	s -= ((t - r) & 256) >> 8;
-	// s = 65 - s;
-	return s - 1;
-#elif defined(LOOP)
-	for(unsigned i = 0; i < bit; i++) {
-		mask ^= (1ul << (__builtin_ffsl(mask) - 1ul));
-	}
-	return __builtin_ffsl(mask) - 1ul;
-#elif defined(PDEP)
-	uint64_t picked = _pdep_u64(1ul << bit, mask);
-	return __builtin_ffsl(picked) - 1ul;
-#else
-#error must define LOOP, PDEP or BRANCHLESS
-#endif
-}
-
-#include <cassert>
-#include <atomic>
-#include <chrono>
-#include <iomanip>
-#include <iostream>
-#include <locale>
-#include <thread>
-
-#include <unistd.h>
-
-class barrier_t {
-public:
-	barrier_t(size_t total)
-		: waiting(0)
-		, total(total)
-	{}
-
-	void wait(unsigned) {
-		size_t target = waiting++;
-		target = (target - (target % total)) + total;
-		while(waiting < target)
-			asm volatile("pause");
-
-		assert(waiting < (1ul << 60));
-    	}
-
-private:
-	std::atomic<size_t> waiting;
-	size_t total;
-};
-
-class Random {
-private:
-	unsigned int seed;
-public:
-	Random(int seed) {
-		this->seed = seed;
-	}
-
-	/** returns pseudorandom x satisfying 0 <= x < n. **/
-	unsigned int next() {
-		seed ^= seed << 6;
-		seed ^= seed >> 21;
-		seed ^= seed << 7;
-		return seed;
-    	}
-};
-
-using Clock = std::chrono::high_resolution_clock;
-using duration_t = std::chrono::duration<double>;
-using std::chrono::nanoseconds;
-
-template<typename Ratio, typename T>
-T duration_cast(T seconds) {
-	return std::chrono::duration_cast<std::chrono::duration<T, Ratio>>(std::chrono::duration<T>(seconds)).count();
-}
-
-void waitfor(double & duration, barrier_t & barrier, std::atomic_bool & done) {
-
-
-	std::cout << "Starting" << std::endl;
-	auto before = Clock::now();
-	barrier.wait(0);
-
-	while(true) {
-		usleep(100000);
-		auto now = Clock::now();
-		duration_t durr = now - before;
-		if( durr.count() > duration ) {
-			done = true;
-			break;
-		}
-		std::cout << "\r" << std::setprecision(4) << durr.count();
-		std::cout.flush();
-	}
-
-	barrier.wait(0);
-	auto after = Clock::now();
-	duration_t durr = after - before;
-	duration = durr.count();
-	std::cout << "\rClosing down" << std::endl;
-}
-
-__attribute__((noinline)) void body(Random & rand) {
-	uint64_t mask = (uint64_t(rand.next()) << 32ul) | uint64_t(rand.next());
-	unsigned idx = rand.next();
-
-	unsigned bit = rand_bit(idx, mask);
-
-	if(__builtin_expect(((1ul << bit) & mask) == 0, false)) {
-		std::cerr << std::hex <<  "Rand " << idx << " from " << mask;
-		std::cerr << " gave " << (1ul << bit) << "(" << std::dec << bit << ")" << std::endl;
-		std::abort();
-	}
-}
-
-void runRandBit(double duration) {
-
-	std::atomic_bool done  = { false };
-	barrier_t barrier(2);
-
-	size_t count = 0;
-	std::thread thread([&done, &barrier, &count]() {
-
-		Random rand(22);
-
-		barrier.wait(1);
-
-		for(;!done; count++) {
-			body(rand);
-		}
-
-		barrier.wait(1);
-	});
-
-	waitfor(duration, barrier, done);
-	thread.join();
-
-	size_t ops = count;
-	size_t ops_sec = size_t(double(ops) / duration);
-	auto dur_nano = duration_cast<std::nano>(1.0);
-
-	std::cout << "Duration      : " << duration << "s\n";
-	std::cout << "ns/Op         : " << ( dur_nano / ops )<< "\n";
-	std::cout << "Ops/sec       : " << ops_sec << "\n";
-	std::cout << "Total ops     : " << ops << std::endl;
-
-}
-
-int main() {
-	std::cout.imbue(std::locale(""));
-	runRandBit(5);
-}
Index: doc/theses/thierry_delisle_PhD/code/readQ_example/Makefile
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readQ_example/Makefile	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readQ_example/Makefile	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,6 @@
+all: gui-proto
+
+CXXFLAGS = -fpic -g -O0 -I.
+
+gui-proto: proto-gui/main.o thrdlib/thread.o
+	$(CXX) -pthread -ldl -o ${@} ${^} -ftls-model=initial-exec
Index: doc/theses/thierry_delisle_PhD/code/readQ_example/proto-gui/main.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readQ_example/proto-gui/main.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readQ_example/proto-gui/main.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,310 @@
+#include "thrdlib/thread.hpp"
+
+#include <cassert>
+
+#include <algorithm>
+#include <atomic>
+#include <iostream>
+#include <memory>
+#include <vector>
+
+#include <getopt.h>
+using thrdlib::thread_t;
+
+
+extern __attribute__((aligned(128))) thread_local struct {
+	void * volatile this_thread;
+	void * volatile this_processor;
+	void * volatile this_stats;
+
+	struct {
+		volatile unsigned short disable_count;
+		volatile bool enabled;
+		volatile bool in_progress;
+	} preemption_state;
+
+	#if defined(__SIZEOF_INT128__)
+		__uint128_t rand_seed;
+	#else
+		uint64_t rand_seed;
+	#endif
+	struct {
+		uint64_t fwd_seed;
+		uint64_t bck_seed;
+	} ready_rng;
+} kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
+
+//--------------------
+// Constants
+unsigned nframes;
+unsigned fsize;
+unsigned nproduce;
+
+//--------------------
+// Frame management
+
+class Frame {
+	static const thread_t reset;
+	static const thread_t set;
+	std::atomic<thread_t> rdy_state = { reset };
+	std::atomic<thread_t> rnd_state = { set };
+public:
+	unsigned number;
+	std::unique_ptr<unsigned char[]> data;
+
+private:
+	inline bool wait( thread_t self, std::atomic<thread_t> & state, std::atomic<thread_t> & other ) {
+		bool ret;
+		while(true) {
+			thread_t expected = state;
+			if( expected == set ) { ret = false; goto END; }
+			assert( expected == reset );
+			if( std::atomic_compare_exchange_strong( &state, &expected, self) ) {
+				thrdlib::park( self );
+				ret = true;
+				goto END;
+			}
+		}
+		END:
+		assert( state == set );
+		assert( other != set );
+		state = reset;
+		return ret;
+	}
+
+	inline bool publish(  std::atomic<thread_t> & state ) {
+		thread_t got = std::atomic_exchange( &state, set );
+		assert( got != set );
+
+		if( got == reset ) return false;
+
+		thrdlib::unpark( got );
+		return true;
+	}
+
+public:
+	inline bool wait_rendered( thread_t self ) {
+		return wait( self, rnd_state, rdy_state );
+	}
+
+	inline bool wait_ready   ( thread_t self ) {
+		return wait( self, rdy_state, rnd_state );
+	}
+
+	inline bool publish() {
+		return publish( rdy_state );
+	}
+
+	inline bool release() {
+		return publish( rnd_state );
+	}
+};
+
+const thread_t Frame::reset = nullptr;
+const thread_t Frame::set   = reinterpret_cast<thread_t>(1);
+
+std::unique_ptr<Frame[]> frames;
+volatile unsigned last_produced = 0;
+
+//--------------------
+// Threads
+thread_t volatile the_stats_thread = nullptr;
+
+inline void fence(void) {
+	std::atomic_thread_fence(std::memory_order_seq_cst);
+}
+
+struct {
+	struct {
+		volatile unsigned long long   parks = 0;
+		volatile unsigned long long unparks = 0;
+	} sim;
+	struct {
+		volatile unsigned long long   parks = 0;
+		volatile unsigned long long unparks = 0;
+	} rend;
+
+	struct {
+		volatile unsigned long long ran = 0;
+		volatile unsigned long long saw = 0;
+	} stats;
+} thrd_stats;
+
+void Stats( thread_t self ) {
+	the_stats_thread = self;
+	fence();
+	thrdlib::park( self );
+
+	std::vector<bool> seen;
+	seen.resize(nproduce, false);
+
+	while(last_produced < nproduce) {
+		thrdlib::yield();
+		thrd_stats.stats.ran++;
+		if( last_produced > 0 ) seen.at(last_produced - 1) = true;
+	}
+
+	thrd_stats.stats.saw = std::count(seen.begin(), seen.end(), true);
+}
+
+void Simulator( thread_t self ) {
+	for(unsigned i = 0; i < nproduce; i++) {
+		auto & frame = frames[i % nframes];
+		// Wait for the frames to be rendered
+		if( frame.wait_rendered( self ) ) {
+			thrd_stats.sim.parks++;
+		}
+
+		// Write the frame information
+		frame.number = i;
+		for( unsigned x = 0; x < fsize; x++ ) {
+			frame.data[x] = i;
+		}
+		std::cout << "Simulated " << i << std::endl;
+		last_produced = i+1;
+
+		// Publish it
+		if( frame.publish()  ) {
+			thrd_stats.sim.unparks++;
+		}
+	}
+}
+
+void Renderer( thread_t self ) {
+	thrdlib::unpark( the_stats_thread );
+	for(unsigned i = 0; i < nproduce; i++) {
+		auto & frame = frames[i % nframes];
+		// Wait for the frames to be ready
+		if( frame.wait_ready( self ) ) {
+			thrd_stats.rend.parks++;
+		}
+
+		// Render the frame
+		unsigned total = 0;
+		for( unsigned x = 0; x < fsize; x++ ) {
+			total += frame.data[x];
+		}
+
+		std::cout << "Rendered " << i << std::endl;
+		assert(total == i * fsize);
+
+		// Release
+		if( frame.release() ) {
+			thrd_stats.rend.unparks++;
+		}
+	}
+
+}
+
+
+
+int main(int argc, char * argv[]) {
+	nframes  = 3;
+	fsize    = 1000;
+	nproduce = 60;
+
+	const char * framework;
+
+	for(;;) {
+		static struct option options[] = {
+			{"buff",  required_argument, 0, 'b'},
+			{"nprod",  required_argument, 0, 'p'},
+			{"fsize",   required_argument, 0, 'f'},
+			{0, 0, 0, 0}
+		};
+
+		int idx = 0;
+		int opt = getopt_long(argc, argv, "b:p:f:", options, &idx);
+
+		std::string arg = optarg ? optarg : "";
+		size_t len = 0;
+		switch(opt) {
+			// Exit Case
+			case -1:
+				/* paranoid */ assert(optind <= argc);
+				if( optind == argc ) {
+					std::cerr << "Must specify a framework" << std::endl;
+					goto usage;
+
+				}
+				framework = argv[optind];
+				goto run;
+			case 'b':
+				try {
+					nframes = std::stoul(optarg, &len);
+					if(nframes == 0 || len != arg.size()) { throw std::invalid_argument(""); }
+				} catch(std::invalid_argument &) {
+					std::cerr << "Number of buffered frames must be at least 1, was" << arg << std::endl;
+					goto usage;
+				}
+				break;
+			case 'p':
+				try {
+					nproduce = std::stoul(optarg, &len);
+					if(nproduce == 0 || len != arg.size()) { throw std::invalid_argument(""); }
+				} catch(std::invalid_argument &) {
+					std::cerr << "Number of produced frames must be at least 1, was" << arg << std::endl;
+					goto usage;
+				}
+				break;
+			case 'f':
+				try {
+					fsize = std::stoul(optarg, &len);
+					if(fsize == 0 || len != arg.size()) { throw std::invalid_argument(""); }
+				} catch(std::invalid_argument &) {
+					std::cerr << "Size of produced frames must be at least 1, was" << arg << std::endl;
+					goto usage;
+				}
+				break;
+			// Other cases
+			default: /* ? */
+				std::cerr << opt << std::endl;
+			usage:
+				std::cerr << "Usage: " << argv[0] << " [options] framework" << std::endl;
+				std::cerr << std::endl;
+				std::cerr << "  -b, --buff=COUNT    Number of frames to buffer" << std::endl;
+				std::cerr << "  -p, --nprod=COUNT   Number of frames to produce" << std::endl;
+				std::cerr << "  -f, --fsize=SIZE    Size of each frame in bytes" << std::endl;
+				std::exit(1);
+		}
+	}
+	run:
+	assert( framework );
+
+	frames.reset(new Frame[nframes]);
+	for(unsigned i = 0; i < nframes; i++) {
+		frames[i].number = 0;
+		frames[i].data.reset(new unsigned char[fsize]);
+	}
+	std::cout << "Created frames of " << fsize << " bytes" << std::endl;
+	std::cout << "(Buffering " << nframes << ")" << std::endl;
+
+	thrdlib::init( framework, 2 );
+
+	thread_t stats     = thrdlib::create( Stats );
+	std::cout << "Created Stats Thread" << std::endl;
+	while( the_stats_thread == nullptr ) thrdlib::yield();
+
+	std::cout << "Creating Main Threads" << std::endl;
+	thread_t renderer  = thrdlib::create( Renderer  );
+	thread_t simulator = thrdlib::create( Simulator );
+
+	std::cout << "Running" << std::endl;
+
+	thrdlib::join( simulator );
+	thrdlib::join( renderer  );
+	thrdlib::join( stats     );
+
+	thrdlib::clean();
+
+	std::cout << "----------" << std::endl;
+	std::cout << "# Parks" << std::endl;
+	std::cout << "  Renderer   park: " << thrd_stats. sim.  parks << std::endl;
+	std::cout << "  Renderer unpark: " << thrd_stats. sim.unparks << std::endl;
+	std::cout << " Simulator   park: " << thrd_stats.rend.  parks << std::endl;
+	std::cout << " Simulator unpark: " << thrd_stats.rend.unparks << std::endl;
+
+	std::cout << "Stats thread" << std::endl;
+	std::cout << " Ran             : " << thrd_stats.stats.ran << " times" << std::endl;
+	std::cout << " Saw             : " << thrd_stats.stats.saw << " (" << ((100.f * thrd_stats.stats.saw) / nproduce) << "%)" << std::endl;
+}
Index: doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/Makefile
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/Makefile	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/Makefile	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,19 @@
+all: fibre.so pthread.so cforall.so
+
+clean:
+	rm -rf fibre.so pthread.so
+
+CXXFLAGS=-Wall -Wextra -O3 -g -fpic -std=c++17 -pthread -ftls-model=initial-exec
+
+pthread.so: pthread.cpp Makefile
+	$(CXX) $(CXXFLAGS) -shared -o ${@} ${<}
+
+fibre.so: fibre.cpp Makefile
+	$(CXX) $(CXXFLAGS) -shared -o ${@} ${<} -lfibre
+
+CFAINC=${HOME}/local/include/cfa-dev
+CFALIB=${HOME}/local/lib/cfa-dev/x64-debug
+CFAFLAGS=-z execstack -I${CFAINC} -I${CFAINC}/concurrency -L${CFALIB} -Wl,-rpath,${CFALIB}
+
+cforall.so: cforall.cpp Makefile
+	$(CXX) $(CXXFLAGS) $(CFAFLAGS) -shared -o ${@} ${<} -lcfathread -lcfa -ldl -lm
Index: doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/cforall.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/cforall.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/cforall.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,43 @@
+#include <cassert>
+#include <clib/cfathread.h>
+
+typedef cfathread_t thread_t;
+static_assert(sizeof(thread_t) == sizeof(void*), "thread_t musst be of same size as void*");
+
+#if !defined(__cplusplus)
+#error no __cplusplus define!
+#endif
+
+extern "C" {
+	//--------------------
+	// Basic thread support
+	thread_t thrdlib_create( void (*the_main)( thread_t ) ) {
+		return cfathread_create( the_main );
+	}
+
+	void thrdlib_join( thread_t handle ) {
+		cfathread_join( handle );
+	}
+
+	void thrdlib_park( thread_t ) {
+		cfathread_park();
+	}
+
+	void thrdlib_unpark( thread_t handle ) {
+		cfathread_unpark( handle );
+	}
+
+	void thrdlib_yield( void ) {
+		cfathread_yield();
+	}
+
+	//--------------------
+	// Basic kernel features
+	void thrdlib_init( int procs ) {
+		cfathread_setproccnt(procs);
+	}
+
+	void thrdlib_clean( void ) {
+		cfathread_setproccnt(1);
+	}
+}
Index: doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/fibre.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/fibre.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/fibre.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,48 @@
+#include <cassert>
+#include <libfibre/cfibre.h>
+
+typedef cfibre_t thread_t;
+static_assert(sizeof(thread_t) == sizeof(void*), "thread_t musst be of same size as void*");
+
+void * fibre_runner(void * arg) {
+	auto the_main = (void (*)( thread_t ))arg;
+	the_main( cfibre_self() );
+	return nullptr;
+}
+
+extern "C" {
+	//--------------------
+	// Basic thread support
+	thread_t thrdlib_create( void (*the_main)( thread_t ) ) {
+		thread_t fibre;
+		cfibre_create( &fibre, nullptr, fibre_runner, (void*)the_main );
+		return fibre;
+	}
+
+	void thrdlib_join( thread_t handle ) {
+		cfibre_join( handle, nullptr );
+	}
+
+	void thrdlib_park( thread_t handle ) {
+		assert( handle == cfibre_self() );
+		cfibre_park();
+	}
+
+	void thrdlib_unpark( thread_t handle ) {
+		cfibre_unpark( handle );
+	}
+
+	void thrdlib_yield( void ) {
+		cfibre_yield();
+	}
+
+	//--------------------
+	// Basic kernel features
+	void thrdlib_init( int procs ) {
+		cfibre_init_n(1, procs );
+	}
+
+	void thrdlib_clean( void ) {
+
+	}
+}
Index: doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/pthread.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/pthread.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/pthread.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,99 @@
+#include <pthread.h>
+#include <errno.h>
+#include <cstring>
+#include <cstdio>
+#include <iostream>
+
+#define CHECKED(x) { int err = x; if( err != 0 ) { std::cerr << "KERNEL ERROR: Operation \"" #x "\" return error " << err << " - " << strerror(err) << std::endl; std::abort(); } }
+
+struct __bin_sem_t {
+	pthread_mutex_t 	lock;
+	pthread_cond_t  	cond;
+	int     		val;
+
+	__bin_sem_t() {
+		// Create the mutex with error checking
+		pthread_mutexattr_t mattr;
+		pthread_mutexattr_init( &mattr );
+		pthread_mutexattr_settype( &mattr, PTHREAD_MUTEX_ERRORCHECK_NP);
+		pthread_mutex_init(&lock, &mattr);
+
+		pthread_cond_init (&cond, nullptr);
+		val = 0;
+	}
+
+	~__bin_sem_t() {
+		CHECKED( pthread_mutex_destroy(&lock) );
+		CHECKED( pthread_cond_destroy (&cond) );
+	}
+
+	void wait() {
+		CHECKED( pthread_mutex_lock(&lock) );
+			while(val < 1) {
+				pthread_cond_wait(&cond, &lock);
+			}
+			val -= 1;
+		CHECKED( pthread_mutex_unlock(&lock) );
+	}
+
+	bool post() {
+		bool needs_signal = false;
+
+		CHECKED( pthread_mutex_lock(&lock) );
+			if(val < 1) {
+				val += 1;
+				pthread_cond_signal(&cond);
+				needs_signal = true;
+			}
+		CHECKED( pthread_mutex_unlock(&lock) );
+
+		return needs_signal;
+	}
+};
+
+#undef CHECKED
+
+//--------------------
+// Basic types
+struct pthread_runner_t {
+	pthread_t handle;
+	__bin_sem_t sem;
+};
+typedef pthread_runner_t * thread_t;
+
+static_assert(sizeof(thread_t) == sizeof(void*), "thread_t musst be of same size as void*");
+
+extern "C" {
+	//--------------------
+	// Basic thread support
+	thread_t thrdlib_create( void (*main)( thread_t ) ) {
+		thread_t thrd = new pthread_runner_t();
+		int r = pthread_create( &thrd->handle, nullptr, (void *(*)(void *))main, thrd );
+		if( r != 0 ) std::abort();
+		return thrd;
+	}
+
+	void thrdlib_join( thread_t handle ) {
+		void * ret;
+		int r = pthread_join( handle->handle, &ret );
+		if( r != 0 ) std::abort();
+		delete handle;
+	}
+
+	void thrdlib_park( thread_t handle ) {
+		handle->sem.wait();
+	}
+
+	void thrdlib_unpark( thread_t handle ) {
+		handle->sem.post();
+	}
+
+	void thrdlib_yield( void ) {
+		int r = pthread_yield();
+		if( r != 0 ) std::abort();
+	}
+
+	//--------------------
+	// Basic kernel features
+	void thrdlib_init( int ) {}
+}
Index: doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/thread.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/thread.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/thread.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,68 @@
+#include "thread.hpp"
+
+#include <cstdarg>										// va_start, va_end
+#include <cstdio>
+#include <cstring>										// strlen
+extern "C" {
+	#include <unistd.h>										// _exit, getpid
+	#include <signal.h>
+	#include <dlfcn.h>										// dlopen, dlsym
+	#include <execinfo.h>									// backtrace, messages
+}
+
+#include <iostream>
+#include <string>
+
+using thrdlib::thread_t;
+
+thread_t (*thrdlib::create)( void (*main)( thread_t ) ) = nullptr;
+void (*thrdlib::join)( thread_t handle ) = nullptr;
+void (*thrdlib::park)( thread_t handle ) = nullptr;
+void (*thrdlib::unpark)( thread_t handle ) = nullptr;
+void (*thrdlib::yield)( void ) = nullptr;
+void (*lib_clean)(void) = nullptr;
+
+typedef void (*fptr_t)();
+static fptr_t open_symbol( void * library, const char * symbol, bool required ) {
+	void * ptr = dlsym( library, symbol );
+
+	const char * error = dlerror();
+	if ( required && error ) {
+		std::cerr << "Fetching symbol '" << symbol << "' failed with error '" << error << "'\n";
+		std::abort();
+	}
+
+	return (fptr_t)ptr;
+}
+
+//--------------------
+// Basic kernel features
+void thrdlib::init( const char * name, int procs ) {
+	std::string file = __FILE__;
+	std::size_t found = file.find_last_of("/");
+  	std::string libname = file.substr(0,found+1) + name + ".so";
+
+	std::cout << "Use framework " << name << "(" << libname << ")\n";
+
+	void * library = dlopen( libname.c_str(), RTLD_NOW );
+	if ( const char * error = dlerror() ) {
+		std::cerr << "Could not open library '" << libname << "' from name '" << name <<"'\n";
+		std::cerr << "Error was : '" << error << "'\n";
+		std::abort();
+	}
+
+	void (*lib_init)( int ) = (void (*)( int ))open_symbol( library, "thrdlib_init", false );
+	lib_clean = open_symbol( library, "thrdlib_clean" , false );
+
+	thrdlib::create = (typeof(thrdlib::create))open_symbol( library, "thrdlib_create", true  );
+	thrdlib::join   = (typeof(thrdlib::join  ))open_symbol( library, "thrdlib_join"  , true  );
+	thrdlib::park   = (typeof(thrdlib::park  ))open_symbol( library, "thrdlib_park"  , true  );
+	thrdlib::unpark = (typeof(thrdlib::unpark))open_symbol( library, "thrdlib_unpark", true  );
+	thrdlib::yield  = (typeof(thrdlib::yield ))open_symbol( library, "thrdlib_yield" , true  );
+
+	lib_init( procs );
+}
+
+void thrdlib::clean( void ) {
+	if(lib_clean) lib_clean();
+}
Index: doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/thread.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/thread.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readQ_example/thrdlib/thread.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,18 @@
+#pragma once
+
+namespace thrdlib {
+	typedef void * thread_t;
+
+	//--------------------
+	// Basic thread support
+	extern thread_t (*create)( void (*main)( thread_t ) );
+	extern void (*join)( thread_t handle );
+	extern void (*park)( thread_t handle );
+	extern void (*unpark)( thread_t handle );
+	extern void (*yield)( void ) ;
+
+	//--------------------
+	// Basic kernel features
+	extern void init( const char * name, int procs );
+	extern void clean( void );
+};
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/Makefile
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/Makefile	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/Makefile	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,22 @@
+
+
+CXXFLAGS = -O3 -g -Wall -Wextra -std=c++17
+LDFLAGS = -pthread -latomic
+
+push:
+	clang++ relaxed_list.cpp -g -Wall -Wextra -std=c++17 -fsyntax-only &&  rsync -av relaxed_list.cpp relaxed_list.hpp utils.hpp assert.hpp scale.sh plg7b:~/workspace/sched/.
+
+relaxed_list: $(firstword $(MAKEFILE_LIST)) | build
+	clang++ relaxed_list.cpp $(CXXFLAGS) $(LDFLAGS) -lpng -MMD -MF build/$(@).d -o $(@)
+
+-include build/relaxed_list.d
+
+layout.ast: $(firstword $(MAKEFILE_LIST)) | build
+	clang++ relaxed_list_layout.cpp $(CXXFLAGS) -MMD -MF build/$(@).d -MT $(@) -E -o build/$(@).ii
+	clang++ -Xclang -fdump-record-layouts -fsyntax-only $(CXXFLAGS) build/$(@).ii > build/layout.ast.raw
+	cat build/$(@).raw > $(@)
+
+-include build/layout.ast.d
+
+build:
+	mkdir -p build
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/assert.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/assert.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/assert.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,22 @@
+#pragma once
+
+#ifndef NDEBUG
+#include <cassert>
+#include <cstdlib>
+
+#define sstr(s) #s
+#define xstr(s) sstr(s)
+
+extern const char * __my_progname;
+
+#define assertf(cond, ...) ({             \
+	if(!(cond)) {                       \
+		fprintf(stderr, "%s: " __FILE__ ":" xstr(__LINE__) ": %s: Assertion '" xstr(cond) "' failed.\n", __my_progname, __PRETTY_FUNCTION__); \
+		fprintf(stderr, __VA_ARGS__); \
+		fprintf(stderr, "\n"); \
+		std::abort();                 \
+	}                                   \
+})
+#else
+#define assertf(cond, ...)
+#endif
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/bitbench/select.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/bitbench/select.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/bitbench/select.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,186 @@
+
+#include "../utils.hpp"
+
+void consume(int i, int j) __attribute__((noinline));
+void consume(int i, int j) {
+	asm volatile("":: "rm" (i), "rm" (j) );	// was "(i), (i)": j was never consumed, letting the optimizer discard its computation
+}
+
+static inline unsigned rand_bit_sw(unsigned rnum, size_t mask) {
+	unsigned bit = mask ? rnum % __builtin_popcountl(mask) : 0;
+	uint64_t v = mask;   // Input value to find position with rank r.
+	unsigned int r = bit + 1;// Input: bit's desired rank [1-64].
+	unsigned int s;      // Output: Resulting position of bit with rank r [1-64]
+	uint64_t a, b, c, d; // Intermediate temporaries for bit count.
+	unsigned int t;      // Bit count temporary.
+
+	// Do a normal parallel bit count for a 64-bit integer,
+	// but store all intermediate steps.
+	a =  v - ((v >> 1) & ~0UL/3);
+	b = (a & ~0UL/5) + ((a >> 2) & ~0UL/5);
+	c = (b + (b >> 4)) & ~0UL/0x11;
+	d = (c + (c >> 8)) & ~0UL/0x101;
+
+
+	t = (d >> 32) + (d >> 48);
+	// Now do branchless select!
+	s  = 64;
+	s -= ((t - r) & 256) >> 3; r -= (t & ((t - r) >> 8));
+	t  = (d >> (s - 16)) & 0xff;
+	s -= ((t - r) & 256) >> 4; r -= (t & ((t - r) >> 8));
+	t  = (c >> (s - 8)) & 0xf;
+	s -= ((t - r) & 256) >> 5; r -= (t & ((t - r) >> 8));
+	t  = (b >> (s - 4)) & 0x7;
+	s -= ((t - r) & 256) >> 6; r -= (t & ((t - r) >> 8));
+	t  = (a >> (s - 2)) & 0x3;
+	s -= ((t - r) & 256) >> 7; r -= (t & ((t - r) >> 8));
+	t  = (v >> (s - 1)) & 0x1;
+	s -= ((t - r) & 256) >> 8;
+	return s - 1;
+}
+
+static inline unsigned rand_bit_hw(unsigned rnum, size_t mask) {
+	unsigned bit = mask ? rnum % __builtin_popcountl(mask) : 0;
+	uint64_t picked = _pdep_u64(1ul << bit, mask);
+	return picked ? __builtin_ctzl(picked) : 0;
+}
+
+struct TLS {
+	Random rng = { 6 };
+} tls;
+
+const unsigned numLists = 64;
+
+static inline void blind() {
+	int i = tls.rng.next() % numLists;
+	int j = tls.rng.next() % numLists;
+
+	consume(i, j);
+}
+
+std::atomic_size_t list_mask[7];
+static inline void bitmask_sw() {
+	unsigned i, j;
+	{
+		// Pick two lists at random
+		unsigned num = ((numLists - 1) >> 6) + 1;
+
+		unsigned ri = tls.rng.next();
+		unsigned rj = tls.rng.next();
+
+		unsigned wdxi = (ri >> 6u) % num;
+		unsigned wdxj = (rj >> 6u) % num;
+
+		size_t maski = list_mask[wdxi].load(std::memory_order_relaxed);
+		size_t maskj = list_mask[wdxj].load(std::memory_order_relaxed);
+
+		unsigned bi = rand_bit_sw(ri, maski);
+		unsigned bj = rand_bit_sw(rj, maskj);
+
+		i = bi | (wdxi << 6);
+		j = bj | (wdxj << 6);
+	}
+
+	consume(i, j);
+}
+
+static inline void bitmask_hw() {
+	#if !defined(__BMI2__)
+		#warning NO bmi2 for pdep rand_bit
+		return;
+	#endif
+	unsigned i, j;
+	{
+		// Pick two lists at random
+		unsigned num = ((numLists - 1) >> 6) + 1;
+
+		unsigned ri = tls.rng.next();
+		unsigned rj = tls.rng.next();
+
+		unsigned wdxi = (ri >> 6u) % num;
+		unsigned wdxj = (rj >> 6u) % num;
+
+		size_t maski = list_mask[wdxi].load(std::memory_order_relaxed);
+		size_t maskj = list_mask[wdxj].load(std::memory_order_relaxed);
+
+		unsigned bi = rand_bit_hw(ri, maski);
+		unsigned bj = rand_bit_hw(rj, maskj);
+
+		i = bi | (wdxi << 6);
+		j = bj | (wdxj << 6);
+	}
+
+	consume(i, j);
+}
+
+struct {
+	const unsigned mask = 7;
+	const unsigned depth = 3;
+	const uint64_t indexes = 0x0706050403020100;
+	uint64_t masks( unsigned node ) {
+		return 0xff00ffff00ff;
+	}
+} snzm;
+static inline void sparsemask() {
+	#if !defined(__BMI2__)
+		#warning NO bmi2 for sparse mask
+		return;
+	#endif
+	unsigned i, j;
+	{
+		// Pick two random number
+		unsigned ri = tls.rng.next();
+		unsigned rj = tls.rng.next();
+
+		// Pick two nodes from it
+		unsigned wdxi = ri & snzm.mask;
+		unsigned wdxj = rj & snzm.mask;
+
+		// Get the masks from the nodes
+		size_t maski = snzm.masks(wdxi);
+		size_t maskj = snzm.masks(wdxj);
+
+		uint64_t idxsi = _pext_u64(snzm.indexes, maski);
+		uint64_t idxsj = _pext_u64(snzm.indexes, maskj);
+
+		auto pi = __builtin_popcountll(maski);
+		auto pj = __builtin_popcountll(maskj);
+
+		ri = pi ? ri & ((pi >> 3) - 1) : 0;
+		rj = pj ? rj & ((pj >> 3) - 1) : 0;
+
+		unsigned bi = (idxsi >> (ri << 3)) & 0xff;
+		unsigned bj = (idxsj >> (rj << 3)) & 0xff;
+
+		i = (bi << snzm.depth) | wdxi;
+		j = (bj << snzm.depth) | wdxj;
+	}
+
+	consume(i, j);
+}
+
+template<typename T>
+void benchmark( T func, const std::string & name ) {
+	std::cout << "Starting " << name << std::endl;
+	auto before = Clock::now();
+	const int N = 250'000'000;
+	for(int i = 0; i < N; i++) {
+		func();
+	}
+	auto after = Clock::now();
+	duration_t durr = after - before;
+	double duration = durr.count();
+	std::cout << "Duration(s) : " << duration << std::endl;
+	std::cout << "Ops/sec     : " << uint64_t(N / duration) << std::endl;
+	std::cout << "ns/Op       : " << double(duration * 1'000'000'000.0 / N) << std::endl;
+	std::cout << std::endl;
+}
+
+int main() {
+	std::cout.imbue(std::locale(""));
+
+	benchmark(blind, "Blind guess");
+	benchmark(bitmask_sw, "Dense bitmask");
+	benchmark(bitmask_hw, "Dense bitmask with Parallel Deposit");
+	benchmark(sparsemask, "Parallel Extract bitmask");
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/bts.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/bts.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/bts.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,279 @@
+#include <array>
+#include <iomanip>
+#include <iostream>
+#include <locale>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include <getopt.h>
+#include <unistd.h>
+#include <sys/sysinfo.h>
+
+#include "utils.hpp"
+
+// ================================================================================================
+//                        UTILS
+// ================================================================================================
+
+struct local_stat_t {
+	size_t cnt = 0;
+};
+
+struct global_stat_t {
+	std::atomic_size_t cnt = { 0 };
+};
+
+void atomic_max(std::atomic_size_t & target, size_t value) {
+	for(;;) {
+		size_t expect = target.load(std::memory_order_relaxed);
+		if(value <= expect) return;
+		bool success = target.compare_exchange_strong(expect, value);
+		if(success) return;
+	}
+}
+
+void atomic_min(std::atomic_size_t & target, size_t value) {
+	for(;;) {
+		size_t expect = target.load(std::memory_order_relaxed);
+		if(value >= expect) return;
+		bool success = target.compare_exchange_strong(expect, value);
+		if(success) return;
+	}
+}
+
+void tally_stats(global_stat_t & global, local_stat_t & local) {
+	global.cnt   += local.cnt;
+}
+
+void waitfor(double & duration, barrier_t & barrier, std::atomic_bool & done) {
+	std::cout << "Starting" << std::endl;
+	auto before = Clock::now();
+	barrier.wait(0);
+
+	while(true) {
+		usleep(100000);
+		auto now = Clock::now();
+		duration_t durr = now - before;
+		if( durr.count() > duration ) {
+			done = true;
+			break;
+		}
+		std::cout << "\r" << std::setprecision(4) << durr.count();
+		std::cout.flush();
+	}
+
+	barrier.wait(0);
+	auto after = Clock::now();
+	duration_t durr = after - before;
+	duration = durr.count();
+	std::cout << "\rClosing down" << std::endl;
+}
+
+void waitfor(double & duration, barrier_t & barrier, const std::atomic_size_t & count) {
+	std::cout << "Starting" << std::endl;
+	auto before = Clock::now();
+	barrier.wait(0);
+
+	while(true) {
+		usleep(100000);
+		size_t c = count.load();
+		if( c == 0 ) {
+			break;
+		}
+		std::cout << "\r" << c;
+		std::cout.flush();
+	}
+
+	barrier.wait(0);
+	auto after = Clock::now();
+	duration_t durr = after - before;
+	duration = durr.count();
+	std::cout << "\rClosing down" << std::endl;
+}
+
+void print_stats(double duration, unsigned nthread, global_stat_t & global) {
+	std::cout << "Done" << std::endl;
+
+	size_t ops = global.cnt;
+	size_t ops_sec = size_t(double(ops) / duration);
+	size_t ops_thread = ops_sec / nthread;
+	auto dur_nano = duration_cast<std::nano>(1.0);
+
+	std::cout << "Duration      : " << duration << "s\n";
+	std::cout << "ns/Op         : " << ( dur_nano / ops_thread )<< "\n";
+	std::cout << "Ops/sec/thread: " << ops_thread << "\n";
+	std::cout << "Ops/sec       : " << ops_sec << "\n";
+	std::cout << "Total ops     : " << ops << "\n";
+}
+
+static inline bool bts(std::atomic_size_t & target, size_t bit ) {
+	/*
+	int result = 0;
+	asm volatile(
+		"LOCK btsq %[bit], %[target]\n\t"
+		:"=@ccc" (result)
+		: [target] "m" (target), [bit] "r" (bit)
+	);
+ 	return result != 0;
+	/*/
+	size_t mask = 1ul << bit;
+	size_t ret = target.fetch_or(mask, std::memory_order_relaxed);
+	return (ret & mask) != 0;
+	//*/
+}
+
+static inline bool btr(std::atomic_size_t & target, size_t bit ) {
+	/*
+	int result = 0;
+	asm volatile(
+		"LOCK btrq %[bit], %[target]\n\t"
+		:"=@ccc" (result)
+		: [target] "m" (target), [bit] "r" (bit)
+	);
+ 	return result != 0;
+	/*/
+	size_t mask = 1ul << bit;
+	size_t ret = target.fetch_and(~mask, std::memory_order_relaxed);
+	return (ret & mask) != 0;
+	//*/
+}
+
+// ================================================================================================
+//                        EXPERIMENTS
+// ================================================================================================
+
+// ================================================================================================
+__attribute__((noinline)) void runPingPong_body(
+	std::atomic<bool>& done,
+	local_stat_t & local,
+	std::atomic_size_t & target,
+	size_t id
+) {
+	while(__builtin_expect(!done.load(std::memory_order_relaxed), true)) {
+
+		bool ret;
+		ret = bts(target, id);
+		assert(!ret);
+
+		// -----
+
+		ret = btr(target, id);
+		assert(ret);
+		local.cnt++;
+	}
+}
+
+void run(unsigned nthread, double duration) {
+	// Barrier for synchronization
+	barrier_t barrier(nthread + 1);
+
+	// Data to check everything is OK
+	global_stat_t global;
+
+	// Flag to signal termination
+	std::atomic_bool done  = { false };
+
+	std::cout << "Initializing ";
+	// List being tested
+	std::atomic_size_t word = { 0 };
+	{
+		std::thread * threads[nthread];
+		unsigned i = 1;
+		for(auto & t : threads) {
+			t = new std::thread([&done, &word, &barrier, &global](unsigned tid) {
+				local_stat_t local;
+
+				// affinity(tid);
+
+				barrier.wait(tid);
+
+				// EXPERIMENT START
+
+				runPingPong_body(done, local, word, tid - 1);
+
+				// EXPERIMENT END
+
+				barrier.wait(tid);
+
+				tally_stats(global, local);
+			}, i++);
+		}
+
+		waitfor(duration, barrier, done);
+
+		for(auto t : threads) {
+			t->join();
+			delete t;
+		}
+	}
+
+	print_stats(duration, nthread, global);
+}
+
+// ================================================================================================
+
+int main(int argc, char * argv[]) {
+
+	double duration   = 5.0;
+	unsigned nthreads = 2;
+
+	std::cout.imbue(std::locale(""));
+
+	for(;;) {
+		static struct option options[] = {
+			{"duration",  required_argument, 0, 'd'},
+			{"nthreads",  required_argument, 0, 't'},
+			{0, 0, 0, 0}
+		};
+
+		int idx = 0;
+		int opt = getopt_long(argc, argv, "d:t:", options, &idx);
+
+		std::string arg = optarg ? optarg : "";
+		size_t len = 0;
+		switch(opt) {
+			case -1:
+				if(optind != argc) {
+					std::cerr << "Too many arguments " << argc << " " << idx << std::endl;
+					goto usage;
+				}
+				goto run;
+			// Numeric Arguments
+			case 'd':
+				try {
+					duration = std::stod(optarg, &len);
+					if(len != arg.size()) { throw std::invalid_argument(""); }
+				} catch(std::invalid_argument &) {
+					std::cerr << "Duration must be a valid double, was " << arg << std::endl;
+					goto usage;
+				}
+				break;
+			case 't':
+				try {
+					nthreads = std::stoul(optarg, &len);
+					if(len != arg.size() || nthreads > (8 * sizeof(size_t))) { throw std::invalid_argument(""); }
+				} catch(std::invalid_argument &) {
+					std::cerr << "Number of threads must be a positive integer less than or equal to " << sizeof(size_t) * 8 << ", was " << arg << std::endl;
+					goto usage;
+				}
+				break;
+			// Other cases
+			default: /* ? */
+				std::cerr << opt << std::endl;
+			usage:
+				std::cerr << "Usage: " << argv[0] << ": [options]" << std::endl;
+				std::cerr << std::endl;
+				std::cerr << "  -d, --duration=DURATION  Duration of the experiment, in seconds" << std::endl;
+				std::cerr << "  -t, --nthreads=NTHREADS  Number of kernel threads" << std::endl;
+				std::exit(1);
+		}
+	}
+	run:
+
+	check_cache_line_size();
+
+	std::cout << "Running " << nthreads << " threads for " << duration << " seconds" << std::endl;
+	run(nthreads, duration);
+	return 0;
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/bts_test.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/bts_test.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/bts_test.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,32 @@
+#include <cassert>
+#include <iostream>
+
+bool bts(volatile size_t & target, size_t bit ) {
+	bool result = false;
+	asm volatile(
+		"LOCK btsq %[bit], %[target]\n\t"
+		: "=@ccc" (result), [target] "+m" (target)	// "=c" bound result to %ecx, not the carry flag; target is read-write
+		: [bit] "r" (bit)
+	);
+	return result;
+}
+
+bool btr(volatile size_t & target, size_t bit ) {
+	bool result = false;
+	asm volatile(
+		"LOCK btrq %[bit], %[target]\n\t"
+		: "=@ccc" (result), [target] "+m" (target)	// "=c" bound result to %ecx, not the carry flag; target is read-write
+		: [bit] "r" (bit)
+	);
+	return result;
+}
+
+int main() {
+	volatile size_t i = 0;
+	std::cout << std::hex << i << std::endl;
+	if ( bts(i, 31) ) { std::cerr << "error: bit 31 was already set\n"; return 1; }	// bts returns the PREVIOUS bit value: false here
+	std::cout << std::hex << i << std::endl;
+	if ( !btr(i, 31) ) { std::cerr << "error: bit 31 was not set\n"; return 1; }	// btr must see the bit set by bts above
+	std::cout << std::hex << i << std::endl;
+	return 0;
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/links.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/links.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/links.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,122 @@
+#pragma once
+
+#include "assert.hpp"
+#include "utils.hpp"
+
+template<typename node_t>
+struct _LinksFields_t {
+	node_t * prev = nullptr;
+	node_t * next = nullptr;
+	volatile unsigned long long ts = 0;
+	unsigned hint = (unsigned)-1;
+};
+
+template<typename node_t>
+class __attribute__((aligned(128))) intrusive_queue_t {
+public:
+	typedef spinlock_t lock_t;
+
+	struct stat {
+		ssize_t diff = 0;
+		size_t  push = 0;
+		size_t  pop  = 0;
+	};
+
+private:
+	struct sentinel_t {
+		_LinksFields_t<node_t> _links;
+	};
+
+public:
+	lock_t lock;
+
+private:
+	sentinel_t before;
+	sentinel_t after;
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Winvalid-offsetof"
+	static constexpr auto fields_offset = offsetof( node_t, _links );
+#pragma GCC diagnostic pop
+public:
+	intrusive_queue_t()
+		: before{{ nullptr, tail() }}
+		, after {{ head(), nullptr }}
+	{
+		/* paranoid */ assert((reinterpret_cast<uintptr_t>( head() ) + fields_offset) == reinterpret_cast<uintptr_t>(&before));
+		/* paranoid */ assert((reinterpret_cast<uintptr_t>( tail() ) + fields_offset) == reinterpret_cast<uintptr_t>(&after ));
+		/* paranoid */ assert(head()->_links.prev == nullptr);
+		/* paranoid */ assert(head()->_links.next == tail() );
+		/* paranoid */ assert(tail()->_links.next == nullptr);
+		/* paranoid */ assert(tail()->_links.prev == head() );
+		/* paranoid */ assert(sizeof(*this) == 128);
+		/* paranoid */ assert((intptr_t(this) % 128) == 0);
+	}
+
+	~intrusive_queue_t() = default;
+
+	inline node_t * head() const {
+		node_t * rhead = reinterpret_cast<node_t *>(
+			reinterpret_cast<uintptr_t>( &before ) - fields_offset
+		);
+		assert(rhead);
+		return rhead;
+	}
+
+	inline node_t * tail() const {
+		node_t * rtail = reinterpret_cast<node_t *>(
+			reinterpret_cast<uintptr_t>( &after ) - fields_offset
+		);
+		assert(rtail);
+		return rtail;
+	}
+
+	inline bool push(node_t * node) {
+		assert(lock);
+		assert(node->_links.ts != 0);
+		node_t * tail = this->tail();
+
+		node_t * prev = tail->_links.prev;
+		// assertf(node->_links.ts >= prev->_links.ts,
+		// 	"New node has smaller timestamp: %llu < %llu", node->_links.ts, prev->_links.ts);
+		node->_links.next = tail;
+		node->_links.prev = prev;
+		prev->_links.next = node;
+		tail->_links.prev = node;
+
+		if(before._links.ts == 0l) {
+			before._links.ts = node->_links.ts;
+			assert(node->_links.prev == this->head());
+			return true;
+		}
+		return false;
+	}
+
+	inline std::pair<node_t *, bool> pop() {
+		assert(lock);
+		node_t * head = this->head();
+		node_t * tail = this->tail();
+
+		node_t * node = head->_links.next;
+		node_t * next = node->_links.next;
+		if(node == tail) return {nullptr, false};
+
+		head->_links.next = next;
+		next->_links.prev = head;
+
+		if(next == tail) {
+			before._links.ts = 0l;
+			return {node, true};
+		}
+		else {
+			assert(next->_links.ts != 0);
+			before._links.ts = next->_links.ts;
+			assert(before._links.ts != 0);
+			return {node, false};
+		}
+	}
+
+	long long ts() const {
+		return before._links.ts;
+	}
+};
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/prefetch.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/prefetch.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/prefetch.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,106 @@
+#include <algorithm>
+#include <array>
+#include <chrono>
+#include <iostream>
+#include <locale>
+#include <memory>
+#include <random>
+#include <string>
+#include <vector>
+#include <cassert>
+
+struct __attribute__((aligned(64))) element {
+	size_t value;
+};
+
+using block = std::array<element, 100>;
+
+block * create() {
+	block * b = new block();
+	for(auto & e : *b) {
+		e.value = rand();
+	}
+	b->back().value = b->size();
+
+	return b;
+}
+
+static inline size_t find(const block & b) {
+	size_t r = 0;
+	for(; r < b.size(); r++) {
+		if(__builtin_expect(b[r].value == b.size(), false)) break;
+	}
+
+	return r;
+}
+
+void usage(char * argv[]) {
+	std::cerr << argv[0] << ": [DURATION (FLOAT:SEC)] [NBLOCKS]" << std::endl;	// dropped stray ";;"
+	std::exit(1);
+}
+
+int main(int argc, char * argv[]) {
+	size_t nblocks = 1000;
+	double duration = 5;
+
+	std::cout.imbue(std::locale(""));
+
+	switch (argc)
+	{
+	case 3:
+		nblocks = std::stoul(argv[2]);
+		[[fallthrough]];
+	case 2:
+		duration = std::stod(argv[1]);
+		if( duration <= 0.0 ) {
+			std::cerr << "Duration must be positive, was " << argv[1] << "(" << duration << ")" << std::endl;
+			usage(argv);
+		}
+		[[fallthrough]];
+	case 1:
+		break;
+	default:
+		usage(argv);
+		break;
+	}
+
+	std::vector<std::unique_ptr<block>> blocks;
+	for(size_t i = 0; i < nblocks; i++) {
+		blocks.emplace_back( create() );
+	}
+	std::shuffle(blocks.begin(), blocks.end(), std::mt19937{ std::random_device{}() });	// std::random_shuffle was removed in C++17
+
+	size_t CRC = 0;
+	size_t count = 0;
+
+	using clock = std::chrono::high_resolution_clock;
+	auto before = clock::now();
+
+	while(true) {
+		for(const auto & b : blocks) {
+			CRC += find(*b);
+			count++;
+		}
+		auto now = clock::now();
+		std::chrono::duration<double> durr = now - before;
+		if( durr.count() > duration ) {
+			break;
+		}
+	}
+
+	auto after = clock::now();
+	std::chrono::duration<double> durr = after - before;
+	duration = durr.count();
+
+	using std::chrono::duration_cast;
+	using std::chrono::nanoseconds;
+
+	size_t ops_sec = size_t(double(count) / duration);
+	auto dur_nano = duration_cast<nanoseconds>(std::chrono::duration<double>(1.0)).count();
+
+	std::cout << "CRC           : " << CRC << "\n";
+	std::cout << "Duration      : " << duration << "s\n";
+	std::cout << "Total ops     : " << count << "\n";
+	std::cout << "Ops/sec       : " << ops_sec << "\n";
+	std::cout << "ns/Op         : " << ( dur_nano / ops_sec )<< "\n";
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/process.sh
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/process.sh	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/process.sh	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+NAME=$1
+
+if [ ! -f "raw/${NAME}.out" ]; then
+    echo "No output for ${NAME}"
+    exit 1
+fi
+
+if [ ! -f "raw/${NAME}.data" ]; then
+    echo "No perf record for ${NAME}"
+    exit 1
+fi
+
+echo "Processing perf data for ${NAME}"
+
+OPS=$(grep -e 'Total ops' raw/${NAME}.out)
+CPOP=$( echo "Hello $OPS" | \grep -oP ", \K[0-9,]+(?=o)" --color | tr -d ',')
+CPUSH=$(echo "Hello $OPS" | \grep -oP "\(\K[0-9,]+(?=i)" --color | tr -d ',')
+
+REPORT=''
+perf report -n --percent-limit 5 --stdio --no-children -i raw/${NAME}.data > raw/.temp
+EVENT=$(cat raw/.temp | grep -e '^# Samples'| cut -d ' ' -f 6)
+SPOP=$( cat raw/.temp | grep -e '] relaxed_list<Node>::pop'  | tr -s ' ' | cut -d ' ' -f 3)
+SPUSH=$(cat raw/.temp | grep -e '] relaxed_list<Node>::push' | tr -s ' ' | cut -d ' ' -f 3)
+SARR=$( cat raw/.temp | grep -e '] snz[i|m]_t::node::arrive_h'   | tr -s ' ' | cut -d ' ' -f 3)
+
+echo "$OPS"
+echo "Push count: $CPUSH"
+echo "Pop  count: $CPOP"
+
+echo "Pop    samples: $SPOP"
+echo "Push   samples: $SPUSH"
+echo "Arrive samples: $SARR"
+
+SpPUSH=$(bc -l <<< "scale=9; $SPUSH / $CPUSH")
+SpPOP=$( bc -l <<< "scale=9; $SPOP  / $CPOP" )
+SpARR=$( bc -l <<< "scale=9; $SARR  / $CPUSH")
+
+printf "%s per push()  : %.9f\n" $EVENT $SpPUSH | sed ':a;s/\B[0-9]\{3\}\>/,&/;ta'
+printf "%s per pop()   : %.9f\n" $EVENT $SpPOP  | sed ':a;s/\B[0-9]\{3\}\>/,&/;ta'
+printf "%s per arrive(): %.9f\n" $EVENT $SpARR  | sed ':a;s/\B[0-9]\{3\}\>/,&/;ta'
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,53 @@
+#include <atomic>
+
+struct thread {};
+
+struct cluster {
+	void add();
+	void remove();
+	thread * next();
+};
+
+struct processor {
+
+	cluster cluster;
+	std::atomic<bool> stop;
+	volatile bool idle;
+};
+
+
+void run(thread * ) {
+	// verify preemption
+
+	// run Thread
+
+	// verify preemption
+
+	// finish Running
+}
+
+void main(processor & self) {
+
+	self.cluster.add();
+
+	while(!self.stop) {
+		if(thread * t = self.cluster.next()) {
+			run(t);
+			continue;
+		}
+
+		self.set_idle();
+		std::atomic_thread_fence();
+
+		if(thread * t = self.cluster.next()) {
+			self.idle = false;
+			run(t);
+			continue;
+		}
+
+		halt();
+	}
+
+	self.cluster.remove();
+
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,215 @@
+#include <cassert>
+
+#include <atomic>
+#include <new>
+#include <type_traits>
+
+struct processor;
+
+struct __attribute__((aligned(64))) processor_id {
+	std::atomic<processor *> handle;
+	std::atomic<bool> lock;
+
+	processor_id() = default;
+	processor_id(processor * proc) : handle(proc), lock() {
+		/*paranoid*/ assert(std::atomic_is_lock_free(&lock));
+	}
+};
+
+extern unsigned num();
+
+#define ERROR throw 1
+
+class processor_list {
+private:
+
+	static const constexpr std::size_t cache_line_size = 64;
+
+	static_assert(sizeof (processor_id) <= cache_line_size, "ERROR: Instances must fit in one cache line" );
+	static_assert(alignof(processor_id) == cache_line_size, "ERROR: Instances must aligned to one cache line" );
+
+	const unsigned max;     // total cachelines allocated
+	std::atomic_uint alloc; // cachelines currently in use
+	std::atomic_uint ready; // cachelines ready to iterate over (!= to alloc when thread is in second half of doregister)
+	std::atomic<bool> lock; // writerlock
+	processor_id * data;    // data pointer
+
+private:
+	inline void acquire(std::atomic<bool> & ll) {
+		while( __builtin_expect(ll.exchange(true),false) ) {
+			while(ll.load(std::memory_order_relaxed))
+				asm volatile("pause");
+		}
+		/* paranoid */ assert(ll);
+	}
+
+public:
+	processor_list()
+		: max(num())
+		, alloc(0)
+		, ready(0)
+		, lock{false}
+		, data( new processor_id[max] )
+	{
+		/*paranoid*/ assert(num() == max);
+		/*paranoid*/ assert(std::atomic_is_lock_free(&alloc));
+		/*paranoid*/ assert(std::atomic_is_lock_free(&ready));
+	}
+
+	~processor_list() {
+		delete[] data;
+	}
+
+	//=======================================================================
+	// Lock-Free registering/unregistering of threads
+	unsigned doregister(processor * proc) {
+		// Step - 1 : check if there is already space in the data
+		uint_fast32_t s = ready;
+
+		// Check among all the ready
+		for(uint_fast32_t i = 0; i < s; i++) {
+			processor * null = nullptr; // Re-write every loop since compare thrashes it
+			if( data[i].handle.load(std::memory_order_relaxed) == null
+			 && data[i].handle.compare_exchange_strong(null, proc)) {
+				/*paranoid*/ assert(i < ready);
+				/*paranoid*/ assert(alignof(decltype(data[i])) == cache_line_size);
+				/*paranoid*/ assert((uintptr_t(&data[i]) % cache_line_size) == 0);
+				return i;
+			}
+		}
+
+		if(max <= alloc) ERROR;
+
+		// Step - 2 : F&A to get a new spot in the array.
+		uint_fast32_t n = alloc++;
+		if(max <= n) ERROR;
+
+		// Step - 3 : Mark space as used and then publish it.
+		void * storage = &data[n];
+		new (storage) processor_id( proc );
+		while(true) {
+			unsigned copy = n;
+			if( ready.load(std::memory_order_relaxed) == n
+			 && ready.compare_exchange_weak(copy, n + 1) )
+			 	break;
+			asm volatile("pause");
+		}
+
+		// Return new spot.
+		/*paranoid*/ assert(n < ready);
+		/*paranoid*/ assert(alignof(decltype(data[n])) == cache_line_size);
+		/*paranoid*/ assert((uintptr_t(&data[n]) % cache_line_size) == 0);
+		return n;
+	}
+
+	processor * unregister(unsigned iproc) {
+		/*paranoid*/ assert(iproc < ready);
+		auto ret = data[iproc].handle.load(std::memory_order_relaxed);
+		data[iproc].handle = nullptr;
+		return ret;
+	}
+
+	// Reset all registration
+	// Unsafe in most cases, use for testing only.
+	void reset() {
+		alloc = 0;
+		ready = 0;
+	}
+
+	processor * get(unsigned iproc) {
+		return data[iproc].handle.load(std::memory_order_relaxed);
+	}
+
+	//=======================================================================
+	// Reader-writer lock implementation
+	// Concurrent with doregister/unregister,
+	//    i.e., threads can be added at any point during or between the entry/exit
+
+	//-----------------------------------------------------------------------
+	// Reader side
+	void read_lock(unsigned iproc) {
+		/*paranoid*/ assert(iproc < ready);
+
+		// Step 1 : make sure no writer are in the middle of the critical section
+		while(lock.load(std::memory_order_relaxed))
+			asm volatile("pause");
+
+		// Fence needed because we don't want to start trying to acquire the lock
+		// before we read a false.
+		// Not needed on x86
+		// std::atomic_thread_fence(std::memory_order_seq_cst);
+
+		// Step 2 : acquire our local lock
+		acquire( data[iproc].lock );
+		/*paranoid*/ assert(data[iproc].lock);
+	}
+
+	void read_unlock(unsigned iproc) {
+		/*paranoid*/ assert(iproc < ready);
+		/*paranoid*/ assert(data[iproc].lock);
+		data[iproc].lock.store(false, std::memory_order_release);
+	}
+
+	//-----------------------------------------------------------------------
+	// Writer side
+	uint_fast32_t write_lock() {
+		// Step 1 : lock global lock
+		// It is needed to avoid processors that register mid Critical-Section
+		//   to simply lock their own lock and enter.
+		acquire(lock);
+
+		// Step 2 : lock per-proc lock
+		// Processors that are currently being registered aren't counted
+		//   but can't be in read_lock or in the critical section.
+		// All other processors are counted
+		uint_fast32_t s = ready;
+		for(uint_fast32_t i = 0; i < s; i++) {
+			acquire( data[i].lock );
+		}
+
+		return s;
+	}
+
+	void write_unlock(uint_fast32_t last_s) {
+		// Step 1 : release local locks
+		// This must be done while the global lock is held to avoid
+		//   threads that where created mid critical section
+		//   to race to lock their local locks and have the writer
+		//   immidiately unlock them
+		// Alternative solution : return s in write_lock and pass it to write_unlock
+		for(uint_fast32_t i = 0; i < last_s; i++) {
+			assert(data[i].lock);
+			data[i].lock.store(false, std::memory_order_release);
+		}
+
+		// Step 2 : release global lock
+		/*paranoid*/ assert(true == lock);
+		lock.store(false, std::memory_order_release);
+	}
+
+	//-----------------------------------------------------------------------
+	// Checking support
+	uint_fast32_t epoch_check() {
+		// Step 1 : lock global lock
+		// It is needed to avoid processors that register mid Critical-Section
+		//   to simply lock their own lock and enter.
+		while(lock.load(std::memory_order_relaxed))
+			asm volatile("pause");
+
+		// Step 2 : lock per-proc lock
+		// Processors that are currently being registered aren't counted
+		//   but can't be in read_lock or in the critical section.
+		// All other processors are counted
+		uint_fast32_t s = ready;
+		for(uint_fast32_t i = 0; i < s; i++) {
+			while(data[i].lock.load(std::memory_order_relaxed))
+				asm volatile("pause");
+		}
+
+		return s;
+	}
+
+public:
+};
+
+#undef ERROR
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list_fast.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list_fast.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list_fast.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,173 @@
+#include "processor_list.hpp"
+
+#include <array>
+#include <iomanip>
+#include <iostream>
+#include <locale>
+#include <string>
+#include <thread>
+
+#include "utils.hpp"
+
+unsigned num() {
+	return 0x1000000;
+}
+
+//-------------------
+
+struct processor {
+	unsigned id;
+};
+void run(unsigned nthread, double duration, unsigned writes, unsigned epochs) {
+	assert(writes < 100);
+
+	// List being tested
+	processor_list list = {};
+
+	// Barrier for synchronization
+	barrier_t barrier(nthread + 1);
+
+	// Data to check everything is OK
+	size_t write_committed = 0ul;
+	struct {
+		std::atomic_size_t write = { 0ul };
+		std::atomic_size_t read  = { 0ul };
+		std::atomic_size_t epoch = { 0ul };
+	} lock_cnt;
+
+	// Flag to signal termination
+	std::atomic_bool done = { false };
+
+	std::thread * threads[nthread];
+	unsigned i = 1;
+	for(auto & t : threads) {
+		t = new std::thread([&done, &list, &barrier, &write_committed, &lock_cnt, writes, epochs](unsigned tid) {
+			Random rand(tid + rdtscl());
+			processor proc;
+			proc.id = list.doregister(&proc);
+			size_t writes_cnt = 0;
+			size_t reads_cnt = 0;
+			size_t epoch_cnt = 0;
+
+			affinity(tid);
+
+			barrier.wait(tid);
+
+			while(__builtin_expect(!done, true)) {
+				auto r = rand.next() % 100;
+				if (r < writes) {
+					auto n = list.write_lock();
+					write_committed++;
+					writes_cnt++;
+					assert(writes_cnt < -2ul);
+					list.write_unlock(n);
+				}
+				else if(r < epochs) {
+					list.epoch_check();
+					epoch_cnt++;
+				}
+				else {
+					list.read_lock(proc.id);
+					reads_cnt++;
+					assert(reads_cnt < -2ul);
+					list.read_unlock(proc.id);
+				}
+			}
+
+			barrier.wait(tid);
+
+			auto p = list.unregister(proc.id);
+			assert(&proc == p);
+			lock_cnt.write += writes_cnt;
+			lock_cnt.read  += reads_cnt;
+			lock_cnt.epoch += epoch_cnt;
+		}, i++);
+	}
+
+	auto before = Clock::now();
+	barrier.wait(0);
+
+	while(true) {
+		usleep(1000);
+		auto now = Clock::now();
+		duration_t durr = now - before;
+		if( durr.count() > duration ) {
+			done = true;
+			break;
+		}
+	}
+
+	barrier.wait(0);
+	auto after = Clock::now();
+	duration_t durr = after - before;
+	duration = durr.count();
+
+	for(auto t : threads) {
+		t->join();
+		delete t;
+	}
+
+	assert(write_committed == lock_cnt.write);
+
+	size_t totalop = lock_cnt.read + lock_cnt.write + lock_cnt.epoch;
+	size_t ops_sec = size_t(double(totalop) / duration);
+	size_t ops_thread = ops_sec / nthread;
+	double dur_nano = duration_cast<std::nano>(1.0);
+
+	std::cout << "Duration      : " << duration << "s\n";
+	std::cout << "Total ops     : " << totalop << "(" << lock_cnt.read << "r, " << lock_cnt.write << "w, " << lock_cnt.epoch << "e)\n";
+	std::cout << "Ops/sec       : " << ops_sec << "\n";
+	std::cout << "Ops/sec/thread: " << ops_thread << "\n";
+	std::cout << "ns/Op         : " << ( dur_nano / ops_thread )<< "\n";
+}
+
+void usage(char * argv[]) {
+	std::cerr << argv[0] << ": [DURATION (FLOAT:SEC)] [NTHREADS] [%WRITES]" << std::endl;;
+	std::exit(1);
+}
+
+int main(int argc, char * argv[]) {
+
+	double duration   = 5.0;
+	unsigned nthreads = 2;
+	unsigned writes   = 0;
+	unsigned epochs   = 0;
+
+	std::cout.imbue(std::locale(""));
+
+	switch (argc)
+	{
+	case 5:
+		epochs = std::stoul(argv[4]);
+		[[fallthrough]];
+	case 4:
+		writes = std::stoul(argv[3]);
+		if( (writes + epochs) > 100 ) {
+			std::cerr << "Writes + Epochs must be valid percentage, was " << argv[3] << " + " << argv[4] << "(" << writes << " + " << epochs << ")" << std::endl;
+			usage(argv);
+		}
+		[[fallthrough]];
+	case 3:
+		nthreads = std::stoul(argv[2]);
+		[[fallthrough]];
+	case 2:
+		duration = std::stod(argv[1]);
+		if( duration <= 0.0 ) {
+			std::cerr << "Duration must be positive, was " << argv[1] << "(" << duration << ")" << std::endl;
+			usage(argv);
+		}
+		[[fallthrough]];
+	case 1:
+		break;
+	default:
+		usage(argv);
+		break;
+	}
+
+	check_cache_line_size();
+
+	std::cout << "Running " << nthreads << " threads for " << duration << " seconds with " << writes << "% writes and " << epochs << "% epochs" << std::endl;
+	run(nthreads, duration, writes, epochs + writes);
+
+	return 0;
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list_good.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list_good.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list_good.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,269 @@
+#include "processor_list.hpp"
+
+#include <iostream>
+#include <string>
+#include <thread>
+
+unsigned num() {
+	return 0x1000000;
+}
+
+// Spinning barrier used to synchronize thread start/stop
+class barrier_t {
+public:
+	barrier_t(size_t total)
+		: waiting(0)
+		, total(total)
+	{}
+
+	void wait(unsigned) {
+		size_t target = waiting++;
+		target = (target - (target % total)) + total;
+		while(waiting < target)
+			asm volatile("pause");
+
+		assert(waiting < (1ul << 60));
+    	}
+
+private:
+	std::atomic<size_t> waiting;
+	size_t total;
+};
+
+class Random {
+private:
+	unsigned int seed;
+public:
+	Random(int seed) {
+		this->seed = seed;
+	}
+
+	/** returns pseudorandom x satisfying 0 <= x < n. **/
+	unsigned int next() {
+		seed ^= seed << 6;
+		seed ^= seed >> 21;
+		seed ^= seed << 7;
+		return seed;
+    	}
+};
+
+//-------------------
+
+struct processor {
+	unsigned id;
+};
+
+// Stage 1
+// Make sure that the early registration works correctly
+// Registration uses a different process if the act of
+// registering the processor makes it the highest processor count
+// seen yet.
+void stage1(unsigned nthread, unsigned repeats) {
+	const int n = repeats;
+	const int nproc = 10;
+
+	// List being tested
+	processor_list list;
+
+	// Barrier for synchronization
+	barrier_t barrier(nthread + 1);
+
+	// Seen values to detect duplication
+	std::atomic<processor *> ids[nthread * nproc];
+	for(auto & i : ids) {
+		i = nullptr;
+	}
+
+	// Can't pass VLA to lambda
+	std::atomic<processor *> * idsp = ids;
+
+	// Threads which will run the code
+	std::thread * threads[nthread];
+	unsigned i = 1;
+	for(auto & t : threads) {
+		// Each thread will try to register a processor then add it to the
+		// list of registered processors
+		t = new std::thread([&list, &barrier, idsp, n](unsigned tid){
+			processor proc[nproc];
+			for(int i = 0; i < n; i++) {
+				for(auto & p : proc) {
+					// Register the thread
+					p.id = list.doregister(&p);
+				}
+
+				for(auto & p : proc) {
+					// Make sure no one got this id before
+					processor * prev = idsp[p.id].exchange(&p);
+					assert(nullptr == prev);
+
+					// Make sure id is still consistent
+					assert(&p == list.get(p.id));
+				}
+
+				// wait for round to finish
+				barrier.wait(tid);
+
+				// wait for reset
+				barrier.wait(tid);
+			}
+		}, i++);
+	}
+
+	for(int i = 0; i < n; i++) {
+		//Wait for round to finish
+		barrier.wait(0);
+
+		// Reset list
+		list.reset();
+
+		std::cout << i << "\r";
+
+		// Reset seen values
+		for(auto & i : ids) {
+			i = nullptr;
+		}
+
+		// Start next round
+		barrier.wait(0);
+	}
+
+	for(auto t : threads) {
+		t->join();
+		delete t;
+	}
+}
+
+// Stage 2
+// Check that once churning starts, registration is still consistent.
+void stage2(unsigned nthread, unsigned repeats) {
+	// List being tested
+	processor_list list;
+
+	// Threads which will run the code
+	std::thread * threads[nthread];
+	unsigned i = 1;
+	for(auto & t : threads) {
+		// Each thread will try to register a few processors and
+		// unregister them, making sure that the registration is
+		// consistent
+		t = new std::thread([&list, repeats](unsigned tid){
+			processor procs[10];
+			for(unsigned i = 0; i < repeats; i++) {
+				// register the procs and note the id
+				for(auto & p : procs) {
+					p.id = list.doregister(&p);
+				}
+
+				if(1 == tid) std::cout << i << "\r";
+
+				// check the id is still consistent
+				for(const auto & p : procs) {
+					assert(&p == list.get(p.id));
+				}
+
+				// unregister and check the id is consistent
+				for(const auto & p : procs) {
+					assert(&p == list.unregister(p.id));
+				}
+			}
+		}, i++);
+	}
+
+	for(auto t : threads) {
+		t->join();
+		delete t;
+	}
+}
+
+bool is_writer();
+
+// Stage 3
+// Check that the reader writer lock works.
+void stage3(unsigned nthread, unsigned repeats) {
+	// List being tested
+	processor_list list;
+
+	size_t before = 0;
+
+	std::unique_ptr<size_t> after( new size_t(0) );
+
+	std::atomic<bool> done ( false );
+
+	// Threads which will run the code
+	std::thread * threads[nthread];
+	unsigned i = 1;
+	for(auto & t : threads) {
+		// Each thread will try to register a few processors and
+		// unregister them, making sure that the registration is
+		// consistent
+		t = new std::thread([&list, repeats, &before, &after, &done](unsigned tid){
+			Random rng(tid);
+			processor proc;
+			proc.id = list.doregister(&proc);
+			while(!done) {
+
+				if( (rng.next() % 100) == 0 ) {
+					auto r = list.write_lock();
+
+					auto b = before++;
+
+					std::cout << b << "\r";
+
+					(*after)++;
+
+					if(b >= repeats) done = true;
+
+					list.write_unlock(r);
+				}
+				else {
+					list.read_lock(proc.id);
+					assert(before == *after);
+					list.read_unlock(proc.id);
+				}
+
+			}
+
+			list.unregister(proc.id);
+		}, i++);
+	}
+
+	for(auto t : threads) {
+		t->join();
+		delete t;
+	}
+}
+
+int main(int argc, char * argv[]) {
+
+	unsigned nthreads = 1;
+	if( argc >= 3 ) {
+		size_t idx;
+		nthreads = std::stoul(argv[2], &idx);
+		assert('\0' == argv[2][idx]);
+	}
+
+	unsigned repeats = 100;
+	if( argc >= 2 ) {
+		size_t idx;
+		repeats = std::stoul(argv[1], &idx);
+		assert('\0' == argv[1][idx]);
+	}
+
+	processor_list::check_cache_line_size();
+
+	std::cout << "Running " << repeats << " repetitions on " << nthreads << " threads" << std::endl;
+	std::cout << "Checking registration - early" << std::endl;
+	stage1(nthreads, repeats);
+	std::cout << "Done                         " << std::endl;
+
+	std::cout << "Checking registration - churn" << std::endl;
+	stage2(nthreads, repeats);
+	std::cout << "Done                         " << std::endl;
+
+	std::cout << "Checking RW lock             " << std::endl;
+	stage3(nthreads, repeats);
+	std::cout << "Done                         " << std::endl;
+
+
+	return 0;
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/randbit.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/randbit.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/randbit.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,236 @@
+#include <cstddef>
+#include <cstdint>
+#include <x86intrin.h>
+
+__attribute__((noinline)) unsigned nthSetBit(size_t mask, unsigned bit) {
+	uint64_t v = mask;   // Input value to find position with rank r.
+	unsigned int r = bit;// Input: bit's desired rank [1-64].
+	unsigned int s;      // Output: Resulting position of bit with rank r [1-64]
+	uint64_t a, b, c, d; // Intermediate temporaries for bit count.
+	unsigned int t;      // Bit count temporary.
+
+	// Do a normal parallel bit count for a 64-bit integer,
+	// but store all intermediate steps.
+	// a = (v & 0x5555...) + ((v >> 1) & 0x5555...);
+	a =  v - ((v >> 1) & ~0UL/3);
+	// b = (a & 0x3333...) + ((a >> 2) & 0x3333...);
+	b = (a & ~0UL/5) + ((a >> 2) & ~0UL/5);
+	// c = (b & 0x0f0f...) + ((b >> 4) & 0x0f0f...);
+	c = (b + (b >> 4)) & ~0UL/0x11;
+	// d = (c & 0x00ff...) + ((c >> 8) & 0x00ff...);
+	d = (c + (c >> 8)) & ~0UL/0x101;
+
+
+	t = (d >> 32) + (d >> 48);
+	// Now do branchless select!
+	s  = 64;
+	// if (r > t) {s -= 32; r -= t;}
+	s -= ((t - r) & 256) >> 3; r -= (t & ((t - r) >> 8));
+	t  = (d >> (s - 16)) & 0xff;
+	// if (r > t) {s -= 16; r -= t;}
+	s -= ((t - r) & 256) >> 4; r -= (t & ((t - r) >> 8));
+	t  = (c >> (s - 8)) & 0xf;
+	// if (r > t) {s -= 8; r -= t;}
+	s -= ((t - r) & 256) >> 5; r -= (t & ((t - r) >> 8));
+	t  = (b >> (s - 4)) & 0x7;
+	// if (r > t) {s -= 4; r -= t;}
+	s -= ((t - r) & 256) >> 6; r -= (t & ((t - r) >> 8));
+	t  = (a >> (s - 2)) & 0x3;
+	// if (r > t) {s -= 2; r -= t;}
+	s -= ((t - r) & 256) >> 7; r -= (t & ((t - r) >> 8));
+	t  = (v >> (s - 1)) & 0x1;
+	// if (r > t) s--;
+	s -= ((t - r) & 256) >> 8;
+	// s = 65 - s;
+	return s;
+}
+
+unsigned rand_bit(unsigned rnum, uint64_t mask) {
+	unsigned bit = mask ? rnum % __builtin_popcountl(mask) : 0;
+#if defined(BRANCHLESS)
+	uint64_t v = mask;   // Input value to find position with rank r.
+	unsigned int r = bit + 1;// Input: bit's desired rank [1-64].
+	unsigned int s;      // Output: Resulting position of bit with rank r [1-64]
+	uint64_t a, b, c, d; // Intermediate temporaries for bit count.
+	unsigned int t;      // Bit count temporary.
+
+	// Do a normal parallel bit count for a 64-bit integer,
+	// but store all intermediate steps.
+	// a = (v & 0x5555...) + ((v >> 1) & 0x5555...);
+	a =  v - ((v >> 1) & ~0UL/3);
+	// b = (a & 0x3333...) + ((a >> 2) & 0x3333...);
+	b = (a & ~0UL/5) + ((a >> 2) & ~0UL/5);
+	// c = (b & 0x0f0f...) + ((b >> 4) & 0x0f0f...);
+	c = (b + (b >> 4)) & ~0UL/0x11;
+	// d = (c & 0x00ff...) + ((c >> 8) & 0x00ff...);
+	d = (c + (c >> 8)) & ~0UL/0x101;
+
+
+	t = (d >> 32) + (d >> 48);
+	// Now do branchless select!
+	s  = 64;
+	// if (r > t) {s -= 32; r -= t;}
+	s -= ((t - r) & 256) >> 3; r -= (t & ((t - r) >> 8));
+	t  = (d >> (s - 16)) & 0xff;
+	// if (r > t) {s -= 16; r -= t;}
+	s -= ((t - r) & 256) >> 4; r -= (t & ((t - r) >> 8));
+	t  = (c >> (s - 8)) & 0xf;
+	// if (r > t) {s -= 8; r -= t;}
+	s -= ((t - r) & 256) >> 5; r -= (t & ((t - r) >> 8));
+	t  = (b >> (s - 4)) & 0x7;
+	// if (r > t) {s -= 4; r -= t;}
+	s -= ((t - r) & 256) >> 6; r -= (t & ((t - r) >> 8));
+	t  = (a >> (s - 2)) & 0x3;
+	// if (r > t) {s -= 2; r -= t;}
+	s -= ((t - r) & 256) >> 7; r -= (t & ((t - r) >> 8));
+	t  = (v >> (s - 1)) & 0x1;
+	// if (r > t) s--;
+	s -= ((t - r) & 256) >> 8;
+	// s = 65 - s;
+	return s - 1;
+#elif defined(LOOP)
+	for(unsigned i = 0; i < bit; i++) {
+		mask ^= (1ul << (__builtin_ffsl(mask) - 1ul));
+	}
+	return __builtin_ffsl(mask) - 1ul;
+#elif defined(PDEP)
+	uint64_t picked = _pdep_u64(1ul << bit, mask);
+	return __builtin_ffsl(picked) - 1ul;
+#else
+#error must define LOOP, PDEP or BRANCHLESS
+#endif
+}
+
+#include <cassert>
+#include <atomic>
+#include <chrono>
+#include <iomanip>
+#include <iostream>
+#include <locale>
+#include <thread>
+
+#include <unistd.h>
+
+class barrier_t {
+public:
+	barrier_t(size_t total)
+		: waiting(0)
+		, total(total)
+	{}
+
+	void wait(unsigned) {
+		size_t target = waiting++;
+		target = (target - (target % total)) + total;
+		while(waiting < target)
+			asm volatile("pause");
+
+		assert(waiting < (1ul << 60));
+    	}
+
+private:
+	std::atomic<size_t> waiting;
+	size_t total;
+};
+
+class Random {
+private:
+	unsigned int seed;
+public:
+	Random(int seed) {
+		this->seed = seed;
+	}
+
+	/** returns pseudorandom x satisfying 0 <= x < n. **/
+	unsigned int next() {
+		seed ^= seed << 6;
+		seed ^= seed >> 21;
+		seed ^= seed << 7;
+		return seed;
+    	}
+};
+
+using Clock = std::chrono::high_resolution_clock;
+using duration_t = std::chrono::duration<double>;
+using std::chrono::nanoseconds;
+
+template<typename Ratio, typename T>
+T duration_cast(T seconds) {
+	return std::chrono::duration_cast<std::chrono::duration<T, Ratio>>(std::chrono::duration<T>(seconds)).count();
+}
+
+void waitfor(double & duration, barrier_t & barrier, std::atomic_bool & done) {
+
+
+	std::cout << "Starting" << std::endl;
+	auto before = Clock::now();
+	barrier.wait(0);
+
+	while(true) {
+		usleep(100000);
+		auto now = Clock::now();
+		duration_t durr = now - before;
+		if( durr.count() > duration ) {
+			done = true;
+			break;
+		}
+		std::cout << "\r" << std::setprecision(4) << durr.count();
+		std::cout.flush();
+	}
+
+	barrier.wait(0);
+	auto after = Clock::now();
+	duration_t durr = after - before;
+	duration = durr.count();
+	std::cout << "\rClosing down" << std::endl;
+}
+
+__attribute__((noinline)) void body(Random & rand) {
+	uint64_t mask = (uint64_t(rand.next()) << 32ul) | uint64_t(rand.next());
+	unsigned idx = rand.next();
+
+	unsigned bit = rand_bit(idx, mask);
+
+	if(__builtin_expect(((1ul << bit) & mask) == 0, false)) {
+		std::cerr << std::hex <<  "Rand " << idx << " from " << mask;
+		std::cerr << " gave " << (1ul << bit) << "(" << std::dec << bit << ")" << std::endl;
+		std::abort();
+	}
+}
+
+void runRandBit(double duration) {
+
+	std::atomic_bool done  = { false };
+	barrier_t barrier(2);
+
+	size_t count = 0;
+	std::thread thread([&done, &barrier, &count]() {
+
+		Random rand(22);
+
+		barrier.wait(1);
+
+		for(;!done; count++) {
+			body(rand);
+		}
+
+		barrier.wait(1);
+	});
+
+	waitfor(duration, barrier, done);
+	thread.join();
+
+	size_t ops = count;
+	size_t ops_sec = size_t(double(ops) / duration);
+	auto dur_nano = duration_cast<std::nano>(1.0);
+
+	std::cout << "Duration      : " << duration << "s\n";
+	std::cout << "ns/Op         : " << ( dur_nano / ops )<< "\n";
+	std::cout << "Ops/sec       : " << ops_sec << "\n";
+	std::cout << "Total ops     : " << ops << std::endl;
+
+}
+
+int main() {
+	std::cout.imbue(std::locale(""));
+	runRandBit(5);
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/relaxed_list.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/relaxed_list.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/relaxed_list.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,1141 @@
+#if !defined(LIST_VARIANT_HPP)
+#define LIST_VARIANT_HPP "relaxed_list.hpp"
+#endif
+
+#include LIST_VARIANT_HPP
+#if !defined(LIST_VARIANT)
+#error no variant selected
+#endif
+
+#include <array>
+#include <iomanip>
+#include <iostream>
+#include <locale>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include <getopt.h>
+#include <unistd.h>
+#include <sys/sysinfo.h>
+
+#include "utils.hpp"
+
+struct __attribute__((aligned(64))) Node {
+	static std::atomic_size_t creates;
+	static std::atomic_size_t destroys;
+
+	_LinksFields_t<Node> _links;
+
+	int value;
+	int id;
+
+	Node() { creates++; }
+	Node(int value): value(value) { creates++; }
+	~Node() { destroys++; }
+};
+
+std::atomic_size_t Node::creates  = { 0 };
+std::atomic_size_t Node::destroys = { 0 };
+
+bool enable_stats = false;
+
+template<>
+thread_local LIST_VARIANT<Node>::TLS LIST_VARIANT<Node>::tls = {};
+
+template<>
+std::atomic_uint32_t LIST_VARIANT<Node>::ticket = { 0 };
+
+#ifndef NO_STATS
+template<>
+LIST_VARIANT<Node>::GlobalStats LIST_VARIANT<Node>::global_stats = {};
+#endif
+
+// ================================================================================================
+//                        UTILS
+// ================================================================================================
+
+struct local_stat_t {
+	size_t in  = 0;
+	size_t out = 0;
+	size_t empty = 0;
+	size_t crc_in  = 0;
+	size_t crc_out = 0;
+	size_t valmax = 0;
+	size_t valmin = 100000000ul;
+	struct {
+		size_t val = 0;
+		size_t cnt = 0;
+	} comp;
+	struct {
+		size_t val = 0;
+		size_t cnt = 0;
+	} subm;
+};
+
+struct global_stat_t {
+	std::atomic_size_t in  = { 0 };
+	std::atomic_size_t out = { 0 };
+	std::atomic_size_t empty = { 0 };
+	std::atomic_size_t crc_in  = { 0 };
+	std::atomic_size_t crc_out = { 0 };
+	std::atomic_size_t valmax = { 0 };
+	std::atomic_size_t valmin = { 100000000ul };
+	struct {
+		std::atomic_size_t val = { 0 };
+		std::atomic_size_t cnt = { 0 };
+	} comp;
+	struct {
+		std::atomic_size_t val = { 0 };
+		std::atomic_size_t cnt = { 0 };
+	} subm;
+};
+
+void atomic_max(std::atomic_size_t & target, size_t value) {
+	for(;;) {
+		size_t expect = target.load(std::memory_order_relaxed);
+		if(value <= expect) return;
+		bool success = target.compare_exchange_strong(expect, value);
+		if(success) return;
+	}
+}
+
+void atomic_min(std::atomic_size_t & target, size_t value) {
+	for(;;) {
+		size_t expect = target.load(std::memory_order_relaxed);
+		if(value >= expect) return;
+		bool success = target.compare_exchange_strong(expect, value);
+		if(success) return;
+	}
+}
+
+void tally_stats(global_stat_t & global, local_stat_t & local) {
+
+	global.in    += local.in;
+	global.out   += local.out;
+	global.empty += local.empty;
+
+	global.crc_in  += local.crc_in;
+	global.crc_out += local.crc_out;
+
+	global.comp.val += local.comp.val;
+	global.comp.cnt += local.comp.cnt;
+	global.subm.val += local.subm.val;
+	global.subm.cnt += local.subm.cnt;
+
+	atomic_max(global.valmax, local.valmax);
+	atomic_min(global.valmin, local.valmin);
+
+	LIST_VARIANT<Node>::stats_tls_tally();
+}
+
+void waitfor(double & duration, barrier_t & barrier, std::atomic_bool & done) {
+	std::cout << "Starting" << std::endl;
+	auto before = Clock::now();
+	barrier.wait(0);
+	bool is_tty = isatty(STDOUT_FILENO);
+
+	while(true) {
+		usleep(100000);
+		auto now = Clock::now();
+		duration_t durr = now - before;
+		if( durr.count() > duration ) {
+			done = true;
+			break;
+		}
+		if(is_tty) {
+			std::cout << "\r" << std::setprecision(4) << durr.count();
+			std::cout.flush();
+		}
+	}
+
+	barrier.wait(0);
+	auto after = Clock::now();
+	duration_t durr = after - before;
+	duration = durr.count();
+	std::cout << "\rClosing down" << std::endl;
+}
+
+void waitfor(double & duration, barrier_t & barrier, const std::atomic_size_t & count) {
+	std::cout << "Starting" << std::endl;
+	auto before = Clock::now();
+	barrier.wait(0);
+
+	while(true) {
+		usleep(100000);
+		size_t c = count.load();
+		if( c == 0 ) {
+			break;
+		}
+		std::cout << "\r" << c;
+		std::cout.flush();
+	}
+
+	barrier.wait(0);
+	auto after = Clock::now();
+	duration_t durr = after - before;
+	duration = durr.count();
+	std::cout << "\rClosing down" << std::endl;
+}
+
+void print_stats(double duration, unsigned nthread, global_stat_t & global) {
+	assert(Node::creates == Node::destroys);
+	assert(global.crc_in == global.crc_out);
+
+	std::cout << "Done" << std::endl;
+
+	size_t ops = global.in + global.out;
+	size_t ops_sec = size_t(double(ops) / duration);
+	size_t ops_thread = ops_sec / nthread;
+	auto dur_nano = duration_cast<std::nano>(1.0);
+
+	if(global.valmax != 0) {
+		std::cout << "Max runs      : " << global.valmax << "\n";
+		std::cout << "Min runs      : " << global.valmin << "\n";
+	}
+	if(global.comp.cnt != 0) {
+		std::cout << "Submit count  : " << global.subm.cnt << "\n";
+		std::cout << "Submit average: " << ((double(global.subm.val)) / global.subm.cnt) << "\n";
+		std::cout << "Complete count: " << global.comp.cnt << "\n";
+		std::cout << "Complete avg  : " << ((double(global.comp.val)) / global.comp.cnt) << "\n";
+	}
+	std::cout << "Duration      : " << duration << "s\n";
+	std::cout << "ns/Op         : " << ( dur_nano / ops_thread )<< "\n";
+	std::cout << "Ops/sec/thread: " << ops_thread << "\n";
+	std::cout << "Ops/sec       : " << ops_sec << "\n";
+	std::cout << "Total ops     : " << ops << "(" << global.in << "i, " << global.out << "o, " << global.empty << "e)\n";
+	#ifndef NO_STATS
+		LIST_VARIANT<Node>::stats_print(std::cout);
+	#endif
+}
+
+void save_fairness(const int data[], int factor, unsigned nthreads, size_t columns, size_t rows, const std::string & output);
+
+// ================================================================================================
+//                        EXPERIMENTS
+// ================================================================================================
+
+// ================================================================================================
+__attribute__((noinline)) void runChurn_body(
+	std::atomic<bool>& done,
+	Random & rand,
+	Node * my_nodes[],
+	unsigned nslots,
+	local_stat_t & local,
+	LIST_VARIANT<Node> & list
+) {
+	while(__builtin_expect(!done.load(std::memory_order_relaxed), true)) {
+		int idx = rand.next() % nslots;
+		if (auto node = my_nodes[idx]) {
+			local.crc_in += node->value;
+			list.push(node);
+			my_nodes[idx] = nullptr;
+			local.in++;
+		}
+		else if(auto node = list.pop()) {
+			local.crc_out += node->value;
+			my_nodes[idx] = node;
+			local.out++;
+		}
+		else {
+			local.empty++;
+		}
+	}
+}
+
+void runChurn(unsigned nthread, unsigned nqueues, double duration, unsigned nnodes, const unsigned nslots) {
+	std::cout << "Churn Benchmark" << std::endl;
+	assert(nnodes <= nslots);
+	// List being tested
+
+	// Barrier for synchronization
+	barrier_t barrier(nthread + 1);
+
+	// Data to check everything is OK
+	global_stat_t global;
+
+	// Flag to signal termination
+	std::atomic_bool done  = { false };
+
+	// Prep nodes
+	std::cout << "Initializing ";
+	size_t npushed = 0;
+	LIST_VARIANT<Node> list = { nthread, nqueues };
+	{
+		Node** all_nodes[nthread];
+		for(auto & nodes : all_nodes) {
+			nodes = new __attribute__((aligned(64))) Node*[nslots + 8];
+			Random rand(rdtscl());
+			for(unsigned i = 0; i < nnodes; i++) {
+				nodes[i] = new Node(rand.next() % 100);
+			}
+
+			for(unsigned i = nnodes; i < nslots; i++) {
+				nodes[i] = nullptr;
+			}
+
+			for(int i = 0; i < 10 && i < (int)nslots; i++) {
+				int idx = rand.next() % nslots;
+				if (auto node = nodes[idx]) {
+					global.crc_in += node->value;
+					list.push(node);
+					npushed++;
+					nodes[idx] = nullptr;
+				}
+			}
+		}
+
+		std::cout << nnodes << " nodes (" << nslots << " slots)" << std::endl;
+
+		enable_stats = true;
+
+		std::thread * threads[nthread];
+		unsigned i = 1;
+		for(auto & t : threads) {
+			auto & my_nodes = all_nodes[i - 1];
+			t = new std::thread([&done, &list, &barrier, &global, &my_nodes, nslots](unsigned tid) {
+				Random rand(tid + rdtscl());
+
+				local_stat_t local;
+
+				// affinity(tid);
+
+				barrier.wait(tid);
+
+				// EXPERIMENT START
+
+				runChurn_body(done, rand, my_nodes, nslots, local, list);
+
+				// EXPERIMENT END
+
+				barrier.wait(tid);
+
+				tally_stats(global, local);
+
+				for(unsigned i = 0; i < nslots; i++) {
+					delete my_nodes[i];
+				}
+			}, i++);
+		}
+
+		waitfor(duration, barrier, done);
+
+		for(auto t : threads) {
+			t->join();
+			delete t;
+		}
+
+		enable_stats = false;
+
+		while(auto node = list.pop()) {
+			global.crc_out += node->value;
+			delete node;
+		}
+
+		for(auto nodes : all_nodes) {
+			delete[] nodes;
+		}
+	}
+
+	print_stats(duration, nthread, global);
+}
+
+// ================================================================================================
+__attribute__((noinline)) void runPingPong_body(
+	std::atomic<bool>& done,
+	Node initial_nodes[],
+	unsigned nnodes,
+	local_stat_t & local,
+	LIST_VARIANT<Node> & list
+) {
+	Node * nodes[nnodes];
+	{
+		unsigned i = 0;
+		for(auto & n : nodes) {
+			n = &initial_nodes[i++];
+		}
+	}
+
+	while(__builtin_expect(!done.load(std::memory_order_relaxed), true)) {
+
+		for(Node * & node : nodes) {
+			local.crc_in += node->value;
+			list.push(node);
+			local.in++;
+		}
+
+		// -----
+
+		for(Node * & node : nodes) {
+			node = list.pop();
+			assert(node);
+			local.crc_out += node->value;
+			local.out++;
+		}
+	}
+}
+
+void runPingPong(unsigned nthread, unsigned nqueues, double duration, unsigned nnodes) {
+	std::cout << "PingPong Benchmark" << std::endl;
+
+
+	// Barrier for synchronization
+	barrier_t barrier(nthread + 1);
+
+	// Data to check everything is OK
+	global_stat_t global;
+
+	// Flag to signal termination
+	std::atomic_bool done  = { false };
+
+	std::cout << "Initializing ";
+	// List being tested
+	LIST_VARIANT<Node> list = { nthread, nqueues };
+	{
+		enable_stats = true;
+
+		std::thread * threads[nthread];
+		unsigned i = 1;
+		for(auto & t : threads) {
+			t = new std::thread([&done, &list, &barrier, &global, nnodes](unsigned tid) {
+				Random rand(tid + rdtscl());
+
+				Node nodes[nnodes];
+				for(auto & n : nodes) {
+					n.value = (int)rand.next() % 100;
+				}
+
+				local_stat_t local;
+
+				// affinity(tid);
+
+				barrier.wait(tid);
+
+				// EXPERIMENT START
+
+				runPingPong_body(done, nodes, nnodes, local, list);
+
+				// EXPERIMENT END
+
+				barrier.wait(tid);
+
+				tally_stats(global, local);
+			}, i++);
+		}
+
+		waitfor(duration, barrier, done);
+
+		for(auto t : threads) {
+			t->join();
+			delete t;
+		}
+
+		enable_stats = false;
+	}
+
+	print_stats(duration, nthread, global);
+}
+
+// ================================================================================================
+struct __attribute__((aligned(64))) Slot {
+	Node * volatile node;
+};
+
+__attribute__((noinline)) void runProducer_body(
+	std::atomic<bool>& done,
+	Random & rand,
+	Slot * slots,
+	int nslots,
+	local_stat_t & local,
+	LIST_VARIANT<Node> & list
+) {
+	while(__builtin_expect(!done.load(std::memory_order_relaxed), true)) {
+
+		Node * node = list.pop();
+		if(!node) {
+			local.empty ++;
+			continue;
+		}
+
+		local.crc_out += node->value;
+		local.out++;
+
+		if(node->id == 0) {
+			unsigned cnt = 0;
+			for(int i = 0; i < nslots; i++) {
+				Node * found = __atomic_exchange_n( &slots[i].node, nullptr, __ATOMIC_SEQ_CST );
+				if( found ) {
+					local.crc_in += found->value;
+					local.in++;
+					cnt++;
+					list.push( found );
+				}
+			}
+
+			local.crc_in += node->value;
+			local.in++;
+			list.push( node );
+
+			local.comp.cnt++;
+			local.comp.val += cnt;
+		}
+		else {
+			unsigned len = 0;
+			while(true) {
+				auto off = rand.next();
+				for(int i = 0; i < nslots; i++) {
+					Node * expected = nullptr;
+					int idx = (i + off) % nslots;
+					Slot & slot = slots[ idx ];
+					if(
+						slot.node == nullptr &&
+						__atomic_compare_exchange_n( &slot.node, &expected, node, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST )
+					) {
+						local.subm.cnt++;
+						local.subm.val += len;
+						goto LOOP;
+					}
+					assert( expected != node );
+					len++;
+				}
+			}
+		}
+
+		LOOP:;
+	}
+}
+
+void runProducer(unsigned nthread, unsigned nqueues, double duration, unsigned nnodes) {
+	std::cout << "Producer Benchmark" << std::endl;
+
+	// Barrier for synchronization
+	barrier_t barrier(nthread + 1);
+
+	// Data to check everything is OK
+	global_stat_t global;
+
+	// Flag to signal termination
+	std::atomic_bool done  = { false };
+
+	std::cout << "Initializing ";
+
+	int nslots = nnodes * 4;
+	Slot * slots = new Slot[nslots];
+	std::cout << nnodes << " nodes (" << nslots << " slots)" << std::endl;
+
+	// List being tested
+	LIST_VARIANT<Node> list = { nthread, nqueues };
+	{
+		Random rand(rdtscl());
+		for(unsigned i = 0; i < nnodes; i++) {
+			Node * node = new Node(rand.next() % 100);
+			node->id = i;
+			global.crc_in += node->value;
+			list.push(node);
+		}
+
+		for(int i = 0; i < nslots; i++) {
+			slots[i].node = nullptr;
+		}
+	}
+
+	{
+		enable_stats = true;
+
+		std::thread * threads[nthread];
+		unsigned i = 1;
+		for(auto & t : threads) {
+			t = new std::thread([&done, &list, &barrier, &global, slots, nslots](unsigned tid) {
+				Random rand(tid + rdtscl());
+
+				local_stat_t local;
+				barrier.wait(tid);
+
+				// EXPERIMENT START
+
+				runProducer_body(done, rand, slots, nslots, local, list);
+
+				// EXPERIMENT END
+
+				barrier.wait(tid);
+
+				tally_stats(global, local);
+			}, i++);
+		}
+
+		waitfor(duration, barrier, done);
+
+		for(auto t : threads) {
+			t->join();
+			delete t;
+		}
+
+		enable_stats = false;
+	}
+
+	{
+		while(Node * node = list.pop()) {
+			global.crc_out += node->value;
+			delete node;
+		}
+
+		for(int i = 0; i < nslots; i++) {
+			delete slots[i].node;
+		}
+
+		delete [] slots;
+	}
+
+	print_stats(duration, nthread, global);
+}
+
+// ================================================================================================
+__attribute__((noinline)) void runFairness_body(
+	unsigned tid,
+	size_t width,
+	size_t length,
+	int output[],
+	std::atomic_size_t & count,
+	Node initial_nodes[],
+	unsigned nnodes,
+	local_stat_t & local,
+	LIST_VARIANT<Node> & list
+) {
+	Node * nodes[nnodes];
+	{
+		unsigned i = 0;
+		for(auto & n : nodes) {
+			n = &initial_nodes[i++];
+		}
+	}
+
+	while(__builtin_expect(0 != count.load(std::memory_order_relaxed), true)) {
+
+		for(Node * & node : nodes) {
+			local.crc_in += node->id;
+			list.push(node);
+			local.in++;
+		}
+
+		// -----
+
+		for(Node * & node : nodes) {
+			node = list.pop();
+			assert(node);
+
+			if (unsigned(node->value) < length) {
+				size_t idx = (node->value * width) + node->id;
+				assert(idx < (width * length));
+				output[idx] = tid;
+			}
+
+			node->value++;
+			if(unsigned(node->value) == length) count--;
+
+			local.crc_out += node->id;
+			local.out++;
+		}
+	}
+}
+
+void runFairness(unsigned nthread, unsigned nqueues, double duration, unsigned nnodes, const std::string & output) {
+	std::cout << "Fairness Benchmark, outputting to : " << output << std::endl;
+
+	// Barrier for synchronization
+	barrier_t barrier(nthread + 1);
+
+	// Data to check everything is OK
+	global_stat_t global;
+
+	std::cout << "Initializing ";
+
+	// Check fairness by creating a png of where the threads ran
+	size_t width = nthread * nnodes;
+	size_t length = 100000;
+
+	std::unique_ptr<int[]> data_out { new int[width * length] };
+
+	// Flag to signal termination
+	std::atomic_size_t count = width;
+
+	// List being tested
+	LIST_VARIANT<Node> list = { nthread, nqueues };
+	{
+		enable_stats = true;
+
+		std::thread * threads[nthread];
+		unsigned i = 1;
+		for(auto & t : threads) {
+			t = new std::thread([&count, &list, &barrier, &global, nnodes, width, length, data_out = data_out.get()](unsigned tid) {
+				unsigned int start = (tid - 1) * nnodes;
+				Node nodes[nnodes];
+				for(auto & n : nodes) {
+					n.id = start;
+					n.value = 0;
+					start++;
+				}
+
+				local_stat_t local;
+
+				// affinity(tid);
+
+				barrier.wait(tid);
+
+				// EXPERIMENT START
+
+				runFairness_body(tid, width, length, data_out, count, nodes, nnodes, local, list);
+
+				// EXPERIMENT END
+
+				barrier.wait(tid);
+
+				for(const auto & n : nodes) {
+					local.valmax = max(local.valmax, size_t(n.value));
+					local.valmin = min(local.valmin, size_t(n.value));
+				}
+
+				tally_stats(global, local);
+			}, i++);
+		}
+
+		waitfor(duration, barrier, count);
+
+		for(auto t : threads) {
+			t->join();
+			delete t;
+		}
+
+		enable_stats = false;
+	}
+
+	print_stats(duration, nthread, global);
+
+	// save_fairness(data_out.get(), 100, nthread, width, length, output);
+}
+
+// ================================================================================================
+
+bool iequals(const std::string& a, const std::string& b)
+{
+    return std::equal(a.begin(), a.end(),
+                      b.begin(), b.end(),
+                      [](char a, char b) {
+                          return std::tolower(a) == std::tolower(b);
+                      });
+}
+
+int main(int argc, char * argv[]) {
+
+	double duration   = 5.0;
+	unsigned nthreads = 2;
+	unsigned nqueues  = 4;
+	unsigned nnodes   = 100;
+	unsigned nslots   = 100;
+	std::string out   = "fairness.png";
+
+	enum {
+		Churn,
+		PingPong,
+		Producer,
+		Fairness,
+		NONE
+	} benchmark = NONE;
+
+	std::cout.imbue(std::locale(""));
+
+	for(;;) {
+		static struct option options[] = {
+			{"duration",  required_argument, 0, 'd'},
+			{"nthreads",  required_argument, 0, 't'},
+			{"nqueues",   required_argument, 0, 'q'},
+			{"benchmark", required_argument, 0, 'b'},
+			{0, 0, 0, 0}
+		};
+
+		int idx = 0;
+		int opt = getopt_long(argc, argv, "d:t:q:b:", options, &idx);
+
+		std::string arg = optarg ? optarg : "";
+		size_t len = 0;
+		switch(opt) {
+			// Exit Case
+			case -1:
+				/* paranoid */ assert(optind <= argc);
+				switch(benchmark) {
+				case NONE:
+					std::cerr << "Must specify a benchmark" << std::endl;
+					goto usage;
+				case PingPong:
+					nnodes = 1;
+					switch(argc - optind) {
+					case 0: break;
+					case 1:
+						try {
+							arg = optarg = argv[optind];
+							nnodes = stoul(optarg, &len);
+							if(len != arg.size()) { throw std::invalid_argument(""); }
+						} catch(std::invalid_argument &) {
+							std::cerr << "Number of nodes must be a positive integer, was " << arg << std::endl;
+							goto usage;
+						}
+						break;
+					default:
+						std::cerr << "'PingPong' benchmark doesn't accept more than 1 extra argument" << std::endl;
+						goto usage;
+					}
+					break;
+				case Producer:
+					nnodes = 32;
+					switch(argc - optind) {
+					case 0: break;
+					case 1:
+						try {
+							arg = optarg = argv[optind];
+							nnodes = stoul(optarg, &len);
+							if(len != arg.size()) { throw std::invalid_argument(""); }
+						} catch(std::invalid_argument &) {
+							std::cerr << "Number of nodes must be a positive integer, was " << arg << std::endl;
+							goto usage;
+						}
+						break;
+					default:
+						std::cerr << "'Producer' benchmark doesn't accept more than 1 extra argument" << std::endl;
+						goto usage;
+					}
+					break;
+				case Churn:
+					nnodes = 100;
+					nslots = 100;
+					switch(argc - optind) {
+					case 0: break;
+					case 1:
+						try {
+							arg = optarg = argv[optind];
+							nnodes = stoul(optarg, &len);
+							if(len != arg.size()) { throw std::invalid_argument(""); }
+							nslots = nnodes;
+						} catch(std::invalid_argument &) {
+							std::cerr << "Number of nodes must be a positive integer, was " << arg << std::endl;
+							goto usage;
+						}
+						break;
+					case 2:
+						try {
+							arg = optarg = argv[optind];
+							nnodes = stoul(optarg, &len);
+							if(len != arg.size()) { throw std::invalid_argument(""); }
+						} catch(std::invalid_argument &) {
+							std::cerr << "Number of nodes must be a positive integer, was " << arg << std::endl;
+							goto usage;
+						}
+						try {
+							arg = optarg = argv[optind + 1];
+							nslots = stoul(optarg, &len);
+							if(len != arg.size()) { throw std::invalid_argument(""); }
+						} catch(std::invalid_argument &) {
+							std::cerr << "Number of slots must be a positive integer, was " << arg << std::endl;
+							goto usage;
+						}
+						break;
+					default:
+						std::cerr << "'Churn' benchmark doesn't accept more than 2 extra arguments" << std::endl;
+						goto usage;
+					}
+					break;
+				case Fairness:
+					nnodes = 1;
+					switch(argc - optind) {
+					case 0: break;
+					case 1:
+						arg = optarg = argv[optind];
+						out = arg;
+						break;
+					default:
+						std::cerr << "'Fairness' benchmark doesn't accept more than 1 extra argument" << std::endl;
+						goto usage;
+					}
+				}
+				goto run;
+			// Benchmarks
+			case 'b':
+				if(benchmark != NONE) {
+					std::cerr << "Only one benchmark can be run" << std::endl;
+					goto usage;
+				}
+				if(iequals(arg, "churn")) {
+					benchmark = Churn;
+					break;
+				}
+				if(iequals(arg, "pingpong")) {
+					benchmark = PingPong;
+					break;
+				}
+				if(iequals(arg, "producer")) {
+					benchmark = Producer;
+					break;
+				}
+				if(iequals(arg, "fairness")) {
+					benchmark = Fairness;
+					break;
+				}
+				std::cerr << "Unknown benchmark " << arg << std::endl;
+				goto usage;
+			// Numeric Arguments
+			case 'd':
+				try {
+					duration = stod(optarg, &len);
+					if(len != arg.size()) { throw std::invalid_argument(""); }
+				} catch(std::invalid_argument &) {
+					std::cerr << "Duration must be a valid double, was " << arg << std::endl;
+					goto usage;
+				}
+				break;
+			case 't':
+				try {
+					nthreads = stoul(optarg, &len);
+					if(len != arg.size()) { throw std::invalid_argument(""); }
+				} catch(std::invalid_argument &) {
+					std::cerr << "Number of threads must be a positive integer, was " << arg << std::endl;
+					goto usage;
+				}
+				break;
+			case 'q':
+				try {
+					nqueues = stoul(optarg, &len);
+					if(len != arg.size()) { throw std::invalid_argument(""); }
+				} catch(std::invalid_argument &) {
+					std::cerr << "Number of queues must be a positive integer, was " << arg << std::endl;
+					goto usage;
+				}
+				break;
+			// Other cases
+			default: /* ? */
+				std::cerr << opt << std::endl;
+			usage:
+				std::cerr << "Usage: " << argv[0] << ": [options] -b churn [NNODES] [NSLOTS = NNODES]" << std::endl;
+				std::cerr << "  or:  " << argv[0] << ": [options] -b pingpong [NNODES]" << std::endl;
+				std::cerr << "  or:  " << argv[0] << ": [options] -b producer [NNODES]" << std::endl;
+				std::cerr << std::endl;
+				std::cerr << "  -d, --duration=DURATION  Duration of the experiment, in seconds" << std::endl;
+				std::cerr << "  -t, --nthreads=NTHREADS  Number of kernel threads" << std::endl;
+				std::cerr << "  -q, --nqueues=NQUEUES    Number of queues per threads" << std::endl;
+				std::exit(1);
+		}
+	}
+	run:
+
+	check_cache_line_size();
+
+	std::cout << "Running " << nthreads << " threads (" << (nthreads * nqueues) << " queues) for " << duration << " seconds" << std::endl;
+	std::cout << "Relaxed list variant: " << LIST_VARIANT<Node>::name() << std::endl;
+	switch(benchmark) {
+		case Churn:
+			runChurn(nthreads, nqueues, duration, nnodes, nslots);
+			break;
+		case PingPong:
+			runPingPong(nthreads, nqueues, duration, nnodes);
+			break;
+		case Producer:
+			runProducer(nthreads, nqueues, duration, nnodes);
+			break;
+		case Fairness:
+			runFairness(nthreads, nqueues, duration, nnodes, out);
+			break;
+		default:
+			abort();
+	}
+	return 0;
+}
+
+const char * __my_progname = "Relaxed List";
+
+struct rgb_t {
+    double r;       // a fraction between 0 and 1
+    double g;       // a fraction between 0 and 1
+    double b;       // a fraction between 0 and 1
+};
+
+struct hsv_t {
+    double h;       // angle in degrees
+    double s;       // a fraction between 0 and 1
+    double v;       // a fraction between 0 and 1
+};
+
+rgb_t hsv2rgb(hsv_t in) {
+	double hh, p, q, t, ff;
+	long   i;
+	rgb_t  out;
+
+	if(in.s <= 0.0) {       // < is bogus, just shuts up warnings
+		out.r = in.v;
+		out.g = in.v;
+		out.b = in.v;
+		return out;
+	}
+	hh = in.h;
+	if(hh >= 360.0) hh = 0.0;
+	hh /= 60.0;
+	i = (long)hh;
+	ff = hh - i;
+	p = in.v * (1.0 - in.s);
+	q = in.v * (1.0 - (in.s * ff));
+	t = in.v * (1.0 - (in.s * (1.0 - ff)));
+
+	switch(i) {
+	case 0:
+		out.r = in.v;
+		out.g = t;
+		out.b = p;
+		break;
+	case 1:
+		out.r = q;
+		out.g = in.v;
+		out.b = p;
+		break;
+	case 2:
+		out.r = p;
+		out.g = in.v;
+		out.b = t;
+		break;
+
+	case 3:
+		out.r = p;
+		out.g = q;
+		out.b = in.v;
+		break;
+	case 4:
+		out.r = t;
+		out.g = p;
+		out.b = in.v;
+		break;
+	case 5:
+	default:
+		out.r = in.v;
+		out.g = p;
+		out.b = q;
+		break;
+	}
+	return out;
+}
+
+// void save_fairness(const int data[], int factor, unsigned nthreads, size_t columns, size_t rows, const std::string & output) {
+// 	std::ofstream os(output);
+// 	os << "<html>\n";
+// 	os << "<head>\n";
+// 	os << "<style>\n";
+// 	os << "</style>\n";
+// 	os << "</head>\n";
+// 	os << "<body>\n";
+// 	os << "<table style=\"width=100%\">\n";
+
+// 	size_t idx = 0;
+// 	for(size_t r = 0ul; r < rows; r++) {
+// 		os << "<tr>\n";
+// 		for(size_t c = 0ul; c < columns; c++) {
+// 			os << "<td class=\"custom custom" << data[idx] << "\"></td>\n";
+// 			idx++;
+// 		}
+// 		os << "</tr>\n";
+// 	}
+
+// 	os << "</table>\n";
+// 	os << "</body>\n";
+// 	os << "</html>\n";
+// 	os << std::endl;
+// }
+
+// #include <png.h>
+// #include <setjmp.h>
+
+/*
+void save_fairness(const int data[], int factor, unsigned nthreads, size_t columns, size_t rows, const std::string & output) {
+	int width  = columns * factor;
+	int height = rows / factor;
+
+	int code = 0;
+	int idx = 0;
+	FILE *fp = NULL;
+	png_structp png_ptr = NULL;
+	png_infop info_ptr = NULL;
+	png_bytep row = NULL;
+
+	// Open file for writing (binary mode)
+	fp = fopen(output.c_str(), "wb");
+	if (fp == NULL) {
+		fprintf(stderr, "Could not open file %s for writing\n", output.c_str());
+		code = 1;
+		goto finalise;
+	}
+
+	   // Initialize write structure
+	png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
+	if (png_ptr == NULL) {
+		fprintf(stderr, "Could not allocate write struct\n");
+		code = 1;
+		goto finalise;
+	}
+
+	// Initialize info structure
+	info_ptr = png_create_info_struct(png_ptr);
+	if (info_ptr == NULL) {
+		fprintf(stderr, "Could not allocate info struct\n");
+		code = 1;
+		goto finalise;
+	}
+
+	// Setup Exception handling
+	if (setjmp(png_jmpbuf(png_ptr))) {
+		fprintf(stderr, "Error during png creation\n");
+		code = 1;
+		goto finalise;
+	}
+
+	png_init_io(png_ptr, fp);
+
+	// Write header (8 bit colour depth)
+	png_set_IHDR(png_ptr, info_ptr, width, height,
+		8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
+		PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
+
+	png_write_info(png_ptr, info_ptr);
+
+	// Allocate memory for one row (3 bytes per pixel - RGB)
+	row = (png_bytep) malloc(3 * width * sizeof(png_byte));
+
+	// Write image data
+	int x, y;
+	for (y=0 ; y<height ; y++) {
+		for (x=0 ; x<width ; x++) {
+			auto & r = row[(x * 3) + 0];
+			auto & g = row[(x * 3) + 1];
+			auto & b = row[(x * 3) + 2];
+			assert(idx < (rows * columns));
+			int color = data[idx] - 1;
+			assert(color < nthreads);
+			assert(color >= 0);
+			idx++;
+
+			double angle = double(color) / double(nthreads);
+
+			auto c = hsv2rgb({ 360.0 * angle, 0.8, 0.8 });
+
+			r = char(c.r * 255.0);
+			g = char(c.g * 255.0);
+			b = char(c.b * 255.0);
+
+		}
+		png_write_row(png_ptr, row);
+	}
+
+	assert(idx == (rows * columns));
+
+	// End write
+	png_write_end(png_ptr, NULL);
+
+	finalise:
+	if (fp != NULL) fclose(fp);
+	if (info_ptr != NULL) png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
+	if (png_ptr != NULL) png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
+	if (row != NULL) free(row);
+}
+*/
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/relaxed_list.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/relaxed_list.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/relaxed_list.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,555 @@
+#pragma once
+#define LIST_VARIANT relaxed_list
+
+#define VANILLA 0
+#define SNZI 1
+#define BITMASK 2
+#define DISCOVER 3
+#define SNZM 4
+#define BIAS 5
+#define BACK 6
+#define BACKBIAS 7
+
+#ifndef VARIANT
+#define VARIANT VANILLA
+#endif
+
+#ifndef NO_STATS
+#include <iostream>
+#endif
+
+#include <cmath>
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <type_traits>
+
+#include "assert.hpp"
+#include "utils.hpp"
+#include "links.hpp"
+#include "snzi.hpp"
+#include "snzi-packed.hpp"
+#include "snzm.hpp"
+
+using namespace std;
+
+struct pick_stat {
+	struct {
+		size_t attempt = 0;
+		size_t success = 0;
+		size_t local = 0;
+	} push;
+	struct {
+		size_t attempt = 0;
+		size_t success = 0;
+		size_t mask_attempt = 0;
+		size_t mask_reset = 0;
+		size_t local = 0;
+	} pop;
+};
+
+struct empty_stat {
+	struct {
+		size_t value = 0;
+		size_t count = 0;
+	} push;
+	struct {
+		size_t value = 0;
+		size_t count = 0;
+	} pop;
+};
+
+template<typename node_t>
+class __attribute__((aligned(128))) relaxed_list {
+	static_assert(std::is_same<decltype(node_t::_links), _LinksFields_t<node_t>>::value, "Node must have a links field");
+
+public:
+	static const char * name() {
+		const char * names[] = {
+			"RELAXED: VANILLA",
+			"RELAXED: SNZI",
+			"RELAXED: BITMASK",
+			"RELAXED: SNZI + DISCOVERED MASK",
+			"RELAXED: SNZI + MASK",
+			"RELAXED: SNZI + LOCAL BIAS",
+			"RELAXED: SNZI + REVERSE RNG",
+			"RELAXED: SNZI + LOCAL BIAS + REVERSE RNG"
+		};
+		return names[VARIANT];
+	}
+
+	relaxed_list(unsigned numThreads, unsigned numQueues)
+		: numLists(numThreads * numQueues)
+	  	, lists(new intrusive_queue_t<node_t>[numLists])
+		#if VARIANT == SNZI || VARIANT == BACK
+			, snzi( std::log2( numLists / (2 * numQueues) ), 2 )
+		#elif VARIANT == BIAS || VARIANT == BACKBIAS
+			#ifdef SNZI_PACKED
+				, snzi( std::ceil( std::log2(numLists) ) )
+			#else
+				, snzi( std::log2( numLists / (2 * numQueues) ), 2 )
+			#endif
+		#elif VARIANT == SNZM || VARIANT == DISCOVER
+			, snzm( numLists )
+		#endif
+	{
+		assertf(7 * 8 * 8 >= numLists, "List currently only supports 448 sublists");
+		std::cout << "Constructing Relaxed List with " << numLists << std::endl;
+	}
+
+	~relaxed_list() {
+		std::cout << "Destroying Relaxed List" << std::endl;
+		lists.reset();
+	}
+
+    	__attribute__((noinline, hot)) void push(node_t * node) {
+		node->_links.ts = rdtscl();
+
+		while(true) {
+			// Pick a random list
+			unsigned i = idx_from_r(tls.rng1.next(), VARIANT == BIAS || VARIANT == BACKBIAS);
+
+			#ifndef NO_STATS
+				tls.pick.push.attempt++;
+			#endif
+
+			// If we can't lock it retry
+			if( !lists[i].lock.try_lock() ) continue;
+
+			#if VARIANT == VANILLA || VARIANT == BITMASK
+				__attribute__((unused)) int num = numNonEmpty;
+			#endif
+
+			// Actually push it
+			if(lists[i].push(node)) {
+				#if VARIANT == DISCOVER
+					size_t qword = i >> 6ull;
+					size_t bit   = i & 63ull;
+					assert(qword == 0);
+					bts(tls.mask, bit);
+					snzm.arrive(i);
+				#elif VARIANT == SNZI || VARIANT == BIAS
+					snzi.arrive(i);
+				#elif VARIANT == BACK || VARIANT == BACKBIAS
+					snzi.arrive(i);
+					tls.rng2.set_raw_state( tls.rng1.get_raw_state());
+				#elif VARIANT == SNZM
+					snzm.arrive(i);
+				#elif VARIANT == BITMASK
+					numNonEmpty++;
+					size_t qword = i >> 6ull;
+					size_t bit   = i & 63ull;
+					assertf((list_mask[qword] & (1ul << bit)) == 0, "Before set %zu:%zu (%u), %zx & %zx", qword, bit, i, list_mask[qword].load(), (1ul << bit));
+					__attribute__((unused)) bool ret = bts(list_mask[qword], bit);
+					assert(!ret);
+					assertf((list_mask[qword] & (1ul << bit)) != 0, "After set %zu:%zu (%u), %zx & %zx", qword, bit, i, list_mask[qword].load(), (1ul << bit));
+				#else
+					numNonEmpty++;
+				#endif
+			}
+			#if VARIANT == VANILLA || VARIANT == BITMASK
+				assert(numNonEmpty <= (int)numLists);
+			#endif
+
+			// Unlock and return
+			lists[i].lock.unlock();
+
+			#ifndef NO_STATS
+				tls.pick.push.success++;
+				#if VARIANT == VANILLA || VARIANT == BITMASK
+					tls.empty.push.value += num;
+					tls.empty.push.count += 1;
+				#endif
+			#endif
+			return;
+		}
+    	}
+
+	__attribute__((noinline, hot)) node_t * pop() {
+		#if VARIANT == DISCOVER
+			assert(numLists <= 64);
+			while(snzm.query()) {
+				tls.pick.pop.mask_attempt++;
+				unsigned i, j;
+				{
+					// Pick first list totally randomly
+					i = tls.rng1.next() % numLists;
+
+					// Pick the other according to the bitmask
+					unsigned r = tls.rng1.next();
+
+					size_t mask = tls.mask.load(std::memory_order_relaxed);
+					if(mask == 0) {
+						tls.pick.pop.mask_reset++;
+						mask = (1U << numLists) - 1;
+						tls.mask.store(mask, std::memory_order_relaxed);
+					}
+
+					unsigned b = rand_bit(r, mask);
+
+					assertf(b < 64, "%zu %u", mask, b);
+
+					j = b;
+
+					assert(j < numLists);
+				}
+
+				if(auto node = try_pop(i, j)) return node;
+			}
+		#elif VARIANT == SNZI
+			while(snzi.query()) {
+				// Pick two lists at random
+				int i = tls.rng1.next() % numLists;
+				int j = tls.rng1.next() % numLists;
+
+				if(auto node = try_pop(i, j)) return node;
+			}
+
+		#elif VARIANT == BACK
+			while(snzi.query()) {
+				// Pick two lists at random
+				int i = tls.rng2.prev() % numLists;
+				int j = tls.rng2.prev() % numLists;
+
+				if(auto node = try_pop(i, j)) return node;
+			}
+
+		#elif VARIANT == BACKBIAS
+			while(snzi.query()) {
+				// Pick two lists at random
+				int i = idx_from_r(tls.rng2.prev(), true);
+				int j = idx_from_r(tls.rng2.prev(), true);
+
+				if(auto node = try_pop(i, j)) return node;
+			}
+
+		#elif VARIANT == BIAS
+			while(snzi.query()) {
+				// Pick two lists at random
+				unsigned ri = tls.rng1.next();
+				unsigned i;
+				unsigned j = tls.rng1.next();
+				if(0 == (ri & 0xF)) {
+					i = (ri >> 4) % numLists;
+				} else {
+					i = tls.my_queue + ((ri >> 4) % 4);
+					j = tls.my_queue + ((j >> 4) % 4);
+					tls.pick.pop.local++;
+				}
+				i %= numLists;
+				j %= numLists;
+
+				if(auto node = try_pop(i, j)) return node;
+			}
+		#elif VARIANT == SNZM
+			//*
+			while(snzm.query()) {
+				tls.pick.pop.mask_attempt++;
+				unsigned i, j;
+				{
+					// Pick two random number
+					unsigned ri = tls.rng1.next();
+					unsigned rj = tls.rng1.next();
+
+					// Pick two nodes from it
+					unsigned wdxi = ri & snzm.mask;
+					// unsigned wdxj = rj & snzm.mask;
+
+					// Get the masks from the nodes
+					// size_t maski = snzm.masks(wdxi);
+					size_t maskj = snzm.masks(wdxj);
+
+					if(maski == 0 && maskj == 0) continue;
+
+					#if defined(__BMI2__)
+						uint64_t idxsi = _pext_u64(snzm.indexes, maski);
+						// uint64_t idxsj = _pext_u64(snzm.indexes, maskj);
+
+						auto pi = __builtin_popcountll(maski);
+						// auto pj = __builtin_popcountll(maskj);
+
+						ri = pi ? ri & ((pi >> 3) - 1) : 0;
+						rj = pj ? rj & ((pj >> 3) - 1) : 0;
+
+						unsigned bi = (idxsi >> (ri << 3)) & 0xff;
+						unsigned bj = (idxsj >> (rj << 3)) & 0xff;
+					#else
+						unsigned bi = rand_bit(ri >> snzm.depth, maski);
+						unsigned bj = rand_bit(rj >> snzm.depth, maskj);
+					#endif
+
+					i = (bi << snzm.depth) | wdxi;
+					j = (bj << snzm.depth) | wdxj;
+
+					/* paranoid */ assertf(i < numLists, "%u %u", bj, wdxi);
+					/* paranoid */ assertf(j < numLists, "%u %u", bj, wdxj);
+				}
+
+				if(auto node = try_pop(i, j)) return node;
+			}
+			/*/
+			while(snzm.query()) {
+				// Pick two lists at random
+				int i = tls.rng1.next() % numLists;
+				int j = tls.rng1.next() % numLists;
+
+				if(auto node = try_pop(i, j)) return node;
+			}
+			//*/
+		#elif VARIANT == BITMASK
+			int nnempty;
+			while(0 != (nnempty = numNonEmpty)) {
+				tls.pick.pop.mask_attempt++;
+				unsigned i, j;
+				{
+					// Pick two lists at random
+					unsigned num = ((numLists - 1) >> 6) + 1;
+
+					unsigned ri = tls.rng1.next();
+					unsigned rj = tls.rng1.next();
+
+					unsigned wdxi = (ri >> 6u) % num;
+					unsigned wdxj = (rj >> 6u) % num;
+
+					size_t maski = list_mask[wdxi].load(std::memory_order_relaxed);
+					size_t maskj = list_mask[wdxj].load(std::memory_order_relaxed);
+
+					if(maski == 0 && maskj == 0) continue;
+
+					unsigned bi = rand_bit(ri, maski);
+					unsigned bj = rand_bit(rj, maskj);
+
+					assertf(bi < 64, "%zu %u", maski, bi);
+					assertf(bj < 64, "%zu %u", maskj, bj);
+
+					i = bi | (wdxi << 6);
+					j = bj | (wdxj << 6);
+
+					assertf(i < numLists, "%u", wdxi << 6);
+					assertf(j < numLists, "%u", wdxj << 6);
+				}
+
+				if(auto node = try_pop(i, j)) return node;
+			}
+		#else
+			while(numNonEmpty != 0) {
+				// Pick two lists at random
+				int i = tls.rng1.next() % numLists;
+				int j = tls.rng1.next() % numLists;
+
+				if(auto node = try_pop(i, j)) return node;
+			}
+		#endif
+
+		return nullptr;
+    	}
+
+private:
+	node_t * try_pop(unsigned i, unsigned j) {
+		#ifndef NO_STATS
+			tls.pick.pop.attempt++;
+		#endif
+
+		#if VARIANT == DISCOVER
+			if(lists[i].ts() > 0) bts(tls.mask, i); else btr(tls.mask, i);
+			if(lists[j].ts() > 0) bts(tls.mask, j); else btr(tls.mask, j);
+		#endif
+
+		// Pick the best list
+		int w = i;
+		if( __builtin_expect(lists[j].ts() != 0, true) ) {
+			w = (lists[i].ts() < lists[j].ts()) ? i : j;
+		}
+
+		auto & list = lists[w];
+		// If list looks empty retry
+		if( list.ts() == 0 ) return nullptr;
+
+		// If we can't get the lock retry
+		if( !list.lock.try_lock() ) return nullptr;
+
+		#if VARIANT == VANILLA || VARIANT == BITMASK
+			__attribute__((unused)) int num = numNonEmpty;
+		#endif
+
+		// If list is empty, unlock and retry
+		if( list.ts() == 0 ) {
+			list.lock.unlock();
+			return nullptr;
+		}
+
+		// Actually pop the list
+		node_t * node;
+		bool emptied;
+		std::tie(node, emptied) = list.pop();
+		assert(node);
+
+		if(emptied) {
+			#if VARIANT == DISCOVER
+				size_t qword = w >> 6ull;
+				size_t bit   = w & 63ull;
+				assert(qword == 0);
+				__attribute__((unused)) bool ret = btr(tls.mask, bit);
+				snzm.depart(w);
+			#elif VARIANT == SNZI || VARIANT == BIAS || VARIANT == BACK || VARIANT == BACKBIAS
+				snzi.depart(w);
+			#elif VARIANT == SNZM
+				snzm.depart(w);
+			#elif VARIANT == BITMASK
+				numNonEmpty--;
+				size_t qword = w >> 6ull;
+				size_t bit   = w & 63ull;
+				assert((list_mask[qword] & (1ul << bit)) != 0);
+				__attribute__((unused)) bool ret = btr(list_mask[qword], bit);
+				assert(ret);
+				assert((list_mask[qword] & (1ul << bit)) == 0);
+			#else
+				numNonEmpty--;
+			#endif
+		}
+
+		// Unlock and return
+		list.lock.unlock();
+		#if VARIANT == VANILLA || VARIANT == BITMASK
+			assert(numNonEmpty >= 0);
+		#endif
+		#ifndef NO_STATS
+			tls.pick.pop.success++;
+			#if VARIANT == VANILLA || VARIANT == BITMASK
+				tls.empty.pop.value += num;
+				tls.empty.pop.count += 1;
+			#endif
+		#endif
+		return node;
+	}
+
+	inline unsigned idx_from_r(unsigned r, bool bias) {
+		unsigned i;
+		if(bias) {
+			if(0 == (r & 0x3F)) {
+				i = r >> 6;
+			} else {
+				i = tls.my_queue + ((r >> 6) % 4);
+				tls.pick.push.local++;
+			}
+		} else {
+			i = r;
+		}
+		return i % numLists;
+	}
+
+public:
+
+	static __attribute__((aligned(128))) thread_local struct TLS {
+		Random     rng1 = { unsigned(std::hash<std::thread::id>{}(std::this_thread::get_id()) ^ rdtscl()) };
+		Random     rng2 = { unsigned(std::hash<std::thread::id>{}(std::this_thread::get_id()) ^ rdtscl()) };
+		unsigned   my_queue = (ticket++) * 4;
+		pick_stat  pick;
+		empty_stat empty;
+		__attribute__((aligned(64))) std::atomic_size_t mask = { 0 };
+	} tls;
+
+private:
+	const unsigned numLists;
+    	__attribute__((aligned(64))) std::unique_ptr<intrusive_queue_t<node_t> []> lists;
+private:
+	#if VARIANT == SNZI || VARIANT == BACK
+		snzi_t snzi;
+	#elif VARIANT == BIAS || VARIANT == BACKBIAS
+		#ifdef SNZI_PACKED
+			snzip_t snzi;
+		#else
+			snzi_t snzi;
+		#endif
+	#elif VARIANT == SNZM || VARIANT == DISCOVER
+		snzm_t snzm;
+	#else
+		std::atomic_int numNonEmpty  = { 0 };  // number of non-empty lists
+	#endif
+	#if VARIANT == BITMASK
+		std::atomic_size_t list_mask[7] = { {0}, {0}, {0}, {0}, {0}, {0}, {0} }; // which queues are empty
+	#endif
+
+public:
+	static const constexpr size_t sizeof_queue = sizeof(intrusive_queue_t<node_t>);
+	static std::atomic_uint32_t ticket;
+
+#ifndef NO_STATS
+	static void stats_tls_tally() {
+		global_stats.pick.push.attempt += tls.pick.push.attempt;
+		global_stats.pick.push.success += tls.pick.push.success;
+		global_stats.pick.push.local += tls.pick.push.local;
+		global_stats.pick.pop .attempt += tls.pick.pop.attempt;
+		global_stats.pick.pop .success += tls.pick.pop.success;
+		global_stats.pick.pop .mask_attempt += tls.pick.pop.mask_attempt;
+		global_stats.pick.pop .mask_reset += tls.pick.pop.mask_reset;
+		global_stats.pick.pop .local += tls.pick.pop.local;
+
+		global_stats.qstat.push.value += tls.empty.push.value;
+		global_stats.qstat.push.count += tls.empty.push.count;
+		global_stats.qstat.pop .value += tls.empty.pop .value;
+		global_stats.qstat.pop .count += tls.empty.pop .count;
+	}
+
+private:
+	static struct GlobalStats {
+		struct {
+			struct {
+				std::atomic_size_t attempt = { 0 };
+				std::atomic_size_t success = { 0 };
+				std::atomic_size_t local = { 0 };
+			} push;
+			struct {
+				std::atomic_size_t attempt = { 0 };
+				std::atomic_size_t success = { 0 };
+				std::atomic_size_t mask_attempt = { 0 };
+				std::atomic_size_t mask_reset = { 0 };
+				std::atomic_size_t local = { 0 };
+			} pop;
+		} pick;
+		struct {
+			struct {
+				std::atomic_size_t value = { 0 };
+				std::atomic_size_t count = { 0 };
+			} push;
+			struct {
+				std::atomic_size_t value = { 0 };
+				std::atomic_size_t count = { 0 };
+			} pop;
+		} qstat;
+	} global_stats;
+
+public:
+	static void stats_print(std::ostream & os ) {
+		std::cout << "----- Relaxed List Stats -----" << std::endl;
+
+		const auto & global = global_stats;
+
+		double push_sur = (100.0 * double(global.pick.push.success) / global.pick.push.attempt);
+		double pop_sur  = (100.0 * double(global.pick.pop .success) / global.pick.pop .attempt);
+		double mpop_sur = (100.0 * double(global.pick.pop .success) / global.pick.pop .mask_attempt);
+		double rpop_sur = (100.0 * double(global.pick.pop .success) / global.pick.pop .mask_reset);
+
+		double push_len = double(global.pick.push.attempt     ) / global.pick.push.success;
+		double pop_len  = double(global.pick.pop .attempt     ) / global.pick.pop .success;
+		double mpop_len = double(global.pick.pop .mask_attempt) / global.pick.pop .success;
+		double rpop_len = double(global.pick.pop .mask_reset  ) / global.pick.pop .success;
+
+		os << "Push   Pick   : " << push_sur << " %, len " << push_len << " (" << global.pick.push.attempt      << " / " << global.pick.push.success << ")\n";
+		os << "Pop    Pick   : " << pop_sur  << " %, len " << pop_len  << " (" << global.pick.pop .attempt      << " / " << global.pick.pop .success << ")\n";
+		os << "TryPop Pick   : " << mpop_sur << " %, len " << mpop_len << " (" << global.pick.pop .mask_attempt << " / " << global.pick.pop .success << ")\n";
+		os << "Pop M Reset   : " << rpop_sur << " %, len " << rpop_len << " (" << global.pick.pop .mask_reset   << " / " << global.pick.pop .success << ")\n";
+
+		double avgQ_push = double(global.qstat.push.value) / global.qstat.push.count;
+		double avgQ_pop  = double(global.qstat.pop .value) / global.qstat.pop .count;
+		double avgQ      = double(global.qstat.push.value + global.qstat.pop .value) / (global.qstat.push.count + global.qstat.pop .count);
+		os << "Push   Avg Qs : " << avgQ_push << " (" << global.qstat.push.count << "ops)\n";
+		os << "Pop    Avg Qs : " << avgQ_pop  << " (" << global.qstat.pop .count << "ops)\n";
+		os << "Global Avg Qs : " << avgQ      << " (" << (global.qstat.push.count + global.qstat.pop .count) << "ops)\n";
+
+		os << "Local Push    : " << global.pick.push.local << "\n";
+		os << "Local Pop     : " << global.pick.pop .local << "\n";
+	}
+#endif
+};
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/relaxed_list_layout.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/relaxed_list_layout.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/relaxed_list_layout.cpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,23 @@
+#define NO_IO
+#define NDEBUG
+#include "relaxed_list.hpp"
+
+struct __attribute__((aligned(64))) Node {
+	static std::atomic_size_t creates;
+	static std::atomic_size_t destroys;
+
+	_LinksFields_t<Node> _links;
+
+	int value;
+	Node(int value): value(value) {
+		creates++;
+	}
+
+	~Node() {
+		destroys++;
+	}
+};
+
+int main() {
+	return sizeof(relaxed_list<Node>) + relaxed_list<Node>::sizeof_queue;
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/runperf.sh
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/runperf.sh	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/runperf.sh	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -e
+
+name=$1
+event=$2
+
+shift 2
+
+echo "perf record -F 99 -a -g -o raw/$name.data -e $event -- $@ > raw/$name.out"
+perf record -F 99 -a -g -o raw/$name.data -e $event -- $@ > raw/$name.out
+echo "=============================="
+cat raw/$name.out
+echo "=============================="
+./process.sh $name
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/scale.sh
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/scale.sh	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/scale.sh	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,7 @@
+#!/bin/bash
+taskset -c 24-31 ./a.out -t  1 -b churn | grep --color -E "(ns|Ops|Running)"
+taskset -c 24-31 ./a.out -t  2 -b churn | grep --color -E "(ns|Ops|Running)"
+taskset -c 24-31 ./a.out -t  4 -b churn | grep --color -E "(ns|Ops|Running)"
+taskset -c 24-31 ./a.out -t  8 -b churn | grep --color -E "(ns|Ops|Running)"
+taskset -c 16-31 ./a.out -t 16 -b churn | grep --color -E "(ns|Ops|Running)"
+taskset -c  0-31 ./a.out -t 32 -b churn | grep --color -E "(ns|Ops|Running)"
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzi-packed.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzi-packed.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzi-packed.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,179 @@
+#pragma once
+
+#define SNZI_PACKED
+
+#include "utils.hpp"
+
+
+class snzip_t {
+	class node;
+	class node_aligned;
+public:
+	const unsigned mask;
+	const int root;
+	std::unique_ptr<snzip_t::node[]> leafs;
+	std::unique_ptr<snzip_t::node_aligned[]> nodes;
+
+	snzip_t(unsigned depth);
+
+	void arrive(int idx) {
+		// idx >>= 1;
+		idx %= mask;
+		leafs[idx].arrive();
+	}
+
+	void depart(int idx) {
+		// idx >>= 1;
+		idx %= mask;
+		leafs[idx].depart();
+	}
+
+	bool query() const {
+		return nodes[root].query();
+	}
+
+
+private:
+	class __attribute__((aligned(32))) node {
+		friend class snzip_t;
+	private:
+
+		union val_t {
+			static constexpr char Half = -1;
+
+			uint64_t _all;
+			struct __attribute__((packed)) {
+				char cnt;
+				uint64_t ver:56;
+			};
+
+			bool cas(val_t & exp, char _cnt, uint64_t _ver) volatile {
+				val_t t;
+				t.ver = _ver;
+				t.cnt = _cnt;
+				/* paranoid */ assert(t._all == ((_ver << 8) | ((unsigned char)_cnt)));
+				return __atomic_compare_exchange_n(&this->_all, &exp._all, t._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+			}
+
+			bool cas(val_t & exp, const val_t & tar) volatile {
+				return __atomic_compare_exchange_n(&this->_all, &exp._all, tar._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+			}
+
+			val_t() : _all(0) {}
+			val_t(const volatile val_t & o) : _all(o._all) {}
+		};
+
+		//--------------------------------------------------
+		// Hierarchical node
+		void arrive_h() {
+			int undoArr = 0;
+			bool success = false;
+			while(!success) {
+				auto x{ value };
+				/* paranoid */ assert(x.cnt <= 120);
+				if( x.cnt >= 1 ) {
+					if( value.cas(x, x.cnt + 1, x.ver ) ) {
+						success = true;
+					}
+				}
+				/* paranoid */ assert(x.cnt <= 120);
+				if( x.cnt == 0 ) {
+					if( value.cas(x, val_t::Half, x.ver + 1) ) {
+						success = true;
+						x.cnt = val_t::Half;
+						x.ver = x.ver + 1;
+					}
+				}
+				/* paranoid */ assert(x.cnt <= 120);
+				if( x.cnt == val_t::Half ) {
+					/* paranoid */ assert(parent);
+					if(undoArr == 2) {
+						undoArr--;
+					} else {
+						parent->arrive();
+					}
+					if( !value.cas(x, 1, x.ver) ) {
+						undoArr = undoArr + 1;
+					}
+				}
+			}
+
+			for(int i = 0; i < undoArr; i++) {
+				/* paranoid */ assert(parent);
+				parent->depart();
+			}
+		}
+
+		void depart_h() {
+			while(true) {
+				auto x = (const val_t)value;
+				/* paranoid */ assertf(x.cnt >= 1, "%d", x.cnt);
+				if( value.cas( x, x.cnt - 1, x.ver ) ) {
+					if( x.cnt == 1 ) {
+						/* paranoid */ assert(parent);
+						parent->depart();
+					}
+					return;
+				}
+			}
+		}
+
+		//--------------------------------------------------
+		// Root node
+		void arrive_r() {
+			__atomic_fetch_add(&value._all, 1, __ATOMIC_SEQ_CST);
+		}
+
+		void depart_r() {
+			__atomic_fetch_sub(&value._all, 1, __ATOMIC_SEQ_CST);
+		}
+
+	private:
+		volatile val_t value;
+		class node * parent = nullptr;
+
+		bool is_root() {
+			return parent == nullptr;
+		}
+
+	public:
+		void arrive() {
+			if(is_root()) arrive_r();
+			else arrive_h();
+		}
+
+		void depart() {
+			if(is_root()) depart_r();
+			else depart_h();
+		}
+
+		bool query() {
+			/* paranoid */ assert(is_root());
+			return value._all > 0;
+		}
+	};
+
+	class __attribute__((aligned(128))) node_aligned : public node {};
+};
+
+snzip_t::snzip_t(unsigned depth)
+	: mask( std::pow(2, depth) )
+	, root( ((std::pow(2, depth + 1) - 1) / (2 -1)) - 1 - mask )
+	, leafs(new node[ mask ]())
+	, nodes(new node_aligned[ root + 1 ]())
+{
+	int width = std::pow(2, depth);
+	int hwdith = width / 2;
+	std::cout << "SNZI: " << depth << "x" << width << "(" << mask - 1 << ") " << (sizeof(snzip_t::node) * (root + 1)) << " bytes" << std::endl;
+	for(int i = 0; i < width; i++) {
+		int idx = i % hwdith;
+		std::cout << i << " -> " << idx + width << std::endl;
+		leafs[i].parent = &nodes[ idx ];
+	}
+
+	for(int i = 0; i < root; i++) {
+		int idx = (i / 2) + hwdith;
+		std::cout << i + width << " -> " << idx + width << std::endl;
+		nodes[i].parent = &nodes[ idx ];
+	}
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzi.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzi.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzi.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,164 @@
+#pragma once
+
+#include "utils.hpp"
+
+
+class snzi_t {
+	class node;
+public:
+	const unsigned mask;
+	const int root;
+	std::unique_ptr<snzi_t::node[]> nodes;
+
+	snzi_t(unsigned depth, unsigned base = 2);
+
+	void arrive(int idx) {
+		idx >>= 2;
+		idx %= mask;
+		nodes[idx].arrive();
+	}
+
+	void depart(int idx) {
+		idx >>= 2;
+		idx %= mask;
+		nodes[idx].depart();
+	}
+
+	bool query() const {
+		return nodes[root].query();
+	}
+
+
+private:
+	class __attribute__((aligned(128))) node {
+		friend class snzi_t;
+	private:
+
+		union val_t {
+			static constexpr char Half = -1;
+
+			uint64_t _all;
+			struct __attribute__((packed)) {
+				char cnt;
+				uint64_t ver:56;
+			};
+
+			bool cas(val_t & exp, char _cnt, uint64_t _ver) volatile {
+				val_t t;
+				t.ver = _ver;
+				t.cnt = _cnt;
+				/* paranoid */ assert(t._all == ((_ver << 8) | ((unsigned char)_cnt)));
+				return __atomic_compare_exchange_n(&this->_all, &exp._all, t._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+			}
+
+			bool cas(val_t & exp, const val_t & tar) volatile {
+				return __atomic_compare_exchange_n(&this->_all, &exp._all, tar._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+			}
+
+			val_t() : _all(0) {}
+			val_t(const volatile val_t & o) : _all(o._all) {}
+		};
+
+		//--------------------------------------------------
+		// Hierarchical node
+		void arrive_h() {
+			int undoArr = 0;
+			bool success = false;
+			while(!success) {
+				auto x{ value };
+				/* paranoid */ assert(x.cnt <= 120);
+				if( x.cnt >= 1 ) {
+					if( value.cas(x, x.cnt + 1, x.ver ) ) {
+						success = true;
+					}
+				}
+				/* paranoid */ assert(x.cnt <= 120);
+				if( x.cnt == 0 ) {
+					if( value.cas(x, val_t::Half, x.ver + 1) ) {
+						success = true;
+						x.cnt = val_t::Half;
+						x.ver = x.ver + 1;
+					}
+				}
+				/* paranoid */ assert(x.cnt <= 120);
+				if( x.cnt == val_t::Half ) {
+					/* paranoid */ assert(parent);
+					if(undoArr == 2) {
+						undoArr--;
+					} else {
+						parent->arrive();
+					}
+					if( !value.cas(x, 1, x.ver) ) {
+						undoArr = undoArr + 1;
+					}
+				}
+			}
+
+			for(int i = 0; i < undoArr; i++) {
+				/* paranoid */ assert(parent);
+				parent->depart();
+			}
+		}
+
+		void depart_h() {
+			while(true) {
+				auto x = (const val_t)value;
+				/* paranoid */ assertf(x.cnt >= 1, "%d", x.cnt);
+				if( value.cas( x, x.cnt - 1, x.ver ) ) {
+					if( x.cnt == 1 ) {
+						/* paranoid */ assert(parent);
+						parent->depart();
+					}
+					return;
+				}
+			}
+		}
+
+		//--------------------------------------------------
+		// Root node
+		void arrive_r() {
+			__atomic_fetch_add(&value._all, 1, __ATOMIC_SEQ_CST);
+		}
+
+		void depart_r() {
+			__atomic_fetch_sub(&value._all, 1, __ATOMIC_SEQ_CST);
+		}
+
+	private:
+		volatile val_t value;
+		class node * parent = nullptr;
+
+		bool is_root() {
+			return parent == nullptr;
+		}
+
+	public:
+		void arrive() {
+			if(is_root()) arrive_r();
+			else arrive_h();
+		}
+
+		void depart() {
+			if(is_root()) depart_r();
+			else depart_h();
+		}
+
+		bool query() {
+			/* paranoid */ assert(is_root());
+			return value._all > 0;
+		}
+	};
+};
+
+snzi_t::snzi_t(unsigned depth, unsigned base)
+	: mask( std::pow(base, depth) )
+	, root( ((std::pow(base, depth + 1) - 1) / (base -1)) - 1 )
+	, nodes(new node[ root + 1 ]())
+{
+	int width = std::pow(base, depth);
+	std::cout << "SNZI: " << depth << "x" << width << "(" << mask - 1 << ") " << (sizeof(snzi_t::node) * (root + 1)) << " bytes" << std::endl;
+	for(int i = 0; i < root; i++) {
+		std::cout << i << " -> " << (i / base) + width << std::endl;
+		nodes[i].parent = &nodes[(i / base) + width];
+	}
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzm.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzm.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzm.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,213 @@
+#pragma once
+
+#include "utils.hpp"
+
+
+class snzm_t {
+	class node;
+public:
+	const unsigned depth;
+	const unsigned mask;
+	const int root;
+	std::unique_ptr<snzm_t::node[]> nodes;
+
+	#if defined(__BMI2__)
+		const uint64_t indexes = 0x0706050403020100;
+	#endif
+
+	snzm_t(unsigned numLists);
+
+	void arrive(int idx) {
+		int i = idx & mask;
+		nodes[i].arrive( idx >> depth);
+	}
+
+	void depart(int idx) {
+		int i = idx & mask;
+		nodes[i].depart( idx >> depth );
+	}
+
+	bool query() const {
+		return nodes[root].query();
+	}
+
+	uint64_t masks( unsigned node ) {
+		/* paranoid */ assert( (node & mask) == node );
+		#if defined(__BMI2__)
+			return nodes[node].mask_all;
+		#else
+			return nodes[node].mask;
+		#endif
+	}
+
+private:
+	class __attribute__((aligned(128))) node {
+		friend class snzm_t;
+	private:
+
+		union val_t {
+			static constexpr char Half = -1;
+
+			uint64_t _all;
+			struct __attribute__((packed)) {
+				char cnt;
+				uint64_t ver:56;
+			};
+
+			bool cas(val_t & exp, char _cnt, uint64_t _ver) volatile {
+				val_t t;
+				t.ver = _ver;
+				t.cnt = _cnt;
+				/* paranoid */ assert(t._all == ((_ver << 8) | ((unsigned char)_cnt)));
+				return __atomic_compare_exchange_n(&this->_all, &exp._all, t._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+			}
+
+			bool cas(val_t & exp, const val_t & tar) volatile {
+				return __atomic_compare_exchange_n(&this->_all, &exp._all, tar._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+			}
+
+			val_t() : _all(0) {}
+			val_t(const volatile val_t & o) : _all(o._all) {}
+		};
+
+		//--------------------------------------------------
+		// Hierarchical node
+		void arrive_h() {
+			int undoArr = 0;
+			bool success = false;
+			while(!success) {
+				auto x{ value };
+				/* paranoid */ assert(x.cnt <= 120);
+				if( x.cnt >= 1 ) {
+					if( value.cas(x, x.cnt + 1, x.ver ) ) {
+						success = true;
+					}
+				}
+				/* paranoid */ assert(x.cnt <= 120);
+				if( x.cnt == 0 ) {
+					if( value.cas(x, val_t::Half, x.ver + 1) ) {
+						success = true;
+						x.cnt = val_t::Half;
+						x.ver = x.ver + 1;
+					}
+				}
+				/* paranoid */ assert(x.cnt <= 120);
+				if( x.cnt == val_t::Half ) {
+					/* paranoid */ assert(parent);
+					parent->arrive();
+					if( !value.cas(x, 1, x.ver) ) {
+						undoArr = undoArr + 1;
+					}
+				}
+			}
+
+			for(int i = 0; i < undoArr; i++) {
+				/* paranoid */ assert(parent);
+				parent->depart();
+			}
+		}
+
+		void depart_h() {
+			while(true) {
+				auto x = (const val_t)value;
+				/* paranoid */ assertf(x.cnt >= 1, "%d", x.cnt);
+				if( value.cas( x, x.cnt - 1, x.ver ) ) {
+					if( x.cnt == 1 ) {
+						/* paranoid */ assert(parent);
+						parent->depart();
+					}
+					return;
+				}
+			}
+		}
+
+		//--------------------------------------------------
+		// Root node
+		void arrive_r() {
+			__atomic_fetch_add(&value._all, 1, __ATOMIC_SEQ_CST);
+		}
+
+		void depart_r() {
+			__atomic_fetch_sub(&value._all, 1, __ATOMIC_SEQ_CST);
+		}
+
+		//--------------------------------------------------
+		// Interface node
+		void arrive() {
+			/* paranoid */ assert(!is_leaf);
+			if(is_root()) arrive_r();
+			else arrive_h();
+		}
+
+		void depart() {
+			/* paranoid */ assert(!is_leaf);
+			if(is_root()) depart_r();
+			else depart_h();
+		}
+
+	private:
+		volatile val_t value;
+		#if defined(__BMI2__)
+			union __attribute__((packed)) {
+				volatile uint8_t mask[8];
+				volatile uint64_t mask_all;
+			};
+		#else
+			volatile size_t mask = 0;
+		#endif
+
+		class node * parent = nullptr;
+		bool is_leaf = false;
+
+		bool is_root() {
+			return parent == nullptr;
+		}
+
+	public:
+		void arrive( int bit ) {
+			/* paranoid */ assert( is_leaf );
+
+			arrive_h();
+			#if defined(__BMI2__)
+				/* paranoid */ assert( bit < 8 );
+				mask[bit] = 0xff;
+			#else
+				/* paranoid */ assert( (mask & ( 1 << bit )) == 0 );
+				__atomic_fetch_add( &mask, 1 << bit, __ATOMIC_RELAXED );
+			#endif
+
+		}
+
+		void depart( int bit ) {
+			/* paranoid */ assert( is_leaf );
+
+			#if defined(__BMI2__)
+				/* paranoid */ assert( bit < 8 );
+				mask[bit] = 0x00;
+			#else
+				/* paranoid */ assert( (mask & ( 1 << bit )) != 0 );
+				__atomic_fetch_sub( &mask, 1 << bit, __ATOMIC_RELAXED );
+			#endif
+			depart_h();
+		}
+
+		bool query() {
+			/* paranoid */ assert(is_root());
+			return value._all > 0;
+		}
+	};
+};
+
+snzm_t::snzm_t(unsigned numLists)
+	: depth( std::log2( numLists / 8 ) )
+	, mask( (1 << depth) - 1 )
+	, root( (1 << (depth + 1)) - 2 )
+	, nodes(new node[ root + 1 ]())
+{
+	int width = 1 << depth;
+	std::cout << "SNZI with Mask: " << depth << "x" << width << "(" << mask << ")" << std::endl;
+	for(int i = 0; i < root; i++) {
+		nodes[i].is_leaf = i < width;
+		nodes[i].parent = &nodes[(i / 2) + width ];
+	}
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/utils.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/utils.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/utils.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,250 @@
+#pragma once
+
+#include <cassert>
+#include <cstddef>
+#include <atomic>
+#include <chrono>
+#include <fstream>
+#include <iostream>
+
+#include <unistd.h>
+#include <sys/sysinfo.h>
+
+#include <x86intrin.h>
+
+// Barrier from
+class barrier_t {
+public:
+	barrier_t(size_t total)
+		: waiting(0)
+		, total(total)
+	{}
+
+	void wait(unsigned) {
+		size_t target = waiting++;
+		target = (target - (target % total)) + total;
+		while(waiting < target)
+			asm volatile("pause");
+
+		assert(waiting < (1ul << 60));
+    	}
+
+private:
+	std::atomic<size_t> waiting;
+	size_t total;
+};
+
+// class Random {
+// private:
+// 	unsigned int seed;
+// public:
+// 	Random(int seed) {
+// 		this->seed = seed;
+// 	}
+
+// 	/** returns pseudorandom x satisfying 0 <= x < n. **/
+// 	unsigned int next() {
+// 		seed ^= seed << 6;
+// 		seed ^= seed >> 21;
+// 		seed ^= seed << 7;
+// 		return seed;
+//     	}
+// };
+
+constexpr uint64_t extendedEuclidY(uint64_t a, uint64_t b);
+constexpr uint64_t extendedEuclidX(uint64_t a, uint64_t b){
+    return (b==0) ? 1 : extendedEuclidY(b, a - b * (a / b));
+}
+constexpr uint64_t extendedEuclidY(uint64_t a, uint64_t b){
+    return (b==0) ? 0 : extendedEuclidX(b, a - b * (a / b)) - (a / b) * extendedEuclidY(b, a - b * (a / b));
+}
+
+class Random {
+private:
+	uint64_t x;
+
+	static constexpr const uint64_t M  = 1ul << 48ul;
+	static constexpr const uint64_t A  = 25214903917;
+	static constexpr const uint64_t C  = 11;
+	static constexpr const uint64_t D  = 16;
+
+public:
+	static constexpr const uint64_t m  = M;
+	static constexpr const uint64_t a  = A;
+	static constexpr const uint64_t c  = C;
+	static constexpr const uint64_t d  = D;
+	static constexpr const uint64_t ai = extendedEuclidX(A, M);
+public:
+	Random(unsigned int seed) {
+		this->x = seed * a;
+	}
+
+	/** returns pseudorandom x satisfying 0 <= x < n. **/
+	unsigned int next() {
+		//nextx = (a * x + c) % m;
+		x = (A * x + C) & (M - 1);
+		return x >> D;
+	}
+	unsigned int prev() {
+		//prevx = (ainverse * (x - c)) mod m
+		unsigned int r = x >> D;
+		x = ai * (x - C) & (M - 1);
+		return r;
+	}
+
+	void set_raw_state(uint64_t _x) {
+		this->x = _x;
+	}
+
+	uint64_t get_raw_state() {
+		return this->x;
+	}
+};
+
+static inline long long rdtscl(void) {
+    unsigned int lo, hi;
+    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
+    return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
+}
+
+static inline void affinity(int tid) {
+	static int cpus = get_nprocs();
+
+	cpu_set_t  mask;
+	CPU_ZERO(&mask);
+	int cpu = cpus - tid;  // Set CPU affinity to tid, starting from the end
+	CPU_SET(cpu, &mask);
+	auto result = sched_setaffinity(0, sizeof(mask), &mask);
+	if(result != 0) {
+		std::cerr << "Affinity set failed with " << result<< ", wanted " << cpu << std::endl;
+	}
+}
+
+static const constexpr std::size_t cache_line_size = 64;
+static inline void check_cache_line_size() {
+	std::cout << "Checking cache line size" << std::endl;
+	const std::string cache_file = "/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size";
+
+	std::ifstream ifs (cache_file, std::ifstream::in);
+
+	if(!ifs.good()) {
+		std::cerr << "Could not open file to check cache line size" << std::endl;
+		std::cerr << "Looking for: " << cache_file << std::endl;
+		std::exit(2);
+	}
+
+	size_t got;
+	ifs >> got;
+
+	ifs.close();
+
+	if(cache_line_size != got) {
+		std::cerr << "Cache line has incorrect size : " << got << std::endl;
+		std::exit(1);
+	}
+
+	std::cout << "Done" << std::endl;
+}
+
+using Clock = std::chrono::high_resolution_clock;
+using duration_t = std::chrono::duration<double>;
+using std::chrono::nanoseconds;
+
+template<typename Ratio, typename T>
+T duration_cast(T seconds) {
+	return std::chrono::duration_cast<std::chrono::duration<T, Ratio>>(std::chrono::duration<T>(seconds)).count();
+}
+
+static inline unsigned rand_bit(unsigned rnum, size_t mask) __attribute__((artificial));
+static inline unsigned rand_bit(unsigned rnum, size_t mask) {
+	unsigned bit = mask ? rnum % __builtin_popcountl(mask) : 0;
+#if !defined(__BMI2__)
+	uint64_t v = mask;   // Input value to find position with rank r.
+	unsigned int r = bit + 1;// Input: bit's desired rank [1-64].
+	unsigned int s;      // Output: Resulting position of bit with rank r [1-64]
+	uint64_t a, b, c, d; // Intermediate temporaries for bit count.
+	unsigned int t;      // Bit count temporary.
+
+	// Do a normal parallel bit count for a 64-bit integer,
+	// but store all intermediate steps.
+	a =  v - ((v >> 1) & ~0UL/3);
+	b = (a & ~0UL/5) + ((a >> 2) & ~0UL/5);
+	c = (b + (b >> 4)) & ~0UL/0x11;
+	d = (c + (c >> 8)) & ~0UL/0x101;
+
+
+	t = (d >> 32) + (d >> 48);
+	// Now do branchless select!
+	s  = 64;
+	s -= ((t - r) & 256) >> 3; r -= (t & ((t - r) >> 8));
+	t  = (d >> (s - 16)) & 0xff;
+	s -= ((t - r) & 256) >> 4; r -= (t & ((t - r) >> 8));
+	t  = (c >> (s - 8)) & 0xf;
+	s -= ((t - r) & 256) >> 5; r -= (t & ((t - r) >> 8));
+	t  = (b >> (s - 4)) & 0x7;
+	s -= ((t - r) & 256) >> 6; r -= (t & ((t - r) >> 8));
+	t  = (a >> (s - 2)) & 0x3;
+	s -= ((t - r) & 256) >> 7; r -= (t & ((t - r) >> 8));
+	t  = (v >> (s - 1)) & 0x1;
+	s -= ((t - r) & 256) >> 8;
+	return s - 1;
+#else
+	uint64_t picked = _pdep_u64(1ul << bit, mask);
+	return picked ? __builtin_ctzl(picked) : 0;
+#endif
+}
+
+struct spinlock_t {
+	std::atomic_bool ll = { false };
+
+	inline void lock() {
+		while( __builtin_expect(ll.exchange(true),false) ) {
+			while(ll.load(std::memory_order_relaxed))
+				asm volatile("pause");
+		}
+	}
+
+	inline bool try_lock() {
+		return false == ll.exchange(true);
+	}
+
+	inline void unlock() {
+		ll.store(false, std::memory_order_release);
+	}
+
+	inline explicit operator bool() {
+		return ll.load(std::memory_order_relaxed);
+	}
+};
+
+static inline bool bts(std::atomic_size_t & target, size_t bit ) {
+	//*
+	int result = 0;
+	asm volatile(
+		"LOCK btsq %[bit], %[target]\n\t"
+		:"=@ccc" (result)
+		: [target] "m" (target), [bit] "r" (bit)
+	);
+	return result != 0;
+	/*/
+	size_t mask = 1ul << bit;
+	size_t ret = target.fetch_or(mask, std::memory_order_relaxed);
+	return (ret & mask) != 0;
+	//*/
+}
+
+static inline bool btr(std::atomic_size_t & target, size_t bit ) {
+	//*
+	int result = 0;
+	asm volatile(
+		"LOCK btrq %[bit], %[target]\n\t"
+		:"=@ccc" (result)
+		: [target] "m" (target), [bit] "r" (bit)
+	);
+	return result != 0;
+	/*/
+	size_t mask = 1ul << bit;
+	size_t ret = target.fetch_and(~mask, std::memory_order_relaxed);
+	return (ret & mask) != 0;
+	//*/
+}
Index: doc/theses/thierry_delisle_PhD/code/readyQ_proto/work_stealing.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/readyQ_proto/work_stealing.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
+++ doc/theses/thierry_delisle_PhD/code/readyQ_proto/work_stealing.hpp	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -0,0 +1,222 @@
+#pragma once
+#define LIST_VARIANT work_stealing
+
+#include <cmath>
+#include <iomanip>
+#include <memory>
+#include <mutex>
+#include <type_traits>
+
+#include "assert.hpp"
+#include "utils.hpp"
+#include "links.hpp"
+#include "snzi.hpp"
+
+using namespace std;
+
+template<typename node_t>
+class __attribute__((aligned(128))) work_stealing {
+	static_assert(std::is_same<decltype(node_t::_links), _LinksFields_t<node_t>>::value, "Node must have a links field");
+
+public:
+	static const char * name() {
+		return "Work Stealing";
+	}
+
+	work_stealing(unsigned _numThreads, unsigned)
+		: numThreads(_numThreads)
+		, lists(new intrusive_queue_t<node_t>[numThreads])
+		, snzi( std::log2( numThreads / 2 ), 2 )
+
+	{
+		std::cout << "Constructing Work Stealer with " << numThreads << std::endl;
+	}
+
+	~work_stealing() {
+		std::cout << "Destroying Work Stealer" << std::endl;
+		lists.reset();
+	}
+
+	__attribute__((noinline, hot)) void push(node_t * node) {
+		node->_links.ts = rdtscl();
+		if( node->_links.hint > numThreads ) {
+			node->_links.hint = tls.rng.next() % numThreads;
+			tls.stat.push.nhint++;
+		}
+
+		unsigned i = node->_links.hint;
+		auto & list = lists[i];
+		list.lock.lock();
+
+		if(list.push( node )) {
+			snzi.arrive(i);
+		}
+
+		list.lock.unlock();
+	}
+
+	__attribute__((noinline, hot)) node_t * pop() {
+		node_t * node;
+		while(true) {
+			if(!snzi.query()) {
+				return nullptr;
+			}
+
+			{
+				unsigned i = tls.my_queue;
+				auto & list = lists[i];
+				if( list.ts() != 0 ) {
+					list.lock.lock();
+					if((node = try_pop(i))) {
+						tls.stat.pop.local.success++;
+						break;
+					}
+					else {
+						tls.stat.pop.local.elock++;
+					}
+				}
+				else {
+					tls.stat.pop.local.espec++;
+				}
+			}
+
+			tls.stat.pop.steal.tried++;
+
+			int i = tls.rng.next() % numThreads;
+			auto & list = lists[i];
+			if( list.ts() == 0 ) {
+				tls.stat.pop.steal.empty++;
+				continue;
+			}
+
+			if( !list.lock.try_lock() ) {
+				tls.stat.pop.steal.locked++;
+				continue;
+			}
+
+			if((node = try_pop(i))) {
+				tls.stat.pop.steal.success++;
+				break;
+			}
+		}
+
+		#if defined(READ)
+			const unsigned f = READ;
+			if(0 == (tls.it % f)) {
+				unsigned i = tls.it / f;
+				lists[i % numThreads].ts();
+			}
+			// lists[tls.it].ts();
+			tls.it++;
+		#endif
+
+
+		return node;
+	}
+
+private:
+	node_t * try_pop(unsigned i) {
+		auto & list = lists[i];
+
+		// If list is empty, unlock and retry
+		if( list.ts() == 0 ) {
+			list.lock.unlock();
+			return nullptr;
+		}
+
+			// Actually pop the list
+		node_t * node;
+		bool emptied;
+		std::tie(node, emptied) = list.pop();
+		assert(node);
+
+		if(emptied) {
+			snzi.depart(i);
+		}
+
+		// Unlock and return
+		list.lock.unlock();
+		return node;
+	}
+
+
+public:
+
+	static std::atomic_uint32_t ticket;
+	static __attribute__((aligned(128))) thread_local struct TLS {
+		Random     rng = { int(rdtscl()) };
+		unsigned   my_queue = ticket++;
+		#if defined(READ)
+			unsigned it = 0;
+		#endif
+		struct {
+			struct {
+				std::size_t nhint = { 0 };
+			} push;
+			struct {
+				struct {
+					std::size_t success = { 0 };
+					std::size_t espec = { 0 };
+					std::size_t elock = { 0 };
+				} local;
+				struct {
+					std::size_t tried   = { 0 };
+					std::size_t locked  = { 0 };
+					std::size_t empty   = { 0 };
+					std::size_t success = { 0 };
+				} steal;
+			} pop;
+		} stat;
+	} tls;
+
+private:
+	const unsigned numThreads;
+    	std::unique_ptr<intrusive_queue_t<node_t> []> lists;
+	__attribute__((aligned(64))) snzi_t snzi;
+
+#ifndef NO_STATS
+private:
+	static struct GlobalStats {
+		struct {
+			std::atomic_size_t nhint = { 0 };
+		} push;
+		struct {
+			struct {
+				std::atomic_size_t success = { 0 };
+				std::atomic_size_t espec = { 0 };
+				std::atomic_size_t elock = { 0 };
+			} local;
+			struct {
+				std::atomic_size_t tried   = { 0 };
+				std::atomic_size_t locked  = { 0 };
+				std::atomic_size_t empty   = { 0 };
+				std::atomic_size_t success = { 0 };
+			} steal;
+		} pop;
+	} global_stats;
+
+public:
+	static void stats_tls_tally() {
+		global_stats.push.nhint += tls.stat.push.nhint;
+		global_stats.pop.local.success += tls.stat.pop.local.success;
+		global_stats.pop.local.espec   += tls.stat.pop.local.espec  ;
+		global_stats.pop.local.elock   += tls.stat.pop.local.elock  ;
+		global_stats.pop.steal.tried   += tls.stat.pop.steal.tried  ;
+		global_stats.pop.steal.locked  += tls.stat.pop.steal.locked ;
+		global_stats.pop.steal.empty   += tls.stat.pop.steal.empty  ;
+		global_stats.pop.steal.success += tls.stat.pop.steal.success;
+	}
+
+	static void stats_print(std::ostream & os ) {
+		std::cout << "----- Work Stealing Stats -----" << std::endl;
+
+		double stealSucc = double(global_stats.pop.steal.success) / global_stats.pop.steal.tried;
+		os << "Push to new Q : " << std::setw(15) << global_stats.push.nhint << "\n";
+		os << "Local Pop     : " << std::setw(15) << global_stats.pop.local.success << "\n";
+		os << "Steal Pop     : " << std::setw(15) << global_stats.pop.steal.success << "(" << global_stats.pop.local.espec << "s, " << global_stats.pop.local.elock << "l)\n";
+		os << "Steal Success : " << std::setw(15) << stealSucc << "(" << global_stats.pop.steal.tried << " tries)\n";
+		os << "Steal Fails   : " << std::setw(15) << global_stats.pop.steal.empty << "e, " << global_stats.pop.steal.locked << "l\n";
+	}
+private:
+#endif
+};
Index: doc/theses/thierry_delisle_PhD/code/relaxed_list.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/relaxed_list.cpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,1141 +1,0 @@
-#if !defined(LIST_VARIANT_HPP)
-#define LIST_VARIANT_HPP "relaxed_list.hpp"
-#endif
-
-#include LIST_VARIANT_HPP
-#if !defined(LIST_VARIANT)
-#error not variant selected
-#endif
-
-#include <array>
-#include <iomanip>
-#include <iostream>
-#include <locale>
-#include <string>
-#include <thread>
-#include <vector>
-
-#include <getopt.h>
-#include <unistd.h>
-#include <sys/sysinfo.h>
-
-#include "utils.hpp"
-
-struct __attribute__((aligned(64))) Node {
-	static std::atomic_size_t creates;
-	static std::atomic_size_t destroys;
-
-	_LinksFields_t<Node> _links;
-
-	int value;
-	int id;
-
-	Node() { creates++; }
-	Node(int value): value(value) { creates++; }
-	~Node() { destroys++; }
-};
-
-std::atomic_size_t Node::creates  = { 0 };
-std::atomic_size_t Node::destroys = { 0 };
-
-bool enable_stats = false;
-
-template<>
-thread_local LIST_VARIANT<Node>::TLS LIST_VARIANT<Node>::tls = {};
-
-template<>
-std::atomic_uint32_t LIST_VARIANT<Node>::ticket = { 0 };
-
-#ifndef NO_STATS
-template<>
-LIST_VARIANT<Node>::GlobalStats LIST_VARIANT<Node>::global_stats = {};
-#endif
-
-// ================================================================================================
-//                        UTILS
-// ================================================================================================
-
-struct local_stat_t {
-	size_t in  = 0;
-	size_t out = 0;
-	size_t empty = 0;
-	size_t crc_in  = 0;
-	size_t crc_out = 0;
-	size_t valmax = 0;
-	size_t valmin = 100000000ul;
-	struct {
-		size_t val = 0;
-		size_t cnt = 0;
-	} comp;
-	struct {
-		size_t val = 0;
-		size_t cnt = 0;
-	} subm;
-};
-
-struct global_stat_t {
-	std::atomic_size_t in  = { 0 };
-	std::atomic_size_t out = { 0 };
-	std::atomic_size_t empty = { 0 };
-	std::atomic_size_t crc_in  = { 0 };
-	std::atomic_size_t crc_out = { 0 };
-	std::atomic_size_t valmax = { 0 };
-	std::atomic_size_t valmin = { 100000000ul };
-	struct {
-		std::atomic_size_t val = { 0 };
-		std::atomic_size_t cnt = { 0 };
-	} comp;
-	struct {
-		std::atomic_size_t val = { 0 };
-		std::atomic_size_t cnt = { 0 };
-	} subm;
-};
-
-void atomic_max(std::atomic_size_t & target, size_t value) {
-	for(;;) {
-		size_t expect = target.load(std::memory_order_relaxed);
-		if(value <= expect) return;
-		bool success = target.compare_exchange_strong(expect, value);
-		if(success) return;
-	}
-}
-
-void atomic_min(std::atomic_size_t & target, size_t value) {
-	for(;;) {
-		size_t expect = target.load(std::memory_order_relaxed);
-		if(value >= expect) return;
-		bool success = target.compare_exchange_strong(expect, value);
-		if(success) return;
-	}
-}
-
-void tally_stats(global_stat_t & global, local_stat_t & local) {
-
-	global.in    += local.in;
-	global.out   += local.out;
-	global.empty += local.empty;
-
-	global.crc_in  += local.crc_in;
-	global.crc_out += local.crc_out;
-
-	global.comp.val += local.comp.val;
-	global.comp.cnt += local.comp.cnt;
-	global.subm.val += local.subm.val;
-	global.subm.cnt += local.subm.cnt;
-
-	atomic_max(global.valmax, local.valmax);
-	atomic_min(global.valmin, local.valmin);
-
-	LIST_VARIANT<Node>::stats_tls_tally();
-}
-
-void waitfor(double & duration, barrier_t & barrier, std::atomic_bool & done) {
-	std::cout << "Starting" << std::endl;
-	auto before = Clock::now();
-	barrier.wait(0);
-	bool is_tty = isatty(STDOUT_FILENO);
-
-	while(true) {
-		usleep(100000);
-		auto now = Clock::now();
-		duration_t durr = now - before;
-		if( durr.count() > duration ) {
-			done = true;
-			break;
-		}
-		if(is_tty) {
-			std::cout << "\r" << std::setprecision(4) << durr.count();
-			std::cout.flush();
-		}
-	}
-
-	barrier.wait(0);
-	auto after = Clock::now();
-	duration_t durr = after - before;
-	duration = durr.count();
-	std::cout << "\rClosing down" << std::endl;
-}
-
-void waitfor(double & duration, barrier_t & barrier, const std::atomic_size_t & count) {
-	std::cout << "Starting" << std::endl;
-	auto before = Clock::now();
-	barrier.wait(0);
-
-	while(true) {
-		usleep(100000);
-		size_t c = count.load();
-		if( c == 0 ) {
-			break;
-		}
-		std::cout << "\r" << c;
-		std::cout.flush();
-	}
-
-	barrier.wait(0);
-	auto after = Clock::now();
-	duration_t durr = after - before;
-	duration = durr.count();
-	std::cout << "\rClosing down" << std::endl;
-}
-
-void print_stats(double duration, unsigned nthread, global_stat_t & global) {
-	assert(Node::creates == Node::destroys);
-	assert(global.crc_in == global.crc_out);
-
-	std::cout << "Done" << std::endl;
-
-	size_t ops = global.in + global.out;
-	size_t ops_sec = size_t(double(ops) / duration);
-	size_t ops_thread = ops_sec / nthread;
-	auto dur_nano = duration_cast<std::nano>(1.0);
-
-	if(global.valmax != 0) {
-		std::cout << "Max runs      : " << global.valmax << "\n";
-		std::cout << "Min runs      : " << global.valmin << "\n";
-	}
-	if(global.comp.cnt != 0) {
-		std::cout << "Submit count  : " << global.subm.cnt << "\n";
-		std::cout << "Submit average: " << ((double(global.subm.val)) / global.subm.cnt) << "\n";
-		std::cout << "Complete count: " << global.comp.cnt << "\n";
-		std::cout << "Complete avg  : " << ((double(global.comp.val)) / global.comp.cnt) << "\n";
-	}
-	std::cout << "Duration      : " << duration << "s\n";
-	std::cout << "ns/Op         : " << ( dur_nano / ops_thread )<< "\n";
-	std::cout << "Ops/sec/thread: " << ops_thread << "\n";
-	std::cout << "Ops/sec       : " << ops_sec << "\n";
-	std::cout << "Total ops     : " << ops << "(" << global.in << "i, " << global.out << "o, " << global.empty << "e)\n";
-	#ifndef NO_STATS
-		LIST_VARIANT<Node>::stats_print(std::cout);
-	#endif
-}
-
-void save_fairness(const int data[], int factor, unsigned nthreads, size_t columns, size_t rows, const std::string & output);
-
-// ================================================================================================
-//                        EXPERIMENTS
-// ================================================================================================
-
-// ================================================================================================
-__attribute__((noinline)) void runChurn_body(
-	std::atomic<bool>& done,
-	Random & rand,
-	Node * my_nodes[],
-	unsigned nslots,
-	local_stat_t & local,
-	LIST_VARIANT<Node> & list
-) {
-	while(__builtin_expect(!done.load(std::memory_order_relaxed), true)) {
-		int idx = rand.next() % nslots;
-		if (auto node = my_nodes[idx]) {
-			local.crc_in += node->value;
-			list.push(node);
-			my_nodes[idx] = nullptr;
-			local.in++;
-		}
-		else if(auto node = list.pop()) {
-			local.crc_out += node->value;
-			my_nodes[idx] = node;
-			local.out++;
-		}
-		else {
-			local.empty++;
-		}
-	}
-}
-
-void runChurn(unsigned nthread, unsigned nqueues, double duration, unsigned nnodes, const unsigned nslots) {
-	std::cout << "Churn Benchmark" << std::endl;
-	assert(nnodes <= nslots);
-	// List being tested
-
-	// Barrier for synchronization
-	barrier_t barrier(nthread + 1);
-
-	// Data to check everything is OK
-	global_stat_t global;
-
-	// Flag to signal termination
-	std::atomic_bool done  = { false };
-
-	// Prep nodes
-	std::cout << "Initializing ";
-	size_t npushed = 0;
-	LIST_VARIANT<Node> list = { nthread, nqueues };
-	{
-		Node** all_nodes[nthread];
-		for(auto & nodes : all_nodes) {
-			nodes = new __attribute__((aligned(64))) Node*[nslots + 8];
-			Random rand(rdtscl());
-			for(unsigned i = 0; i < nnodes; i++) {
-				nodes[i] = new Node(rand.next() % 100);
-			}
-
-			for(unsigned i = nnodes; i < nslots; i++) {
-				nodes[i] = nullptr;
-			}
-
-			for(int i = 0; i < 10 && i < (int)nslots; i++) {
-				int idx = rand.next() % nslots;
-				if (auto node = nodes[idx]) {
-					global.crc_in += node->value;
-					list.push(node);
-					npushed++;
-					nodes[idx] = nullptr;
-				}
-			}
-		}
-
-		std::cout << nnodes << " nodes (" << nslots << " slots)" << std::endl;
-
-		enable_stats = true;
-
-		std::thread * threads[nthread];
-		unsigned i = 1;
-		for(auto & t : threads) {
-			auto & my_nodes = all_nodes[i - 1];
-			t = new std::thread([&done, &list, &barrier, &global, &my_nodes, nslots](unsigned tid) {
-				Random rand(tid + rdtscl());
-
-				local_stat_t local;
-
-				// affinity(tid);
-
-				barrier.wait(tid);
-
-				// EXPERIMENT START
-
-				runChurn_body(done, rand, my_nodes, nslots, local, list);
-
-				// EXPERIMENT END
-
-				barrier.wait(tid);
-
-				tally_stats(global, local);
-
-				for(unsigned i = 0; i < nslots; i++) {
-					delete my_nodes[i];
-				}
-			}, i++);
-		}
-
-		waitfor(duration, barrier, done);
-
-		for(auto t : threads) {
-			t->join();
-			delete t;
-		}
-
-		enable_stats = false;
-
-		while(auto node = list.pop()) {
-			global.crc_out += node->value;
-			delete node;
-		}
-
-		for(auto nodes : all_nodes) {
-			delete[] nodes;
-		}
-	}
-
-	print_stats(duration, nthread, global);
-}
-
-// ================================================================================================
-__attribute__((noinline)) void runPingPong_body(
-	std::atomic<bool>& done,
-	Node initial_nodes[],
-	unsigned nnodes,
-	local_stat_t & local,
-	LIST_VARIANT<Node> & list
-) {
-	Node * nodes[nnodes];
-	{
-		unsigned i = 0;
-		for(auto & n : nodes) {
-			n = &initial_nodes[i++];
-		}
-	}
-
-	while(__builtin_expect(!done.load(std::memory_order_relaxed), true)) {
-
-		for(Node * & node : nodes) {
-			local.crc_in += node->value;
-			list.push(node);
-			local.in++;
-		}
-
-		// -----
-
-		for(Node * & node : nodes) {
-			node = list.pop();
-			assert(node);
-			local.crc_out += node->value;
-			local.out++;
-		}
-	}
-}
-
-void runPingPong(unsigned nthread, unsigned nqueues, double duration, unsigned nnodes) {
-	std::cout << "PingPong Benchmark" << std::endl;
-
-
-	// Barrier for synchronization
-	barrier_t barrier(nthread + 1);
-
-	// Data to check everything is OK
-	global_stat_t global;
-
-	// Flag to signal termination
-	std::atomic_bool done  = { false };
-
-	std::cout << "Initializing ";
-	// List being tested
-	LIST_VARIANT<Node> list = { nthread, nqueues };
-	{
-		enable_stats = true;
-
-		std::thread * threads[nthread];
-		unsigned i = 1;
-		for(auto & t : threads) {
-			t = new std::thread([&done, &list, &barrier, &global, nnodes](unsigned tid) {
-				Random rand(tid + rdtscl());
-
-				Node nodes[nnodes];
-				for(auto & n : nodes) {
-					n.value = (int)rand.next() % 100;
-				}
-
-				local_stat_t local;
-
-				// affinity(tid);
-
-				barrier.wait(tid);
-
-				// EXPERIMENT START
-
-				runPingPong_body(done, nodes, nnodes, local, list);
-
-				// EXPERIMENT END
-
-				barrier.wait(tid);
-
-				tally_stats(global, local);
-			}, i++);
-		}
-
-		waitfor(duration, barrier, done);
-
-		for(auto t : threads) {
-			t->join();
-			delete t;
-		}
-
-		enable_stats = false;
-	}
-
-	print_stats(duration, nthread, global);
-}
-
-// ================================================================================================
-struct __attribute__((aligned(64))) Slot {
-	Node * volatile node;
-};
-
-__attribute__((noinline)) void runProducer_body(
-	std::atomic<bool>& done,
-	Random & rand,
-	Slot * slots,
-	int nslots,
-	local_stat_t & local,
-	LIST_VARIANT<Node> & list
-) {
-	while(__builtin_expect(!done.load(std::memory_order_relaxed), true)) {
-
-		Node * node = list.pop();
-		if(!node) {
-			local.empty ++;
-			continue;
-		}
-
-		local.crc_out += node->value;
-		local.out++;
-
-		if(node->id == 0) {
-			unsigned cnt = 0;
-			for(int i = 0; i < nslots; i++) {
-				Node * found = __atomic_exchange_n( &slots[i].node, nullptr, __ATOMIC_SEQ_CST );
-				if( found ) {
-					local.crc_in += found->value;
-					local.in++;
-					cnt++;
-					list.push( found );
-				}
-			}
-
-			local.crc_in += node->value;
-			local.in++;
-			list.push( node );
-
-			local.comp.cnt++;
-			local.comp.val += cnt;
-		}
-		else {
-			unsigned len = 0;
-			while(true) {
-				auto off = rand.next();
-				for(int i = 0; i < nslots; i++) {
-					Node * expected = nullptr;
-					int idx = (i + off) % nslots;
-					Slot & slot = slots[ idx ];
-					if(
-						slot.node == nullptr &&
-						__atomic_compare_exchange_n( &slot.node, &expected, node, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST )
-					) {
-						local.subm.cnt++;
-						local.subm.val += len;
-						goto LOOP;
-					}
-					assert( expected != node );
-					len++;
-				}
-			}
-		}
-
-		LOOP:;
-	}
-}
-
-void runProducer(unsigned nthread, unsigned nqueues, double duration, unsigned nnodes) {
-	std::cout << "Producer Benchmark" << std::endl;
-
-	// Barrier for synchronization
-	barrier_t barrier(nthread + 1);
-
-	// Data to check everything is OK
-	global_stat_t global;
-
-	// Flag to signal termination
-	std::atomic_bool done  = { false };
-
-	std::cout << "Initializing ";
-
-	int nslots = nnodes * 4;
-	Slot * slots = new Slot[nslots];
-	std::cout << nnodes << " nodes (" << nslots << " slots)" << std::endl;
-
-	// List being tested
-	LIST_VARIANT<Node> list = { nthread, nqueues };
-	{
-		Random rand(rdtscl());
-		for(unsigned i = 0; i < nnodes; i++) {
-			Node * node = new Node(rand.next() % 100);
-			node->id = i;
-			global.crc_in += node->value;
-			list.push(node);
-		}
-
-		for(int i = 0; i < nslots; i++) {
-			slots[i].node = nullptr;
-		}
-	}
-
-	{
-		enable_stats = true;
-
-		std::thread * threads[nthread];
-		unsigned i = 1;
-		for(auto & t : threads) {
-			t = new std::thread([&done, &list, &barrier, &global, slots, nslots](unsigned tid) {
-				Random rand(tid + rdtscl());
-
-				local_stat_t local;
-				barrier.wait(tid);
-
-				// EXPERIMENT START
-
-				runProducer_body(done, rand, slots, nslots, local, list);
-
-				// EXPERIMENT END
-
-				barrier.wait(tid);
-
-				tally_stats(global, local);
-			}, i++);
-		}
-
-		waitfor(duration, barrier, done);
-
-		for(auto t : threads) {
-			t->join();
-			delete t;
-		}
-
-		enable_stats = false;
-	}
-
-	{
-		while(Node * node = list.pop()) {
-			global.crc_out += node->value;
-			delete node;
-		}
-
-		for(int i = 0; i < nslots; i++) {
-			delete slots[i].node;
-		}
-
-		delete [] slots;
-	}
-
-	print_stats(duration, nthread, global);
-}
-
-// ================================================================================================
-__attribute__((noinline)) void runFairness_body(
-	unsigned tid,
-	size_t width,
-	size_t length,
-	int output[],
-	std::atomic_size_t & count,
-	Node initial_nodes[],
-	unsigned nnodes,
-	local_stat_t & local,
-	LIST_VARIANT<Node> & list
-) {
-	Node * nodes[nnodes];
-	{
-		unsigned i = 0;
-		for(auto & n : nodes) {
-			n = &initial_nodes[i++];
-		}
-	}
-
-	while(__builtin_expect(0 != count.load(std::memory_order_relaxed), true)) {
-
-		for(Node * & node : nodes) {
-			local.crc_in += node->id;
-			list.push(node);
-			local.in++;
-		}
-
-		// -----
-
-		for(Node * & node : nodes) {
-			node = list.pop();
-			assert(node);
-
-			if (unsigned(node->value) < length) {
-				size_t idx = (node->value * width) + node->id;
-				assert(idx < (width * length));
-				output[idx] = tid;
-			}
-
-			node->value++;
-			if(unsigned(node->value) == length) count--;
-
-			local.crc_out += node->id;
-			local.out++;
-		}
-	}
-}
-
-void runFairness(unsigned nthread, unsigned nqueues, double duration, unsigned nnodes, const std::string & output) {
-	std::cout << "Fairness Benchmark, outputing to : " << output << std::endl;
-
-	// Barrier for synchronization
-	barrier_t barrier(nthread + 1);
-
-	// Data to check everything is OK
-	global_stat_t global;
-
-	std::cout << "Initializing ";
-
-	// Check fairness by creating a png of where the threads ran
-	size_t width = nthread * nnodes;
-	size_t length = 100000;
-
-	std::unique_ptr<int[]> data_out { new int[width * length] };
-
-	// Flag to signal termination
-	std::atomic_size_t count = width;
-
-	// List being tested
-	LIST_VARIANT<Node> list = { nthread, nqueues };
-	{
-		enable_stats = true;
-
-		std::thread * threads[nthread];
-		unsigned i = 1;
-		for(auto & t : threads) {
-			t = new std::thread([&count, &list, &barrier, &global, nnodes, width, length, data_out = data_out.get()](unsigned tid) {
-				unsigned int start = (tid - 1) * nnodes;
-				Node nodes[nnodes];
-				for(auto & n : nodes) {
-					n.id = start;
-					n.value = 0;
-					start++;
-				}
-
-				local_stat_t local;
-
-				// affinity(tid);
-
-				barrier.wait(tid);
-
-				// EXPERIMENT START
-
-				runFairness_body(tid, width, length, data_out, count, nodes, nnodes, local, list);
-
-				// EXPERIMENT END
-
-				barrier.wait(tid);
-
-				for(const auto & n : nodes) {
-					local.valmax = max(local.valmax, size_t(n.value));
-					local.valmin = min(local.valmin, size_t(n.value));
-				}
-
-				tally_stats(global, local);
-			}, i++);
-		}
-
-		waitfor(duration, barrier, count);
-
-		for(auto t : threads) {
-			t->join();
-			delete t;
-		}
-
-		enable_stats = false;
-	}
-
-	print_stats(duration, nthread, global);
-
-	// save_fairness(data_out.get(), 100, nthread, width, length, output);
-}
-
-// ================================================================================================
-
-bool iequals(const std::string& a, const std::string& b)
-{
-    return std::equal(a.begin(), a.end(),
-                      b.begin(), b.end(),
-                      [](char a, char b) {
-                          return std::tolower(a) == std::tolower(b);
-                      });
-}
-
-int main(int argc, char * argv[]) {
-
-	double duration   = 5.0;
-	unsigned nthreads = 2;
-	unsigned nqueues  = 4;
-	unsigned nnodes   = 100;
-	unsigned nslots   = 100;
-	std::string out   = "fairness.png";
-
-	enum {
-		Churn,
-		PingPong,
-		Producer,
-		Fairness,
-		NONE
-	} benchmark = NONE;
-
-	std::cout.imbue(std::locale(""));
-
-	for(;;) {
-		static struct option options[] = {
-			{"duration",  required_argument, 0, 'd'},
-			{"nthreads",  required_argument, 0, 't'},
-			{"nqueues",   required_argument, 0, 'q'},
-			{"benchmark", required_argument, 0, 'b'},
-			{0, 0, 0, 0}
-		};
-
-		int idx = 0;
-		int opt = getopt_long(argc, argv, "d:t:q:b:", options, &idx);
-
-		std::string arg = optarg ? optarg : "";
-		size_t len = 0;
-		switch(opt) {
-			// Exit Case
-			case -1:
-				/* paranoid */ assert(optind <= argc);
-				switch(benchmark) {
-				case NONE:
-					std::cerr << "Must specify a benchmark" << std::endl;
-					goto usage;
-				case PingPong:
-					nnodes = 1;
-					switch(argc - optind) {
-					case 0: break;
-					case 1:
-						try {
-							arg = optarg = argv[optind];
-							nnodes = stoul(optarg, &len);
-							if(len != arg.size()) { throw std::invalid_argument(""); }
-						} catch(std::invalid_argument &) {
-							std::cerr << "Number of nodes must be a positive integer, was " << arg << std::endl;
-							goto usage;
-						}
-						break;
-					default:
-						std::cerr << "'PingPong' benchmark doesn't accept more than 1 extra arguments" << std::endl;
-						goto usage;
-					}
-					break;
-				case Producer:
-					nnodes = 32;
-					switch(argc - optind) {
-					case 0: break;
-					case 1:
-						try {
-							arg = optarg = argv[optind];
-							nnodes = stoul(optarg, &len);
-							if(len != arg.size()) { throw std::invalid_argument(""); }
-						} catch(std::invalid_argument &) {
-							std::cerr << "Number of nodes must be a positive integer, was " << arg << std::endl;
-							goto usage;
-						}
-						break;
-					default:
-						std::cerr << "'Producer' benchmark doesn't accept more than 1 extra arguments" << std::endl;
-						goto usage;
-					}
-					break;
-				case Churn:
-					nnodes = 100;
-					nslots = 100;
-					switch(argc - optind) {
-					case 0: break;
-					case 1:
-						try {
-							arg = optarg = argv[optind];
-							nnodes = stoul(optarg, &len);
-							if(len != arg.size()) { throw std::invalid_argument(""); }
-							nslots = nnodes;
-						} catch(std::invalid_argument &) {
-							std::cerr << "Number of nodes must be a positive integer, was " << arg << std::endl;
-							goto usage;
-						}
-						break;
-					case 2:
-						try {
-							arg = optarg = argv[optind];
-							nnodes = stoul(optarg, &len);
-							if(len != arg.size()) { throw std::invalid_argument(""); }
-						} catch(std::invalid_argument &) {
-							std::cerr << "Number of nodes must be a positive integer, was " << arg << std::endl;
-							goto usage;
-						}
-						try {
-							arg = optarg = argv[optind + 1];
-							nslots = stoul(optarg, &len);
-							if(len != arg.size()) { throw std::invalid_argument(""); }
-						} catch(std::invalid_argument &) {
-							std::cerr << "Number of slots must be a positive integer, was " << arg << std::endl;
-							goto usage;
-						}
-						break;
-					default:
-						std::cerr << "'Churn' benchmark doesn't accept more than 2 extra arguments" << std::endl;
-						goto usage;
-					}
-					break;
-				case Fairness:
-					nnodes = 1;
-					switch(argc - optind) {
-					case 0: break;
-					case 1:
-						arg = optarg = argv[optind];
-						out = arg;
-						break;
-					default:
-						std::cerr << "'Churn' benchmark doesn't accept more than 2 extra arguments" << std::endl;
-						goto usage;
-					}
-				}
-				goto run;
-			// Benchmarks
-			case 'b':
-				if(benchmark != NONE) {
-					std::cerr << "Only when benchmark can be run" << std::endl;
-					goto usage;
-				}
-				if(iequals(arg, "churn")) {
-					benchmark = Churn;
-					break;
-				}
-				if(iequals(arg, "pingpong")) {
-					benchmark = PingPong;
-					break;
-				}
-				if(iequals(arg, "producer")) {
-					benchmark = Producer;
-					break;
-				}
-				if(iequals(arg, "fairness")) {
-					benchmark = Fairness;
-					break;
-				}
-				std::cerr << "Unkown benchmark " << arg << std::endl;
-				goto usage;
-			// Numeric Arguments
-			case 'd':
-				try {
-					duration = stod(optarg, &len);
-					if(len != arg.size()) { throw std::invalid_argument(""); }
-				} catch(std::invalid_argument &) {
-					std::cerr << "Duration must be a valid double, was " << arg << std::endl;
-					goto usage;
-				}
-				break;
-			case 't':
-				try {
-					nthreads = stoul(optarg, &len);
-					if(len != arg.size()) { throw std::invalid_argument(""); }
-				} catch(std::invalid_argument &) {
-					std::cerr << "Number of threads must be a positive integer, was " << arg << std::endl;
-					goto usage;
-				}
-				break;
-			case 'q':
-				try {
-					nqueues = stoul(optarg, &len);
-					if(len != arg.size()) { throw std::invalid_argument(""); }
-				} catch(std::invalid_argument &) {
-					std::cerr << "Number of queues must be a positive integer, was " << arg << std::endl;
-					goto usage;
-				}
-				break;
-			// Other cases
-			default: /* ? */
-				std::cerr << opt << std::endl;
-			usage:
-				std::cerr << "Usage: " << argv[0] << ": [options] -b churn [NNODES] [NSLOTS = NNODES]" << std::endl;
-				std::cerr << "  or:  " << argv[0] << ": [options] -b pingpong [NNODES]" << std::endl;
-				std::cerr << "  or:  " << argv[0] << ": [options] -b producer [NNODES]" << std::endl;
-				std::cerr << std::endl;
-				std::cerr << "  -d, --duration=DURATION  Duration of the experiment, in seconds" << std::endl;
-				std::cerr << "  -t, --nthreads=NTHREADS  Number of kernel threads" << std::endl;
-				std::cerr << "  -q, --nqueues=NQUEUES    Number of queues per threads" << std::endl;
-				std::exit(1);
-		}
-	}
-	run:
-
-	check_cache_line_size();
-
-	std::cout << "Running " << nthreads << " threads (" << (nthreads * nqueues) << " queues) for " << duration << " seconds" << std::endl;
-	std::cout << "Relaxed list variant: " << LIST_VARIANT<Node>::name() << std::endl;
-	switch(benchmark) {
-		case Churn:
-			runChurn(nthreads, nqueues, duration, nnodes, nslots);
-			break;
-		case PingPong:
-			runPingPong(nthreads, nqueues, duration, nnodes);
-			break;
-		case Producer:
-			runProducer(nthreads, nqueues, duration, nnodes);
-			break;
-		case Fairness:
-			runFairness(nthreads, nqueues, duration, nnodes, out);
-			break;
-		default:
-			abort();
-	}
-	return 0;
-}
-
-const char * __my_progname = "Relaxed List";
-
-struct rgb_t {
-    double r;       // a fraction between 0 and 1
-    double g;       // a fraction between 0 and 1
-    double b;       // a fraction between 0 and 1
-};
-
-struct hsv_t {
-    double h;       // angle in degrees
-    double s;       // a fraction between 0 and 1
-    double v;       // a fraction between 0 and 1
-};
-
-rgb_t hsv2rgb(hsv_t in) {
-	double hh, p, q, t, ff;
-	long   i;
-	rgb_t  out;
-
-	if(in.s <= 0.0) {       // < is bogus, just shuts up warnings
-		out.r = in.v;
-		out.g = in.v;
-		out.b = in.v;
-		return out;
-	}
-	hh = in.h;
-	if(hh >= 360.0) hh = 0.0;
-	hh /= 60.0;
-	i = (long)hh;
-	ff = hh - i;
-	p = in.v * (1.0 - in.s);
-	q = in.v * (1.0 - (in.s * ff));
-	t = in.v * (1.0 - (in.s * (1.0 - ff)));
-
-	switch(i) {
-	case 0:
-		out.r = in.v;
-		out.g = t;
-		out.b = p;
-		break;
-	case 1:
-		out.r = q;
-		out.g = in.v;
-		out.b = p;
-		break;
-	case 2:
-		out.r = p;
-		out.g = in.v;
-		out.b = t;
-		break;
-
-	case 3:
-		out.r = p;
-		out.g = q;
-		out.b = in.v;
-		break;
-	case 4:
-		out.r = t;
-		out.g = p;
-		out.b = in.v;
-		break;
-	case 5:
-	default:
-		out.r = in.v;
-		out.g = p;
-		out.b = q;
-		break;
-	}
-	return out;
-}
-
-// void save_fairness(const int data[], int factor, unsigned nthreads, size_t columns, size_t rows, const std::string & output) {
-// 	std::ofstream os(output);
-// 	os << "<html>\n";
-// 	os << "<head>\n";
-// 	os << "<style>\n";
-// 	os << "</style>\n";
-// 	os << "</head>\n";
-// 	os << "<body>\n";
-// 	os << "<table style=\"width=100%\">\n";
-
-// 	size_t idx = 0;
-// 	for(size_t r = 0ul; r < rows; r++) {
-// 		os << "<tr>\n";
-// 		for(size_t c = 0ul; c < columns; c++) {
-// 			os << "<td class=\"custom custom" << data[idx] << "\"></td>\n";
-// 			idx++;
-// 		}
-// 		os << "</tr>\n";
-// 	}
-
-// 	os << "</table>\n";
-// 	os << "</body>\n";
-// 	os << "</html>\n";
-// 	os << std::endl;
-// }
-
-// #include <png.h>
-// #include <setjmp.h>
-
-/*
-void save_fairness(const int data[], int factor, unsigned nthreads, size_t columns, size_t rows, const std::string & output) {
-	int width  = columns * factor;
-	int height = rows / factor;
-
-	int code = 0;
-	int idx = 0;
-	FILE *fp = NULL;
-	png_structp png_ptr = NULL;
-	png_infop info_ptr = NULL;
-	png_bytep row = NULL;
-
-	// Open file for writing (binary mode)
-	fp = fopen(output.c_str(), "wb");
-	if (fp == NULL) {
-		fprintf(stderr, "Could not open file %s for writing\n", output.c_str());
-		code = 1;
-		goto finalise;
-	}
-
-	   // Initialize write structure
-	png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
-	if (png_ptr == NULL) {
-		fprintf(stderr, "Could not allocate write struct\n");
-		code = 1;
-		goto finalise;
-	}
-
-	// Initialize info structure
-	info_ptr = png_create_info_struct(png_ptr);
-	if (info_ptr == NULL) {
-		fprintf(stderr, "Could not allocate info struct\n");
-		code = 1;
-		goto finalise;
-	}
-
-	// Setup Exception handling
-	if (setjmp(png_jmpbuf(png_ptr))) {
-		fprintf(stderr, "Error during png creation\n");
-		code = 1;
-		goto finalise;
-	}
-
-	png_init_io(png_ptr, fp);
-
-	// Write header (8 bit colour depth)
-	png_set_IHDR(png_ptr, info_ptr, width, height,
-		8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
-		PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
-
-	png_write_info(png_ptr, info_ptr);
-
-	// Allocate memory for one row (3 bytes per pixel - RGB)
-	row = (png_bytep) malloc(3 * width * sizeof(png_byte));
-
-	// Write image data
-	int x, y;
-	for (y=0 ; y<height ; y++) {
-		for (x=0 ; x<width ; x++) {
-			auto & r = row[(x * 3) + 0];
-			auto & g = row[(x * 3) + 1];
-			auto & b = row[(x * 3) + 2];
-			assert(idx < (rows * columns));
-			int color = data[idx] - 1;
-			assert(color < nthreads);
-			assert(color >= 0);
-			idx++;
-
-			double angle = double(color) / double(nthreads);
-
-			auto c = hsv2rgb({ 360.0 * angle, 0.8, 0.8 });
-
-			r = char(c.r * 255.0);
-			g = char(c.g * 255.0);
-			b = char(c.b * 255.0);
-
-		}
-		png_write_row(png_ptr, row);
-	}
-
-	assert(idx == (rows * columns));
-
-	// End write
-	png_write_end(png_ptr, NULL);
-
-	finalise:
-	if (fp != NULL) fclose(fp);
-	if (info_ptr != NULL) png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
-	if (png_ptr != NULL) png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
-	if (row != NULL) free(row);
-}
-*/
Index: doc/theses/thierry_delisle_PhD/code/relaxed_list.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/relaxed_list.hpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,555 +1,0 @@
-#pragma once
-#define LIST_VARIANT relaxed_list
-
-#define VANILLA 0
-#define SNZI 1
-#define BITMASK 2
-#define DISCOVER 3
-#define SNZM 4
-#define BIAS 5
-#define BACK 6
-#define BACKBIAS 7
-
-#ifndef VARIANT
-#define VARIANT VANILLA
-#endif
-
-#ifndef NO_STATS
-#include <iostream>
-#endif
-
-#include <cmath>
-#include <functional>
-#include <memory>
-#include <mutex>
-#include <thread>
-#include <type_traits>
-
-#include "assert.hpp"
-#include "utils.hpp"
-#include "links.hpp"
-#include "snzi.hpp"
-#include "snzi-packed.hpp"
-#include "snzm.hpp"
-
-using namespace std;
-
-struct pick_stat {
-	struct {
-		size_t attempt = 0;
-		size_t success = 0;
-		size_t local = 0;
-	} push;
-	struct {
-		size_t attempt = 0;
-		size_t success = 0;
-		size_t mask_attempt = 0;
-		size_t mask_reset = 0;
-		size_t local = 0;
-	} pop;
-};
-
-struct empty_stat {
-	struct {
-		size_t value = 0;
-		size_t count = 0;
-	} push;
-	struct {
-		size_t value = 0;
-		size_t count = 0;
-	} pop;
-};
-
-template<typename node_t>
-class __attribute__((aligned(128))) relaxed_list {
-	static_assert(std::is_same<decltype(node_t::_links), _LinksFields_t<node_t>>::value, "Node must have a links field");
-
-public:
-	static const char * name() {
-		const char * names[] = {
-			"RELAXED: VANILLA",
-			"RELAXED: SNZI",
-			"RELAXED: BITMASK",
-			"RELAXED: SNZI + DISCOVERED MASK",
-			"RELAXED: SNZI + MASK",
-			"RELAXED: SNZI + LOCAL BIAS",
-			"RELAXED: SNZI + REVERSE RNG",
-			"RELAXED: SNZI + LOCAL BIAS + REVERSE RNG"
-		};
-		return names[VARIANT];
-	}
-
-	relaxed_list(unsigned numThreads, unsigned numQueues)
-		: numLists(numThreads * numQueues)
-	  	, lists(new intrusive_queue_t<node_t>[numLists])
-		#if VARIANT == SNZI || VARIANT == BACK
-			, snzi( std::log2( numLists / (2 * numQueues) ), 2 )
-		#elif VARIANT == BIAS || VARIANT == BACKBIAS
-			#ifdef SNZI_PACKED
-				, snzi( std::ceil( std::log2(numLists) ) )
-			#else
-				, snzi( std::log2( numLists / (2 * numQueues) ), 2 )
-			#endif
-		#elif VARIANT == SNZM || VARIANT == DISCOVER
-			, snzm( numLists )
-		#endif
-	{
-		assertf(7 * 8 * 8 >= numLists, "List currently only supports 448 sublists");
-		std::cout << "Constructing Relaxed List with " << numLists << std::endl;
-	}
-
-	~relaxed_list() {
-		std::cout << "Destroying Relaxed List" << std::endl;
-		lists.reset();
-	}
-
-    	__attribute__((noinline, hot)) void push(node_t * node) {
-		node->_links.ts = rdtscl();
-
-		while(true) {
-			// Pick a random list
-			unsigned i = idx_from_r(tls.rng1.next(), VARIANT == BIAS || VARIANT == BACKBIAS);
-
-			#ifndef NO_STATS
-				tls.pick.push.attempt++;
-			#endif
-
-			// If we can't lock it retry
-			if( !lists[i].lock.try_lock() ) continue;
-
-			#if VARIANT == VANILLA || VARIANT == BITMASK
-				__attribute__((unused)) int num = numNonEmpty;
-			#endif
-
-			// Actually push it
-			if(lists[i].push(node)) {
-				#if VARIANT == DISCOVER
-					size_t qword = i >> 6ull;
-					size_t bit   = i & 63ull;
-					assert(qword == 0);
-					bts(tls.mask, bit);
-					snzm.arrive(i);
-				#elif VARIANT == SNZI || VARIANT == BIAS
-					snzi.arrive(i);
-				#elif VARIANT == BACK || VARIANT == BACKBIAS
-					snzi.arrive(i);
-					tls.rng2.set_raw_state( tls.rng1.get_raw_state());
-				#elif VARIANT == SNZM
-					snzm.arrive(i);
-				#elif VARIANT == BITMASK
-					numNonEmpty++;
-					size_t qword = i >> 6ull;
-					size_t bit   = i & 63ull;
-					assertf((list_mask[qword] & (1ul << bit)) == 0, "Before set %zu:%zu (%u), %zx & %zx", qword, bit, i, list_mask[qword].load(), (1ul << bit));
-					__attribute__((unused)) bool ret = bts(list_mask[qword], bit);
-					assert(!ret);
-					assertf((list_mask[qword] & (1ul << bit)) != 0, "After set %zu:%zu (%u), %zx & %zx", qword, bit, i, list_mask[qword].load(), (1ul << bit));
-				#else
-					numNonEmpty++;
-				#endif
-			}
-			#if VARIANT == VANILLA || VARIANT == BITMASK
-				assert(numNonEmpty <= (int)numLists);
-			#endif
-
-			// Unlock and return
-			lists[i].lock.unlock();
-
-			#ifndef NO_STATS
-				tls.pick.push.success++;
-				#if VARIANT == VANILLA || VARIANT == BITMASK
-					tls.empty.push.value += num;
-					tls.empty.push.count += 1;
-				#endif
-			#endif
-			return;
-		}
-    	}
-
-	__attribute__((noinline, hot)) node_t * pop() {
-		#if VARIANT == DISCOVER
-			assert(numLists <= 64);
-			while(snzm.query()) {
-				tls.pick.pop.mask_attempt++;
-				unsigned i, j;
-				{
-					// Pick first list totally randomly
-					i = tls.rng1.next() % numLists;
-
-					// Pick the other according to the bitmask
-					unsigned r = tls.rng1.next();
-
-					size_t mask = tls.mask.load(std::memory_order_relaxed);
-					if(mask == 0) {
-						tls.pick.pop.mask_reset++;
-						mask = (1U << numLists) - 1;
-						tls.mask.store(mask, std::memory_order_relaxed);
-					}
-
-					unsigned b = rand_bit(r, mask);
-
-					assertf(b < 64, "%zu %u", mask, b);
-
-					j = b;
-
-					assert(j < numLists);
-				}
-
-				if(auto node = try_pop(i, j)) return node;
-			}
-		#elif VARIANT == SNZI
-			while(snzi.query()) {
-				// Pick two lists at random
-				int i = tls.rng1.next() % numLists;
-				int j = tls.rng1.next() % numLists;
-
-				if(auto node = try_pop(i, j)) return node;
-			}
-
-		#elif VARIANT == BACK
-			while(snzi.query()) {
-				// Pick two lists at random
-				int i = tls.rng2.prev() % numLists;
-				int j = tls.rng2.prev() % numLists;
-
-				if(auto node = try_pop(i, j)) return node;
-			}
-
-		#elif VARIANT == BACKBIAS
-			while(snzi.query()) {
-				// Pick two lists at random
-				int i = idx_from_r(tls.rng2.prev(), true);
-				int j = idx_from_r(tls.rng2.prev(), true);
-
-				if(auto node = try_pop(i, j)) return node;
-			}
-
-		#elif VARIANT == BIAS
-			while(snzi.query()) {
-				// Pick two lists at random
-				unsigned ri = tls.rng1.next();
-				unsigned i;
-				unsigned j = tls.rng1.next();
-				if(0 == (ri & 0xF)) {
-					i = (ri >> 4) % numLists;
-				} else {
-					i = tls.my_queue + ((ri >> 4) % 4);
-					j = tls.my_queue + ((j >> 4) % 4);
-					tls.pick.pop.local++;
-				}
-				i %= numLists;
-				j %= numLists;
-
-				if(auto node = try_pop(i, j)) return node;
-			}
-		#elif VARIANT == SNZM
-			//*
-			while(snzm.query()) {
-				tls.pick.pop.mask_attempt++;
-				unsigned i, j;
-				{
-					// Pick two random number
-					unsigned ri = tls.rng1.next();
-					unsigned rj = tls.rng1.next();
-
-					// Pick two nodes from it
-					unsigned wdxi = ri & snzm.mask;
-					// unsigned wdxj = rj & snzm.mask;
-
-					// Get the masks from the nodes
-					// size_t maski = snzm.masks(wdxi);
-					size_t maskj = snzm.masks(wdxj);
-
-					if(maski == 0 && maskj == 0) continue;
-
-					#if defined(__BMI2__)
-						uint64_t idxsi = _pext_u64(snzm.indexes, maski);
-						// uint64_t idxsj = _pext_u64(snzm.indexes, maskj);
-
-						auto pi = __builtin_popcountll(maski);
-						// auto pj = __builtin_popcountll(maskj);
-
-						ri = pi ? ri & ((pi >> 3) - 1) : 0;
-						rj = pj ? rj & ((pj >> 3) - 1) : 0;
-
-						unsigned bi = (idxsi >> (ri << 3)) & 0xff;
-						unsigned bj = (idxsj >> (rj << 3)) & 0xff;
-					#else
-						unsigned bi = rand_bit(ri >> snzm.depth, maski);
-						unsigned bj = rand_bit(rj >> snzm.depth, maskj);
-					#endif
-
-					i = (bi << snzm.depth) | wdxi;
-					j = (bj << snzm.depth) | wdxj;
-
-					/* paranoid */ assertf(i < numLists, "%u %u", bj, wdxi);
-					/* paranoid */ assertf(j < numLists, "%u %u", bj, wdxj);
-				}
-
-				if(auto node = try_pop(i, j)) return node;
-			}
-			/*/
-			while(snzm.query()) {
-				// Pick two lists at random
-				int i = tls.rng1.next() % numLists;
-				int j = tls.rng1.next() % numLists;
-
-				if(auto node = try_pop(i, j)) return node;
-			}
-			//*/
-		#elif VARIANT == BITMASK
-			int nnempty;
-			while(0 != (nnempty = numNonEmpty)) {
-				tls.pick.pop.mask_attempt++;
-				unsigned i, j;
-				{
-					// Pick two lists at random
-					unsigned num = ((numLists - 1) >> 6) + 1;
-
-					unsigned ri = tls.rng1.next();
-					unsigned rj = tls.rng1.next();
-
-					unsigned wdxi = (ri >> 6u) % num;
-					unsigned wdxj = (rj >> 6u) % num;
-
-					size_t maski = list_mask[wdxi].load(std::memory_order_relaxed);
-					size_t maskj = list_mask[wdxj].load(std::memory_order_relaxed);
-
-					if(maski == 0 && maskj == 0) continue;
-
-					unsigned bi = rand_bit(ri, maski);
-					unsigned bj = rand_bit(rj, maskj);
-
-					assertf(bi < 64, "%zu %u", maski, bi);
-					assertf(bj < 64, "%zu %u", maskj, bj);
-
-					i = bi | (wdxi << 6);
-					j = bj | (wdxj << 6);
-
-					assertf(i < numLists, "%u", wdxi << 6);
-					assertf(j < numLists, "%u", wdxj << 6);
-				}
-
-				if(auto node = try_pop(i, j)) return node;
-			}
-		#else
-			while(numNonEmpty != 0) {
-				// Pick two lists at random
-				int i = tls.rng1.next() % numLists;
-				int j = tls.rng1.next() % numLists;
-
-				if(auto node = try_pop(i, j)) return node;
-			}
-		#endif
-
-		return nullptr;
-    	}
-
-private:
-	node_t * try_pop(unsigned i, unsigned j) {
-		#ifndef NO_STATS
-			tls.pick.pop.attempt++;
-		#endif
-
-		#if VARIANT == DISCOVER
-			if(lists[i].ts() > 0) bts(tls.mask, i); else btr(tls.mask, i);
-			if(lists[j].ts() > 0) bts(tls.mask, j); else btr(tls.mask, j);
-		#endif
-
-		// Pick the bet list
-		int w = i;
-		if( __builtin_expect(lists[j].ts() != 0, true) ) {
-			w = (lists[i].ts() < lists[j].ts()) ? i : j;
-		}
-
-		auto & list = lists[w];
-		// If list looks empty retry
-		if( list.ts() == 0 ) return nullptr;
-
-		// If we can't get the lock retry
-		if( !list.lock.try_lock() ) return nullptr;
-
-		#if VARIANT == VANILLA || VARIANT == BITMASK
-			__attribute__((unused)) int num = numNonEmpty;
-		#endif
-
-		// If list is empty, unlock and retry
-		if( list.ts() == 0 ) {
-			list.lock.unlock();
-			return nullptr;
-		}
-
-		// Actually pop the list
-		node_t * node;
-		bool emptied;
-		std::tie(node, emptied) = list.pop();
-		assert(node);
-
-		if(emptied) {
-			#if VARIANT == DISCOVER
-				size_t qword = w >> 6ull;
-				size_t bit   = w & 63ull;
-				assert(qword == 0);
-				__attribute__((unused)) bool ret = btr(tls.mask, bit);
-				snzm.depart(w);
-			#elif VARIANT == SNZI || VARIANT == BIAS || VARIANT == BACK || VARIANT == BACKBIAS
-				snzi.depart(w);
-			#elif VARIANT == SNZM
-				snzm.depart(w);
-			#elif VARIANT == BITMASK
-				numNonEmpty--;
-				size_t qword = w >> 6ull;
-				size_t bit   = w & 63ull;
-				assert((list_mask[qword] & (1ul << bit)) != 0);
-				__attribute__((unused)) bool ret = btr(list_mask[qword], bit);
-				assert(ret);
-				assert((list_mask[qword] & (1ul << bit)) == 0);
-			#else
-				numNonEmpty--;
-			#endif
-		}
-
-		// Unlock and return
-		list.lock.unlock();
-		#if VARIANT == VANILLA || VARIANT == BITMASK
-			assert(numNonEmpty >= 0);
-		#endif
-		#ifndef NO_STATS
-			tls.pick.pop.success++;
-			#if VARIANT == VANILLA || VARIANT == BITMASK
-				tls.empty.pop.value += num;
-				tls.empty.pop.count += 1;
-			#endif
-		#endif
-		return node;
-	}
-
-	inline unsigned idx_from_r(unsigned r, bool bias) {
-		unsigned i;
-		if(bias) {
-			if(0 == (r & 0x3F)) {
-				i = r >> 6;
-			} else {
-				i = tls.my_queue + ((r >> 6) % 4);
-				tls.pick.push.local++;
-			}
-		} else {
-			i = r;
-		}
-		return i % numLists;
-	}
-
-public:
-
-	static __attribute__((aligned(128))) thread_local struct TLS {
-		Random     rng1 = { unsigned(std::hash<std::thread::id>{}(std::this_thread::get_id()) ^ rdtscl()) };
-		Random     rng2 = { unsigned(std::hash<std::thread::id>{}(std::this_thread::get_id()) ^ rdtscl()) };
-		unsigned   my_queue = (ticket++) * 4;
-		pick_stat  pick;
-		empty_stat empty;
-		__attribute__((aligned(64))) std::atomic_size_t mask = { 0 };
-	} tls;
-
-private:
-	const unsigned numLists;
-    	__attribute__((aligned(64))) std::unique_ptr<intrusive_queue_t<node_t> []> lists;
-private:
-	#if VARIANT == SNZI || VARIANT == BACK
-		snzi_t snzi;
-	#elif VARIANT == BIAS || VARIANT == BACKBIAS
-		#ifdef SNZI_PACKED
-			snzip_t snzi;
-		#else
-			snzi_t snzi;
-		#endif
-	#elif VARIANT == SNZM || VARIANT == DISCOVER
-		snzm_t snzm;
-	#else
-		std::atomic_int numNonEmpty  = { 0 };  // number of non-empty lists
-	#endif
-	#if VARIANT == BITMASK
-		std::atomic_size_t list_mask[7] = { {0}, {0}, {0}, {0}, {0}, {0}, {0} }; // which queues are empty
-	#endif
-
-public:
-	static const constexpr size_t sizeof_queue = sizeof(intrusive_queue_t<node_t>);
-	static std::atomic_uint32_t ticket;
-
-#ifndef NO_STATS
-	static void stats_tls_tally() {
-		global_stats.pick.push.attempt += tls.pick.push.attempt;
-		global_stats.pick.push.success += tls.pick.push.success;
-		global_stats.pick.push.local += tls.pick.push.local;
-		global_stats.pick.pop .attempt += tls.pick.pop.attempt;
-		global_stats.pick.pop .success += tls.pick.pop.success;
-		global_stats.pick.pop .mask_attempt += tls.pick.pop.mask_attempt;
-		global_stats.pick.pop .mask_reset += tls.pick.pop.mask_reset;
-		global_stats.pick.pop .local += tls.pick.pop.local;
-
-		global_stats.qstat.push.value += tls.empty.push.value;
-		global_stats.qstat.push.count += tls.empty.push.count;
-		global_stats.qstat.pop .value += tls.empty.pop .value;
-		global_stats.qstat.pop .count += tls.empty.pop .count;
-	}
-
-private:
-	static struct GlobalStats {
-		struct {
-			struct {
-				std::atomic_size_t attempt = { 0 };
-				std::atomic_size_t success = { 0 };
-				std::atomic_size_t local = { 0 };
-			} push;
-			struct {
-				std::atomic_size_t attempt = { 0 };
-				std::atomic_size_t success = { 0 };
-				std::atomic_size_t mask_attempt = { 0 };
-				std::atomic_size_t mask_reset = { 0 };
-				std::atomic_size_t local = { 0 };
-			} pop;
-		} pick;
-		struct {
-			struct {
-				std::atomic_size_t value = { 0 };
-				std::atomic_size_t count = { 0 };
-			} push;
-			struct {
-				std::atomic_size_t value = { 0 };
-				std::atomic_size_t count = { 0 };
-			} pop;
-		} qstat;
-	} global_stats;
-
-public:
-	static void stats_print(std::ostream & os ) {
-		std::cout << "----- Relaxed List Stats -----" << std::endl;
-
-		const auto & global = global_stats;
-
-		double push_sur = (100.0 * double(global.pick.push.success) / global.pick.push.attempt);
-		double pop_sur  = (100.0 * double(global.pick.pop .success) / global.pick.pop .attempt);
-		double mpop_sur = (100.0 * double(global.pick.pop .success) / global.pick.pop .mask_attempt);
-		double rpop_sur = (100.0 * double(global.pick.pop .success) / global.pick.pop .mask_reset);
-
-		double push_len = double(global.pick.push.attempt     ) / global.pick.push.success;
-		double pop_len  = double(global.pick.pop .attempt     ) / global.pick.pop .success;
-		double mpop_len = double(global.pick.pop .mask_attempt) / global.pick.pop .success;
-		double rpop_len = double(global.pick.pop .mask_reset  ) / global.pick.pop .success;
-
-		os << "Push   Pick   : " << push_sur << " %, len " << push_len << " (" << global.pick.push.attempt      << " / " << global.pick.push.success << ")\n";
-		os << "Pop    Pick   : " << pop_sur  << " %, len " << pop_len  << " (" << global.pick.pop .attempt      << " / " << global.pick.pop .success << ")\n";
-		os << "TryPop Pick   : " << mpop_sur << " %, len " << mpop_len << " (" << global.pick.pop .mask_attempt << " / " << global.pick.pop .success << ")\n";
-		os << "Pop M Reset   : " << rpop_sur << " %, len " << rpop_len << " (" << global.pick.pop .mask_reset   << " / " << global.pick.pop .success << ")\n";
-
-		double avgQ_push = double(global.qstat.push.value) / global.qstat.push.count;
-		double avgQ_pop  = double(global.qstat.pop .value) / global.qstat.pop .count;
-		double avgQ      = double(global.qstat.push.value + global.qstat.pop .value) / (global.qstat.push.count + global.qstat.pop .count);
-		os << "Push   Avg Qs : " << avgQ_push << " (" << global.qstat.push.count << "ops)\n";
-		os << "Pop    Avg Qs : " << avgQ_pop  << " (" << global.qstat.pop .count << "ops)\n";
-		os << "Global Avg Qs : " << avgQ      << " (" << (global.qstat.push.count + global.qstat.pop .count) << "ops)\n";
-
-		os << "Local Push    : " << global.pick.push.local << "\n";
-		os << "Local Pop     : " << global.pick.pop .local << "\n";
-	}
-#endif
-};
Index: doc/theses/thierry_delisle_PhD/code/relaxed_list_layout.cpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/relaxed_list_layout.cpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,23 +1,0 @@
-#define NO_IO
-#define NDEBUG
-#include "relaxed_list.hpp"
-
-struct __attribute__((aligned(64))) Node {
-	static std::atomic_size_t creates;
-	static std::atomic_size_t destroys;
-
-	_LinksFields_t<Node> _links;
-
-	int value;
-	Node(int value): value(value) {
-		creates++;
-	}
-
-	~Node() {
-		destroys++;
-	}
-};
-
-int main() {
-	return sizeof(relaxed_list<Node>) + relaxed_list<Node>::sizeof_queue;
-}
Index: doc/theses/thierry_delisle_PhD/code/runperf.sh
===================================================================
--- doc/theses/thierry_delisle_PhD/code/runperf.sh	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,14 +1,0 @@
-#!/bin/bash
-set -e
-
-name=$1
-event=$2
-
-shift 2
-
-echo "perf record -F 99 -a -g -o raw/$name.data -e $event -- $@ > raw/$name.out"
-perf record -F 99 -a -g -o raw/$name.data -e $event -- $@ > raw/$name.out
-echo "=============================="
-cat raw/$name.out
-echo "=============================="
-./process.sh $name
Index: doc/theses/thierry_delisle_PhD/code/scale.sh
===================================================================
--- doc/theses/thierry_delisle_PhD/code/scale.sh	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,7 +1,0 @@
-#!/bin/bash
-taskset -c 24-31 ./a.out -t  1 -b churn | grep --color -E "(ns|Ops|Running)"
-taskset -c 24-31 ./a.out -t  2 -b churn | grep --color -E "(ns|Ops|Running)"
-taskset -c 24-31 ./a.out -t  4 -b churn | grep --color -E "(ns|Ops|Running)"
-taskset -c 24-31 ./a.out -t  8 -b churn | grep --color -E "(ns|Ops|Running)"
-taskset -c 16-31 ./a.out -t 16 -b churn | grep --color -E "(ns|Ops|Running)"
-taskset -c  0-31 ./a.out -t 32 -b churn | grep --color -E "(ns|Ops|Running)"
Index: doc/theses/thierry_delisle_PhD/code/snzi-packed.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/snzi-packed.hpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,179 +1,0 @@
-#pragma once
-
-#define SNZI_PACKED
-
-#include "utils.hpp"
-
-
-class snzip_t {
-	class node;
-	class node_aligned;
-public:
-	const unsigned mask;
-	const int root;
-	std::unique_ptr<snzip_t::node[]> leafs;
-	std::unique_ptr<snzip_t::node_aligned[]> nodes;
-
-	snzip_t(unsigned depth);
-
-	void arrive(int idx) {
-		// idx >>= 1;
-		idx %= mask;
-		leafs[idx].arrive();
-	}
-
-	void depart(int idx) {
-		// idx >>= 1;
-		idx %= mask;
-		leafs[idx].depart();
-	}
-
-	bool query() const {
-		return nodes[root].query();
-	}
-
-
-private:
-	class __attribute__((aligned(32))) node {
-		friend class snzip_t;
-	private:
-
-		union val_t {
-			static constexpr char Half = -1;
-
-			uint64_t _all;
-			struct __attribute__((packed)) {
-				char cnt;
-				uint64_t ver:56;
-			};
-
-			bool cas(val_t & exp, char _cnt, uint64_t _ver) volatile {
-				val_t t;
-				t.ver = _ver;
-				t.cnt = _cnt;
-				/* paranoid */ assert(t._all == ((_ver << 8) | ((unsigned char)_cnt)));
-				return __atomic_compare_exchange_n(&this->_all, &exp._all, t._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-			}
-
-			bool cas(val_t & exp, const val_t & tar) volatile {
-				return __atomic_compare_exchange_n(&this->_all, &exp._all, tar._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-			}
-
-			val_t() : _all(0) {}
-			val_t(const volatile val_t & o) : _all(o._all) {}
-		};
-
-		//--------------------------------------------------
-		// Hierarchical node
-		void arrive_h() {
-			int undoArr = 0;
-			bool success = false;
-			while(!success) {
-				auto x{ value };
-				/* paranoid */ assert(x.cnt <= 120);
-				if( x.cnt >= 1 ) {
-					if( value.cas(x, x.cnt + 1, x.ver ) ) {
-						success = true;
-					}
-				}
-				/* paranoid */ assert(x.cnt <= 120);
-				if( x.cnt == 0 ) {
-					if( value.cas(x, val_t::Half, x.ver + 1) ) {
-						success = true;
-						x.cnt = val_t::Half;
-						x.ver = x.ver + 1;
-					}
-				}
-				/* paranoid */ assert(x.cnt <= 120);
-				if( x.cnt == val_t::Half ) {
-					/* paranoid */ assert(parent);
-					if(undoArr == 2) {
-						undoArr--;
-					} else {
-						parent->arrive();
-					}
-					if( !value.cas(x, 1, x.ver) ) {
-						undoArr = undoArr + 1;
-					}
-				}
-			}
-
-			for(int i = 0; i < undoArr; i++) {
-				/* paranoid */ assert(parent);
-				parent->depart();
-			}
-		}
-
-		void depart_h() {
-			while(true) {
-				auto x = (const val_t)value;
-				/* paranoid */ assertf(x.cnt >= 1, "%d", x.cnt);
-				if( value.cas( x, x.cnt - 1, x.ver ) ) {
-					if( x.cnt == 1 ) {
-						/* paranoid */ assert(parent);
-						parent->depart();
-					}
-					return;
-				}
-			}
-		}
-
-		//--------------------------------------------------
-		// Root node
-		void arrive_r() {
-			__atomic_fetch_add(&value._all, 1, __ATOMIC_SEQ_CST);
-		}
-
-		void depart_r() {
-			__atomic_fetch_sub(&value._all, 1, __ATOMIC_SEQ_CST);
-		}
-
-	private:
-		volatile val_t value;
-		class node * parent = nullptr;
-
-		bool is_root() {
-			return parent == nullptr;
-		}
-
-	public:
-		void arrive() {
-			if(is_root()) arrive_r();
-			else arrive_h();
-		}
-
-		void depart() {
-			if(is_root()) depart_r();
-			else depart_h();
-		}
-
-		bool query() {
-			/* paranoid */ assert(is_root());
-			return value._all > 0;
-		}
-	};
-
-	class __attribute__((aligned(128))) node_aligned : public node {};
-};
-
-snzip_t::snzip_t(unsigned depth)
-	: mask( std::pow(2, depth) )
-	, root( ((std::pow(2, depth + 1) - 1) / (2 -1)) - 1 - mask )
-	, leafs(new node[ mask ]())
-	, nodes(new node_aligned[ root + 1 ]())
-{
-	int width = std::pow(2, depth);
-	int hwdith = width / 2;
-	std::cout << "SNZI: " << depth << "x" << width << "(" << mask - 1 << ") " << (sizeof(snzip_t::node) * (root + 1)) << " bytes" << std::endl;
-	for(int i = 0; i < width; i++) {
-		int idx = i % hwdith;
-		std::cout << i << " -> " << idx + width << std::endl;
-		leafs[i].parent = &nodes[ idx ];
-	}
-
-	for(int i = 0; i < root; i++) {
-		int idx = (i / 2) + hwdith;
-		std::cout << i + width << " -> " << idx + width << std::endl;
-		nodes[i].parent = &nodes[ idx ];
-	}
-}
Index: doc/theses/thierry_delisle_PhD/code/snzi.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/snzi.hpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,164 +1,0 @@
-#pragma once
-
-#include "utils.hpp"
-
-
-class snzi_t {
-	class node;
-public:
-	const unsigned mask;
-	const int root;
-	std::unique_ptr<snzi_t::node[]> nodes;
-
-	snzi_t(unsigned depth, unsigned base = 2);
-
-	void arrive(int idx) {
-		idx >>= 2;
-		idx %= mask;
-		nodes[idx].arrive();
-	}
-
-	void depart(int idx) {
-		idx >>= 2;
-		idx %= mask;
-		nodes[idx].depart();
-	}
-
-	bool query() const {
-		return nodes[root].query();
-	}
-
-
-private:
-	class __attribute__((aligned(128))) node {
-		friend class snzi_t;
-	private:
-
-		union val_t {
-			static constexpr char Half = -1;
-
-			uint64_t _all;
-			struct __attribute__((packed)) {
-				char cnt;
-				uint64_t ver:56;
-			};
-
-			bool cas(val_t & exp, char _cnt, uint64_t _ver) volatile {
-				val_t t;
-				t.ver = _ver;
-				t.cnt = _cnt;
-				/* paranoid */ assert(t._all == ((_ver << 8) | ((unsigned char)_cnt)));
-				return __atomic_compare_exchange_n(&this->_all, &exp._all, t._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-			}
-
-			bool cas(val_t & exp, const val_t & tar) volatile {
-				return __atomic_compare_exchange_n(&this->_all, &exp._all, tar._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-			}
-
-			val_t() : _all(0) {}
-			val_t(const volatile val_t & o) : _all(o._all) {}
-		};
-
-		//--------------------------------------------------
-		// Hierarchical node
-		void arrive_h() {
-			int undoArr = 0;
-			bool success = false;
-			while(!success) {
-				auto x{ value };
-				/* paranoid */ assert(x.cnt <= 120);
-				if( x.cnt >= 1 ) {
-					if( value.cas(x, x.cnt + 1, x.ver ) ) {
-						success = true;
-					}
-				}
-				/* paranoid */ assert(x.cnt <= 120);
-				if( x.cnt == 0 ) {
-					if( value.cas(x, val_t::Half, x.ver + 1) ) {
-						success = true;
-						x.cnt = val_t::Half;
-						x.ver = x.ver + 1;
-					}
-				}
-				/* paranoid */ assert(x.cnt <= 120);
-				if( x.cnt == val_t::Half ) {
-					/* paranoid */ assert(parent);
-					if(undoArr == 2) {
-						undoArr--;
-					} else {
-						parent->arrive();
-					}
-					if( !value.cas(x, 1, x.ver) ) {
-						undoArr = undoArr + 1;
-					}
-				}
-			}
-
-			for(int i = 0; i < undoArr; i++) {
-				/* paranoid */ assert(parent);
-				parent->depart();
-			}
-		}
-
-		void depart_h() {
-			while(true) {
-				auto x = (const val_t)value;
-				/* paranoid */ assertf(x.cnt >= 1, "%d", x.cnt);
-				if( value.cas( x, x.cnt - 1, x.ver ) ) {
-					if( x.cnt == 1 ) {
-						/* paranoid */ assert(parent);
-						parent->depart();
-					}
-					return;
-				}
-			}
-		}
-
-		//--------------------------------------------------
-		// Root node
-		void arrive_r() {
-			__atomic_fetch_add(&value._all, 1, __ATOMIC_SEQ_CST);
-		}
-
-		void depart_r() {
-			__atomic_fetch_sub(&value._all, 1, __ATOMIC_SEQ_CST);
-		}
-
-	private:
-		volatile val_t value;
-		class node * parent = nullptr;
-
-		bool is_root() {
-			return parent == nullptr;
-		}
-
-	public:
-		void arrive() {
-			if(is_root()) arrive_r();
-			else arrive_h();
-		}
-
-		void depart() {
-			if(is_root()) depart_r();
-			else depart_h();
-		}
-
-		bool query() {
-			/* paranoid */ assert(is_root());
-			return value._all > 0;
-		}
-	};
-};
-
-snzi_t::snzi_t(unsigned depth, unsigned base)
-	: mask( std::pow(base, depth) )
-	, root( ((std::pow(base, depth + 1) - 1) / (base -1)) - 1 )
-	, nodes(new node[ root + 1 ]())
-{
-	int width = std::pow(base, depth);
-	std::cout << "SNZI: " << depth << "x" << width << "(" << mask - 1 << ") " << (sizeof(snzi_t::node) * (root + 1)) << " bytes" << std::endl;
-	for(int i = 0; i < root; i++) {
-		std::cout << i << " -> " << (i / base) + width << std::endl;
-		nodes[i].parent = &nodes[(i / base) + width];
-	}
-}
Index: doc/theses/thierry_delisle_PhD/code/snzm.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/snzm.hpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,213 +1,0 @@
-#pragma once
-
-#include "utils.hpp"
-
-
-class snzm_t {
-	class node;
-public:
-	const unsigned depth;
-	const unsigned mask;
-	const int root;
-	std::unique_ptr<snzm_t::node[]> nodes;
-
-	#if defined(__BMI2__)
-		const uint64_t indexes = 0x0706050403020100;
-	#endif
-
-	snzm_t(unsigned numLists);
-
-	void arrive(int idx) {
-		int i = idx & mask;
-		nodes[i].arrive( idx >> depth);
-	}
-
-	void depart(int idx) {
-		int i = idx & mask;
-		nodes[i].depart( idx >> depth );
-	}
-
-	bool query() const {
-		return nodes[root].query();
-	}
-
-	uint64_t masks( unsigned node ) {
-		/* paranoid */ assert( (node & mask) == node );
-		#if defined(__BMI2__)
-			return nodes[node].mask_all;
-		#else
-			return nodes[node].mask;
-		#endif
-	}
-
-private:
-	class __attribute__((aligned(128))) node {
-		friend class snzm_t;
-	private:
-
-		union val_t {
-			static constexpr char Half = -1;
-
-			uint64_t _all;
-			struct __attribute__((packed)) {
-				char cnt;
-				uint64_t ver:56;
-			};
-
-			bool cas(val_t & exp, char _cnt, uint64_t _ver) volatile {
-				val_t t;
-				t.ver = _ver;
-				t.cnt = _cnt;
-				/* paranoid */ assert(t._all == ((_ver << 8) | ((unsigned char)_cnt)));
-				return __atomic_compare_exchange_n(&this->_all, &exp._all, t._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-			}
-
-			bool cas(val_t & exp, const val_t & tar) volatile {
-				return __atomic_compare_exchange_n(&this->_all, &exp._all, tar._all, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-			}
-
-			val_t() : _all(0) {}
-			val_t(const volatile val_t & o) : _all(o._all) {}
-		};
-
-		//--------------------------------------------------
-		// Hierarchical node
-		void arrive_h() {
-			int undoArr = 0;
-			bool success = false;
-			while(!success) {
-				auto x{ value };
-				/* paranoid */ assert(x.cnt <= 120);
-				if( x.cnt >= 1 ) {
-					if( value.cas(x, x.cnt + 1, x.ver ) ) {
-						success = true;
-					}
-				}
-				/* paranoid */ assert(x.cnt <= 120);
-				if( x.cnt == 0 ) {
-					if( value.cas(x, val_t::Half, x.ver + 1) ) {
-						success = true;
-						x.cnt = val_t::Half;
-						x.ver = x.ver + 1;
-					}
-				}
-				/* paranoid */ assert(x.cnt <= 120);
-				if( x.cnt == val_t::Half ) {
-					/* paranoid */ assert(parent);
-					parent->arrive();
-					if( !value.cas(x, 1, x.ver) ) {
-						undoArr = undoArr + 1;
-					}
-				}
-			}
-
-			for(int i = 0; i < undoArr; i++) {
-				/* paranoid */ assert(parent);
-				parent->depart();
-			}
-		}
-
-		void depart_h() {
-			while(true) {
-				auto x = (const val_t)value;
-				/* paranoid */ assertf(x.cnt >= 1, "%d", x.cnt);
-				if( value.cas( x, x.cnt - 1, x.ver ) ) {
-					if( x.cnt == 1 ) {
-						/* paranoid */ assert(parent);
-						parent->depart();
-					}
-					return;
-				}
-			}
-		}
-
-		//--------------------------------------------------
-		// Root node
-		void arrive_r() {
-			__atomic_fetch_add(&value._all, 1, __ATOMIC_SEQ_CST);
-		}
-
-		void depart_r() {
-			__atomic_fetch_sub(&value._all, 1, __ATOMIC_SEQ_CST);
-		}
-
-		//--------------------------------------------------
-		// Interface node
-		void arrive() {
-			/* paranoid */ assert(!is_leaf);
-			if(is_root()) arrive_r();
-			else arrive_h();
-		}
-
-		void depart() {
-			/* paranoid */ assert(!is_leaf);
-			if(is_root()) depart_r();
-			else depart_h();
-		}
-
-	private:
-		volatile val_t value;
-		#if defined(__BMI2__)
-			union __attribute__((packed)) {
-				volatile uint8_t mask[8];
-				volatile uint64_t mask_all;
-			};
-		#else
-			volatile size_t mask = 0;
-		#endif
-
-		class node * parent = nullptr;
-		bool is_leaf = false;
-
-		bool is_root() {
-			return parent == nullptr;
-		}
-
-	public:
-		void arrive( int bit ) {
-			/* paranoid */ assert( is_leaf );
-
-			arrive_h();
-			#if defined(__BMI2__)
-				/* paranoid */ assert( bit < 8 );
-				mask[bit] = 0xff;
-			#else
-				/* paranoid */ assert( (mask & ( 1 << bit )) == 0 );
-				__atomic_fetch_add( &mask, 1 << bit, __ATOMIC_RELAXED );
-			#endif
-
-		}
-
-		void depart( int bit ) {
-			/* paranoid */ assert( is_leaf );
-
-			#if defined(__BMI2__)
-				/* paranoid */ assert( bit < 8 );
-				mask[bit] = 0x00;
-			#else
-				/* paranoid */ assert( (mask & ( 1 << bit )) != 0 );
-				__atomic_fetch_sub( &mask, 1 << bit, __ATOMIC_RELAXED );
-			#endif
-			depart_h();
-		}
-
-		bool query() {
-			/* paranoid */ assert(is_root());
-			return value._all > 0;
-		}
-	};
-};
-
-snzm_t::snzm_t(unsigned numLists)
-	: depth( std::log2( numLists / 8 ) )
-	, mask( (1 << depth) - 1 )
-	, root( (1 << (depth + 1)) - 2 )
-	, nodes(new node[ root + 1 ]())
-{
-	int width = 1 << depth;
-	std::cout << "SNZI with Mask: " << depth << "x" << width << "(" << mask << ")" << std::endl;
-	for(int i = 0; i < root; i++) {
-		nodes[i].is_leaf = i < width;
-		nodes[i].parent = &nodes[(i / 2) + width ];
-	}
-}
Index: doc/theses/thierry_delisle_PhD/code/utils.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/utils.hpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,250 +1,0 @@
-#pragma once
-
-#include <cassert>
-#include <cstddef>
-#include <atomic>
-#include <chrono>
-#include <fstream>
-#include <iostream>
-
-#include <unistd.h>
-#include <sys/sysinfo.h>
-
-#include <x86intrin.h>
-
-// Barrier from
-class barrier_t {
-public:
-	barrier_t(size_t total)
-		: waiting(0)
-		, total(total)
-	{}
-
-	void wait(unsigned) {
-		size_t target = waiting++;
-		target = (target - (target % total)) + total;
-		while(waiting < target)
-			asm volatile("pause");
-
-		assert(waiting < (1ul << 60));
-    	}
-
-private:
-	std::atomic<size_t> waiting;
-	size_t total;
-};
-
-// class Random {
-// private:
-// 	unsigned int seed;
-// public:
-// 	Random(int seed) {
-// 		this->seed = seed;
-// 	}
-
-// 	/** returns pseudorandom x satisfying 0 <= x < n. **/
-// 	unsigned int next() {
-// 		seed ^= seed << 6;
-// 		seed ^= seed >> 21;
-// 		seed ^= seed << 7;
-// 		return seed;
-//     	}
-// };
-
-constexpr uint64_t extendedEuclidY(uint64_t a, uint64_t b);
-constexpr uint64_t extendedEuclidX(uint64_t a, uint64_t b){
-    return (b==0) ? 1 : extendedEuclidY(b, a - b * (a / b));
-}
-constexpr uint64_t extendedEuclidY(uint64_t a, uint64_t b){
-    return (b==0) ? 0 : extendedEuclidX(b, a - b * (a / b)) - (a / b) * extendedEuclidY(b, a - b * (a / b));
-}
-
-class Random {
-private:
-	uint64_t x;
-
-	static constexpr const uint64_t M  = 1ul << 48ul;
-	static constexpr const uint64_t A  = 25214903917;
-	static constexpr const uint64_t C  = 11;
-	static constexpr const uint64_t D  = 16;
-
-public:
-	static constexpr const uint64_t m  = M;
-	static constexpr const uint64_t a  = A;
-	static constexpr const uint64_t c  = C;
-	static constexpr const uint64_t d  = D;
-	static constexpr const uint64_t ai = extendedEuclidX(A, M);
-public:
-	Random(unsigned int seed) {
-		this->x = seed * a;
-	}
-
-	/** returns pseudorandom x satisfying 0 <= x < n. **/
-	unsigned int next() {
-		//nextx = (a * x + c) % m;
-		x = (A * x + C) & (M - 1);
-		return x >> D;
-	}
-	unsigned int prev() {
-		//prevx = (ainverse * (x - c)) mod m
-		unsigned int r = x >> D;
-		x = ai * (x - C) & (M - 1);
-		return r;
-	}
-
-	void set_raw_state(uint64_t _x) {
-		this->x = _x;
-	}
-
-	uint64_t get_raw_state() {
-		return this->x;
-	}
-};
-
-static inline long long rdtscl(void) {
-    unsigned int lo, hi;
-    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
-    return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
-}
-
-static inline void affinity(int tid) {
-	static int cpus = get_nprocs();
-
-	cpu_set_t  mask;
-	CPU_ZERO(&mask);
-	int cpu = cpus - tid;  // Set CPU affinity to tid, starting from the end
-	CPU_SET(cpu, &mask);
-	auto result = sched_setaffinity(0, sizeof(mask), &mask);
-	if(result != 0) {
-		std::cerr << "Affinity set failed with " << result<< ", wanted " << cpu << std::endl;
-	}
-}
-
-static const constexpr std::size_t cache_line_size = 64;
-static inline void check_cache_line_size() {
-	std::cout << "Checking cache line size" << std::endl;
-	const std::string cache_file = "/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size";
-
-	std::ifstream ifs (cache_file, std::ifstream::in);
-
-	if(!ifs.good()) {
-		std::cerr << "Could not open file to check cache line size" << std::endl;
-		std::cerr << "Looking for: " << cache_file << std::endl;
-		std::exit(2);
-	}
-
-	size_t got;
-	ifs >> got;
-
-	ifs.close();
-
-	if(cache_line_size != got) {
-		std::cerr << "Cache line has incorrect size : " << got << std::endl;
-		std::exit(1);
-	}
-
-	std::cout << "Done" << std::endl;
-}
-
-using Clock = std::chrono::high_resolution_clock;
-using duration_t = std::chrono::duration<double>;
-using std::chrono::nanoseconds;
-
-template<typename Ratio, typename T>
-T duration_cast(T seconds) {
-	return std::chrono::duration_cast<std::chrono::duration<T, Ratio>>(std::chrono::duration<T>(seconds)).count();
-}
-
-static inline unsigned rand_bit(unsigned rnum, size_t mask) __attribute__((artificial));
-static inline unsigned rand_bit(unsigned rnum, size_t mask) {
-	unsigned bit = mask ? rnum % __builtin_popcountl(mask) : 0;
-#if !defined(__BMI2__)
-	uint64_t v = mask;   // Input value to find position with rank r.
-	unsigned int r = bit + 1;// Input: bit's desired rank [1-64].
-	unsigned int s;      // Output: Resulting position of bit with rank r [1-64]
-	uint64_t a, b, c, d; // Intermediate temporaries for bit count.
-	unsigned int t;      // Bit count temporary.
-
-	// Do a normal parallel bit count for a 64-bit integer,
-	// but store all intermediate steps.
-	a =  v - ((v >> 1) & ~0UL/3);
-	b = (a & ~0UL/5) + ((a >> 2) & ~0UL/5);
-	c = (b + (b >> 4)) & ~0UL/0x11;
-	d = (c + (c >> 8)) & ~0UL/0x101;
-
-
-	t = (d >> 32) + (d >> 48);
-	// Now do branchless select!
-	s  = 64;
-	s -= ((t - r) & 256) >> 3; r -= (t & ((t - r) >> 8));
-	t  = (d >> (s - 16)) & 0xff;
-	s -= ((t - r) & 256) >> 4; r -= (t & ((t - r) >> 8));
-	t  = (c >> (s - 8)) & 0xf;
-	s -= ((t - r) & 256) >> 5; r -= (t & ((t - r) >> 8));
-	t  = (b >> (s - 4)) & 0x7;
-	s -= ((t - r) & 256) >> 6; r -= (t & ((t - r) >> 8));
-	t  = (a >> (s - 2)) & 0x3;
-	s -= ((t - r) & 256) >> 7; r -= (t & ((t - r) >> 8));
-	t  = (v >> (s - 1)) & 0x1;
-	s -= ((t - r) & 256) >> 8;
-	return s - 1;
-#else
-	uint64_t picked = _pdep_u64(1ul << bit, mask);
-	return picked ? __builtin_ctzl(picked) : 0;
-#endif
-}
-
-struct spinlock_t {
-	std::atomic_bool ll = { false };
-
-	inline void lock() {
-		while( __builtin_expect(ll.exchange(true),false) ) {
-			while(ll.load(std::memory_order_relaxed))
-				asm volatile("pause");
-		}
-	}
-
-	inline bool try_lock() {
-		return false == ll.exchange(true);
-	}
-
-	inline void unlock() {
-		ll.store(false, std::memory_order_release);
-	}
-
-	inline explicit operator bool() {
-		return ll.load(std::memory_order_relaxed);
-	}
-};
-
-static inline bool bts(std::atomic_size_t & target, size_t bit ) {
-	//*
-	int result = 0;
-	asm volatile(
-		"LOCK btsq %[bit], %[target]\n\t"
-		:"=@ccc" (result)
-		: [target] "m" (target), [bit] "r" (bit)
-	);
-	return result != 0;
-	/*/
-	size_t mask = 1ul << bit;
-	size_t ret = target.fetch_or(mask, std::memory_order_relaxed);
-	return (ret & mask) != 0;
-	//*/
-}
-
-static inline bool btr(std::atomic_size_t & target, size_t bit ) {
-	//*
-	int result = 0;
-	asm volatile(
-		"LOCK btrq %[bit], %[target]\n\t"
-		:"=@ccc" (result)
-		: [target] "m" (target), [bit] "r" (bit)
-	);
-	return result != 0;
-	/*/
-	size_t mask = 1ul << bit;
-	size_t ret = target.fetch_and(~mask, std::memory_order_relaxed);
-	return (ret & mask) != 0;
-	//*/
-}
Index: doc/theses/thierry_delisle_PhD/code/work_stealing.hpp
===================================================================
--- doc/theses/thierry_delisle_PhD/code/work_stealing.hpp	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ 	(revision )
@@ -1,222 +1,0 @@
-#pragma once
-#define LIST_VARIANT work_stealing
-
-#include <cmath>
-#include <iomanip>
-#include <memory>
-#include <mutex>
-#include <type_traits>
-
-#include "assert.hpp"
-#include "utils.hpp"
-#include "links.hpp"
-#include "snzi.hpp"
-
-using namespace std;
-
-template<typename node_t>
-class __attribute__((aligned(128))) work_stealing {
-	static_assert(std::is_same<decltype(node_t::_links), _LinksFields_t<node_t>>::value, "Node must have a links field");
-
-public:
-	static const char * name() {
-		return "Work Stealing";
-	}
-
-	work_stealing(unsigned _numThreads, unsigned)
-		: numThreads(_numThreads)
-		, lists(new intrusive_queue_t<node_t>[numThreads])
-		, snzi( std::log2( numThreads / 2 ), 2 )
-
-	{
-		std::cout << "Constructing Work Stealer with " << numThreads << std::endl;
-	}
-
-	~work_stealing() {
-		std::cout << "Destroying Work Stealer" << std::endl;
-		lists.reset();
-	}
-
-	__attribute__((noinline, hot)) void push(node_t * node) {
-		node->_links.ts = rdtscl();
-		if( node->_links.hint > numThreads ) {
-			node->_links.hint = tls.rng.next() % numThreads;
-			tls.stat.push.nhint++;
-		}
-
-		unsigned i = node->_links.hint;
-		auto & list = lists[i];
-		list.lock.lock();
-
-		if(list.push( node )) {
-			snzi.arrive(i);
-		}
-
-		list.lock.unlock();
-	}
-
-	__attribute__((noinline, hot)) node_t * pop() {
-		node_t * node;
-		while(true) {
-			if(!snzi.query()) {
-				return nullptr;
-			}
-
-			{
-				unsigned i = tls.my_queue;
-				auto & list = lists[i];
-				if( list.ts() != 0 ) {
-					list.lock.lock();
-					if((node = try_pop(i))) {
-						tls.stat.pop.local.success++;
-						break;
-					}
-					else {
-						tls.stat.pop.local.elock++;
-					}
-				}
-				else {
-					tls.stat.pop.local.espec++;
-				}
-			}
-
-			tls.stat.pop.steal.tried++;
-
-			int i = tls.rng.next() % numThreads;
-			auto & list = lists[i];
-			if( list.ts() == 0 ) {
-				tls.stat.pop.steal.empty++;
-				continue;
-			}
-
-			if( !list.lock.try_lock() ) {
-				tls.stat.pop.steal.locked++;
-				continue;
-			}
-
-			if((node = try_pop(i))) {
-				tls.stat.pop.steal.success++;
-				break;
-			}
-		}
-
-		#if defined(READ)
-			const unsigned f = READ;
-			if(0 == (tls.it % f)) {
-				unsigned i = tls.it / f;
-				lists[i % numThreads].ts();
-			}
-			// lists[tls.it].ts();
-			tls.it++;
-		#endif
-
-
-		return node;
-	}
-
-private:
-	node_t * try_pop(unsigned i) {
-		auto & list = lists[i];
-
-		// If list is empty, unlock and retry
-		if( list.ts() == 0 ) {
-			list.lock.unlock();
-			return nullptr;
-		}
-
-			// Actually pop the list
-		node_t * node;
-		bool emptied;
-		std::tie(node, emptied) = list.pop();
-		assert(node);
-
-		if(emptied) {
-			snzi.depart(i);
-		}
-
-		// Unlock and return
-		list.lock.unlock();
-		return node;
-	}
-
-
-public:
-
-	static std::atomic_uint32_t ticket;
-	static __attribute__((aligned(128))) thread_local struct TLS {
-		Random     rng = { int(rdtscl()) };
-		unsigned   my_queue = ticket++;
-		#if defined(READ)
-			unsigned it = 0;
-		#endif
-		struct {
-			struct {
-				std::size_t nhint = { 0 };
-			} push;
-			struct {
-				struct {
-					std::size_t success = { 0 };
-					std::size_t espec = { 0 };
-					std::size_t elock = { 0 };
-				} local;
-				struct {
-					std::size_t tried   = { 0 };
-					std::size_t locked  = { 0 };
-					std::size_t empty   = { 0 };
-					std::size_t success = { 0 };
-				} steal;
-			} pop;
-		} stat;
-	} tls;
-
-private:
-	const unsigned numThreads;
-    	std::unique_ptr<intrusive_queue_t<node_t> []> lists;
-	__attribute__((aligned(64))) snzi_t snzi;
-
-#ifndef NO_STATS
-private:
-	static struct GlobalStats {
-		struct {
-			std::atomic_size_t nhint = { 0 };
-		} push;
-		struct {
-			struct {
-				std::atomic_size_t success = { 0 };
-				std::atomic_size_t espec = { 0 };
-				std::atomic_size_t elock = { 0 };
-			} local;
-			struct {
-				std::atomic_size_t tried   = { 0 };
-				std::atomic_size_t locked  = { 0 };
-				std::atomic_size_t empty   = { 0 };
-				std::atomic_size_t success = { 0 };
-			} steal;
-		} pop;
-	} global_stats;
-
-public:
-	static void stats_tls_tally() {
-		global_stats.push.nhint += tls.stat.push.nhint;
-		global_stats.pop.local.success += tls.stat.pop.local.success;
-		global_stats.pop.local.espec   += tls.stat.pop.local.espec  ;
-		global_stats.pop.local.elock   += tls.stat.pop.local.elock  ;
-		global_stats.pop.steal.tried   += tls.stat.pop.steal.tried  ;
-		global_stats.pop.steal.locked  += tls.stat.pop.steal.locked ;
-		global_stats.pop.steal.empty   += tls.stat.pop.steal.empty  ;
-		global_stats.pop.steal.success += tls.stat.pop.steal.success;
-	}
-
-	static void stats_print(std::ostream & os ) {
-		std::cout << "----- Work Stealing Stats -----" << std::endl;
-
-		double stealSucc = double(global_stats.pop.steal.success) / global_stats.pop.steal.tried;
-		os << "Push to new Q : " << std::setw(15) << global_stats.push.nhint << "\n";
-		os << "Local Pop     : " << std::setw(15) << global_stats.pop.local.success << "\n";
-		os << "Steal Pop     : " << std::setw(15) << global_stats.pop.steal.success << "(" << global_stats.pop.local.espec << "s, " << global_stats.pop.local.elock << "l)\n";
-		os << "Steal Success : " << std::setw(15) << stealSucc << "(" << global_stats.pop.steal.tried << " tries)\n";
-		os << "Steal Fails   : " << std::setw(15) << global_stats.pop.steal.empty << "e, " << global_stats.pop.steal.locked << "l\n";
-	}
-private:
-#endif
-};
Index: doc/user/Makefile
===================================================================
--- doc/user/Makefile	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/user/Makefile	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -55,5 +55,5 @@
 
 ${DOCUMENT} : ${BASE}.ps
-	ps2pdf $<
+	ps2pdf -dPDFSETTINGS=/prepress $<
 
 ${BASE}.ps : ${BASE}.dvi
Index: doc/user/user.tex
===================================================================
--- doc/user/user.tex	(revision 33c3dedce60b8ef643a2e67759069cd5d6be774b)
+++ doc/user/user.tex	(revision 223a63306c486d5cffd07812bcf08afea940d3c4)
@@ -11,6 +11,6 @@
 %% Created On       : Wed Apr  6 14:53:29 2016
 %% Last Modified By : Peter A. Buhr
-%% Last Modified On : Fri Mar  6 13:34:52 2020
-%% Update Count     : 3924
+%% Last Modified On : Mon Oct  5 08:57:29 2020
+%% Update Count     : 3998
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
@@ -30,35 +30,12 @@
 \usepackage{upquote}									% switch curled `'" to straight
 \usepackage{calc}
-\usepackage{xspace}
 \usepackage{varioref}									% extended references
-\usepackage{listings}									% format program code
+\usepackage[labelformat=simple,aboveskip=0pt,farskip=0pt]{subfig}
+\renewcommand{\thesubfigure}{\alph{subfigure})}
 \usepackage[flushmargin]{footmisc}						% support label/reference in footnote
 \usepackage{latexsym}                                   % \Box glyph
 \usepackage{mathptmx}                                   % better math font with "times"
 \usepackage[usenames]{color}
-\input{common}                                          % common CFA document macros
-\usepackage[dvips,plainpages=false,pdfpagelabels,pdfpagemode=UseNone,colorlinks=true,pagebackref=true,linkcolor=blue,citecolor=blue,urlcolor=blue,pagebackref=true,breaklinks=true]{hyperref}
-\usepackage{breakurl}
-
-\usepackage[pagewise]{lineno}
-\renewcommand{\linenumberfont}{\scriptsize\sffamily}
-\usepackage[firstpage]{draftwatermark}
-\SetWatermarkLightness{0.9}
-
-% Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore
-% removes it as a variable-name character so keywords in variables are highlighted. MUST APPEAR
-% AFTER HYPERREF.
-\renewcommand{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.075ex}}}
-
-\setlength{\topmargin}{-0.45in}							% move running title into header
-\setlength{\headsep}{0.25in}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\CFAStyle												% use default CFA format-style
-\lstnewenvironment{C++}[1][]                            % use C++ style
-{\lstset{language=C++,moredelim=**[is][\protect\color{red}]{®}{®},#1}}
-{}
-
+\newcommand{\CFALatin}{}
 % inline code ©...© (copyright symbol) emacs: C-q M-)
 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-.
@@ -68,4 +45,32 @@
 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^
 % math escape $...$ (dollar symbol)
+\input{common}                                          % common CFA document macros
+\usepackage[dvips,plainpages=false,pdfpagelabels,pdfpagemode=UseNone,colorlinks=true,pagebackref=true,linkcolor=blue,citecolor=blue,urlcolor=blue,pagebackref=true,breaklinks=true]{hyperref}
+\usepackage{breakurl}
+
+\renewcommand\footnoterule{\kern -3pt\rule{0.3\linewidth}{0.15pt}\kern 2pt}
+
+\usepackage[pagewise]{lineno}
+\renewcommand{\linenumberfont}{\scriptsize\sffamily}
+\usepackage[firstpage]{draftwatermark}
+\SetWatermarkLightness{0.9}
+
+% Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore
+% removes it as a variable-name character so keywords in variables are highlighted. MUST APPEAR
+% AFTER HYPERREF.
+\renewcommand{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.075ex}}}
+
+\setlength{\topmargin}{-0.45in}							% move running title into header
+\setlength{\headsep}{0.25in}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\CFAStyle												% use default CFA format-style
+\lstnewenvironment{C++}[1][]                            % use C++ style
+{\lstset{language=C++,moredelim=**[is][\protect\color{red}]{®}{®},#1}}
+{}
+
+\newsavebox{\myboxA}
+\newsavebox{\myboxB}
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -79,6 +84,4 @@
 \newcommand{\G}[1]{{\Textbf[OliveGreen]{#1}}}
 \newcommand{\KWC}{K-W C\xspace}
-
-\newsavebox{\LstBox}
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -253,8 +256,8 @@
 
 The signature feature of \CFA is \emph{\Index{overload}able} \Index{parametric-polymorphic} functions~\cite{forceone:impl,Cormack90,Duggan96} with functions generalized using a ©forall© clause (giving the language its name):
-\begin{lstlisting}
+\begin{cfa}
 ®forall( otype T )® T identity( T val ) { return val; }
 int forty_two = identity( 42 ); §\C{// T is bound to int, forty\_two == 42}§
-\end{lstlisting}
+\end{cfa}
 % extending the C type system with parametric polymorphism and overloading, as opposed to the \Index*[C++]{\CC{}} approach of object-oriented extensions.
 \CFA{}\hspace{1pt}'s polymorphism was originally formalized by \Index*{Glen Ditchfield}\index{Ditchfield, Glen}~\cite{Ditchfield92}, and first implemented by \Index*{Richard Bilson}\index{Bilson, Richard}~\cite{Bilson03}.
@@ -275,5 +278,5 @@
 \begin{comment}
 A simple example is leveraging the existing type-unsafe (©void *©) C ©bsearch© to binary search a sorted floating array:
-\begin{lstlisting}
+\begin{cfa}
 void * bsearch( const void * key, const void * base, size_t dim, size_t size,
 				int (* compar)( const void *, const void * ));
@@ -284,7 +287,7 @@
 double key = 5.0, vals[10] = { /* 10 sorted floating values */ };
 double * val = (double *)bsearch( &key, vals, 10, sizeof(vals[0]), comp ); §\C{// search sorted array}§
-\end{lstlisting}
+\end{cfa}
 which can be augmented simply with a polymorphic, type-safe, \CFA-overloaded wrappers:
-\begin{lstlisting}
+\begin{cfa}
 forall( otype T | { int ?<?( T, T ); } ) T * bsearch( T key, const T * arr, size_t size ) {
 	int comp( const void * t1, const void * t2 ) { /* as above with double changed to T */ }
@@ -297,5 +300,5 @@
 double * val = bsearch( 5.0, vals, 10 ); §\C{// selection based on return type}§
 int posn = bsearch( 5.0, vals, 10 );
-\end{lstlisting}
+\end{cfa}
 The nested function ©comp© provides the hidden interface from typed \CFA to untyped (©void *©) C, plus the cast of the result.
 Providing a hidden ©comp© function in \CC is awkward as lambdas do not use C calling-conventions and template declarations cannot appear at block scope.
@@ -305,10 +308,10 @@
 \CFA has replacement libraries condensing hundreds of existing C functions into tens of \CFA overloaded functions, all without rewriting the actual computations.
 For example, it is possible to write a type-safe \CFA wrapper ©malloc© based on the C ©malloc©:
-\begin{lstlisting}
+\begin{cfa}
 forall( dtype T | sized(T) ) T * malloc( void ) { return (T *)malloc( sizeof(T) ); }
 int * ip = malloc(); §\C{// select type and size from left-hand side}§
 double * dp = malloc();
 struct S {...} * sp = malloc();
-\end{lstlisting}
+\end{cfa}
 where the return type supplies the type/size of the allocation, which is impossible in most type systems.
 \end{comment}
@@ -943,39 +946,4 @@
 the same level as a ©case© clause; the target label may be case ©default©, but only associated
 with the current ©switch©/©choose© statement.
-
-
-\subsection{Loop Control}
-
-The ©for©/©while©/©do-while© loop-control allows empty or simplified ranges (see Figure~\ref{f:LoopControlExamples}).
-\begin{itemize}
-\item
-The loop index is polymorphic in the type of the comparison value N (when the start value is implicit) or the start value M.
-\item
-An empty conditional implies comparison value of ©1© (true).
-\item
-A comparison N is implicit up-to exclusive range [0,N©®)®©.
-\item
-A comparison ©=© N is implicit up-to inclusive range [0,N©®]®©.
-\item
-The up-to range M ©~©\index{~@©~©} N means exclusive range [M,N©®)®©.
-\item
-The up-to range M ©~=©\index{~=@©~=©} N means inclusive range [M,N©®]®©.
-\item
-The down-to range M ©-~©\index{-~@©-~©} N means exclusive range [N,M©®)®©.
-\item
-The down-to range M ©-~=©\index{-~=@©-~=©} N means inclusive range [N,M©®]®©.
-\item
-©0© is the implicit start value;
-\item
-©1© is the implicit increment value.
-\item
-The up-to range uses operator ©+=© for increment;
-\item
-The down-to range uses operator ©-=© for decrement.
-\item
-©@© means put nothing in this field.
-\item
-©:© means start another index.
-\end{itemize}
 
 \begin{figure}
@@ -1086,4 +1054,39 @@
 
 
+\subsection{Loop Control}
+
+The ©for©/©while©/©do-while© loop-control allows empty or simplified ranges (see Figure~\ref{f:LoopControlExamples}).
+\begin{itemize}
+\item
+The loop index is polymorphic in the type of the comparison value N (when the start value is implicit) or the start value M.
+\item
+An empty conditional implies comparison value of ©1© (true).
+\item
+A comparison N is implicit up-to exclusive range [0,N©®)®©.
+\item
+A comparison ©=© N is implicit up-to inclusive range [0,N©®]®©.
+\item
+The up-to range M ©~©\index{~@©~©} N means exclusive range [M,N©®)®©.
+\item
+The up-to range M ©~=©\index{~=@©~=©} N means inclusive range [M,N©®]®©.
+\item
+The down-to range M ©-~©\index{-~@©-~©} N means exclusive range [N,M©®)®©.
+\item
+The down-to range M ©-~=©\index{-~=@©-~=©} N means inclusive range [N,M©®]®©.
+\item
+©0© is the implicit start value;
+\item
+©1© is the implicit increment value.
+\item
+The up-to range uses operator ©+=© for increment;
+\item
+The down-to range uses operator ©-=© for decrement.
+\item
+©@© means put nothing in this field.
+\item
+©:© means start another index.
+\end{itemize}
+
+
 %\subsection{\texorpdfstring{Labelled \protect\lstinline@continue@ / \protect\lstinline@break@}{Labelled continue / break}}
 \subsection{\texorpdfstring{Labelled \LstKeywordStyle{continue} / \LstKeywordStyle{break} Statement}{Labelled continue / break Statement}}
@@ -1095,81 +1098,73 @@
 for ©break©, the target label can also be associated with a ©switch©, ©if© or compound (©{}©) statement.
 \VRef[Figure]{f:MultiLevelExit} shows ©continue© and ©break© indicating the specific control structure, and the corresponding C program using only ©goto© and labels.
-The innermost loop has 7 exit points, which cause continuation or termination of one or more of the 7 \Index{nested control-structure}s.
+The innermost loop has 8 exit points, which cause continuation or termination of one or more of the 7 \Index{nested control-structure}s.
 
 \begin{figure}
-\begin{tabular}{@{\hspace{\parindentlnth}}l@{\hspace{\parindentlnth}}l@{\hspace{\parindentlnth}}l@{}}
-\multicolumn{1}{@{\hspace{\parindentlnth}}c@{\hspace{\parindentlnth}}}{\textbf{\CFA}}	& \multicolumn{1}{@{\hspace{\parindentlnth}}c}{\textbf{C}}	\\
-\begin{cfa}
-®LC:® {
-	... §declarations§ ...
-	®LS:® switch ( ... ) {
-	  case 3:
-		®LIF:® if ( ... ) {
-			®LF:® for ( ... ) {
-				®LW:® while ( ... ) {
-					... break ®LC®; ...
-					... break ®LS®; ...
-					... break ®LIF®; ...
-					... continue ®LF;® ...
-					... break ®LF®; ...
-					... continue ®LW®; ...
-					... break ®LW®; ...
-				} // while
-			} // for
-		} else {
-			... break ®LIF®; ...
-		} // if
-	} // switch
+\centering
+\begin{lrbox}{\myboxA}
+\begin{cfa}[tabsize=3]
+®Compound:® {
+	®Try:® try {
+		®For:® for ( ... ) {
+			®While:® while ( ... ) {
+				®Do:® do {
+					®If:® if ( ... ) {
+						®Switch:® switch ( ... ) {
+							case 3:
+								®break Compound®;
+								®break Try®;
+								®break For®;      /* or */  ®continue For®;
+								®break While®;  /* or */  ®continue While®;
+								®break Do®;      /* or */  ®continue Do®;
+								®break If®;
+								®break Switch®;
+							} // switch
+						} else {
+							... ®break If®; ...	// terminate if
+						} // if
+				} while ( ... ); // do
+			} // while
+		} // for
+	} ®finally® { // always executed
+	} // try
 } // compound
 \end{cfa}
-&
-\begin{cfa}
+\end{lrbox}
+
+\begin{lrbox}{\myboxB}
+\begin{cfa}[tabsize=3]
 {
-	... §declarations§ ...
-	switch ( ... ) {
-	  case 3:
-		if ( ... ) {
-			for ( ... ) {
-				while ( ... ) {
-					... goto ®LC®; ...
-					... goto ®LS®; ...
-					... goto ®LIF®; ...
-					... goto ®LFC®; ...
-					... goto ®LFB®; ...
-					... goto ®LWC®; ...
-					... goto ®LWB®; ...
-				  ®LWC®: ; } ®LWB:® ;
-			  ®LFC:® ; } ®LFB:® ;
-		} else {
-			... goto ®LIF®; ...
-		} ®L3:® ;
-	} ®LS:® ;
-} ®LC:® ;
-\end{cfa}
-&
-\begin{cfa}
-
-
-
-
-
-
-
-// terminate compound
-// terminate switch
-// terminate if
-// continue loop
-// terminate loop
-// continue loop
-// terminate loop
-
-
-
-// terminate if
-
-
-
-\end{cfa}
-\end{tabular}
+
+		®ForC:® for ( ... ) {
+			®WhileC:® while ( ... ) {
+				®DoC:® do {
+					if ( ... ) {
+						switch ( ... ) {
+							case 3:
+								®goto Compound®;
+								®goto Try®;
+								®goto ForB®;      /* or */  ®goto ForC®;
+								®goto WhileB®;  /* or */  ®goto WhileC®;
+								®goto DoB®;      /* or */  ®goto DoC®;
+								®goto If®;
+								®goto Switch®;
+							} ®Switch:® ;
+						} else {
+							... ®goto If®; ...	// terminate if
+						} ®If:®;
+				} while ( ... ); ®DoB:® ;
+			} ®WhileB:® ;
+		} ®ForB:® ;
+
+
+} ®Compound:® ;
+\end{cfa}
+\end{lrbox}
+
+\subfloat[\CFA]{\label{f:CFibonacci}\usebox\myboxA}
+\hspace{2pt}
+\vrule
+\hspace{2pt}
+\subfloat[C]{\label{f:CFAFibonacciGen}\usebox\myboxB}
 \caption{Multi-level Exit}
 \label{f:MultiLevelExit}
@@ -1426,7 +1421,7 @@
 try {
 	f(...);
-} catch( E e ; §boolean-predicate§ ) {		§\C[8cm]{// termination handler}§
+} catch( E e ; §boolean-predicate§ ) {		§\C{// termination handler}§
 	// recover and continue
-} catchResume( E e ; §boolean-predicate§ ) { §\C{// resumption handler}\CRT§
+} catchResume( E e ; §boolean-predicate§ ) { §\C{// resumption handler}§
 	// repair and return
 } finally {
@@ -3491,5 +3486,5 @@
 For implicit formatted input, the common case is reading a sequence of values separated by whitespace, where the type of an input constant must match with the type of the input variable.
 \begin{cquote}
-\begin{lrbox}{\LstBox}
+\begin{lrbox}{\myboxA}
 \begin{cfa}[aboveskip=0pt,belowskip=0pt]
 int x;   double y   char z;
@@ -3497,5 +3492,5 @@
 \end{lrbox}
 \begin{tabular}{@{}l@{\hspace{3em}}l@{\hspace{3em}}l@{}}
-\multicolumn{1}{@{}l@{}}{\usebox\LstBox} \\
+\multicolumn{1}{@{}l@{}}{\usebox\myboxA} \\
 \multicolumn{1}{c@{\hspace{2em}}}{\textbf{\CFA}}	& \multicolumn{1}{c@{\hspace{2em}}}{\textbf{\CC}}	& \multicolumn{1}{c}{\textbf{Python}}	\\
 \begin{cfa}[aboveskip=0pt,belowskip=0pt]
@@ -6672,4 +6667,7 @@
 For example, an initial alignment and fill capability are preserved during a resize copy so the copy has the same alignment and extended storage is filled.
 Without sticky properties it is dangerous to use ©realloc©, resulting in an idiom of manually performing the reallocation to maintain correctness.
+\begin{cfa}
+
+\end{cfa}
 
 \CFA memory management extends allocation to support constructors for initialization of allocated storage, \eg in
@@ -6721,24 +6719,26 @@
 
 	// §\CFA§ safe general allocation, fill, resize, alignment, array
-	T * alloc( void );§\indexc{alloc}§
-	T * alloc( size_t dim );
-	T * alloc( T ptr[], size_t dim );
-	T * alloc_set( char fill );§\indexc{alloc_set}§
-	T * alloc_set( T fill );
-	T * alloc_set( size_t dim, char fill );
-	T * alloc_set( size_t dim, T fill );
-	T * alloc_set( size_t dim, const T fill[] );
-	T * alloc_set( T ptr[], size_t dim, char fill );
-
-	T * alloc_align( size_t align );
-	T * alloc_align( size_t align, size_t dim );
-	T * alloc_align( T ptr[], size_t align ); // aligned realloc array
-	T * alloc_align( T ptr[], size_t align, size_t dim ); // aligned realloc array
-	T * alloc_align_set( size_t align, char fill );
-	T * alloc_align_set( size_t align, T fill );
-	T * alloc_align_set( size_t align, size_t dim, char fill );
-	T * alloc_align_set( size_t align, size_t dim, T fill );
-	T * alloc_align_set( size_t align, size_t dim, const T fill[] );
-	T * alloc_align_set( T ptr[], size_t align, size_t dim, char fill );
+	T * alloc( void );§\indexc{alloc}§					§\C[3.5in]{// variable, T size}§
+	T * alloc( size_t dim );							§\C{// array[dim], T size elements}§
+	T * alloc( T ptr[], size_t dim );					§\C{// realloc array[dim], T size elements}§
+
+	T * alloc_set( char fill );§\indexc{alloc_set}§		§\C{// variable, T size, fill bytes with value}§
+	T * alloc_set( T fill );							§\C{// variable, T size, fill with value}§
+	T * alloc_set( size_t dim, char fill );				§\C{// array[dim], T size elements, fill bytes with value}§
+	T * alloc_set( size_t dim, T fill );				§\C{// array[dim], T size elements, fill elements with value}§
+	T * alloc_set( size_t dim, const T fill[] );		§\C{// array[dim], T size elements, fill elements with array}§
+	T * alloc_set( T ptr[], size_t dim, char fill );	§\C{// realloc array[dim], T size elements, fill bytes with value}§
+
+	T * alloc_align( size_t align );					§\C{// aligned variable, T size}§
+	T * alloc_align( size_t align, size_t dim );		§\C{// aligned array[dim], T size elements}§
+	T * alloc_align( T ptr[], size_t align );			§\C{// realloc new aligned array}§
+	T * alloc_align( T ptr[], size_t align, size_t dim ); §\C{// realloc new aligned array[dim]}§
+
+	T * alloc_align_set( size_t align, char fill );		§\C{// aligned variable, T size, fill bytes with value}§
+	T * alloc_align_set( size_t align, T fill );		§\C{// aligned variable, T size, fill with value}§
+	T * alloc_align_set( size_t align, size_t dim, char fill ); §\C{// aligned array[dim], T size elements, fill bytes with value}§
+	T * alloc_align_set( size_t align, size_t dim, T fill ); §\C{// aligned array[dim], T size elements, fill elements with value}§
+	T * alloc_align_set( size_t align, size_t dim, const T fill[] ); §\C{// aligned array[dim], T size elements, fill elements with array}§
+	T * alloc_align_set( T ptr[], size_t align, size_t dim, char fill ); §\C{// realloc new aligned array[dim], fill new bytes with value}§
 
 	// §\CFA§ safe initialization/copy, i.e., implicit size specification
