Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle.fig	(revision 847bb6fe1321be72f2e6a488bae6a3d8574fe886)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle.fig	(revision d67735510bcb770552b5898e5b45fc6bc07534aa)
@@ -8,126 +8,121 @@
 -2
 1200 2
-6 5919 5250 6375 5775
-5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5409.011 6102 5410 6147 5364 6192 5410
-5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5410.000 6010 5410 6147 5273 6284 5410
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 6010 5410 6010 5501 5919 5501 5919 5775 6375 5775 6375 5501
-	 6284 5501 6284 5410
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
-	 6102 5410 6102 5501 6192 5501 6192 5410
--6
-6 7442 6525 7875 6900
+5 1 0 1 0 7 50 -1 -1 0.000 0 1 1 1 3376.136 2169.318 2250 2625 2775 3225 3525 3375
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+6 3466 2774 3899 3149
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 7501 6584 7442 6900
+	 3525 2833 3466 3149
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 7856 6584 7836 6703
+	 3880 2833 3860 2952
 3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
-	 7481 6703 7599 6663 7737 6722 7836 6703
+	 3505 2952 3623 2912 3761 2971 3860 2952
 	 0.000 -0.500 -0.500 0.000
 3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
-	 7503 6579 7621 6540 7759 6599 7857 6579
+	 3527 2828 3645 2789 3783 2848 3881 2828
 	 0.000 -0.500 -0.500 0.000
 -6
-6 7575 6825 7950 7325
+6 3599 3074 3974 3574
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 7575 6950 7700 6825 7950 6825 7950 7325 7575 7325 7575 6950
-	 7700 6950 7700 6825
+	 3599 3199 3724 3074 3974 3074 3974 3574 3599 3574 3599 3199
+	 3724 3199 3724 3074
 -6
-6 9092 6525 9525 6900
+6 5116 2774 5549 3149
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 9151 6584 9092 6900
+	 5175 2833 5116 3149
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 9506 6584 9486 6703
+	 5530 2833 5510 2952
 3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
-	 9131 6703 9249 6663 9387 6722 9486 6703
+	 5155 2952 5273 2912 5411 2971 5510 2952
 	 0.000 -0.500 -0.500 0.000
 3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
-	 9153 6579 9271 6540 9409 6599 9507 6579
+	 5177 2828 5295 2789 5433 2848 5531 2828
 	 0.000 -0.500 -0.500 0.000
 -6
-6 9225 6825 9600 7325
+6 5249 3074 5625 3574
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 9225 6950 9350 6825 9600 6825 9600 7325 9225 7325 9225 6950
-	 9350 6950 9350 6825
+	 5249 3199 5374 3074 5625 3074 5625 3574 5249 3574 5249 3199
+	 5374 3199 5374 3074
 -6
-6 10742 6525 11175 6900
+6 6766 2774 7199 3149
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 10801 6584 10742 6900
+	 6825 2833 6766 3149
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 11156 6584 11136 6703
+	 7180 2833 7160 2952
 3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
-	 10781 6703 10899 6663 11037 6722 11136 6703
+	 6805 2952 6923 2912 7061 2971 7160 2952
 	 0.000 -0.500 -0.500 0.000
 3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
-	 10803 6579 10921 6540 11059 6599 11157 6579
+	 6827 2828 6945 2789 7083 2848 7181 2828
 	 0.000 -0.500 -0.500 0.000
 -6
-6 10875 6825 11250 7325
+6 6899 3074 7274 3574
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 10875 6950 11000 6825 11250 6825 11250 7325 10875 7325 10875 6950
-	 11000 6950 11000 6825
+	 6899 3199 7024 3074 7274 3074 7274 3574 6899 3574 6899 3199
+	 7024 3199 7024 3074
+-6
+6 1875 1500 2331 2025
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 2104.000 1660.011 2058 1660 2103 1614 2148 1660
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 2104.000 1661.000 1966 1660 2103 1523 2240 1660
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 1966 1660 1966 1751 1875 1751 1875 2025 2331 2025 2331 1751
+	 2240 1751 2240 1660
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
+	 2058 1660 2058 1751 2148 1751 2148 1660
 -6
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 5850 6150 6675 6150
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 5850 5250 6675 5250 6675 6600 5850 6600 5850 5250
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	1 1 1.00 60.00 120.00
-	7 0 1.00 60.00 60.00
-	 7725 6150 7725 6525
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	1 1 1.00 60.00 120.00
-	7 0 1.00 60.00 60.00
-	 9375 6150 9375 6525
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	1 1 1.00 60.00 120.00
-	7 0 1.00 60.00 60.00
-	 11025 6150 11025 6525
-2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
-	 10500 5854 10763 6308 11288 6308 11550 5854 11288 5400 10763 5400
-	 10500 5854
-2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
-	 8850 5854 9113 6308 9638 6308 9900 5854 9638 5400 9113 5400
-	 8850 5854
-2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
-	 7200 5854 7463 6308 7988 6308 8250 5854 7988 5400 7463 5400
-	 7200 5854
+	 1800 2400 2699 2399
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 6450 5925 7275 5925
+	 3749 2399 3749 2774
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 8025 5925 8925 5925
+	 5399 2399 5399 2774
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 9675 5925 10575 5925
+	 2550 2175 3299 2174
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 10725 5775 9825 5775
+	 4049 2174 4949 2174
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 9075 5775 8175 5775
-3 2 0 1 0 7 50 -1 -1 0.000 0 1 1 4
+	 5699 2174 6599 2174
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 6300 6375 6375 6825 6750 7050 7350 6975
-	 0.000 -0.500 -0.500 0.000
-4 0 0 50 -1 0 11 0.0000 2 135 810 5925 5175 Idle List\001
-4 0 0 50 -1 0 11 0.0000 2 135 810 5175 5550 Idle List\001
-4 0 0 50 -1 0 11 0.0000 2 135 360 5325 5700 Lock\001
-4 0 0 50 -1 0 11 0.0000 2 135 540 5775 6900 Atomic\001
-4 0 0 50 -1 0 11 0.0000 2 135 630 5775 7125 Pointer\001
-4 0 0 50 -1 0 11 0.0000 2 165 810 7950 6675 Benaphore\001
-4 0 0 50 -1 0 11 0.0000 2 135 720 8025 7125 Event FD\001
-4 0 0 50 -1 0 11 0.0000 2 135 1260 7275 5325 Idle Processor\001
-4 0 0 50 -1 0 11 0.0000 2 165 810 9600 6675 Benaphore\001
-4 0 0 50 -1 0 11 0.0000 2 135 720 9675 7125 Event FD\001
-4 0 0 50 -1 0 11 0.0000 2 135 1260 8925 5325 Idle Processor\001
-4 0 0 50 -1 0 11 0.0000 2 165 810 11250 6675 Benaphore\001
-4 0 0 50 -1 0 11 0.0000 2 135 720 11325 7125 Event FD\001
-4 0 0 50 -1 0 11 0.0000 2 135 1260 10575 5325 Idle Processor\001
+	 6749 2024 5849 2024
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 5099 2024 4199 2024
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 1800 1499 2699 1499 2699 2850 1800 2850 1800 1499
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4950 1650 5850 1650 5850 2550 4950 2550 4950 1650
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 3300 1650 4200 1650 4200 2550 3300 2550 3300 1650
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 6600 1650 7500 1650 7500 2550 6600 2550 6600 1650
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 7049 2399 7049 2774
+4 0 0 50 -1 0 11 0.0000 2 120 525 1799 3149 Atomic\001
+4 0 0 50 -1 0 11 0.0000 2 120 510 1799 3374 Pointer\001
+4 0 0 50 -1 0 11 0.0000 2 180 765 3974 2924 Benaphore\001
+4 0 0 50 -1 0 11 0.0000 2 120 690 4049 3374 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 180 765 5625 2924 Benaphore\001
+4 0 0 50 -1 0 11 0.0000 2 120 690 5699 3374 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 180 765 7274 2924 Benaphore\001
+4 0 0 50 -1 0 11 0.0000 2 120 690 7349 3374 Event FD\001
+4 2 0 50 -1 0 11 0.0000 2 135 585 1725 1800 Idle List\001
+4 2 0 50 -1 0 11 0.0000 2 135 360 1725 1950 Lock\001
+4 1 0 50 -1 0 11 0.0000 2 135 585 2250 1425 Idle List\001
+4 1 0 50 -1 0 11 0.0000 2 135 1020 3750 1575 Idle Processor\001
+4 1 0 50 -1 0 11 0.0000 2 135 1020 5400 1575 Idle Processor\001
+4 1 0 50 -1 0 11 0.0000 2 135 1020 7050 1575 Idle Processor\001
Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle1.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle1.fig	(revision 847bb6fe1321be72f2e6a488bae6a3d8574fe886)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle1.fig	(revision d67735510bcb770552b5898e5b45fc6bc07534aa)
@@ -8,78 +8,75 @@
 -2
 1200 2
-6 5919 5250 6375 5775
-5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5409.011 6102 5410 6147 5364 6192 5410
-5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5410.000 6010 5410 6147 5273 6284 5410
+6 1875 1500 2331 2025
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 2104.000 1660.011 2058 1660 2103 1614 2148 1660
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 2104.000 1661.000 1966 1660 2103 1523 2240 1660
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 6010 5410 6010 5501 5919 5501 5919 5775 6375 5775 6375 5501
-	 6284 5501 6284 5410
+	 1966 1660 1966 1751 1875 1751 1875 2025 2331 2025 2331 1751
+	 2240 1751 2240 1660
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
-	 6102 5410 6102 5501 6192 5501 6192 5410
+	 2058 1660 2058 1751 2148 1751 2148 1660
 -6
-6 7575 6525 7950 7025
+6 3599 2774 3974 3274
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 7575 6650 7700 6525 7950 6525 7950 7025 7575 7025 7575 6650
-	 7700 6650 7700 6525
+	 3599 2899 3724 2774 3974 2774 3974 3274 3599 3274 3599 2899
+	 3724 2899 3724 2774
 -6
-6 9225 6525 9600 7025
+6 5249 2774 5625 3274
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 9225 6650 9350 6525 9600 6525 9600 7025 9225 7025 9225 6650
-	 9350 6650 9350 6525
+	 5249 2899 5374 2774 5625 2774 5625 3274 5249 3274 5249 2899
+	 5374 2899 5374 2774
 -6
-6 10875 6525 11250 7025
+6 6899 2774 7274 3274
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 10875 6650 11000 6525 11250 6525 11250 7025 10875 7025 10875 6650
-	 11000 6650 11000 6525
+	 6899 2899 7024 2774 7274 2774 7274 3274 6899 3274 6899 2899
+	 7024 2899 7024 2774
 -6
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
-	7 0 1.00 60.00 60.00
-	 7725 6150 7725 6525
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	1 1 1.00 60.00 120.00
-	7 0 1.00 60.00 60.00
-	 9375 6150 9375 6525
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	1 1 1.00 60.00 120.00
-	7 0 1.00 60.00 60.00
-	 11025 6150 11025 6525
-2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
-	 10500 5854 10763 6308 11288 6308 11550 5854 11288 5400 10763 5400
-	 10500 5854
-2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
-	 8850 5854 9113 6308 9638 6308 9900 5854 9638 5400 9113 5400
-	 8850 5854
-2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
-	 7200 5854 7463 6308 7988 6308 8250 5854 7988 5400 7463 5400
-	 7200 5854
+	7 1 1.00 60.00 60.00
+	 3749 2399 3749 2774
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 6450 5925 7275 5925
+	 5399 2399 5399 2774
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 8025 5925 8925 5925
+	 7049 2399 7049 2774
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 9675 5925 10575 5925
+	 2550 2175 3299 2174
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 10725 5775 9825 5775
+	 4049 2174 4949 2174
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 9075 5775 8175 5775
+	 5699 2174 6599 2174
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6749 2024 5849 2024
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 5099 2024 4199 2024
 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 5850 5250 6675 5250 6675 6075 5850 6075 5850 5250
-4 0 0 50 -1 0 11 0.0000 2 135 810 5925 5175 Idle List\001
-4 0 0 50 -1 0 11 0.0000 2 135 810 5175 5550 Idle List\001
-4 0 0 50 -1 0 11 0.0000 2 135 360 5325 5700 Lock\001
-4 0 0 50 -1 0 11 0.0000 2 135 1260 7275 5325 Idle Processor\001
-4 0 0 50 -1 0 11 0.0000 2 135 1260 8925 5325 Idle Processor\001
-4 0 0 50 -1 0 11 0.0000 2 135 1260 10575 5325 Idle Processor\001
-4 0 0 50 -1 0 11 0.0000 2 135 720 8025 6825 Event FD\001
-4 0 0 50 -1 0 11 0.0000 2 135 720 9675 6825 Event FD\001
-4 0 0 50 -1 0 11 0.0000 2 135 720 11325 6825 Event FD\001
+	 4950 1650 5850 1650 5850 2550 4950 2550 4950 1650
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 3300 1650 4200 1650 4200 2550 3300 2550 3300 1650
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 6600 1650 7500 1650 7500 2550 6600 2550 6600 1650
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 1800 1499 2699 1499 2699 2400 1800 2400 1800 1499
+4 2 0 50 -1 0 11 0.0000 2 135 585 1725 1800 Idle List\001
+4 2 0 50 -1 0 11 0.0000 2 135 360 1725 1950 Lock\001
+4 1 0 50 -1 0 11 0.0000 2 135 585 2250 1425 Idle List\001
+4 1 0 50 -1 0 11 0.0000 2 135 1020 3750 1575 Idle Processor\001
+4 1 0 50 -1 0 11 0.0000 2 135 1020 5400 1575 Idle Processor\001
+4 1 0 50 -1 0 11 0.0000 2 135 1020 7050 1575 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 120 690 4049 3074 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 120 690 5699 3074 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 120 690 7349 3074 Event FD\001
Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle2.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle2.fig	(revision 847bb6fe1321be72f2e6a488bae6a3d8574fe886)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle2.fig	(revision d67735510bcb770552b5898e5b45fc6bc07534aa)
@@ -8,87 +8,82 @@
 -2
 1200 2
-6 5919 5250 6375 5775
-5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5409.011 6102 5410 6147 5364 6192 5410
-5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5410.000 6010 5410 6147 5273 6284 5410
+5 1 0 1 0 7 50 -1 -1 0.000 0 1 1 1 3150.000 2106.250 2250 2625 2775 3075 3525 3075
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+6 1875 1500 2331 2025
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 2104.000 1660.011 2058 1660 2103 1614 2148 1660
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 2104.000 1661.000 1966 1660 2103 1523 2240 1660
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 6010 5410 6010 5501 5919 5501 5919 5775 6375 5775 6375 5501
-	 6284 5501 6284 5410
+	 1966 1660 1966 1751 1875 1751 1875 2025 2331 2025 2331 1751
+	 2240 1751 2240 1660
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
-	 6102 5410 6102 5501 6192 5501 6192 5410
+	 2058 1660 2058 1751 2148 1751 2148 1660
 -6
-6 7575 6525 7950 7025
+6 3599 2774 3974 3274
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 7575 6650 7700 6525 7950 6525 7950 7025 7575 7025 7575 6650
-	 7700 6650 7700 6525
+	 3599 2899 3724 2774 3974 2774 3974 3274 3599 3274 3599 2899
+	 3724 2899 3724 2774
 -6
-6 9225 6525 9600 7025
+6 5249 2774 5625 3274
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 9225 6650 9350 6525 9600 6525 9600 7025 9225 7025 9225 6650
-	 9350 6650 9350 6525
+	 5249 2899 5374 2774 5625 2774 5625 3274 5249 3274 5249 2899
+	 5374 2899 5374 2774
 -6
-6 10875 6525 11250 7025
+6 6899 2774 7274 3274
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
-	 10875 6650 11000 6525 11250 6525 11250 7025 10875 7025 10875 6650
-	 11000 6650 11000 6525
+	 6899 2899 7024 2774 7274 2774 7274 3274 6899 3274 6899 2899
+	 7024 2899 7024 2774
 -6
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
-	 5850 6150 6675 6150
-2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
-	 5850 5250 6675 5250 6675 6600 5850 6600 5850 5250
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	1 1 1.00 60.00 120.00
-	7 0 1.00 60.00 60.00
-	 7725 6150 7725 6525
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	1 1 1.00 60.00 120.00
-	7 0 1.00 60.00 60.00
-	 9375 6150 9375 6525
-2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
-	1 1 1.00 60.00 120.00
-	7 0 1.00 60.00 60.00
-	 11025 6150 11025 6525
-2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
-	 10500 5854 10763 6308 11288 6308 11550 5854 11288 5400 10763 5400
-	 10500 5854
-2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
-	 8850 5854 9113 6308 9638 6308 9900 5854 9638 5400 9113 5400
-	 8850 5854
-2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
-	 7200 5854 7463 6308 7988 6308 8250 5854 7988 5400 7463 5400
-	 7200 5854
+	 1800 2400 2699 2399
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 6450 5925 7275 5925
+	 3749 2399 3749 2774
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 8025 5925 8925 5925
+	 5399 2399 5399 2774
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 9675 5925 10575 5925
+	 7049 2399 7049 2774
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 10725 5775 9825 5775
+	 2550 2175 3299 2174
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 9075 5775 8175 5775
-3 2 0 1 0 7 50 -1 -1 0.000 0 1 1 4
+	 4049 2174 4949 2174
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
 	1 1 1.00 60.00 120.00
 	7 1 1.00 60.00 60.00
-	 6300 6375 6375 6825 6900 6975 7500 6750
-	 0.000 -0.500 -0.500 0.000
-4 0 0 50 -1 0 11 0.0000 2 135 810 5925 5175 Idle List\001
-4 0 0 50 -1 0 11 0.0000 2 135 810 5175 5550 Idle List\001
-4 0 0 50 -1 0 11 0.0000 2 135 360 5325 5700 Lock\001
-4 0 0 50 -1 0 11 0.0000 2 135 540 5775 6900 Atomic\001
-4 0 0 50 -1 0 11 0.0000 2 135 630 5775 7125 Pointer\001
-4 0 0 50 -1 0 11 0.0000 2 135 1260 7275 5325 Idle Processor\001
-4 0 0 50 -1 0 11 0.0000 2 135 1260 8925 5325 Idle Processor\001
-4 0 0 50 -1 0 11 0.0000 2 135 1260 10575 5325 Idle Processor\001
-4 0 0 50 -1 0 11 0.0000 2 135 720 8025 6825 Event FD\001
-4 0 0 50 -1 0 11 0.0000 2 135 720 9675 6825 Event FD\001
-4 0 0 50 -1 0 11 0.0000 2 135 720 11325 6825 Event FD\001
+	 5699 2174 6599 2174
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6749 2024 5849 2024
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 5099 2024 4199 2024
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 1800 1499 2699 1499 2699 2850 1800 2850 1800 1499
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 4950 1650 5850 1650 5850 2550 4950 2550 4950 1650
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 3300 1650 4200 1650 4200 2550 3300 2550 3300 1650
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 6600 1650 7500 1650 7500 2550 6600 2550 6600 1650
+4 0 0 50 -1 0 11 0.0000 2 120 525 1799 3149 Atomic\001
+4 0 0 50 -1 0 11 0.0000 2 120 510 1799 3374 Pointer\001
+4 2 0 50 -1 0 11 0.0000 2 135 585 1725 1800 Idle List\001
+4 2 0 50 -1 0 11 0.0000 2 135 360 1725 1950 Lock\001
+4 1 0 50 -1 0 11 0.0000 2 135 585 2250 1425 Idle List\001
+4 1 0 50 -1 0 11 0.0000 2 135 1020 3750 1575 Idle Processor\001
+4 1 0 50 -1 0 11 0.0000 2 135 1020 5400 1575 Idle Processor\001
+4 1 0 50 -1 0 11 0.0000 2 135 1020 7050 1575 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 120 690 4049 3074 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 120 690 5699 3074 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 120 690 7349 3074 Event FD\001
Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle_state.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle_state.fig	(revision 847bb6fe1321be72f2e6a488bae6a3d8574fe886)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle_state.fig	(revision d67735510bcb770552b5898e5b45fc6bc07534aa)
@@ -8,20 +8,20 @@
 -2
 1200 2
-1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3900 3600 571 571 3900 3600 3375 3375
-1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 6300 3600 605 605 6300 3600 5775 3300
-1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 5100 5400 600 600 5100 5400 4500 5400
+1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3000 3600 600 600 3000 3600 2400 3600
+1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1800 1800 600 600 1800 1800 1200 1800
+1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 4205 1800 600 600 4205 1800 3605 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	0 0 1.00 60.00 120.00
-	 4200 4125 4725 4950
+	1 1 1.00 60.00 120.00
+	 2100 2325 2625 3150
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	0 0 1.00 60.00 120.00
-	 4500 3600 5700 3600
+	1 1 1.00 60.00 120.00
+	 2400 1800 3600 1800
 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
-	0 0 1.00 60.00 120.00
-	 5923 4125 5475 4875
-4 1 0 50 -1 0 11 0.0000 2 135 450 5100 5475 AWAKE\001
-4 1 0 50 -1 0 11 0.0000 2 135 450 6300 3675 SLEEP\001
-4 1 0 50 -1 0 11 0.0000 2 135 540 3900 3675 SEARCH\001
-4 0 0 50 -1 0 11 0.0000 2 135 360 5775 4650 WAKE\001
-4 2 0 50 -1 0 11 0.0000 2 135 540 4350 4650 CANCEL\001
-4 1 0 50 -1 0 11 0.0000 2 135 630 5025 3450 CONFIRM\001
+	1 1 1.00 60.00 120.00
+	 3900 2325 3375 3150
+4 1 0 50 -1 0 11 0.0000 2 120 675 3000 3675 AWAKE\001
+4 1 0 50 -1 0 11 0.0000 2 120 525 4200 1875 SLEEP\001
+4 1 0 50 -1 0 11 0.0000 2 120 720 1800 1875 SEARCH\001
+4 2 0 50 -1 0 11 0.0000 2 120 720 2250 2850 CANCEL\001
+4 1 0 50 -1 0 11 0.0000 2 120 840 2925 1650 CONFIRM\001
+4 0 0 50 -1 0 11 0.0000 2 120 540 3750 2850 WAKE\001
Index: doc/theses/thierry_delisle_PhD/thesis/local.bib
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/local.bib	(revision 847bb6fe1321be72f2e6a488bae6a3d8574fe886)
+++ doc/theses/thierry_delisle_PhD/thesis/local.bib	(revision d67735510bcb770552b5898e5b45fc6bc07534aa)
@@ -499,8 +499,9 @@
 }
 
-@article{MAN:linux/cfs/balancing,
+@misc{MAN:linux/cfs/balancing,
   title={Reworking {CFS} load balancing},
-  journal={LWN article, available at: https://lwn.net/Articles/793427/},
-  year={2013}
+  journal={LWN article},
+  year={2019},
+  howpublished = {\href{https://lwn.net/Articles/793427}{https://\-lwn.net/\-Articles/\-793427}},
 }
 
@@ -539,5 +540,5 @@
 }
 
-@online{GITHUB:go,
+@misc{GITHUB:go,
   title = {GitHub - The Go Programming Language},
   author = {The Go Programming Language},
@@ -561,6 +562,4 @@
   howpublished = {\href{http://www.erlang.se/euc/08/euc_smp.pdf}{http://\-www.erlang.se/\-euc/\-08/\-euc_smp.pdf}}
 }
-
-
 
 @manual{MAN:tbb/scheduler,
@@ -701,4 +700,5 @@
   note = "[Online; accessed 12-April-2022]"
 }
+
 @misc{wiki:binpak,
   author = "{Wikipedia contributors}",
@@ -754,2 +754,13 @@
 }
 
+@inproceedings{Albers12,
+    author	= {Susanne Albers and Antonios Antoniadis},
+    title	= {Race to Idle: New Algorithms for Speed Scaling with a Sleep State},
+    booktitle	= {Proceedings of the 2012  Annual ACM-SIAM Symposium on Discrete Algorithms (SODA)},
+    doi		= {10.1137/1.9781611973099.100},
+    URL		= {https://epubs.siam.org/doi/abs/10.1137/1.9781611973099.100},
+    eprint	= {https://epubs.siam.org/doi/pdf/10.1137/1.9781611973099.100},
+    year	= 2012,
+    month	= jan,
+    pages	= {1266-1285},
+}
Index: doc/theses/thierry_delisle_PhD/thesis/text/core.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/core.tex	(revision 847bb6fe1321be72f2e6a488bae6a3d8574fe886)
+++ doc/theses/thierry_delisle_PhD/thesis/text/core.tex	(revision d67735510bcb770552b5898e5b45fc6bc07534aa)
@@ -341,4 +341,5 @@
 
 \subsection{Topological Work Stealing}
+\label{s:TopologicalWorkStealing}
 Therefore, the approach used in the \CFA scheduler is to have per-\proc subqueues, but have an explicit data-structure track which cache substructure each subqueue is tied to.
 This tracking requires some finesse because reading this data structure must lead to fewer cache misses than not having the data structure in the first place.
Index: doc/theses/thierry_delisle_PhD/thesis/text/io.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/io.tex	(revision 847bb6fe1321be72f2e6a488bae6a3d8574fe886)
+++ doc/theses/thierry_delisle_PhD/thesis/text/io.tex	(revision d67735510bcb770552b5898e5b45fc6bc07534aa)
@@ -250,5 +250,5 @@
 In this design, allocation and submission form a partitioned ring buffer as shown in Figure~\ref{fig:pring}.
 Once added to the ring buffer, the attached \gls{proc} has a significant amount of flexibility with regards to when to perform the system call.
-Possible options are: when the \gls{proc} runs out of \glspl{thrd} to run, after running a given number of \glspl{thrd}, etc.
+Possible options are: when the \gls{proc} runs out of \glspl{thrd} to run, after running a given number of \glspl{thrd}, \etc.
 
 \begin{figure}
Index: doc/theses/thierry_delisle_PhD/thesis/text/practice.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/practice.tex	(revision 847bb6fe1321be72f2e6a488bae6a3d8574fe886)
+++ doc/theses/thierry_delisle_PhD/thesis/text/practice.tex	(revision d67735510bcb770552b5898e5b45fc6bc07534aa)
@@ -1,143 +1,175 @@
 \chapter{Scheduling in practice}\label{practice}
-The scheduling algorithm discribed in Chapter~\ref{core} addresses scheduling in a stable state.
-However, it does not address problems that occur when the system changes state.
+The scheduling algorithm described in Chapter~\ref{core} addresses scheduling in a stable state.
+This chapter addresses problems that occur when the system state changes.
 Indeed, the \CFA runtime supports expanding and shrinking the number of \procs, both manually and, to some extent, automatically.
-This entails that the scheduling algorithm must support these transitions.
-
-More precise \CFA supports adding \procs using the RAII object @processor@.
-These objects can be created at any time and can be destroyed at any time.
-They are normally created as automatic stack variables, but this is not a requirement.
-
-The consequence is that the scheduler and \io subsystems must support \procs comming in and out of existence.
+These changes affect the scheduling algorithm, which must dynamically alter its behaviour.
+
+In detail, \CFA supports adding \procs using the type @processor@, in both stack (RAII) and heap allocation scenarios.
+\begin{lstlisting}
+{
+	processor p[4]; // 4 new kernel threads
+	... // execute on 4 processors
+	processor * dp = new( processor, 6 ); // 6 new kernel threads
+	... // execute on 10 processors
+	delete( dp );   // delete 6 kernel threads
+	... // execute on 4 processors
+} // delete 4 kernel threads
+\end{lstlisting}
+Dynamically allocated processors can be deleted at any time, \ie their lifetime can exceed the block in which they are created.
+The consequence is that the scheduler and \io subsystems must know when these \procs come in and out of existence and roll them into the appropriate scheduling algorithms.
 
 \section{Manual Resizing}
 Manual resizing is expected to be a rare operation.
-Programmers are mostly expected to resize clusters on startup or teardown.
-Therefore dynamically changing the number of \procs is an appropriate moment to allocate or free resources to match the new state.
-As such all internal arrays that are sized based on the number of \procs need to be @realloc@ed.
-This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing when shrinkingbecause some indexes are expected to refer to dense contiguous resources and there is no guarantee the resource being removed has the highest index.}.
+Programmers normally create/delete processors on a cluster at startup/teardown.
+Therefore, dynamically changing the number of \procs is an appropriate moment to allocate or free resources to match the new state.
+As such, all internal scheduling arrays that are sized based on the number of \procs need to be @realloc@ed.
+This requirement also means any references into these arrays, \eg pointers or indexes, may need to be updated if elements are moved for compaction or any other reason.
+% \footnote{Indexes may still need fixing when shrinking because some indexes are expected to refer to dense contiguous resources and there is no guarantee the resource being removed has the highest index.}
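+
+As an illustration, the following sketch shows the kind of reallocation a resize implies; the field names are hypothetical and this is not the actual \CFA runtime code.
+\begin{lstlisting}
+#include <stdlib.h>
+struct cluster {
+	struct processor ** procs;	// internal array sized by the number of processors
+	unsigned nprocs;
+};
+void grow( struct cluster * cl, unsigned extra ) {
+	// resize the internal array to match the new number of processors
+	cl->procs = realloc( cl->procs, (cl->nprocs + extra) * sizeof(struct processor *) );
+	cl->nprocs += extra;
+	// any cached pointers or indexes into the old array must be refreshed here
+}
+\end{lstlisting}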
 
 There are no performance requirements, within reason, for resizing since it is expected to be rare.
-However, this operation has strict correctness requirements since shrinking and idle sleep can easily lead to deadlocks.
+However, this operation has strict correctness requirements since the interaction between resizing and idle sleep can easily lead to deadlocks.
 It should also avoid, as much as possible, any effect on performance when the number of \procs remains constant.
 This latter requirement prohibits naive solutions, like simply adding a global lock to the ready-queue arrays.
 
 \subsection{Read-Copy-Update}
-One solution is to use the Read-Copy-Update\cite{wiki:rcu} pattern.
-In this pattern, resizing is done by creating a copy of the internal data strucures, updating the copy with the desired changes, and then attempt an Idiana Jones Switch to replace the original witht the copy.
-This approach potentially has the advantage that it may not need any synchronization to do the switch.
-However, there is a race where \procs could still use the previous, original, data structure after the copy was switched in.
-This race not only requires some added memory reclamation scheme, it also requires that operations made on the stale original version be eventually moved to the copy.
-
-For linked-lists, enqueing is only somewhat problematic, \ats enqueued to the original queues need to be transferred to the new, which might not preserve ordering.
-Dequeing is more challenging.
-Dequeing from the original will not necessarily update the copy which could lead to multiple \procs dequeing the same \at.
-Fixing this requires more synchronization or more indirection on the queues.
-
-Another challenge is that the original must be kept until all \procs have witnessed the change.
-This is a straight forward memory reclamation challenge but it does mean that every operation will need \emph{some} form of synchronization.
-If each of these operation does need synchronization then it is possible a simpler solution achieves the same performance.
-Because in addition to the classic challenge of memory reclamation, transferring the original data to the copy before reclaiming it poses additional challenges.
+One solution is to use the Read-Copy-Update pattern~\cite{wiki:rcu}.
+In this pattern, resizing is done by creating a copy of the internal data structures (\eg see Figure~\ref{fig:base-ts2}), updating the copy with the desired changes, and then attempting an Indiana Jones Switch to replace the original with the copy.
+This approach has the advantage that it may not need any synchronization to do the switch.
+However, there is a race where \procs still use the original data structure after the copy is switched.
+This race not only requires adding a memory-reclamation scheme, it also requires that operations made on the stale original version are eventually moved to the copy.
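+
+The switch itself can be as simple as an atomic pointer exchange, as in the following sketch; the helper names are hypothetical and this only illustrates the pattern, not the \CFA implementation.
+\begin{lstlisting}
+#include <stdatomic.h>
+struct queues;	// internal scheduling data structure read by every proc
+_Atomic(struct queues *) ready;	// procs dereference this pointer on every enqueue/dequeue
+
+void resize( unsigned nprocs ) {
+	struct queues * copy = build_copy( atomic_load( &ready ), nprocs );	// hypothetical: copy + changes
+	struct queues * old = atomic_exchange( &ready, copy );	// the "Indiana Jones Switch"
+	// "old" cannot be freed until every proc has stopped using it, and any
+	// tasks left on its subqueues must still be transferred to the copy
+	defer_reclaim( old );	// hypothetical memory-reclamation helper
+}
+\end{lstlisting}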
+
+Specifically, the original data structure must be kept until all \procs have witnessed the change.
+This requirement is the \newterm{memory reclamation challenge} and means every operation needs \emph{some} form of synchronization.
+If all operations need synchronization, then the overall cost of this technique is likely to be similar to an uncontended lock approach.
+In addition to the classic challenge of memory reclamation, transferring the original data to the copy before reclaiming it poses additional challenges.
 Especially merging subqueues while having a minimal impact on fairness and locality.
 
-\subsection{Read-Writer Lock}
-A simpler approach would be to use a \newterm{Readers-Writer Lock}\cite{wiki:rwlock} where the resizing requires acquiring the lock as a writer while simply enqueing/dequeing \ats requires acquiring the lock as a reader.
+For example, given a linked-list, having a node enqueued onto the original and new list is not necessarily a problem depending on the chosen list structure.
+If the list supports arbitrary insertions, then inconsistencies in the tail pointer do not break the list;
+however, ordering may not be preserved.
+Furthermore, nodes enqueued to the original queues eventually need to be uniquely transferred to the new queues, which may further perturb ordering.
+Dequeuing is more challenging when nodes appear on both lists because of pending reclamation: dequeuing a node from one list does not remove it from the other nor is that node in the same place on the other list.
+This situation can lead to multiple \procs dequeuing the same \at.
+Fixing these challenges requires more synchronization or more indirection to the queues, plus coordinated searching to ensure unique elements.
+
+\subsection{Readers-Writer Lock}
+A simpler approach is to use a \newterm{Readers-Writer Lock}~\cite{wiki:rwlock}, where the resizing requires acquiring the lock as a writer while simply enqueueing/dequeuing \ats requires acquiring the lock as a reader.
 Using a Readers-Writer lock solves the problem of dynamically resizing and leaves the challenge of finding or building a lock with sufficiently good read-side performance.
-Since this is not a very complex challenge and an ad-hoc solution is perfectly acceptable, building a Readers-Writer lock was the path taken.
-
-To maximize reader scalability, the readers should not contend with eachother when attempting to acquire and release the critical sections.
-This effectively requires that each reader have its own piece of memory to mark as locked and unlocked.
-Reades then acquire the lock wait for writers to finish the critical section and then acquire their local spinlocks.
-Writers acquire the global lock, so writers have mutual exclusion among themselves, and then acquires each of the local reader locks.
-Acquiring all the local locks guarantees mutual exclusion between the readers and the writer, while the wait on the read side prevents readers from continously starving the writer.
-\todo{reference listings}
-
+Since this is not a very complex challenge and an ad-hoc solution is perfectly acceptable, building a custom Readers-Writer lock was the path taken.
+
+To maximize reader scalability, readers should not contend with each other when attempting to acquire and release a critical section.
+Achieving this goal requires each reader to have its own memory to mark as locked and unlocked.
+The read acquire possibly waits for a writer to finish the critical section and then acquires a reader's local spinlock.
+The write acquire acquires the global lock, guaranteeing mutual exclusion among writers, and then acquires each of the local reader locks.
+Acquiring all the local read locks guarantees mutual exclusion among the readers and the writer, while the wait on the read side prevents readers from continuously starving the writer.
+
+Figure~\ref{f:SpecializedReadersWriterLock} shows the outline for this specialized readers-writer lock.
+The lock is nonblocking, so both readers and writers spin while the lock is held.
+\todo{finish explanation}
+
+\begin{figure}
 \begin{lstlisting}
 void read_lock() {
 	// Step 1 : make sure no writers in
 	while write_lock { Pause(); }
-
-	// May need fence here
-
 	// Step 2 : acquire our local lock
-	while atomic_xchg( tls.lock ) {
-		Pause();
-	}
-}
-
+	while atomic_xchg( tls.lock ) { Pause(); }
+}
 void read_unlock() {
 	tls.lock = false;
 }
-\end{lstlisting}
-
-\begin{lstlisting}
 void write_lock()  {
 	// Step 1 : lock global lock
-	while atomic_xchg( write_lock ) {
-		Pause();
-	}
-
+	while atomic_xchg( write_lock ) { Pause(); }
 	// Step 2 : lock per-proc locks
 	for t in all_tls {
-		while atomic_xchg( t.lock ) {
-			Pause();
-		}
+		while atomic_xchg( t.lock ) { Pause(); }
 	}
 }
-
 void write_unlock() {
 	// Step 1 : release local locks
-	for t in all_tls {
-		t.lock = false;
-	}
-
+	for t in all_tls { t.lock = false; }
 	// Step 2 : release global lock
 	write_lock = false;
 }
 \end{lstlisting}
+\caption{Specialized Readers-Writer Lock}
+\label{f:SpecializedReadersWriterLock}
+\end{figure}
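+
+The intended usage is that the frequent scheduling operations only take the reader side, while the rare resize takes the writer side, as in the following hypothetical sketch (the helper names are illustrative, not the actual \CFA runtime interface).
+\begin{lstlisting}
+void schedule( struct cluster * cl, struct thread_desc * t ) {
+	read_lock();	// frequent path: acquire as reader
+	enqueue( select_subqueue( cl ), t );	// hypothetical helpers
+	read_unlock();
+}
+void resize_cluster( struct cluster * cl, unsigned nprocs ) {
+	write_lock();	// rare path: acquire as writer, excluding all readers
+	realloc_internal_arrays( cl, nprocs );	// hypothetical helper
+	write_unlock();
+}
+\end{lstlisting}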
 
 \section{Idle-Sleep}
-In addition to users manually changing the number of \procs, it is desireable to support ``removing'' \procs when there is not enough \ats for all the \procs to be useful.
-While manual resizing is expected to be rare, the number of \ats is expected to vary much more which means \procs may need to be ``removed'' for only short periods of time.
-Furthermore, race conditions that spuriously lead to the impression that no \ats are ready are actually common in practice.
-Therefore resources associated with \procs should not be freed but \procs simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready.
-This state is referred to as \newterm{Idle-Sleep}.
+While manual resizing of \procs is expected to be rare, the number of \ats can vary significantly over an application's lifetime, which means there are times when there are too few or too many \procs.
+For this work, it is the programmer's responsibility to manually create \procs, so if there are too few \procs, the application must address this issue.
+That leaves the opposite case: too many \procs when there are not enough \ats for all of them to be useful.
+These idle \procs cannot be removed because their lifetime is controlled by the application, and only the application knows when the number of \ats may increase or decrease.
+While idle \procs can spin until work appears, this approach wastes processor time that other applications could use, wastes energy, and generates heat.
+Therefore, idle \procs are put into an idle state, called \newterm{Idle-Sleep}, where the \gls{kthrd} is blocked until the scheduler deems it is needed.
 
 Idle sleep effectively encompasses several challenges.
-First some data structure needs to keep track of all \procs that are in idle sleep.
-Because of idle sleep can be spurious, this data structure has strict performance requirements in addition to the strict correctness requirements.
-Next, some tool must be used to block kernel threads \glspl{kthrd}, \eg @pthread_cond_wait@, pthread semaphores.
-The complexity here is to support \at parking and unparking, timers, \io operations and all other \CFA features with minimal complexity.
-Finally, idle sleep also includes a heuristic to determine the appropriate number of \procs to be in idle sleep an any given time.
-This third challenge is however outside the scope of this thesis because developping a general heuristic is involved enough to justify its own work.
-The \CFA scheduler simply follows the ``Race-to-Idle'\cit{https://doi.org/10.1137/1.9781611973099.100}' approach where a sleeping \proc is woken any time an \at becomes ready and \procs go to idle sleep anytime they run out of work.
+First, a data structure needs to keep track of all \procs that are in idle sleep.
+Because idle sleep can be spurious, this data structure has strict performance requirements, in addition to strict correctness requirements.
+Next, some mechanism is needed to block \glspl{kthrd}, \eg @pthread_cond_wait@ on a pthread condition variable or waiting on a pthread semaphore.
+The complexity here is to support \at parking and unparking, user-level locking, timers, \io operations, and all other \CFA features with minimal complexity.
+Finally, the scheduler needs a heuristic to determine when to block and unblock an appropriate number of \procs.
+However, this third challenge is outside the scope of this thesis because developing a general heuristic is complex enough to justify its own work.
+Therefore, the \CFA scheduler simply follows the ``Race-to-Idle''~\cite{Albers12} approach where a sleeping \proc is woken any time a \at becomes ready and \procs go to idle sleep anytime they run out of work.
 
 \section{Sleeping}
 As usual, the corner-stone of any feature related to the kernel is the choice of system call.
-In terms of blocking a \gls{kthrd} until some event occurs the linux kernel has many available options:
-
-\paragraph{\lstinline{pthread_mutex}/\lstinline{pthread_cond}}
-The most classic option is to use some combination of @pthread_mutex@ and @pthread_cond@.
-These serve as straight forward mutual exclusion and synchronization tools and allow a \gls{kthrd} to wait on a @pthread_cond@ until signalled.
-While this approach is generally perfectly appropriate for \glspl{kthrd} waiting after eachother, \io operations do not signal @pthread_cond@s.
-For \io results to wake a \proc waiting on a @pthread_cond@ means that a different \glspl{kthrd} must be woken up first, and then the \proc can be signalled.
+In terms of blocking a \gls{kthrd} until some event occurs, the Linux kernel has many available options.
+
+\subsection{\lstinline{pthread_mutex}/\lstinline{pthread_cond}}
+The classic option is to use some combination of the pthread mutual exclusion and synchronization locks, allowing a safe park/unpark of a \gls{kthrd} to/from a @pthread_cond@.
+While this approach works for \glspl{kthrd} waiting among themselves, \io operations do not provide a mechanism to signal @pthread_cond@s.
+For \io results to wake a \proc waiting on a @pthread_cond@, a different \gls{kthrd} must be woken up first, which then signals the \proc.
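+
+For completeness, the following is a minimal park/unpark sketch using these primitives; it is not the \CFA runtime code, and the flag guards against a lost wake-up when the signal arrives before the wait.
+\begin{lstlisting}
+#include <pthread.h>
+#include <stdbool.h>
+
+pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+bool do_wake = false;
+
+void park() {	// block this kernel thread until unpark is called
+	pthread_mutex_lock( &mtx );
+	while ( ! do_wake ) pthread_cond_wait( &cond, &mtx );
+	do_wake = false;
+	pthread_mutex_unlock( &mtx );
+}
+void unpark() {	// wake the parked kernel thread
+	pthread_mutex_lock( &mtx );
+	do_wake = true;
+	pthread_cond_signal( &cond );
+	pthread_mutex_unlock( &mtx );
+}
+\end{lstlisting}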
 
 \subsection{\lstinline{io_uring} and Epoll}
-An alternative is to flip the problem on its head and block waiting for \io, using @io_uring@ or even @epoll@.
-This creates the inverse situation, where \io operations directly wake sleeping \procs but waking \proc from a running \gls{kthrd} must use an indirect scheme.
-This generally takes the form of creating a file descriptor, \eg, a dummy file, a pipe or an event fd, and using that file descriptor when \procs need to wake eachother.
-This leads to additional complexity because there can be a race between these artificial \io operations and genuine \io operations.
-If not handled correctly, this can lead to the artificial files going out of sync.
+An alternative is to flip the problem on its head and block waiting for \io, using @io_uring@ or @epoll@.
+This creates the inverse situation, where \io operations directly wake sleeping \procs but waking blocked \procs must use an indirect scheme.
+This generally takes the form of creating a file descriptor, \eg, dummy file, pipe, or event fd, and using that file descriptor when \procs need to wake each other.
+This leads to additional complexity because there can be a race between these artificial \io and genuine \io operations.
+If not handled correctly, this can lead to artificial files being delayed too long behind genuine files, resulting in longer latency.
 
 \subsection{Event FDs}
 Another interesting approach is to use an event file descriptor\cit{eventfd}.
-This is a Linux feature that is a file descriptor that behaves like \io, \ie, uses @read@ and @write@, but also behaves like a semaphore.
-Indeed, all read and writes must use 64bits large values\footnote{On 64-bit Linux, a 32-bit Linux would use 32 bits values.}.
-Writes add their values to the buffer, that is arithmetic addition and not buffer append, and reads zero out the buffer and return the buffer values so far\footnote{
-This is without the \lstinline{EFD_SEMAPHORE} flag. This flags changes the behavior of \lstinline{read} but is not needed for this work.}.
+This Linux feature is a file descriptor that behaves like \io, \ie, uses @read@ and @write@, but also behaves like a semaphore.
+Indeed, all reads and writes must use word-sized values, \ie 64 or 32 bits.
+Writes \emph{add} their values to a buffer using arithmetic addition versus buffer append, and reads zero out the buffer and return the buffer values so far.\footnote{
+This behaviour is without the \lstinline{EFD_SEMAPHORE} flag, which changes the behaviour of \lstinline{read} but is not needed for this work.}
 If a read is made while the buffer is already 0, the read blocks until a non-0 value is added.
-What makes this feature particularly interesting is that @io_uring@ supports the @IORING_REGISTER_EVENTFD@ command, to register an event fd to a particular instance.
-Once that instance is registered, any \io completion will result in @io\_uring@ writing to the event FD.
-This means that a \proc waiting on the event FD can be \emph{directly} woken up by either other \procs or incomming \io.
+What makes this feature particularly interesting is that @io_uring@ supports the @IORING_REGISTER_EVENTFD@ command to register an event @fd@ to a particular instance.
+Once that instance is registered, any \io completion results in @io_uring@ writing to the event @fd@.
+This means that a \proc waiting on the event @fd@ can be \emph{directly} woken up by either other \procs or incoming \io.
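+
+The semaphore-like semantics can be demonstrated in a few lines of C; this is a minimal sketch for 64-bit Linux with error handling elided, and the @io_uring@ registration itself is not shown.
+\begin{lstlisting}
+#include <sys/eventfd.h>
+#include <unistd.h>
+#include <stdint.h>
+
+int main() {
+	int efd = eventfd( 0, 0 );	// counter starts at 0
+	uint64_t one = 1, val;
+	write( efd, &one, sizeof(one) );	// adds 1 to the counter (arithmetic addition)
+	write( efd, &one, sizeof(one) );	// counter is now 2
+	read( efd, &val, sizeof(val) );	// returns 2 and zeroes the counter
+	// a further read would now block until some thread writes a non-zero value
+	close( efd );
+	return 0;
+}
+\end{lstlisting}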
+
+\section{Tracking Sleepers}
+Tracking which \procs are in idle sleep requires a data structure holding all the sleeping \procs, but more importantly it requires a concurrent \emph{handshake} so that no \at is stranded on a ready-queue with no active \proc.
+The classic challenge occurs when a \at is made ready while a \proc is going to sleep: there is a race where the new \at may not see the sleeping \proc and the sleeping \proc may not see the ready \at.
+Since \ats can be made ready by timers, \io operations, or other events outside a cluster, this race can occur even if the \proc going to sleep is the only \proc awake.
+As a result, improper handling of this race leads to all \procs going to sleep when there are ready \ats and the system deadlocks.
+
+Furthermore, the ``Race-to-Idle'' approach means that there may be contention on the data structure tracking sleepers.
+Contention can be tolerated for \procs attempting to sleep or wake-up because these \procs are not doing useful work, and therefore, not contributing to overall performance.
+However, notifying, \ie checking if a \proc must be woken up and doing so if needed, can significantly affect overall performance and must be low cost.
+
+\subsection{Sleepers List}
+Each cluster maintains a list of idle \procs, organized as a stack.
+This ordering allows \procs at the head of the list to stay constantly active and those at the tail to stay in idle sleep for extended periods of time.
+Because of unbalanced performance requirements, the algorithm tracking sleepers is designed to have idle \procs handle as much of the work as possible.
+The idle \procs maintain the stack of sleepers among themselves, so notifying a sleeping \proc takes as little work as possible.
+This approach means that maintaining the list is fairly straightforward.
+The list can simply use a single lock per cluster and only \procs that are getting in and out of the idle state contend for that lock.
+
+This approach also simplifies notification.
+Indeed, \procs not only need to be notified when a new \at is readied, but also must be notified during manual resizing, so the \gls{kthrd} can be joined.
+These requirements mean whichever entity removes idle \procs from the sleeper list must be able to do so in any order.
+Using a simple lock over this data structure makes the removal much simpler than using a lock-free data structure.
+The single lock also means the notification process simply needs to wake-up the desired idle \proc, using @pthread_cond_signal@, @write@ on an @fd@, \etc, and the \proc handles the rest.
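+
+The following sketch outlines this division of labour: the idle \proc does all the list maintenance under the per-cluster lock, while a notifier only finds a candidate and wakes it (the names are hypothetical, not the actual \CFA runtime code).
+\begin{lstlisting}
+void go_idle( struct cluster * cl, struct processor * p ) {
+	lock( &cl->idle_lock );
+	push( &cl->idle_list, p );	// stack order: procs near the head stay active
+	unlock( &cl->idle_lock );
+	block( p );	// e.g., read p's private event fd
+	lock( &cl->idle_lock );
+	remove( &cl->idle_list, p );	// the idle proc itself maintains the list
+	unlock( &cl->idle_lock );
+}
+void notify_one( struct cluster * cl ) {
+	lock( &cl->idle_lock );
+	struct processor * p = head( &cl->idle_list );	// may be NULL
+	unlock( &cl->idle_lock );
+	if ( p ) wake( p );	// e.g., pthread_cond_signal or a write to p's event fd
+}
+\end{lstlisting}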
+
+\subsection{Reducing Latency}
+As mentioned in this section, in certain scenarios \procs are likely to go to sleep for extremely short periods of time.
+Therefore, the latency of the system calls to read from and write to an event @fd@ can negatively affect overall performance in a notable way.
+Hence, it is important to reduce the latency and contention of the notification as much as possible.
+Figure~\ref{fig:idle1} shows the basic idle-sleep data structure.
+For the notifiers, this data structure can cause contention on the lock and the event @fd@ syscall can cause notable latency.
 
 \begin{figure}
@@ -145,73 +177,40 @@
 	\input{idle1.pstex_t}
 	\caption[Basic Idle Sleep Data Structure]{Basic Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock.
-	Each \proc has a private event FD.}
+	Each \proc has a private event \lstinline{fd}.}
 	\label{fig:idle1}
 \end{figure}
 
-
-\section{Tracking Sleepers}
-Tracking which \procs are in idle sleep requires a data structure holding all the sleeping \procs, but more importantly it requires a concurrent \emph{handshake} so that no \at is stranded on a ready-queue with no active \proc.
-The classic challenge is when a \at is made ready while a \proc is going to sleep, there is a race where the new \at may not see the sleeping \proc and the sleeping \proc may not see the ready \at.
-Since \ats can be made ready by timers, \io operations or other events outside a clusre, this race can occur even if the \proc going to sleep is the only \proc awake.
-As a result, improper handling of this race can lead to all \procs going to sleep and the system deadlocking.
-
-Furthermore, the ``Race-to-Idle'' approach means that there may be contention on the data structure tracking sleepers.
-Contention slowing down \procs attempting to sleep or wake-up can be tolerated.
-These \procs are not doing useful work and therefore not contributing to overall performance.
-However, notifying, checking if a \proc must be woken-up and doing so if needed, can significantly affect overall performance and must be low cost.
-
-\subsection{Sleepers List}
-Each cluster maintains a list of idle \procs, organized as a stack.
-This ordering hopefully allows \proc at the tail to stay in idle sleep for extended period of times.
-Because of these unbalanced performance requirements, the algorithm tracking sleepers is designed to have idle \proc handle as much of the work as possible.
-The idle \procs maintain the of sleepers among themselves and notifying a sleeping \proc takes as little work as possible.
-This approach means that maintaining the list is fairly straightforward.
-The list can simply use a single lock per cluster and only \procs that are getting in and out of idle state will contend for that lock.
-
-This approach also simplifies notification.
-Indeed, \procs need to be notify when a new \at is readied, but they also must be notified during resizing, so the \gls{kthrd} can be joined.
-This means that whichever entity removes idle \procs from the sleeper list must be able to do so in any order.
-Using a simple lock over this data structure makes the removal much simpler than using a lock-free data structure.
-The notification process then simply needs to wake-up the desired idle \proc, using @pthread_cond_signal@, @write@ on an fd, etc., and the \proc will handle the rest.
-
-\subsection{Reducing Latency}
-As mentioned in this section, \procs going idle for extremely short periods of time is likely in certain common scenarios.
-Therefore, the latency of doing a system call to read from and writing to the event fd can actually negatively affect overall performance in a notable way.
-Is it important to reduce latency and contention of the notification as much as possible.
-Figure~\ref{fig:idle1} shoes the basic idle sleep data structure.
-For the notifiers, this data structure can cause contention on the lock and the event fd syscall can cause notable latency.
-
-\begin{figure}
+Contention occurs because the idle-list lock must be held to access the idle list, \eg by \procs attempting to go to sleep, \procs waking, or notification attempts.
+The contention from \procs attempting to go to sleep can be mitigated slightly by using @try_acquire@, so a \proc simply goes back to searching for \ats if the lock is held.
+This trick cannot be used when waking \procs since the waker needs to return immediately to what it was doing.
+Interestingly, general notification, \ie waking any idle processor versus a specific one, does not strictly require modifying the list.
+Here, contention can be reduced notably by having notifiers avoid the lock entirely by adding a pointer to the event @fd@ of the first idle \proc, as in Figure~\ref{fig:idle2}.
+To avoid contention among notifiers, a notifier atomically exchanges the pointer with @NULL@, so only one notifier makes the system call.
+\todo{Expand explanation of how a notification works.}
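+
+The following sketch of the lock-free notification path assumes the cluster publishes the event @fd@ of the first idle \proc through an atomic pointer (the names are hypothetical); the notifier claims the @fd@ with an atomic exchange and only performs the system call if it wins.
+\begin{lstlisting}
+#include <stdatomic.h>
+#include <unistd.h>
+#include <stdint.h>
+
+_Atomic(int *) idle_fd;	// event fd of the first idle proc, or NULL
+
+void notify_one() {
+	int * fdp = atomic_exchange( &idle_fd, NULL );	// claim without taking the list lock
+	if ( fdp ) {	// only the winning notifier makes the system call
+		uint64_t one = 1;
+		write( *fdp, &one, sizeof(one) );	// wake the idle proc
+	}	// otherwise: no idle proc, or another notifier already handled it
+}
+\end{lstlisting}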
+
+\begin{figure}[t]
 	\centering
 	\input{idle2.pstex_t}
-	\caption[Improved Idle Sleep Data Structure]{Improved Idle Sleep Data Structure \smallskip\newline An atomic pointer is added to the list, pointing to the Event FD of the first \proc on the list.}
+	\caption[Improved Idle-Sleep Data Structure]{Improved Idle-Sleep Data Structure \smallskip\newline An atomic pointer is added to the list pointing to the Event FD of the first \proc on the list.}
 	\label{fig:idle2}
 \end{figure}
 
-The contention is mostly due to the lock on the list needing to be held to get to the head \proc.
-That lock can be contended by \procs attempting to go to sleep, \procs waking or notification attempts.
-The contentention from the \procs attempting to go to sleep can be mitigated slightly by using @try\_acquire@ instead, so the \procs simply continue searching for \ats if the lock is held.
-This trick cannot be used for waking \procs since they are not in a state where they can run \ats.
-However, it is worth nothing that notification does not strictly require accessing the list or the head \proc.
-Therefore, contention can be reduced notably by having notifiers avoid the lock entirely and adding a pointer to the event fd of the first idle \proc, as in Figure~\ref{fig:idle2}.
-To avoid contention between the notifiers, instead of simply reading the atomic pointer, notifiers atomically exchange it to @null@ so only only notifier will contend on the system call.
+The next optimization is to avoid the latency of the event @fd@, which can be done by adding what is effectively a benaphore\cit{benaphore} in front of the event @fd@.
+A simple three state flag is added beside the event @fd@ to avoid unnecessary system calls, as shown in Figure~\ref{fig:idle:state}.
+In Topological Work Stealing (see Section~\ref{s:TopologicalWorkStealing}), a \proc without \ats begins searching by setting the state flag to @SEARCH@.
+If no \ats can be found to steal, the \proc then confirms it is going to sleep by atomically swapping the state to @SLEEP@.
+If the previous state is still @SEARCH@, then the \proc does read the event @fd@.
+Meanwhile, notifiers atomically exchange the state to @AWAKE@.
+If the previous state is @SLEEP@, then the notifier must write to the event @fd@.
+However, if the notify arrives almost immediately after the \proc marks itself sleeping (idle), then both reads and writes on the event @fd@ can be omitted, which reduces latency notably.
+These extensions lead to the final data structure shown in Figure~\ref{fig:idle}.
+\todo{You never talk about the Benaphore. What is its purpose and when is it used?}
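+
+The state machine described above can be sketched as follows; the helper names are hypothetical and the actual \CFA code also handles the idle-list manipulation and manual resizing.
+\begin{lstlisting}
+#include <stdatomic.h>
+enum { SEARCH, SLEEP, AWAKE };
+struct processor { _Atomic int state; int event_fd; /* ... */ };
+
+void try_sleep( struct processor * p ) {	// proc side
+	atomic_store( &p->state, SEARCH );	// start searching for tasks to steal
+	if ( steal_work( p ) ) return;	// hypothetical: found work, no sleep needed
+	if ( atomic_exchange( &p->state, SLEEP ) == SEARCH )
+		read_event_fd( p );	// no notify raced in: block on the event fd
+	// otherwise a notifier already swapped the state to AWAKE, so skip the read
+}
+void notify( struct processor * p ) {	// notifier side
+	if ( atomic_exchange( &p->state, AWAKE ) == SLEEP )
+		write_event_fd( p );	// the proc committed to sleep: the write is required
+	// otherwise the proc is still searching and sees AWAKE, so skip the write
+}
+\end{lstlisting}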
 
 \begin{figure}
 	\centering
 	\input{idle_state.pstex_t}
-	\caption[Improved Idle Sleep Data Structure]{Improved Idle Sleep Data Structure \smallskip\newline An atomic pointer is added to the list, pointing to the Event FD of the first \proc on the list.}
+	\caption[Improved Idle-Sleep Latency]{Improved Idle-Sleep Latency \smallskip\newline A three state flag is added to the event \lstinline{fd}.}
 	\label{fig:idle:state}
 \end{figure}
-
-The next optimization that can be done is to avoid the latency of the event fd when possible.
-This can be done by adding what is effectively a benaphore\cit{benaphore} in front of the event fd.
-A simple three state flag is added beside the event fd to avoid unnecessary system calls, as shown in Figure~\ref{fig:idle:state}.
-The flag starts in state @SEARCH@, while the \proc is searching for \ats to run.
-The \proc then confirms the sleep by atomically swaping the state to @SLEEP@.
-If the previous state was still @SEARCH@, then the \proc does read the event fd.
-Meanwhile, notifiers atomically exchange the state to @AWAKE@ state.
-if the previous state was @SLEEP@, then the notifier must write to the event fd.
-However, if the notify arrives almost immediately after the \proc marks itself idle, then both reads and writes on the event fd can be omitted, which reduces latency notably.
-This leads to the final data structure shown in Figure~\ref{fig:idle}.
 
 \begin{figure}
@@ -219,6 +218,6 @@
 	\input{idle.pstex_t}
 	\caption[Low-latency Idle Sleep Data Structure]{Low-latency Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock.
-	Each \proc has a private event FD with a benaphore in front of it.
-	The list also has an atomic pointer to the event fd and benaphore of the first \proc on the list.}
+	Each \proc has a private event \lstinline{fd} with a benaphore in front of it.
+	The list also has an atomic pointer to the event \lstinline{fd} and benaphore of the first \proc on the list.}
 	\label{fig:idle}
 \end{figure}
